max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
src/genie/libs/parser/junos/tests/ShowServicesAccountingErrors/cli/equal/golden_output_1_expected.py | balmasea/genieparser | 204 | 12792251 | expected_output = {
"services-accounting-information": {
"v9-error-information": [
{
"interface-name": "ms-9/0/0",
"service-set-dropped": "0",
"active-timeout-failures": "0",
"export-packet-failures": "0",
"flow-creation-failures": "0",
"memory-overload": "No",
}
]
}
}
| 1.179688 | 1 |
231-power-of-two.py | mvj3/leetcode | 0 | 12792252 | """
Question:
Power of Two
Given an integer, write a function to determine if it is a power of two.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 31274 Total Submissions: 99121 Difficulty: Easy
2. Your runtime beats 82.84% of python submissions.
"""
class Solution(object):
def isPowerOfTwo(self, n):
"""
:type n: int
:rtype: bool
"""
if n <= 0:
return False
"""
Example output
>>> bin(2)
'0b10'
>>> bin(4)
'0b100'
>>> bin(8)
'0b1000'
>>> bin(64)
'0b1000000'
>>> bin(512)
'0b1000000000'
"""
        # A power of two has the binary form '0b1' followed only by zeros,
        # e.g. bin(8) == '0b1000'.
        bin_str = bin(n)
        bin_str_left = bin_str[0:3]    # the leading '0b1'
        bin_str_right = bin_str[3:]    # remaining digits; all must be '0'
        result_left = bin_str_left == "0b1"
        result_right = bin_str_right.count("0") == len(bin_str_right)
        return result_left and result_right
def isPowerOfTwo_from_other(self, n):
"""
>>> 4&3
0
>>> bin(4)
'0b100'
>>> bin(3)
'0b11'
"""
        # n & (n - 1) clears the lowest set bit, so the result is 0 only when
        # n has exactly one bit set, i.e. when n is a positive power of two.
        return n > 0 and (n & (n - 1)) == 0
assert Solution().isPowerOfTwo(0) is False
assert Solution().isPowerOfTwo(-1) is False
assert Solution().isPowerOfTwo(1) is True
assert Solution().isPowerOfTwo(2) is True
assert Solution().isPowerOfTwo(3) is False
assert Solution().isPowerOfTwo(4) is True
assert Solution().isPowerOfTwo(15) is False
assert Solution().isPowerOfTwo(16) is True
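# Illustrative extra checks (not in the original file): the bitwise variant
# should agree with the string-based solution on the same inputs.
assert Solution().isPowerOfTwo_from_other(0) is False
assert Solution().isPowerOfTwo_from_other(1) is True
assert Solution().isPowerOfTwo_from_other(3) is False
assert Solution().isPowerOfTwo_from_other(16) is True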
| 3.9375 | 4 |
module_question/api/routers.py | balmasea... | NicolasMuras/Lookdaluv | 1 | 12792253 |
from rest_framework.routers import DefaultRouter
from module_question.api.views.module_question_viewsets import QuestionModuleViewSet
from module_question.api.views.general_views import QuestionModuleStatisticsViewSet
router = DefaultRouter()
router.register(r'question_modules', QuestionModuleViewSet, basename='question-modules')
router.register(r'question_modules_statistics', QuestionModuleStatisticsViewSet, basename='question-modules-statistic')
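# Illustrative note (not part of the original module): the routes registered
# above are exposed through `urlpatterns` below and would typically be included
# from a project-level urls.py, e.g.
#   from django.urls import include, path
#   urlpatterns = [path('api/', include('module_question.api.routers'))]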
urlpatterns = router.urls | 1.828125 | 2 |
model/MusicalInstrument.py | kerenren/flask-musical-instrument-api | 0 | 12792254 |
class MusicalInstrument:
def __init__(self, color, dimensions, name, manufacturer, model):
self._color = color
self._dimensions = dimensions
self._name = name
self._manufacturer = manufacturer
self._model = model
def play(self):
pass
def get_model(self):
return self._model
def get_manufacturer(self):
return self._manufacturer
def get_name(self):
return self._name
def set_model(self, model):
self._model = model
def set_manufacturer(self, manufacturer):
self._manufacturer = manufacturer
def set_name(self, name):
self._name = name
def __str__(self):
        return "Musical Instrument: name={}, color={}, dimensions={}, manufacturer={}, model={}".format(
            self._name, self._color, self._dimensions, self._manufacturer, self._model)
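# Minimal usage sketch (illustrative, not part of the original module; the
# attribute values are invented):
if __name__ == "__main__":
    piano = MusicalInstrument("black", "150x60x100 cm", "Grand Piano",
                              "ExampleCo", "GP-1")
    piano.set_model("GP-2")  # setters mutate the underscored attributes
    print(piano)             # formatted via __str__ defined above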
| 3.234375 | 3 |
tests/CarController.py | sleeyax/PyCmdRouter | 0 | 12792255 |
from core.Navigator import Navigator
from tests.car import Car
class CarController:
"""
Example class
"""
def __init__(self):
self.car = Car()
self.logged_in = False
self.nav = Navigator()
self.nav.set_end('>: ')
def show_motd(self):
print("""
+--------------------------------------+
+ Car control (example application) +
+ v1.0 +
+--------------------------------------+
""")
self.nav.navigate('guest')
print("Welcome, guest!")
def get_nav(self):
return self.nav.getLocation()
def login(self, username):
password = input("Enter your password: ")
if password == "***":
print("\n\r Welcome back, "+ username + "\n\r")
self.nav.navigate(username)
self.logged_in = True
else:
print("[Access denied] password incorrect")
def logout(self):
        if not self.logged_in:
print("Please login first!")
return 0
self.nav.clean()
self.nav.navigate('guest')
self.logged_in = False
print("Logged out successfully")
def set_car_color(self, color):
        if not self.logged_in:
print("Permission denied!")
return 0
self.car.set_color(color)
print("Changed car color to " + color)
def get_car_color(self):
return self.car.get_color()
def get_car_properties(self):
return self.car.get_all()
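# Illustrative driver (not part of the original test module); assumes the
# Navigator and Car helpers imported above behave as referenced.
if __name__ == "__main__":
    controller = CarController()
    controller.show_motd()
    controller.login("admin")         # prompts for the hard-coded password
    controller.set_car_color("red")   # permitted only after a successful login
    print(controller.get_car_properties())
    controller.logout()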
| 2.984375 | 3 |
gigaspace/tests/fakes/__init__.py | denismakogon/gigaspace-test-task | 0 | 12792256 | __author__ = 'denis_makogon'
| 0.910156 | 1 |
CPAC/longitudinal_pipeline/longitudinal_workflow.py | radiome-lab/C-PAC | 0 | 12792257 | # -*- coding: utf-8 -*-
import os
import copy
import time
import shutil
from nipype import config
from nipype import logging
import nipype.pipeline.engine as pe
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
from nipype.interfaces.utility import Merge, IdentityInterface
import nipype.interfaces.utility as util
from indi_aws import aws_utils
from CPAC.utils.utils import concat_list
from CPAC.utils.interfaces.datasink import DataSink
from CPAC.utils.interfaces.function import Function
import CPAC
from CPAC.registration import (
create_fsl_flirt_linear_reg,
create_fsl_fnirt_nonlinear_reg,
create_register_func_to_anat,
create_bbregister_func_to_anat,
create_wf_calculate_ants_warp,
connect_func_to_anat_init_reg,
connect_func_to_anat_bbreg,
connect_func_to_template_reg,
output_func_to_standard
)
from CPAC.registration.utils import run_ants_apply_warp
from CPAC.utils.datasource import (
resolve_resolution,
create_anat_datasource,
create_func_datasource,
create_check_for_s3_node
)
from CPAC.anat_preproc.anat_preproc import (
create_anat_preproc
)
from CPAC.seg_preproc.seg_preproc import (
connect_anat_segmentation
)
from CPAC.func_preproc.func_ingress import (
connect_func_ingress
)
from CPAC.func_preproc.func_preproc import (
connect_func_init,
connect_func_preproc,
create_func_preproc,
create_wf_edit_func
)
from CPAC.distortion_correction.distortion_correction import (
connect_distortion_correction
)
from CPAC.longitudinal_pipeline.longitudinal_preproc import (
subject_specific_template
)
from CPAC.utils import Strategy, find_files, function, Outputs
from CPAC.utils.utils import (
check_config_resources,
check_system_deps,
get_scan_params,
get_tr
)
logger = logging.getLogger('nipype.workflow')
def register_anat_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name):
brain_mask = pe.Node(interface=fsl.maths.MathsCommand(),
name=f'longitudinal_anatomical_brain_mask_{strat_name}')
brain_mask.inputs.args = '-bin'
workflow.connect(longitudinal_template_node, 'brain_template',
brain_mask, 'in_file')
strat_init_new = strat_init.fork()
strat_init_new.update_resource_pool({
'anatomical_brain': (longitudinal_template_node, 'brain_template'),
'anatomical_skull_leaf': (longitudinal_template_node, 'skull_template'),
'anatomical_brain_mask': (brain_mask, 'out_file')
})
strat_list = [strat_init_new]
# only need to run once for each subject
already_skullstripped = c.already_skullstripped[0]
if already_skullstripped == 2:
already_skullstripped = 0
elif already_skullstripped == 3:
already_skullstripped = 1
sub_mem_gb, num_cores_per_sub, num_ants_cores = \
check_config_resources(c)
new_strat_list = []
# either run FSL anatomical-to-MNI registration, or...
if 'FSL' in c.regOption:
for num_strat, strat in enumerate(strat_list):
# this is to prevent the user from running FNIRT if they are
# providing already-skullstripped inputs. this is because
# FNIRT requires an input with the skull still on
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: FNIRT (for anatomical ' \
'registration) will not work properly if you ' \
'are providing inputs that have already been ' \
'skull-stripped.\n\nEither switch to using ' \
'ANTS for registration or provide input ' \
'images that have not been already ' \
'skull-stripped.\n\n'
logger.info(err_msg)
raise Exception
flirt_reg_anat_mni = create_fsl_flirt_linear_reg(
'anat_mni_flirt_register_%s_%d' % (strat_name, num_strat)
)
# if someone doesn't have anatRegFSLinterpolation in their pipe config,
# sinc will be default option
if not hasattr(c, 'anatRegFSLinterpolation'):
setattr(c, 'anatRegFSLinterpolation', 'sinc')
if c.anatRegFSLinterpolation not in ["trilinear", "sinc", "spline"]:
                err_msg = 'The selected FSL interpolation method must be one of: "trilinear", "sinc", "spline"'
raise Exception(err_msg)
# Input registration parameters
flirt_reg_anat_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
flirt_reg_anat_mni, 'inputspec.input_brain')
# pass the reference files
node, out_file = strat['template_brain_for_anat']
workflow.connect(node, out_file, flirt_reg_anat_mni,
'inputspec.reference_brain')
if 'ANTS' in c.regOption:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(flirt_reg_anat_mni.name)
strat.update_resource_pool({
'registration_method': 'FSL',
'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'),
'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'),
'anat_longitudinal_template_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain')
})
strat_list += new_strat_list
new_strat_list = []
try:
fsl_linear_reg_only = c.fsl_linear_reg_only
except AttributeError:
fsl_linear_reg_only = [0]
if 'FSL' in c.regOption and 0 in fsl_linear_reg_only:
for num_strat, strat in enumerate(strat_list):
if strat.get('registration_method') == 'FSL':
fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg(
'anat_mni_fnirt_register_%s_%d' % (strat_name, num_strat)
)
# brain input
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.input_brain')
# brain reference
node, out_file = strat['template_brain_for_anat']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.reference_brain')
# skull input
node, out_file = strat['anatomical_skull_leaf']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.input_skull')
# skull reference
node, out_file = strat['template_skull_for_anat']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.reference_skull')
node, out_file = strat['anatomical_to_mni_linear_xfm']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.linear_aff')
node, out_file = strat['template_ref_mask']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.ref_mask')
# assign the FSL FNIRT config file specified in pipeline
# config.yml
fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig
if 1 in fsl_linear_reg_only:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(fnirt_reg_anat_mni.name)
strat.update_resource_pool({
'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'),
'anat_longitudinal_template_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain')
}, override=True)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
# or run ANTS anatomical-to-MNI registration instead
if 'ANTS' in c.regOption and \
strat.get('registration_method') != 'FSL':
ants_reg_anat_mni = \
create_wf_calculate_ants_warp(
'anat_mni_ants_register_%s_%d' % (strat_name, num_strat),
num_threads=num_ants_cores,
reg_ants_skull=c.regWithSkull
)
# if someone doesn't have anatRegANTSinterpolation in their pipe config,
# it will default to LanczosWindowedSinc
if not hasattr(c, 'anatRegANTSinterpolation'):
setattr(c, 'anatRegANTSinterpolation', 'LanczosWindowedSinc')
if c.anatRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']:
                err_msg = 'The selected ANTS interpolation method must be one of: "Linear", "BSpline", "LanczosWindowedSinc"'
raise Exception(err_msg)
# Input registration parameters
ants_reg_anat_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation
# calculating the transform with the skullstripped is
# reported to be better, but it requires very high
# quality skullstripping. If skullstripping is imprecise
# registration with skull is preferred
if 1 in c.regWithSkull:
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: You selected ' \
'to run anatomical registration with ' \
'the skull, but you also selected to ' \
'use already-skullstripped images as ' \
'your inputs. This can be changed ' \
'in your pipeline configuration ' \
'editor.\n\n'
logger.info(err_msg)
raise Exception
# get the skull-stripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_mni, 'inputspec.moving_brain')
# get the reorient skull-on anatomical from resource pool
node, out_file = strat['anatomical_skull_leaf']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_mni, 'inputspec.moving_skull')
# pass the reference file
node, out_file = strat['template_brain_for_anat']
workflow.connect(node, out_file,
ants_reg_anat_mni, 'inputspec.reference_brain')
# pass the reference file
node, out_file = strat['template_skull_for_anat']
workflow.connect(node, out_file,
ants_reg_anat_mni, 'inputspec.reference_skull')
else:
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
ants_reg_anat_mni, 'inputspec.moving_brain')
# pass the reference file
node, out_file = strat['template_brain_for_anat']
workflow.connect(node, out_file,
ants_reg_anat_mni, 'inputspec.reference_brain')
ants_reg_anat_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration
ants_reg_anat_mni.inputs.inputspec.fixed_image_mask = None
strat.append_name(ants_reg_anat_mni.name)
strat.update_resource_pool({
'registration_method': 'ANTS',
'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'),
'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'),
'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'),
'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'),
'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'),
'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'),
'anat_longitudinal_template_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain')
})
strat_list += new_strat_list
# [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS)
new_strat_list = []
if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]):
for num_strat, strat in enumerate(strat_list):
if 'FSL' in c.regOption and \
strat.get('registration_method') != 'ANTS':
# this is to prevent the user from running FNIRT if they are
# providing already-skullstripped inputs. this is because
# FNIRT requires an input with the skull still on
# TODO ASH normalize w schema validation to bool
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: FNIRT (for anatomical ' \
'registration) will not work properly if you ' \
'are providing inputs that have already been ' \
'skull-stripped.\n\nEither switch to using ' \
'ANTS for registration or provide input ' \
'images that have not been already ' \
'skull-stripped.\n\n'
logger.info(err_msg)
raise Exception
flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg(
'anat_symmetric_mni_flirt_register_%s_%d' % (strat_name, num_strat)
)
flirt_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegFSLinterpolation
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
flirt_reg_anat_symm_mni, 'inputspec.input_brain')
node, out_file = strat['template_symmetric_brain']
workflow.connect(node, out_file,
flirt_reg_anat_symm_mni, 'inputspec.reference_brain')
# if 'ANTS' in c.regOption:
# strat = strat.fork()
# new_strat_list.append(strat)
strat.append_name(flirt_reg_anat_symm_mni.name)
strat.update_resource_pool({
'anatomical_to_symmetric_mni_linear_xfm': (
flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'),
'symmetric_mni_to_anatomical_linear_xfm': (
flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'),
'symmetric_anatomical_to_standard': (
flirt_reg_anat_symm_mni, 'outputspec.output_brain')
})
strat_list += new_strat_list
new_strat_list = []
try:
fsl_linear_reg_only = c.fsl_linear_reg_only
except AttributeError:
fsl_linear_reg_only = [0]
if 'FSL' in c.regOption and 0 in fsl_linear_reg_only:
for num_strat, strat in enumerate(strat_list):
if strat.get('registration_method') == 'FSL':
fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg(
'anat_symmetric_mni_fnirt_register_%s_%d' % (strat_name, num_strat)
)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni,
'inputspec.input_brain')
node, out_file = strat['anatomical_skull_leaf']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni,
'inputspec.input_skull')
node, out_file = strat['template_brain_for_anat']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni, 'inputspec.reference_brain')
node, out_file = strat['template_symmetric_skull']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni, 'inputspec.reference_skull')
node, out_file = strat['anatomical_to_mni_linear_xfm']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni,
'inputspec.linear_aff')
node, out_file = strat['template_dilated_symmetric_brain_mask']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni, 'inputspec.ref_mask')
strat.append_name(fnirt_reg_anat_symm_mni.name)
strat.update_resource_pool({
'anatomical_to_symmetric_mni_nonlinear_xfm': (
fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'),
'symmetric_anatomical_to_standard': (
fnirt_reg_anat_symm_mni, 'outputspec.output_brain')
}, override=True)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
if 'ANTS' in c.regOption and \
strat.get('registration_method') != 'FSL':
ants_reg_anat_symm_mni = \
create_wf_calculate_ants_warp(
'anat_symmetric_mni_ants_register_%s_%d' % (strat_name, num_strat),
num_threads=num_ants_cores,
reg_ants_skull=c.regWithSkull
)
# Input registration parameters
ants_reg_anat_symm_mni.inputs.inputspec.interp = c.anatRegANTSinterpolation
# calculating the transform with the skullstripped is
# reported to be better, but it requires very high
# quality skullstripping. If skullstripping is imprecise
# registration with skull is preferred
if 1 in c.regWithSkull:
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: You selected ' \
'to run anatomical registration with ' \
'the skull, but you also selected to ' \
'use already-skullstripped images as ' \
'your inputs. This can be changed ' \
'in your pipeline configuration ' \
'editor.\n\n'
logger.info(err_msg)
raise Exception
# get the skullstripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_symm_mni, 'inputspec.moving_brain')
# pass the reference file
node, out_file = strat['template_symmetric_brain']
workflow.connect(node, out_file,
ants_reg_anat_symm_mni, 'inputspec.reference_brain')
# get the reorient skull-on anatomical from resource pool
node, out_file = strat['anatomical_skull_leaf']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_symm_mni, 'inputspec.moving_skull')
# pass the reference file
node, out_file = strat['template_symmetric_skull']
workflow.connect(node, out_file,
ants_reg_anat_symm_mni, 'inputspec.reference_skull')
else:
# get the skullstripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
ants_reg_anat_symm_mni, 'inputspec.moving_brain')
# pass the reference file
node, out_file = strat['template_symmetric_brain']
workflow.connect(node, out_file,
ants_reg_anat_symm_mni, 'inputspec.reference_brain')
ants_reg_anat_symm_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration
ants_reg_anat_symm_mni.inputs.inputspec.fixed_image_mask = None
strat.append_name(ants_reg_anat_symm_mni.name)
strat.update_resource_pool({
'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'),
'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'),
'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'),
'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'),
'symmetric_mni_to_anatomical_nonlinear_xfm': (
ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'),
'anat_to_symmetric_mni_ants_composite_xfm': (
ants_reg_anat_symm_mni, 'outputspec.composite_transform'),
'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain')
})
strat_list += new_strat_list
# Inserting Segmentation Preprocessing Workflow
workflow, strat_list = connect_anat_segmentation(workflow, strat_list, c, strat_name)
return strat_list
def create_datasink(datasink_name, config, subject_id, session_id='', strat_name='', map_node_iterfield=None):
"""
Parameters
----------
    datasink_name : str
        suffix used to name the DataSink node
    config : configuration
        pipeline configuration (output directory, pipeline name, S3 settings)
    subject_id : str
        the id of the subject
    session_id : str
        the id of the session (optional)
    strat_name : str
        name of the strategy (optional)
    map_node_iterfield : list, optional
        if given, a MapNode iterating over these fields is created
    Returns
    -------
    ds : nipype Node or MapNode wrapping a DataSink
    """
try:
encrypt_data = bool(config.s3Encryption[0])
except:
encrypt_data = False
# TODO Enforce value with schema validation
# Extract credentials path for output if it exists
try:
# Get path to creds file
creds_path = ''
if config.awsOutputBucketCredentials:
creds_path = str(config.awsOutputBucketCredentials)
creds_path = os.path.abspath(creds_path)
if config.outputDirectory.lower().startswith('s3://'):
# Test for s3 write access
s3_write_access = \
aws_utils.test_bucket_access(creds_path,
config.outputDirectory)
if not s3_write_access:
raise Exception('Not able to write to bucket!')
except Exception as e:
if config.outputDirectory.lower().startswith('s3://'):
err_msg = 'There was an error processing credentials or ' \
'accessing the S3 bucket. Check and try again.\n' \
'Error: %s' % e
raise Exception(err_msg)
if map_node_iterfield is not None:
ds = pe.MapNode(
DataSink(infields=map_node_iterfield),
name='sinker_{}'.format(datasink_name),
iterfield=map_node_iterfield
)
else:
ds = pe.Node(
DataSink(),
name='sinker_{}'.format(datasink_name)
)
ds.inputs.base_directory = config.outputDirectory
ds.inputs.creds_path = creds_path
ds.inputs.encrypt_bucket_keys = encrypt_data
ds.inputs.container = os.path.join(
'pipeline_%s_%s' % (config.pipelineName, strat_name),
subject_id, session_id
)
return ds
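# Illustrative call (identifiers are hypothetical): the returned sink writes
# under <outputDirectory>/pipeline_<pipelineName>_<strat_name>/<subject_id>/<session_id>, e.g.
#   ds = create_datasink('anatomical_brain', config, 'sub-01', 'ses-1', 'afni_skullstrip')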
def connect_anat_preproc_inputs(strat, anat_preproc, strat_name, strat_nodes_list_list, workflow):
"""
Parameters
----------
strat : Strategy
the strategy object you want to fork
anat_preproc : Workflow
the anat_preproc workflow node to be connected and added to the resource pool
strat_name : str
name of the strategy
strat_nodes_list_list : list
a list of strat_nodes_list
workflow: Workflow
main longitudinal workflow
Returns
-------
new_strat : Strategy
the fork of strat with the resource pool updated
strat_nodes_list_list : list
a list of strat_nodes_list
"""
new_strat = strat.fork()
tmp_node, out_key = new_strat['anatomical']
workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.anat')
tmp_node, out_key = new_strat['template_cmass']
workflow.connect(tmp_node, out_key, anat_preproc, 'inputspec.template_cmass')
new_strat.append_name(anat_preproc.name)
new_strat.update_resource_pool({
'anatomical_brain': (
anat_preproc, 'outputspec.brain'),
'anatomical_skull_leaf': (
anat_preproc, 'outputspec.reorient'),
'anatomical_brain_mask': (
anat_preproc, 'outputspec.brain_mask'),
})
try:
strat_nodes_list_list[strat_name].append(new_strat)
except KeyError:
strat_nodes_list_list[strat_name] = [new_strat]
return new_strat, strat_nodes_list_list
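# Illustrative call (mirrors the usage further below, names hypothetical):
#   new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(
#       strat, anat_preproc, 'afni_skullstrip', strat_nodes_list_list, workflow)
# forks `strat`, wires its 'anatomical' and 'template_cmass' resources into
# `anat_preproc`, and records the fork under strat_nodes_list_list['afni_skullstrip'].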
def pick_map(file_list, index, file_type):
if isinstance(file_list, list):
if len(file_list) == 1:
file_list = file_list[0]
for file_name in file_list:
if file_name.endswith(f"{file_type}_{index}.nii.gz"):
return file_name
return None
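# Illustrative example (hypothetical file names): with
#   file_list = ['seg_prob_0.nii.gz', 'seg_prob_1.nii.gz']
# pick_map(file_list, 1, 'prob') returns 'seg_prob_1.nii.gz'; the real inputs are
# the segmentation outputs held in the resource pool.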
def anat_longitudinal_wf(subject_id, sub_list, config):
"""
Parameters
----------
subject_id : str
the id of the subject
sub_list : list of dict
        this is a list of sessions for one subject; each session is the same
        dictionary as the one given to prep_workflow
config : configuration
a configuration object containing the information of the pipeline config. (Same as for prep_workflow)
Returns
-------
None
"""
workflow = pe.Workflow(name="anat_longitudinal_template_" + str(subject_id))
workflow.base_dir = config.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(config.crashLogDirectory)
}
# For each participant we have a list of dict (each dict is a session)
already_skullstripped = config.already_skullstripped[0]
if already_skullstripped == 2:
already_skullstripped = 0
elif already_skullstripped == 3:
already_skullstripped = 1
resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'],
output_names=['resampled_template'],
function=resolve_resolution,
as_module=True),
name='template_skull_for_anat')
resampled_template.inputs.resolution = config.resolution_for_anat
resampled_template.inputs.template = config.template_skull_for_anat
resampled_template.inputs.template_name = 'template_skull_for_anat'
resampled_template.inputs.tag = 'resolution_for_anat'
# Node to calculate the center of mass of the standard template to align the images with it.
template_center_of_mass = pe.Node(
interface=afni.CenterMass(),
name='template_skull_for_anat_center_of_mass'
)
template_center_of_mass.inputs.cm_file = "template_center_of_mass.txt"
workflow.connect(resampled_template, 'resampled_template',
template_center_of_mass, 'in_file')
# list of lists for every strategy
strat_nodes_list_list = {}
# list of the data config dictionaries to be updated during the preprocessing
# creds_list = []
session_id_list = []
# Loop over the sessions to create the input for the longitudinal algorithm
for session in sub_list:
unique_id = session['unique_id']
session_id_list.append(unique_id)
try:
creds_path = session['creds_path']
if creds_path and 'none' not in creds_path.lower():
if os.path.exists(creds_path):
input_creds_path = os.path.abspath(creds_path)
else:
err_msg = 'Credentials path: "%s" for subject "%s" session "%s" ' \
'was not found. Check this path and try ' \
'again.' % (creds_path, subject_id, unique_id)
raise Exception(err_msg)
else:
input_creds_path = None
except KeyError:
input_creds_path = None
template_keys = [
("anat", "PRIORS_CSF"),
("anat", "PRIORS_GRAY"),
("anat", "PRIORS_WHITE"),
("other", "configFileTwomm"),
("anat", "template_based_segmentation_CSF"),
("anat", "template_based_segmentation_GRAY"),
("anat", "template_based_segmentation_WHITE"),
]
for key_type, key in template_keys:
if isinstance(getattr(config, key), str):
node = create_check_for_s3_node(
name=key,
file_path=getattr(config, key),
img_type=key_type,
creds_path=input_creds_path,
dl_dir=config.workingDirectory
)
setattr(config, key, node)
strat = Strategy()
strat_list = []
node_suffix = '_'.join([subject_id, unique_id])
anat_rsc = create_anat_datasource('anat_gather_%s' % node_suffix)
anat_rsc.inputs.inputnode.set(
subject = subject_id,
anat = session['anat'],
creds_path = input_creds_path,
dl_dir = config.workingDirectory,
img_type = 'anat'
)
strat.update_resource_pool({
'anatomical': (anat_rsc, 'outputspec.anat')
})
strat.update_resource_pool({
'template_cmass': (template_center_of_mass, 'cm')
})
# Here we have the same strategies for the skull stripping as in prep_workflow
if 'brain_mask' in session.keys() and session['brain_mask'] and \
session['brain_mask'].lower() != 'none':
brain_rsc = create_anat_datasource(
'brain_gather_%s' % unique_id)
brain_rsc.inputs.inputnode.set(
subject = subject_id,
anat = session['brain_mask'],
creds_path = input_creds_path,
dl_dir = config.workingDirectory,
img_type = 'anat'
)
skullstrip_method = 'mask'
preproc_wf_name = 'anat_preproc_mask_%s' % node_suffix
strat.append_name(brain_rsc.name)
strat.update_resource_pool({
'anatomical_brain_mask': (brain_rsc, 'outputspec.anat')
})
anat_preproc = create_anat_preproc(
method=skullstrip_method,
config=config,
wf_name=preproc_wf_name)
workflow.connect(brain_rsc, 'outputspec.brain_mask',
anat_preproc, 'inputspec.brain_mask')
new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(
strat, anat_preproc, skullstrip_method + "_skullstrip", strat_nodes_list_list, workflow)
strat_list.append(new_strat)
elif already_skullstripped:
skullstrip_method = None
preproc_wf_name = 'anat_preproc_already_%s' % node_suffix
anat_preproc = create_anat_preproc(
method=skullstrip_method,
already_skullstripped=True,
config=config,
wf_name=preproc_wf_name
)
new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(
strat, anat_preproc, 'already_skullstripped', strat_nodes_list_list, workflow)
strat_list.append(new_strat)
else:
# TODO add other SS methods
if "AFNI" in config.skullstrip_option:
skullstrip_method = 'afni'
preproc_wf_name = 'anat_preproc_afni_%s' % node_suffix
anat_preproc = create_anat_preproc(
method=skullstrip_method,
config=config,
wf_name=preproc_wf_name)
anat_preproc.inputs.AFNI_options.set(
shrink_factor=config.skullstrip_shrink_factor,
var_shrink_fac=config.skullstrip_var_shrink_fac,
shrink_fac_bot_lim=config.skullstrip_shrink_factor_bot_lim,
avoid_vent=config.skullstrip_avoid_vent,
niter=config.skullstrip_n_iterations,
pushout=config.skullstrip_pushout,
touchup=config.skullstrip_touchup,
fill_hole=config.skullstrip_fill_hole,
avoid_eyes=config.skullstrip_avoid_eyes,
use_edge=config.skullstrip_use_edge,
exp_frac=config.skullstrip_exp_frac,
smooth_final=config.skullstrip_smooth_final,
push_to_edge=config.skullstrip_push_to_edge,
use_skull=config.skullstrip_use_skull,
perc_int=config.skullstrip_perc_int,
max_inter_iter=config.skullstrip_max_inter_iter,
blur_fwhm=config.skullstrip_blur_fwhm,
fac=config.skullstrip_fac,
monkey=config.skullstrip_monkey,
mask_vol=config.skullstrip_mask_vol
)
new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(
strat, anat_preproc, skullstrip_method + "_skullstrip", strat_nodes_list_list, workflow)
strat_list.append(new_strat)
if "BET" in config.skullstrip_option:
skullstrip_method = 'fsl'
preproc_wf_name = 'anat_preproc_fsl_%s' % node_suffix
anat_preproc = create_anat_preproc(
method=skullstrip_method,
config=config,
wf_name=preproc_wf_name)
anat_preproc.inputs.BET_options.set(
frac=config.bet_frac,
mask_boolean=config.bet_mask_boolean,
mesh_boolean=config.bet_mesh_boolean,
outline=config.bet_outline,
padding=config.bet_padding,
radius=config.bet_radius,
reduce_bias=config.bet_reduce_bias,
remove_eyes=config.bet_remove_eyes,
robust=config.bet_robust,
skull=config.bet_skull,
surfaces=config.bet_surfaces,
threshold=config.bet_threshold,
vertical_gradient=config.bet_vertical_gradient,
)
new_strat, strat_nodes_list_list = connect_anat_preproc_inputs(
strat, anat_preproc, skullstrip_method + "_skullstrip", strat_nodes_list_list, workflow)
strat_list.append(new_strat)
if not any(o in config.skullstrip_option for o in
["AFNI", "BET"]):
err = '\n\n[!] C-PAC says: Your skull-stripping ' \
'method options setting does not include either' \
' \'AFNI\' or \'BET\'.\n\n Options you ' \
'provided:\nskullstrip_option: {0}\n\n'.format(
str(config.skullstrip_option))
raise Exception(err)
# Here we have all the anat_preproc set up for every session of the subject
strat_init = Strategy()
templates_for_resampling = [
(config.resolution_for_anat, config.template_brain_only_for_anat, 'template_brain_for_anat', 'resolution_for_anat'),
(config.resolution_for_anat, config.template_skull_for_anat, 'template_skull_for_anat', 'resolution_for_anat'),
(config.resolution_for_anat, config.template_symmetric_brain_only, 'template_symmetric_brain', 'resolution_for_anat'),
(config.resolution_for_anat, config.template_symmetric_skull, 'template_symmetric_skull', 'resolution_for_anat'),
(config.resolution_for_anat, config.dilated_symmetric_brain_mask, 'template_dilated_symmetric_brain_mask',
'resolution_for_anat'),
(config.resolution_for_anat, config.ref_mask, 'template_ref_mask', 'resolution_for_anat'),
(config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc',
'resolution_for_func_preproc'),
(config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc',
'resolution_for_func_preproc'),
(config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative',
'resolution_for_func_preproc'),
(config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative',
'resolution_for_func_preproc')
]
# update resampled template to resource pool
for resolution, template, template_name, tag in templates_for_resampling:
resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'],
output_names=['resampled_template'],
function=resolve_resolution,
as_module=True),
name='resampled_' + template_name)
resampled_template.inputs.resolution = resolution
resampled_template.inputs.template = template
resampled_template.inputs.template_name = template_name
resampled_template.inputs.tag = tag
strat_init.update_resource_pool({
template_name: (resampled_template, 'resampled_template')
})
# loop over the different skull stripping strategies
for strat_name, strat_nodes_list in strat_nodes_list_list.items():
node_suffix = '_'.join([strat_name, subject_id])
# Merge node to feed the anat_preproc outputs to the longitudinal template generation
brain_merge_node = pe.Node(
interface=Merge(len(strat_nodes_list)),
name="anat_longitudinal_brain_merge_" + node_suffix)
skull_merge_node = pe.Node(
interface=Merge(len(strat_nodes_list)),
name="anat_longitudinal_skull_merge_" + node_suffix)
# This node will generate the longitudinal template (the functions are in longitudinal_preproc)
# Later other algorithms could be added to calculate it, like the multivariate template from ANTS
# It would just require to change it here.
template_node = subject_specific_template(
workflow_name='subject_specific_anat_template_' + node_suffix
)
unique_id_list = [i.get_name()[0].split('_')[-1] for i in strat_nodes_list]
template_node.inputs.set(
avg_method=config.longitudinal_template_average_method,
dof=config.longitudinal_template_dof,
interp=config.longitudinal_template_interp,
cost=config.longitudinal_template_cost,
convergence_threshold=config.longitudinal_template_convergence_threshold,
thread_pool=config.longitudinal_template_thread_pool,
unique_id_list=unique_id_list
)
workflow.connect(brain_merge_node, 'out', template_node, 'input_brain_list')
workflow.connect(skull_merge_node, 'out', template_node, 'input_skull_list')
reg_strat_list = register_anat_longitudinal_template_to_standard(template_node, config, workflow, strat_init, strat_name)
# Register T1 to the standard template
# TODO add session information in node name
for num_reg_strat, reg_strat in enumerate(reg_strat_list):
if reg_strat.get('registration_method') == 'FSL':
fsl_apply_warp = pe.MapNode(interface=fsl.ApplyWarp(),
name='fsl_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name),
iterfield=['in_file'])
workflow.connect(template_node, "output_brain_list",
fsl_apply_warp, 'in_file')
node, out_file = reg_strat['template_brain_for_anat']
workflow.connect(node, out_file,
fsl_apply_warp, 'ref_file')
# TODO how to include linear xfm?
# node, out_file = reg_strat['anatomical_to_mni_linear_xfm']
# workflow.connect(node, out_file, fsl_apply_warp, 'premat')
node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm']
workflow.connect(node, out_file,
fsl_apply_warp, 'field_file')
reg_strat.update_resource_pool({
'anatomical_to_standard': (fsl_apply_warp, 'out_file')
})
elif reg_strat.get('registration_method') == 'ANTS':
ants_apply_warp = pe.MapNode(util.Function(input_names=['moving_image',
'reference',
'initial',
'rigid',
'affine',
'nonlinear',
'interp'],
output_names=['out_image'],
function=run_ants_apply_warp),
name='ants_apply_warp_anat_longitudinal_to_standard_{0}_'.format(strat_name),
iterfield=['moving_image'])
workflow.connect(template_node, "output_brain_list", ants_apply_warp, 'moving_image')
node, out_file = reg_strat['template_brain_for_anat']
workflow.connect(node, out_file, ants_apply_warp, 'reference')
node, out_file = reg_strat['ants_initial_xfm']
workflow.connect(node, out_file, ants_apply_warp, 'initial')
node, out_file = reg_strat['ants_rigid_xfm']
workflow.connect(node, out_file, ants_apply_warp, 'rigid')
node, out_file = reg_strat['ants_affine_xfm']
workflow.connect(node, out_file, ants_apply_warp, 'affine')
node, out_file = reg_strat['anatomical_to_mni_nonlinear_xfm']
workflow.connect(node, out_file, ants_apply_warp, 'nonlinear')
ants_apply_warp.inputs.interp = config.anatRegANTSinterpolation
reg_strat.update_resource_pool({
'anatomical_to_standard': (ants_apply_warp, 'out_image')
})
# Register tissue segmentation from longitudinal template space to native space
fsl_convert_xfm = pe.MapNode(interface=fsl.ConvertXFM(),
name=f'fsl_xfm_longitudinal_to_native_{strat_name}',
iterfield=['in_file'])
fsl_convert_xfm.inputs.invert_xfm = True
workflow.connect(template_node, "warp_list",
fsl_convert_xfm, 'in_file')
def seg_apply_warp(strat_name, resource, type='str', file_type=None):
if type == 'str':
fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(),
name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{strat_name}',
iterfield=['reference', 'in_matrix_file'])
fsl_apply_xfm.inputs.interp = 'nearestneighbour'
node, out_file = reg_strat[resource]
workflow.connect(node, out_file,
fsl_apply_xfm, 'in_file')
workflow.connect(brain_merge_node, 'out',
fsl_apply_xfm, 'reference')
workflow.connect(fsl_convert_xfm, "out_file",
fsl_apply_xfm, 'in_matrix_file')
reg_strat.update_resource_pool({
resource:(fsl_apply_xfm, 'out_file')
}, override=True)
elif type == 'list':
for index in range(3):
fsl_apply_xfm = pe.MapNode(interface=fsl.ApplyXFM(),
name=f'fsl_apply_xfm_longitudinal_to_native_{resource}_{index}_{strat_name}',
iterfield=['reference', 'in_matrix_file'])
fsl_apply_xfm.inputs.interp = 'nearestneighbour'
pick_seg_map = pe.Node(Function(input_names=['file_list', 'index', 'file_type'],
output_names=['file_name'],
function=pick_map),
name=f'pick_{file_type}_{index}_{strat_name}')
node, out_file = reg_strat[resource]
workflow.connect(node, out_file,
pick_seg_map, 'file_list')
pick_seg_map.inputs.index=index
pick_seg_map.inputs.file_type=file_type
workflow.connect(pick_seg_map, 'file_name',
fsl_apply_xfm, 'in_file')
workflow.connect(brain_merge_node, 'out',
fsl_apply_xfm, 'reference')
workflow.connect(fsl_convert_xfm, 'out_file',
fsl_apply_xfm, 'in_matrix_file')
concat_seg_map = pe.Node(Function(input_names=['in_list1', 'in_list2'],
output_names=['out_list'],
function=concat_list),
name=f'concat_{file_type}_{index}_{strat_name}')
if index == 0:
workflow.connect(fsl_apply_xfm, 'out_file',
concat_seg_map, 'in_list1')
reg_strat.update_resource_pool({
f'temporary_{resource}_list':(concat_seg_map, 'out_list')
})
else:
workflow.connect(fsl_apply_xfm, 'out_file',
concat_seg_map, 'in_list2')
node, out_file = reg_strat[f'temporary_{resource}_list']
workflow.connect(node, out_file,
concat_seg_map, 'in_list1')
reg_strat.update_resource_pool({
f'temporary_{resource}_list':(concat_seg_map, 'out_list')
}, override=True)
reg_strat.update_resource_pool({
resource:(concat_seg_map, 'out_list')
}, override=True)
for seg in ['anatomical_gm_mask', 'anatomical_csf_mask', 'anatomical_wm_mask',
'seg_mixeltype', 'seg_partial_volume_map']:
seg_apply_warp(strat_name=strat_name, resource=seg)
# apply warp on list
seg_apply_warp(strat_name=strat_name, resource='seg_probability_maps', type='list', file_type='prob')
seg_apply_warp(strat_name=strat_name, resource='seg_partial_volume_files', type='list', file_type='pve')
# Update resource pool
# longitudinal template
rsc_key = 'anatomical_longitudinal_template_'
ds_template = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name)
workflow.connect(template_node, 'brain_template',
ds_template, rsc_key)
# T1 to longitudinal template warp
rsc_key = 'anatomical_to_longitudinal_template_warp_'
ds_warp_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name,
map_node_iterfield=['anatomical_to_longitudinal_template_warp'])
workflow.connect(template_node, "warp_list",
ds_warp_list, 'anatomical_to_longitudinal_template_warp')
# T1 in longitudinal template space
rsc_key = 'anatomical_to_longitudinal_template_'
t1_list = create_datasink(rsc_key + node_suffix, config, subject_id, strat_name='longitudinal_'+strat_name,
map_node_iterfield=['anatomical_to_longitudinal_template'])
workflow.connect(template_node, "output_brain_list",
t1_list, 'anatomical_to_longitudinal_template')
# longitudinal to standard registration items
for num_strat, strat in enumerate(reg_strat_list):
for rsc_key in strat.resource_pool.keys():
rsc_nodes_suffix = '_'.join(['_longitudinal_to_standard', strat_name, str(num_strat)])
if rsc_key in Outputs.any:
node, rsc_name = strat[rsc_key]
ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id, strat_name='longitudinal_'+strat_name)
workflow.connect(node, rsc_name, ds, rsc_key)
# individual minimal preprocessing items
for i in range(len(strat_nodes_list)):
rsc_nodes_suffix = "_%s_%d" % (node_suffix, i)
for rsc_key in strat_nodes_list[i].resource_pool.keys():
if rsc_key in Outputs.any:
node, rsc_name = strat_nodes_list[i][rsc_key]
ds = create_datasink(rsc_key + rsc_nodes_suffix, config, subject_id,
session_id_list[i], 'longitudinal_'+strat_name)
workflow.connect(node, rsc_name, ds, rsc_key)
rsc_key = 'anatomical_brain'
anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key]
workflow.connect(anat_preproc_node,
rsc_name, brain_merge_node,
                                 'in{}'.format(i + 1))  # i + 1 because the Merge node's inputs start at 'in1'
rsc_key = 'anatomical_skull_leaf'
anat_preproc_node, rsc_name = strat_nodes_list[i][rsc_key]
workflow.connect(anat_preproc_node,
rsc_name, skull_merge_node,
'in{}'.format(i + 1))
workflow.run()
return reg_strat_list # strat_nodes_list_list # for func wf?
# TODO check:
# 1 func alone works
# 2 anat + func works, pass anat strategy list?
def func_preproc_longitudinal_wf(subject_id, sub_list, config):
"""
Parameters
----------
subject_id : string
the id of the subject
sub_list : list of dict
        this is a list of sessions for one subject; each session is the same
        dictionary as the one given to prep_workflow
config : configuration
a configuration object containing the information of the pipeline config. (Same as for prep_workflow)
Returns
-------
strat_list_ses_list : list of list
a list of strategies; within each strategy, a list of sessions
"""
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.base_directory = config.workingDirectory
session_id_list = []
ses_list_strat_list = {}
workflow_name = 'func_preproc_longitudinal_' + str(subject_id)
workflow = pe.Workflow(name=workflow_name)
workflow.base_dir = config.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(config.crashLogDirectory)
}
for sub_dict in sub_list:
if 'func' in sub_dict or 'rest' in sub_dict:
if 'func' in sub_dict:
func_paths_dict = sub_dict['func']
else:
func_paths_dict = sub_dict['rest']
unique_id = sub_dict['unique_id']
session_id_list.append(unique_id)
try:
creds_path = sub_dict['creds_path']
if creds_path and 'none' not in creds_path.lower():
if os.path.exists(creds_path):
input_creds_path = os.path.abspath(creds_path)
else:
err_msg = 'Credentials path: "%s" for subject "%s" was not ' \
'found. Check this path and try again.' % (
creds_path, subject_id)
raise Exception(err_msg)
else:
input_creds_path = None
except KeyError:
input_creds_path = None
strat = Strategy()
strat_list = [strat]
node_suffix = '_'.join([subject_id, unique_id])
# Functional Ingress Workflow
# add optional flag
workflow, diff, blip, fmap_rp_list = connect_func_ingress(workflow,
strat_list,
config,
sub_dict,
subject_id,
input_creds_path,
node_suffix)
# Functional Initial Prep Workflow
workflow, strat_list = connect_func_init(workflow, strat_list, config, node_suffix)
# Functional Image Preprocessing Workflow
workflow, strat_list = connect_func_preproc(workflow, strat_list, config, node_suffix)
# Distortion Correction
workflow, strat_list = connect_distortion_correction(workflow,
strat_list,
config,
diff,
blip,
fmap_rp_list,
node_suffix)
ses_list_strat_list[node_suffix] = strat_list
# Here we have all the func_preproc set up for every session of the subject
# TODO create a list of list ses_list_strat_list
# a list of skullstripping strategies,
# a list of sessions within each strategy list
# TODO rename and reorganize dict
# TODO update strat name
strat_list_ses_list = {}
strat_list_ses_list['func_default'] = []
for sub_ses_id, strat_nodes_list in ses_list_strat_list.items():
strat_list_ses_list['func_default'].append(strat_nodes_list[0])
workflow.run()
return strat_list_ses_list
def merge_func_preproc(working_directory):
"""
Parameters
----------
working_directory : string
a path to the working directory
Returns
-------
brain_list : list
a list of func preprocessed brain
skull_list : list
a list of func preprocessed skull
"""
brain_list = []
skull_list = []
for dirpath, dirnames, filenames in os.walk(working_directory):
for f in filenames:
if 'func_get_preprocessed_median' in dirpath and '.nii.gz' in f:
filepath = os.path.join(dirpath, f)
brain_list.append(filepath)
if 'func_get_motion_correct_median' in dirpath and '.nii.gz' in f:
filepath = os.path.join(dirpath, f)
skull_list.append(filepath)
brain_list.sort()
skull_list.sort()
return brain_list, skull_list
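# Illustrative call (path is hypothetical): merge_func_preproc('/tmp/cpac_working')
# returns two sorted lists of .nii.gz paths, one gathered from
# 'func_get_preprocessed_median' directories and one from
# 'func_get_motion_correct_median' directories.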
def register_func_longitudinal_template_to_standard(longitudinal_template_node, c, workflow, strat_init, strat_name):
sub_mem_gb, num_cores_per_sub, num_ants_cores = \
check_config_resources(c)
strat_init_new = strat_init.fork()
strat_init_new.update_resource_pool({
'functional_preprocessed_median': (longitudinal_template_node, 'brain_template'),
'motion_correct_median': (longitudinal_template_node, 'skull_template')
})
strat_list = [strat_init_new]
new_strat_list = []
if 'FSL' in c.regOption:
for num_strat, strat in enumerate(strat_list):
flirt_reg_func_mni = create_fsl_flirt_linear_reg(
'func_mni_flirt_register_%s_%d' % (strat_name, num_strat)
)
# if someone doesn't have anatRegFSLinterpolation in their pipe config,
# sinc will be default option
if not hasattr(c, 'funcRegFSLinterpolation'):
setattr(c, 'funcRegFSLinterpolation', 'sinc')
if c.funcRegFSLinterpolation not in ["trilinear", "sinc", "spline"]:
                err_msg = 'The selected FSL interpolation method must be one of: "trilinear", "sinc", "spline"'
raise Exception(err_msg)
# Input registration parameters
flirt_reg_func_mni.inputs.inputspec.interp = c.funcRegFSLinterpolation
node, out_file = strat['functional_preprocessed_median']
workflow.connect(node, out_file,
flirt_reg_func_mni, 'inputspec.input_brain')
# pass the reference files
node, out_file = strat['template_brain_for_func_preproc']
workflow.connect(node, out_file, flirt_reg_func_mni,
'inputspec.reference_brain')
if 'ANTS' in c.regOption:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(flirt_reg_func_mni.name)
strat.update_resource_pool({
'registration_method': 'FSL',
'func_longitudinal_to_mni_linear_xfm': (flirt_reg_func_mni, 'outputspec.linear_xfm'),
'mni_to_func_longitudinal_linear_xfm': (flirt_reg_func_mni, 'outputspec.invlinear_xfm'),
'func_longitudinal_template_to_standard': (flirt_reg_func_mni, 'outputspec.output_brain')
})
strat_list += new_strat_list
new_strat_list = []
try:
fsl_linear_reg_only = c.fsl_linear_reg_only
except AttributeError:
fsl_linear_reg_only = [0]
if 'FSL' in c.regOption and 0 in fsl_linear_reg_only:
for num_strat, strat in enumerate(strat_list):
if strat.get('registration_method') == 'FSL':
fnirt_reg_func_mni = create_fsl_fnirt_nonlinear_reg(
'func_mni_fnirt_register_%s_%d' % (strat_name, num_strat)
)
# brain input
node, out_file = strat['functional_preprocessed_median']
workflow.connect(node, out_file,
fnirt_reg_func_mni, 'inputspec.input_brain')
# brain reference
node, out_file = strat['template_brain_for_func_preproc']
workflow.connect(node, out_file,
fnirt_reg_func_mni, 'inputspec.reference_brain')
# skull input
node, out_file = strat['motion_correct_median']
workflow.connect(node, out_file,
fnirt_reg_func_mni, 'inputspec.input_skull')
# skull reference
node, out_file = strat['template_skull_for_func_preproc']
workflow.connect(node, out_file,
fnirt_reg_func_mni, 'inputspec.reference_skull')
node, out_file = strat['func_longitudinal_to_mni_linear_xfm']
workflow.connect(node, out_file,
fnirt_reg_func_mni, 'inputspec.linear_aff')
node, out_file = strat['template_ref_mask']
workflow.connect(node, out_file,
fnirt_reg_func_mni, 'inputspec.ref_mask')
# assign the FSL FNIRT config file specified in pipeline
# config.yml
fnirt_reg_func_mni.inputs.inputspec.fnirt_config = c.fnirtConfig
if 1 in fsl_linear_reg_only:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(fnirt_reg_func_mni.name)
strat.update_resource_pool({
'func_longitudinal_to_mni_nonlinear_xfm': (fnirt_reg_func_mni, 'outputspec.nonlinear_xfm'),
'func_longitudinal_template_to_standard': (fnirt_reg_func_mni, 'outputspec.output_brain')
}, override=True)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
# or run ANTS anatomical-to-MNI registration instead
if 'ANTS' in c.regOption and \
strat.get('registration_method') != 'FSL':
ants_reg_func_mni = \
create_wf_calculate_ants_warp(
'func_mni_ants_register_%s_%d' % (strat_name, num_strat),
num_threads=num_ants_cores,
reg_ants_skull=c.regWithSkull
)
if not hasattr(c, 'funcRegANTSinterpolation'):
setattr(c, 'funcRegANTSinterpolation', 'LanczosWindowedSinc')
if c.funcRegANTSinterpolation not in ['Linear', 'BSpline', 'LanczosWindowedSinc']:
                err_msg = 'The selected ANTS interpolation method must be one of: "Linear", "BSpline", "LanczosWindowedSinc"'
raise Exception(err_msg)
# Input registration parameters
ants_reg_func_mni.inputs.inputspec.interp = c.funcRegANTSinterpolation
# calculating the transform with the skullstripped is
# reported to be better, but it requires very high
# quality skullstripping. If skullstripping is imprecise
# registration with skull is preferred
if 1 in c.regWithSkull:
# get the skull-stripped anatomical from resource pool
node, out_file = strat['functional_preprocessed_median']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_func_mni, 'inputspec.moving_brain')
# get the reorient skull-on anatomical from resource pool
node, out_file = strat['motion_correct_median']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_func_mni, 'inputspec.moving_skull')
# pass the reference file
node, out_file = strat['template_brain_for_func_preproc']
workflow.connect(node, out_file,
ants_reg_func_mni, 'inputspec.reference_brain')
# pass the reference file
node, out_file = strat['template_skull_for_func_preproc']
workflow.connect(node, out_file,
ants_reg_func_mni, 'inputspec.reference_skull')
else:
node, out_file = strat['functional_preprocessed_median']
workflow.connect(node, out_file,
ants_reg_func_mni, 'inputspec.moving_brain')
# pass the reference file
node, out_file = strat['template_brain_for_func_preproc']
workflow.connect(node, out_file,
ants_reg_func_mni, 'inputspec.reference_brain')
ants_reg_func_mni.inputs.inputspec.ants_para = c.ANTs_para_T1_registration
ants_reg_func_mni.inputs.inputspec.fixed_image_mask = None
strat.append_name(ants_reg_func_mni.name)
strat.update_resource_pool({
'registration_method': 'ANTS',
'ants_initial_xfm': (ants_reg_func_mni, 'outputspec.ants_initial_xfm'),
'ants_rigid_xfm': (ants_reg_func_mni, 'outputspec.ants_rigid_xfm'),
'ants_affine_xfm': (ants_reg_func_mni, 'outputspec.ants_affine_xfm'),
'func_longitudinal_to_mni_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.warp_field'),
'mni_to_func_longitudinal_nonlinear_xfm': (ants_reg_func_mni, 'outputspec.inverse_warp_field'),
'func_longitudinal_to_mni_ants_composite_xfm': (ants_reg_func_mni, 'outputspec.composite_transform'),
'func_longitudinal_template_to_standard': (ants_reg_func_mni, 'outputspec.normalized_output_brain')
})
strat_list += new_strat_list
'''
# Func -> T1 Registration (Initial Linear Reg)
workflow, strat_list, diff_complete = connect_func_to_anat_init_reg(workflow, strat_list, c)
# Func -> T1 Registration (BBREG)
workflow, strat_list = connect_func_to_anat_bbreg(workflow, strat_list, c, diff_complete)
# Func -> T1/EPI Template
workflow, strat_list = connect_func_to_template_reg(workflow, strat_list, c)
'''
return workflow, strat_list
def func_longitudinal_template_wf(subject_id, strat_list, config):
'''
Parameters
----------
subject_id : string
the id of the subject
strat_list : list of list
first level strategy, second level session
config : configuration
a configuration object containing the information of the pipeline config.
Returns
-------
None
'''
workflow_name = 'func_longitudinal_template_' + str(subject_id)
workflow = pe.Workflow(name=workflow_name)
workflow.base_dir = config.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(config.crashLogDirectory)
}
# strat_nodes_list = strat_list['func_default']
strat_init = Strategy()
templates_for_resampling = [
(config.resolution_for_func_preproc, config.template_brain_only_for_func, 'template_brain_for_func_preproc', 'resolution_for_func_preproc'),
(config.resolution_for_func_preproc, config.template_skull_for_func, 'template_skull_for_func_preproc', 'resolution_for_func_preproc'),
(config.resolution_for_func_preproc, config.ref_mask_for_func, 'template_ref_mask', 'resolution_for_func_preproc'), # TODO check float resolution
(config.resolution_for_func_preproc, config.template_epi, 'template_epi', 'resolution_for_func_preproc'),
(config.resolution_for_func_derivative, config.template_epi, 'template_epi_derivative', 'resolution_for_func_derivative'),
(config.resolution_for_func_derivative, config.template_brain_only_for_func, 'template_brain_for_func_derivative', 'resolution_for_func_preproc'),
(config.resolution_for_func_derivative, config.template_skull_for_func, 'template_skull_for_func_derivative', 'resolution_for_func_preproc'),
]
for resolution, template, template_name, tag in templates_for_resampling:
resampled_template = pe.Node(Function(input_names=['resolution', 'template', 'template_name', 'tag'],
output_names=['resampled_template'],
function=resolve_resolution,
as_module=True),
name='resampled_' + template_name)
resampled_template.inputs.resolution = resolution
resampled_template.inputs.template = template
resampled_template.inputs.template_name = template_name
resampled_template.inputs.tag = tag
strat_init.update_resource_pool({
template_name: (resampled_template, 'resampled_template')
})
merge_func_preproc_node = pe.Node(Function(input_names=['working_directory'],
output_names=['brain_list', 'skull_list'],
function=merge_func_preproc,
as_module=True),
name='merge_func_preproc')
merge_func_preproc_node.inputs.working_directory = config.workingDirectory
template_node = subject_specific_template(
workflow_name='subject_specific_func_template_' + subject_id
)
template_node.inputs.set(
avg_method=config.longitudinal_template_average_method,
dof=config.longitudinal_template_dof,
interp=config.longitudinal_template_interp,
cost=config.longitudinal_template_cost,
convergence_threshold=config.longitudinal_template_convergence_threshold,
thread_pool=config.longitudinal_template_thread_pool,
)
workflow.connect(merge_func_preproc_node, 'brain_list',
template_node, 'input_brain_list')
workflow.connect(merge_func_preproc_node, 'skull_list',
template_node, 'input_skull_list')
workflow, strat_list = register_func_longitudinal_template_to_standard(
template_node,
config,
workflow,
strat_init,
'default'
)
workflow.run()
return | 1.492188 | 1 |
app.py | herbeeg/famitracker-export-converter | 0 | 12792258 | import sys
import time
import constants
import parser.correct as correct
import parser.export as export
import parser.read as read
class App:
"""
Base container class to divert all export
file conversions and error handling to
their respective packages and
libraries.
"""
def __init__(self, expansion=None, filename=None):
"""
Initialise the command line session, using
the correct expansion type to convert
and output readable data for our
visualiser to parse.
Args:
expansion (String): FamiTracker expansion chip to use as reference for parsing channel data. Defaults to None.
filename (String): Name of local file to be housed in same directory as script execution. Defaults to None.
"""
self.expansion = expansion
self.filename = filename
self.validateParameters()
correct.FixExport(self.filename)
"""Rewrite FamiTracker export file as there are existing problems that mask required data."""
self.reader = read.FileReader(self.filename)
full_path = self.reader.start()
"""Attempt to start reading the file if validation passes."""
timestamp = int(time.time())
"""Remove decimal places created by time.time() floating point precision for clean filenames."""
self.exporter = export.DataExporter(timestamp, full_path, self.expansion)
self.exporter.start()
"""Attempt to start writing to JSON config and CSV data files."""
def validateParameters(self):
"""
Ensure that the information passed to the
parser by the user via the command line
is in the correct format.
"""
if self.expansion is None:
"""Terminate execution if no expansion chip is provided."""
sys.stdout.write('Please provide a valid expansion chip name for parsing. Terminating...\n')
sys.exit()
elif self.expansion.lower() not in constants.expansions():
"""Ensure case-sensitivity doesn't get in the way of conversions."""
sys.stdout.write('Invalid expansion chip provided. Please reference the README for accepted formats. Terminating...\n')
sys.exit()
if self.filename is None:
"""Terminate execution if no filename is provided."""
sys.stdout.write('Please provide a valid .txt file for parsing. Terminating...\n')
sys.exit()
elif not self.filename.lower().endswith('.txt'):
"""Ensure case-sensitivity doesn't get in the way of conversions."""
sys.stdout.write('Invalid filename provided. Please reference the README for accepted formats. Terminating...\n')
sys.exit()
if '__main__' == __name__:
"""Initialise root app when file is executed via the command line."""
if 2 < len(sys.argv):
app = App(sys.argv[1], sys.argv[2])
elif 1 < len(sys.argv):
app = App(sys.argv[1])
else:
app = App()
| 3.0625 | 3 |
examples/python/single_bodies/example_meshes_user.py | wpumacay/tysocTerrain | 1 | 12792259 | #!/usr/bin/env python
import sys
import loco
import tinymath as tm
import numpy as np
PHYSICS_BACKEND = loco.sim.PHYSICS_NONE
RENDERING_BACKEND = loco.sim.RENDERING_GLVIZ_GLFW
COM_TETRAHEDRON = [ 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0 ]
TETRAHEDRON_VERTICES = [ 0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2],
1.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2],
0.0 - COM_TETRAHEDRON[0], 1.0 - COM_TETRAHEDRON[1], 0.0 - COM_TETRAHEDRON[2],
0.0 - COM_TETRAHEDRON[0], 0.0 - COM_TETRAHEDRON[1], 1.0 - COM_TETRAHEDRON[2] ]
TETRAHEDRON_FACES = [ 0, 1, 3,
0, 2, 1,
0, 3, 2,
1, 2, 3 ]
COM_RAMP = [ 0.0, 7.0 / 9.0, 4.0 / 9.0 ]
RAMP_VERTICES = [ 1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],
1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],
1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2],
1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2],
-1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],
-1.0 - COM_RAMP[0], 2.0 - COM_RAMP[1], 0.0 - COM_RAMP[2],
-1.0 - COM_RAMP[0], 1.0 - COM_RAMP[1], 1.0 - COM_RAMP[2],
-1.0 - COM_RAMP[0], 0.0 - COM_RAMP[1], 1.0 - COM_RAMP[2] ]
RAMP_FACES = [ 0, 1, 2,
0, 2, 3,
0, 4, 5,
0, 5, 1,
0, 3, 7,
0, 7, 4,
2, 6, 7,
2, 7, 3,
1, 5, 6,
1, 6, 2,
4, 7, 6,
4, 6, 5 ]
def create_path_part( idx ) :
height = 1.0
inner_rad = 2.0
outer_rad = 3.0
dtheta = 2.0 * np.pi / 12.0
ctheta = np.cos( dtheta * idx )
stheta = np.sin( dtheta * idx )
ctheta_n = np.cos( dtheta * ( idx + 1 ) )
stheta_n = np.sin( dtheta * ( idx + 1 ) )
half_rad = 0.5* ( inner_rad + outer_rad )
com_position = [ half_rad * np.cos( ( idx + 0.5 ) * dtheta ),
half_rad * np.sin( ( idx + 0.5 ) * dtheta ),
0.5 * height ]
vertices = [ inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], 0.5 * height,
outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], 0.5 * height,
outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], 0.5 * height,
inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], 0.5 * height,
inner_rad * ctheta - com_position[0], inner_rad * stheta - com_position[1], -0.5 * height,
outer_rad * ctheta - com_position[0], outer_rad * stheta - com_position[1], -0.5 * height,
outer_rad * ctheta_n - com_position[0], outer_rad * stheta_n - com_position[1], -0.5 * height,
inner_rad * ctheta_n - com_position[0], inner_rad * stheta_n - com_position[1], -0.5 * height ]
faces = [ 0, 1, 2,
0, 2, 3,
0, 4, 5,
0, 5, 1,
0, 3, 7,
0, 7, 4,
2, 6, 7,
2, 7, 3,
1, 5, 6,
1, 6, 2,
4, 7, 6,
4, 6, 5 ]
return vertices, faces
if __name__ == '__main__' :
if len( sys.argv ) > 1 :
choice_backend = sys.argv[1]
if choice_backend == 'mujoco' :
PHYSICS_BACKEND = loco.sim.PHYSICS_MUJOCO
elif choice_backend == 'bullet' :
PHYSICS_BACKEND = loco.sim.PHYSICS_BULLET
elif choice_backend == 'dart' :
PHYSICS_BACKEND = loco.sim.PHYSICS_DART
elif choice_backend == 'raisim' :
PHYSICS_BACKEND = loco.sim.PHYSICS_RAISIM
print( 'Physics backend: {}'.format( PHYSICS_BACKEND ) )
print( 'Rendering backend: {}'.format( RENDERING_BACKEND ) )
#### rotation = tm.rotation( tm.Vector3f( [ np.pi / 3, np.pi / 4, np.pi / 6 ] ) )
#### rotation = tm.rotation( tm.Vector3f( [ np.pi / 2, 0.0, 0.0 ] ) )
rotation = tm.rotation( tm.Vector3f( [ 0.0, 0.0, 0.0 ] ) )
scenario = loco.sim.Scenario()
scenario.AddSingleBody( loco.sim.Plane( "floor", 10.0, 10.0, tm.Vector3f(), tm.Matrix3f() ) )
scenario.AddSingleBody( loco.sim.Sphere( "sphere", 0.1, [ 1.0, -1.0, 2.0 ], rotation ) )
scenario.AddSingleBody( loco.sim.Mesh( "tetrahedron_0",
TETRAHEDRON_VERTICES,
TETRAHEDRON_FACES,
1.0, [ -1.0, -1.0, 1.0 ], rotation ) )
scenario.AddSingleBody( loco.sim.Mesh( "tetrahedron_1",
TETRAHEDRON_VERTICES,
TETRAHEDRON_FACES,
0.5, [ -1.0, 1.0, 1.0 ], rotation ) )
scenario.AddSingleBody( loco.sim.Mesh( "ramp_0",
RAMP_VERTICES,
RAMP_FACES,
0.3, [ 1.0, 1.0, 1.0 ], rotation ) )
scenario.AddSingleBody( loco.sim.Mesh( "ramp_1",
RAMP_VERTICES,
RAMP_FACES,
0.5, [ 1.0, -1.0, 1.0 ], rotation ) )
for i in range( 0, 12 ) :
height = 1.0
inner_rad = 2.0
outer_rad = 3.0
half_rad = 0.5* ( inner_rad + outer_rad )
dtheta = 2.0 * np.pi / 12.0
com_position = [ half_rad * np.cos( ( i + 0.5 ) * dtheta ),
half_rad * np.sin( ( i + 0.5 ) * dtheta ),
0.5 * height ]
vertices, faces = create_path_part( i )
scenario.AddSingleBody( loco.sim.Mesh( "path_part_{}".format( i ),
vertices, faces,
1.0, com_position, tm.Matrix3f(),
loco.sim.DynamicsType.STATIC ) )
runtime = loco.sim.Runtime( PHYSICS_BACKEND, RENDERING_BACKEND )
simulation = runtime.CreateSimulation( scenario )
visualizer = runtime.CreateVisualizer( scenario )
sphere = scenario.GetSingleBodyByName( "sphere" )
floor = scenario.GetSingleBodyByName( "floor" )
floor.drawable.texture = 'built_in_chessboard'
floor.drawable.ambient = [ 0.3, 0.5, 0.7 ]
floor.drawable.diffuse = [ 0.3, 0.5, 0.7 ]
floor.drawable.specular = [ 0.3, 0.5, 0.7 ]
while visualizer.IsActive() :
if visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_ESCAPE ) :
break
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_R ) :
simulation.Reset()
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_P ) :
simulation.Pause() if simulation.running else simulation.Resume()
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_SPACE ) :
sphere.AddForceCOM( [ 0.0, 0.0, 1000.0 ] )
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_UP ) :
sphere.AddForceCOM( [ 0.0, 200.0, 0.0 ] )
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_DOWN ) :
sphere.AddForceCOM( [ 0.0, -200.0, 0.0 ] )
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_RIGHT ) :
sphere.AddForceCOM( [ 200.0, 0.0, 0.0 ] )
elif visualizer.CheckSingleKeyPress( loco.sim.Keys.KEY_LEFT ) :
sphere.AddForceCOM( [ -200.0, 0.0, 0.0 ] )
simulation.Step()
visualizer.Update()
runtime.DestroySimulation()
runtime.DestroyVisualizer() | 1.820313 | 2 |
replaybuffer/utils.py | mattbev/replaybuffer | 0 | 12792260 | from typing import Iterable, Tuple
def remove_nones(*arrays: Iterable) -> Tuple[Iterable]:
"""
Take inputted arrays that may contain None values, and
return copies without Nones.
Returns:
tuple[Iterable]: New arrays with only non-None values
"""
return tuple([[i for i in array if i is not None] for array in arrays])
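
# Hedged usage sketch (added for illustration; not part of the original module).
# It shows that each array is filtered independently and that a tuple is always returned.
if __name__ == "__main__":
    cleaned_a, cleaned_b = remove_nones([1, None, 2], [None, "a", None])
    assert cleaned_a == [1, 2]
    assert cleaned_b == ["a"]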
| 3.59375 | 4 |
setup.py | JudahRockLuberto/mlfinder | 0 | 12792261 | # taken from http://python-packaging.readthedocs.io/en/latest/everything.html and modified a little
from setuptools import setup, find_packages
# random values
__version__ = '0.1.0'
# this part taken from https://github.com/dr-guangtou/riker
with open('requirements.txt') as infd:
INSTALL_REQUIRES = [x.strip('\n') for x in infd.readlines()]
# code taken from above
def readme():
with open('README.md') as f:
return f.read()
setup(name='mlfinder',
version=__version__,
description='Find possible microlensing events.',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Text Processing :: Linguistic',
],
keywords='astronomy',
url='https://github.com/JudahRockLuberto/mlfinder',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
include_package_data=True,
zip_safe=False,
python_requires='>=3.6')
| 1.65625 | 2 |
enums.py | t-bullock/kassia | 6 | 12792262 | <filename>enums.py
from enum import Enum, auto
class Line(Enum):
RIGHT = auto()
NEXT = auto()
BELOW = auto()
| 2.796875 | 3 |
server.py | nanjakorewa/MK8DRaceRecorder | 1 | 12792263 | from utils import *
import logging
import os
import time
import warnings
import subprocess
from subprocess import PIPE
formatter = '%(levelname)s : %(asctime)s : %(message)s'
logging.basicConfig(filename='mk8d.log', level=logging.INFO, format=formatter)
warnings.simplefilter('ignore')
"""###################################
パラメータ
###################################"""
SC_COMMAND = "screenshot OBS -t OBS -f " # スクリーンショット用コマンド
TEMP_IMG_FILENAME = "temp.png" # キャプチャ結果の保存先
WAIT_SECOND = 0.2 # 処理間の待機時間(秒)
WAITTIME_BEFORE_DELETE = 6 # 画像を消すまでに猶予を持たせる
IS_RACING_CHECK_LENGTH = 4 # xWAIT_SECONDの間レース判定が出ない場合は処理をリセットする
IS_RACING_CHECK_LENGTH_RACEEND = 3 # レース終わり確認
DRAW_LAPLINE = False # ラップの区切りを見せる
def run_server():
frame_num = 0
is_racing_flag_list = [False for _ in range(IS_RACING_CHECK_LENGTH + 1)]
is_racing_now = False
curent_lap = 1
lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
lap_index = []
coin_history = [0, ]
rank_history = [12, ]
    im_before_coin = 0 # coin count from the previous frame
while(True):
logging.info("[log] is_racing_now==%s" % is_racing_now)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# OBSのSCを取得
im_before_coin = coin_history[-1]
frame_num += 1
res = subprocess.run("screenshot OBS -t OBS -f temp_raw.png", shell=True, stdout=PIPE, stderr=PIPE, text=True)
time.sleep(WAIT_SECOND)
if not res:
continue
frame_gray = cv2.imread("temp_raw.png", 0)
frame_gray = frame_gray[70:1230:, 90:2180]
frame_gray = cv2.resize(frame_gray, (400, 300))
cv2.imwrite(TEMP_IMG_FILENAME, frame_gray)
        # write the image recognition results out to csv
frame_gray = cv2.imread(TEMP_IMG_FILENAME, 0)
ret1, lp = get_lap(frame_gray)
ret2, cn = get_coinnum(frame_gray, im_before_coin)
ret3, rk = get_rank(frame_gray)
is_racing_flag = ret1 and ret2 and ret3
is_racing_flag_list.append(is_racing_flag)
# 現在の状態を更新
lap = lp.replace('.png', '')
coin = cn.replace('.png', '')
rank = rk.replace('.png', '')
        # if no race is detected, reuse the results from the previous time step
if not is_racing_flag:
lap = lap_history[-1]
coin = coin_history[-1]
rank = rank_history[-1]
logging.info("lap:%s coin:%s rank:%s is_racing_flag==%s" % (lap, coin, rank, is_racing_flag))
if lap in LAP_LIST:
lap_number = LAP_LIST.index(lap) + 1
lap_history.append(lap_number)
lap_stat_mode = 1
lap_2_count = lap_history[-6:].count(2)
lap_3_count = lap_history[-6:].count(3)
logging.info("[lap_history] %s " % lap_history[-6:])
if curent_lap == 1 and lap_2_count > 3 and len(lap_history) > 20:
lap_stat_mode = 2
elif curent_lap == 2 and lap_3_count > 4 and len(lap_history) > 40:
lap_stat_mode = 3
            # when the lap is updated, record its index
if lap_stat_mode > curent_lap:
curent_lap = lap_stat_mode
lap_index.append((len(lap_history) - 10) / PLOT_WINDOW - 2.0)
else:
curent_lap = 1
if coin in COIN_LIST:
coin_history.append(COIN_LIST.index(coin))
elif is_racing_now:
coin_history.append(coin_history[-1])
else:
coin_history.append(-2)
if rank in RANK_LIST:
rank_history.append(RANK_LIST.index(rank) + 1)
elif is_racing_now:
rank_history.append(rank_history[-1] + 1)
else:
rank_history.append(-2)
        # ignore anything beyond 3 laps
if len(lap_index) > 3:
curent_lap = 3
lap_index = lap_index[:3]
if not is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH:]) == 1):
            # if race detections keep occurring while not in a race, switch to in-race processing
is_racing_now = True
curent_lap = 1
lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
lap_index = []
coin_history = [0, ] + coin_history[-2:]
rank_history = [12, ] + rank_history[-2:]
output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)
logging.info("レースを開始")
continue
elif is_racing_now and all(np.array(is_racing_flag_list[-IS_RACING_CHECK_LENGTH_RACEEND:]) == 0):
            # if no race is detected for the specified time, reset the temporary state and run the race-end processing
is_racing_now = False
curent_lap = 1
lap_history = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
lap_index = []
coin_history = [0]
rank_history = [12]
            time.sleep(WAITTIME_BEFORE_DELETE) # grace period before deleting the images
output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)
delete_temp_file()
logging.info("レースを終了")
continue
elif not is_racing_flag:
            # if the race flag is not set, skip plotting for now
continue
elif is_racing_now:
            # while racing, keep drawing the graph
output_race_status(curent_lap, coin_history, rank_history, is_racing_flag, is_racing_now, lap_index, draw_lapline=DRAW_LAPLINE)
logging.info("Finish!!!!")
if __name__ == '__main__':
delete_temp_file()
run_server()
| 2.21875 | 2 |
phy/cluster/tests/conftest.py | m-beau/phy | 0 | 12792264 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Test fixtures."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from pytest import fixture
from phy.io.array import (get_closest_clusters,
)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@fixture
def cluster_ids():
return [0, 1, 2, 10, 11, 20, 30]
# i, g, N, i, g, N, N
@fixture
def cluster_groups():
return {0: 'noise', 1: 'good', 10: 'mua', 11: 'good'}
@fixture
def quality():
def quality(c):
return c
return quality
@fixture
def similarity(cluster_ids):
sim = lambda c, d: (c * 1.01 + d)
def similarity(c):
return get_closest_clusters(c, cluster_ids, sim)
return similarity
| 2.328125 | 2 |
accounts/models.py | akhilmaharana/history-and-bookmark-recommendation-app | 0 | 12792265 | <filename>accounts/models.py
from django.db import models
# Create your models here.
class Contact(models.Model):
firstName = models.CharField(max_length=100)
lastName = models.CharField(max_length=100)
countryName = models.CharField(max_length=100)
subject = models.CharField(max_length=100)
class ContactDetails(models.Model):
firstName = models.CharField(max_length=100)
lastName = models.CharField(max_length=100)
countryName = models.CharField(max_length=100)
subject = models.CharField(max_length=100) | 2.46875 | 2 |
Python/OS Module/Changing_Directories.py | themohitpapneja/Code_Dump | 0 | 12792266 | from os import *
print(getcwd())
print(listdir('D:\\Users\\Mohit\\PycharmProjects\\PythonLab\\venv'))
print("Current Directory is: ",getcwd())
k=input("Enter Directory Where you want to jump: ")
print("Changing Directory.......")
chdir(k)
print("Current Directory is: ",getcwd())
print("Listing )the directories in current directory....\n",listdir(k)) | 3.546875 | 4 |
tests/co_sim_io/python/__init__.py | KratosMultiphysics/CoSimIO | 15 | 12792267 | # this is needed for the python unittest discovery | 1.03125 | 1 |
103_三门问题的验证.py | globien/life_python | 18 | 12792268 | # Author: 西岛闲鱼
# https://github.com/globien/easy-python
# https://gitee.com/globien/easy-python
# Verify the Monty Hall problem by simulation
import random
获奖次数_不换 = 0 # counter of wins without switching
获奖次数_换 = 0 # counter of wins with switching
试验次数 = 100000 # number of trials for each strategy (switch and no-switch)
for i in range(试验次数): # trials without switching
    door_list = ["A","B","C"] # labels of the three doors
    car = random.choice(door_list) # the car is placed behind a random door
    bet = random.choice(door_list) # the contestant picks a door at random
    if bet == car: # no switch! reveal the answer right away
        获奖次数_不换 = 获奖次数_不换 + 1
for i in range(试验次数): # trials with switching
    door_list = ["A","B","C"] # labels of the three doors
    car = random.choice(door_list) # the car is placed behind a random door
    bet = random.choice(door_list) # the contestant picks a door at random
    # now the host randomly picks one door to rule out;
    # it is neither the contestant's door nor the door hiding the car
    host_list = ["A","B","C"]
    host_list.remove(bet)
    if car in host_list:
        host_list.remove(car)
    discard = random.choice(host_list)
    # now the contestant switches to the remaining door; check whether it wins
    door_list.remove(bet) # drop the door the contestant already picked
    door_list.remove(discard) # drop the door the host ruled out
    bet = door_list[0] # only one door is left, switch to it!
    if bet == car: # switch! reveal the answer
        获奖次数_换 = 获奖次数_换 + 1
print("Win rate without switching:", 获奖次数_不换/试验次数)
print("Win rate with switching:   ", 获奖次数_换/试验次数)
| 3.90625 | 4 |
freefolks/migrations/0006_auto_20180602_2354.py | sivaprakashniet/blogger | 0 | 12792269 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-03 06:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('freefolks', '0005_auto_20180602_2334'),
]
operations = [
migrations.AddField(
model_name='account',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='account',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='transaction',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='transaction',
name='date_time',
field=models.DateTimeField(blank=True, null=True, verbose_name=b'Transaction date'),
),
migrations.AddField(
model_name='transaction',
name='modified_date',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='account',
name='bank_name',
field=models.CharField(choices=[('axis', 'Axis Bank'), ('citi', 'Citi Bank'), ('hdfc', 'HDFC Bank')], max_length=50, verbose_name=b'Select your bank'),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=12, verbose_name=b'Transaction amount'),
),
migrations.AlterField(
model_name='transaction',
name='transaction_type',
field=models.CharField(choices=[('credit', 'Credited to account'), ('debit', 'Debited from account')], max_length=50, verbose_name=b'Transaction type'),
),
]
| 1.757813 | 2 |
tatc/tatc/__init__.py | code-lab-org/tatc | 0 | 12792270 | <reponame>code-lab-org/tatc
from . import analysis
from . import generation
from . import schemas
from . import constants
from . import utils
| 0.855469 | 1 |
project/application/models.py | vicinityh2020/vicinity-vas-energy-monitoring | 0 | 12792271 | from django.db import models
from adapter import models as adapter_models
class SensorUsage(models.Model):
value = models.FloatField()
sensor = models.ForeignKey(adapter_models.Sensor, on_delete=models.CASCADE)
datetime = models.DateTimeField()
@staticmethod
def get_usage_by_day(day, monitors):
return SensorUsage.objects.filter(datetime__day=day.day,
datetime__month=day.month,
datetime__year=day.year,
sensor__monitors=monitors).order_by('datetime')
@staticmethod
def get_usage_by_month(day, monitors):
return SensorUsage.objects.filter(datetime__month=day.month, datetime__year=day.year, sensor__monitors=monitors).order_by('datetime')
def __str__(self):
return self.sensor.element + ' ' + str(self.value) + "@" + self.datetime.isoformat()
class Settings(models.Model):
setting = models.CharField(max_length=20, unique=True)
value = models.CharField(max_length=50, null=True, unique=False)
description = models.CharField(max_length=240, unique=False, null=True)
| 2.265625 | 2 |
pySimpleGUI/cookbook/1a_one_shot_win.py | PitPietro/pythonGUI | 1 | 12792272 | <filename>pySimpleGUI/cookbook/1a_one_shot_win.py
import PySimpleGUI as simpleGUI
'''
https://pysimplegui.readthedocs.io/en/latest/cookbook/#recipe-pattern-1a-one-shot-window-the-simplest-pattern
Recipe - Pattern 1A - "One-shot Window" - (The Simplest Pattern)
The One-shot window is one that pops up, collects some data, and then disappears. It is more or less a 'form' meant to
quickly grab some information and then be closed.
The window is read and then closed. When you "read" a window, you are returned a tuple consisting of an 'event' and a
dictionary of 'values'. The 'event' is what caused the read to return. It could be a button press, some text clicked,
a list item chosen, etc, or 'WIN_CLOSED' if the user closes the window using the X. The 'values' is a dictionary of
values of all the input-style elements. Dictionaries use keys to define entries. If your elements do not specify a
key, one is provided for you. These auto-numbered keys are ints starting at zero. This design pattern does not
specify a key for the 'InputText' element, so its key will be auto-numbered and is zero in this case. Thus the design
pattern can get the value of whatever was input by referencing 'values[0]'.
'''
def one_shot(my_theme=''):
layout = [
[simpleGUI.Text('My one-shot window.')],
[simpleGUI.InputText()],
[simpleGUI.Submit(), simpleGUI.Cancel()]
]
window = simpleGUI.Window('One Shot Title', layout)
event, values = window.read()
window.close()
text_input = values[0]
simpleGUI.popup('You entered', text_input)
'''
If you want to use your own key instead of an auto-generated one.
'''
def one_shot_key():
layout = [
[simpleGUI.Text('My one-shot windows whit own key')],
[simpleGUI.InputText(key='-IN-')],
[simpleGUI.Submit(), simpleGUI.Cancel()]
]
window = simpleGUI.Window('One Shot Title - key', layout)
event, values = window.read()
window.close()
text_input = values['-IN-']
simpleGUI.popup('You entered', text_input)
if __name__ == '__main__':
one_shot()
one_shot_key()
| 3.5625 | 4 |
game/control.py | JCKing97/Agents4Asteroids | 1 | 12792273 | <reponame>JCKing97/Agents4Asteroids<gh_stars>1-10
import pyglet
import random
from enum import Enum
from math import cos, sin, sqrt
from typing import List, Tuple
from time import time
from apscheduler.schedulers.background import BackgroundScheduler
from game.entities import Asteroid, Particle
from game.agent import Agent, Action
key = pyglet.window.key
class GameState(Enum):
""" Is the game currently running, paused or is it game over. """
INPLAY = 1
PAUSED = 2
OVER = 3
class Game:
""" Handles the interaction between the agents and the environment. Handles the updating of the environment. """
def __init__(self, window, agents: List[Agent]):
"""
Initialise the agents, particles, asteroids (and asteroid creator), state of the game, points and agents.
:param window: The window to create the entities on.
"""
self.window = window
self.agents: List[Agent] = agents
self.particles: List[Particle] = []
self.asteroids: List[Asteroid] = []
self.asteroid_creator = BackgroundScheduler()
self.seconds_between_asteroid_generation = 0.5
self.asteroid_creator.add_job(lambda: self.asteroid_generate(window), 'interval',
seconds=self.seconds_between_asteroid_generation, id='asteroid generator')
self.level = 1
self.state: GameState = GameState.INPLAY
self.window_width: int = window.width
self.window_height: int = window.height
self.points: int = 0
def draw(self):
""" Draws the entities. """
for agent in self.agents:
agent.draw()
for asteroid in self.asteroids:
asteroid.draw()
for particle in self.particles:
particle.draw()
def update(self):
""" Update the state of the entities """
if self.state == GameState.INPLAY:
self.particles, self.asteroids, self.agents, reward = \
self.entity_update(self.window_width, self.window_height, self.particles, self.asteroids, self.agents)
self.points += reward
if not self.agents:
self.game_over()
if self.points / 5 > self.level and self.seconds_between_asteroid_generation > 0.01:
self.level += 1
self.asteroid_creator.remove_all_jobs()
self.seconds_between_asteroid_generation /= 1.25
self.asteroid_creator.add_job(lambda: self.asteroid_generate(self.window), 'interval',
seconds=self.seconds_between_asteroid_generation, id='asteroid generator')
def pause_toggle(self):
""" Sets the game state from INPLAY to PAUSED and vice versa. """
if self.state is GameState.INPLAY:
self.state = GameState.PAUSED
self.asteroid_creator.pause_job('asteroid generator')
else:
self.state = GameState.INPLAY
self.asteroid_creator.resume_job('asteroid generator')
def add_particle(self, particle):
""" Adds a particle to the list of current particles. """
self.particles.append(particle)
def asteroid_generate(self, window):
"""
Creates an asteroid. This also seems like it should be in the entity class. As in the calculations
could be in the Asteroid class and then we just call here asteroid.generate().
"""
if random.randint(0, 1) == 0:
start_x = random.choice([0, window.width])
start_y = random.randint(0, window.height)
if start_x == 0:
velocity_x = random.randint(1, 3)
else:
velocity_x = random.randint(-3, -1)
velocity_y = random.randint(-3, 3)
else:
start_x = random.randint(0, window.width)
start_y = random.choice([0, window.height])
if start_y == 0:
velocity_y = random.randint(1, 3)
else:
velocity_y = random.randint(-3, -1)
velocity_x = random.randint(-3, 3)
self.asteroids.append(Asteroid(start_x, start_y, velocity_x, velocity_y, 15))
def out_of_window(self, asteroid, window_width, window_height):
""" Calculates if an asteroid is visible. """
return (window_height + asteroid.radius < asteroid.centre_y or asteroid.centre_y < -asteroid.radius) or\
(window_width + asteroid.radius < asteroid.centre_x or asteroid.centre_x < -asteroid.radius)
def entity_update(self, window_width, window_height, particles: List[Particle], asteroids: List[Asteroid],
agents: List[Agent]) -> Tuple[List[Particle], List[Asteroid], List[Agent], int]:
""" Updates the game entity objects. This includes the particles, asteroids and the agents ships. """
destroyed_particles = []
preserved_particles = []
preserved_asteroids = []
preserved_agents = agents
reward = 0
for agent in agents:
agent.perceive(agent.get_perception_type()(agent.get_ship(), particles, asteroids, []))
self.enact_decision(agent, agent.decide())
agent.get_ship().update()
for asteroid in asteroids:
for agent in agents:
if self.intersecting_ship(asteroid, agent.get_ship()):
preserved_agents.remove(agent)
destroyed_asteroid = False
if self.out_of_window(asteroid, window_width, window_height):
destroyed_asteroid = True
for particle in particles:
if self.is_inside(particle.centre_x, particle.centre_y, asteroid):
reward += 1
destroyed_asteroid = True
destroyed_particles.append(particle)
if not destroyed_asteroid:
preserved_asteroids.append(asteroid)
asteroid.update()
for particle in particles:
if particle not in destroyed_particles and\
0 < particle.centre_x < window_width and 0 < particle.centre_y < window_height:
particle.update()
preserved_particles.append(particle)
return preserved_particles, preserved_asteroids, preserved_agents, reward
def enact_decision(self, agent: Agent, decision: Action):
"""
Enact the decisions made by the agent in the order they are given.
:param agent: The agent that is carrying out the action
:param decision: The action to enact.
"""
agent_ship = agent.get_ship()
if decision is Action.TURNRIGHT:
agent_ship.turn_right()
elif decision is Action.TURNLEFT:
agent_ship.turn_left()
elif decision is Action.STOPTURN:
agent_ship.stop_turn()
elif decision is Action.BOOST:
agent_ship.boost()
elif decision is Action.STOPBOOST:
agent_ship.stop_boost()
elif decision is Action.FIRE:
cannon_fire = agent_ship.fire()
if cannon_fire is not None:
self.particles.append(cannon_fire)
def intersecting_ship(self, asteroid, ship):
""" Calculates the collision detection between the ship and asteroids. """
# Detection adapted from http://www.phatcode.net/articles.php?id=459
v1x = int(ship.centre_x + (2 * ship.height * cos(ship.facing)))
v1y = int(ship.centre_y + (2 * ship.height * sin(ship.facing)))
v2x = int(ship.centre_x + (ship.height * cos(ship.facing + 140)))
v2y = int(ship.centre_y + (ship.height * sin(ship.facing + 140)))
v3x = int(ship.centre_x + (ship.height * cos(ship.facing - 140)))
v3y = int(ship.centre_y + (ship.height * sin(ship.facing - 140)))
# Check if the vertices of the ship are intersecting the asteroid
if self.is_inside(v1x, v1y, asteroid) or\
self.is_inside(v2x, v2y, asteroid) or\
self.is_inside(v3x, v3y, asteroid):
return True
# Check if circle center inside the ship
if ((v2y - v1y)*(asteroid.centre_x - v1x) - (v2x - v1x)*(asteroid.centre_y - v1y)) >= 0 and \
((v3y - v2y)*(asteroid.centre_x - v2x) - (v3x - v2x)*(asteroid.centre_y - v2y)) >= 0 and \
                ((v1y - v3y)*(asteroid.centre_x - v3x) - (v1x - v3x)*(asteroid.centre_y - v3y)) >= 0:
return True
# Check if edges intersect circle
# First edge
c1x = asteroid.centre_x - v1x
c1y = asteroid.centre_y - v1y
e1x = v2x - v1x
e1y = v2y - v1y
k = c1x * e1x + c1y * e1y
if k > 0:
length = sqrt(e1x * e1x + e1y * e1y)
k = k / length
if k < length:
if sqrt(c1x * c1x + c1y * c1y - k * k) <= asteroid.radius:
return True
# Second edge
c2x = asteroid.centre_x - v2x
c2y = asteroid.centre_y - v2y
e2x = v3x - v2x
e2y = v3y - v2y
k = c2x * e2x + c2y * e2y
if k > 0:
length = sqrt(e2x * e2x + e2y * e2y)
k = k / length
if k < length:
if sqrt(c2x * c2x + c2y * c2y - k * k) <= asteroid.radius:
return True
# Third edge
c3x = asteroid.centre_x - v3x
c3y = asteroid.centre_y - v3y
e3x = v1x - v3x
e3y = v1y - v3y
k = c3x * e3x + c3y * e3y
if k > 0:
length = sqrt(e3x * e3x + e3y * e3y)
k = k / length
if k < length:
if sqrt(c3x * c3x + c3y * c3y - k * k) <= asteroid.radius:
return True
return False
def is_inside(self, x, y, circle):
if ((x - circle.centre_x) * (x - circle.centre_x) + (y - circle.centre_y) * (y - circle.centre_y)
<= circle.radius * circle.radius):
return True
else:
return False
def start(self):
""" Run the game. """
self.asteroid_creator.start()
def game_over(self):
""" The end of the game when the player dies. """
self.asteroid_creator.pause()
self.state = GameState.OVER
def on_key_press(self, symbol, modifiers):
"""
On key presses update the actions of the user agents.
:param symbol: The key pressed.
:param modifiers: ?
"""
for agent in self.agents:
agent.on_key_press(symbol, modifiers)
def on_key_release(self, symbol, modifiers):
"""
On key release update the actions of the user agents.
:param symbol: The key release.
:param modifiers: ?
"""
for agent in self.agents:
agent.on_key_release(symbol, modifiers)
| 2.765625 | 3 |
ESI_request.py | nicoscha/PESI | 0 | 12792274 | import requests
from json import loads
def _args_to_params(kwargs):
"""
Creates a tuple of keyword, value tuples and changes parameter names for ESI
:param kwargs:
:return: ((parameter, value), (parameter, value), ...)
:rtype: tuple
"""
params = ()
for parameter, value in kwargs.items():
if value is None:
continue
if parameter == 'if_none_match':
parameter = 'If-None-Match'
if parameter == 'accept_language':
parameter = 'Accept-Language'
params = (*params, (parameter, value))
return params
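# Illustrative example (added; not part of the original module) of the renaming and
# None-dropping behaviour:
#   _args_to_params({'page': 2, 'if_none_match': 'abc', 'language': None})
#   -> (('page', 2), ('If-None-Match', 'abc'))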
def request(data_source, version, HTTP_method, path, proxies=None, **kwargs):
"""
    Requests an ESI endpoint and returns the decoded JSON response
:param data_source: ['tranquility', 'singularity']
:param version: ESI version ['dev', 'latest', 'legacy', 'v1', 'v2', ...]
:param HTTP_method: ['GET', 'POST', 'PUT', 'DELETE', ...]
:param path: endpoint
:param proxies: Dictionary mapping protocol to the URL of the proxy
:param kwargs: parameters for the endpoint
    :return: the JSON response decoded with json.loads
:rtype: dict
"""
headers = {'accept': 'application/json'}
params = _args_to_params(kwargs)
response = requests.request(HTTP_method,
f'https://esi.evetech.net/{version}{path}',
headers=headers, params=params,
proxies=proxies)
return loads(response.text)
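
# Hedged usage sketch (added for illustration; the endpoint and query parameter below
# are assumptions about ESI, not verified against the current spec):
if __name__ == '__main__':
    server_status = request('tranquility', 'latest', 'GET', '/status/',
                            datasource='tranquility')
    print(server_status)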
| 2.953125 | 3 |
gloomhaven/models/scenario.py | Softyy/gloomhaven-campaign-manager | 0 | 12792275 | <reponame>Softyy/gloomhaven-campaign-manager<filename>gloomhaven/models/scenario.py
from dash_html_components import P
from .scenario_event import ScenarioEvent
class Scenario():
def __init__(self, id, title, requirements=[], anti_requirements=[], party_achievements=[], global_achievements=[], new_locations=[], subset_of_locations=False, conditional_achievements=None, alt_requirements=[], lost_achievements=[], personal_requirements=None, scenario_type: str = 'main', introduction: str = "", treasures: [int] = [], conclusion: str = "", goal: str = "Kill all enemies", board_square: str = "A-1", tiles: [str] = ["A1a", "A2b"], event_1: ScenarioEvent = {}, rewards: [str] = [], special_rules: str = "", event_2: ScenarioEvent = {}, event_3: ScenarioEvent = {}, boss_special_1: str = "", boss_special_2: str = ""):
self.id = id
self.title = title
self.requirements = requirements
self.anti_requirements = anti_requirements
self.party_achievements = party_achievements
self.global_achievements = global_achievements
self.new_locations = new_locations
self.subset_of_locations = subset_of_locations
self.conditional_achievements = conditional_achievements
self.alt_requirements = alt_requirements
self.lost_achievements = lost_achievements
self.personal_requirements = personal_requirements
self.scenario_type = scenario_type
self.introduction = introduction
self.treasures = treasures
self.conclusion = conclusion
self.goal = goal
self.tiles = tiles
self.board_square = board_square
self.event_1 = ScenarioEvent(**event_1)
self.event_2 = ScenarioEvent(**event_2)
self.event_3 = ScenarioEvent(**event_3)
self.rewards = rewards
self.special_rules = special_rules
self.boss_special_1 = boss_special_1
self.boss_special_2 = boss_special_2
def requirements_to_html(self):
requirements = self.text_and_cond_to_html(
self.requirements, "Complete")
anti_requirements = self.text_and_cond_to_html(
self.anti_requirements, "Incomplete")
alt_requirements = self.text_and_cond_to_html(
self.alt_requirements, "Complete")
return requirements + anti_requirements + ([P("Or")] + alt_requirements if len(alt_requirements) > 0 else [])
@staticmethod
    def text_and_cond_to_html(requirements: list, cond: str):
return [P(f'{requirement} ({cond})') for requirement in requirements]
def __repr__(self):
return f'{self.id}-{self.title}'
def get_event(self, id: int):
if (id == 1):
return self.event_1
elif (id == 2):
return self.event_2
elif (id == 3):
return self.event_3
else:
return ScenarioEvent()
def get_next_event(self, id: int):
return self.get_event(id+1)
| 2.15625 | 2 |
LeetCode/Python3/Math/29. Divide Two Integers.py | WatsonWangZh/CodingPractice | 11 | 12792276 | <reponame>WatsonWangZh/CodingPractice<filename>LeetCode/Python3/Math/29. Divide Two Integers.py
# Given two integers dividend and divisor, divide two integers
# without using multiplication, division and mod operator.
# Return the quotient after dividing dividend by divisor.
# The integer division should truncate toward zero.
# Example 1:
# Input: dividend = 10, divisor = 3
# Output: 3
# Example 2:
# Input: dividend = 7, divisor = -3
# Output: -2
# Note:
# Both dividend and divisor will be 32-bit signed integers.
# The divisor will never be 0.
# Assume we are dealing with an environment
# which could only store integers within the 32-bit signed integer range:
# [−231, 231 − 1]. For the purpose of this problem, assume that
# your function returns 231 − 1 when the division result overflows.
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
        # Extract the sign first and work only with two positive numbers. Division is
        # essentially subtracting the divisor from the dividend over and over, but doing
        # that one step at a time is too slow. Instead, use bit shifts to quickly find a
        # multiple `current` of the divisor that is just below the dividend, then keep
        # subtracting while shrinking `current`. Overflow can only happen upward, and it
        # is filtered out with the min() operation.
MAX_INT = 2147483647
sign = 1
if dividend >= 0 and divisor < 0 or dividend <= 0 and divisor > 0:
sign = -1
dividend = abs(dividend)
divisor = abs(divisor)
result = 0
current = divisor
currentResult = 1
while current <= dividend:
current <<= 1
currentResult <<= 1
while divisor <= dividend:
current >>= 1
currentResult >>= 1
if current <= dividend:
dividend -= current
result += currentResult
return min(sign * result, MAX_INT)
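
# Added illustrative checks mirroring the examples in the problem statement above; they
# also trace the shift loop: for 10/3, `current` doubles 3 -> 6 -> 12, then the shrink
# loop subtracts 6 and 3, giving quotient 3.
assert Solution().divide(10, 3) == 3
assert Solution().divide(7, -3) == -2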
| 3.921875 | 4 |
_scripts/Iris.py | reyannlarkey/reyannlarkey.github.io | 1 | 12792277 | ''' Present an interactive function explorer with slider widgets.
Scrub the sliders to change the properties of the ``sin`` curve, or
type into the title text box to update the title of the plot.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve sliders.py
at your command prompt. Then navigate to the URL
http://localhost:5006/sliders
in your browser.
'''
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Slider, TextInput
from bokeh.plotting import figure, output_file,show
from sklearn import datasets
import hdbscan
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
### SET UP THE DATA ###
n_samples = 1500
random_state = 170
# Dataset #1
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
X1, Y1 = X[:,0], X[:,1]
# Dataset #2
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
X2, Y2 = X_aniso[:,0], X_aniso[:,1]
# Dataset #3
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
X3, Y3 = X_varied[:,0], X_varied[:,1]
# Dataset #4
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
X4, Y4 = X_filtered[:,0], X_filtered[:,1]
source1 = ColumnDataSource(data=dict(X=X1, Y=Y1))
source2 = ColumnDataSource(data=dict(X=X2, Y=Y2))
source3 = ColumnDataSource(data=dict(X=X3, Y=Y3))
source4 = ColumnDataSource(data=dict(X=X4, Y=Y4))
print(source1, source2, source3, source4)
### Set up Plot
plot = figure(plot_height=400, plot_width=400, title="Clusters",
tools="crosshair,pan,reset,save,wheel_zoom",
x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.scatter('X', 'Y', source=source1)#, line_width=3, line_alpha=0.6)
show(plot)
output_file('clustering.html')
'''
# Set up data
N = 200
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
# Set up plot
plot = figure(plot_height=400, plot_width=400, title="my sine wave",
tools="crosshair,pan,reset,save,wheel_zoom",
x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
# Set up widgets
text = TextInput(title="title", value='my sine wave')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0, step=0.1)
phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1, step=0.1)
# Set up callbacks
def update_title(attrname, old, new):
plot.title.text = text.value
text.on_change('value', update_title)
def update_data(attrname, old, new):
# Get the current slider values
a = amplitude.value
b = offset.value
w = phase.value
k = freq.value
# Generate the new curve
x = np.linspace(0, 4*np.pi, N)
y = a*np.sin(k*x + w) + b
source.data = dict(x=x, y=y)
for w in [offset, amplitude, phase, freq]:
w.on_change('value', update_data)
# Set up layouts and add to document
inputs = column(text, offset, amplitude, phase, freq)
curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "Sliders"
'''
| 3.0625 | 3 |
tesseract_recognize_api.py | mauvilsa/tesseract-recognize | 34 | 12792278 | #!/usr/bin/env python3
"""Command line tool for the tesseract-recognize API server."""
"""
@version $Version: 2020.01.13$
@author <NAME> <<EMAIL>>
@copyright Copyright(c) 2017-present, <NAME> <<EMAIL>>
@requirements https://github.com/omni-us/pagexml/releases/download/2019.10.10/pagexml-2019.10.10-cp36-cp36m-linux_x86_64.whl
@requirements jsonargparse>=2.20.0
@requirements flask-restplus>=0.12.1
@requirements prance>=0.15.0
"""
import os
import re
import sys
import json
import shutil
import queue
import threading
import tempfile
import pagexml
pagexml.set_omnius_schema()
from time import time
from functools import wraps
from subprocess import Popen, PIPE, STDOUT
from jsonargparse import ArgumentParser, ActionConfigFile, ActionYesNo
from flask import Flask, Response, request, abort
from flask_restplus import Api, Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug.exceptions import BadRequest
from prance.util import url
from prance.convert import convert_url
def get_cli_parser(logger=True):
"""Returns the parser object for the command line tool."""
parser = ArgumentParser(
error_handler='usage_and_exit_error_handler',
logger=logger,
default_env=True,
description=__doc__)
parser.add_argument('--cfg',
action=ActionConfigFile,
help='Path to a yaml configuration file.')
parser.add_argument('--threads',
type=int,
default=4,
help='Maximum number of tesseract-recognize instances to run in parallel.')
parser.add_argument('--prefix',
default='/tesseract-recognize',
help='Prefix string for all API endpoints. Use "%%s" in string to replace by the API version.')
parser.add_argument('--host',
default='127.0.0.1',
help='Hostname to listen on.')
parser.add_argument('--port',
type=int,
default=5000,
help='Port for the server.')
parser.add_argument('--debug',
action=ActionYesNo,
default=False,
help='Whether to run in debugging mode.')
return parser
def TypePageXML(value):
"""Parse Page XML request type.
Args:
value: The raw type value.
Returns:
dict[str, {str,PageXML}]: Dictionary including the page xml 'filename', the 'string' representation and the PageXML 'object'.
"""
if type(value) != FileStorage:
raise ValueError('Expected pagexml to be of type FileStorage.')
spxml = value.read().decode('utf-8')
pxml = pagexml.PageXML()
pxml.loadXmlString(spxml)
return {'filename': value.filename, 'object': pxml, 'string': spxml}
class ParserPageXML(reqparse.RequestParser):
"""Class for parsing requests including a Page XML."""
def parse_args(self, **kwargs):
"""Extension of parse_args that additionally does some Page XML checks."""
req_dict = super().parse_args(**kwargs)
if req_dict['pagexml'] is not None and req_dict['images'] is not None:
pxml = req_dict['pagexml']['object']
images_xml = set()
for page in pxml.select('//_:Page'):
fname = re.sub(r'\[[0-9]+]$', '', pxml.getAttr(page, 'imageFilename'))
images_xml.add(fname)
images_received = [os.path.basename(x.filename) for x in req_dict['images']]
for fname in images_received:
if fname not in images_xml:
raise BadRequest('Received image not referenced in the Page XML: '+fname)
if len(images_xml) != len(images_received):
raise BadRequest('Expected to receive all images referenced in the Page XML ('+str(len(images_xml))+') but only got a subset ('+str(len(images_received))+')')
return req_dict
def write_to_tmpdir(req_dict, prefix='tesseract_recognize_api_tmp_', basedir='/tmp'):
"""Writes images and page xml from a request to a temporal directory.
Args:
req_dict (dict): Parsed Page XML request.
prefix (str): Prefix for temporal directory name.
basedir (str): Base temporal directory.
Returns:
The path to the temporal directory where saved.
"""
tmpdir = tempfile.mkdtemp(prefix=prefix, dir=basedir)
if req_dict['pagexml'] is not None:
fxml = os.path.basename(req_dict['pagexml']['filename'])
with open(os.path.join(tmpdir, fxml), 'w') as f:
f.write(req_dict['pagexml']['string'])
if req_dict['images'] is not None:
for image in req_dict['images']:
image.save(os.path.join(tmpdir, os.path.basename(image.filename)))
return tmpdir
class images_pagexml_request:
"""Decorator class for endpoints receiving images with optionally a page xml and responding with a page xml."""
def __init__(self,
api,
images_help='Images with file names as referenced in the Page XML if given.',
pagexml_help='Optional valid Page XML file.',
options_help='Optional configuration options to be used for processing.',
response_help='Resulting Page XML after processing.'):
"""Initializer for images_pagexml_request class.
Args:
api (flask_restplus.Api): The flask_restplus Api instance.
images_help (str): Help for images field in swagger documentation.
pagexml_help (str): Help for pagexml field in swagger documentation.
options_help (str): Help for config field in swagger documentation.
response_help (str): Help for pagexml response in swagger documentation.
"""
self.api = api
self.response_help = response_help
parser = ParserPageXML(bundle_errors=True)
parser.add_argument('images',
location='files',
type=FileStorage,
required=True,
action='append',
help=images_help)
parser.add_argument('pagexml',
location='files',
type=TypePageXML,
required=False,
help=pagexml_help)
parser.add_argument('options',
location='form',
type=str,
required=False,
default=[],
action='append',
help=options_help)
self.parser = parser
def __call__(self, method):
"""Makes a flask_restplus.Resource method expect a page xml and/or respond with a page xml."""
method = self.api.expect(self.parser)(method)
method = self.api.response(200, description=self.response_help)(method)
method = self.api.produces(['application/xml'])(method)
@wraps(method)
def images_pagexml_request_wrapper(func):
req_dict = self.parser.parse_args()
pxml = method(func, req_dict)
return Response(
pxml.toString(True),
mimetype='application/xml',
headers={'Content-type': 'application/xml; charset=utf-8'})
return images_pagexml_request_wrapper
def run_tesseract_recognize(*args):
"""Runs a tesseract-recognize command using given arguments."""
cmd = ['tesseract-recognize']
cmd.extend(list(args))
proc = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
cmd_out = proc.stdout.read().decode("utf-8")
proc.communicate()
cmd_rc = proc.returncode
return cmd_rc, cmd_out
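
# Hedged usage sketch (added; assumes the tesseract-recognize binary is on PATH, which
# the endpoints below already require). It shows the (return code, output) contract:
#   rc, out = run_tesseract_recognize('--version')
#   if rc != 0:
#       raise RuntimeError('tesseract-recognize failed: ' + out)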
if __name__ == '__main__':
## Parse config ##
parser = get_cli_parser(logger=os.path.basename(__file__))
cfg = parser.parse_args(env=True)
## Create a Flask WSGI application ##
app = Flask(__name__) # pylint: disable=invalid-name
app.logger = parser.logger
## Create a Flask-RESTPlus API ##
api = Api(app,
doc=cfg.prefix+'/swagger',
version='2.0',
prefix=cfg.prefix,
title='tesseract-recognize API',
description='An API for running tesseract-recognition jobs.')
sys.modules['flask.cli'].show_server_banner = lambda *x: None # type: ignore
## Definition of endpoints ##
@api.route('/openapi.json')
class OpenAPI(Resource):
def get(self):
"""Endpoint to get the OpenAPI json."""
absurl = url.absurl(request.base_url.replace(request.path, cfg.prefix+'/swagger.json'))
content, _ = convert_url(absurl)
return json.loads(content)
@api.route('/version')
class ServiceVersion(Resource):
@api.response(200, description='Version of the running service.')
@api.produces(['text/plain'])
def get(self):
"""Endpoint to get the version of the running service."""
rc, out = run_tesseract_recognize('--version')
if rc != 0:
abort(500, 'problems getting version from tesseract-recognize command :: '+str(out))
return Response(out, mimetype='text/plain')
@api.route('/help')
class ServiceHelp(Resource):
@api.response(200, description='Help for the running service.')
@api.produces(['text/plain'])
def get(self):
"""Endpoint to get the help for the running service."""
rc, out = run_tesseract_recognize('--help')
if rc != 0:
abort(500, 'problems getting help from tesseract-recognize command :: '+str(out))
return Response(out, mimetype='text/plain')
num_requests = 0
@api.route('/process')
class ProcessRequest(Resource):
@images_pagexml_request(api)
@api.doc(responses={400: 'tesseract-recognize execution failed.'})
def post(self, req_dict):
"""Endpoint for running tesseract-recognize on given images or page xml file."""
start_time = time()
done_queue = queue.Queue()
process_queue.put((done_queue, req_dict))
while True:
try:
thread, num_requests, pxml = done_queue.get(True, 0.05)
break
except queue.Empty:
continue
if isinstance(pxml, Exception):
app.logger.error('Request '+str(num_requests)+' on thread '+str(thread)+' unsuccessful, '
+('%.4g' % (time()-start_time))+' sec. :: '+str(pxml))
abort(400, 'processing failed :: '+str(pxml))
else:
app.logger.info('Request '+str(num_requests)+' on thread '+str(thread)+' successful, '
+('%.4g' % (time()-start_time))+' sec.')
return pxml
process_queue = queue.Queue() # type: ignore
## Processor thread function ##
def start_processing(thread, process_queue):
num_requests = 0
tmpdir = None
while True:
try:
done_queue, req_dict = process_queue.get(True, 0.05)
num_requests += 1
tmpdir = write_to_tmpdir(req_dict)
opts = list(req_dict['options'])
if len(opts) == 1 and opts[0][0] == '[':
opts = json.loads(opts[0])
if req_dict['pagexml'] is not None:
opts.append(os.path.join(tmpdir, os.path.basename(req_dict['pagexml']['filename'])))
elif req_dict['images'] is not None:
for image in req_dict['images']:
opts.append(os.path.join(tmpdir, os.path.basename(image.filename)))
else:
raise KeyError('No images found in request.')
opts.extend(['-o', os.path.join(tmpdir, 'output.xml')])
rc, out = run_tesseract_recognize(*opts)
if rc != 0:
raise RuntimeError('tesseract-recognize execution failed :: opts: '+str(opts)+' :: '+str(out))
pxml = pagexml.PageXML(os.path.join(tmpdir, 'output.xml'))
done_queue.put((thread, num_requests, pxml))
except queue.Empty:
continue
except json.decoder.JSONDecodeError as ex:
done_queue.put((thread, num_requests, RuntimeError('JSONDecodeError: '+str(ex)+' while parsing '+opts[0])))
except Exception as ex:
done_queue.put((thread, num_requests, ex))
finally:
if not cfg.debug and tmpdir is not None:
shutil.rmtree(tmpdir)
tmpdir = None
for thread in range(cfg.threads):
threading.Thread(target=start_processing, args=(thread+1, process_queue)).start()
app.run(host=cfg.host, port=cfg.port, debug=cfg.debug)
| 1.914063 | 2 |
models/entity.py | sebastian-quintero/school-bus-router | 2 | 12792279 | <filename>models/entity.py
from typing import Dict, Any
from models.location import Location
class Entity:
"""Class that represents an abstract Entity with standard methods"""
@classmethod
def from_dict(cls, entity_dict: Dict[str, Any]):
"""Method to instantiate an Entity from a Dict (JSON)"""
entity_attributes = cls.__dataclass_fields__.keys()
attributes_dict = {
k: entity_dict[k]
for k in entity_attributes
if k != 'location'
}
if 'location' in entity_attributes:
location = Location(lat=entity_dict['lat'], lng=entity_dict['lng'])
return cls(**{**attributes_dict, **{'location': location}})
return cls(**attributes_dict)
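
# Hedged usage sketch (added for illustration; the Stop dataclass below is hypothetical,
# not part of this codebase). Any dataclass subclass of Entity can be built straight from
# a dict, with 'lat'/'lng' folded into a Location when the subclass declares 'location':
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class Stop(Entity):
#       identifier: str
#       location: Location
#
#   stop = Stop.from_dict({'identifier': 's1', 'lat': 4.65, 'lng': -74.05})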
| 3.265625 | 3 |
AIO/invasion/test.py | eddiegz/Personal-C | 3 | 12792280 | <filename>AIO/invasion/test.py<gh_stars>1-10
infile=open('invin.txt','r').readlines()
r,c=map(int,infile[0].split())
line,name=infile[1:],{}
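# Scan the grid twice (across rows, then down columns): every pair of adjacent cells
# holding different values is recorded as a neighbour relation in `name`, and the
# answer is the largest number of distinct neighbouring values any single value has.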
for i in range(r):
for j in range(c-1):
ce1,ce2=line[i][j],line[i][j+1]
if ce1!=ce2:
if ce1 not in name:name[ce1]=[ce2]
if ce2 not in name:name[ce2]=[ce1]
if ce1 not in name[ce2]:name[ce2].append(ce1)
if ce2 not in name[ce1]:name[ce1].append(ce2)
for j in range(c):
for i in range(r-1):
ce1,ce2=line[i][j],line[i+1][j]
if ce1!=ce2:
if ce1 not in name:name[ce1]=[ce2]
if ce2 not in name:name[ce2]=[ce1]
if ce1 not in name[ce2]:name[ce2].append(ce1)
if ce2 not in name[ce1]:name[ce1].append(ce2)
answer=0
for i in name:
answer=max(answer,len(name[i]))
open('invout.txt','w').write(str(answer)) | 2.578125 | 3 |
pong_game.py | LouisPlisso/lfi | 0 | 12792281 | <filename>pong_game.py
# Simple pong game - don't let the ball hit the bottom!
# KidsCanCode - Intro to Programming
from tkinter import *
import random
import time
# Define ball properties and functions
class Ball:
def __init__(self, canvas, color, size, paddle):
self.canvas = canvas
self.paddle = paddle
self.id = canvas.create_oval(10, 10, size, size, fill=color)
self.canvas.move(self.id, 245, 100)
self.xspeed = random.randrange(-3,3)
self.yspeed = -1
self.hit_bottom = False
self.score = 0
def draw(self):
self.canvas.move(self.id, self.xspeed, self.yspeed)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.yspeed = 3
if pos[3] >= 400:
self.hit_bottom = True
if pos[0] <= 0:
self.xspeed = 3
if pos[2] >= 500:
self.xspeed = -3
if self.hit_paddle(pos) == True:
self.yspeed = -3
self.xspeed = random.randrange(-3,3)
self.score += 1
def hit_paddle(self, pos):
paddle_pos = self.canvas.coords(self.paddle.id)
if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:
return True
return False
# Define paddle properties and functions
class Paddle:
def __init__(self, canvas, color):
self.canvas = canvas
self.id = canvas.create_rectangle(0,0, 100, 10, fill=color)
self.canvas.move(self.id, 200, 300)
self.xspeed = 0
self.speed_factor = 1
self.canvas.bind_all('<KeyPress-Left>', self.move_left)
self.canvas.bind_all('<KeyPress-Right>', self.move_right)
self.canvas.bind_all('<KeyPress-Down>', self.speed_down)
self.canvas.bind_all('<KeyPress-Up>', self.speed_up)
self.canvas.bind_all('<KeyPress-space>', self.stop)
def draw(self):
self.canvas.move(self.id, self.xspeed, 0)
pos = self.canvas.coords(self.id)
if pos[0] <= 0:
self.xspeed = 0
if pos[2] >= 500:
self.xspeed = 0
def move_left(self, evt):
self.xspeed = -2 * self.speed_factor
def move_right(self, evt):
        self.xspeed = 2 * self.speed_factor
def stop(self, evt):
# pass just does nothing
pass
def speed_up(self, evt):
self.speed_factor *= 1.2
def speed_down(self, evt):
self.speed_factor /= 1.2
def main():
# Create window and canvas to draw on
tk = Tk()
tk.title("Ball Game")
canvas = Canvas(tk, width=500, height=400, bd=0, bg='papaya whip')
canvas.pack()
label = canvas.create_text(5, 5, anchor=NW, text="Score: 0")
tk.update()
paddle = Paddle(canvas, 'blue')
ball = Ball(canvas, 'red', 25, paddle)
input('hit any key to start')
# Animation loop
while ball.hit_bottom == False:
ball.draw()
paddle.draw()
canvas.itemconfig(label, text="Score: "+str(ball.score))
tk.update_idletasks()
tk.update()
time.sleep(0.01)
# Game Over
go_label = canvas.create_text(250,200,text="GAME OVER",font=("Helvetica",30))
tk.update()
if __name__ == '__main__':
main()
| 4.25 | 4 |
client/tests/communication/on_board_test.py | beehive-lab/DFLOW | 1 | 12792282 | import unittest
from unittest.mock import (
patch,
MagicMock
)
from client.communication.messages import MessageCommand
from client.communication.on_board import OnBoard, IncomingMessageHandler
class OnBoardTest(unittest.TestCase):
"""
A suite of tests surrounding the OnBoard class functionality.
"""
def setUp(self) -> None:
self._mock_message_handler = patch(
'client.communication.on_board.IncomingMessageHandler'
).start()
self.addCleanup(patch.stopall)
@patch('client.communication.on_board.build_command_message_with_args')
def test_start_streaming_data(self, mock_build_message):
"""
Test the start streaming data send message.
"""
mock_message = 'mock_message'
mock_build_message.return_value = mock_message
mock_comm_link = MagicMock()
mock_comm_link.send = MagicMock()
on_board = OnBoard(mock_comm_link)
on_board.start_streaming_sensor_data(['key1', 'key2', 'key3'])
mock_build_message.assert_called_with(
MessageCommand.STREAM_BIKE_SENSOR_DATA,
['start', 'key1', 'key2', 'key3']
)
mock_comm_link.send.assert_called_with(mock_message.encode())
@patch('client.communication.on_board.build_command_message_with_args')
def test_stop_streaming_data(self, mock_build_message):
"""
Test the start streaming data send message.
"""
mock_message = 'mock_message'
mock_build_message.return_value = mock_message
mock_comm_link = MagicMock()
mock_comm_link.send = MagicMock()
on_board = OnBoard(mock_comm_link)
on_board.stop_streaming_sensor_data(['key1', 'key2', 'key3'])
mock_build_message.assert_called_with(
MessageCommand.STREAM_BIKE_SENSOR_DATA,
['stop', 'key1', 'key2', 'key3']
)
mock_comm_link.send.assert_called_with(mock_message.encode())
def test_incoming_stream_bike_sensor_data_msg(self):
"""
Test incoming data from the bike is dealt with properly.
"""
# Create some test sensor data messages.
test_sensor_data_messages = [
b'stream-bike-sensor-data:AIR_TEMPERATURE:30:TIMESTAMP1',
b'stream-bike-sensor-data:AIR_TEMPERATURE:20:TIMESTAMP2',
(
b'stream-bike-sensor-data:'
b'AIR_TEMPERATURE:10:TIMESTAMP3:'
b'TYRE_PRESSURE_REAR:5:TIMESTAMP4:'
b'BRAKE_FRONT_ACTIVE:50:TIMESTAMP5'
),
b'stream-bike-sensor-data:AIR_TEMPERATURE:0:TIMESTAMP6',
]
# Create OnBoard object.
mock_comm_link = MagicMock()
on_board = OnBoard(mock_comm_link)
# Simulate handle_incoming_message call for the test messages.
for message in test_sensor_data_messages:
on_board.handle_incoming_message(message)
# Verify all messages are handled correctly.
expected_data = {
'AIR_TEMPERATURE': [
(b'TIMESTAMP1', b'30'),
(b'TIMESTAMP2', b'20'),
(b'TIMESTAMP3', b'10'),
(b'TIMESTAMP6', b'0')
],
'TYRE_PRESSURE_REAR': [
(b'TIMESTAMP4', b'5')
],
'BRAKE_FRONT_ACTIVE': [
(b'TIMESTAMP5', b'50')
],
}
self.assertListEqual(
expected_data['AIR_TEMPERATURE'],
on_board.get_recorded_sensor_data('AIR_TEMPERATURE')
)
self.assertListEqual(
expected_data['TYRE_PRESSURE_REAR'],
on_board.get_recorded_sensor_data('TYRE_PRESSURE_REAR')
)
self.assertListEqual(
expected_data['BRAKE_FRONT_ACTIVE'],
on_board.get_recorded_sensor_data('BRAKE_FRONT_ACTIVE')
)
class IncomingMessageHandlerTest(unittest.TestCase):
"""
A suite of tests surrounding the IncomingMessageHandler class
functionality.
"""
def test_incoming_msg_handled(self):
"""
Test incoming message is handled correctly.
"""
test_msg = 'test:message'
mock_comm_link = MagicMock()
mock_comm_link.receive.side_effect = [test_msg]
mock_on_board = MagicMock()
msg_handler = IncomingMessageHandler(mock_on_board, mock_comm_link)
# Run the thread in a try catch and ignore StopIteration errors. It
# will error because it will exhaust the side_effect list and cause
# a StopIteration error. This is a messy solution but it works to
# test for now.
try:
msg_handler.run()
except StopIteration:
pass
mock_on_board.handle_incoming_message.assert_called_with(test_msg)
| 3.03125 | 3 |
lib/forms.py | plastr/extrasolar-game | 0 | 12792283 | # Copyright (c) 2010-2011 Lazy 8 Studios, LLC.
# All rights reserved.
# Contains utilities for working with HTML form data.
def fetch(request, fields, blanks=[]):
""" Extremely rudimentary validation simply checks whether the
fields are present and non-empty in the POST parameters. """
values = {}
ok = True
for field in fields:
val = request.POST.get(field, '')
if val == '' and field not in blanks:
ok = False
values[field] = val
return ok, values
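# Minimal usage sketch (the fake request below is a stand-in; in the app this would be
# the framework's request object, whose .POST behaves like a dict of form fields):
if __name__ == '__main__':
    class _FakeRequest(object):
        POST = {'name': 'Ada', 'email': ''}
    ok, values = fetch(_FakeRequest(), ['name', 'email'], blanks=['email'])
    print(ok)      # True, because 'email' is allowed to be blank
    print(values)  # {'name': 'Ada', 'email': ''}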
| 2.5 | 2 |
cloudinitd/exceptions.py | buzztroll/cloudinit.d | 0 | 12792284 | <gh_stars>0
import traceback
import sys
import os
class CloudInitDException(Exception):
def __init__(self, ex):
self._base_ex = ex
exc_type, exc_value, exc_traceback = sys.exc_info()
self._base_stack = traceback.format_tb(exc_traceback)
def __str__(self):
return str(self._base_ex)
def get_stack(self):
return str(self._base_stack)
class APIUsageException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class TimeoutException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class IaaSException(CloudInitDException):
def __init__(self, msg):
CloudInitDException.__init__(self, msg)
self.msg = msg
def __str__(self):
return str(self.msg)
class ConfigException(Exception):
def __init__(self, msg, ex=None):
Exception.__init__(self, msg)
self._source_ex = ex
class PollableException(CloudInitDException):
def __init__(self, p, ex):
CloudInitDException.__init__(self, ex)
self.pollable = p
def __str__(self):
return CloudInitDException.__str__(self)
class ServiceException(PollableException):
def __init__(self, ex, svc, msg=None, stdout="", stderr=""):
PollableException.__init__(self, svc, ex)
self._svc = svc
self.stdout = stdout
self.stderr = stderr
self.msg = msg
def __str__(self):
s = "Error while processing the service: %s" % (self._svc.name)
return s
def get_output(self):
s = ""
if self.msg:
s = s + os.linesep + self.msg
try:
s = s + os.linesep + "stdout : %s" % (str(self.stdout))
s = s + os.linesep + "stderr : %s" % (str(self.stderr))
s = s + os.linesep + str(self._base_ex)
except Exception, ex:
s = str(s)
return s
class ProcessException(PollableException):
def __init__(self, pollable, ex, stdout, stderr, rc=None):
PollableException.__init__(self, pollable, ex)
self.stdout = stdout
self.stderr = stderr
self.exit_code = rc
class MultilevelException(PollableException):
def __init__(self, exs, pollables, level):
PollableException.__init__(self, pollables[0], exs[0])
self.level = level
self.exception_list = exs
self.pollable_list = pollables
def __str__(self):
PollableException.__str__(self)
s = "["
d = ""
for ex in self.exception_list:
s = s + d + str(ex) + ":" + str(type(ex))
d = ","
s = s + "]"
return s
| 2.28125 | 2 |
tests/conftest.py | MrThearMan/django-admin-data-views | 0 | 12792285 | import pytest
from django.contrib.auth.models import User
from django.test import Client
@pytest.fixture(scope="session")
def superuser(django_db_setup, django_db_blocker) -> User:
with django_db_blocker.unblock():
user: User = User.objects.get_or_create(
username="x",
email="<EMAIL>",
is_staff=True,
is_superuser=True,
)[0]
user.set_password("x")
user.save()
return user
@pytest.fixture(scope="session")
def django_client(django_db_blocker, superuser: User) -> Client:
client = Client()
with django_db_blocker.unblock():
client.force_login(superuser)
return client
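# Example of how a test module in this suite might consume the fixture above
# (illustrative only; the URL is an assumption, not part of this project):
#
# def test_admin_index(django_client):
#     response = django_client.get("/admin/")
#     assert response.status_code == 200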
| 2.078125 | 2 |
module2-oop-code-style-and-reviews/packaging/setup.py | llpk79/DS-Unit-3-Sprint-1-Software-Engineering | 1 | 12792286 | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='lambdata-pkutrich',
version='0.0.4',
author='<NAME>',
author_email='<EMAIL>',
description='Some very basic DataFrame tools.',
long_description=long_description,
long_description_content_type='text/markdown',
url=
'https://github.com/llpk79/DS-Unit-3-Sprint-1-Software-Engineering/tree/master/module2-oop-code-style-and-reviews',
packages=setuptools.find_packages(),
classifiers=['Programming Language :: Python :: 3',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent'],
)
| 1.335938 | 1 |
polls/views.py | camnpr/django-polls | 0 | 12792287 | <gh_stars>0
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect, Http404
# from django.template import loader
from django.shortcuts import render #, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from django.views import generic  # generic class-based views
from django.db.models import F
from .models import Question, Choice
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""返回最近发布的5个问题"""
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
#...index page
"""def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
# output = '<br /> '.join([q.question_text for q in latest_question_list])
# template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list
}
# HttpResponse(template.render(context, request))
return render(request, 'polls/index.html', context)
"""
class DetailView(generic.DetailView):
model = Question
    template_name = 'polls/detail.html'  # By default, the DetailView generic view uses a template named <app name>/<model name>_detail.html; in our case that would be the "polls/question_detail.html" template.
def get_queryset(self):
        # Do not show details for questions whose pub_date is in the future; if none match, "no question found matching the query" is returned.
return Question.objects.filter(pub_date__lte=timezone.now())
#...detail page
"""def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("未找到!o(╯□╰)o")
return render(request, 'polls/detail.html', {'question': question})
"""
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def get_queryset(self):
        # Do not show results for questions whose pub_date is in the future; if none match, "no question found matching the query" is returned.
return Question.objects.filter(pub_date__lte=timezone.now())
#...results page
"""def results(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("未找到!o(╯□╰)o")
return render(request, 'polls/results.html', {'question': question})
"""
#...handle the vote form submission
def vote(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("未找到!o(╯□╰)o")
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
        # On error, redisplay the question detail form with a message
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "请选择一个选项",
})
else:
        selected_choice.votes = F('votes') + 1  # F() avoids race conditions when several users vote at the same time, preventing lost updates in the save() below.
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,))) | 2.25 | 2 |
tools/prepare_rawdata.py | Vanova/cakechat | 0 | 12792288 | import json
def check_json(line):
try:
json.loads(line.strip()[:-1])
except ValueError:
print('Skipped invalid json object: %s' % line.strip())
in_file = '../data/raw_data/TrainingFinal.txt'
dialogs = []
cnt = 0
with open(in_file) as f:
buf = []
for line in f:
line = line.strip()
if not ('[' in line or ']' in line):
check_json(line)
buf.append(line)
if ']' in line:
dlg = ''.join(buf)
dlg = dlg.strip()
dlg = dlg[:-2] + dlg[-1]
dialogs.append(dlg)
buf = []
cnt += 1
print('Processed dialogs: %d' % cnt)
# TODO split the data
out_file = '../data/corpora_processed/train_processed_dialogs.txt'
with open(out_file, 'w') as f:
for item in dialogs:
f.write(item + '\n')
out_file = '../data/corpora_processed/val_processed_dialogs.txt'
with open(out_file, 'w') as f:
for item in dialogs:
f.write(item + '\n')
| 2.859375 | 3 |
resources/migrations/0028_purpose_public.py | suutari-ai/respa | 1 | 12792289 | <reponame>suutari-ai/respa<filename>resources/migrations/0028_purpose_public.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-07 12:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0027_comments_verbose_name'),
]
operations = [
migrations.AddField(
model_name='purpose',
name='public',
field=models.BooleanField(default=True, verbose_name='Public'),
),
]
| 1.328125 | 1 |
python-structures-presentation/code/for_else.py | iz4vve-talks/misc-training | 0 | 12792290 | #!/usr/bin/env python
# START OMIT
for i in range(4):
if i == 2:
print("Skipping: %d" % i)
# the continue keyword lets you skip an iteration
continue
print(i)
else:
print("Done...")
print("\nBreak loop")
for i in range(4):
if i == 2:
print("break")
# the break breaks out of the loop
break
print(i)
# END OMIT | 4.09375 | 4 |
open/core/betterself/views/activity_log_views.py | lawrendran/open | 105 | 12792291 | from open.core.betterself.models.activity_log import ActivityLog
from open.core.betterself.serializers.activity_log_serializers import (
ActivityLogReadSerializer,
ActivityLogCreateUpdateSerializer,
)
from open.core.betterself.views.mixins import (
BaseGetUpdateDeleteView,
BaseCreateListView,
)
class ActivityLogCreateListView(BaseCreateListView):
model_class = ActivityLog
read_serializer_class = ActivityLogReadSerializer
create_serializer_class = ActivityLogCreateUpdateSerializer
class ActivityLogGetUpdateView(BaseGetUpdateDeleteView):
model_class = ActivityLog
read_serializer_class = ActivityLogReadSerializer
update_serializer_class = ActivityLogCreateUpdateSerializer
| 1.820313 | 2 |
telegram_bot_api/schemas/OrderInfoSchema.py | IsVir/telegram-bot-api | 0 | 12792292 | <gh_stars>0
from marshmallow import Schema, fields
class OrderInfoSchema(Schema):
name = fields.Str()
phone_number = fields.Str()
email = fields.Str()
shipping_address = fields.Nested('ShippingAddressSchema')
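# Illustrative usage (field values are made up; ShippingAddressSchema is the nested
# schema referenced by name above and must be importable for load() to resolve it):
#
# data = OrderInfoSchema().load({
#     "name": "Jane Doe",
#     "phone_number": "+10000000000",
#     "email": "jane@example.com",
# })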
| 2.171875 | 2 |
profiler/app/od.py | rekords-uw/Profiler-public | 0 | 12792293 | <reponame>rekords-uw/Profiler-public<filename>profiler/app/od.py
from abc import ABCMeta, abstractmethod
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn import svm
from profiler.utility import GlobalTimer
from profiler.data.embedding import OneHotModel
import matplotlib.pyplot as plt
from profiler.globalvar import *
from sklearn.neighbors import BallTree
from tqdm import tqdm
import numpy as np
import sklearn
import warnings, logging
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class OutlierDetector(object):
__metaclass__ = ABCMeta
def __init__(self, df, gt_idx=None, method='std', workers=4, t=0.05, tol=1e-6, neighbor_size=100,
knn=False, high_dim=False):
self.timer = GlobalTimer()
self.method = method
self.df = df
self.gt_idx = gt_idx
self.overall = None
self.structured = None
self.combined = None
self.workers=workers
self.t = t
self.tol = tol
self.structured_info = {}
self.overall_info = {}
self.eval = {}
self.neighbors = {}
self.neighbor_size = neighbor_size
if knn:
if not high_dim:
self.get_neighbors = self.get_neighbors_knn
else:
self.get_neighbors = self.get_neighbors_knn_highdim
else:
self.get_neighbors = self.get_neighbors_threshold
def get_neighbors_threshold(self, left):
X = self.df[left].values.reshape(-1, len(left))
# calculate pairwise distance for each attribute
distances = np.zeros((X.shape[0],X.shape[0]))
for j, attr in enumerate(left):
# check if saved
if attr in self.neighbors:
distances = self.neighbors[attr] + distances
continue
# validate type and calculate cosine distance
if self.attributes[attr] == TEXT and self.embed_txt:
data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1))
dis = sklearn.metrics.pairwise.cosine_distances(data)
elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT:
data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1))
dis = sklearn.metrics.pairwise.cosine_distances(data)
else:
dis = sklearn.metrics.pairwise_distances(X[:,j].reshape(-1,1),
metric='cityblock', n_jobs=self.workers)
# normalize distance
maxdis = max(self.tol, np.nanmax(dis))
dis = dis / maxdis
self.neighbors[attr] = (dis <= self.tol)*1
distances = self.neighbors[attr] + distances
has_same_left = (distances == X.shape[1])
return has_same_left
def get_neighbors_knn(self, left):
X = self.df[left].values.reshape(-1, len(left))
# calculate pairwise distance for each attribute
distances = np.zeros((X.shape[0],X.shape[0]))
for j, attr in enumerate(left):
# check if saved
if attr in self.neighbors:
distances = self.neighbors[attr] + distances
continue
# validate type and calculate cosine distance
if self.attributes[attr] == TEXT and self.embed_txt:
data = self.embed[attr].get_embedding(X[:,j].reshape(-1,1))
# normalize each vector to take cosine distance
data = data / np.linalg.norm(data, axis=1)
elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT:
data = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1))
else:
data = X[:,j].reshape(-1,1)
kdt = BallTree(data, metric='euclidean')
# find knn
indicies = kdt.query(data, k=self.neighbor_size, return_distance=False)
self.neighbors[attr] = np.zeros((X.shape[0],X.shape[0]))
for i in range(len(indicies)):
self.neighbors[attr][i, indicies[i, :]] = 1
distances = self.neighbors[attr] + distances
has_same_left = (distances == X.shape[1])
return has_same_left
def get_neighbors_knn_highdim(self, left):
X = self.df[left].values.reshape(-1, len(left))
# calculate pairwise distance for each attribute
distances = np.zeros((X.shape[0],X.shape[0]))
data = []
for j, attr in enumerate(left):
# check if saved
if attr in self.neighbors:
data.append(self.neighbors[attr])
continue
# validate type and calculate cosine distance
if self.attributes[attr] == TEXT and self.embed_txt:
embedded = self.embed[attr].get_embedding(X[:,j].reshape(-1,1))
# normalize each vector to take cosine distance
data.append(embedded / np.linalg.norm(embedded, axis=1))
elif self.attributes[attr] == CATEGORICAL or self.attributes[attr] == TEXT:
embedded = self.encoder[attr].get_embedding(X[:,j].reshape(-1,1))
data.append(embedded)
else:
data.append(X[:,j].reshape(-1,1))
self.neighbors[attr] = data[-1]
data = np.hstack(data)
if data.shape[0] != X.shape[0]:
print(data.shape)
raise Exception
kdt = BallTree(data, metric='euclidean')
# find knn
indicies = kdt.query(data, k=self.neighbor_size, return_distance=False)
for i in range(len(indicies)):
distances[i, indicies[i, :]] = 1
has_same_left = (distances == 1)
return has_same_left
@abstractmethod
def get_outliers(self, data, right=None):
# return a mask
pass
def run_attr(self, right):
attr_outliers = self.df.index.values[self.get_outliers(self.df[right], right)]
prec, tp = self.compute_precision(outliers=attr_outliers, log=False)
self.overall_info[right] = {
'avg_neighbors': self.df.shape[0],
'total_outliers': len(attr_outliers),
'precision': prec,
'recall': self.compute_recall(tp, outliers=attr_outliers, log=False)
}
return attr_outliers
def run_all(self, parent_sets, separate=True):
self.run_overall(separate)
self.run_structured(parent_sets)
print(self.timer.get_stat())
def run_overall(self, separate=True):
self.timer.time_start("naive")
if separate:
overall = []
for attr in self.df:
overall.extend(list(self.run_attr(attr)))
else:
overall = self.run_attr(self.df.columns.values)
self.overall = overall
return self.timer.time_end("naive")
def run_attr_structured(self, left, right):
outliers = []
if len(left) == 0:
return outliers
has_same_neighbors = self.get_neighbors(left)
num_neighbors = np.zeros((len(has_same_neighbors, )))
num_outliers = np.zeros((len(has_same_neighbors, )))
for i, row in enumerate(has_same_neighbors):
# indicies of neighbors
nbr = self.df.index.values[row]
if len(nbr) == 0:
continue
if self.method != "std":
outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right)]
else:
outlier = nbr[self.get_outliers(self.df.loc[nbr, right], right, m='m2')]
outliers.extend(outlier)
# save outlier info
num_neighbors[i] = len(nbr)
num_outliers[i] = len(outlier)
# save info
self.structured_info[right] = {
'determined_by': left,
'num_neighbors': num_neighbors,
'num_outliers': num_outliers,
'avg_neighbors': np.nanmean(num_neighbors),
'total_outliers': len(np.unique(outliers))
}
return outliers
def run_structured(self, parent_sets):
self.timer.time_start("structured")
structured = []
for i, child in enumerate(tqdm(parent_sets)):
outlier = self.run_attr_structured(parent_sets[child], child)
structured.extend(outlier)
if child not in self.structured_info:
continue
prec, tp = self.compute_precision(outlier, log=False)
self.structured_info[child]['precision'] = prec
self.structured_info[child]['recall'] = self.compute_recall(tp, outliers=outlier, log=False)
self.structured = structured
return self.timer.time_end("structured")
def filter(self, structured, t=None):
if t is None:
t = self.t
unique, count = np.unique(structured, return_counts=True)
outliers = list(unique[count > t*self.df.shape[0]])
return outliers
def run_combined(self, structured):
combined = list(structured)
combined.extend(self.overall)
return combined
def compute_precision(self, outliers, log=True):
outliers = set(outliers)
tp = 0.0
# precision
if len(outliers) == 0:
if len(self.gt_idx) == 0:
if log:
print("no outlier is found and no outlier is present in the ground truth as well, f1 is 1")
return 1, 0
if log:
print("no outlier is found, f1: 0")
return 0, 0
for i in outliers:
if i in self.gt_idx:
tp += 1
prec = tp / len(outliers)
if log:
print("with %d detected outliers, precision is: %.4f"%(len(outliers), prec))
return prec, tp
def compute_f1(self, outliers, title=None, log=True):
if title is not None:
print("Results for %s:"%title)
prec, tp = self.compute_precision(outliers, log=log)
rec = self.compute_recall(tp, outliers, log=log)
if rec*prec == 0:
f1 = 0
else:
f1 = 2 * (prec * rec) / (prec + rec)
if log:
print("f1: %.4f" % f1)
return "%.4f,%.4f,%.4f"%(prec, rec, f1)
def compute_recall(self, tp, outliers, log=True):
if tp == 0:
if log:
print("with %d outliers in gt, recall is: 0"%(len(self.gt_idx)))
return 0
if len(self.gt_idx) == 0:
if log:
print("since no outliers in the groud truth, recall is: 1"%(len(self.gt_idx)))
return 1
recall = tp / len(self.gt_idx)
if log:
print("with %d detected outliers, recall is: %.4f"%(len(outliers), recall))
return recall
def visualize_stat(self, dict, name, stat='precision'):
data = [dict[right][stat] if right in dict else 0 for right in self.overall_info]
fig, ax = plt.subplots()
ax.bar(np.arange(len(data)), data)
ax.set_xticks(np.arange(len(data)))
ax.set_yticks(np.arange(0,1,0.1))
for i, v in enumerate(data):
ax.text(i - 0.25, v + .03, "%.2f"%v)
ax.set_xticklabels(list(self.overall_info.keys()))
ax.set_xlabel('Column Name')
ax.set_ylabel(stat)
ax.set_title("[%s] %s for every column"%(name, stat))
def evaluate(self, t=None, log=True):
structured = self.filter(self.structured, t)
self.eval['overall'] = self.compute_f1(self.overall, "naive approach")
self.eval['structured'] = self.compute_f1(structured, "structure only")
self.eval['combined'] = self.compute_f1(self.run_combined(structured), "enhance naive with structured")
if log:
self.visualize_stat(self.overall_info, 'overall', stat='precision')
self.visualize_stat(self.structured_info, 'structured', stat='precision')
self.visualize_stat(self.overall_info, 'overall', stat='recall')
self.visualize_stat(self.structured_info, 'structured', stat='recall')
def evaluate_structured(self, t):
structured = self.filter(self.structured, t)
self.eval['structured'] = self.compute_f1(structured, "structure only", log=False)
self.eval['combined'] = self.compute_f1(self.run_combined(structured),
"enhance naive with structured",
log=False)
def evaluate_overall(self):
self.eval['overall'] = self.compute_f1(self.overall, "naive approach", log=False)
def view_neighbor_info(self):
for right in self.structured_info:
fig, (ax1, ax2) = plt.subplots(1,2)
data = self.structured_info[right]['num_neighbors']
ax1.hist(data, bins=np.arange(data.min(), data.max()+1))
ax1.set_title("histogram of num_neighbors\n for column %s"%right)
ax1.set_xlabel('number of neighbors')
ax1.set_ylabel('count')
width = 0.35
rects1 = ax2.bar(np.arange(len(data)),self.structured_info[right]['num_neighbors'],width)
rects2 = ax2.bar(np.arange(len(data))+width,self.structured_info[right]['num_outliers'],width)
ax2.legend((rects1[0], rects2[0]),['num_neighbors', 'num_outliers'])
ax2.set_title("num_neighbors and \nnum_outliers\n for column %s"%right)
ax2.set_xlabel('index of tuple')
ax2.set_ylabel('count')
fig, ax = plt.subplots()
width = 0.35
rects1 = ax.bar(np.arange(len(self.overall_info))+width,
[self.overall_info[right]['avg_neighbors'] for right in self.overall_info], width)
rects2 = ax.bar(np.arange(len(self.overall_info)),
[self.structured_info[right]['avg_neighbors'] if right in self.structured_info else 0
for right in self.overall_info], width)
ax.legend((rects1[0], rects2[0]),['overall', 'structured'])
ax.set_xticks(np.arange(len(self.overall_info)))
ax.set_xticklabels(list(self.overall_info.keys()))
ax.set_title("average number of neighbors for every column")
ax.set_xlabel('column name')
ax.set_ylabel('count')
class STDDetector(OutlierDetector):
def __init__(self, df, gt_idx=None):
super(STDDetector, self).__init__(df, gt_idx, "std")
self.param = {
'm1': 3,
'm2': 5,
}
def get_outliers(self, data, right=None, m='m1'):
return abs(data - np.nanmean(data)) > self.param[m] * np.nanstd(data)
# else, categorical, find low frequency items
class SEVERDetector(OutlierDetector):
def __init__(self, df, gt_idx=None):
super(SEVERDetector, self).__init__(df, gt_idx, "sever")
self.param = {
}
self.overall = None
self.structured = None
self.combined = None
    def get_outliers(self, gradient, right=None, p=0.1):
        # NOTE: 'p' (the fraction of points treated as outliers) was referenced below but
        # never defined; it is exposed as a parameter here with an assumed default of 0.1.
size = gradient.shape[0]
gradient_avg = np.sum(gradient, axis=0)/size
gradient_avg = np.repeat(gradient_avg.reshape(1, -1), size, axis=0)
G = gradient - gradient_avg
decompose = np.linalg.svd(G)
S = decompose[1]
V = decompose[2]
top_right_v = V[np.argmax(S)].T
score = np.matmul(G, top_right_v)**2
        threshold = np.percentile(score, 100 - p * 100)
        mask = (score < threshold)
#if it is going to remove all, then remove none
if np.all(~mask):
return ~mask
return mask
class ScikitDetector(OutlierDetector):
def __init__(self, df, method, attr=None, embed=None, gt_idx=None, embed_txt=False,
t=0.05, workers=4, tol=1e-6, min_neighbors=50, neighbor_size=100,
knn=False, high_dim=False, **kwargs):
super(ScikitDetector, self).__init__(df, gt_idx, method, t=t, workers=workers, tol=tol,
neighbor_size=neighbor_size, knn=knn, high_dim=high_dim)
self.embed = embed
self.attributes = attr
self.embed_txt = embed_txt
self.overall = None
self.structured = None
self.combined = None
self.algorithm = None
self.param, self.algorithm = self.get_default_setting()
self.param.update(kwargs)
self.encoder = self.create_one_hot_encoder(df)
self.min_neighbors = min_neighbors
def get_default_setting(self):
if self.method == "isf":
param = {
'contamination': 0.1,
'n_jobs': self.workers
}
alg = IsolationForest
elif self.method == "ocsvm":
param = {
'nu': 0.1,
'kernel': "rbf",
'gamma': 'auto'
}
alg = svm.OneClassSVM
elif self.method == "lof":
param = {
'n_neighbors': int(max(self.neighbor_size / 2, 2)),
'contamination': 0.1,
}
alg = LocalOutlierFactor
elif self.method == "ee":
param = {
'contamination': 0.1,
}
alg = EllipticEnvelope
return param, alg
def create_one_hot_encoder(self, df):
encoders = {}
for attr, dtype in self.attributes.items():
if dtype == CATEGORICAL or (dtype == TEXT and (not self.embed_txt)):
data = df[attr]
if not isinstance(data, np.ndarray):
data = data.values
if len(data.shape) == 1:
data = data.reshape(-1, 1)
encoders[attr] = OneHotModel(data)
return encoders
def get_outliers(self, data, right=None):
mask = np.zeros((data.shape[0]))
if not isinstance(data, np.ndarray):
data = data.values
if len(data.shape) == 1:
data = data.reshape(-1, 1)
if self.attributes[right] == TEXT:
if self.embed_txt:
# take embedding
data = self.embed[right].get_embedding(data)
else:
data = self.encoder[right].get_embedding(data)
elif self.attributes[right] == CATEGORICAL:
# take one hot encoding
data = self.encoder[right].get_embedding(data)
# remove nan:
row_has_nan = np.isnan(data).any(axis=1)
clean = data[~row_has_nan]
model = self.algorithm(**self.param)
if len(clean) <= self.min_neighbors:
return mask == -1
y = model.fit_predict(clean)
mask[~row_has_nan] = y
mask = mask.astype(int)
return mask == -1
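if __name__ == "__main__":
    # Minimal usage sketch of the simplest detector above (assumes the profiler package
    # imported at the top of this module is installed; the toy series and ground-truth
    # index below are made up for illustration).
    import pandas as pd
    demo_values = np.append(np.ones(30), 100.0)
    demo_df = pd.DataFrame({"x": demo_values})
    detector = STDDetector(demo_df, gt_idx={30})
    outlier_mask = detector.get_outliers(demo_df["x"])
    print(np.where(outlier_mask)[0])  # -> [30]; only the injected spike exceeds 3 standard deviations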
| 2.0625 | 2 |
src/mechanics/intercept.py | nrbabcock/HeartOfGold | 0 | 12792294 | from rlutilities.simulation import Car, Ball
from rlutilities.linear_algebra import *
from analysis.throttle import *
from analysis.boost import *
from analysis.jump import *
from rlbot.agents.base_agent import SimpleControllerState
from rlbot.utils.game_state_util import CarState
from util.drive import steer_toward_target
from util.vec import Vec3
from util.rlutilities import to_vec3, rotation_to_euler, closest_point_on_obb
from math import pi, atan, atan2, degrees
def get_car_front_center(car: Car):
return car.location + normalize(car.forward()) * car.hitbox().half_width[0] + normalize(car.up()) * car.hitbox().half_width[2]
class Intercept():
def __init__(self, location: vec3, boost = True):
self.location = location
self.boost = boost
self.time = None
self.purpose = None # rip
self.dodge = False
def simulate(self, bot) -> vec3:
# print('simulate intercept')
# Init vars
c = Car(bot.game.my_car)
b = Ball(bot.game.ball)
t = vec3(bot.target)
intercept = self.location
dt = 1.0 / 60.0
hit = False
min_error = None
# Drive towards intercept (moving in direction of c.forward())
c.rotation = look_at(intercept, c.up())
direction = normalize(intercept - c.location)#c.forward()
advance_distance = norm(intercept - c.location) - c.hitbox().half_width[0] - b.collision_radius
translation = direction * advance_distance
sim_start_state: ThrottleFrame = BoostAnalysis().travel_distance(advance_distance, norm(c.velocity))
c.velocity = direction * sim_start_state.speed
c.location += translation
c.time += sim_start_state.time
bot.ball_predictions = [vec3(b.location)]
while b.time < c.time:
b.step(dt)
bot.ball_predictions.append(vec3(b.location))
# print(c.time, b.time)
# print(c.location, b.location)
# Simulate the collision and resulting
for i in range(60*3):
c.location += c.velocity * dt
b.step(dt, c)
# Check if we hit the ball yet
if norm(b.location - c.location) < (c.hitbox().half_width[0] + b.collision_radius) * 1.05:
hit = True
# print('hit')
# Measure dist from target
error = t - b.location
if hit and (min_error == None or norm(error) < norm(min_error)):
min_error = error
# Record trajectory
bot.ball_predictions.append(vec3(b.location))
if not hit: return None
return min_error
# warning: lazy conversions and variable scope
def get_controls(self, car_state: CarState, car: Car):
controls = SimpleControllerState()
target_Vec3 = Vec3(self.location[0], self.location[1], self.location[2])
if angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 2:
controls.boost = False
controls.handbrake = True
elif angle_between(self.location - to_vec3(car_state.physics.location), car.forward()) > pi / 4:
controls.boost = False
controls.handbrake = False
else:
controls.boost = self.boost
controls.handbrake = False
# Be smart about not using boost at max speed
# if Vec3(car.physics.velocity).length() > self.boost_analysis.frames[-1].speed - 10:
# controls.boost = False
controls.steer = steer_toward_target(car_state, target_Vec3)
controls.throttle = 1
return controls
@staticmethod
def calculate_old(car: Car, ball: Ball, target: vec3, ball_predictions = None):
# Init vars
fake_car = Car(car)
b = Ball(ball)
# Generate predictions of ball path
if ball_predictions is None:
ball_predictions = [vec3(b.location)]
for i in range(60*5):
b.step(1.0 / 60.0)
ball_predictions.append(vec3(b.location))
# Gradually converge on ball location by aiming at a location, checking time to that location,
# and then aiming at the ball's NEW position. Guaranteed to converge (typically in <10 iterations)
# unless the ball is moving away from the car faster than the car's max boost speed
intercept = Intercept(b.location)
intercept.purpose = 'ball'
intercept.boost = True
intercept_ball_position = vec3(b.location)
i = 0
max_tries = 100
analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis()
while i < max_tries:
# Find optimal spot to hit the ball
optimal_hit_vector = normalize(target - intercept_ball_position) * b.collision_radius
optimal_hit_location = intercept_ball_position - optimal_hit_vector
# Find ideal rotation, unless it intersects with ground
optimal_rotation = look_at(optimal_hit_vector, vec3(0, 0, 1))#axis_to_rotation(optimal_hit_vector) # this might be wrong
fake_car.rotation = optimal_rotation
# print(f'fake_car.location {fake_car.location}')
# print(f'get_car_front_center(fake_car) {get_car_front_center(fake_car)}')
fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center directly on top of the best hit vector
euler = rotation_to_euler(optimal_rotation)
# todo put some super precise trigonometry in here to find the max angle allowed at given height
if fake_car.location[2] <= fake_car.hitbox().half_width[0]:
euler.pitch = 0
fake_car.rotation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll))
fake_car.location += optimal_hit_location - get_car_front_center(fake_car) # try to position the car's front center directly on top of the best hit vector
# Adjust vertical position if it (still) intersects with ground
if fake_car.location[2] < 17.0:
fake_car.location[2] = 17.0
intercept.location = get_car_front_center(fake_car)
# Calculate jump time needed
jump_height_time = JumpAnalysis().get_frame_by_height(intercept.location[2]).time # or solve with motion equation
# car_euler = rotation_to_euler(car.rotation)
# jump_pitch_time = (euler.pitch - car_euler.pitch) / 5.5 + 0.35 # disregarding angular acceleration
# jump_yaw_time = (euler.yaw - car_euler.yaw) / 5.5 + 0.35 # disregarding angular acceleration
# jump_roll_time = (euler.roll - car_euler.roll) / 5.5 + 0.35 # disregarding angular acceleration
# jump_time = max(jump_height_time, jump_pitch_time, jump_yaw_time, jump_roll_time)
jump_time = jump_height_time # todo revisit rotation time
# print('jump_time', jump_time)
# Calculate distance to drive before jumping (to arrive perfectly on target)
total_translation = intercept.location - get_car_front_center(car)
total_translation[2] = 0
total_distance = norm(total_translation)
start_index = analyzer.get_index_by_speed(norm(car.velocity))
start_frame = analyzer.frames[start_index]
custom_error_func = lambda frame : abs(total_distance - (frame.distance - start_frame.distance) - frame.speed * jump_time)
drive_analysis = analyzer.get_frame_by_error(custom_error_func, start_index)
arrival_time = drive_analysis.time - start_frame.time + jump_time
# print('drive_analysis.time', drive_analysis.time)
# print('drive_analysis', start_index)
# arrival_time = analyzer.travel_distance(total_distance, norm(car.velocity)).time
# drive_analysis = analyzer.travel_distance(norm(intercept.location - c.location), norm(c.velocity))
ball_index = int(round(arrival_time * 60))
if ball_index >= len(ball_predictions):
intercept.location = ball_predictions[-1]
intercept.time = len(ball_predictions) / 60.0
break
ball_location = ball_predictions[ball_index]
# print(f'Iteration {i} distance {norm(ball_location + vec3(optimal_hit_vector[0], optimal_hit_vector[1], 0) - intercept.location)}')
if norm(ball_location - intercept_ball_position) <= 1:
# if norm(intercept_ball_position - get_car_front_center(fake_car)) > 100:
# intercept.location = ball_predictions[-1]
# intercept.time = len(ball_predictions) / 60.0
# return intercept
intercept.dodge = True #jump_time > 0.2
intercept.jump_time = car.time + arrival_time - jump_time
intercept.dodge_preorientation = euler_to_rotation(vec3(euler.pitch, euler.yaw, euler.roll))
intercept.dodge_delay = jump_time
intercept.dodge_direction = normalize(vec2(optimal_hit_vector))
# print(f'intercept_ball_position', intercept_ball_position)
# print(f'intercept.location', intercept.location)
# print(f'time until jump {drive_analysis.time}')
# print(f'time now {car.time}')
# print(f'distance until jump {drive_analysis.distance}')
# print(f'total distance to target {total_distance}')
# print(f'horiz speed @ jump {drive_analysis.speed}')
# print(f'time intended to be in air {jump_time}')
# print(f'distance travelled in air {jump_time * drive_analysis.speed}')
# print(f'distance remaining to target @ jump {total_distance - drive_analysis.distance}')
# print(f'Intercept convergence in {i} iterations')
# print(f'desired roll {euler.roll}')
# print(f'actual roll {rotation_to_euler(c.rotation).roll}')
break
intercept_ball_position = vec3(ball_location)
# intercept.location = vec3(ball_location)
# intercept.location[2] = 0
intercept.time = arrival_time
i += 1
if i >= max_tries:
print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept')
# Intercept is only meant for ground paths (and walls/cieling are only indirectly supported)
# collision_radius = c.hitbox().half_width[2] * 2 + b.collision_radius + b.collision_radius * 8
# on_ground = intercept.location[2] <= collision_radius
# on_back_wall = abs(intercept.location[1]) >= 5120 - collision_radius
# on_side_wall = abs(intercept.location[0]) >= 4096 - collision_radius
# # on_cieling = intercept.location[2] >= 2044 - collision_radius
# reachable = on_ground # or on_back_wall or on_side_wall # or on_cieling
# if not reachable:
# return None
return intercept
@staticmethod
def calculate(car: Car, ball: Ball, target: vec3, ball_predictions = None):
# Init vars
b = Ball(ball)
dt = 1.0 / 60.0
# Generate predictions of ball path
if ball_predictions is None:
ball_predictions = []
for i in range(60*5):
b.step(dt)
ball_predictions.append(vec3(b.location))
# Gradually converge on ball location by aiming at a location, checking time to that location,
# and then aiming at the ball's NEW position. Guaranteed to converge (typically in <10 iterations)
# unless the ball is moving away from the car faster than the car's max boost speed
intercept = Intercept(b.location)
intercept.purpose = 'ball'
intercept.boost = True
intercept_ball_position = vec3(b.location)
collision_achieved = False
last_horizontal_error = None
last_horizontal_offset = None
i = 0
max_tries = 101
analyzer = BoostAnalysis() if intercept.boost else ThrottleAnalysis()
while i < max_tries:
i += 1
fake_car = Car(car)
direction = normalize(intercept.location - car.location)
fake_car.rotation = look_at(direction, fake_car.up())
for t in range(60*5):
# Step car location with throttle/boost analysis data
# Not super efficient but POITROAE
frame = analyzer.travel_time(dt, norm(fake_car.velocity))
# print('in 1 frame I travel', frame.time, frame.distance, frame.speed)
fake_car.location += direction * frame.distance
fake_car.velocity = direction * frame.speed
fake_car.time += dt
ball_location = ball_predictions[t]
# Check for collision
p = closest_point_on_obb(fake_car.hitbox(), ball_location)
if norm(p - ball_location) <= ball.collision_radius:
direction_vector = p - (fake_car.location - normalize(fake_car.forward()) * 13.88) # octane center of mass
direction_vector[2] = 0
target_direction_vector = target - ball_location
target_direction_vector[2] = 0
intercept_ball_position = ball_location
direction = atan2(direction_vector[1], direction_vector[0])
ideal_direction = atan2(target_direction_vector[1], target_direction_vector[0])
horizontal_error = direction - ideal_direction
# intercept.location = vec3(ball_location)
# intercept.time = fake_car.time
# return intercept
# Now descend the hit direction gradient
# Kick off the gradient descent with an arbitrary seed value
if last_horizontal_error is None:
last_horizontal_error = horizontal_error
last_horizontal_offset = 0
                        # seed the offset on the side indicated by the sign of the error
                        if horizontal_error > 0:
                            horizontal_offset = 25
                        else:
                            horizontal_offset = -25
intercept.location = ball_location - normalize(fake_car.left()) * horizontal_offset
break
# Recursive case of gradient descent
if horizontal_offset == last_horizontal_offset:
gradient = 0
else:
gradient = (horizontal_error - last_horizontal_error) / (horizontal_offset - last_horizontal_offset)
if gradient == 0:
predicted_horizontal_offset = horizontal_offset
else:
predicted_horizontal_offset = horizontal_offset - horizontal_error / gradient
# Base case (convergence)
if abs(gradient) < 0.0005:
print(f'convergence in {i} iterations')
print(f'gradient = {gradient}')
print(f'last_horizontal_offset = {last_horizontal_offset}')
print(f'direction = {degrees(direction)}')
print(f'ideal direction = {degrees(ideal_direction)}')
print(f'target = {target}')
print(f'ball_location = {ball_location}')
return intercept
# Edge case exit: offset maxed out
max_horizontal_offset = car.hitbox().half_width[1] + ball.collision_radius
if predicted_horizontal_offset > max_horizontal_offset:
predicted_horizontal_offset = max_horizontal_offset
elif predicted_horizontal_offset < -max_horizontal_offset:
predicted_horizontal_offset = - max_horizontal_offset
last_horizontal_offset = horizontal_offset
last_horizontal_error = horizontal_error
horizontal_offset = predicted_horizontal_offset
# Return the latest intercept location and continue descending the gradient
intercept.location = ball_location - normalize(fake_car.left()) * predicted_horizontal_offset
print(f'iteration {i}')
print(f'gradient = {gradient}')
print(f'horizontal_offset = {horizontal_offset}')
print(f'horizontal_error = {degrees(horizontal_error)}')
# print(f'ideal direction = {degrees(ideal_direction)}')
break
# Check for arrival
if norm(fake_car.location - intercept.location) < ball.collision_radius / 2:
intercept.location = ball_location
break
if i >= max_tries:
print(f'Warning: max tries ({max_tries}) exceeded for calculating intercept')
return intercept | 2.6875 | 3 |
pytest_hc-sr04/test_hc-sr04.py | Kyokko-OB-Team/deviceDriver_HC-SR04 | 0 | 12792295 | <reponame>Kyokko-OB-Team/deviceDriver_HC-SR04<gh_stars>0
#!/usr/bin/env python3
import ctypes
import fcntl
import os
import time
import linux
TEST_HCSR04_MAJ_VER = "1"
TEST_HCSR04_MIN_VER = "0"
DEVICE_FILE = "/dev/hc_sr040"
class DATA(ctypes.Structure):
_fields_ = [
("value", ctypes.c_uint),
("status", ctypes.c_uint)
]
GPIO_HCSR04_IOC_TYPE = "S"
GPIO_HCSR04_EXEC_MEASURE_DISTANCE = linux.IOR(GPIO_HCSR04_IOC_TYPE, 0x01, ctypes.sizeof(DATA))
GPIO_HCSR04_GET_DISTANCE = linux.IOW(GPIO_HCSR04_IOC_TYPE, 0x02, ctypes.sizeof(DATA))
print("ver " + TEST_HCSR04_MAJ_VER + "." + TEST_HCSR04_MIN_VER)
fd = os.open(DEVICE_FILE, os.O_RDWR)
data = DATA()
data.value = 0
data.status = 0
fcntl.ioctl(fd, GPIO_HCSR04_EXEC_MEASURE_DISTANCE, data)
time.sleep(1)
fcntl.ioctl(fd, GPIO_HCSR04_GET_DISTANCE, data)
print("Get distance: " + str(data.value) + "mm")
os.close(fd)
| 2.125 | 2 |
backend/pages/tests/test_site_configuration.py | draihal/main-pr | 2 | 12792296 | import pytest
from django.test import TestCase
from .. import factories
@pytest.mark.django_db
class SiteConfigurationTest(TestCase):
def setUp(self):
self.site_configuration = factories.SiteConfigurationFactory()
def test__str__(self):
assert self.site_configuration.__str__() == self.site_configuration.short_description
| 2.28125 | 2 |
simulator/ui/tempg.py | ondiiik/meteoink | 2 | 12792297 | <filename>simulator/ui/tempg.py<gh_stars>1-10
from ui import UiFrame, Vect, BLACK, WHITE, YELLOW
from micropython import const
from config import temp
class UiTempGr(UiFrame):
def __init__(self, ofs, dim):
super().__init__(ofs, dim)
self.temp_min = 273.0
def draw(self, ui, d):
# Pre-calculates some range values
forecast = ui.forecast.forecast
cnt = len(forecast)
self.block = ui.canvas.dim.x / cnt
temp_max = -273.0
for i1 in range(cnt):
weather = forecast[i1]
temp_max = max(weather.temp, weather.feel, temp_max)
self.temp_min = min(weather.temp, weather.feel, self.temp_min)
chart_space = const(30)
chart_min = const(chart_space // 2)
self.chart_max = self.dim.y - chart_space
self.k_temp = (self.chart_max - chart_min) / (temp_max - self.temp_min)
# Draw charts
self.chart_draw(ui, 3, WHITE)
self.chart_draw(ui, 3, YELLOW, temp.outdoor_high, temp.outdoor_low)
self.chart_draw(ui, 1, BLACK)
def chart_draw(self, ui, w, c, th = None, tl = None):
forecast = ui.forecast.forecast
cnt = len(forecast)
for i1 in range(cnt):
if i1 > 0:
x1 = int(self.block * i1)
x2 = int(x1 - self.block)
i2 = i1 - 1
f1 = forecast[i1].feel
f2 = forecast[i2].feel
if (th is None):
v1 = Vect(x1, self.chart_y(f1))
v2 = Vect(x2, self.chart_y(f2))
ui.canvas.line(v1, v2, c, w)
if (th is None) or (f1 > th) or (f2 > th) or (f1 < tl) or (f2 < tl):
v1 = Vect(x1, self.chart_y(forecast[i1].temp))
v2 = Vect(x2, self.chart_y(forecast[i2].temp))
ui.canvas.line(v1, v2, c, w * 2)
def chart_y(self, temp):
return int(self.chart_max - (temp - self.temp_min) * self.k_temp)
| 2.890625 | 3 |
display-youtube-subscriber-count.py | akadir/my-bitbar-plugins | 2 | 12792298 | <filename>display-youtube-subscriber-count.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
from bs4 import BeautifulSoup
account_name = "besiktas"
url = "https://www.youtube.com/" + account_name
req = urllib2.Request(url)
req.add_header('User-agent', 'Mozilla/5.0\
(Windows NT 6.2; WOW64) AppleWebKit/537.11 (KHTML, like Gecko)\
Chrome/23.0.1271.97 Safari/537.11')
html_page = urllib2.urlopen(req)
if html_page.getcode() == 200:
soup = BeautifulSoup(html_page,"html.parser")
#print soup
subscriber_count = soup.find('span', attrs={'class':['yt-subscription-button-subscriber-count-branded-horizontal', 'subscribed yt-uix-tooltip']})
print subscriber_count.string.rstrip("\n\r")
else:
print "Error loading page"
print "---"
print account_name
| 3.234375 | 3 |
Code_Challenges/fizz_buzz.py | fuse999/Python_Sandbox | 0 | 12792299 | <filename>Code_Challenges/fizz_buzz.py
def fizz_buzz(num):
return "Fizz"*(num%3==0) + "Buzz"*(num%5==0) or str(num) | 3.078125 | 3 |
Day-09_Smoke-Basin/tests/test_day_09.py | richardangell/advent-of-code-2021 | 0 | 12792300 | <filename>Day-09_Smoke-Basin/tests/test_day_09.py<gh_stars>0
import numpy as np
import pytest
import puzzle_1
import puzzle_2
class TestPuzzle1:
"""Tests for puzzle 1."""
def test_low_point_heights(self, input_1):
"""Test that the heights of low points are correct."""
assert puzzle_1.find_low_point_heights(input_1) == [1, 0, 5, 5]
def test_find_risk_level_sum(self, input_1):
"""Test the sum of risk levels are correct."""
assert puzzle_1.find_risk_level_sum(input_1) == 15
class TestPuzzle2:
"""Tests for puzzle 2."""
def test_find_largest_basins(self, input_1):
"""Test that the overall solution find_largest_basins is correct."""
assert puzzle_2.find_largest_basins(input_1) == 1134
@pytest.mark.parametrize(
"coords,expected",
[
([0, 1], [[0, 0], [0, 1], [1, 0]]),
(
[0, 9],
[
[0, 5],
[0, 6],
[0, 7],
[0, 8],
[0, 9],
[1, 6],
[1, 8],
[1, 9],
[2, 9],
],
),
(
[2, 2],
[
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[3, 0],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
[4, 1],
],
),
(
[4, 6],
[
[2, 7],
[3, 6],
[3, 7],
[3, 8],
[4, 5],
[4, 6],
[4, 7],
[4, 8],
[4, 9],
],
),
],
)
def test_find_basin_around_low_point(self, input_1, coords, expected):
arr = np.array(input_1)
result = puzzle_2.find_basin_around_low_point(coords, arr)
assert sorted(expected) == sorted(result)
| 2.515625 | 3 |
OpenGL/all.gyp | legendlee1314/GLmacia | 0 | 12792301 | <reponame>legendlee1314/GLmacia<filename>OpenGL/all.gyp
{
'variables': {
'project_name': 'GLmacia',
'version': '1.0.0',
'current_dir': '<(DEPTH)',
},
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'<(current_dir)/app/app.gyp:app',
],
},
],
}
| 1.148438 | 1 |
Assignment1/plot.py | 3lLobo/ReinforcementLearningAndPlanning | 0 | 12792302 | <filename>Assignment1/plot.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
labels = [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
runs = 10
for n in range(runs):
data = pd.read_csv('./data/Vgrid_%d.csv' % n)
ax = sns.heatmap(data, robust=True)
plt.savefig('./plots/Valueheatmap%d.png' % n)
plt.close() | 3 | 3 |
apps/demo/migrations/0003_auto_20190810_1148.py | kagxin/toplist | 0 | 12792303 | <gh_stars>0
# Generated by Django 2.2.4 on 2019-08-10 03:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('demo', '0002_auto_20190718_0937'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='content',
new_name='contents',
),
]
| 1.585938 | 2 |
tsfeatures/tsfeatures_r.py | vishalbelsare/tsfeatures-1 | 57 | 12792304 | #!/usr/bin/env python
# coding: utf-8
from typing import List
import pandas as pd
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
def tsfeatures_r(ts: pd.DataFrame,
freq: int,
features: List[str] = ["length", "acf_features", "arch_stat",
"crossing_points", "entropy", "flat_spots",
"heterogeneity", "holt_parameters",
"hurst", "hw_parameters", "lumpiness",
"nonlinearity", "pacf_features", "stability",
"stl_features", "unitroot_kpss", "unitroot_pp"],
**kwargs) -> pd.DataFrame:
"""tsfeatures wrapper using r.
Parameters
----------
ts: pandas df
Pandas DataFrame with columns ['unique_id', 'ds', 'y'].
Long panel of time series.
freq: int
Frequency of the time series.
features: List[str]
String list of features to calculate.
**kwargs:
Arguments used by the original tsfeatures function.
References
----------
https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html
"""
rstring = """
function(df, freq, features, ...){
suppressMessages(library(data.table))
suppressMessages(library(tsfeatures))
dt <- as.data.table(df)
setkey(dt, unique_id)
series_list <- split(dt, by = "unique_id", keep.by = FALSE)
series_list <- lapply(series_list,
function(serie) serie[, ts(y, frequency = freq)])
if("hw_parameters" %in% features){
features <- setdiff(features, "hw_parameters")
if(length(features)>0){
hw_series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...))
names(hw_series_features) <- paste0("hw_", names(hw_series_features))
series_features <- suppressMessages(tsfeatures(series_list, features, ...))
series_features <- cbind(series_features, hw_series_features)
} else {
series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...))
names(series_features) <- paste0("hw_", names(series_features))
}
} else {
series_features <- suppressMessages(tsfeatures(series_list, features, ...))
}
setDT(series_features)
series_features[, unique_id := names(series_list)]
}
"""
pandas2ri.activate()
rfunc = robjects.r(rstring)
feats = rfunc(ts, freq, features, **kwargs)
pandas2ri.deactivate()
renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'}
feats = feats.rename(columns=renamer)
return feats
def tsfeatures_r_wide(ts: pd.DataFrame,
features: List[str] = ["length", "acf_features", "arch_stat",
"crossing_points", "entropy", "flat_spots",
"heterogeneity", "holt_parameters",
"hurst", "hw_parameters", "lumpiness",
"nonlinearity", "pacf_features", "stability",
"stl_features", "unitroot_kpss", "unitroot_pp"],
**kwargs) -> pd.DataFrame:
"""tsfeatures wrapper using r.
Parameters
----------
ts: pandas df
Pandas DataFrame with columns ['unique_id', 'seasonality', 'y'].
Wide panel of time series.
features: List[str]
String list of features to calculate.
**kwargs:
Arguments used by the original tsfeatures function.
References
----------
https://pkg.robjhyndman.com/tsfeatures/reference/tsfeatures.html
"""
rstring = """
function(uids, seasonalities, ys, features, ...){
suppressMessages(library(data.table))
suppressMessages(library(tsfeatures))
suppressMessages(library(purrr))
series_list <- pmap(
list(uids, seasonalities, ys),
function(uid, seasonality, y) ts(y, frequency=seasonality)
)
names(series_list) <- uids
if("hw_parameters" %in% features){
features <- setdiff(features, "hw_parameters")
if(length(features)>0){
hw_series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...))
names(hw_series_features) <- paste0("hw_", names(hw_series_features))
series_features <- suppressMessages(tsfeatures(series_list, features, ...))
series_features <- cbind(series_features, hw_series_features)
} else {
series_features <- suppressMessages(tsfeatures(series_list, "hw_parameters", ...))
names(series_features) <- paste0("hw_", names(series_features))
}
} else {
series_features <- suppressMessages(tsfeatures(series_list, features, ...))
}
setDT(series_features)
series_features[, unique_id := names(series_list)]
}
"""
pandas2ri.activate()
rfunc = robjects.r(rstring)
uids = ts['unique_id'].to_list()
seasonalities = ts['seasonality'].to_list()
ys = ts['y'].to_list()
feats = rfunc(uids, seasonalities, ys, features, **kwargs)
pandas2ri.deactivate()
renamer={'ARCH.LM': 'arch_lm', 'length': 'series_length'}
feats = feats.rename(columns=renamer)
return feats
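if __name__ == "__main__":
    # Minimal usage sketch (requires R with the `tsfeatures`, `data.table` and `purrr`
    # packages plus rpy2; the toy panel below is made up for illustration).
    import numpy as np
    panel = pd.DataFrame({
        "unique_id": ["ts1"] * 24,
        "ds": pd.date_range("2020-01-01", periods=24, freq="MS"),
        "y": np.sin(np.arange(24)) + np.arange(24) * 0.1,
    })
    feats = tsfeatures_r(panel, freq=12, features=["stl_features", "entropy"])
    print(feats.head())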
| 2.65625 | 3 |
start.py | ClaudiuGeorgiu/PythonADB | 15 | 12792305 | #!/usr/bin/env python3
import logging
from adb.adb import ADB
if __name__ == "__main__":
# Logging configuration.
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s> [%(levelname)s][%(name)s][%(funcName)s()] %(message)s",
datefmt="%d/%m/%Y %H:%M:%S",
level=logging.INFO,
)
# This is an example file showing how the adb wrapper can be used.
adb = ADB()
# Start with a clean adb server.
adb.kill_server()
adb.connect()
adb_version = adb.get_version()
logger.info("ADB version: {0}".format(adb_version))
connected_devices = adb.get_available_devices()
logger.info("Connected devices: {0}".format(connected_devices))
# Set the first device in the list as the target of the subsequent commands.
adb.target_device = connected_devices[0]
adb.wait_for_device()
logger.info(
"Message from Android device: {0}".format(adb.shell(['echo "Hello World!"']))
)
| 3.125 | 3 |
core/letkf_utils.py | drewpendergrass/CHEEREIO | 0 | 12792306 | import numpy as np
import xarray as xr
from glob import glob
import observation_operators as obs
import tropomi_tools as tt
import scipy.linalg as la
import toolbox as tx
from datetime import date,datetime,timedelta
def getLETKFConfig(testing=False):
data = tx.getSpeciesConfig(testing)
err_config = data['OBS_ERROR_MATRICES']
if '.npy' in err_config[0]: #Load error matrices from numpy files
raise NotImplementedError
else: #Assume list of strings
errs = np.array([float(e) for e in err_config])
#Provide a list of observation operator classes in order of the species to assimilate.
obs_operator_classes = [getattr(obs, s) for s in data['OBS_OPERATORS']]
#If you are simulating nature (SIMULATE_NATURE=true in setup_ensemble.sh), provide the nature helper class.
if data['SIMULATE_NATURE'] == "false":
raise NotImplementedError #No support for real observations yet!
else:
nature_h_functions = [getattr(obs, h) for h in data['NATURE_H_FUNCTIONS']]
inflation = float(data['INFLATION_FACTOR'])
return [errs, obs_operator_classes,nature_h_functions,inflation]
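# Illustrative example of the configuration keys read above (values are placeholders,
# not taken from any real ensemble configuration):
#
#   "OBS_ERROR_MATRICES": ["1e-2"],          # or paths to .npy files (not yet supported)
#   "OBS_OPERATORS":      ["ObsOperatorA"],  # class names defined in observation_operators
#   "SIMULATE_NATURE":    "true",
#   "NATURE_H_FUNCTIONS": ["h_function_A"],  # function/class names in observation_operators
#   "INFLATION_FACTOR":   "1.1"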
#This class contains useful methods for getting data from GEOS-Chem restart files and
#emissions scaling factor netCDFs. After initialization it contains the necessary data
#and can output it in useful ways to other functions in the LETKF procedure.
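#
# Illustrative call pattern (the run directory and timestamp below are hypothetical):
#   gt = GC_Translator('/path/to/ens_run_0001/', '20190101_0000', computeStateVec=True)
#   conc = gt.getSpecies3Dconc('O3')          # (lev, lat, lon) array from the restart file
#   gt.setSpecies3Dconc('O3', conc * 1.05)    # write a perturbed field back into the dataset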
class GC_Translator(object):
def __init__(self, path_to_rundir,timestamp,computeStateVec = False,testing=False):
#self.latinds,self.loninds = tx.getLatLonList(ensnum)
self.filename = f'{path_to_rundir}GEOSChem.Restart.{timestamp}z.nc4'
self.timestamp=timestamp
self.timestring = f'minutes since {timestamp[0:4]}-{timestamp[4:6]}-{timestamp[6:8]} {timestamp[9:11]}:{timestamp[11:13]}:00'
self.restart_ds = xr.load_dataset(self.filename)
self.emis_sf_filenames = glob(f'{path_to_rundir}*_SCALEFACTOR.nc')
self.testing=testing
if self.testing:
self.num = path_to_rundir.split('_')[-1][0:4]
print(f"GC_translator number {self.num} has been called for directory {path_to_rundir} and restart {self.filename}; construction beginning")
self.emis_ds_list = {}
for file in self.emis_sf_filenames:
name = '_'.join(file.split('/')[-1].split('_')[0:-1])
self.emis_ds_list[name] = xr.load_dataset(file)
if self.testing:
print(f"GC_translator number {self.num} has loaded scaling factors for {name}")
if computeStateVec:
self.buildStateVector()
else:
self.statevec = None
self.statevec_lengths = None #Until state vector is initialized this variable is None
if self.testing:
print(f"GC_Translator number {self.num} construction complete.")
#Since only one timestamp, returns in format lev,lat,lon
def getSpecies3Dconc(self, species):
da = np.array(self.restart_ds[f'SpeciesRst_{species}']).squeeze()
if self.testing:
print(f"GC_Translator number {self.num} got 3D conc for species {species} which are of dimension {np.shape(da)}.")
return da
def setSpecies3Dconc(self, species, conc3d):
baseshape = np.shape(conc3d)
conc4d = conc3d.reshape(np.concatenate([np.array([1]),baseshape]))
if self.testing:
print(f"GC_Translator number {self.num} set 3D conc for species {species} which are of dimension {np.shape(conc4d)}.")
self.restart_ds[f'SpeciesRst_{species}'] = (["time","lev","lat","lon"],conc4d,{"long_name":f"Dry mixing ratio of species {species}","units":"mol mol-1 dry","averaging_method":"instantaneous"})
def getLat(self):
return np.array(self.restart_ds['lat'])
def getLon(self):
return np.array(self.restart_ds['lon'])
def getLev(self):
return np.array(self.restart_ds['lev'])
def getRestartTime(self):
return np.array(self.restart_ds['time'])
def getEmisTime(self):
return np.array(list(self.emis_ds_list.values())[0]['time'])
#We work with the most recent timestamp. Rest are just for archival purposes.
def getEmisSF(self, species):
da = self.emis_ds_list[species]['Scalar']
return np.array(da)[-1,:,:].squeeze()
def getEmisLat(self, species):
return np.array(self.emis_ds_list[species]['lat'])
def getEmisLon(self, species):
return np.array(self.emis_ds_list[species]['lon'])
#Add 2d emissions scaling factors to the end of the emissions scaling factor
def addEmisSF(self, species, emis2d, assim_time):
timelist = self.getEmisTime()
last_time = timelist[-1]
#new_last_time = last_time+np.timedelta64(assim_time,'h') #Add assim time hours to the last timestamp
tstr = f'{self.timestamp[0:4]}-{self.timestamp[4:6]}-{self.timestamp[6:8]}T{self.timestamp[9:11]}:{self.timestamp[11:13]}:00.000000000'
new_last_time = np.datetime64(tstr)
if tx.getSpeciesConfig(self.testing)['DO_ENS_SPINUP']=='true':
START_DATE = tx.getSpeciesConfig(self.testing)['ENS_SPINUP_START']
else:
START_DATE = tx.getSpeciesConfig(self.testing)['START_DATE']
orig_timestamp = f'{START_DATE[0:4]}-{START_DATE[4:6]}-{START_DATE[6:8]}' #Start date from JSON
END_DATE = tx.getSpeciesConfig(self.testing)['END_DATE']
end_timestamp = f'{END_DATE[0:4]}-{END_DATE[4:6]}-{END_DATE[6:8]}'
#Create dataset with this timestep's scaling factors
ds = xr.Dataset(
{"Scalar": (("time","lat","lon"), np.expand_dims(emis2d,axis = 0),{"long_name": "Scaling factor", "units":"1"})},
coords={
"time": (["time"], np.array([new_last_time]), {"long_name": "time", "calendar": "standard", "units":f"hours since {orig_timestamp} 00:00:00"}),
"lat": (["lat"], self.getEmisLat(species),{"long_name": "Latitude", "units":"degrees_north"}),
"lon": (["lon"], self.getEmisLon(species),{"long_name": "Longitude", "units":"degrees_east"})
},
attrs={
"Title":"CHEEREIO scaling factors",
"Conventions":"COARDS",
"Format":"NetCDF-4",
"Model":"GENERIC",
"NLayers":"1",
"History":f"The LETKF utility added new scaling factors on {str(date.today())}",
"Start_Date":f"{orig_timestamp}",
"Start_Time":"0",
"End_Date":f"{end_timestamp}",
"End_Time":"0"
}
)
self.emis_ds_list[species] = xr.concat([self.emis_ds_list[species],ds],dim = 'time') #Concatenate
def buildStateVector(self):
if self.testing:
print("*****************************************************************")
print(f"GC_Translator number {self.num} is starting build of statevector!")
species_config = tx.getSpeciesConfig(self.testing)
statevec_components = []
for spec_conc in species_config['STATE_VECTOR_CONC']:
statevec_components.append(self.getSpecies3Dconc(spec_conc).flatten())
#If no scaling factor files, append 1s because this is a nature directory
if len(self.emis_sf_filenames)==0:
lenones = len(self.getLat())*len(self.getLon())*len(species_config['CONTROL_VECTOR_EMIS'])
statevec_components.append(np.ones(lenones))
else:
for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys():
statevec_components.append(self.getEmisSF(spec_emis).flatten())
self.statevec_lengths = np.array([len(vec) for vec in statevec_components])
self.statevec = np.concatenate(statevec_components)
if self.testing:
print(f"GC_Translator number {self.num} has built statevector; it is of dimension {np.shape(self.statevec)}.")
print("*****************************************************************")
def getLocalizedStateVectorIndices(self,latind,lonind):
surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing)
if self.testing:
print(f"GC_Translator is getting localized statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.")
levcount = len(self.getLev())
latcount = len(self.getLat())
loncount = len(self.getLon())
totalcount = levcount*latcount*loncount
dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount))
dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten()
if self.testing:
print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.")
dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount))
dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten()
if self.testing:
print(f"Within a flattened 2D dummy square, {len(dummy2dwhere_flat)} entries are valid.")
species_config = tx.getSpeciesConfig(self.testing)
conccount = len(species_config['STATE_VECTOR_CONC'])
emcount = len(species_config['CONTROL_VECTOR_EMIS'])
ind_collector = []
cur_offset = 0
for i in range(conccount):
ind_collector.append((dummywhere_flat+cur_offset))
cur_offset+=totalcount
for i in range(emcount):
ind_collector.append((dummy2dwhere_flat+cur_offset))
cur_offset+=(latcount*loncount)
statevecinds = np.concatenate(ind_collector)
if self.testing:
print(f"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.")
return statevecinds
def getColumnIndicesFromFullStateVector(self,latind,lonind):
if self.testing:
print(f"GC_Translator is getting column statevec indices FOR FULL VECTOR at {(latind,lonind)}.")
levcount = len(self.getLev())
latcount = len(self.getLat())
loncount = len(self.getLon())
totalcount = levcount*latcount*loncount
dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount))
dummywhere_flat = dummy3d[:,latind,lonind].flatten()
if self.testing:
print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat)} entries are valid.")
dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount))
dummy2dwhere_flat = dummy2d[latind,lonind]
if self.testing:
print(f"Within a flattened 2D dummy square, {dummy2dwhere_flat} is sole valid entry.")
species_config = tx.getSpeciesConfig(self.testing)
conccount = len(species_config['STATE_VECTOR_CONC'])
emcount = len(species_config['CONTROL_VECTOR_EMIS'])
ind_collector = []
cur_offset = 0
for i in range(conccount):
ind_collector.append(dummywhere_flat+cur_offset)
cur_offset+=totalcount
for i in range(emcount):
ind_collector.append(np.array([dummy2dwhere_flat+cur_offset]))
cur_offset+=(latcount*loncount)
statevecinds = np.concatenate(ind_collector)
if self.testing:
print(f"There are a total of {len(statevecinds)}/{len(self.statevec)} selected from total statevec.")
return statevecinds
def getSpeciesConcIndicesInColumn(self,species):
levcount = len(self.getLev())
species_config = tx.getSpeciesConfig(self.testing)
cur_offset = 0
for ind,spec in enumerate(species_config['STATE_VECTOR_CONC']):
if species == spec:
return np.arange(cur_offset,cur_offset+levcount)
cur_offset+=levcount
return None #If loop doesn't terminate we did not find the species
def getSpeciesEmisIndicesInColumn(self,species):
levcount = len(self.getLev())
species_config = tx.getSpeciesConfig(self.testing)
cur_offset = len(species_config['STATE_VECTOR_CONC'])*levcount
for ind,spec in enumerate(species_config['CONTROL_VECTOR_EMIS']):
if species == spec:
return cur_offset
cur_offset+=1
return None #If loop doesn't terminate we did not find the species
def getColumnIndicesFromLocalizedStateVector(self,latind,lonind):
surr_latinds, surr_loninds = tx.getIndsOfInterest(latind,lonind,testing=self.testing)
if self.testing:
print(f"GC_Translator is getting column statevec indices surrounding {(latind,lonind)} (lat/lon inds have shapes {np.shape(surr_latinds)}/{np.shape(surr_loninds)}); Lat inds are {surr_latinds} and lon inds are {surr_loninds}.")
levcount = len(self.getLev())
latcount = len(self.getLat())
loncount = len(self.getLon())
totalcount = levcount*latcount*loncount
dummy3d = np.arange(0, totalcount).reshape((levcount,latcount,loncount))
dummywhere_flat = dummy3d[:,surr_latinds,surr_loninds].flatten()
dummywhere_flat_column = dummy3d[:,latind,lonind].flatten()
dummywhere_match = np.where(np.in1d(dummywhere_flat,dummywhere_flat_column))[0]
if self.testing:
print(f"Within a flattened 3D dummy cube, {len(dummywhere_flat_column)} entries are valid in the column.")
print(f"Matched {len(dummywhere_match)} entries in the overall flattened and subsetted column; values are {dummywhere_match}")
dummy2d = np.arange(0, latcount*loncount).reshape((latcount,loncount))
dummy2dwhere_flat = dummy2d[surr_latinds,surr_loninds].flatten()
dummy2dwhere_flat_column = dummy2d[latind,lonind]
dummy2dwhere_match = np.where(np.in1d(dummy2dwhere_flat,dummy2dwhere_flat_column))[0]
if self.testing:
print(f"Within a flattened 2D dummy square, {dummy2dwhere_flat_column} is the sole valid index in the column.")
print(f"Matched value in the overall flattened and subsetted square is {dummy2dwhere_match}")
species_config = tx.getSpeciesConfig(self.testing)
conccount = len(species_config['STATE_VECTOR_CONC'])
emcount = len(species_config['CONTROL_VECTOR_EMIS'])
ind_collector = []
cur_offset = 0
for i in range(conccount):
ind_collector.append((dummywhere_match+cur_offset))
cur_offset+=len(dummywhere_flat)
for i in range(emcount):
ind_collector.append((dummy2dwhere_match+cur_offset))
cur_offset+=len(dummy2dwhere_flat) #Only one value here.
localizedstatevecinds = np.concatenate(ind_collector)
if self.testing:
print(f"There are a total of {len(localizedstatevecinds)}/{len(self.statevec)} selected from total statevec.")
return localizedstatevecinds
def getStateVector(self,latind=None,lonind=None):
if self.statevec is None:
self.buildStateVector()
if not (latind is None): #User supplied ind
statevecinds = self.getLocalizedStateVectorIndices(latind,lonind)
statevec_toreturn = self.statevec[statevecinds]
else: #Return the whole vector
statevec_toreturn = self.statevec
if self.testing:
print(f"GC Translator number {self.num} got statevector for inds {(latind,lonind)}; this vec has length {len(statevec_toreturn)} of total statevec {len(self.statevec)}.")
return statevec_toreturn
#Randomize the restart for purposes of testing. Perturbation is 1/2 of range of percent change selected from a uniform distribution.
#E.g. 0.1 would range from 90% to 110% of initial values. Bias adds that percent on top of the perturbed fields (0.1 raises everything 10%).
#Repeats this procedure for every species in the state vector (excluding emissions).
def randomizeRestart(self,perturbation=0.1,bias=0):
statevec_species = tx.getSpeciesConfig(self.testing)['STATE_VECTOR_CONC']
offset = 1-perturbation
scale = perturbation*2
for spec in statevec_species:
conc3d = self.getSpecies3Dconc(spec)
conc3d *= (scale*np.random.rand(*np.shape(conc3d)))+offset
conc3d *= 1+bias
self.setSpecies3Dconc(spec,conc3d)
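#Worked example of the arithmetic above (added comment; values are hypothetical): with perturbation=0.1 and bias=0.05,
#offset=0.9 and scale=0.2, so every grid cell is multiplied by a uniform draw from [0.9, 1.1] and then by 1.05,
#i.e. the randomized field ends up between roughly 94.5% and 115.5% of its original value.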
#Reconstruct all the 3D concentrations from the analysis vector and overwrite relevant terms in the xr restart dataset.
#Also construct new scaling factors and add them as a separate array at the new timestep in each of the scaling factor netCDFs.
#However, only do so for species in the control vectors of emissions and concentrations.
def reconstructArrays(self,analysis_vector):
species_config = tx.getSpeciesConfig(self.testing)
restart_shape = np.shape(self.getSpecies3Dconc(species_config['STATE_VECTOR_CONC'][0]))
emislist=list(species_config['CONTROL_VECTOR_EMIS'].keys())
emis_shape = np.shape(self.getEmisSF(emislist[0]))
counter = 0
for spec_conc in species_config['STATE_VECTOR_CONC']:
if spec_conc in species_config['CONTROL_VECTOR_CONC']: #Only overwrite if in the control vector; otherwise just increment.
index_start = np.sum(self.statevec_lengths[0:counter])
index_end = np.sum(self.statevec_lengths[0:(counter+1)])
analysis_subset = analysis_vector[index_start:index_end]
analysis_3d = np.reshape(analysis_subset,restart_shape) #Unflattens with 'C' order in python
self.setSpecies3Dconc(spec_conc,analysis_3d) #Overwrite.
counter+=1
for spec_emis in species_config['CONTROL_VECTOR_EMIS'].keys(): #Emissions scaling factors are all in the control vector
index_start = np.sum(self.statevec_lengths[0:counter])
index_end = np.sum(self.statevec_lengths[0:(counter+1)])
analysis_subset = analysis_vector[index_start:index_end]
analysis_emis_2d = np.reshape(analysis_subset,emis_shape) #Unflattens with 'C' order in python
self.addEmisSF(spec_emis,analysis_emis_2d,species_config['ASSIM_TIME'])
counter+=1
def saveRestart(self):
self.restart_ds["time"] = (["time"], np.array([0]), {"long_name": "Time", "calendar": "gregorian", "axis":"T", "units":self.timestring})
self.restart_ds.to_netcdf(self.filename)
def saveEmissions(self):
for file in self.emis_sf_filenames:
name = '_'.join(file.split('/')[-1].split('_')[0:-1])
self.emis_ds_list[name].to_netcdf(file)
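#Usage sketch for GC_Translator (added comment; the path and timestamp are hypothetical and the argument order
#follows the calls made elsewhere in this file: run directory, timestamp, computeStateVec flag). The lines are
#kept as comments so that importing this module is unchanged:
#   gt = GC_Translator('/path/to/ensemble_runs/RUN_NAME_0001/', '20190107_0000', True)
#   colinds = gt.getLocalizedStateVectorIndices(latind=23, lonind=41)
#   column = gt.getStateVector(latind=23, lonind=41)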
#A class that takes history files and connects them with the main state vector and observation matrices
class HIST_Translator(object):
def __init__(self, path_to_rundir,timeperiod,interval=None,testing=False):
self.testing = testing
self.spc_config = tx.getSpeciesConfig(self.testing)
self.hist_dir = f'{path_to_rundir}OutputDir'
self.timeperiod = timeperiod
self.interval = interval
def globSubDir(self,timeperiod,useLevelEdge = False):
specconc_list = glob(f'{self.hist_dir}/GEOSChem.SpeciesConc*.nc4')
specconc_list.sort()
ts = [datetime.strptime(spc.split('.')[-2][0:13], "%Y%m%d_%H%M") for spc in specconc_list]
if self.interval:
specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1]) and (t.hour % self.interval == 0)]
else:
specconc_list = [spc for spc,t in zip(specconc_list,ts) if (t>=timeperiod[0]) and (t<timeperiod[1])]
if useLevelEdge:
le_list = glob(f'{self.hist_dir}/GEOSChem.LevelEdgeDiags*.nc4')
le_list.sort()
le_ts = [datetime.strptime(le.split('.')[-2][0:13], "%Y%m%d_%H%M") for le in le_list]
le_list = [le for le,t in zip(le_list,le_ts) if (t>=timeperiod[0]) and (t<timeperiod[1])]
return [specconc_list,le_list]
else:
return specconc_list
def combineHist(self,species,useLevelEdge=False):
dataset=[]
if useLevelEdge:
specconc_list,le_list=self.globSubDir(self.timeperiod,useLevelEdge)
for specfile,lefile in zip(specconc_list,le_list):
hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}']
lev_val = xr.load_dataset(lefile)[f'Met_PEDGE']
data_val = xr.merge([hist_val, lev_val])
dataset.append(data_val)
else:
specconc_list=self.globSubDir(self.timeperiod,useLevelEdge)
for specfile in specconc_list:
hist_val = xr.load_dataset(specfile)[f'SpeciesConc_{species}']
dataset.append(hist_val)
dataset = xr.merge(dataset)
return dataset
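#Usage sketch for HIST_Translator (added comment; the path, dates and species are hypothetical and the lines are
#kept as comments so nothing runs on import):
#   period = (datetime(2019, 1, 1, 0, 0), datetime(2019, 1, 8, 0, 0))
#   ht = HIST_Translator('/path/to/ensemble_runs/RUN_NAME_0001/', period, interval=3)
#   ch4_hist = ht.combineHist('CH4', useLevelEdge=False)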
#4D ensemble interface with satellite operators.
class HIST_Ens(object):
def __init__(self,timestamp,useLevelEdge=False,fullperiod=False,interval=None,testing=False):
self.testing = testing
self.useLevelEdge = useLevelEdge
self.spc_config = tx.getSpeciesConfig(self.testing)
path_to_ensemble = f"{self.spc_config['MY_PATH']}/{self.spc_config['RUN_NAME']}/ensemble_runs"
subdirs = glob(f"{path_to_ensemble}/*/")
subdirs.remove(f"{path_to_ensemble}/logs/")
dirnames = [d.split('/')[-2] for d in subdirs]
subdir_numbers = [int(n.split('_')[-1]) for n in dirnames]
ensemble_numbers = []
endtime = datetime.strptime(timestamp, "%Y%m%d_%H%M")
if fullperiod:
START_DATE = self.spc_config['START_DATE']
starttime = datetime.strptime(f'{START_DATE}_0000', "%Y%m%d_%H%M")
else:
ASSIM_TIME = self.spc_config['ASSIM_TIME']
delta = timedelta(hours=int(ASSIM_TIME))
starttime = endtime-delta
self.timeperiod = (starttime,endtime)
self.ht = {}
self.observed_species = self.spc_config['OBSERVED_SPECIES']
for ens, directory in zip(subdir_numbers,subdirs):
if ens!=0:
if fullperiod:
self.ht[ens] = HIST_Translator(directory, self.timeperiod,interval,testing=self.testing)
else:
self.ht[ens] = HIST_Translator(directory, self.timeperiod,testing=self.testing)
ensemble_numbers.append(ens)
self.ensemble_numbers=np.array(ensemble_numbers)
self.maxobs=int(self.spc_config['MAXNUMOBS'])
self.interval=interval
self.makeBigY()
def makeSatTrans(self):
self.SAT_TRANSLATOR = {}
self.satSpecies = []
for spec,bool4D,boolTROPOMI in zip(list(self.observed_species.values()),self.spc_config['OBS_4D'],self.spc_config['OBS_TYPE_TROPOMI']):
if (bool4D and boolTROPOMI):
self.SAT_TRANSLATOR[spec] = tt.TROPOMI_Translator(self.testing)
self.satSpecies.append(spec)
def getSatData(self):
self.SAT_DATA = {}
for spec in self.satSpecies:
self.SAT_DATA[spec] = self.SAT_TRANSLATOR[spec].getSatellite(spec,self.timeperiod,self.interval)
def makeBigY(self):
self.makeSatTrans()
self.getSatData()
self.bigYDict = {}
for spec in self.satSpecies:
self.bigYDict[spec] = self.getColsforSpecies(spec)
#This is just a filler.
def makeRforSpecies(self,species,latind,lonind):
inds = self.getIndsOfInterest(species,latind,lonind)
return np.diag(np.repeat(15,len(inds)))
def makeR(self,latind,lonind):
errmats = []
for spec in self.satSpecies:
errmats.append(self.makeRforSpecies(spec,latind,lonind))
return la.block_diag(*errmats)
def getColsforSpecies(self,species):
col3D = []
firstens = self.ensemble_numbers[0]
hist4D = self.ht[firstens].combineHist(species,self.useLevelEdge)
if self.spc_config['AV_TO_GC_GRID']=="True":
firstcol,satcol,satlat,satlon,sattime,numav = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)
else:
firstcol,satcol,satlat,satlon,sattime = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)
shape2D = np.zeros(2)
shape2D[0] = len(firstcol)
shape2D[1]=len(self.ensemble_numbers)
shape2D = shape2D.astype(int)
conc2D = np.zeros(shape2D)
conc2D[:,firstens-1] = firstcol
for i in self.ensemble_numbers:
if i!=firstens:
hist4D = self.ht[i].combineHist(species,self.useLevelEdge)
if self.spc_config['AV_TO_GC_GRID']=="True":
col,_,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)
else:
col,_,_,_,_ = self.SAT_TRANSLATOR[species].gcCompare(species,self.timeperiod,self.SAT_DATA[species],hist4D)
conc2D[:,i-1] = col
if self.spc_config['AV_TO_GC_GRID']=="True":
return [conc2D,satcol,satlat,satlon,sattime,numav]
else:
return [conc2D,satcol,satlat,satlon,sattime]
def getIndsOfInterest(self,species,latind,lonind):
loc_rad = float(self.spc_config['LOCALIZATION_RADIUS_km'])
origlat,origlon = tx.getLatLonVals(self.spc_config,self.testing)
latval = origlat[latind]
lonval = origlon[lonind]
distvec = np.array([tx.calcDist_km(latval,lonval,a,b) for a,b in zip(self.bigYDict[species][2],self.bigYDict[species][3])])
inds = np.where(distvec<=loc_rad)[0]
if len(inds) > self.maxobs:
inds = np.random.choice(inds, self.maxobs,replace=False) #Randomly subset down to appropriate number of observations
return inds
def getLocObsMeanPertDiff(self,latind,lonind):
obsmeans = []
obsperts = []
obsdiffs = []
for spec in self.satSpecies:
ind = self.getIndsOfInterest(spec,latind,lonind)
if self.spc_config['AV_TO_GC_GRID']=="True":
gccol,satcol,_,_,_,_ = self.bigYDict[spec]
else:
gccol,satcol,_,_,_ = self.bigYDict[spec]
gccol = gccol[ind,:]
satcol = satcol[ind]
obsmean = np.mean(gccol,axis=1)
obspert = np.zeros(np.shape(gccol))
for i in range(np.shape(gccol)[1]):
obspert[:,i]=gccol[:,i]-obsmean
obsdiff = satcol-obsmean
obsmeans.append(obsmean)
obsperts.append(obspert)
obsdiffs.append(obsdiff)
full_obsmeans = np.concatenate(obsmeans)
full_obsperts = np.concatenate(obsperts,axis = 0)
full_obsdiffs = np.concatenate(obsdiffs)
return [full_obsmeans,full_obsperts,full_obsdiffs]
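#For reference (added comment), the three arrays returned above follow the usual LETKF observation-space notation:
#ybar is the ensemble mean of the simulated observations, each column of the perturbation matrix is y_i - ybar,
#and the difference vector is y_obs - ybar. A minimal numpy sketch of the same math:
#   ybar = gccol.mean(axis=1); Ypert = gccol - ybar[:, None]; ydiff = satcol - ybar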
#Lightweight container for GC_Translators; used to combine columns, update restarts, and diff columns.
class GT_Container(object):
def __init__(self,timestamp,testing=False,constructStateVecs=True):
self.testing = testing
spc_config = tx.getSpeciesConfig(self.testing)
path_to_ensemble = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs"
self.path_to_scratch = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch"
npy_column_files = glob(f'{self.path_to_scratch}/**/*.npy',recursive=True)
npy_col_names = [file.split('/')[-1] for file in npy_column_files]
npy_columns = [np.load(file) for file in npy_column_files]
self.columns = dict(zip(npy_col_names,npy_columns))
subdirs = glob(f"{path_to_ensemble}/*/")
subdirs.remove(f"{path_to_ensemble}/logs/")
dirnames = [d.split('/')[-2] for d in subdirs]
subdir_numbers = [int(n.split('_')[-1]) for n in dirnames]
ensemble_numbers = []
self.gt = {}
self.nature = None
self.observed_species = spc_config['OBSERVED_SPECIES']
for ens, directory in zip(subdir_numbers,subdirs):
if ens==0:
self.nature = GC_Translator(directory, timestamp, constructStateVecs,self.testing)
else:
self.gt[ens] = GC_Translator(directory, timestamp, constructStateVecs,self.testing)
ensemble_numbers.append(ens)
self.ensemble_numbers=np.array(ensemble_numbers)
#Gets saved column and compares to the original files
def constructColStatevec(self,latind,lonind):
firstens = self.ensemble_numbers[0]
col1indvec = self.gt[firstens].getColumnIndicesFromFullStateVector(latind,lonind)
backgroundEnsemble = np.zeros((len(col1indvec),len(self.ensemble_numbers)))
backgroundEnsemble[:,firstens-1] = self.gt[firstens].statevec[col1indvec]
for i in self.ensemble_numbers:
if i!=firstens:
colinds = self.gt[i].getColumnIndicesFromFullStateVector(latind,lonind)
backgroundEnsemble[:,i-1] = self.gt[i].statevec[colinds]
return backgroundEnsemble
def diffColumns(self,latind,lonind):
filenames = list(self.columns.keys())
substr = f'lat_{latind}_lon_{lonind}.npy'
search = [i for i in filenames if substr in i]
saved_col = self.columns[search[0]]
backgroundEnsemble = self.constructColStatevec(latind,lonind)
diff = saved_col-backgroundEnsemble
return [saved_col,backgroundEnsemble,diff]
def compareSpeciesConc(self,species,latind,lonind):
firstens = self.ensemble_numbers[0]
colind = self.gt[firstens].getSpeciesConcIndicesInColumn(species)
saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind)
saved_col = saved_col[colind,:]
backgroundEnsemble = backgroundEnsemble[colind,:]
diff = diff[colind,:]
col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind)
naturecol = self.nature.statevec[col1indvec][colind]
print(f'*********************************** {species} CONCENTRATION COLUMN AT INDEX {(latind,lonind)} ************************************')
for i in range(np.shape(saved_col)[1]):
print(f' ')
print(f'{species} in ensemble member {i+1} had background concentration of {100*(backgroundEnsemble[:,i]/naturecol)}% nature')
print(f'{species} in ensemble member {i+1} had analysis concentration of {100*(saved_col[:,i]/naturecol)}% nature')
print(f'This represents a percent difference of {100*(diff[:,i]/backgroundEnsemble[:,i])}%')
print(f' ')
def compareSpeciesEmis(self,species,latind,lonind):
firstens = self.ensemble_numbers[0]
colind = self.gt[firstens].getSpeciesEmisIndicesInColumn(species)
saved_col,backgroundEnsemble,diff = self.diffColumns(latind,lonind)
saved_col = saved_col[colind,:] #Now will just be a vector of length NumEnsemble
backgroundEnsemble = backgroundEnsemble[colind,:]
diff = diff[colind,:]
col1indvec = self.nature.getColumnIndicesFromFullStateVector(latind,lonind)
naturecol = self.nature.statevec[col1indvec][colind]
print(f'*********************************** {species} EMISSIONS SCALING AT INDEX {(latind,lonind)} ************************************')
for i in range(len(saved_col)):
print(f' ')
print(f'{species} in ensemble member {i+1} had background emissions scaling of {100*(backgroundEnsemble[i]/naturecol)}% nature')
print(f'{species} in ensemble member {i+1} had analysis emissions scaling of {100*(saved_col[i]/naturecol)}% nature')
print(f'This represents a percent difference of {100*(diff[i]/backgroundEnsemble[i])}%')
print(f' ')
def reconstructAnalysisEnsemble(self):
self.analysisEnsemble = np.zeros((len(self.gt[1].getStateVector()),len(self.ensemble_numbers)))
for name, cols in zip(self.columns.keys(),self.columns.values()):
split_name = name.split('_')
latind = int(split_name[-3])
lonind = int(split_name[-1].split('.')[0])
colinds = self.gt[1].getColumnIndicesFromFullStateVector(latind,lonind)
self.analysisEnsemble[colinds,:] = cols
def updateRestartsAndScalingFactors(self):
for i in self.ensemble_numbers:
self.gt[i].reconstructArrays(self.analysisEnsemble[:,i-1])
def saveRestartsAndScalingFactors(self):
for i in self.ensemble_numbers:
self.gt[i].saveRestart()
self.gt[i].saveEmissions()
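#Typical workflow sketch for GT_Container (added comment; the timestamp is hypothetical and the calls are kept
#as comments so nothing runs on import):
#   gtc = GT_Container('20190107_0000')
#   gtc.reconstructAnalysisEnsemble()
#   gtc.updateRestartsAndScalingFactors()
#   gtc.saveRestartsAndScalingFactors()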
#Contains a dictionary referencing GC_Translators for every run directory.
#In the special case where there is a nature run present (with number 0)
#store the nature run in GC_Translator object nature.
#Also contains an observation operator (pass in the class you would like to use) for each species to assimilate.
#Class contains functions to calculate relevant assimilation variables.
#SPECIAL NOTE ON FILES: we will be assuming that geos-chem stopped and left a restart at assimilation time in each run directory.
#That restart will be overwritten in place (name not changed) so next run starts from the assimilation state vector.
#Emissions scaling factors are most recent available (one assimilation timestep ago). New values will be appended to netCDF.
class Assimilator(object):
def __init__(self,timestamp,ensnum,corenum,testing=False):
self.testing = testing
self.ensnum = ensnum
self.corenum = corenum
self.latinds,self.loninds = tx.getLatLonList(ensnum,corenum,self.testing)
if self.testing:
print(f"Assimilator has been called for ens {self.ensnum} core {self.corenum}; construction beginning")
print(f"This core will be handling lat and lon values {[(latval,lonval) for latval,lonval in zip(self.latinds,self.loninds)]}")
spc_config = tx.getSpeciesConfig(self.testing)
path_to_ensemble = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/ensemble_runs"
self.path_to_scratch = f"{spc_config['MY_PATH']}/{spc_config['RUN_NAME']}/scratch"
self.parfilename = f'ens_{ensnum}_core_{corenum}_time_{timestamp}'
subdirs = glob(f"{path_to_ensemble}/*/")
subdirs.remove(f"{path_to_ensemble}/logs/")
dirnames = [d.split('/')[-2] for d in subdirs]
if self.testing:
print(f"The following ensemble directories were detected: {dirnames}")
subdir_numbers = [int(n.split('_')[-1]) for n in dirnames]
ensemble_numbers = []
self.nature = None
self.emcount = len(spc_config['CONTROL_VECTOR_EMIS'])
self.MINNUMOBS = int(spc_config['MINNUMOBS'])
self.MinimumScalingFactorAllowed = [float(s) for s in spc_config["MinimumScalingFactorAllowed"]]
self.MaximumScalingFactorAllowed = [float(s) for s in spc_config["MaximumScalingFactorAllowed"]]
self.InflateScalingsToXOfPreviousStandardDeviation = [float(s) for s in spc_config["InflateScalingsToXOfPreviousStandardDeviation"]]
self.MaximumScaleFactorRelativeChangePerAssimilationPeriod=[float(s) for s in spc_config["MaximumScaleFactorRelativeChangePerAssimilationPeriod"]]
self.AveragePriorAndPosterior = spc_config["AveragePriorAndPosterior"] == "True"
self.PriorWeightinPriorPosteriorAverage = float(spc_config["PriorWeightinPriorPosteriorAverage"])
self.forceOverrideNature=True #Set to true to ignore existing nature directory. Only for testing
self.gt = {}
self.observed_species = spc_config['OBSERVED_SPECIES']
if self.testing:
print(f"Begin creating GC Translators with state vectors.")
for ens, directory in zip(subdir_numbers,subdirs):
if (ens==0) and (not self.forceOverrideNature):
self.nature = GC_Translator(directory, timestamp, False,self.testing)
else:
self.gt[ens] = GC_Translator(directory, timestamp, True,self.testing)
ensemble_numbers.append(ens)
self.ensemble_numbers=np.array(ensemble_numbers)
if self.testing:
print(f"GC Translators created. Ensemble number list: {self.ensemble_numbers}")
if self.nature is None:
self.full4D = True #Implement me
self.inflation = float(spc_config['INFLATION_FACTOR'])
self.histens = HIST_Ens(timestamp,True,testing=self.testing)
else:
self.full4D = False
error_multipliers_or_matrices, self.ObsOperatorClass_list,nature_h_functions,self.inflation = getLETKFConfig(self.testing)
self.NatureHelperInstance = obs.NatureHelper(self.nature,self.observed_species,nature_h_functions,error_multipliers_or_matrices,self.testing)
self.makeObsOps()
if self.testing:
print(f"Assimilator construction complete")
def getLat(self):
return self.gt[1].getLat() #Latitude of first ensemble member, which should always exist
def getLon(self):
return self.gt[1].getLon()
def getLev(self):
return self.gt[1].getLev()
def makeObsOps(self):
if self.testing:
print(f'makeObsOps called in Assimilator')
self.ObsOp = {}
for i,obs_spec_key in enumerate(self.observed_species.keys()):
ObsOp_instance = self.NatureHelperInstance.makeObsOp(obs_spec_key,self.ObsOperatorClass_list[i])
self.ObsOp[obs_spec_key] = ObsOp_instance
def combineEnsemble(self,latind=None,lonind=None):
if self.testing:
print(f'combineEnsemble called in Assimilator for lat/lon inds {(latind,lonind)}')
firstens = self.ensemble_numbers[0]
firstvec = self.gt[firstens].getStateVector(latind,lonind)
statevecs = np.zeros((len(firstvec),len(self.ensemble_numbers)))
statevecs[:,firstens-1] = firstvec
for i in self.ensemble_numbers:
if i!=firstens:
statevecs[:,i-1] = self.gt[i].getStateVector(latind,lonind)
if self.testing:
print(f'Ensemble combined in Assimilator for lat/lon inds {(latind,lonind)} and has dimensions {np.shape(statevecs)}.')
return statevecs
def ensMeanAndPert(self,latval,lonval):
if self.testing:
print(f'ensMeanAndPert called in Assimilator for lat/lon inds {(latval,lonval)}')
statevecs = self.combineEnsemble(latval,lonval)
state_mean = np.mean(statevecs,axis = 1) #calculate ensemble mean
bigX = np.zeros(np.shape(statevecs))
for i in range(np.shape(bigX)[1]):
bigX[:,i] = statevecs[:,i]-state_mean
if self.testing:
print(f'Ensemble mean at {(latval,lonval)} has dimensions {np.shape(state_mean)} and bigX at at {(latval,lonval)} has dimensions {np.shape(bigX)}.')
return [state_mean,bigX]
def ensObsMeanPertDiff(self,latval,lonval):
if self.testing:
print(f'ensObsMeanPertDiff called in Assimilator for lat/lon inds {(latval,lonval)}')
obsmeans = []
obsperts = []
obsdiffs = []
for obskey,species in zip(list(self.observed_species.keys()),list(self.observed_species.values())):
obsmean,obspert = self.ensObsMeanAndPertForSpecies(obskey,species,latval,lonval)
obsmeans.append(obsmean)
obsperts.append(obspert)
obsdiffs.append(self.obsDiffForSpecies(obskey,obsmean,latval,lonval))
full_obsmeans = np.concatenate(obsmeans)
full_obsperts = np.concatenate(obsperts,axis = 0)
full_obsdiffs = np.concatenate(obsdiffs)
if self.testing:
print(f'Full ObsMeans at {(latval,lonval)} has dimensions {np.shape(full_obsmeans)}; Full ObsPerts at {(latval,lonval)} has dimensions {np.shape(full_obsperts)}; and Full ObsDiffs at {(latval,lonval)} has dimensions {np.shape(full_obsdiffs)}.')
return [full_obsmeans,full_obsperts,full_obsdiffs]
def combineEnsembleForSpecies(self,species):
if self.testing:
print(f'combineEnsembleForSpecies called in Assimilator for species {species}')
conc3D = []
firstens = self.ensemble_numbers[0]
first3D = self.gt[firstens].getSpecies3Dconc(species)
shape4D = np.zeros(4)
shape4D[0:3] = np.shape(first3D)
shape4D[3]=len(self.ensemble_numbers)
shape4D = shape4D.astype(int)
conc4D = np.zeros(shape4D)
conc4D[:,:,:,firstens-1] = first3D
for i in self.ensemble_numbers:
if i!=firstens:
conc4D[:,:,:,i-1] = self.gt[i].getSpecies3Dconc(species)
return conc4D
def ensObsMeanAndPertForSpecies(self, observation_key,species,latval,lonval):
if self.testing:
print(f'ensObsMeanAndPertForSpecies called for keys {observation_key} -> {species} in Assimilator for lat/lon inds {(latval,lonval)}')
spec_4D = self.combineEnsembleForSpecies(species)
return self.ObsOp[observation_key].obsMeanAndPert(spec_4D,latval,lonval)
def obsDiffForSpecies(self,observation_key,ensvec,latval,lonval):
if self.testing:
print(f'prepareMeansAndPerts called for {observation_key} in Assimilator for lat/lon inds {(latval,lonval)}')
return self.ObsOp[observation_key].obsDiff(ensvec,latval,lonval)
def prepareMeansAndPerts(self,latval,lonval):
if self.testing:
print(f'prepareMeansAndPerts called in Assimilator for lat/lon inds {(latval,lonval)}')
if self.full4D:
self.ybar_background, self.Ypert_background, self.ydiff = self.histens.getLocObsMeanPertDiff(latval,lonval)
else:
self.ybar_background, self.Ypert_background, self.ydiff = self.ensObsMeanPertDiff(latval,lonval)
self.xbar_background, self.Xpert_background = self.ensMeanAndPert(latval,lonval)
if self.testing:
print(f'ybar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ybar_background)}.')
print(f'Ypert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Ypert_background)}.')
print(f'ydiff for lat/lon inds {(latval,lonval)} has shape {np.shape(self.ydiff)}.')
print(f'xbar_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.xbar_background)}.')
print(f'Xpert_background for lat/lon inds {(latval,lonval)} has shape {np.shape(self.Xpert_background)}.')
def makeR(self,latind=None,lonind=None):
if self.testing:
print(f"Making R for lat/lon inds {(latind,lonind)}.")
if self.full4D:
self.R = self.histens.makeR(latind,lonind)
else:
errmats = []
for species in self.observed_species:
errmats.append(self.ObsOp[species].obsinfo.getObsErr(latind,lonind))
self.R = la.block_diag(*errmats)
if self.testing:
print(f'R for {(latind,lonind)} has dimension {np.shape(self.R)} and value {self.R}')
def makeC(self):
self.C = np.transpose(self.Ypert_background) @ la.inv(self.R)
if self.testing:
print(f'C made in Assimilator. It has dimension {np.shape(self.C)} and value {self.C}')
def makePtildeAnalysis(self):
cyb = self.C @ self.Ypert_background
k = len(self.ensemble_numbers)
iden = (k-1)*np.identity(k)/(1+self.inflation)
self.PtildeAnalysis = la.inv(iden+cyb)
if self.testing:
print(f'PtildeAnalysis made in Assimilator. It has dimension {np.shape(self.PtildeAnalysis)} and value {self.PtildeAnalysis}')
def makeWAnalysis(self):
k = len(self.ensemble_numbers)
self.WAnalysis = la.sqrtm((k-1)*self.PtildeAnalysis)
if self.testing:
print(f'WAnalysis initialized in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}')
def makeWbarAnalysis(self):
self.WbarAnalysis = [email protected]@self.ydiff
if self.testing:
print(f'WbarAnalysis made in Assimilator. It has dimension {np.shape(self.WbarAnalysis)} and value {self.WbarAnalysis}')
def adjWAnalysis(self):
k = len(self.ensemble_numbers)
for i in range(k):
self.WAnalysis[:,i]+=self.WbarAnalysis
if self.testing:
print(f'WAnalysis adjusted in Assimilator. It has dimension {np.shape(self.WAnalysis)} and value {self.WAnalysis}')
def makeAnalysisCombinedEnsemble(self):
self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background))
k = len(self.ensemble_numbers)
for i in range(k):
self.analysisEnsemble[:,i] = self.Xpert_background.dot(self.WAnalysis[:,i])+self.xbar_background
if self.testing:
print(f'analysisEnsemble made in Assimilator. It has dimension {np.shape(self.analysisEnsemble)} and value {self.analysisEnsemble}')
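#Summary of the LETKF algebra implemented by the methods above (added comment, using the standard notation of
#Hunt et al. 2007): C = Yb^T R^-1, Ptilde_a = [(k-1)I/(1+inflation) + C Yb]^-1, Wa = [(k-1) Ptilde_a]^(1/2),
#wbar_a = Ptilde_a C (y_obs - ybar_b), and each analysis member is x_i^a = xbar_b + Xb (Wa_i + wbar_a),
#which is what makeAnalysisCombinedEnsemble computes.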
def getAnalysisAndBackgroundColumn(self,latval,lonval,doBackground=True):
colinds = self.gt[1].getColumnIndicesFromLocalizedStateVector(latval,lonval)
analysisSubset = self.analysisEnsemble[colinds,:]
if doBackground:
backgroundSubset = np.zeros(np.shape(self.Xpert_background[colinds,:]))
k = len(self.ensemble_numbers)
for i in range(k):
backgroundSubset[:,i] = self.Xpert_background[colinds,i]+self.xbar_background[colinds]
return [analysisSubset,backgroundSubset]
else:
return analysisSubset
def applyAnalysisCorrections(self,analysisSubset,backgroundSubset):
#Get scalefactors off the end of statevector
analysisScalefactor = analysisSubset[(-1*self.emcount)::,:]
backgroundScalefactor = backgroundSubset[(-1*self.emcount)::,:]
#Inflate scalings to the X percent of the background standard deviation, per Miyazaki et al 2015
for i in range(len(self.InflateScalingsToXOfPreviousStandardDeviation)):
inflator = self.InflateScalingsToXOfPreviousStandardDeviation[i]
if ~np.isnan(inflator):
analysis_std = np.std(analysisScalefactor[i,:])
background_std = np.std(backgroundScalefactor[i,:])
ratio=analysis_std/background_std
if ~np.isnan(ratio): #Sometimes background standard deviation is approximately 0.
if ratio < inflator:
new_std = inflator*background_std
analysisScalefactor[i,:] = analysisScalefactor[i,:]*(new_std/analysis_std)
#Apply maximum relative change per assimilation period:
for i in range(len(self.MaximumScaleFactorRelativeChangePerAssimilationPeriod)):
maxchange=self.MaximumScaleFactorRelativeChangePerAssimilationPeriod[i]
if ~np.isnan(maxchange):
relativechanges=(analysisScalefactor[i,:]-backgroundScalefactor[i,:])/backgroundScalefactor[i,:]
relOverwrite = np.where(np.abs(relativechanges)>maxchange)[0]
analysisScalefactor[i,relOverwrite] = (1+(np.sign(relativechanges[relOverwrite])*maxchange))*backgroundScalefactor[i,relOverwrite]
#Set min/max scale factor:
for i in range(len(self.MinimumScalingFactorAllowed)):
if ~np.isnan(self.MinimumScalingFactorAllowed[i]):
minOverwrite = np.where(analysisScalefactor[i,:]<self.MinimumScalingFactorAllowed[i])[0]
analysisScalefactor[i,minOverwrite] = self.MinimumScalingFactorAllowed[i]
if ~np.isnan(self.MaximumScalingFactorAllowed[i]):
maxOverwrite = np.where(analysisScalefactor[i,:]>self.MaximumScalingFactorAllowed[i])[0]
analysisScalefactor[i,maxOverwrite] = self.MaximumScalingFactorAllowed[i]
#Done with the scalings
analysisSubset[(-1*self.emcount)::,:] = analysisScalefactor
#Now average with prior
if self.AveragePriorAndPosterior:
priorweight = self.PriorWeightinPriorPosteriorAverage
if (priorweight<0) or (priorweight>1):
raise ValueError('Invalid prior weight; must be between 0 and 1.')
posteriorweight = 1-priorweight
analysisSubset = (backgroundSubset*priorweight)+(analysisSubset*posteriorweight)
return analysisSubset
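#Worked example of the inflation step above (added comment; numbers are hypothetical): if the background scalings
#for one species have standard deviation 0.2, the analysis spread is 0.05 and the inflator is 0.5, then
#ratio = 0.25 < 0.5, so every analysis member is multiplied by new_std/analysis_std = (0.5*0.2)/0.05 = 2,
#bringing the analysis spread up to half of the background spread.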
def saveColumn(self,latval,lonval,analysisSubset):
np.save(f'{self.path_to_scratch}/{str(self.ensnum).zfill(3)}/{str(self.corenum).zfill(3)}/{self.parfilename}_lat_{latval}_lon_{lonval}.npy',analysisSubset)
def LETKF(self):
if self.testing:
print(f"LETKF called! Beginning loop.")
for latval,lonval in zip(self.latinds,self.loninds):
if self.testing:
print(f"Beginning LETKF loop for lat/lon inds {(latval,lonval)}.")
self.prepareMeansAndPerts(latval,lonval)
if len(self.ybar_background)<self.MINNUMOBS:
self.analysisEnsemble = np.zeros(np.shape(self.Xpert_background))
k = len(self.ensemble_numbers)
for i in range(k):
self.analysisEnsemble[:,i] = self.Xpert_background[:,i]+self.xbar_background
analysisSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=False)
else:
self.makeR(latval,lonval)
self.makeC()
self.makePtildeAnalysis()
self.makeWAnalysis()
self.makeWbarAnalysis()
self.adjWAnalysis()
self.makeAnalysisCombinedEnsemble()
analysisSubset,backgroundSubset = self.getAnalysisAndBackgroundColumn(latval,lonval,doBackground=True)
analysisSubset = self.applyAnalysisCorrections(analysisSubset,backgroundSubset)
self.saveColumn(latval,lonval,analysisSubset) | 2.171875 | 2 |
tests/brainview/test_util.py | dfsp-spirit/brainview | 3 | 12792307 | <reponame>dfsp-spirit/brainview<filename>tests/brainview/test_util.py
import os
import pytest
import numpy as np
import mayavi.mlab as mlab
import brainload as bl
import brainview as bv
import brainview.util as ut
import mayavi
try:
import configparser # Python 3
except:
import ConfigParser as configparser # Python 2
mlab.options.offscreen = True
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data')
# Respect the environment variable BRAINVIEW_TEST_DATA_DIR if it is set. If not, fall back to default.
TEST_DATA_DIR = os.getenv('BRAINVIEW_TEST_DATA_DIR', TEST_DATA_DIR)
def test_get_default_config_filename():
cfg_file = bv.get_default_config_filename()
assert '.brainviewrc' in cfg_file
def test_get_config():
cfg, cfg_file = bv.get_config()
if cfg_file is None: # Depending on the machine where this runs, a default config or one from an existing config file may be returned.
assert cfg.has_section('figure') == True
def test_get_default_config():
cfg = ut.get_default_config()
assert cfg.has_section('figure') == True
assert cfg.has_section('mesh') == True
def test_get_config_from_file():
cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')
cfg = ut.get_config_from_file(cfg_file)
assert cfg.has_section('figure') == True
assert cfg.getint('figure', 'width') == 900
assert cfg.getint('figure', 'height') == 400
def test_get_config_from_file_raises_on_missing_file():
missing_cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc_not_there')
with pytest.raises(ValueError) as exc_info:
cfg = ut.get_config_from_file(missing_cfg_file)
assert 'not_there' in str(exc_info.value)
def test_cfg_get_default_value_works_for_all_types():
cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')
cfg = ut.get_config_from_file(cfg_file)
# retrieve some non-existent values and check that the supplied default values are returned
assert ut.cfg_get('no_such_section', 'option_a', 'some_default', config=cfg) == 'some_default'
assert ut.cfg_getint('no_such_section', 'option_b', 5, config=cfg) == 5
assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53, config=cfg) == pytest.approx(0.53, 0.0001)
assert ut.cfg_getboolean('no_such_section', 'option_d', False, config=cfg) == False
# also test without a config, this will load the default config
assert ut.cfg_get('no_such_section', 'option_a', 'some_default') == 'some_default'
assert ut.cfg_getint('no_such_section', 'option_b', 5) == 5
assert ut.cfg_getfloat('no_such_section', 'option_c', 0.53) == pytest.approx(0.53, 0.0001)
assert ut.cfg_getboolean('no_such_section', 'option_d', False) == False
def test_cfg_get_cfg_value_works_for_all_types():
cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')
cfg = ut.get_config_from_file(cfg_file)
# retrieve some values which exist in the file and check that the values from the config are returned (and the supplied default values ignored)
assert ut.cfg_get('test', 'option_string', 'bye', config=cfg) == 'hello'
assert ut.cfg_getint('test', 'option_int', 3, config=cfg) == 5
assert ut.cfg_getfloat('test', 'option_float', 0.22, config=cfg) == pytest.approx(0.53, 0.0001)
assert ut.cfg_getboolean('test', 'option_boolean', True, config=cfg) == False
def test_cfg_get_any_raises_on_invalid_return_type():
with pytest.raises(ValueError) as exc_info:
whatever = ut._cfg_get_any('section_a', 'option_b', 5, 'invalid_return_type')
assert 'ERROR: return_type must be one of' in str(exc_info.value)
assert 'invalid_return_type' in str(exc_info.value)
def test_merge_two_dictionaries():
dict1 = {'hi': 'there', 'number1': 1}
dict2 = {'number1': 2, 'number2': 2}
merged = ut.merge_two_dictionaries(dict1, dict2)
assert merged['hi'] == 'there'
assert merged['number1'] == 2
assert merged['number2'] == 2
# Ensure that the original dictionaries were not changed
assert dict1['number1'] == 1
assert dict1['hi'] == 'there'
assert len(dict1) == 2
assert dict2['number1'] == 2
assert dict2['number2'] == 2
assert len(dict2) == 2
def test_cfg_get_optional_values():
cfg_file = os.path.join(TEST_DATA_DIR, 'brainviewrc')
cfg = ut.get_config_from_file(cfg_file)
option_dict = ut.cfg_get_optional_values('figure', {'width': 'int', 'not_there': 'int'}, config=cfg)
assert len(option_dict) == 1
assert option_dict['width'] == 900
| 2.078125 | 2 |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/l10n_fr_certification/models/res_company.py | gtfarng/Odoo_migrade | 1 | 12792308 | <filename>apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/l10n_fr_certification/models/res_company.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import fields, models, api
class ResCompany(models.Model):
_inherit = 'res.company'
l10n_fr_secure_sequence_id = fields.Many2one('ir.sequence', 'Sequence to use to ensure the securisation of data', readonly=True)
@api.model
def create(self, vals):
company = super(ResCompany, self).create(vals)
#when creating a new french company, create the securisation sequence as well
if company.country_id == self.env.ref('base.fr'):
company._create_secure_sequence()
return company
@api.multi
def write(self, vals):
res = super(ResCompany, self).write(vals)
#if country changed to fr, create the securisation sequence
if vals.get('country_id') and vals.get('country_id') == self.env.ref('base.fr').id:
self.filtered(lambda c: not c.l10n_fr_secure_sequence_id)._create_secure_sequence()
return res
def _create_secure_sequence(self):
"""This function creates a no_gap sequence on each companies in self that will ensure
a unique number is given to all posted account.move in such a way that we can always
find the previous move of a journal entry.
"""
for company in self:
vals = {
'name': 'French Securisation of account_move_line - ' + company.name,
'code': 'FRSECUR',
'implementation': 'no_gap',
'prefix': '',
'suffix': '',
'padding': 0,
'company_id': company.id}
seq = self.env['ir.sequence'].create(vals)
company.write({'l10n_fr_secure_sequence_id': seq.id})
| 2.09375 | 2 |
packages/aiy-bt-prov-server/aiy_trigger_rpi_gpio.py | google/aiyprojects-raspbian-tools | 3 | 12792309 | <filename>packages/aiy-bt-prov-server/aiy_trigger_rpi_gpio.py
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
import time
import RPi.GPIO as GPIO
BLINK_ON_TIME_S = 0.5
BLINK_OFF_TIME_S = 0.5
BUTTON_HOLD_TIME_S = 5
BUTTON_GPIO = 23
BUTTON_LED_GPIO = 25
BASE_GPIO = 497
LED1_GPIO = BASE_GPIO + 14
def _write(path, data):
with open(path, 'w') as file:
file.write(str(data))
class LED(object):
def _button_led_loop(self, on_time, off_time):
GPIO.setup(BUTTON_LED_GPIO, GPIO.OUT) # the LED pin must be set up as an output before GPIO.output() is called on it
while not self._event.is_set():
GPIO.output(BUTTON_LED_GPIO, True)
self._event.wait(on_time)
GPIO.output(BUTTON_LED_GPIO, False)
self._event.wait(off_time)
def _onboard_led_loop(self, on_time, off_time):
_write('/sys/class/gpio/export', LED1_GPIO)
try:
while not self._event.is_set():
_write('/sys/class/gpio/AIY_LED1/direction', 'low')
self._event.wait(on_time)
_write('/sys/class/gpio/AIY_LED1/direction', 'high')
self._event.wait(off_time)
finally:
_write('/sys/class/gpio/unexport', LED1_GPIO)
def __init__(self):
self._thread = None
def blink(self, on_time, off_time):
self._event = threading.Event()
if os.path.exists('/sys/class/gpio/gpiochip%d' % BASE_GPIO):
run = self._onboard_led_loop
else:
run = self._button_led_loop
self._thread = threading.Thread(target=run, args=(on_time, off_time), daemon=True)
self._thread.start()
def off(self):
if self._thread:
self._event.set()
self._thread.join()
self._thread = None
class Button(object):
def __init__(self, delay, callback):
GPIO.setup(BUTTON_GPIO, GPIO.IN)
self._thread = threading.Thread(target=self._run, args=(delay, callback), daemon=True)
self._thread.start()
def _run(self, delay, callback):
while True:
GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING)
start = time.monotonic()
time.sleep(0.2) # Debounce
done = callback
while time.monotonic() - start < delay:
if GPIO.input(BUTTON_GPIO):
done = None
break
time.sleep(0.01)
if done:
done()
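# The loop above implements a hold-to-activate pattern: after a falling edge it waits out a 0.2 s debounce,
# then polls the button every 10 ms; if the line goes high again before `delay` seconds have elapsed the
# callback is cancelled, otherwise it fires exactly once.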
class AiyTrigger(object):
"""Trigger interface for AIY kits."""
def __init__(self, triggered):
GPIO.setmode(GPIO.BCM)
self._led = LED()
self._button = Button(BUTTON_HOLD_TIME_S, triggered)
def Close(self):
self._led.off()
def SetActive(self, active):
if active:
self._led.blink(on_time=BLINK_ON_TIME_S, off_time=BLINK_OFF_TIME_S)
else:
self._led.off()
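# Usage sketch (added comment; the callback below is hypothetical and the lines are kept as comments so this
# module has no import-time side effects):
#   def on_trigger():
#       print('Button held long enough - starting provisioning')
#   trigger = AiyTrigger(on_trigger)
#   trigger.SetActive(True)   # blink while provisioning mode is active
#   trigger.SetActive(False)
#   trigger.Close()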
| 2.859375 | 3 |
nextcode/services/phenotype/__init__.py | Haffi/nextcode-python-sdk | 7 | 12792310 | <filename>nextcode/services/phenotype/__init__.py
"""
Service class
------------------
Service object for interfacing with the Phenotype Archive API
"""
from .phenotype import Phenotype
from .service import Service
| 1.851563 | 2 |
flaskr/__init__.py | tlplayer/Asymptomatix | 2 | 12792311 | # This is where our imports go.
from alembic.config import Config
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from flask_googlemaps import GoogleMaps
from os import environ
# make key.py with API_KEY='your_api_string'
from flaskr import config, key
alembic_cfg = Config()
# These are the configurations we need for flask and SQLite
app = Flask(__name__)
app.config.from_object(config.Config)
app.config["GOOGLEMAPS_KEY"] = key.API_KEY
if "DOCKERENV" in environ:
app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql+psycopg2://postgres:[email protected]:5432/asymptomatix"
else:
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///cases.db"
db = SQLAlchemy(app)
db.init_app(app)
# you can also pass the key here if you prefer
# Create a SQLAlchemy engine for the local SQLite database
engine = create_engine("sqlite:///cases.db", echo=True)
migrate = Migrate(app, db, include_schemas=True)
from flaskr import routes
| 2.234375 | 2 |
ravel/ext/grpc/proto/__init__.py | gigaquads/pybiz | 2 | 12792312 | <reponame>gigaquads/pybiz
from .message_generator import MessageGenerator
| 1.054688 | 1 |
custom_components/ge_kitchen/devices/__init__.py | joelmoses/ha_components | 0 | 12792313 | <reponame>joelmoses/ha_components<filename>custom_components/ge_kitchen/devices/__init__.py<gh_stars>0
import logging
from typing import Type
from gekitchensdk.erd import ErdApplianceType
from .base import ApplianceApi
from .oven import OvenApi
from .fridge import FridgeApi
from .dishwasher import DishwasherApi
_LOGGER = logging.getLogger(__name__)
def get_appliance_api_type(appliance_type: ErdApplianceType) -> Type:
_LOGGER.debug(f"Found device type: {appliance_type}")
"""Get the appropriate appliance type"""
if appliance_type == ErdApplianceType.OVEN:
return OvenApi
if appliance_type == ErdApplianceType.FRIDGE:
return FridgeApi
if appliance_type == ErdApplianceType.DISH_WASHER:
return DishwasherApi
# Fallback
return ApplianceApi
| 2 | 2 |
SalesforceEinsteinAnalytics/SFDC_EA.py | geoffrothman/SalesforceEinsteinAnalytics | 0 | 12792314 | #Python wrapper / library for Einstein Analytics API
import sys
import browser_cookie3
import requests
import json
import time
import datetime
from dateutil import tz
import pandas as pd
import numpy as np
import re
from pandas import json_normalize
from decimal import Decimal
import base64
import csv
import unicodecsv
from unidecode import unidecode
import math
class salesforceEinsteinAnalytics(object):
def __init__(self, env_url, browser):
self.env_url = env_url
try:
if browser == 'chrome':
cj = browser_cookie3.chrome(domain_name=env_url[8:]) #remove first 8 characters since browser cookie does not expect "https://"
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
elif browser == 'firefox':
cj = browser_cookie3.firefox(domain_name=env_url[8:])
my_cookies = requests.utils.dict_from_cookiejar(cj)
self.header = {'Authorization': 'Bearer '+my_cookies['sid'], 'Content-Type': 'application/json'}
else:
print('Please select a valid browser (chrome or firefox)')
sys.exit(1)
except:
print('ERROR: Could not get session ID. Make sure you are logged into a live Salesforce session (chrome/firefox).')
sys.exit(1)
#set timezone for displayed operation start time
def get_local_time(self, add_sec=None, timeFORfile=False):
curr_time = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
if add_sec is not None:
return (curr_time + datetime.timedelta(seconds=add_sec)).strftime("%I:%M:%S %p")
elif timeFORfile == True:
return curr_time.strftime("%m_%d_%Y__%I%p")
else:
return curr_time.strftime("%I:%M:%S %p")
def get_dataset_id(self, dataset_name, search_type='API Name', verbose=False):
params = {'pageSize': 50, 'sort': 'Mru', 'hasCurrentOnly': 'true', 'q': dataset_name}
dataset_json = requests.get(self.env_url+'/services/data/v48.0/wave/datasets', headers=self.header, params=params)
dataset_df = json_normalize(json.loads(dataset_json.text)['datasets'])
#check if the user wants to seach by API name or label name
if search_type == 'UI Label':
dataset_df = dataset_df[dataset_df['label'] == dataset_name]
else:
dataset_df = dataset_df[dataset_df['name'] == dataset_name]
#show the user how many matches they got. Might want to use the exact API name if getting multiple matches for a label search.
if verbose == True:
print('Found '+str(dataset_df.shape[0])+' matching datasets.')
#if dataframe is empty then return not found message or return the dataset ID
if dataset_df.empty == True:
print('Dataset not found. Please check name or API name in Einstein Analytics.')
sys.exit(1)
else:
dsnm = dataset_df['name'].tolist()[0]
dsid = dataset_df['id'].tolist()[0]
#get dataset version ID
r = requests.get(self.env_url+'/services/data/v48.0/wave/datasets/'+dsid, headers=self.header)
dsvid = json.loads(r.text)['currentVersionId']
return dsnm, dsid, dsvid
def run_saql_query(self, saql, save_path=None, verbose=False):
'''
This function takes a saql query as an argument and returns a dataframe or saves to csv
The query can be in JSON form or can be in the UI SAQL form
load statements must have the appropriate spaces: =_load_\"datasetname\";
'''
if verbose == True:
start = time.time()
print('Checking SAQL and Finding Dataset IDs...')
print('Process started at: '+str(self.get_local_time()))
saql = saql.replace('\"','\\"') #convert UI saql query to JSON format
#create a dictionary with all datasets used in the query
load_stmt_old = re.findall(r"(= load )(.*?)(;)", saql)
load_stmt_new = load_stmt_old.copy()
for ls in range(0,len(load_stmt_new)):
load_stmt_old[ls] = ''.join(load_stmt_old[ls])
dsnm, dsid, dsvid = self.get_dataset_id(dataset_name=load_stmt_new[ls][1].replace('\\"',''), verbose=verbose)
load_stmt_new[ls] = ''.join(load_stmt_new[ls])
load_stmt_new[ls] = load_stmt_new[ls].replace(dsnm, dsid+'/'+dsvid)
#update saql with dataset ID and version ID
for i in range(0,len(load_stmt_new)):
saql = saql.replace(load_stmt_old[i], load_stmt_new[i])
saql = saql.replace('\\"','\"')
if verbose == True:
print('Running SAQL Query...')
#run query and return dataframe or save as csv
payload = {"query":saql}
r = requests.post(self.env_url+'/services/data/v48.0/wave/query', headers=self.header, data=json.dumps(payload) )
df = json_normalize(json.loads(r.text)['results']['records'])
if save_path is not None:
if verbose == True:
print('Saving result to CSV...')
df.to_csv(save_path, index=False)
if verbose == True:
end = time.time()
print('Dataframe saved to CSV...')
print('Completed in '+str(round(end-start,3))+'sec')
return df
else:
if verbose == True:
end = time.time()
print('Completed in '+str(round(end-start,3))+'sec')
return df
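	#Usage sketch for run_saql_query (added comment; the org URL, dataset name and field names are hypothetical and
	#the exact SAQL syntax should be taken from the Einstein Analytics UI):
	#   ea = salesforceEinsteinAnalytics(env_url='https://yourorg.my.salesforce.com', browser='chrome')
	#   saql = 'q = load "OpportunityDataset"; q = group q by StageName; q = foreach q generate StageName, sum(Amount) as Amount; q = limit q 10;'
	#   df = ea.run_saql_query(saql, verbose=True)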
def restore_previous_dashboard_version(self, dashboard_id, version_num=None, save_json_path=None):
'''
The version number counts backwards: 0 is the current version and 20 is the oldest version kept.
It is typically best practice to run the function without a version number first to view the history before supplying one.
'''
#get broken dashboard version history
r = requests.get(self.env_url+'/services/data/v48.0/wave/dashboards/'+dashboard_id+'/histories', headers=self.header)
history_df = json_normalize(json.loads(r.text)['histories'])
if save_json_path is not None and version_num is not None:
preview_link = history_df['previewUrl'].tolist()[version_num]
r_restore = requests.get(self.env_url+preview_link, headers=self.header)
with open(save_json_path, 'w', encoding='utf-8') as f:
json.dump(r_restore.json(), f, ensure_ascii=False, indent=4)
elif version_num is not None:
payload = { "historyId": history_df['id'].tolist()[version_num] }
fix = requests.put(self.env_url+history_df['revertUrl'].tolist()[version_num], headers=self.header, data=json.dumps(payload))
else:
return history_df
def get_app_user_list(self, app_id=None, save_path=None, verbose=False, max_request_attempts=3):
if verbose == True:
start = time.time()
progress_counter = 0
print('Getting app user list and access details...')
print('Process started at: '+str(self.get_local_time()))
if app_id is None:
'''ALERT: currently getting an intermittent error for the all-apps request
ERROR = OpenSSL.SSL.SysCallError: (-1, 'Unexpected EOF')
Proposed solution is to retry the request in a try/except block, as done below
'''
attempts = 0
while attempts < max_request_attempts:
try:
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders', headers=self.header)
response = json.loads(r.text)
total_size = response['totalSize']
next_page = response['nextPageUrl']
app_user_df = pd.DataFrame()
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
for app in response['folders']:
attempts = 0
while attempts < max_request_attempts:
try:
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header)
users = json.loads(r.text)['shares']
for u in users:
app_user_df = app_user_df.append( { "AppId": app['id'],
"AppName": app['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
#continue to pull data from next page
attempts = 0 # reset attempts for additional pages
while next_page is not None:
if verbose == True:
progress_counter += 25
print('Progress: '+str(round(progress_counter/total_size*100,1))+'%')
while attempts < max_request_attempts:
try:
next_page_response = requests.get(self.env_url+next_page, headers=self.header) #request the next page of folder results (named to avoid shadowing the numpy import)
response = json.loads(next_page_response.text)
next_page = response['nextPageUrl']
break
except KeyError:
next_page = None
print(sys.exc_info()[0])
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
while attempts < max_request_attempts:
try:
for app in response['folders']:
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app["id"], headers=self.header)
users = json.loads(r.text)['shares']
for u in users:
app_user_df = app_user_df.append( { "AppId": app['id'],
"AppName": app['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
break
except:
attempts += 1
if verbose == True:
print("Unexpected error:", sys.exc_info()[0])
print("Trying again...")
elif app_id is not None:
if type(app_id) is list or type(app_id) is tuple:
for app in app_id:
app_user_df = pd.DataFrame()
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app, headers=self.header)
response = json.loads(r.text)
for u in response['shares']:
app_user_df = app_user_df.append( { "AppId": app,
"AppName": response['name'],
"UserId": u['sharedWithId'],
"UserName": u['sharedWithLabel'],
"AccessType": u['accessType'],
"UserType": u['shareType']
}, ignore_index=True)
else:
print('Please input a list or tuple of app Ids')
sys.exit(1)
if save_path is not None:
if verbose == True:
print('Saving result to CSV...')
app_user_df.to_csv(save_path, index=False)
if verbose == True:
end = time.time()
print('Dataframe saved to CSV...')
print('Completed in '+str(round(end-start,3))+'sec')
return app_user_df
else:
if verbose == True:
end = time.time()
print('Completed in '+str(round(end-start,3))+'sec')
return app_user_df
def update_app_access(self, user_dict, app_id, update_type, verbose=False):
'''
update types include: addNewUsers, fullReplaceAccess, removeUsers, updateUsers
'''
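# Hedged usage sketch (the instance name "ea" and every ID below are illustrative, not from the source):
#   new_users = [{"sharedWithId": "005xx000001Sv6e", "accessType": "View", "shareType": "user"}]
#   ea.update_app_access(new_users, app_id="00lxx000000XYZAB", update_type="addNewUsers")
# Each dict mirrors a "shares" entry returned by the /wave/folders/<app_id> endpoint.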
if verbose == True:
start = time.time()
print('Updating App Access...')
print('Process started at: '+str(self.get_local_time()))
if update_type == 'fullReplaceAccess':
shares = user_dict
elif update_type == 'addNewUsers':
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header)
response = json.loads(r.text)
shares = response['shares']
#remove fields in the JSON that we don't want
for s in shares:
try:
del s['sharedWithLabel']
except:
pass
try:
del s['imageUrl']
except:
pass
shares = shares + user_dict
elif update_type == 'removeUsers':
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header)
response = json.loads(r.text)
shares = response['shares']
to_remove = []
for u in user_dict:
to_remove.append(u['sharedWithId'])
# Filter into a new list; calling remove() while iterating over the same list skips elements.
shares = [s for s in shares if s['sharedWithId'] not in to_remove]
#remove fields in the JSON that we don't want
for s in shares:
try:
del s['sharedWithLabel']
except:
pass
try:
del s['imageUrl']
except:
pass
elif update_type == 'updateUsers':
r = requests.get(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header)
response = json.loads(r.text)
shares = response['shares']
to_update = []
for u in user_dict:
to_update.append(u['sharedWithId'])
for s in range(0,len(shares)):
if shares[s]['sharedWithId'] in to_update:
shares[s] = next(item for item in user_dict if item["sharedWithId"] == shares[s]['sharedWithId'])
#remove fields in the JSON that we don't want
for s in shares:
try:
del s['sharedWithLabel']
except:
pass
try:
del s['imageUrl']
except:
pass
else:
shares = None
print('Please choose a user update operation. Options are: addNewUsers, fullReplaceAccess, removeUsers, updateUsers')
sys.exit(1)
if shares is not None:
payload = {"shares": shares}
r = requests.patch(self.env_url+'/services/data/v48.0/wave/folders/'+app_id, headers=self.header, data=json.dumps(payload))
if verbose == True:
end = time.time()
print('User Access Updated')
print('Completed in '+str(round(end-start,3))+'sec')
def update_dashboard_access(self, update_df, update_type, verbose=True):
'''
Function to make it easier to update access using dashboard names vs finding all apps needed.
update dataframe should have the following columns: Dashboard Id, Access Type, and User Id
'''
pass
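# Hypothetical shape of update_df, matching the columns named in the docstring (values are illustrative):
#   update_df = pd.DataFrame({"Dashboard Id": ["0FKxx0000004C92"],
#                             "Access Type": ["View"],
#                             "User Id": ["005xx000001Sv6e"]})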
def remove_non_ascii(self, df, columns=None):
if columns == None:
columns = df.columns
else:
columns = columns
for c in columns:
if df[c].dtype == "O":
df[c] = df[c].apply(lambda x: unidecode(x).replace("?",""))
def create_xmd(self, df, dataset_label, useNumericDefaults=True, default_measure_val="0.0", default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n"):
dataset_label = dataset_label
dataset_api_name = dataset_label.replace(" ","_")
fields = []
for c in df.columns:
if df[c].dtype == "datetime64[ns]":
name = c.replace(" ","_")
name = name.replace("__","_")
date = {
"fullyQualifiedName": name,
"name": name,
"type": "Date",
"label": c,
"format": "yyyy-MM-dd HH:mm:ss"
}
fields.append(date)
elif np.issubdtype(df[c].dtype, np.number):
if useNumericDefaults == True:
precision = 18
scale = 2
elif useNumericDefaults == False:
precision = df[c].astype('str').apply(lambda x: len(x.replace('.', ''))).max()
scale = -df[c].astype('str').apply(lambda x: Decimal(x).as_tuple().exponent).min()
name = c.replace(" ","_")
name = name.replace("__","_")
measure = {
"fullyQualifiedName": name,
"name": name,
"type": "Numeric",
"label": c,
"precision": precision,
"defaultValue": default_measure_val,
"scale": scale,
"format": default_measure_fmt,
"decimalSeparator": "."
}
fields.append(measure)
else:
name = c.replace(" ","_")
name = name.replace("__","_")
dimension = {
"fullyQualifiedName": name,
"name": name,
"type": "Text",
"label": c
}
fields.append(dimension)
xmd = {
"fileFormat": {
"charsetName": charset,
"fieldsDelimitedBy": deliminator,
"linesTerminatedBy": lineterminator
},
"objects": [
{
"connector": "CSV",
"fullyQualifiedName": dataset_api_name,
"label": dataset_label,
"name": dataset_api_name,
"fields": fields
}
]
}
return json.dumps(xmd)  # json.dumps is safer than str().replace, which breaks on apostrophes in labels
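# Minimal illustrative call (the dataframe and label are assumptions, not from the source):
#   sample_df = pd.DataFrame({"Region": ["East"], "Amount": [100.0]})
#   xmd_json = self.create_xmd(sample_df, dataset_label="Sample Sales")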
def load_df_to_EA(self, df, dataset_api_name, xmd=None, encoding='UTF-8', operation='Overwrite', useNumericDefaults=True, default_measure_val="0.0",
default_measure_fmt="0.0#", charset="UTF-8", deliminator=",", lineterminator="\r\n", removeNONascii=True, ascii_columns=None, fillna=True, dataset_label=None, verbose=False):
'''
field names will show up exactly as the column names in the supplied dataframe
'''
if verbose == True:
start = time.time()
print('Loading Data to Einstein Analytics...')
print('Process started at: '+str(self.get_local_time()))
dataset_api_name = dataset_api_name.replace(" ","_")
if fillna == True:
for c in df.columns:
if df[c].dtype == "O":
df[c].fillna('NONE', inplace=True)
elif np.issubdtype(df[c].dtype, np.number):
df[c].fillna(0, inplace=True)
elif df[c].dtype == "datetime64[ns]":
df[c].fillna(pd.to_datetime('1900-01-01 00:00:00'), inplace=True)
if ascii_columns is not None:
self.remove_non_ascii(df, columns=ascii_columns)
elif removeNONascii == True:
self.remove_non_ascii(df)
# Upload Config Steps
if xmd is not None:
xmd64 = base64.urlsafe_b64encode(json.dumps(xmd).encode(encoding)).decode()
else:
xmd64 = base64.urlsafe_b64encode(self.create_xmd(df, dataset_api_name, useNumericDefaults=useNumericDefaults, default_measure_val=default_measure_val,
default_measure_fmt=default_measure_fmt, charset=charset, deliminator=deliminator, lineterminator=lineterminator).encode(encoding)).decode()
upload_config = {
'Format' : 'CSV',
'EdgemartAlias' : dataset_api_name,
'Operation' : operation,
'Action' : 'None',
'MetadataJson': xmd64
}
r1 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData', headers=self.header, data=json.dumps(upload_config))
try:
assert json.loads(r1.text)['success'] == True  # assert so a failed response actually triggers the except block
except:
print('ERROR: Upload Config Failed')
print(r1.text)
sys.exit(1)
if verbose == True:
print('Upload Configuration Complete...')
print('Chunking and Uploading Data Parts...')
MAX_FILE_SIZE = 10 * 1000 * 1000 - 49
df_memory = sys.getsizeof(df)
rows_in_part = math.ceil(df.shape[0] / math.ceil(df_memory / MAX_FILE_SIZE))
partnum = 0
range_start = 0
max_data_part = rows_in_part
for chunk in range(0, math.ceil(df_memory / MAX_FILE_SIZE)):
df_part = df.iloc[range_start:max_data_part,:]
if chunk == 0:
data_part64 = base64.b64encode(df_part.to_csv(index=False, quotechar='"', quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode()
else:
data_part64 = base64.b64encode(df_part.to_csv(index=False, header=False, quotechar='"',quoting=csv.QUOTE_MINIMAL).encode('UTF-8')).decode()
range_start += rows_in_part
max_data_part += rows_in_part
partnum += 1
if verbose == True:
print('\rChunk '+str(chunk+1)+' of '+str(math.ceil(df_memory / MAX_FILE_SIZE))+' completed', end='', flush=True)
payload = {
"InsightsExternalDataId" : json.loads(r1.text)['id'],
"PartNumber" : str(partnum),
"DataFile" : data_part64
}
r2 = requests.post(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalDataPart', headers=self.header, data=json.dumps(payload))
try:
assert json.loads(r2.text)['success'] == True  # assert so a failed response actually triggers the except block
except:
print('\nERROR: Datapart Upload Failed')
print(r2.text)
sys.exit(1)
if verbose == True:
print('\nDatapart Upload Complete...')
payload = {
"Action" : "Process"
}
r3 = requests.patch(self.env_url+'/services/data/v48.0/sobjects/InsightsExternalData/'+json.loads(r1.text)['id'], headers=self.header, data=json.dumps(payload))
if verbose == True:
end = time.time()
print('Data Upload Process Started. Check Progress in Data Monitor.')
print('Job ID: '+str(json.loads(r1.text)['id']))
print('Completed in '+str(round(end-start,3))+'sec')
if __name__ == '__main__':
pass
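# Hedged end-to-end sketch; the class name "EinsteinAnalytics" and how env_url/token are obtained are assumptions:
#   ea = EinsteinAnalytics(env_url, token)
#   df = pd.DataFrame({"Region": ["East", "West"], "Amount": [100.0, 250.5]})
#   ea.load_df_to_EA(df, dataset_api_name="Sample_Sales", verbose=True)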
| 2.265625 | 2 |
tests/internal/ebs_optimized_support/test_ebs_optimized_support_unsupported_auto.py | frolovv/aws.ec2.compare | 0 | 12792315 | <filename>tests/internal/ebs_optimized_support/test_ebs_optimized_support_unsupported_auto.py
# Testing module ebs_optimized_support.unsupported
import pytest
import ec2_compare.internal.ebs_optimized_support.unsupported
def test_get_internal_data_ebs_optimized_support_unsupported_get_instances_list():
assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get_instances_list()) > 0
def test_get_internal_data_ebs_optimized_support_unsupported_get():
assert len(ec2_compare.internal.ebs_optimized_support.unsupported.get) > 0
| 2.015625 | 2 |
tests/test_func.py | DerNitro/pyRegistryStore | 0 | 12792316 | """
Testing
"""
import sys
sys.path.append('.')
from objects import auto_type, equal_object, RegistryStore
def test_auto_type():
"""
Check value conversion
"""
assert auto_type('test') == str('test')
assert auto_type('5') == 5
for i in ['true', 'y', 'yes']:
assert auto_type(i)
for i in ['false', 'f', 'no']:
assert not auto_type(i)
def test_equal_object():
"""
Check the function that identifies an object by its attributes
"""
test_object = RegistryStore()
test_object.test1 = True
test_object.test2 = 'foo'
test_object.test3 = 5
assert equal_object(test_object, ['test1=true', 'test2=foo', 'test3=5'])
assert not equal_object(test_object, ['test1=false', 'test2=foo', 'test3=5'])
| 2.6875 | 3 |
IBRAHIM/OPENCV(gözdengeçir)/opencv13.py | vektorelpython24proje/temelbilgiler | 0 | 12792317 | <reponame>vektorelpython24proje/temelbilgiler<filename>IBRAHIM/OPENCV(gözdengeçir)/opencv13.py
import cv2
import numpy as np
| 1.015625 | 1 |
philo/forms/fields.py | melinath/philo | 2 | 12792318 | from django import forms
from django.core.exceptions import ValidationError
from django.utils import simplejson as json
from philo.validators import json_validator
__all__ = ('JSONFormField',)
class JSONFormField(forms.Field):
"""A form field which is validated by :func:`philo.validators.json_validator`."""
default_validators = [json_validator]
def clean(self, value):
if value == '' and not self.required:
return None
try:
return json.loads(value)
except Exception, e:
raise ValidationError(u'JSON decode error: %s' % e) | 2.65625 | 3 |
archive/arisulolstats/arisulolstats/arisu/profilestable.py | NikhilPal2468/python-projects | 3 | 12792319 | import PyQt5.QtWidgets as W
class ProfilesTable(W.QTableWidget):
def __init__(self, parent, profiles=[]):
super(ProfilesTable, self).__init__(parent)
self.init()
self.update_profiles(profiles)
def init(self):
self.setColumnCount(2)
self.setHorizontalHeaderLabels(["Name", "Number of summoners"])
self.setEditTriggers(W.QAbstractItemView.NoEditTriggers)
self.verticalHeader().hide()
self.setSelectionBehavior(W.QAbstractItemView.SelectRows)
def update_profiles(self, profiles):
self.setRowCount(len(profiles))
i = 0
for profile in profiles:
self.setItem(i, 0, W.QTableWidgetItem(profile))
self.setItem(i, 1, W.QTableWidgetItem(str(len(profiles[profile]))))
i += 1
self.resizeColumnsToContents()
def get_current_profile_name(self):
current_row = self.currentRow()
if current_row == -1:
print("Is empty")
return
profile = self.item(current_row, 0).text()
return profile
def delete(self, profiles):
current_row = self.currentRow()
if current_row == -1:
print("Is empty")
return
profile = self.takeItem(current_row, 0).text()
if profile in profiles:
return profile
| 2.671875 | 3 |
parsec/commands/invocations/get_invocations.py | erasche/parsec | 8 | 12792320 | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_invocations')
@click.option(
"--workflow_id",
help="Encoded workflow ID to filter on",
type=str
)
@click.option(
"--history_id",
help="Encoded history ID to filter on",
type=str
)
@click.option(
"--user_id",
help="Encoded user ID to filter on. This must be your own user ID if your are not an admin user.",
type=str
)
@click.option(
"--include_terminal",
help="Whether to include terminal states.",
default="True",
show_default=True,
is_flag=True
)
@click.option(
"--limit",
help="Maximum number of invocations to return - if specified, the most recent invocations will be returned.",
type=int
)
@click.option(
"--view",
help="Level of detail to return per invocation, either 'element' or 'collection'.",
default="collection",
show_default=True,
type=str
)
@click.option(
"--step_details",
help="If 'view' is 'element', also include details on individual steps.",
is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, workflow_id="", history_id="", user_id="", include_terminal=True, limit="", view="collection", step_details=False):
"""Get all workflow invocations, or select a subset by specifying optional arguments for filtering (e.g. a workflow ID).
Output:
A list of workflow invocations.
For example::
[{'history_id': '2f94e8ae9edff68a',
'id': 'df7a1f0c02a5b08e',
'model_class': 'WorkflowInvocation',
'state': 'new',
'update_time': '2015-10-31T22:00:22',
'uuid': 'c8aa2b1c-801a-11e5-a9e5-8ca98228593c',
'workflow_id': '03501d7626bd192f'}]
"""
return ctx.gi.invocations.get_invocations(workflow_id=workflow_id, history_id=history_id, user_id=user_id, include_terminal=include_terminal, limit=limit, view=view, step_details=step_details)
| 2.265625 | 2 |
src/advent_2019/day8.py | devshawn/advent-of-code | 1 | 12792321 | def calculate_part_1(input, width, height):
n = width * height
layers = [input[i:i + n] for i in range(0, len(input), n)]
counts = [item.count("0") for item in layers]
layer = layers[counts.index(min(counts))]
return layer.count("1") * layer.count("2")
def calculate_part_2(input, width, height):
n = width * height
layers = [input[i:i + n] for i in range(0, len(input), n)]
# Compare by value (!=) rather than identity ("is not"); keep the default as a string so join() cannot fail.
result = "".join([next(iter([item[i] for item in layers if int(item[i]) != 2]), "2") for i in range(0, n)])
result_string = result.replace("0", " ").replace("1", "█")
return "\n".join([result_string[i:i + width] for i in range(0, len(result_string), width)])
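# Hedged usage sketch: AoC 2019 day 8 images are 25 pixels wide and 6 tall; the input file name is an assumption.
if __name__ == "__main__":
    with open("input.txt") as f:
        puzzle_input = f.read().strip()
    print(calculate_part_1(puzzle_input, 25, 6))  # checksum of the layer with the fewest zeros
    print(calculate_part_2(puzzle_input, 25, 6))  # rendered message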
| 3.359375 | 3 |
scripts/read_logs.py | mimno/mallet_state_tools | 0 | 12792322 | <gh_stars>0
import sys, regex, itertools
from collections import Counter
iter_pattern = regex.compile("^<(\d+)>")
topic_pattern = regex.compile("^(\d+)\t(\d+\.\d+)\t(.*)")
current_iteration = 0
topic_counters = {}
with open(sys.argv[1]) as reader:
for line in reader:
if line.startswith("<"):
match = iter_pattern.search(line)
current_iteration = int(match.group(1))
elif current_iteration > 500:
match = topic_pattern.search(line)
if match is not None:
topic = int(match.group(1))
alpha = float(match.group(2))
words = match.group(3).split(" ")
if not topic in topic_counters:
topic_counters[topic] = Counter()
for rank, word in enumerate(words):
topic_counters[topic][word] += len(words) - rank
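# Jaccard similarity of the two counters' key sets: |intersection| / |union|.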
def jaccard(a, b):
set_a = set(a.keys())
set_b = set(b.keys())
return len(set_a & set_b) / len(set_a | set_b)
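# Count space-padded character trigrams over a list of words ("v" is mapped to "u",
# presumably to treat u/v as equivalent spellings).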
def character_trigrams(strings):
output = Counter()
for s in strings:
padded_string = " " + s.replace("v", "u") + " "
for position in range(len(padded_string) - 2):
trigram = padded_string[position:(position+3)]
output[trigram] += 1
return output
print("getting top words")
topic_ids = list(topic_counters.keys())
topic_words = {}
topic_trigrams = {}
for topic in topic_ids:
topic_words[topic] = [w for w, c in topic_counters[topic].most_common(15)]
topic_trigrams[topic] = character_trigrams(topic_words[topic])
topic_pair_scores = []
for t1, t2 in itertools.combinations(topic_ids, 2):
topic_pair_scores.append((jaccard(topic_trigrams[t1], topic_trigrams[t2]), t1, t2))
for score, t1, t2 in sorted(topic_pair_scores, reverse=True):
print(score, " ".join(topic_words[t1]), " | ", " ".join(topic_words[t2])) | 3.109375 | 3 |
src/preprocessing/preprocess2.py | norikinishida/coreference-resolution | 0 | 12792323 | import argparse
import json
import os
import numpy as np
import utils
import util
def main(args):
config = utils.get_hocon_config(config_path="./config/main.conf", config_name="base")
input_file = args.input_file
if args.is_training == 0:
is_training = False
else:
is_training = True
tokenizer = util.get_tokenizer(args.tokenizer_name)
max_seg_len = args.seg_len
genre_dict = {genre: idx for idx, genre in enumerate(config["genres"])}
dataset = []
with open(input_file, "r") as f:
for line in f.readlines():
# Read one data record
json_data = json.loads(line)
doc_key = json_data["doc_key"]
# Mentions and clusters
clusters = json_data["clusters"]
gold_mentions = sorted(tuple(mention) for mention in util.flatten(clusters))
gold_mention_map = {mention: idx for idx, mention in enumerate(gold_mentions)} # span -> index
gold_mention_cluster_map = np.zeros(len(gold_mentions)) # 0: no cluster
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
gold_mention_cluster_map[gold_mention_map[tuple(mention)]] = cluster_id + 1
# Speakers
speakers = json_data["speakers"]
speaker_dict = get_speaker_dict(util.flatten(speakers), config["max_num_speakers"])
# Segments
segments = json_data["segments"]
sentence_map = json_data["sentence_map"]
num_words = sum([len(s) for s in segments])
segment_len = np.array([len(s) for s in segments])
# BERT input IDs/mask, speaker IDs
input_ids, input_mask, speaker_ids = [], [], []
for idx, (sent_tokens, sent_speakers) in enumerate(zip(segments, speakers)):
sent_input_ids = tokenizer.convert_tokens_to_ids(sent_tokens)
sent_input_mask = [1] * len(sent_input_ids)
sent_speaker_ids = [speaker_dict[speaker] for speaker in sent_speakers]
while len(sent_input_ids) < max_seg_len:
sent_input_ids.append(0)
sent_input_mask.append(0)
sent_speaker_ids.append(0)
input_ids.append(sent_input_ids)
input_mask.append(sent_input_mask)
speaker_ids.append(sent_speaker_ids)
input_ids = np.array(input_ids)
input_mask = np.array(input_mask)
speaker_ids = np.array(speaker_ids)
assert num_words == np.sum(input_mask), (num_words, np.sum(input_mask))
# Genre
genre = genre_dict.get(doc_key[:2], 0)
# Gold spans
if len(gold_mentions) > 0:
gold_starts, gold_ends = zip(*gold_mentions)
else:
gold_starts, gold_ends = [], []
gold_starts = np.array(gold_starts)
gold_ends = np.array(gold_ends)
# Others
tokens = json_data["tokens"]
original_sentence_boundaries = json_data["original_sentence_boundaries"] # XXX
gold_clusters = json_data["clusters"]
subtoken_map = json_data.get("subtoken_map", None)
# Convert to a DataInstance
kargs = {
"doc_key": doc_key,
"tokens": tokens,
"original_sentence_boundaries": original_sentence_boundaries, # XXX
"segments": segments,
"sentence_map": sentence_map,
"speakers": speakers,
"gold_clusters": gold_clusters,
"subtoken_map": subtoken_map,
#
"input_ids": input_ids,
"input_mask": input_mask,
"speaker_ids": speaker_ids,
"segment_len": segment_len,
"genre": genre,
"is_training": is_training,
"gold_starts": gold_starts,
"gold_ends": gold_ends,
"gold_mention_cluster_map": gold_mention_cluster_map,
}
data = utils.DataInstance(**kargs)
dataset.append(data)
dataset = np.asarray(dataset, dtype="O")
output_file = os.path.basename(input_file).replace(".jsonlines", ".npy")
output_file = os.path.join(config["caches"], output_file)
np.save(output_file, dataset)
print("Cached %s to %s" % (input_file, output_file))
def get_speaker_dict(speakers, max_num_speakers):
"""
Parameters
----------
speakers: list[str]
Returns
-------
dict[str, int]
"""
speaker_dict = {"UNK": 0, "[SPL]": 1}
for speaker in speakers:
if len(speaker_dict) > max_num_speakers:
pass # "break" to limit # speakers
if speaker not in speaker_dict:
speaker_dict[speaker] = len(speaker_dict)
return speaker_dict
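# Worked example (illustrative):
#   get_speaker_dict(["alice", "bob", "alice"], max_num_speakers=10)
#   -> {"UNK": 0, "[SPL]": 1, "alice": 2, "bob": 3}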
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, required=True)
parser.add_argument("--is_training", type=int, required=True)
parser.add_argument('--tokenizer_name', type=str, required=True)
parser.add_argument('--seg_len', type=int, required=True)
args = parser.parse_args()
main(args)
| 2.609375 | 3 |
route.py | HRHLALALA/Event-Management-System | 0 | 12792324 | <gh_stars>0
from init_database import db,create_db
create_db()
from server import app, valid_time
from flask import request, render_template,session,redirect,url_for,flash
from flask_login import LoginManager,UserMixin,login_required,login_user,current_user,logout_user
from all_user import *
from events import *
from datetime import datetime,timedelta
from EMS import EMS
from role_required import trainer_only
app.config['SECRET_KEY']='HRHLALALA'
login_manager = LoginManager()
login_manager.init_app(app)
ems = EMS()
@app.route('/', methods=['POST', 'GET'])
def login():
if request.method== 'POST':
form = request.form
username=str(form['Username'])
password=str(form['password'])
user = ems.valid_user(username,password)
if user is not None:
login_user(user)
return redirect(url_for('dashboard'))
else:
flash("Invalid username or password",'alart')
return redirect(url_for('login'))
if current_user.is_authenticated:
return redirect(url_for('dashboard'))
else :
return render_template('sign.html',user_type = "user")
@app.route('/guest_form', methods=['POST', 'GET'])
def guest_form():
if request.method== 'POST':
form = request.form
username=str(form['Username'])
password=str(form['password'])
real_name = str(form['real_name'])
user = guest(zid="NONE",id=username,password=password,name=real_name)
try:
ems.add_guest(user)
except MemberError as error:
flash(error.message,'alart')
return redirect('guest_form')
return redirect(url_for('login'))
if current_user.is_authenticated:
return redirect(url_for('dashboard'))
else :
return render_template('sign.html',user_type = "guest")
@app.route('/dashboard',methods=['POST','GET'])
@login_required
def dashboard():
events = ems.valid_events()
return render_template('dashboard.html',Events=events,user=current_user)
@app.route('/sign-up',methods=['POST','GET'])
def logout():
logout_user()
return redirect(url_for('login'))
@login_manager.user_loader
def load_user(user_id):
return ems.get_user_id(user_id)
@app.route('/Post_Course',methods=['GET','POST'])
@login_required
@trainer_only
def post_course():
date_format="%Y-%m-%d"
if request.method == 'POST':
form = request.form
title = str(form['Title'])
location = str(form['Location'])
try:
start_date = datetime.strptime(form['Start_Date'],date_format)
end_date = datetime.strptime(form['End_Date'],date_format)
deadline = datetime.strptime(form['deadline'],date_format)
EB_start = datetime.strptime(form['EB_Start_Date'],date_format)
EB_end = datetime.strptime(form['EB_End_Date'],date_format)
except:
flash("Cannot Recognise the date",'alart')
return redirect(url_for('post_course'))
cap = int(form['Capacitor'])
desc = str(form['Description'])
fee=float(request.form['Fee'])
Course = course(status="OPEN",title=title,start_date=start_date,end_date=end_date,
location = location,convenor=current_user.id,
capacitor=cap,description=desc,deregister_deadline=deadline,
fee=fee,EB_start=EB_start,EB_end=EB_end)
try:
current_user.post_event(Course)
except (PeriodError,CapacityError,DupulicationError) as error:
flash(error.message,'alart')
return redirect(url_for('post_course'))
return redirect('/Posted_Event')
return render_template('Post_Course.html')
@app.route('/Post_Seminar',methods=['GET','POST'])
@login_required
@trainer_only
def post_seminar():
date_format="%Y-%m-%d"
if request.method == 'POST':
form = request.form
title = str(form['Title'])
location = str(form['Location'])
try:
start_date = datetime.strptime(form['Start_Date'],date_format)
end_date = datetime.strptime(form['End_Date'],date_format)
deadline = datetime.strptime(form['deadline'],date_format)
EB_start = datetime.strptime(form['EB_Start_Date'],date_format)
EB_end = datetime.strptime(form['EB_End_Date'],date_format)
except:
flash("Invalid Date Format",'alart')
return redirect(url_for('post_seminar'))
desc = str(form['Description'])
fee=float(request.form['Fee'])
Seminar = seminar(status="OPEN",title=title,start_date=start_date,end_date=end_date,
location = location,convenor=current_user.id,capacitor=0,
deregister_deadline=deadline,description=desc,
fee=fee,EB_start=EB_start,EB_end=EB_end)
try:
current_user.post_event(Seminar)
except (PeriodError,CapacityError,DupulicationError) as error:
flash(error.message,'alart')
return redirect(url_for('post_seminar'))
return redirect('/Posted_Event')
return render_template('Post_Seminar.html')
@app.route('/Registered_Event',methods=['GET','POST'])
@login_required
def registered_event():
for Event in current_user.registers:
if Event.status == "Canceled":
flash(Event.title+" has been Cancelled",'alart')
return render_template('dashboard.html',user=current_user,Events=current_user.registers)
@app.route('/Posted_Event',methods=['GET','POST'])
@login_required
@trainer_only
def posted_event():
return render_template('dashboard.html',Events=event.query.filter_by(convenor=current_user.id),user=current_user)
@app.route('/Canceled_Event',methods=['GET','POST'])
@login_required
@trainer_only
def canceled_event():
return render_template('dashboard.html',Events=event.query.filter_by(status='Canceled'),user=current_user)
@app.route('/dashboard/<title>',methods=['GET','POST'])
@login_required
def event_details(title):
date_format="%Y-%m-%d"
now = datetime.now()
event=ems.get_event_title(title)
before_open = event.start_date-now
after_end = now-event.end_date
before_deadline = event.deregister_deadline-now
enable=True
if event.status=="Closed" or event.status=="Canceled":
enable=False
if current_user.id== event.convenor:
if after_end.days> 0:
mode="Close"
else:
if before_open.days<0:
enable=False
mode="Cancel"
else:
if event in current_user.registers:
if before_deadline.days <0:
enable=False
mode = "deregister"
else:
if event.is_full==True:
enable=False
mode = "register"
if request.method == "POST":
if "Cancel" in request.form:
event.status="Canceled"
db.session.commit()
elif "Close" in request.form:
event.status="Closed"
db.session.commit()
elif "register" in request.form:
event.attendees.append(current_user)
db.session.commit()
else:
event.attendees.remove(current_user)
db.session.commit()
return redirect(url_for('event_details',title=title))
if before_deadline.days <0:
permit_deregister= False
else:
permit_deregister = True
return render_template('Event_Detail.html',user=current_user,event=event,mode=mode,enable=enable,deregister=permit_deregister)
@app.route('/dashboard/<title>/Sessions',methods=['GET','POST'])
@login_required
def post_session(title):
event=ems.get_event_title(title)
if request.method=="POST":
speaker = request.form['Speaker']
name = request.form['Name']
capacitor=int(request.form['Capacitor'])
Session = session(title=name,speaker=speaker,capacitor=capacitor,seminar_id=event.id)
try:
current_user.post_session(Session)
except (SpeakerError,CapacityError,DupulicationError) as error:
flash(error.message,'alart')
return redirect(url_for('post_session',title=event.title))
return redirect(url_for('event_details',title=event.title))
return render_template('Post_Sessions.html',event=event)
@app.route('/<seminar_tit>/<session_tit>',methods=['GET','POST'])
@login_required
def register_session(seminar_tit,session_tit):
seminar=ems.get_event_title(seminar_tit)
session=seminar.get_session_title(session_tit)
if current_user in session.attendees:
session.deregister(current_user)
else:
session.register(current_user)
return redirect(url_for('event_details',title=seminar_tit))
| 2.296875 | 2 |
code/day_02a.py | martinsbruveris/advent-of-code | 0 | 12792325 | <gh_stars>0
from pathlib import Path
import click
import numpy as np
CMD_2_DIRECTION = {
"forward": np.array([1, 0]), # (horizontal pos, depth)
"down": np.array([0, 1]),
"up": np.array([0, -1]),
}
@click.command()
@click.argument("filename")
def main(filename):
filename = Path(filename)
commands = filename.read_text().split("\n")
commands = [cmd.split(" ") for cmd in commands]
commands = [(CMD_2_DIRECTION[cmd[0]], int(cmd[1])) for cmd in commands]
position = np.array([0, 0])
for cmd in commands:
position += cmd[1] * cmd[0]
print(position[0] * position[1])
if __name__ == "__main__":
main()
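# Worked example (hypothetical input): the lines "forward 5" and "down 3" give
# position [5, 3], so the script prints 15.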
| 2.828125 | 3 |
Movie_Recommendation_System.py | kunj17/Recommendation-System | 1 | 12792326 | <reponame>kunj17/Recommendation-System
"""
Created on Wed Jan 3 08:15:43 2018
@author: KUNJ
"""
"""
1.Godfather-1
2.Ted
3.Straight outta Compton
4.Godfather-2
5.Notorious
6.Get rich or die trying
7.Frozen
8.Tangled
9.Dunkirk
10.Interstellar
"""
from numpy import *
num_movies = 10
num_users = 5
ratings = random.randint(11, size = (num_movies, num_users))
print (ratings)
did_rate = (ratings != 0) * 1
print(did_rate)
ratings.shape
did_rate.shape
kunj_ratings = zeros((num_movies, 1))
print (kunj_ratings)
print (kunj_ratings[9])
kunj_ratings[0] = 8
kunj_ratings[4] = 7
kunj_ratings[7] = 3
print (kunj_ratings)
ratings = append(kunj_ratings, ratings, axis = 1)
did_rate = append(((kunj_ratings != 0) * 1), did_rate, axis = 1)
print (ratings)
ratings.shape
did_rate
print (did_rate)
did_rate.shape
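# Mean-normalization: for each movie, subtract its average rating computed only over
# the entries that were actually rated (did_rate == 1); unrated cells stay at 0.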
def normalize_ratings(ratings, did_rate):
num_movies = ratings.shape[0]
ratings_mean = zeros(shape = (num_movies, 1))
ratings_norm = zeros(shape = ratings.shape)
for i in range(num_movies):
idx = where(did_rate[i] == 1)[0]
ratings_mean[i] = mean(ratings[i, idx])
ratings_norm[i, idx] = ratings[i, idx] - ratings_mean[i]
return ratings_norm, ratings_mean
ratings, ratings_mean = normalize_ratings(ratings, did_rate)
print (ratings)
num_users = ratings.shape[1]
num_features = 3
movie_features = random.randn( num_movies, num_features )
user_prefs = random.randn( num_users, num_features )
initial_X_and_theta = r_[movie_features.T.flatten(), user_prefs.T.flatten()]
print(movie_features)
print (user_prefs)
print (initial_X_and_theta)
initial_X_and_theta.shape
movie_features.T.flatten().shape
user_prefs.T.flatten().shape
initial_X_and_theta
def unroll_params(X_and_theta, num_users, num_movies, num_features):
first_30 = X_and_theta[:num_movies * num_features]
X = first_30.reshape((num_features, num_movies)).transpose()
last_18 = X_and_theta[num_movies * num_features:]
theta = last_18.reshape(num_features, num_users ).transpose()
return X, theta
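# Gradient of the regularized collaborative-filtering cost with respect to the movie
# features (X) and the user preferences (theta), flattened back into a single vector.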
def calculate_gradient(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param):
X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features)
# only entries where a rating was actually given (did_rate == 1) contribute to the error
difference = X.dot( theta.T ) * did_rate - ratings
X_grad = difference.dot( theta ) + reg_param * X
theta_grad = difference.T.dot( X ) + reg_param * theta
return r_[X_grad.T.flatten(), theta_grad.T.flatten()]
def calculate_cost(X_and_theta, ratings, did_rate, num_users, num_movies, num_features, reg_param):
X, theta = unroll_params(X_and_theta, num_users, num_movies, num_features)
cost = sum( (X.dot( theta.T ) * did_rate - ratings) ** 2 ) / 2
regularization = (reg_param / 2) * (sum( theta**2 ) + sum(X**2))
return cost + regularization
from scipy import optimize
reg_param = 30
#scipy fmin
minimized_cost_and_optimal_params = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=initial_X_and_theta,args=(ratings, did_rate, num_users, num_movies, num_features, reg_param),maxiter=100, disp=True, full_output=True )
cost, optimal_movie_features_and_user_prefs = minimized_cost_and_optimal_params[1], minimized_cost_and_optimal_params[0]
movie_features, user_prefs = unroll_params(optimal_movie_features_and_user_prefs, num_users, num_movies, num_features)
print(movie_features)
print(user_prefs)
all_predictions = movie_features.dot( user_prefs.T )
print(all_predictions)
predictions_for_kunj = all_predictions[:, 0:1] + ratings_mean
print (predictions_for_kunj)
print (kunj_ratings)
| 2.59375 | 3 |
example-django/tests/test_models.py | Watershed-Function-SFA/BASIN-3D | 5 | 12792327 |
from django.test import TestCase
from basin3d.models import DataSource, SamplingMedium, \
ObservedPropertyVariable, ObservedProperty, DataSourceObservedPropertyVariable
class DataSourceTestCase(TestCase):
def setUp(self):
DataSource.objects.create(name="Foo", plugin_module="foo.bar.plugins", plugin_class="Baz", id_prefix="F")
DataSource.objects.create(name="Bar", plugin_module="foo.plugins", plugin_class="Bar", id_prefix="B")
def test_get(self):
"""Assert that the Data Sources were created"""
foo = DataSource.objects.get(name="Foo")
bar = DataSource.objects.get(name="Bar")
self.assertEqual(bar.name, "Bar")
self.assertEqual(foo.name, 'Foo')
class ObservedPropertyTestCase(TestCase):
"""
Assert that the parameters are created
"""
def setUp(self):
"""
Load some fake data to use in the tests
"""
self.datasource = DataSource.objects.get(name="Alpha")
self.observed_property_var = ObservedPropertyVariable(
id="FOO", full_name="Groundwater Flux",
categories="Hydrology,Subsurface")
self.sampling_medium = SamplingMedium()
def test_observed_property_create(self):
""" Was the object created correctly? """
obj = ObservedProperty(description="Acetate (CH3COO)",
observed_property_variable=self.observed_property_var,
sampling_medium=self.sampling_medium,
datasource=self.datasource)
assert obj.description == "Acetate (CH3COO)"
assert obj.observed_property_variable == self.observed_property_var
assert obj.sampling_medium == self.sampling_medium
assert obj.datasource == self.datasource
def test_observed_property_variable_create(self):
""" create the object and test attributes """
assert self.observed_property_var.id == "FOO"
assert self.observed_property_var.full_name == "Groundwater Flux"
assert self.observed_property_var.categories == "Hydrology,Subsurface"
def test_datasource_observed_property_variable_create(self):
""" Was the object created correctly? """
obj = DataSourceObservedPropertyVariable(
datasource=self.datasource, observed_property_variable=self.observed_property_var,
name="Alpha")
assert obj.datasource == self.datasource
assert obj.observed_property_variable == self.observed_property_var
assert obj.name == "Alpha"
| 2.5 | 2 |
Tarefa4/noh.py | liu88620/POO-Python | 0 | 12792328 | __author__ = 'Liu'
class Noh():
def __init__(self, proximo=None):
self.proximo = proximo
def get_proximo(self): ## checks whether the next node is "null"; if not, returns the next node.
if self.proximo is not None:
return self.proximo
def tem_proximo(self): ## checks whether there is a next node; if not, exits the function (returns False).
if self.proximo is not None:
return True
return False
utimo = Noh()
no2 = Noh(utimo)
atual = Noh(no2)
while atual.tem_proximo():
print('Has a next node')
atual = atual.get_proximo()
print('End of the node chain')
| 3.65625 | 4 |
apps/addons/views.py | oremj/zamboni | 0 | 12792329 | import functools
import hashlib
import json
import random
from urlparse import urlparse
import uuid
from operator import attrgetter
from django import http
from django.conf import settings
from django.db.models import Q
from django.shortcuts import get_list_or_404, get_object_or_404, redirect
from django.utils.translation import trans_real as translation
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
import caching.base as caching
import jingo
import jinja2
import commonware.log
import session_csrf
from tower import ugettext as _, ugettext_lazy as _lazy
import waffle
from mobility.decorators import mobilized, mobile_template
import amo
from amo import messages
from amo.decorators import login_required, post_required, write
from amo.forms import AbuseForm
from amo.helpers import shared_url
from amo.utils import randslice, sorted_groupby, urlparams
from amo.models import manual_order
from amo import urlresolvers
from amo.urlresolvers import reverse
from abuse.models import send_abuse_report
from bandwagon.models import Collection, CollectionFeature, CollectionPromo
from market.forms import PriceCurrencyForm
import paypal
from reviews.forms import ReviewForm
from reviews.models import Review, GroupedRating
from session_csrf import anonymous_csrf, anonymous_csrf_exempt
from sharing.views import share as share_redirect
from stats.models import Contribution
from translations.query import order_by_translation
from versions.models import Version
from .forms import ContributionForm
from .models import Addon, Persona, FrozenAddon
from .decorators import (addon_view_factory, can_be_purchased, has_purchased,
has_not_purchased)
from mkt.webapps.models import Installed
log = commonware.log.getLogger('z.addons')
paypal_log = commonware.log.getLogger('z.paypal')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed)
addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled)
def author_addon_clicked(f):
"""Decorator redirecting clicks on "Other add-ons by author"."""
@functools.wraps(f)
def decorated(request, *args, **kwargs):
redirect_id = request.GET.get('addons-author-addons-select', None)
if not redirect_id:
return f(request, *args, **kwargs)
try:
target_id = int(redirect_id)
return http.HttpResponsePermanentRedirect(reverse(
'addons.detail', args=[target_id]))
except ValueError:
return http.HttpResponseBadRequest('Invalid add-on ID.')
return decorated
@addon_disabled_view
def addon_detail(request, addon):
"""Add-ons details page dispatcher."""
if addon.is_deleted:
raise http.Http404
if addon.is_disabled:
return jingo.render(request, 'addons/impala/disabled.html',
{'addon': addon}, status=404)
if addon.is_webapp():
# Apps don't deserve AMO detail pages.
raise http.Http404
# addon needs to have a version and be valid for this app.
if addon.type in request.APP.types:
if addon.type == amo.ADDON_PERSONA:
return persona_detail(request, addon)
else:
if not addon.current_version:
raise http.Http404
return extension_detail(request, addon)
else:
# Redirect to an app that supports this type.
try:
new_app = [a for a in amo.APP_USAGE if addon.type
in a.types][0]
except IndexError:
raise http.Http404
else:
prefixer = urlresolvers.get_url_prefix()
prefixer.app = new_app.short
return http.HttpResponsePermanentRedirect(reverse(
'addons.detail', args=[addon.slug]))
@vary_on_headers('X-Requested-With')
def extension_detail(request, addon):
"""Extensions details page."""
# If current version is incompatible with this app, redirect.
comp_apps = addon.compatible_apps
if comp_apps and request.APP not in comp_apps:
prefixer = urlresolvers.get_url_prefix()
prefixer.app = comp_apps.keys()[0].short
return redirect('addons.detail', addon.slug, permanent=True)
# get satisfaction only supports en-US.
lang = translation.to_locale(translation.get_language())
addon.has_satisfaction = (lang == 'en_US' and
addon.get_satisfaction_company)
# Addon recommendations.
recommended = Addon.objects.listed(request.APP).filter(
recommended_for__addon=addon)[:6]
# Popular collections this addon is part of.
collections = Collection.objects.listed().filter(
addons=addon, application__id=request.APP.id)
ctx = {
'addon': addon,
'src': request.GET.get('src', 'dp-btn-primary'),
'version_src': request.GET.get('src', 'dp-btn-version'),
'tags': addon.tags.not_blacklisted(),
'grouped_ratings': GroupedRating.get(addon.id),
'recommendations': recommended,
'review_form': ReviewForm(),
'reviews': Review.objects.valid().filter(addon=addon, is_latest=True),
'get_replies': Review.get_replies,
'collections': collections.order_by('-subscribers')[:3],
'abuse_form': AbuseForm(request=request),
}
# details.html just returns the top half of the page for speed. The bottom
# does a lot more queries we don't want on the initial page load.
if request.is_ajax():
# Other add-ons/apps from the same author(s).
ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]
return jingo.render(request, 'addons/impala/details-more.html', ctx)
else:
if addon.is_webapp():
ctx['search_placeholder'] = 'apps'
return jingo.render(request, 'addons/impala/details.html', ctx)
@mobilized(extension_detail)
def extension_detail(request, addon):
return jingo.render(request, 'addons/mobile/details.html',
{'addon': addon})
def _category_personas(qs, limit):
f = lambda: randslice(qs, limit=limit)
key = 'cat-personas:' + qs.query_key()
return caching.cached(f, key)
@mobile_template('addons/{mobile/}persona_detail.html')
def persona_detail(request, addon, template=None):
"""Details page for Personas."""
if not addon.is_public():
raise http.Http404
persona = addon.persona
# this persona's categories
categories = addon.categories.filter(application=request.APP.id)
if categories:
qs = Addon.objects.public().filter(categories=categories[0])
category_personas = _category_personas(qs, limit=6)
else:
category_personas = None
data = {
'addon': addon,
'persona': persona,
'categories': categories,
'author_personas': persona.authors_other_addons(request.APP)[:3],
'category_personas': category_personas,
}
if not persona.is_new():
# Remora uses persona.author despite there being a display_username.
data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author
if not request.MOBILE:
# tags
dev_tags, user_tags = addon.tags_partitioned_by_developer
data.update({
'dev_tags': dev_tags,
'user_tags': user_tags,
'review_form': ReviewForm(),
'reviews': Review.objects.valid().filter(addon=addon,
is_latest=True),
'get_replies': Review.get_replies,
'search_cat': 'personas',
'abuse_form': AbuseForm(request=request),
})
return jingo.render(request, template, data)
class BaseFilter(object):
"""
Filters help generate querysets for add-on listings.
You have to define ``opts`` on the subclass as a sequence of (key, title)
pairs. The key is used in GET parameters and the title can be used in the
view.
The chosen filter field is combined with the ``base`` queryset using
the ``key`` found in request.GET. ``default`` should be a key in ``opts``
that's used if nothing good is found in request.GET.
"""
def __init__(self, request, base, key, default, model=Addon):
self.opts_dict = dict(self.opts)
self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
self.request = request
self.base_queryset = base
self.key = key
self.model = model
self.field, self.title = self.options(self.request, key, default)
self.qs = self.filter(self.field)
def options(self, request, key, default):
"""Get the (option, title) pair we want according to the request."""
if key in request.GET and (request.GET[key] in self.opts_dict or
request.GET[key] in self.extras_dict):
opt = request.GET[key]
else:
opt = default
if opt in self.opts_dict:
title = self.opts_dict[opt]
else:
title = self.extras_dict[opt]
return opt, title
def all(self):
"""Get a full mapping of {option: queryset}."""
return dict((field, self.filter(field)) for field in dict(self.opts))
def filter(self, field):
"""Get the queryset for the given field."""
filter = self._filter(field) & self.base_queryset
order = getattr(self, 'order_%s' % field, None)
if order:
return order(filter)
return filter
def _filter(self, field):
return getattr(self, 'filter_%s' % field)()
def filter_featured(self):
ids = self.model.featured_random(self.request.APP, self.request.LANG)
return manual_order(self.model.objects, ids, 'addons.id')
def filter_price(self):
return self.model.objects.order_by('addonpremium__price__price', 'id')
def filter_free(self):
if self.model == Addon:
return self.model.objects.top_free(self.request.APP, listed=False)
else:
return self.model.objects.top_free(listed=False)
def filter_paid(self):
if self.model == Addon:
return self.model.objects.top_paid(self.request.APP, listed=False)
else:
return self.model.objects.top_paid(listed=False)
def filter_popular(self):
return (self.model.objects.order_by('-weekly_downloads')
.with_index(addons='downloads_type_idx'))
def filter_downloads(self):
return self.filter_popular()
def filter_users(self):
return (self.model.objects.order_by('-average_daily_users')
.with_index(addons='adus_type_idx'))
def filter_created(self):
return (self.model.objects.order_by('-created')
.with_index(addons='created_type_idx'))
def filter_updated(self):
return (self.model.objects.order_by('-last_updated')
.with_index(addons='last_updated_type_idx'))
def filter_rating(self):
return (self.model.objects.order_by('-bayesian_rating')
.with_index(addons='rating_type_idx'))
def filter_hotness(self):
return self.model.objects.order_by('-hotness')
def filter_name(self):
return order_by_translation(self.model.objects.all(), 'name')
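# Illustrative BaseFilter subclass (hypothetical; see HomepageFilter below for a real one):
#   class PopularityFilter(BaseFilter):
#       opts = (('popular', _lazy(u'Popular')),
#               ('created', _lazy(u'Recently Added')))
#   PopularityFilter(request, Addon.objects.valid(), key='sort', default='popular')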
class ESBaseFilter(BaseFilter):
"""BaseFilter that uses elasticsearch."""
def __init__(self, request, base, key, default):
super(ESBaseFilter, self).__init__(request, base, key, default)
def filter(self, field):
sorts = {'name': 'name_sort',
'created': '-created',
'updated': '-last_updated',
'popular': '-weekly_downloads',
'users': '-average_daily_users',
'rating': '-bayesian_rating'}
return self.base_queryset.order_by(sorts[field])
class HomepageFilter(BaseFilter):
opts = (('featured', _lazy(u'Featured')),
('popular', _lazy(u'Popular')),
('new', _lazy(u'Recently Added')),
('updated', _lazy(u'Recently Updated')))
filter_new = BaseFilter.filter_created
def home(request):
# Add-ons.
base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION)
# This is lame for performance. Kill it with ES.
frozen = list(FrozenAddon.objects.values_list('addon', flat=True))
# Collections.
collections = Collection.objects.filter(listed=True,
application=request.APP.id,
type=amo.COLLECTION_FEATURED)
featured = Addon.objects.featured(request.APP, request.LANG,
amo.ADDON_EXTENSION)[:18]
popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10]
hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18]
personas = Addon.objects.featured(request.APP, request.LANG,
amo.ADDON_PERSONA)[:18]
return jingo.render(request, 'addons/home.html',
{'popular': popular, 'featured': featured,
'hotness': hotness, 'personas': personas,
'src': 'homepage', 'collections': collections})
@mobilized(home)
def home(request):
# Shuffle the list and get 3 items.
rand = lambda xs: random.shuffle(xs) or xs[:3]
# Get some featured add-ons with randomness.
featured = Addon.featured_random(request.APP, request.LANG)[:3]
# Get 10 popular add-ons, then pick 3 at random.
qs = list(Addon.objects.listed(request.APP)
.filter(type=amo.ADDON_EXTENSION)
.order_by('-average_daily_users')
.values_list('id', flat=True)[:10])
popular = rand(qs)
# Do one query and split up the add-ons.
addons = (Addon.objects.filter(id__in=featured + popular)
.filter(type=amo.ADDON_EXTENSION))
featured = [a for a in addons if a.id in featured]
popular = sorted([a for a in addons if a.id in popular],
key=attrgetter('average_daily_users'), reverse=True)
return jingo.render(request, 'addons/mobile/home.html',
{'featured': featured, 'popular': popular})
def homepage_promos(request):
from discovery.views import promos
version, platform = request.GET.get('version'), request.GET.get('platform')
if not (platform or version):
raise http.Http404
return promos(request, 'home', version, platform)
class CollectionPromoBox(object):
def __init__(self, request):
self.request = request
def features(self):
return CollectionFeature.objects.all()
def collections(self):
features = self.features()
lang = translation.to_language(translation.get_language())
locale = Q(locale='') | Q(locale=lang)
promos = (CollectionPromo.objects.filter(locale)
.filter(collection_feature__in=features)
.transform(CollectionPromo.transformer))
groups = sorted_groupby(promos, 'collection_feature_id')
# We key by feature_id and locale, so we can favor locale specific
# promos.
promo_dict = {}
for feature_id, v in groups:
promo = v.next()
key = (feature_id, translation.to_language(promo.locale))
promo_dict[key] = promo
rv = {}
# If we can, we favor locale specific collections.
for feature in features:
key = (feature.id, lang)
if key not in promo_dict:
key = (feature.id, '')
if key not in promo_dict:
continue
# We only want to see public add-ons on the front page.
c = promo_dict[key].collection
c.public_addons = c.addons.all() & Addon.objects.public()
rv[feature] = c
return rv
def __nonzero__(self):
return self.request.APP == amo.FIREFOX
@addon_view
def eula(request, addon, file_id=None):
if not addon.eula:
return http.HttpResponseRedirect(addon.get_url_path())
if file_id:
version = get_object_or_404(addon.versions, files__id=file_id)
else:
version = addon.current_version
return jingo.render(request, 'addons/eula.html',
{'addon': addon, 'version': version})
@addon_view
def privacy(request, addon):
if not addon.privacy_policy:
return http.HttpResponseRedirect(addon.get_url_path())
return jingo.render(request, 'addons/privacy.html', {'addon': addon})
@addon_view
def developers(request, addon, page):
if addon.is_persona():
raise http.Http404()
if 'version' in request.GET:
qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
version = get_list_or_404(qs, version=request.GET['version'])[0]
else:
version = addon.current_version
if 'src' in request.GET:
contribution_src = src = request.GET['src']
else:
page_srcs = {
'developers': ('developers', 'meet-developers'),
'installed': ('meet-the-developer-post-install', 'post-download'),
'roadblock': ('meetthedeveloper_roadblock', 'roadblock'),
}
# Download src and contribution_src are different.
src, contribution_src = page_srcs.get(page)
return jingo.render(request, 'addons/impala/developers.html',
{'addon': addon, 'page': page, 'src': src,
'contribution_src': contribution_src,
'version': version})
# TODO(andym): remove this once we figure out how to process for
# anonymous users. For now we are concentrating on logged in users.
@login_required
@addon_view
@can_be_purchased
@has_not_purchased
@write
@post_required
def purchase(request, addon):
log.debug('Starting purchase of addon: %s by user: %s'
% (addon.pk, request.amo_user.pk))
amount = addon.premium.get_price()
source = request.POST.get('source', '')
uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest()
# l10n: {0} is the addon name
contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name))
# Default is USD.
amount, currency = addon.premium.get_price(), 'USD'
# If tier is specified, then let's look it up.
form = PriceCurrencyForm(data=request.POST, addon=addon)
if form.is_valid():
tier = form.get_tier()
if tier:
amount, currency = tier.price, tier.currency
paykey, status, error = '', '', ''
preapproval = None
if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:
preapproval = request.amo_user.get_preapproval()
try:
pattern = 'addons.purchase.finished'
slug = addon.slug
if addon.is_webapp():
pattern = 'apps.purchase.finished'
slug = addon.app_slug
paykey, status = paypal.get_paykey(
dict(amount=amount,
chains=settings.PAYPAL_CHAINS,
currency=currency,
email=addon.paypal_id,
ip=request.META.get('REMOTE_ADDR'),
memo=contrib_for,
pattern=pattern,
preapproval=preapproval, qs={'realurl':
request.POST.get('realurl')},
slug=slug, uuid=uuid_))
except paypal.PaypalError as error:
paypal.paypal_log_cef(request, addon, uuid_,
'PayKey Failure', 'PAYKEYFAIL',
'There was an error getting the paykey')
log.error('Error getting paykey, purchase of addon: %s' % addon.pk,
exc_info=True)
if paykey:
contrib = Contribution(addon_id=addon.id, amount=amount,
source=source, source_locale=request.LANG,
uuid=str(uuid_), type=amo.CONTRIB_PENDING,
paykey=paykey, user=request.amo_user)
log.debug('Storing contrib for uuid: %s' % uuid_)
# If this was a pre-approval, it's completed already, we'll
# double check this with PayPal, just to be sure nothing went wrong.
if status == 'COMPLETED':
paypal.paypal_log_cef(request, addon, uuid_,
'Purchase', 'PURCHASE',
'A user purchased using pre-approval')
log.debug('Status is completed for uuid: %s' % uuid_)
if paypal.check_purchase(paykey) == 'COMPLETED':
log.debug('Check purchase is completed for uuid: %s' % uuid_)
contrib.type = amo.CONTRIB_PURCHASE
else:
# In this case PayPal disagreed, we should not be trusting
# what get_paykey said. Which is a worry.
log.error('Check purchase failed on uuid: %s' % uuid_)
status = 'NOT-COMPLETED'
contrib.save()
else:
log.error('No paykey present for uuid: %s' % uuid_)
log.debug('Got paykey for addon: %s by user: %s'
% (addon.pk, request.amo_user.pk))
url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
if request.POST.get('result_type') == 'json' or request.is_ajax():
return http.HttpResponse(json.dumps({'url': url,
'paykey': paykey,
'error': str(error),
'status': status}),
content_type='application/json')
# This is the non-Ajax fallback.
if status != 'COMPLETED':
return http.HttpResponseRedirect(url)
messages.success(request, _('Purchase complete'))
return http.HttpResponseRedirect(shared_url('addons.detail', addon))
# TODO(andym): again, remove this once we figure out logged out flow.
@csrf_exempt
@login_required
@addon_view
@can_be_purchased
@write
def purchase_complete(request, addon, status):
result = ''
if status == 'complete':
uuid_ = request.GET.get('uuid')
log.debug('Looking up contrib for uuid: %s' % uuid_)
# The IPN may, or may not have come through. Which means looking for
# a for pre or post IPN contributions. If both fail, then we've not
# got a matching contribution.
lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) |
Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE))
con = get_object_or_404(Contribution, lookup)
log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s'
% (addon.pk, request.amo_user.pk, con.paykey[:10]))
try:
result = paypal.check_purchase(con.paykey)
if result == 'ERROR':
paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail',
'PURCHASEFAIL',
'Checking purchase state returned error')
raise
except:
paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail',
'PURCHASEFAIL',
'There was an error checking purchase state')
log.error('Check purchase paypal addon: %s, user: %s, paykey: %s'
% (addon.pk, request.amo_user.pk, con.paykey[:10]),
exc_info=True)
result = 'ERROR'
status = 'error'
log.debug('Paypal returned: %s for paykey: %s'
% (result, con.paykey[:10]))
if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING:
con.update(type=amo.CONTRIB_PURCHASE)
context = {'realurl': request.GET.get('realurl', ''),
'status': status, 'result': result}
# For mobile, bounce back to the details page.
if request.MOBILE:
url = urlparams(shared_url('detail', addon), **context)
return http.HttpResponseRedirect(url)
context.update({'addon': addon})
response = jingo.render(request, 'addons/paypal_result.html', context)
response['x-frame-options'] = 'allow'
return response
@login_required
@addon_view
@can_be_purchased
@has_purchased
def purchase_thanks(request, addon):
download = urlparse(request.GET.get('realurl', '')).path
data = {'addon': addon, 'is_ajax': request.is_ajax(),
'download': download}
if addon.is_webapp():
installed, c = Installed.objects.safer_get_or_create(
addon=addon, user=request.amo_user)
data['receipt'] = installed.receipt
return jingo.render(request, 'addons/paypal_thanks.html', data)
@login_required
@addon_view
@can_be_purchased
def purchase_error(request, addon):
data = {'addon': addon, 'is_ajax': request.is_ajax()}
return jingo.render(request, 'addons/paypal_error.html', data)
@addon_view
@anonymous_csrf_exempt
@post_required
def contribute(request, addon):
webapp = addon.is_webapp()
contrib_type = request.POST.get('type', 'suggested')
is_suggested = contrib_type == 'suggested'
source = request.POST.get('source', '')
comment = request.POST.get('comment', '')
amount = {
'suggested': addon.suggested_amount,
'onetime': request.POST.get('onetime-amount', '')
}.get(contrib_type, '')
if not amount:
amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION
# This is all going to get shoved into solitude. Temporary.
form = ContributionForm({'amount': amount})
if not form.is_valid():
return http.HttpResponse(json.dumps({'error': 'Invalid data.',
'status': '', 'url': '',
'paykey': ''}),
content_type='application/json')
contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()
if addon.charity:
# TODO(andym): Figure out how to get this in the addon authors
# locale, rather than the contributors locale.
name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),
addon.charity.paypal)
else:
name, paypal_id = addon.name, addon.paypal_id
# l10n: {0} is the addon name
contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))
preapproval = None
if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:
preapproval = request.amo_user.get_preapproval()
paykey, error, status = '', '', ''
try:
paykey, status = paypal.get_paykey(
dict(amount=amount,
email=paypal_id,
ip=request.META.get('REMOTE_ADDR'),
memo=contrib_for,
pattern='%s.paypal' % ('apps' if webapp else 'addons'),
preapproval=preapproval,
slug=addon.slug,
uuid=contribution_uuid))
except paypal.PaypalError as error:
paypal.paypal_log_cef(request, addon, contribution_uuid,
'PayKey Failure', 'PAYKEYFAIL',
'There was an error getting the paykey')
log.error('Error getting paykey, contribution for addon: %s'
% addon.pk, exc_info=True)
if paykey:
contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,
amount=amount, source=source,
source_locale=request.LANG,
annoying=addon.annoying,
uuid=str(contribution_uuid),
is_suggested=is_suggested,
suggested_amount=addon.suggested_amount,
comment=comment, paykey=paykey)
contrib.save()
url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
if request.GET.get('result_type') == 'json' or request.is_ajax():
# If there was an error getting the paykey, then JSON will
# not have a paykey and the JS can cope appropriately.
return http.HttpResponse(json.dumps({'url': url,
'paykey': paykey,
'error': str(error),
'status': status}),
content_type='application/json')
return http.HttpResponseRedirect(url)
@csrf_exempt
@addon_view
def paypal_result(request, addon, status):
uuid = request.GET.get('uuid')
if not uuid:
raise http.Http404()
if status == 'cancel':
log.info('User cancelled contribution: %s' % uuid)
else:
log.info('User completed contribution: %s' % uuid)
response = jingo.render(request, 'addons/paypal_result.html',
{'addon': addon, 'status': status})
response['x-frame-options'] = 'allow'
return response
@addon_view
@can_be_purchased
@anonymous_csrf
def paypal_start(request, addon=None):
download = urlparse(request.GET.get('realurl', '')).path
data = {'addon': addon, 'is_ajax': request.is_ajax(),
'download': download,
'currencies': addon.premium.price.currencies()}
if request.user.is_authenticated():
return jingo.render(request, 'addons/paypal_start.html', data)
from users.views import _login
return _login(request, data=data, template='addons/paypal_start.html',
dont_redirect=True)
@addon_view
def share(request, addon):
"""Add-on sharing"""
return share_redirect(request, addon, addon.name, addon.summary)
@addon_view
def license(request, addon, version=None):
if version is not None:
qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
version = get_list_or_404(qs, version=version)[0]
else:
version = addon.current_version
if not (version and version.license):
raise http.Http404
return jingo.render(request, 'addons/impala/license.html',
dict(addon=addon, version=version))
def license_redirect(request, version):
version = get_object_or_404(Version, pk=version)
return redirect(version.license_url(), permanent=True)
@session_csrf.anonymous_csrf_exempt
@addon_view
def report_abuse(request, addon):
form = AbuseForm(request.POST or None, request=request)
if request.method == "POST" and form.is_valid():
send_abuse_report(request, addon, form.cleaned_data['text'])
messages.success(request, _('Abuse reported.'))
return http.HttpResponseRedirect(addon.get_url_path())
else:
return jingo.render(request, 'addons/report_abuse_full.html',
{'addon': addon, 'abuse_form': form, })
@cache_control(max_age=60 * 60 * 24)
def persona_redirect(request, persona_id):
persona = get_object_or_404(Persona, persona_id=persona_id)
to = reverse('addons.detail', args=[persona.addon.slug])
return http.HttpResponsePermanentRedirect(to)
| 1.375 | 1 |
federated_learning/breast_density_challenge/code/pt/utils/preprocess_dicomdir.py | finalelement/tutorials | 15 | 12792330 | # Copyright 2022 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import random
import numpy as np
import pandas as pd
from preprocess_dicom import dicom_preprocess
from sklearn.model_selection import GroupKFold
# density labels
# 1 - fatty
# 2 - scattered fibroglandular density
# 3 - heterogeneously dense
# 4 - extremely dense
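# (The data lists written below store these as zero-based labels, i.e. density - 1.)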
def preprocess(dicom_root, out_path, ids, images, densities, process_image=True):
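    """Convert one split's DICOM files and build its datalist entries.

    For every (patient id, image path, density) triple the matching DICOM file
    is located under dicom_root and converted via dicom_preprocess; when
    process_image is False the conversion is skipped and an already saved .npy
    is reused if present. Cases with density < 1 are excluded. Returns
    (data_list, dc_tags, saved_filenames), where each data_list entry holds
    the patient id, the .npy file name and the zero-based density label.
    """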
data_list = []
dc_tags = []
saved_filenames = []
assert len(ids) == len(images) == len(densities)
for i, (id, image, density) in enumerate(zip(ids, images, densities)):
if (i + 1) % 200 == 0:
print(f"processing {i+1} of {len(ids)}...")
dir_name = image.split(os.path.sep)[0]
img_file = glob.glob(
os.path.join(dicom_root, dir_name, "**", "*.dcm"), recursive=True
)
assert len(img_file) == 1, f"No unique dicom image found for {dir_name}!"
save_prefix = os.path.join(out_path, dir_name)
if process_image:
_success, _dc_tags = dicom_preprocess(img_file[0], save_prefix)
else:
if os.path.isfile(save_prefix + ".npy"):
_success = True
else:
_success = False
_dc_tags = []
if _success and density >= 1: # label can be 0 sometimes, excluding those cases
dc_tags.append(_dc_tags)
data_list.append(
{
"patient_id": id,
"image": dir_name + ".npy",
"label": int(density - 1),
}
)
saved_filenames.append(dir_name + ".npy")
return data_list, dc_tags, saved_filenames
def write_datalist(save_datalist_file, data_set):
os.makedirs(os.path.dirname(save_datalist_file), exist_ok=True)
with open(save_datalist_file, "w") as f:
json.dump(data_set, f, indent=4)
print(f"Data list saved at {save_datalist_file}")
def get_indices(all_ids, search_ids):
indices = []
for _id in search_ids:
_indices = np.where(all_ids == _id)
indices.extend(_indices[0].tolist())
return indices
def main():
process_image = True # set False if dicoms have already been preprocessed
out_path = "./data/preprocessed" # YOUR DEST FOLDER SHOULD BE WRITTEN HERE
out_dataset_prefix = "./data/dataset"
# Input folders
label_root = "/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/"
dicom_root = "/media/hroth/Elements/NVIDIA/Data/CBIS-DDSM/DICOM/manifest-ZkhPvrLo5216730872708713142/CBIS-DDSM"
n_clients = 3
""" Run preprocessing """
""" 1. Load the label data """
random.seed(0)
label_files = [
os.path.join(label_root, "mass_case_description_train_set.csv"),
os.path.join(label_root, "calc_case_description_train_set.csv"),
os.path.join(label_root, "mass_case_description_test_set.csv"),
os.path.join(label_root, "calc_case_description_test_set.csv"),
]
breast_densities = []
patients_ids = []
image_file_path = []
# read annotations
for label_file in label_files:
print(f"add {label_file}")
label_data = pd.read_csv(label_file)
unique_images, unique_indices = np.unique(
label_data["image file path"], return_index=True
)
print(
f"including {len(unique_images)} unique images of {len(label_data['image file path'])} image entries"
)
try:
breast_densities.extend(label_data["breast_density"][unique_indices])
except BaseException:
breast_densities.extend(label_data["breast density"][unique_indices])
patients_ids.extend(label_data["patient_id"][unique_indices])
image_file_path.extend(label_data["image file path"][unique_indices])
assert len(breast_densities) == len(patients_ids) == len(image_file_path), (
f"Mismatch between label data, breast_densities: "
f"{len(breast_densities)}, patients_ids: {len(patients_ids)}, image_file_path: {len(image_file_path)}"
)
print(f"Read {len(image_file_path)} data entries.")
""" 2. Split the data """
# shuffle data
label_data = list(zip(breast_densities, patients_ids, image_file_path))
random.shuffle(label_data)
breast_densities, patients_ids, image_file_path = zip(*label_data)
# Split data
breast_densities = np.array(breast_densities)
patients_ids = np.array(patients_ids)
image_file_path = np.array(image_file_path)
unique_patient_ids = np.unique(patients_ids)
n_patients = len(unique_patient_ids)
print(f"Found {n_patients} patients.")
# generate splits using roughly the same ratios as for challenge data:
n_train_challenge = 60_000
n_val_challenge = 6_500
n_test_challenge = 40_000
test_ratio = n_test_challenge / (
n_train_challenge + n_val_challenge + n_test_challenge
)
val_ratio = n_val_challenge / (
n_val_challenge + n_test_challenge
) # test cases will be removed at this point
# use groups to avoid patient overlaps
# test split
n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * test_ratio)))
print(
f"Splitting into {n_splits} folds for test split. (Only the first fold is used.)"
)
group_kfold = GroupKFold(n_splits=n_splits)
for train_val_index, test_index in group_kfold.split(
image_file_path, breast_densities, groups=patients_ids
):
break # just use first fold
test_images = image_file_path[test_index]
test_patients_ids = patients_ids[test_index]
test_densities = breast_densities[test_index]
# train/val splits
train_val_images = image_file_path[train_val_index]
train_val_patients_ids = patients_ids[train_val_index]
train_val_densities = breast_densities[train_val_index]
n_splits = int(np.ceil(len(image_file_path) / (len(image_file_path) * val_ratio)))
print(
f"Splitting into {n_splits} folds for train/val splits. (Only the first fold is used.)"
)
group_kfold = GroupKFold(n_splits=n_splits)
for train_index, val_index in group_kfold.split(
train_val_images, train_val_densities, groups=train_val_patients_ids
):
break # just use first fold
train_images = train_val_images[train_index]
train_patients_ids = train_val_patients_ids[train_index]
train_densities = train_val_densities[train_index]
val_images = train_val_images[val_index]
val_patients_ids = train_val_patients_ids[val_index]
val_densities = train_val_densities[val_index]
# check that there is no patient overlap
assert (
len(np.intersect1d(train_patients_ids, val_patients_ids)) == 0
), "Overlapping patients in train and validation!"
assert (
len(np.intersect1d(train_patients_ids, test_patients_ids)) == 0
), "Overlapping patients in train and test!"
assert (
len(np.intersect1d(val_patients_ids, test_patients_ids)) == 0
), "Overlapping patients in validation and test!"
n_total = len(train_images) + len(val_images) + len(test_images)
print(20 * "-")
print(f"Train : {len(train_images)} ({100*len(train_images)/n_total:.2f}%)")
print(f"Val : {len(val_images)} ({100*len(val_images)/n_total:.2f}%)")
print(f"Test : {len(test_images)} ({100*len(test_images)/n_total:.2f}%)")
print(20 * "-")
print(f"Total : {n_total}")
assert n_total == len(image_file_path), (
f"mismatch between total split images ({n_total})"
f" and length of all images {len(image_file_path)}!"
)
""" split train/validation dataset for n_clients """
# Split and avoid patient overlap
unique_train_patients_ids = np.unique(train_patients_ids)
split_train_patients_ids = np.array_split(unique_train_patients_ids, n_clients)
unique_val_patients_ids = np.unique(val_patients_ids)
split_val_patients_ids = np.array_split(unique_val_patients_ids, n_clients)
unique_test_patients_ids = np.unique(test_patients_ids)
split_test_patients_ids = np.array_split(unique_test_patients_ids, n_clients)
""" 3. Preprocess the images """
dc_tags = []
saved_filenames = []
for c in range(n_clients):
site_name = f"site-{c+1}"
print(f"Preprocessing training set of client {site_name}")
_curr_patient_ids = split_train_patients_ids[c]
_curr_indices = get_indices(train_patients_ids, _curr_patient_ids)
train_list, _dc_tags, _saved_filenames = preprocess(
dicom_root,
out_path,
train_patients_ids[_curr_indices],
train_images[_curr_indices],
train_densities[_curr_indices],
process_image=process_image,
)
print(
f"Converted {len(train_list)} of {len(train_patients_ids)} training images"
)
dc_tags.extend(_dc_tags)
saved_filenames.extend(_saved_filenames)
print("Preprocessing validation")
_curr_patient_ids = split_val_patients_ids[c]
_curr_indices = get_indices(val_patients_ids, _curr_patient_ids)
val_list, _dc_tags, _saved_filenames = preprocess(
dicom_root,
out_path,
val_patients_ids[_curr_indices],
val_images[_curr_indices],
val_densities[_curr_indices],
process_image=process_image,
)
print(f"Converted {len(val_list)} of {len(val_patients_ids)} validation images")
dc_tags.extend(_dc_tags)
saved_filenames.extend(_saved_filenames)
print("Preprocessing testing")
_curr_patient_ids = split_test_patients_ids[c]
_curr_indices = get_indices(test_patients_ids, _curr_patient_ids)
test_list, _dc_tags, _saved_filenames = preprocess(
dicom_root,
out_path,
test_patients_ids[_curr_indices],
test_images[_curr_indices],
test_densities[_curr_indices],
process_image=process_image,
)
print(f"Converted {len(test_list)} of {len(test_patients_ids)} testing images")
dc_tags.extend(_dc_tags)
saved_filenames.extend(_saved_filenames)
data_set = {
"train": train_list, # will stay the same for both phases
"test1": val_list, # like phase 1 leaderboard
"test2": test_list, # like phase 2 - final leaderboard
}
write_datalist(f"{out_dataset_prefix}_{site_name}.json", data_set)
print(50 * "=")
print(
f"Successfully converted a total {len(saved_filenames)} of {len(image_file_path)} images."
)
# check that there were no duplicated files
assert len(saved_filenames) == len(
np.unique(saved_filenames)
), f"Not all generated files ({len(saved_filenames)}) are unique ({len(np.unique(saved_filenames))})!"
print(f"Data lists saved wit prefix {out_dataset_prefix}")
print(50 * "=")
print("Processed unique DICOM tags", np.unique(dc_tags))
if __name__ == "__main__":
main()
| 2.1875 | 2 |
idp_user/typing.py | CardoAI/django-idp-user | 5 | 12792331 | <filename>idp_user/typing.py
from typing import TypedDict, Union, List, Any, Optional, Type
from django.db import models
ALL = 'all'
class JwtData(TypedDict):
iat: int
nbf: int
jti: str
exp: str
type: str
fresh: str
user_id: int
email: str
username: str
class UserFeaturesPermissions(TypedDict):
dod_manager: Union[List, bool]
cash_flow_projection: Union[List, bool]
notes_manager: Union[List, bool]
class AppSpecificConfigs(TypedDict):
app_entities_restrictions: Optional[dict[str, list]]
permission_restrictions: dict[str, Union[bool, Any]]
Role = str
UserAppSpecificConfigs = dict[Role, AppSpecificConfigs]
class UserTenantData(TypedDict):
idp_user_id: int
first_name: str
last_name: str
username: str
email: str
is_active: bool
is_staff: bool
is_superuser: bool
date_joined: str
app_specific_configs: UserAppSpecificConfigs
"""
"data": [
{
"idp_user_id": 12,
"first_name": "str",
"last_name": "str",
"username": "str",
"email": "str",
"is_active": "bool",
"is_staff": "bool",
"is_superuser": "bool",
"date_joined": "datetime"
"app_specific_configs": {
"app_identifier": {
"Servicer": {
"app_entities_restrictions": {"vehicle": [1, 2]},
"permission_restrictions": {
"viewDoD": {"vehicle_ids": [1]},
"synchronizeDoD": false
}
}
}
}
}
]
"""
# ===
AppIdentifier = str
TenantIdentifier = str
UserRecordAppSpecificConfigs = dict[AppIdentifier, dict[TenantIdentifier, AppSpecificConfigs]]
class UserRecordDict(TypedDict):
idp_user_id: int
first_name: Optional[str]
last_name: Optional[str]
username: Optional[str]
email: Optional[str]
is_active: Optional[bool]
is_staff: Optional[bool]
is_superuser: Optional[bool]
date_joined: Optional[str]
app_specific_configs: UserRecordAppSpecificConfigs
"""
Example of a user record from kafka:
{
"first_name": "str",
"last_name": "str",
"username": "str",
"email": "str",
"is_active": "bool",
"is_staff": "bool",
"is_superuser": "bool",
"date_joined": "datetime"
"app_specific_configs": {
"app_identifier": {
"tenant": {
"Servicer": {
"app_entities_restrictions": {"vehicle": [1, 2]},
"permission_restrictions": {
"viewDoD": {"vehicle": [1]},
"synchronizeDoD": false
}
}
}
}
}
}
"""
class AppEntityTypeConfig(TypedDict):
model: Union[str, Type[models.Model]]
identifier_attr: str
label_attr: str
class AppEntityRecordEventDict(TypedDict):
app_identifier: str
app_entity_type: str
record_identifier: Any
deleted: bool
label: Optional[str]
| 2.109375 | 2 |
nopwned/app/addon_pwned.py | freman/hass-nopwned | 7 | 12792332 | """Helpers to check core security."""
from datetime import timedelta
from typing import List, Optional
from ...const import CoreState
from ...jobs.const import JobCondition, JobExecutionLimit
from ...jobs.decorator import Job
from ..const import ContextType, IssueType
from .base import CheckBase
class CheckAddonPwned(CheckBase):
"""CheckAddonPwned class for check."""
@Job(
conditions=[JobCondition.INTERNET_SYSTEM],
limit=JobExecutionLimit.THROTTLE,
throttle_period=timedelta(hours=24),
)
async def run_check(self) -> None:
"""Run check if not affected by issue."""
@Job(conditions=[JobCondition.INTERNET_SYSTEM])
async def approve_check(self, reference: Optional[str] = None) -> bool:
"""Approve check if it is affected by issue."""
return False
@property
def issue(self) -> IssueType:
"""Return a IssueType enum."""
return IssueType.PWNED
@property
def context(self) -> ContextType:
"""Return a ContextType enum."""
return ContextType.ADDON
@property
def states(self) -> List[CoreState]:
"""Return a list of valid states when this check can run."""
return [CoreState.RUNNING]
| 2.390625 | 2 |
setup.py | andrey-git/waqi-async | 3 | 12792333 | <gh_stars>1-10
"""A setuptools based setup module."""
from setuptools import setup, find_packages
setup(
name='waqiasync',
version='1.0.0',
description='asyncio-friendly python API for aqicn.org',
long_description='asyncio-friendly python API for World Air Quality Index (http://aqicn.org). Requires Python 3.4+',
url='https://github.com/andrey-git/waqi-async',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='waqi',
install_requires=['aiohttp', 'async_timeout'],
zip_safe=True,
author = 'andrey-git',
author_email = '<EMAIL>',
packages=find_packages()
) | 1.273438 | 1 |
app/recipe/views.py | bdlb77/recipe-app-api | 0 | 12792334 | from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient
from recipe import serializers
# tag and ingredients are attributes of a recipe
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base ViewSet for user-owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""return objects for user only"""
return self.queryset.filter(user=self.request.user).order_by("-name")
def perform_create(self, serializer):
"""Create a new object"""
return serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage Ingredients in Database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
| 2.28125 | 2 |
msg_serv.py | arti95/fast-lircd-websocket-bridge | 0 | 12792335 | #!/usr/bin/env python3
import asyncio
import sys
import os
class EchoServer(asyncio.Protocol):
clients = {}
def connection_made(self, transport):
peername = transport.get_extra_info('peername')
print('connection from {}'.format(peername))
self.transport = transport
self.clients[transport] = None
def data_received(self, data):
# print('data received: {}'.format(data.decode()))
for transport in self.clients:
if transport == self.transport:
pass
#continue
transport.write(data)
def connection_lost(self, exc):
print("connection lost")
self.transport.close()
del self.clients[self.transport]
server_address = sys.argv[1]
try:
os.unlink(server_address)
except OSError:
if os.path.exists(server_address):
raise
loop = asyncio.get_event_loop()
#coro = loop.create_server(EchoServer, "127.0.0.1", 8888)
coro = loop.create_unix_server(EchoServer, server_address)
server = loop.run_until_complete(coro)
print('serving on {}'.format(server_address))
try:
loop.run_forever()
except KeyboardInterrupt:
print("exit")
finally:
server.close()
loop.close()
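# Quick manual test (assumes a tool such as socat is available): start the
# server with e.g. ./msg_serv.py /tmp/msg.sock, then connect two clients with
#   socat - UNIX-CONNECT:/tmp/msg.sock
# Anything typed into one client is broadcast to every connected client.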
| 2.890625 | 3 |
app.py | marcusvanwinden/book_scraper | 0 | 12792336 | # Import required packages
import sqlite3
import operations
import os
import time
# Establish connection with database
connection = sqlite3.connect("books.db")
# Instantiate cursor
cursor = connection.cursor()
# Create the books table
cursor.execute("""
CREATE TABLE IF NOT EXISTS books (
BookId INTEGER PRIMARY KEY,
Title TEXT NOT NULL,
Price DECIMAL(5, 2),
Stock INTEGER
)
""")
if __name__ == "__main__":
# Print welcome screen
operations.clear_screen()
print("Hello \U0001f600\n")
time.sleep(2)
begin_page = None
# Ask user for valid page number
while begin_page not in range(1, 51):
try:
print("At what page would you like to start scraping?\n\n"
"Please type a number between 1 and 50.\n\n"
"The higher the number, the faster the program will be done.\n"
)
begin_page = int(input("Number > "))
if begin_page not in range(1, 51):
raise Exception()
except:
operations.clear_screen()
# Start scraping
operations.clear_screen()
print("Let the scraping begin! \U0001f600")
time.sleep(2)
book_urls = operations.scrape_book_urls(begin=begin_page)
books = operations.scrape_books(book_urls)
operations.write_to_csv_file(books)
try:
current_book = 1
for book in books:
query = """
                INSERT INTO books (Title, Price, Stock)
                VALUES (?, ?, ?)"""
cursor.execute(query, (book["title"], book["price"], book["stock"]))
print(f"Saving book {current_book}/{len(books)} to the database")
current_book += 1
time.sleep(0.1)
except:
pass
# Commit data to the database
connection.commit()
operations.clear_screen()
print("Completed!\nNow type 'open books.csv' \U0001f600")
| 3.625 | 4 |
esp8266/platform.py | pythings/PythingsOS | 11 | 12792337 | <gh_stars>10-100
platform = 'esp8266'
| 0.996094 | 1 |
standalone/mlflow_handlers/mlflow_handlers.py | frburrue/tfm | 0 | 12792338 | <reponame>frburrue/tfm<gh_stars>0
import mlflow
import os
import shutil
import boto3
from datetime import datetime
S3_CLIENT = boto3.resource('s3')
mlflow.set_tracking_uri(os.getenv('MLFLOW_TRACKING_URI'))
MLFLOW_CLIENT = mlflow.tracking.MlflowClient()
REGISTERED_MODELS = ["Hands"]
CURRENT_MODEL = "Unknown"
MODELS = {}
def downlod_model(bucket_name, remoteDirectory_name):
bucket = S3_CLIENT.Bucket(bucket_name)
for obj in bucket.objects.filter(Prefix=remoteDirectory_name):
if not os.path.exists(os.path.dirname(obj.key)):
os.makedirs(os.path.dirname(obj.key))
bucket.download_file(obj.key, obj.key)
def update_models():
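    """Refresh the locally cached Production models from the MLflow registry.

    For every name in REGISTERED_MODELS the version currently in the
    'Production' stage is looked up; its artifacts are downloaded from S3 into
    ./models (or reused when already present) and MODELS is pointed at the
    resulting model.h5 file. Returns a dict mapping each model name to a flag
    saying whether the cached model changed.
    """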
global CURRENT_MODEL
update = {}
for model_name in REGISTERED_MODELS:
model = None
update[model_name] = 0
for mv in MLFLOW_CLIENT.search_model_versions(f"name='{model_name}'"):
mv = dict(mv)
if mv['current_stage'] == 'Production':
mv['last_updated_timestamp'] = str(datetime.fromtimestamp(int(mv['last_updated_timestamp'] / 1000)))
bucket = mv['source'].split('//')[1].split('/')[0]
folder = mv['source'].split('//')[1].split('/')[1]
if os.path.exists(os.path.join('./models', folder)):
print("Load existing model...")
model = os.path.join(os.path.join('./models', folder), "artifacts/model/data/model.h5")
update[model_name] = not (CURRENT_MODEL == model)
CURRENT_MODEL = model
else:
print("Downloading model...")
downlod_model(bucket, folder)
model = os.path.join(os.path.join('./models', folder), "artifacts/model/data/model.h5")
update[model_name] = not (CURRENT_MODEL == model)
CURRENT_MODEL = model
if os.path.exists('./models'):
shutil.rmtree('./models')
os.mkdir('./models')
shutil.move(os.path.join(os.getcwd(), folder), './models')
print("Using model {name} v{version} ({current_stage}) updated at {last_updated_timestamp}".format(**mv))
#response = {k: v for k, v in mv.items() if v}
break
if model:
MODELS[model_name] = model
return update
def get_model(model_name):
return MODELS.get(model_name, None)
| 1.960938 | 2 |
taurex/contributions/absorption.py | rychallener/TauREx3_public | 0 | 12792339 |
from .contribution import Contribution
import numpy as np
from taurex.cache import OpacityCache
class AbsorptionContribution(Contribution):
"""
Computes the contribution to the optical depth
occuring from molecular absorption.
"""
def __init__(self):
super().__init__('Absorption')
self._opacity_cache = OpacityCache()
def prepare_each(self, model, wngrid):
"""
Prepares each molecular opacity by weighting them
by their mixing ratio in the atmosphere
Parameters
----------
model: :class:`~taurex.model.model.ForwardModel`
Forward model
wngrid: :obj:`array`
Wavenumber grid
Yields
------
component: :obj:`tuple` of type (str, :obj:`array`)
Name of molecule and weighted opacity
"""
self.debug('Preparing model with %s', wngrid.shape)
self._ngrid = wngrid.shape[0]
sigma_xsec = np.zeros(shape=(model.nLayers, wngrid.shape[0]))
# Get the opacity cache
self._opacity_cache = OpacityCache()
# Loop through all active gases
for gas in model.chemistry.activeGases:
# Clear sigma array
sigma_xsec[...] = 0.0
# Get the mix ratio of the gas
gas_mix = model.chemistry.get_gas_mix_profile(gas)
self.info('Recomputing active gas %s opacity', gas)
# Get the cross section object relating to the gas
xsec = self._opacity_cache[gas]
# Loop through the layers
for idx_layer, tp in enumerate(zip(model.temperatureProfile,
model.pressureProfile)):
self.debug('Got index,tp %s %s', idx_layer, tp)
temperature, pressure = tp
# Place into the array
sigma_xsec[idx_layer] += \
xsec.opacity(temperature, pressure,
wngrid)*gas_mix[idx_layer]
# Temporarily assign to master cross-section
self.sigma_xsec = sigma_xsec
yield gas, sigma_xsec
@property
def sigma(self):
"""
Returns the fused weighted cross-section
of all active gases
"""
return self.sigma_xsec
| 2.546875 | 3 |
python_modules/models/Result.py | martijnbroekman/OfficeHeatlth | 0 | 12792340 | class Result:
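    """Outcome of a single analysis pass: whether a face was detected, plus
    optional emotion, posture and fatigue estimates (None when not computed)."""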
def __init__(self, face_detected, emotions=None, posture=None, fatigue=None):
self.face_detected = face_detected
self.emotions = emotions
self.posture = posture
self.fatigue = fatigue
| 2.34375 | 2 |
tests/grad_test.py | sheroze1123/HROM_BIDL | 2 | 12792341 | import sys
sys.path.append('../')
import matplotlib; matplotlib.use('macosx')
import time
import numpy as np
import matplotlib.pyplot as plt
import dolfin as dl; dl.set_log_level(40)
# ROMML imports
from fom.forward_solve import Fin
from fom.thermal_fin import get_space
from rom.averaged_affine_ROM import AffineROMFin
from deep_learning.dl_model import load_parametric_model_avg, load_bn_model
from gaussian_field import make_cov_chol
# Tensorflow related imports
from tensorflow.keras.optimizers import Adam
class SolverWrapper:
def __init__(self, solver, data):
self.solver = solver
self.data = data
self.z = dl.Function(V)
def cost_function(self, z_v):
self.z.vector().set_local(z_v)
w, y, A, B, C = self.solver.forward(self.z)
y = self.solver.qoi_operator(w)
reg_cost = dl.assemble(self.solver.reg)
cost = 0.5 * np.linalg.norm(y - self.data)**2
# cost = cost + reg_cost
return cost
def gradient(self, z_v):
self.z.vector().set_local(z_v)
grad = self.solver.gradient(self.z, self.data)
reg_grad = dl.assemble(self.solver.grad_reg)[:]
# grad = grad + reg_grad
return grad
class ROMMLSolverWrapper:
def __init__(self, err_model, solver_r, solver):
self.err_model = err_model
self.solver_r = solver_r
self.z = dl.Function(V)
self.solver = solver
self.data = self.solver_r.data
self.cost = None
self.grad = None
def cost_function(self, z_v):
self.z.vector().set_local(z_v)
w_r = self.solver_r.forward_reduced(self.z)
y_r = self.solver_r.qoi_reduced(w_r)
e_NN = self.err_model.predict([[z_v]])[0]
self.solver._k.assign(self.z)
y_romml = y_r + e_NN
# self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2 + dl.assemble(self.solver.reg)
self.cost = 0.5 * np.linalg.norm(y_romml - self.data)**2
return self.cost
def gradient(self, z_v):
self.z.vector().set_local(z_v)
self.solver._k.assign(self.z)
self.grad, self.cost = self.solver_r.grad_romml(self.z)
# self.grad = self.grad + dl.assemble(self.solver.grad_reg)
return self.grad
class RSolverWrapper:
def __init__(self, err_model, solver_r, solver):
self.err_model = err_model
self.solver_r = solver_r
self.z = dl.Function(V)
self.solver = solver
self.data = self.solver_r.data
self.cost = None
self.grad = None
def cost_function(self, z_v):
self.z.vector().set_local(z_v)
w_r = self.solver_r.forward_reduced(self.z)
y_r = self.solver_r.qoi_reduced(w_r)
self.solver._k.assign(self.z)
# self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg)
self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2
return self.cost
def gradient(self, z_v):
self.z.vector().set_local(z_v)
self.solver._k.assign(self.z)
self.grad, self.cost = self.solver_r.grad_reduced(self.z)
# self.grad = self.grad + dl.assemble(self.solver.grad_reg)
return self.grad
resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.2)
solver = Fin(V, True)
# Generate synthetic observations
z_true = dl.Function(V)
norm = np.random.randn(len(chol))
nodal_vals = np.exp(0.5 * chol.T @ norm)
z_true.vector().set_local(nodal_vals)
w, y, A, B, C = solver.forward(z_true)
data = solver.qoi_operator(w)
# Setup DL error model
# err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
err_model = load_bn_model()
# Initialize reduced order model
phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=",")
solver_r = AffineROMFin(V, err_model, phi, True)
solver_r.set_data(data)
solver_romml = ROMMLSolverWrapper(err_model, solver_r, solver)
solver_w = RSolverWrapper(err_model, solver_r, solver)
solver_fom = SolverWrapper(solver, data)
# Determine direction of gradient
z = dl.Function(V)
norm = np.random.randn(len(chol))
eps_z = np.exp(0.5 * chol.T @ norm)
z.vector().set_local(eps_z)
eps_norm = np.sqrt(dl.assemble(dl.inner(z,z)*dl.dx))
eps_norm = np.linalg.norm(eps_z)
eps_z = eps_z/eps_norm
# Determine location to evaluate gradient at
norm = np.random.randn(len(chol))
z_ = np.exp(0.5 * chol.T @ norm)
# Evaluate directional derivative using ROMML
dir_grad = np.dot(solver_romml.gradient(z_), eps_z)
print(f"Directional gradient ROMML: {dir_grad}")
n_eps = 32
hs = np.power(2., -np.arange(n_eps))
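# Finite-difference check: by Taylor expansion, (pi(z + h*eps) - pi(z))/h should
# approach the directional derivative grad(pi) . eps with an error of order h, so
# on the log-log plots below the relative-error curve should run parallel to the
# "First Order" reference line until round-off starts to dominate.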
err_grads = []
grads = []
pi_0 = solver_romml.cost_function(z_)
for h in hs:
pi_h = solver_romml.cost_function(z_ + h * eps_z)
a_g = (pi_h - pi_0)/h
grads.append(a_g)
err = abs(a_g - dir_grad)/abs(dir_grad)
# err = abs(a_g - dir_grad)
err_grads.append(err)
plt.loglog(hs, err_grads, "-ob", label="Error Grad")
plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order")
plt.savefig('grad_test_ROMML.png', dpi=200)
plt.cla()
plt.clf()
plt.semilogx(hs, grads, "-ob")
plt.savefig('gradients_ROMML.png')
plt.cla()
plt.clf()
err_grads = []
grads = []
pi_0 = solver_w.cost_function(z_)
dir_grad = np.dot(solver_w.gradient(z_), eps_z)
for h in hs:
pi_h = solver_w.cost_function(z_ + h * eps_z)
a_g = (pi_h - pi_0)/h
grads.append(a_g)
err = abs(a_g - dir_grad)/abs(dir_grad)
# err = abs(a_g - dir_grad)
err_grads.append(err)
plt.loglog(hs, err_grads, "-ob", label="Error Grad")
plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order")
plt.savefig('grad_test_ROM.png', dpi=200)
plt.cla()
plt.clf()
plt.semilogx(hs, grads, "-ob")
plt.savefig('gradients_ROM.png')
plt.cla()
plt.clf()
err_grads = []
grads = []
pi_0 = solver_fom.cost_function(z_)
dir_grad = np.dot(solver_fom.gradient(z_), eps_z)
for h in hs:
pi_h = solver_fom.cost_function(z_ + h * eps_z)
a_g = (pi_h - pi_0)/h
grads.append(a_g)
err = abs(a_g - dir_grad)/abs(dir_grad)
err_grads.append(err)
plt.loglog(hs, err_grads, "-ob", label="Error Grad")
plt.loglog(hs, (.5*err_grads[0]/hs[0])*hs, "-.k", label="First Order")
plt.savefig('grad_test_FOM.png', dpi=200)
plt.cla()
plt.clf()
plt.semilogx(hs, grads, "-ob")
plt.savefig('gradients_FOM.png')
plt.cla()
plt.clf()
#####
## Examine function behavior
####
hs = np.linspace(0, 1, 500)
pis = []
# grads = []
for h in hs:
pi_h = solver_w.cost_function(z_ + h * eps_z)
pis.append(pi_h)
# grad = solver_w.gradient(z_ + h * eps_z)
# dir_grad = np.dot(grad, eps_z)
# grads.append(dir_grad)
pi_foms = []
# grads_fom = []
dir_grad_fom = np.dot(solver_fom.gradient(z_), eps_z)
print(f"Direction gradient FOM: {dir_grad_fom}")
for h in hs:
pi_h = solver_fom.cost_function(z_ + h * eps_z)
pi_foms.append(pi_h)
# grad = solver_fom.gradient(z_ + h * eps_z)
# dir_grad = np.dot(grad, eps_z)
# grads_fom.append(dir_grad)
pi_rommls = []
# grads_romml = []
for h in hs:
pi_h = solver_romml.cost_function(z_ + h * eps_z)
pi_rommls.append(pi_h)
# grad = solver_romml.gradient(z_ + h * eps_z)
# dir_grad = np.dot(grad, eps_z)
# grads_romml.append(dir_grad)
plt.plot(hs, pi_foms)
plt.savefig('func_dir_FOM.png', dpi=200)
plt.cla()
plt.clf()
plt.plot(hs, pis)
plt.savefig('func_dir_ROM.png', dpi=200)
plt.cla()
plt.clf()
plt.plot(hs, pi_rommls)
plt.savefig('func_dir_ROMML.png', dpi=200)
plt.cla()
plt.clf()
# plt.plot(hs, grads_fom)
# plt.plot(hs, grads)
# plt.plot(hs, grads_romml)
# plt.legend(["FOM", "ROM", "ROMML"])
# plt.savefig('grad_dir.png', dpi=200)
| 2.1875 | 2 |
tests/test_roletaskmetrics.py | SODALITE-EU/iac-quality-framework | 0 | 12792342 | import json
import pytest
from ansiblemetrics.metrics_cal import MetricsCal
class TestRoleTaskMetrics:
def test_(self):
metricCal = MetricsCal()
js = json.loads(metricCal.calculate('testResources/configure.yml', 'atss'))
assert 1 == js['bloc']['count']
assert 1 == js['cloc']['count']
assert 23 == js['loc']['count']
assert 3 == js['nun']['count'] | 2.109375 | 2 |
mainAPI/AgentSelectionMechanism.py | noodlesz/fedclean_implementation | 2 | 12792343 | ''' VARIABLES EXPECTED:
a) Trade-Off Parameter (Alpha)
b) Weight/Reputation Score (Gamma)
c) Last Time The Agent was selected (b)
RETURNS a LIST of addresses of SAMPLED AGENTS
'''
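# Selection rule implemented below: each agent i is scored as
#   P_i = alpha * (gamma_i / sum(gamma)) + (1 - alpha) * (b_i / sum(b))
# where alpha is the trade-off parameter, gamma_i the reputation score and b_i
# the last-selected value; the agents with the highest P_i are sampled.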
#agents_record = {"ETH_ADDRESS":[GAMMA,B_VAL]}
from dataForAgentSelection import agents_record
from collections import defaultdict,OrderedDict
def calc_sum(agents_record):
sum_gamma = 0
sum_b_val = 0
for items in agents_record.keys():
sum_gamma+=agents_record[items][0]
sum_b_val+=agents_record[items][1]
return sum_gamma,sum_b_val
def calc_probabilities(agents_record,trade_off_param):
ret_mapping = defaultdict(int)
sum_gamma,sum_b_val = calc_sum(agents_record)
for items in agents_record.keys():
agent_prob = (trade_off_param*(agents_record[items][0]/sum_gamma)) + ((1-trade_off_param)*(agents_record[items][1]/sum_b_val))
ret_mapping[items] = agent_prob
return ret_mapping
def sample_agents(number,final_structure):
ret_list = []
dd = OrderedDict(sorted(final_structure.items(), key = lambda x: x[1],reverse=True))
dd = dict(dd)
counter = 0
for items in dd.keys():
if counter == number:
break
ret_list.append(items)
counter+=1
return ret_list
##DRIVER##
if __name__ == '__main__':
print("The Sampled Agents are:")
#a_record = {"ascaadcadcac":[0.5,0.4],"ssacdcdac":[0.9,0.4],"adscdac":[0.8,0.9]}
trade_off = 0.6
final = calc_probabilities(agents_record,trade_off)
print(sample_agents(6,final))
| 2.765625 | 3 |
Crawlers/pig.py | sailinglove/personal-general | 0 | 12792344 | <reponame>sailinglove/personal-general
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from msedge.selenium_tools import EdgeOptions
from msedge.selenium_tools import Edge
from selenium.common import exceptions as E
import requests
import json
username = '<EMAIL>'
password = '<PASSWORD>'
link = 'education.beijing2022.cn'
# Head
# driver = webdriver.Chrome('D:\Applications\chromedriver_win32\chromedriver.exe')
# Headless
# option = webdriver.ChromeOptions()
# option.add_argument('headless')
# driver = webdriver.Chrome(chrome_options=option)
# Headed
edge_options = EdgeOptions()
edge_options.use_chromium = True
driver = Edge(options=edge_options, executable_path="D:\Applications\edgedriver_win64\MicrosoftWebDriver.exe")
# driver = webdriver.Edge()
# Headless
# edge_options = EdgeOptions()
# edge_options.use_chromium = True
# edge_options.add_argument('headless')
# driver = Edge(options=edge_options)
driver.maximize_window()
driver.get('https://education.beijing2022.cn')
# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class="iv-login"]')).click()
# driver.find_element_by_xpath("//*[@class='ivu-cascader-menu']/li[2]").click()
# driver.find_element_by_xpath("//li[contains(text(), '志愿者')]").click()
# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath("//input[@class='user']")).send_keys(username)
# driver.find_element_by_xpath("//input[@class='password']").send_keys(password)
# driver.find_element_by_xpath("//a[@class='login-btn']").click()
# temp_input_box = driver.find_element_by_xpath("//input[@placeholder='请输入手机动态口令']")
# otp = input('OTP: ')
# temp_input_box.send_keys(otp)
# driver.find_element_by_xpath("/html/body/div[3]/div[2]/div[3]/form/a").click()
# WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), "必修课程")]/following-sibling::div'))
# cookies = driver.get_cookies()
# with open('cookies.json', 'w') as f:
# f.write(json.dumps(cookies))
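# First run: perform the interactive login above (currently commented out) and
# dump the session cookies to cookies.json; later runs reuse those cookies so
# the OTP step can be skipped.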
with open('cookies.json', 'r') as f:
cookies = json.loads(f.read())
print(cookies)
for cookie in cookies:
driver.add_cookie(cookie)
driver.get('https://education.beijing2022.cn/courseDetail?id=a87dfd70-4b65-11ec-a1af-7cd30ae46f00&type=requiredCourse')
# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id="app"]/div/div[1]/div[2]/section[1]/article/div/div/ul/li[3]')).click()
# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id="app"]/div/div[1]/div[2]/section[1]/article/ul/li[2]/div[1]/button')).click()
WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id="app"]/div/div[2]/div[1]/div[2]/div')).click()
WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id="app"]/div/div[3]/section/article/div[1]/div/div/div/div/p[3]/button')).click()
page_no = int(WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id="iv-means-img"]/div[2]/span[3]')).text.strip('/'))
for page in range(page_no):
page_img_src = WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@id="iv-means-img"]/div[1]/div/div[1]/img')).get_attribute("src")
r = requests.get(page_img_src, verify=False)
if r.ok:
if len(r.content) > 0:
with open("{}.png".format(page+1), "wb") as f:
f.write(r.content)
print(page+1)
else:
print("no data")
else:
print("not ok")
driver.find_element_by_xpath('//*[@id="iv-means-img"]/div[4]').click()
driver.quit() | 2.609375 | 3 |
Levels/level04.py | SC-HARSH/Parkour-Game | 0 | 12792345 | <reponame>SC-HARSH/Parkour-Game
from ursina import *
import sys
sys.path.append('../Parkour/')
from block import *
normalSpeed = 2
boostSpeed = 5
normalJump = 0.3
# Level04
class Level04(Entity):
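    """Lava parkour level: a mesh level, a lava plane and a long chain of
    Normal/Speed/Jump blocks. enable()/disable() toggle every entity, while
    update() handles respawning after a fall or lava touch, the 'g' restart
    key, and the per-block speed/jump modifiers applied to the player."""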
def __init__(self):
super().__init__()
self.is_enabled = False
self.on = False
self.level = Entity(model = "lava_level_4.obj", color = "#454545", collider = "mesh", scale = (10, 10, 10))
self.lava = Entity(model = "plane", color = "#ff6700", collider = "mesh", scale = (1000, 1, 1000), position = (0, -30, 0))
self.block_4_1 = NormalBlock((5, 2, -128))
self.block_4_2 = NormalBlock((5, 2, -112))
self.block_4_3 = NormalBlock((5, 2, -96))
self.block_4_4 = NormalBlock((5, 2, -80))
self.block_4_5 = NormalBlock((5, 2, -64))
self.block_4_6 = SpeedBlock((5, 2, -46))
self.block_4_7 = NormalBlock((5, 2, -10))
self.block_4_8 = NormalBlock((5, 2, 8))
self.block_4_9 = NormalBlock((5, 2, 24))
self.block_4_10 = NormalBlock((5, 2, 40))
self.block_4_11 = JumpBlock((5, -20, 64))
self.block_4_12 = NormalBlock((5, 6, 96))
self.block_4_13 = SpeedBlock((5, 6, 115))
self.block_4_14 = SpeedBlock((5, 6, 145))
self.block_4_15 = NormalBlock((5, -25, 201))
self.block_4_16 = NormalBlock((5, -25, 217))
self.block_4_17 = JumpBlock((-8, -25, 217))
self.block_4_18 = NormalBlock((-36, 18, 217))
self.block_4_19 = SpeedBlock((-55, 18, 217), (0, 90, 0))
self.block_4_20 = SpeedBlock((-85, 18, 217), (0, 90, 0))
self.block_4_21 = SpeedBlock((-120, 18, 217), (0, 90, 0))
self.block_4_22 = SpeedBlock((-165, 18, 217), (0, 90, 0))
self.block_4_23 = SpeedBlock((-215, 18, 217), (0, 90, 0))
self.block_4_24 = NormalBlock((-275, 18, 217))
self.block_4_25 = JumpBlock((-275, -20, 190))
self.block_4_26 = JumpBlock((-275, -20, 140))
self.block_4_27 = JumpBlock((-275, -20, 90))
self.block_4_28 = JumpBlock((-275, -20, 40))
self.block_4_29 = JumpBlock((-275, -20, -10))
self.block_4_30 = JumpBlock((-275, -20, -60))
self.block_4_31 = NormalBlock((-275, 25, -89))
self.block_4_32 = NormalBlock((-275, 20, -109))
self.block_4_33 = NormalBlock((-275, 15, -129))
self.finishBlock_4 = EndBlock((-275, 3, -161))
self.secret_1 = NormalBlock((-50, 35, -156))
self.secret_2 = NormalBlock((-100, 35, -156))
self.secret_3 = NormalBlock((-150, 35, -156))
self.secret_4 = NormalBlock((-200, 35, -156))
self.player = None
self.disable()
def disable(self):
self.is_enabled = False
self.on = False
self.level.disable()
self.lava.disable()
self.block_4_1.disable()
self.block_4_2.disable()
self.block_4_3.disable()
self.block_4_4.disable()
self.block_4_5.disable()
self.block_4_6.disable()
self.block_4_7.disable()
self.block_4_8.disable()
self.block_4_9.disable()
self.block_4_10.disable()
self.block_4_11.disable()
self.block_4_12.disable()
self.block_4_13.disable()
self.block_4_14.disable()
self.block_4_15.disable()
self.block_4_16.disable()
self.block_4_17.disable()
self.block_4_18.disable()
self.block_4_19.disable()
self.block_4_20.disable()
self.block_4_21.disable()
self.block_4_22.disable()
self.block_4_23.disable()
self.block_4_24.disable()
self.block_4_25.disable()
self.block_4_26.disable()
self.block_4_27.disable()
self.block_4_28.disable()
self.block_4_29.disable()
self.block_4_30.disable()
self.block_4_31.disable()
self.block_4_32.disable()
self.block_4_33.disable()
self.secret_1.disable()
self.secret_2.disable()
self.secret_3.disable()
self.secret_4.disable()
self.finishBlock_4.disable()
def enable(self):
self.is_enabled = True
self.on = True
self.level.enable()
self.lava.enable()
self.block_4_1.enable()
self.block_4_2.enable()
self.block_4_3.enable()
self.block_4_4.enable()
self.block_4_5.enable()
self.block_4_6.enable()
self.block_4_7.enable()
self.block_4_8.enable()
self.block_4_9.enable()
self.block_4_10.enable()
self.block_4_11.enable()
self.block_4_12.enable()
self.block_4_13.enable()
self.block_4_14.enable()
self.block_4_15.enable()
self.block_4_16.enable()
self.block_4_17.enable()
self.block_4_18.enable()
self.block_4_19.enable()
self.block_4_20.enable()
self.block_4_21.enable()
self.block_4_22.enable()
self.block_4_23.enable()
self.block_4_24.enable()
self.block_4_25.enable()
self.block_4_26.enable()
self.block_4_27.enable()
self.block_4_28.enable()
self.block_4_29.enable()
self.block_4_30.enable()
self.block_4_31.enable()
self.block_4_32.enable()
self.block_4_33.enable()
self.secret_1.enable()
self.secret_2.enable()
self.secret_3.enable()
self.secret_4.enable()
self.finishBlock_4.enable()
def speed(self):
self.player.SPEED = normalSpeed
def update(self):
if self.is_enabled == True:
self.light = DirectionalLight()
self.is_enabled = False
else:
self.light = None
# Stops the player from falling forever
if self.is_enabled == True and self.player.position.y <= -50:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
self.player.position = (5, 10, -128)
self.player.rotation = (0, 181, 0)
self.player.count = 0.0
# Restart the level
if self.on == True and held_keys["g"]:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
self.player.position = (5, 10, -128)
self.player.rotation = (0, 0, 0)
self.player.count = 0.0
# What entity the player hits
hit = raycast(self.player.position, self.player.down, distance = 2, ignore = [self.player, ])
if hit.entity == self.lava:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
self.player.position = (5, 10, -128)
self.player.rotation = (0, 0, 0)
self.player.count = 0.0
if hit.entity == self.level:
self.player.jump_height = normalJump
if hit.entity == self.block_4_6:
self.player.SPEED = 5
elif hit.entity == self.block_4_7:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
if hit.entity == self.block_4_11:
self.player.jump_height = 1.2
elif hit.entity == self.block_4_12:
self.player.jump_height = normalJump
elif hit.entity == self.block_4_13:
self.player.jump_height = normalJump
if hit.entity == self.block_4_13:
self.player.SPEED = 4
if hit.entity == self.block_4_14:
self.player.SPEED = 5
elif hit.entity == self.block_4_15:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
elif hit.entity == self.block_4_16:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
if hit.entity == self.block_4_17:
self.player.jump_height = 1.2
elif hit.entity == self.block_4_18:
self.player.jump_height = normalJump
if hit.entity == self.block_4_19:
self.player.SPEED = 4
if hit.entity == self.block_4_20:
self.player.SPEED = 5
if hit.entity == self.block_4_21:
self.player.SPEED = 6.5
if hit.entity == self.block_4_22:
self.player.SPEED = 7
if hit.entity == self.block_4_23:
self.player.SPEED = 9
elif hit.entity == self.block_4_24:
self.player.SPEED = normalSpeed
self.player.jump_height = normalJump
if hit.entity == self.block_4_25:
self.player.SPEED = normalSpeed
self.player.jump_height = 1.2
if hit.entity == self.block_4_26:
self.player.SPEED = normalSpeed
self.player.jump_height = 1.2
if hit.entity == self.block_4_27:
self.player.jump_height = 1.2
if hit.entity == self.block_4_28:
self.player.jump_height = 1.2
if hit.entity == self.block_4_29:
self.player.jump_height = 1.2
if hit.entity == self.block_4_30:
self.player.jump_height = 1.2
elif hit.entity == self.block_4_31:
self.player.jump_height = normalJump
self.player.SPEED = normalSpeed
if hit.entity == self.finishBlock_4:
destroy(self.light)
| 2.40625 | 2 |
utils/files.py | devteamepic/worker | 0 | 12792346 | import urllib.request
import os
from pathlib import Path
def get_project_root() -> Path:
return Path(__file__).parent.parent
PROTOCOL = "http://"
FILES_URL_ROOT = PROTOCOL + "localhost:3000"
ROOT_DIR = get_project_root()
def download(uri):
create_dirs_from_uri(uri)
urllib.request.urlretrieve(f"{FILES_URL_ROOT}{uri}", f"{ROOT_DIR}{uri}")
def create_dirs_from_uri(path_string):
    # Create the destination directory under the project root so it matches
    # the path that download() writes to.
    file_path = os.path.dirname(f"{ROOT_DIR}{path_string}")
    if not os.path.isdir(file_path):
        os.makedirs(file_path)
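# Illustrative usage (the URI must exist on the files service):
#   download("/files/report.pdf")  # fetched from FILES_URL_ROOT and written
#                                  # to <project root>/files/report.pdf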
| 3.1875 | 3 |
py_pdf_term/pdftoxml/_pdftoxml/__init__.py | kumachan-mis/pdf-slides-term | 1 | 12792347 | <gh_stars>1-10
from .converter import PDFtoXMLConverter
from .data import PDFnXMLPath, PDFnXMLElement
__all__ = [
"PDFtoXMLConverter",
"PDFnXMLPath",
"PDFnXMLElement",
]
| 1.570313 | 2 |
Exercicios/exe018.py | EmersonLCruz/Python | 0 | 12792348 | # Write a program that reads any angle and shows the sine, cosine and tangent of that angle on the screen.
import math
angulo = float(input('Digite o valor do angulo:'))
coseno = math.cos(math.radians(angulo))
seno = math.sin(math.radians(angulo))
tangente = math.tan(math.radians(angulo))
print('Coseno de {} é {:.2f}'.format(angulo,coseno))
print('Seno de {} é {:.2f}'.format(angulo,seno))
print('Tangente de {} é {:.2f}'.format(angulo,tangente)) | 4.0625 | 4 |
bakers_registry/encoding.py | baking-bad/bakers-registry-cli | 4 | 12792349 | import re
from decimal import Decimal
def decode_mutez(value):
return Decimal(value) / 10000
def decode_percent(value, decimals=2):
return Decimal(value) / 10 ** decimals
def decode_split(value):
return 1 - decode_percent(value, decimals=4)
def decode_hex(value):
return value.decode()
def decode_info(info):
data = info['data']
return {
'bakerName': decode_hex(data['bakerName']),
'openForDelegation': data['openForDelegation'],
'bakerOffchainRegistryUrl': decode_hex(data['bakerOffchainRegistryUrl']),
'fee': str(decode_split(data['split'])),
'bakerPaysFromAccounts': data['bakerPaysFromAccounts'],
'minDelegation': str(decode_mutez(data['minDelegation'])),
'subtractPayoutsLessThanMin': data['subtractPayoutsLessThanMin'],
'payoutDelay': data['payoutDelay'],
'payoutFrequency': data['payoutFrequency'],
'minPayout': str(decode_mutez(data['minPayout'])),
'bakerChargesTransactionFee': data['bakerChargesTransactionFee'],
'paymentConfig': {
'payForOwnBlocks': data['paymentConfigMask'] & 1 > 0,
'payForStolenBlocks': data['paymentConfigMask'] & 2048 > 0,
'compensateMissedBlocks': data['paymentConfigMask'] & 1024 == 0,
'payForEndorsements': data['paymentConfigMask'] & 2 > 0,
'compensateLowPriorityEndorsementLoss': data['paymentConfigMask'] & 8192 == 0,
'compensateMissedEndorsements': data['paymentConfigMask'] & 4096 == 0,
'payGainedFees': data['paymentConfigMask'] & 4 > 0,
'payForAccusationGains': data['paymentConfigMask'] & 8 > 0,
'subtractLostDepositsWhenAccused': data['paymentConfigMask'] & 16 > 0,
'subtractLostRewardsWhenAccused': data['paymentConfigMask'] & 32 > 0,
'subtractLostFeesWhenAccused': data['paymentConfigMask'] & 64 > 0,
'payForRevelation': data['paymentConfigMask'] & 128 > 0,
'subtractLostRewardsWhenMissRevelation': data['paymentConfigMask'] & 256 > 0,
'subtractLostFeesWhenMissRevelation': data['paymentConfigMask'] & 512 > 0
},
'overDelegationThreshold': str(decode_percent(data['overDelegationThreshold'])),
'subtractRewardsFromUninvitedDelegation': data['subtractRewardsFromUninvitedDelegation'],
'reporterAccount': info['reporterAccount']
}
def try_hex_encode(data):
    if re.match('^[0-9a-f]+$', data) and len(data) % 2 == 0:
return bytes.fromhex(data)
else:
return data.encode()
def encode_config_mask(data, default):
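    """Build the integer paymentConfigMask from the supplied registry data.

    An explicit 'paymentConfigMask' value wins; otherwise the human-readable
    'paymentConfig' flags are folded into the bitmask (the same bit positions
    that decode_info expands). If neither field is present, `default` is
    returned.
    """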
if data.get('paymentConfigMask'):
return int(data['paymentConfigMask'])
if data.get('paymentConfig'):
mask = 0
config = data['paymentConfig']
if config.get('payForOwnBlocks'):
mask |= 1
if config.get('payForStolenBlocks'):
mask |= 2048
if not config.get('compensateMissedBlocks'):
mask |= 1024
if config.get('payForEndorsements'):
mask |= 2
if not config.get('compensateLowPriorityEndorsementLoss'):
mask |= 8192
if not config.get('compensateMissedEndorsements'):
mask |= 4096
if config.get('payGainedFees'):
mask |= 4
if config.get('payForAccusationGains'):
mask |= 8
if config.get('subtractLostDepositsWhenAccused'):
mask |= 16
if config.get('subtractLostRewardsWhenAccused'):
mask |= 32
if config.get('subtractLostFeesWhenAccused'):
mask |= 64
if config.get('payForRevelation'):
mask |= 128
if config.get('subtractLostRewardsWhenMissRevelation'):
mask |= 256
        if config.get('subtractLostFeesWhenMissRevelation'):
            mask |= 512
        return mask
    return default
def encode_mutez(value):
if isinstance(value, str):
res = int(Decimal(value) * 10000)
elif isinstance(value, int):
res = value
else:
assert False, value
assert res >= 0, 'Cannot be negative'
return res
def encode_percent(value, decimals=2):
factor = 10 ** decimals
if isinstance(value, str):
res = int(Decimal(value) * factor)
elif isinstance(value, int):
res = value
else:
assert False, value
assert 0 <= res <= factor, f'Should be between 0 and {factor}'
return res
def encode_split(data):
if data.get('split'):
res = int(data['split'])
elif data.get('fee'):
res = 10000 - encode_percent(data['fee'], decimals=4)
else:
res = 10000
return res
def encode_info(info):
return {
'data': {'bakerName': try_hex_encode(info.get('bakerName', '')),
'openForDelegation': info.get('openForDelegation', True),
'bakerOffchainRegistryUrl': try_hex_encode(info.get('bakerOffchainRegistryUrl', '')),
'split': encode_split(info),
'bakerPaysFromAccounts': info.get('bakerPaysFromAccounts', []),
'minDelegation': encode_mutez(info.get('minDelegation', 0)),
'subtractPayoutsLessThanMin': info.get('subtractPayoutsLessThanMin', True),
'payoutDelay': info.get('payoutDelay', 0),
'payoutFrequency': info.get('payoutFrequency', 1),
'minPayout': encode_mutez(info.get('minPayout', 0)),
'bakerChargesTransactionFee': info.get('bakerChargesTransactionFee', False),
'paymentConfigMask': encode_config_mask(info, 16383),
'overDelegationThreshold': encode_percent(info.get('overDelegationThreshold', 100)),
'subtractRewardsFromUninvitedDelegation': info.get('subtractRewardsFromUninvitedDelegation', True)},
'reporterAccount': info['reporterAccount']
}
def decode_snapshot(snapshot: dict):
return dict(map(lambda x: (x[0], decode_info(x[1])), snapshot.items()))
| 2.796875 | 3 |
jupyterlab_chameleon/db.py | super-cooper/jupyterlab-chameleon | 1 | 12792350 | from dataclasses import astuple, dataclass, fields
from importlib import resources
import os
import sqlite3
from .exception import ArtifactNotFoundError, DuplicateArtifactError
import logging
LOG = logging.getLogger(__name__)
DATABASE_NAME = 'chameleon'
@dataclass
class Artifact:
id: str
path: str
deposition_repo: str
ownership: str
ARTIFACT_COLUMNS = [f.name for f in fields(Artifact)]
class DB:
IN_MEMORY = ':memory:'
def __init__(self, database=None):
if not database:
raise ValueError('A database path is required')
if database != DB.IN_MEMORY:
try:
os.makedirs(os.path.dirname(database), exist_ok=True)
except OSError:
LOG.exception(f'Failed to lazy-create DB path {database}')
self.database = database
self._conn = None
def build_schema(self):
with resources.open_text(__package__, 'db_schema.sql') as f:
with self.connect() as conn:
conn.executescript(f.read())
def reset(self):
with self.connect() as conn:
cur = conn.cursor()
cur.execute('delete from artifacts')
def list_artifacts(self):
with self.connect() as conn:
cur = conn.cursor()
cur.execute(f'select {",".join(ARTIFACT_COLUMNS)} from artifacts')
return [Artifact(*row) for row in cur.fetchall()]
def insert_artifact(self, artifact: Artifact):
with self.connect() as conn:
cur = conn.cursor()
cur.execute(
(f'insert into artifacts ({",".join(ARTIFACT_COLUMNS)}) '
'values (?, ?, ?, ?)'),
astuple(artifact))
def update_artifact(self, artifact: Artifact):
path = artifact.path
with self.connect() as conn:
cur = conn.cursor()
cur.execute('select id from artifacts where path = ?', (path,))
found = cur.fetchall()
if len(found) > 1:
raise DuplicateArtifactError(
'Multiple artifacts already found at %s', path)
elif found and found[0][0] is not None:
raise DuplicateArtifactError(
'Would create duplicate artifact at %s: %s', path, found[0][0])
elif not found:
raise ArtifactNotFoundError(
'Cannot find artifact at %s', path)
updates = ','.join([f'{col}=?' for col in ARTIFACT_COLUMNS])
cur.execute(f'update artifacts set {updates} where path=?',
astuple(artifact) + (path,))
def connect(self) -> sqlite3.Connection:
if not self._conn:
self._conn = sqlite3.connect(self.database)
return self._conn
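# Illustrative usage (paths and artifact values are made up):
#   db = DB(database=os.path.expanduser('~/.chameleon/chameleon.db'))
#   db.build_schema()
#   db.insert_artifact(Artifact(id=None, path='work/experiment',
#                               deposition_repo='chameleon', ownership='own'))
#   print(db.list_artifacts())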
| 2.484375 | 2 |