| Column | Type / range |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 5–2.06M |
| ext | stringclasses 10 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 3–248 |
| max_stars_repo_name | stringlengths 5–125 |
| max_stars_repo_head_hexsha | stringlengths 40–78 |
| max_stars_repo_licenses | listlengths 1–10 |
| max_stars_count | int64 1–191k (nullable) |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 (nullable) |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 (nullable) |
| max_issues_repo_path | stringlengths 3–248 |
| max_issues_repo_name | stringlengths 5–125 |
| max_issues_repo_head_hexsha | stringlengths 40–78 |
| max_issues_repo_licenses | listlengths 1–10 |
| max_issues_count | int64 1–67k (nullable) |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 (nullable) |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 (nullable) |
| max_forks_repo_path | stringlengths 3–248 |
| max_forks_repo_name | stringlengths 5–125 |
| max_forks_repo_head_hexsha | stringlengths 40–78 |
| max_forks_repo_licenses | listlengths 1–10 |
| max_forks_count | int64 1–105k (nullable) |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 (nullable) |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 (nullable) |
| content | stringlengths 5–2.06M |
| avg_line_length | float64 1–1.02M |
| max_line_length | int64 3–1.03M |
| alphanum_fraction | float64 0–1 |
| count_classes | int64 0–1.6M |
| score_classes | float64 0–1 |
| count_generators | int64 0–651k |
| score_generators | float64 0–1 |
| count_decorators | int64 0–990k |
| score_decorators | float64 0–1 |
| count_async_functions | int64 0–235k |
| score_async_functions | float64 0–1 |
| count_documentation | int64 0–1.04M |
| score_documentation | float64 0–1 |
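As a quick illustration of how these columns fit together, here is a minimal sketch for loading and filtering rows of this shape. It assumes the rows have been exported to a local Parquet file named python_files.parquet; the file name and the numeric thresholds are illustrative assumptions, not part of the dataset itself.

```python
import pandas as pd

# Hypothetical local export of rows with the schema above.
df = pd.read_parquet("python_files.parquet")

# Keep reasonably small, well-documented Python files using the derived
# score columns (per the schema, every score_* column is a float in 0..1).
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 50_000)
    & (df["score_documentation"] > 0.2)
]

for _, row in subset.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```

Each row below lists the repository metadata cells first (the stars, issues, and forks variants), then the file content, then the line-length and score statistics.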
759bf327009e2a0d20028cd808a8ae5f1f115897 | 429 | py | Python | generate_target.py | jmpmulter/coolon-pipeline | 8e92aa717e89d4ec901366a20da58e4fab14f778 | ["MIT"] | null | null | null | generate_target.py | jmpmulter/coolon-pipeline | 8e92aa717e89d4ec901366a20da58e4fab14f778 | ["MIT"] | null | null | null | generate_target.py | jmpmulter/coolon-pipeline | 8e92aa717e89d4ec901366a20da58e4fab14f778 | ["MIT"] | null | null | null |
#generate_target.py
#Generates a list of target genes from a flybase output
import os
import sys
def main():
in_path = sys.argv[1]
out_path = sys.argv[2]
infile = open(in_path, "r")
outfile = open(out_path, "x")
for l0 in infile:
l0s = l0.split(",")
for item in l0s:
if "Dsec" in item:
outfile.write(item.split("\\")[1]+"\n")
if __name__ == "__main__":
main()
| 23.833333 | 55 | 0.575758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.249417 |
759bf36e0615b479286d04e29a2aa057689eab17 | 3,512 | py | Python | pgAdmin/utils/master_password.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | ["PostgreSQL"] | null | null | null | pgAdmin/utils/master_password.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | ["PostgreSQL"] | 3 | 2021-09-02T15:51:44.000Z | 2022-03-02T09:53:17.000Z | win64-postgresql/pgAdmin 4/web/pgadmin/utils/master_password.py | vnetcon/curvy | ed3749bd5d298c7ab6c0625de91c211d6da4c762 | ["Apache-2.0"] | null | null | null |
import config
from flask import current_app
from flask_login import current_user
from pgadmin.model import db, User, Server
from pgadmin.utils.crypto import encrypt, decrypt
MASTERPASS_CHECK_TEXT = 'ideas are bulletproof'
def set_crypt_key(_key, _new_login=True):
"""
Set the crypt key
:param _key: The key
:param _new_login: Is fresh login or password change
"""
current_app.keyManager.set(_key, _new_login)
def get_crypt_key():
"""
Returns the crypt key
:return: the key
"""
enc_key = current_app.keyManager.get()
# if desktop mode and master pass disabled then use the password hash
if not config.MASTER_PASSWORD_REQUIRED \
and not config.SERVER_MODE:
return True, current_user.password
# if desktop mode and master pass enabled
elif config.MASTER_PASSWORD_REQUIRED \
and not config.SERVER_MODE and enc_key is None:
return False, None
else:
return True, enc_key
def validate_master_password(password):
"""
Validate the password/key against the stored encrypted text
:param password: password/key
:return: Valid or not
"""
# master pass is incorrect if decryption fails
try:
decrypted_text = decrypt(current_user.masterpass_check, password)
if isinstance(decrypted_text, bytes):
decrypted_text = decrypted_text.decode()
if MASTERPASS_CHECK_TEXT != decrypted_text:
return False
else:
return True
except Exception as _:
return False
def set_masterpass_check_text(password, clear=False):
"""
Set the encrypted text which will be used later to validate entered key
:param password: password/key
:param clear: remove the encrypted text
"""
try:
masterpass_check = None
if not clear:
masterpass_check = encrypt(MASTERPASS_CHECK_TEXT, password)
# set the encrypted sample text with the new
# master pass
db.session.query(User) \
.filter(User.id == current_user.id) \
.update({User.masterpass_check: masterpass_check})
db.session.commit()
except Exception as _:
db.session.rollback()
raise
def cleanup_master_password():
"""
Remove the master password and saved passwords from DB which are
encrypted using master password. Also remove the encrypted text
"""
# also remove the master password check string as it will help if master
# password entered/enabled again
set_masterpass_check_text('', clear=True)
from pgadmin.browser.server_groups.servers.utils \
import remove_saved_passwords
remove_saved_passwords(current_user.id)
current_app.keyManager.hard_reset()
from pgadmin.utils.driver import get_driver
driver = get_driver(config.PG_DEFAULT_DRIVER)
for server in Server.query.filter_by(user_id=current_user.id).all():
manager = driver.connection_manager(server.id)
manager.update(server)
def process_masterpass_disabled():
"""
On master password disable, remove the connection data from session as it
may have saved password which will cause trouble
:param session: Flask session
:param conn_data: connection manager copy from session if any
"""
if not config.SERVER_MODE and not config.MASTER_PASSWORD_REQUIRED \
and current_user.masterpass_check is not None:
cleanup_master_password()
return True
return False
| 29.266667 | 77 | 0.691913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.343109 |
759c9c03daed3987b022572f95ef159fbf48d6f3 | 238 | py | Python | hrsalespipes/dashboard/urls.py | hanztura/hrsalespipes | 77accf3132726ced05d84fa2a41891b841f310b8 | ["Apache-2.0"] | 3 | 2020-03-26T12:43:43.000Z | 2021-05-10T14:35:51.000Z | hrsalespipes/dashboard/urls.py | hanztura/hrsalespipes | 77accf3132726ced05d84fa2a41891b841f310b8 | ["Apache-2.0"] | 5 | 2021-04-08T21:15:15.000Z | 2022-02-10T11:03:12.000Z | hrsalespipes/dashboard/urls.py | hanztura/hrsalespipes | 77accf3132726ced05d84fa2a41891b841f310b8 | ["Apache-2.0"] | 1 | 2022-01-30T19:24:48.000Z | 2022-01-30T19:24:48.000Z |
from django.urls import path
from .views import DashboardTemplateView, DashboardView
app_name = 'dashboard'
urlpatterns = [
path('test/', DashboardView.as_view(), name='test'),
path('', DashboardView.as_view(), name='index'),
]
| 23.8 | 56 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.138655 |
759ce6e746deead9ce63c2abe9211efd40789622 | 904 | py | Python | tests/test_group_deletion.py | igoldin74/python_for_testers | c992f85f7b08487e79c4c45ab86e0fdeb2c47b20 | ["Apache-2.0"] | null | null | null | tests/test_group_deletion.py | igoldin74/python_for_testers | c992f85f7b08487e79c4c45ab86e0fdeb2c47b20 | ["Apache-2.0"] | null | null | null | tests/test_group_deletion.py | igoldin74/python_for_testers | c992f85f7b08487e79c4c45ab86e0fdeb2c47b20 | ["Apache-2.0"] | null | null | null |
import random
from model.group import Group
def test_group_removal(app, db, check_ui):
# make sure at least one group exists before choosing one to delete
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test_group_random_name", header="random_header", footer="random_footer"))
old_group_list = db.get_group_list()
group = random.choice(old_group_list)
app.group.delete_group_by_id(group.id)
assert app.group.count() == len(old_group_list) - 1
new_group_list = db.get_group_list()
old_group_list.remove(group)
assert old_group_list == new_group_list
if check_ui: # this will execute when "--check_ui" run option is added
def clean(group): # this func removes spaces from group names
return Group(id=group.id, name=group.name.strip())
db_list = map(clean, new_group_list)
assert sorted(db_list, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| 45.2 | 110 | 0.713496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.170354 |
759d23943bc7a51dc76aa89f5a85cc113775bdad | 1,946 | py | Python | projects/wizard_of_wikipedia_ko/generator/train_end2end.py | kimsan0622/anonymous_kowow | 25f55add8e657b2186dfdedca3e5035b567b235e | ["MIT"] | 2 | 2021-09-06T16:58:53.000Z | 2022-01-14T04:17:48.000Z | projects/wizard_of_wikipedia_ko/generator/train_end2end.py | kimsan0622/anonymous_kowow | 25f55add8e657b2186dfdedca3e5035b567b235e | ["MIT"] | null | null | null | projects/wizard_of_wikipedia_ko/generator/train_end2end.py | kimsan0622/anonymous_kowow | 25f55add8e657b2186dfdedca3e5035b567b235e | ["MIT"] | 1 | 2022-01-14T09:01:41.000Z | 2022-01-14T09:01:41.000Z |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.scripts.train_model import setup_args, TrainLoop
if __name__ == '__main__':
parser = setup_args()
parser.set_defaults(
task='wizard_of_wikipedia_ko:generator:train',
model='projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent',
model_file='/tmp/end2end_generator/model',
t5_model_arch='pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K',
text_truncate=256,
ln='ko',
log_every_n_secs=10,
validation_patience=12,
validation_metric='ppl',
validation_metric_mode='min',
validation_every_n_epochs=0.5,
truncate=256,
max_knowledge=32,
knowledge_alpha=0.95,
knowledge_truncate=64,
learningrate=5e-4,
warmup_updates=5000,
clip=0.1,
lr_scheduler='invsqrt',
embedding_type='fasttext',
beam_size=1,
skip_generation=False,
batchsize=64,
)
TrainLoop(parser.parse_args()).train()
# parlai train_model -m projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent -mf model/ke-t5_test -t wizard_of_wikipedia_ko:generator:random_split --ln en -bs 4 -eps 1 -lr 1e-5 --num-epochs 1 --optimizer adam --t5-model-arch pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K --text_truncate 512
# parlai train_model -t wizard_of_wikipedia_ko:generator:random_split --ln ke_mix -m projects.wizard_of_wikipedia_ko.generator.t5:T5EndToEndAgent -mf model/ke-t5_test --t5-model-arch ../pretrained_model/t5.1.1.base.gin_ke.ke_v100_span_corruption_600K --log-every-n-secs 10 --validation-patience 12 --validation-metric ppl --validation-metric-mode min --validation-every-n-epochs 0.5 -bs 4 --max_knowledge 32 --num-epochs 1
| 48.65 | 424 | 0.722508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,176 | 0.604317 |
759dbd8419466a5b58d9ed3efce98d055fc109cf | 37,914 | py | Python | notebooks/__code/normalization/normalization_with_simplify_selection.py | mabrahamdevops/python_notebooks | 6d5e7383b60cc7fd476f6e85ab93e239c9c32330 | ["BSD-3-Clause"] | null | null | null | notebooks/__code/normalization/normalization_with_simplify_selection.py | mabrahamdevops/python_notebooks | 6d5e7383b60cc7fd476f6e85ab93e239c9c32330 | ["BSD-3-Clause"] | null | null | null | notebooks/__code/normalization/normalization_with_simplify_selection.py | mabrahamdevops/python_notebooks | 6d5e7383b60cc7fd476f6e85ab93e239c9c32330 | ["BSD-3-Clause"] | null | null | null |
import os
import collections
import numpy as np
from ipywidgets import widgets
from IPython.core.display import display, HTML
import logging
from NeuNorm.normalization import Normalization
from __code import file_handler
from __code.ipywe import myfileselector
from __code.normalization.get import Get
from __code.normalization.metadata_handler import MetadataHandler, MetadataName, METADATA_KEYS
from __code.normalization import utilities
JSON_DEBUGGING = False
MAX_DF_COUNTS_ALLOWED = 900
METADATA_ERROR_ALLOWED = 1
LIST_METADATA_NOT_INSTRUMENT_RELATED = ['filename', 'time_stamp', 'time_stamp_user_format']
class NormalizationWithSimplifySelection:
working_dir = ''
def __init__(self, working_dir=''):
self.working_dir = working_dir
self.list_of_images = []
self.input_data_folder = []
# {0: {65027: 55.0,
# 65028: 59.2,
# 65029: 1.0,
# 'filename': 'full_filename',
# 'time_stamp': 1454544.34545,
# 'time_stamp_user_format': '2019-11-19 02:48:47'},
# ...,
# }
self.sample_metadata_dict = {}
self.ob_metadata_dict = {}
self.df_metadata_dict = {}
# key of dictionary being the acquisition time
# {50: {'config0': {'list_sample': [self.sample_metadata_dict[0],
# self.sample_metadata_dict[1],..],
# 'list_ob': [self.ob_metadata_dict[0],
# self.ob_metadata_dict[1],
# ...],
# 'list_df': [file1, file2, file3],
# 'metadata_infos': {},
# 'first_images': {'sample': {},
# 'ob': {},
# 'df': {}},
# 'last_images': {'sample': {},
# 'ob': {},
# 'df': {}},
# 'time_range_s_selected': {'before': np.NaN,
# 'after': np.NaN},
# 'time_range_s': {'before': np.NaN,
# 'after': np.NaN},
# },
# 'config1': {...},
# },
# 30: {...},
# }
self.final_full_master_dict = {}
# same as the final_full_master_dict but in this one, the OB outside the time range
# defined as excluded
self.final_with_time_range_master_dict = {}
o_get = Get(parent=self)
log_file_name = o_get.log_file_name()
logging.basicConfig(filename=log_file_name,
filemode='w',
format='[%(levelname)s] - %(asctime)s - %(message)s',
level=logging.INFO) # logging.INFO, logging.DEBUG
logging.info("*** Starting new session ***")
def select_sample_folder(self):
folder_sample_widget = myfileselector.MyFileSelectorPanel(instruction='select folder of images to normalize',
start_dir=self.working_dir,
next=self.retrieve_sample_metadata_from_sample_folder,
type='directory',
multiple=False)
folder_sample_widget.show()
def retrieve_sample_metadata_from_sample_folder(self, sample_folder):
logging.info(f"select sample folder: {sample_folder}")
[list_of_images, _] = file_handler.retrieve_list_of_most_dominant_extension_from_folder(folder=sample_folder)
can_we_continue = self.images_files_found_in_list(list_of_images)
if can_we_continue:
logging.info(f"-> number of images found: {len(list_of_images)}")
self.retrieve_sample_metadata(list_of_images)
else:
logging.info(f"-> No images found!")
display(HTML('<span style="font-size: 20px; color:Red">No images found in the folder selected!</span>'))
def images_files_found_in_list(self, list_of_images):
for _file in list_of_images:
if (".tiff" in _file) or (".tif" in _file) or (".fits" in _file):
return True
return False
def retrieve_sample_metadata(self, list_of_images):
__name__ = "retrieve_sample_metadata"
logging.info(f"Retrieving sample metadata ({__name__})")
self.list_of_images = list_of_images
self.sample_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_images,
display_infos=False,
label='sample')
# logging.info(f"self.sample_metadata_dict: {self.sample_metadata_dict}")
self.auto_retrieve_ob_metadata()
self.auto_retrieve_df_metadata()
self.match_files()
self.calculate_first_and_last_ob()
self.calculate_time_range()
self.display_time_range_selection_widgets()
def select_ob_folder(self):
self.select_folder(message='open beam',
next_function=self.retrieve_ob_metadata)
def retrieve_ob_metadata(self, selected_folder):
list_of_ob_files = Get.list_of_tiff_files(folder=selected_folder)
self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files)
def auto_retrieve_ob_metadata(self):
logging.info(f"> auto_retrieve_ob_metadata")
folder = os.path.join(self.working_dir, 'raw', 'ob')
logging.info(f"-> folder: {folder}")
list_of_ob_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
extensions=['tiff', 'tif'])
logging.info(f"-> nbr of ob files found: {len(list_of_ob_files)}")
self.ob_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_ob_files,
label='ob')
# logging.info(f"ob metadata dict")
# logging.info(f"-> {self.ob_metadata_dict}")
def select_folder(self, message="", next_function=None):
folder_widget = myfileselector.MyFileSelectorPanel(instruction='select {} folder'.format(message),
start_dir=self.working_dir,
next=next_function,
type='directory',
multiple=False)
folder_widget.show()
def select_df_folder(self):
self.select_folder(message='dark field',
next_function=self.retrieve_df_metadata)
def retrieve_df_metadata(self, selected_folder):
list_of_df_files = Get.list_of_tiff_files(folder=selected_folder)
self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files)
def auto_retrieve_df_metadata(self):
folder = os.path.join(self.working_dir, 'raw', 'df')
list_of_df_files = file_handler.get_list_of_all_files_in_subfolders(folder=folder,
extensions=['tiff', 'tif'])
logging.info(f"-> nbr of df files found: {len(list_of_df_files)}")
self.df_metadata_dict = MetadataHandler.retrieve_metadata(list_of_files=list_of_df_files,
label='df')
def match_files(self):
"""This is where the files will be associated with their respective OB, DF by using the metadata"""
if not JSON_DEBUGGING:
self.create_master_sample_dict()
self.match_ob()
self.match_df()
if JSON_DEBUGGING:
# for debugging only, exporting the json
import json
with open('/Users/j35/Desktop/which_ob_and_df_to_use.json', 'w') as outfile:
json.dump(self.final_full_master_dict, outfile)
def match_ob(self):
"""we will go through all the ob and associate them with the right sample based on
- acquisition time
- detector type
- aperture
"""
list_ob_dict = self.ob_metadata_dict
final_full_master_dict = self.final_full_master_dict
list_of_sample_acquisition = final_full_master_dict.keys()
for _index_ob in list_ob_dict.keys():
_all_ob_instrument_metadata = Get.get_instrument_metadata_only(list_ob_dict[_index_ob])
_ob_instrument_metadata = utilities.isolate_instrument_metadata(
_all_ob_instrument_metadata)
_acquisition_time = _all_ob_instrument_metadata[MetadataName.EXPOSURE_TIME.value]['value']
if _acquisition_time in list_of_sample_acquisition:
for _config_id in final_full_master_dict[_acquisition_time].keys():
_sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
if utilities.all_metadata_match(_sample_metadata_infos, _ob_instrument_metadata):
final_full_master_dict[_acquisition_time][_config_id]['list_ob'].append(list_ob_dict[_index_ob])
self.final_full_master_dict = final_full_master_dict
def match_df(self):
"""
we will go through all the df of the IPTS and will associate the df with the right samples
based on:
- detector type used
- acquisition time
"""
list_df_dict = self.df_metadata_dict
final_full_master_dict = self.final_full_master_dict
list_of_sample_acquisition = final_full_master_dict.keys()
for _index_df in list_df_dict.keys():
_all_df_instrument_metadata = Get.get_instrument_metadata_only(list_df_dict[_index_df])
_df_instrument_metadata = utilities.isolate_instrument_metadata(
_all_df_instrument_metadata)
_acquisition_time = _all_df_instrument_metadata[MetadataName.EXPOSURE_TIME.value]['value']
if _acquisition_time in list_of_sample_acquisition:
for _config_id in final_full_master_dict[_acquisition_time].keys():
_sample_metadata_infos = final_full_master_dict[_acquisition_time][_config_id]['metadata_infos']
if utilities.all_metadata_match(_sample_metadata_infos, _df_instrument_metadata,
list_key_to_check=[METADATA_KEYS['df'][
1].value]):
final_full_master_dict[_acquisition_time][_config_id]['list_df'].append(list_df_dict[_index_df])
self.final_full_master_dict = final_full_master_dict
def create_master_sample_dict(self):
final_full_master_dict = collections.OrderedDict()
sample_metadata_dict = self.sample_metadata_dict
# we need to keep record of which image was the first one taken and which image was the last one taken
first_sample_image = sample_metadata_dict[0]
last_sample_image = sample_metadata_dict[0]
for _file_index in sample_metadata_dict.keys():
_dict_file_index = sample_metadata_dict[_file_index]
_sample_file = _dict_file_index['filename']
_acquisition_time = _dict_file_index[MetadataName.EXPOSURE_TIME.value]['value']
_instrument_metadata = utilities.isolate_instrument_metadata(_dict_file_index)
_sample_time_stamp = _dict_file_index['time_stamp']
# find which image was first and which image was last
if _sample_time_stamp < first_sample_image['time_stamp']:
first_sample_image = _dict_file_index
elif _sample_time_stamp > last_sample_image['time_stamp']:
last_sample_image = _dict_file_index
# first entry or first time seeing that acquisition time
if (len(final_full_master_dict) == 0) or not (_acquisition_time in final_full_master_dict.keys()):
_first_images_dict = {'sample': first_sample_image,
'ob' : {},
'df' : {}}
_last_images_dict = {'sample': last_sample_image,
'ob' : {},
'df' : {}}
_temp_dict = {'list_sample' : [_dict_file_index],
'first_images' : _first_images_dict,
'last_images' : _last_images_dict,
'list_ob' : [],
'list_df' : [],
'time_range_s_selected': {'before': np.NaN,
'after' : np.NaN},
'time_range_s' : {'before': np.NaN,
'after' : np.NaN},
'metadata_infos' : Get.get_instrument_metadata_only(
_instrument_metadata)}
final_full_master_dict[_acquisition_time] = {}
final_full_master_dict[_acquisition_time]['config0'] = _temp_dict
else:
# check that all the metadata_infos match for the first group of that acquisition time,
# otherwise check the next one or create a group
if _acquisition_time in final_full_master_dict.keys():
_dict_for_this_acquisition_time = final_full_master_dict[_acquisition_time]
_found_a_match = False
for _config_key in _dict_for_this_acquisition_time.keys():
_config = _dict_for_this_acquisition_time[_config_key]
if (utilities.all_metadata_match(metadata_1=_config['metadata_infos'],
metadata_2=_instrument_metadata)):
_config['list_sample'].append(_dict_file_index)
_first_images_dict = {'sample': first_sample_image,
'ob' : {},
'df' : {}}
_last_images_dict = {'sample': last_sample_image,
'ob' : {},
'df' : {}}
_config['first_images'] = _first_images_dict
_config['last_images'] = _last_images_dict
_found_a_match = True
if not _found_a_match:
_first_images_dict = {'sample': first_sample_image,
'ob' : {},
'df' : {}}
_last_images_dict = {'sample': last_sample_image,
'ob' : {},
'df' : {}}
_temp_dict = {'list_sample' : [_dict_file_index],
'first_images' : _first_images_dict,
'last_images' : _last_images_dict,
'list_ob' : [],
'list_df' : [],
'time_range_s_selected': {'before': np.NaN,
'after' : np.NaN},
'time_range_s' : {'before': np.NaN,
'after' : np.NaN},
'metadata_infos' : Get.get_instrument_metadata_only(
_instrument_metadata)}
nbr_config = len(_dict_for_this_acquisition_time.keys())
_dict_for_this_acquisition_time['config{}'.format(nbr_config)] = _temp_dict
else:
_first_images_dict = {'sample': first_sample_image,
'ob' : {},
'df' : {}}
_last_images_dict = {'sample': last_sample_image,
'ob' : {},
'df' : {}}
_temp_dict = {'list_sample' : [_dict_file_index],
'first_images' : _first_images_dict,
'last_images' : _last_images_dict,
'list_ob' : [],
'list_df' : [],
'time_range_s_selected': {'before': np.NaN,
'after' : np.NaN},
'time_range_s' : {'before': np.NaN,
'after' : np.NaN},
'metadata_infos' : Get.get_instrument_metadata_only(
_instrument_metadata)}
final_full_master_dict[_acquisition_time] = {}
final_full_master_dict[_acquisition_time]['config0'] = _temp_dict
self.final_full_master_dict = final_full_master_dict
def calculate_first_and_last_ob(self):
"""this will loop through all the acquisition time keys, and config keys, to figure out
what is the first ob and last ob in this dictionary"""
_final_full_master_dict = self.final_full_master_dict
for _acquisition in _final_full_master_dict.keys():
current_acquisition_dict = _final_full_master_dict[_acquisition]
_first_ob_time = np.NaN
_first_ob = {}
_last_ob_time = np.NaN
_last_ob = {}
for _config in current_acquisition_dict.keys():
current_acquisition_config_dict = current_acquisition_dict[_config]
for _ob in current_acquisition_config_dict['list_ob']:
_current_ob_time = _ob['time_stamp']
if np.isnan(_first_ob_time):
_first_ob_time = _current_ob_time
_last_ob_time = _current_ob_time
_first_ob = _last_ob = _ob
elif _current_ob_time < _first_ob_time:
_first_ob_time = _current_ob_time
_first_ob = _ob
elif _current_ob_time > _last_ob_time:
_last_ob_time = _current_ob_time
_last_ob = _ob
current_acquisition_config_dict['first_images']['ob'] = _first_ob
current_acquisition_config_dict['last_images']['ob'] = _last_ob
def calculate_time_range(self):
"""this method will calculate the max time range of OB taken before or after and will use that
for the slider selection time range
Provide option to use all (that means, do not used any time range)
"""
_final_full_master_dict = self.final_full_master_dict
for _acquisition in _final_full_master_dict.keys():
current_acquisition_dict = _final_full_master_dict[_acquisition]
for _config in current_acquisition_dict.keys():
current_acquisition_config_dict = current_acquisition_dict[_config]
first_sample_image = current_acquisition_config_dict['first_images']['sample']
first_ob_image = current_acquisition_config_dict['first_images']['ob']
delta_time_before = first_sample_image.get('time_stamp', 0) - first_ob_image.get('time_stamp', 0)
_time_range_s_before = delta_time_before if delta_time_before > 0 else 0
last_sample_image = current_acquisition_config_dict['last_images']['sample']
last_ob_image = current_acquisition_config_dict['last_images']['ob']
delta_time_after = last_ob_image.get('time_stamp', 0) - last_sample_image.get('time_stamp', 0)
_time_range_s_after = delta_time_after if delta_time_after > 0 else 0
_final_full_master_dict[_acquisition][_config]['time_range_s']['before'] = _time_range_s_before
_final_full_master_dict[_acquisition][_config]['time_range_s']['after'] = _time_range_s_after
def display_time_range_selection_widgets(self):
_final_full_master_dict = self.final_full_master_dict
_config_tab_dict = {} # will keep record of each config tab for each acquisition
_acquisition_tabs = widgets.Tab()
o_get = Get(parent=self)
for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
_dict_of_this_acquisition = _final_full_master_dict[_acquisition]
_config_tab = widgets.Tab()
_current_acquisition_tab_widgets_id = {'config_tab_id': _config_tab}
for _index, _config in enumerate(_dict_of_this_acquisition.keys()):
_dict_config = _dict_of_this_acquisition[_config]
_dict = o_get.full_layout_for_this_config(_dict_config)
_layout = _dict['verti_layout']
_config_widgets_id_dict = _dict['config_widgets_id_dict']
_config_tab.children += (_layout,)
_config_tab.set_title(_index, _config)
_current_acquisition_tab_widgets_id[_index] = _config_widgets_id_dict
_config_tab_dict[_acquisition_index] = _current_acquisition_tab_widgets_id
_acquisition_tabs.children += (_config_tab,) # add all the config tab to top acquisition tab
_acquisition_tabs.set_title(_acquisition_index, "Acquisition: {}s".format(_acquisition))
display(_acquisition_tabs)
self.acquisition_tab = _acquisition_tabs
self.config_tab_dict = _config_tab_dict
def calculate_max_time_before_and_after_exp_for_this_config(self, dict_config):
max_time_before = 0
first_sample_image_time_stamp = dict_config['first_images']['sample']['time_stamp']
first_ob_image_time_stamp = dict_config['first_images']['ob'].get('time_stamp', 0)
if first_ob_image_time_stamp > first_sample_image_time_stamp:
max_time_before = 0
else:
max_time_before = (first_sample_image_time_stamp - first_ob_image_time_stamp)
max_time_after = 0
last_sample_image_time_stamp = dict_config['last_images']['sample']['time_stamp']
last_ob_image_time_stamp = dict_config['last_images']['ob'].get('time_stamp', 0)
if last_ob_image_time_stamp < last_sample_image_time_stamp:
max_time_after = 0
else:
max_time_after = last_ob_image_time_stamp - last_sample_image_time_stamp
return [max_time_before, max_time_after]
def populate_metadata_table(self, current_config):
metadata_config = current_config['metadata_infos']
table_label = widgets.Label("List of Metadata used to match data set",
layout=widgets.Layout(width='30%'))
table_value = "<table style='width:50%;background-color:#eee'>"
for _key, _value in metadata_config.items():
table_value += "<tr><th>{}</th><th>{}</th></tr>".format(_value['name'], _value['value'])
table_value += "</table>"
table = widgets.HTML(value=table_value)
return [table_label, table]
def update_use_this_config_widget(self, state):
pass
# new_state = state['new']
# [active_acquisition, active_config] = self.get_active_tabs()
# self.config_tab_dict[active_acquisition][active_config]['normalize_this_config'] = new_state
def update_config_widgets(self, state):
if state['new'] is False:
# use all files
message = None
visibility = 'hidden'
else:
# user defines ranges
message = True
visibility = 'visible'
o_get = Get(parent=self)
[time_before_selected_ui, time_after_selected_ui] = o_get.time_before_and_after_ui_of_this_config()
experiment_label_ui = o_get.experiment_label_ui_of_this_config()
experiment_label_ui.layout.visibility = visibility
if visibility == 'hidden':
time_before_selected_ui.layout.visibility = 'hidden'
time_after_selected_ui.layout.visibility = 'hidden'
else:
self.show_or_not_before_and_after_sliders()
self.update_time_range_event(message)
def show_or_not_before_and_after_sliders(self):
o_get = Get(parent=self)
current_config = o_get.current_config_dict()
[max_time_elapse_before_experiment, max_time_elapse_after_experiment] = \
self.calculate_max_time_before_and_after_exp_for_this_config(current_config)
slider_before_visibility = 'visible' if max_time_elapse_before_experiment > 0 else 'hidden'
slider_after_visibility = 'visible' if max_time_elapse_after_experiment > 0 else 'hidden'
[time_before_selected_ui, time_after_selected_ui] = o_get.time_before_and_after_ui_of_this_config()
time_before_selected_ui.layout.visibility = slider_before_visibility
time_after_selected_ui.layout.visibility = slider_after_visibility
def is_custom_time_range_checked_for_this_config(self):
o_get = Get(parent=self)
current_config = o_get.current_config_of_widgets_id()
return current_config['use_custom_time_range_checkbox'].value
def update_time_range_event(self, value):
# reach when user interact with the sliders in the config tab
self.update_time_range_message(value)
self.update_list_of_files_in_widgets_using_new_time_range()
def update_list_of_files_in_widgets_using_new_time_range(self):
o_get = Get(parent=self)
# retrieve acquisition and config values
acquisition_key = o_get.active_tab_acquisition_key() # ex: '55.0'
config_key = o_get.active_tab_config_key() # ex: 'config0'
# retrieve list of ob and df for this config for this acquisition
final_full_master_dict = self.final_full_master_dict
dict_for_this_config = final_full_master_dict[float(acquisition_key)][config_key]
list_ob = dict_for_this_config['list_ob']
# no need to do anything more if user wants to use all the files
if not self.is_custom_time_range_checked_for_this_config():
list_ob_to_keep = [_file['filename'] for _file in list_ob]
else:
# retrieve first and last sample file for this config and for this acquisition
first_sample_image_time_stamp = dict_for_this_config['first_images']['sample']['time_stamp']
last_sample_images_time_stamp = dict_for_this_config['last_images']['sample']['time_stamp']
# retrieve time before and after selected
[time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
# calculate list of ob that are within that time range
list_ob_to_keep = []
for _ob_file in list_ob:
_ob_time_stamp = _ob_file['time_stamp']
if (_ob_time_stamp < first_sample_image_time_stamp) and \
((first_sample_image_time_stamp - _ob_time_stamp) <= np.abs(time_before_selected)):
list_ob_to_keep.append(_ob_file['filename'])
elif (_ob_time_stamp > last_sample_images_time_stamp) and \
((_ob_time_stamp - last_sample_images_time_stamp) <= np.abs(time_after_selected)):
list_ob_to_keep.append(_ob_file['filename'])
self.update_list_of_ob_for_current_config_tab(list_ob=list_ob_to_keep)
def update_list_of_ob_for_current_config_tab(self, list_ob=[]):
o_get = Get(parent=self)
[active_acquisition, active_config] = o_get.active_tabs()
# short_version_list_ob = NormalizationWithSimplifySelection.keep_basename_only(list_files=list_ob)
self.config_tab_dict[active_acquisition][active_config]['list_of_ob'].options = list_ob
# select everything by default
self.config_tab_dict[active_acquisition][active_config]['list_of_ob'].value = list_ob
def update_time_range_message(self, value):
o_get = Get(parent=self)
if value is None:
_message = "Use <b><font color='red'>All </b> " \
"<font color='black'>OBs and DFs " \
"matching the samples images</font>"
else:
[time_before_selected, time_after_selected] = o_get.time_before_and_after_of_this_config()
time_before_selected = np.abs(time_before_selected)
def _format_time(_time_s):
if _time_s < 60:
return "{:.2f}s".format(_time_s)
elif _time_s < 3600:
_time_mn = int(_time_s / 60.)
_time_s = int(_time_s % 60)
return "{:d}mn {:d}s".format(_time_mn, _time_s)
else:
_time_hr = int(_time_s / 3600.)
_time_s_left = _time_s - _time_hr * 3600
_time_mn = int(_time_s_left / 60.)
_time_s = int(_time_s_left % 60)
return "{:d}hr {:d}mn {:d}s".format(_time_hr, _time_mn, _time_s)
str_time_before = _format_time(time_before_selected)
str_time_after = _format_time(time_after_selected)
logging.info(f"str_time_before: {time_before_selected} -> {str_time_before}")
_message = "Use OB taken up to <b><font color='red'>" + str_time_before + "</b> " \
"<font color='black'>before and up to </font>" \
"<b><font color='red'>" + str_time_after + "</b> " \
"<font color='black'>after experiment!</font>"
time_before_and_after_message_ui = o_get.time_before_and_after_message_ui_of_this_config()
time_before_and_after_message_ui.value = _message
def checking_normalization_workflow(self):
self.create_final_json()
self.normalization_recap()
def create_final_json(self):
_final_full_master_dict = self.final_full_master_dict
_config_tab_dict = self.config_tab_dict
_final_json_dict = {}
for _acquisition_index, _acquisition in enumerate(_final_full_master_dict.keys()):
_final_json_for_this_acquisition = {}
_config_of_this_acquisition = _config_tab_dict[_acquisition_index]
_dict_of_this_acquisition = _final_full_master_dict[_acquisition]
for _config_index, _config in enumerate(_dict_of_this_acquisition.keys()):
this_config_tab_dict = _config_tab_dict[_acquisition_index][_config_index]
normalize_flag = this_config_tab_dict['use_this_config']
list_sample = this_config_tab_dict['list_of_sample_runs'].options
list_ob = this_config_tab_dict['list_of_ob'].value
list_df = this_config_tab_dict['list_of_df'].value
_final_json_for_this_acquisition[_config] = {'list_sample' : list_sample,
'list_df' : list_df,
'list_ob' : list_ob,
'normalize_this_config': normalize_flag}
_final_json_dict[_acquisition] = _final_json_for_this_acquisition
self.final_json_dict = _final_json_dict
def normalization_recap(self):
"""this will show all the config that will be run and if they have the minimum requirements or not,
which mean, at least 1 OB"""
final_json = self.final_json_dict
self.number_of_normalization = 0
table = "<table style='width:50%;border:1px solid black'>"
table += "<tr style='background-color:#eee'><th>Acquisition (s)</th><th>Config. name</th>" \
"<th>Nbr sample</th><th>Nbr OB</th><th>Nbr DF</th><th>Status</th></tr>"
for _name_acquisition in final_json.keys():
_current_acquisition_dict = final_json[_name_acquisition]
for _name_config in _current_acquisition_dict.keys():
_current_config_dict = _current_acquisition_dict[_name_config]
normalize_this_config = _current_config_dict['normalize_this_config']
nbr_ob = len(_current_config_dict['list_ob'])
nbr_df = len(_current_config_dict['list_df'])
nbr_sample = len(_current_config_dict['list_sample'])
self.number_of_normalization += 1 if nbr_ob > 0 else 0
table += utilities.populate_normalization_recap_row(
acquisition=_name_acquisition,
config=_name_config,
nbr_sample=nbr_sample,
nbr_ob=nbr_ob,
nbr_df=nbr_df,
normalize_this_config=normalize_this_config)
table += "</table>"
table_ui = widgets.HTML(table)
display(table_ui)
def select_output_folder(self):
self.output_folder_ui = myfileselector.FileSelectorPanelWithJumpFolders(
instruction='select where to create the ' + \
'normalized folders',
start_dir=self.working_dir,
ipts_folder=self.working_dir,
next=self.normalization,
type='directory',
newdir_toolbar_button=True)
def normalization(self, output_folder):
display(HTML('<span style="font-size: 20px; color:blue">Make sure you do not close the notebook until '
'the busy signal (dark circle top right) is gone!</span>'))
self.output_folder_ui.shortcut_buttons.close() # hack to hide the buttons
final_json = self.final_json_dict
number_of_normalization = self.number_of_normalization
horizontal_layout = widgets.HBox([widgets.Label("Normalization progress",
layout=widgets.Layout(width='20%')),
widgets.IntProgress(max=number_of_normalization + 1,
value=0,
layout=widgets.Layout(width='50%'))])
normalization_progress = horizontal_layout.children[1]
display(horizontal_layout)
list_full_output_normalization_folder_name = []
for _name_acquisition in final_json.keys():
_current_acquisition_dict = final_json[_name_acquisition]
for _name_config in _current_acquisition_dict.keys():
_current_config = _current_acquisition_dict[_name_config]
list_ob = _current_config['list_ob']
if len(list_ob) == 0:
normalization_progress.value += 1
continue
if not _current_config['normalize_this_config'].value:
normalization_progress.value += 1
continue
list_sample = _current_config['list_sample']
full_output_normalization_folder_name = \
utilities.make_full_output_normalization_folder_name(
output_folder=output_folder,
first_sample_file_name=list_sample[0],
name_acquisition=_name_acquisition,
name_config=_name_config)
list_full_output_normalization_folder_name.append(full_output_normalization_folder_name)
list_df = _current_config['list_df']
o_load = Normalization()
o_load.load(file=list(list_sample), notebook=True)
o_load.load(file=list(list_ob), data_type='ob')
if len(list_df) > 0:
o_load.load(file=list(list_df), data_type='df')
o_load.normalization()
o_load.export(folder=full_output_normalization_folder_name, file_type='tif')
del o_load
normalization_progress.value += 1
horizontal_layout.close()
display(HTML('<span style="font-size: 20px; color:blue">Following folders have been created:</span>'))
for _folder in list_full_output_normalization_folder_name:
_folder = _folder if _folder else "None"
display(HTML('<span style="font-size: 15px; color:blue"> -> ' + _folder + '</span>'))
| 51.304465 | 175 | 0.579786 | 37,297 | 0.983726 | 0 | 0 | 0 | 0 | 0 | 0 | 7,358 | 0.194071 |
759e0a9f6bfd13dc1e30f52a13990d9895e8e99e | 12,719 | py | Python | backups_manager_lib_test_util.py | cantstopthesignal/backups_lib | dec602fc90d285b8581af35e514eb90309b6da89 | ["Apache-2.0"] | null | null | null | backups_manager_lib_test_util.py | cantstopthesignal/backups_lib | dec602fc90d285b8581af35e514eb90309b6da89 | ["Apache-2.0"] | null | null | null | backups_manager_lib_test_util.py | cantstopthesignal/backups_lib | dec602fc90d285b8581af35e514eb90309b6da89 | ["Apache-2.0"] | null | null | null |
import contextlib
import io
import os
import re
import subprocess
from . import backups_manager_lib
from . import backups_main
from . import lib
from .test_util import AssertEquals
from .test_util import AssertLinesEqual
from .test_util import CreateDir
from .test_util import CreateFile
from .test_util import DoBackupsMain
def CreateConfig(parent_dir, backups_filename_prefix='backups', filter_merge_path=None):
config_path = os.path.join(parent_dir, '%s.config' % backups_filename_prefix)
config = backups_manager_lib.BackupsConfig(config_path)
config.image_path = os.path.join(parent_dir, '%s.sparsebundle' % backups_filename_prefix)
config.mount_path = os.path.join(parent_dir, '%s_mount' % backups_filename_prefix)
config.src_path = CreateDir(parent_dir, '%s_src' % backups_filename_prefix)
config.checkpoints_dir = CreateDir(parent_dir, '%s_checkpoints' % backups_filename_prefix)
config.filter_merge_path = filter_merge_path
config.Write()
return config
def CreateBackupsBundle(config, create_example_content=True):
lib.GetDiskImageHelper().CreateImage(
config.image_path, size='10G', filesystem='APFS', image_type='SPARSEBUNDLE', volume_name='Backups')
with lib.ImageAttacher(config.image_path, config.mount_path, readonly=False,
browseable=False) as attacher:
backups_dir = CreateDir(attacher.GetMountPoint(), backups_manager_lib.BACKUPS_SUBDIR)
backup1_dir = CreateDir(backups_dir, '2020-01-01-120000')
CreateDir(backup1_dir, '.metadata')
disk_dir = CreateDir(backup1_dir, 'Root')
if create_example_content:
CreateFile(disk_dir, 'f1')
CreateFile(disk_dir, 'fX')
CreateFile(disk_dir, 'fT')
def CreateLatestManifestCheckpoint(config):
backups_manager = backups_manager_lib.BackupsManager.Open(
config, readonly=False, browseable=False)
try:
last_backup = backups_manager.GetLastDone()
src_root = last_backup.GetContentRootPath()
output_lines = DoBackupsMain(['create-checkpoint',
'--src-root', src_root,
'--checksum-all',
'--manifest-only',
'--no-encrypt',
'--checkpoint-name', last_backup.GetName(),
'--checkpoints-dir', config.checkpoints_dir],
expected_output=None)
m = re.match('^Created checkpoint at (.+)$', output_lines[-1])
assert m
checkpoint_path = m.group(1)
AssertLinesEqual(output_lines[:-1],
['>d+++++++ .',
'>f+++++++ f1',
'>f+++++++ fT',
'>f+++++++ fX',
'Transferring 4 paths (0b)'])
manifest = lib.ReadManifestFromImageOrPath(checkpoint_path)
manifest.SetPath(last_backup.GetManifestPath())
manifest.Write()
return checkpoint_path
finally:
backups_manager.Close()
def VerifyBackupManifest(backup, path=None):
if path is None:
manifest = lib.Manifest.Load(backup.GetManifestPath())
else:
manifest = lib.ReadManifestFromImageOrPath(path)
output = io.StringIO()
verifier = lib.ManifestVerifier(manifest, backup.GetContentRootPath(), output,
checksum_path_matcher=lib.PathMatcherAll())
success = verifier.Verify()
output_lines = [ line for line in output.getvalue().strip().split('\n') if line ]
output.close()
AssertLinesEqual(output_lines, [])
if not success:
raise Exception('Verification failed')
@contextlib.contextmanager
def SetLogThrottlerLogAlways(log_throttler):
old_value = log_throttler.GetLogAlways()
log_throttler.SetLogAlways(True)
try:
yield
finally:
log_throttler.SetLogAlways(old_value)
def DoCreateCheckpoint(src_root, checkpoints_dir, checkpoint_name, expected_output=[],
last_checkpoint_path=None, filter_merge_path=None):
args = ['create-checkpoint',
'--no-encrypt',
'--checksum-all',
'--src-root', src_root,
'--checkpoints-dir', checkpoints_dir,
'--checkpoint-name', checkpoint_name]
if last_checkpoint_path is not None:
args.extend(['--last-checkpoint', last_checkpoint_path])
if filter_merge_path is not None:
args.extend(['--filter-merge-path', filter_merge_path])
output = io.StringIO()
AssertEquals(backups_main.Main(args, output), True)
output_lines = []
checkpoint_path = None
for line in output.getvalue().strip().split('\n'):
m = re.match('^Created checkpoint at (.+)$', line)
if m:
checkpoint_path = m.group(1)
continue
output_lines.append(line)
output.close()
AssertLinesEqual(output_lines, expected_output)
return checkpoint_path
def DoCreateBackup(config, backup_name=None, dry_run=False, expected_output=[]):
cmd_args = ['create-backup',
'--no-encrypt',
'--backups-config', config.path]
if backup_name is not None:
cmd_args.extend(['--backup-name', backup_name])
lines = DoBackupsMain(cmd_args, dry_run=dry_run, expected_output=None)
checkpoint_path = None
output_lines = []
for line in lines:
m = re.match('^Created checkpoint at (.+)$', line)
if m:
checkpoint_path = m.group(1)
continue
output_lines.append(line)
AssertLinesEqual(output_lines, expected_output)
return checkpoint_path
def DoApplyToBackups(config, dry_run=False, deduplicate_min_file_size=1024,
checksum_all=True, checksum_hardlinks=True, expected_success=True,
expected_output=[]):
cmd_args = ['apply-to-backups',
'--backups-config', config.path,
'--deduplicate-min-file-size', str(deduplicate_min_file_size)]
if not checksum_all:
cmd_args.append('--no-checksum-all')
if not checksum_hardlinks:
cmd_args.append('--no-checksum-hardlinks')
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
def DoListBackups(config, dry_run=False, expected_backups=[]):
cmd_args = ['list-backups',
'--backups-config', config.path]
DoBackupsMain(cmd_args, dry_run=dry_run, expected_output=expected_backups)
def DoVerifyBackups(config, dry_run=False, min_backup=None, max_backup=None,
full=True, continue_on_error=False, checksum_all=True,
expected_success=True, expected_output=[]):
cmd_args = ['verify-backups',
'--backups-config', config.path]
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
if not full:
cmd_args.append('--no-full')
if continue_on_error:
cmd_args.append('--continue-on-error')
if not checksum_all:
cmd_args.append('--no-checksum-all')
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
def DoAddMissingManifestsToBackups(config, expected_output=[]):
cmd_args = ['add-missing-manifests-to-backups',
'--backups-config', config.path]
DoBackupsMain(cmd_args, expected_output=expected_output)
def DoDeduplicateBackups(
config, min_backup=None, max_backup=None, match_older_mtimes=False, dry_run=False, verbose=False,
expected_output=[]):
cmd_args = ['deduplicate-backups',
'--min-file-size', '1024',
'--backups-config', config.path]
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
if match_older_mtimes:
cmd_args.append('--match-older-mtimes')
DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose, expected_output=expected_output)
def DoCloneBackup(config, backup_name, dry_run=False, expected_success=True, expected_output=[]):
cmd_args = ['clone-backup',
'--backups-config', config.path,
'--backup-name', backup_name]
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
def DoDeleteBackups(config, backup_names, dry_run=False, expected_success=True, expected_output=[]):
cmd_args = ['delete-backups',
'--backups-config', config.path]
for backup_name in backup_names:
cmd_args.extend(['--backup-name', backup_name])
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
def DoDeleteBackupsInteractive(config, backup_names=[], min_backup=None, max_backup=None,
ignore_matching_renames=False, include_latest_backup=False,
dry_run=False, verbose=False,
expected_success=True, expected_output=[]):
cmd_args = ['delete-backups-interactive',
'--backups-config', config.path]
for backup_name in backup_names:
cmd_args.extend(['--backup-name', backup_name])
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
if ignore_matching_renames:
cmd_args.append('--ignore-matching-renames')
if include_latest_backup:
cmd_args.append('--include-latest-backup')
DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose, expected_success=expected_success,
expected_output=expected_output)
def DoDumpUniqueFilesInBackups(config, backup_names=[], min_backup=None, max_backup=None,
ignore_matching_renames=False, match_previous_only=False,
match_next_only=False, dry_run=False, verbose=False,
expected_success=True, expected_output=[]):
cmd_args = ['dump-unique-files-in-backups',
'--backups-config', config.path]
for backup_name in backup_names:
cmd_args.extend(['--backup-name', backup_name])
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
if ignore_matching_renames:
cmd_args.append('--ignore-matching-renames')
if match_previous_only:
cmd_args.append('--match-previous-only')
if match_next_only:
cmd_args.append('--match-next-only')
DoBackupsMain(cmd_args, dry_run=dry_run, verbose=verbose, expected_success=expected_success,
expected_output=expected_output)
def DoExtractFromBackups(config, dry_run=False, min_backup=None, max_backup=None,
output_image_path=None, paths=[], expected_success=True,
expected_output=[]):
cmd_args = ['extract-from-backups',
'--backups-config', config.path,
'--no-encrypt',
'--deduplicate-min-file-size', '1024']
if output_image_path is not None:
cmd_args.extend(['--output-image-path', output_image_path])
for path in paths:
cmd_args.extend(['--path', path])
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
def DoMergeIntoBackups(config, dry_run=False, min_backup=None, max_backup=None,
from_image_path=None, expected_success=True,
expected_output=[]):
cmd_args = ['merge-into-backups',
'--backups-config', config.path,
'--deduplicate-min-file-size', '1024']
if from_image_path is not None:
cmd_args.extend(['--from-image-path', from_image_path])
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
def DoDeleteInBackups(config, dry_run=False, min_backup=None, max_backup=None,
paths=[], expected_success=True, expected_output=[]):
cmd_args = ['delete-in-backups',
'--backups-config', config.path]
if min_backup is not None:
cmd_args.extend(['--min-backup', min_backup])
if max_backup is not None:
cmd_args.extend(['--max-backup', max_backup])
for path in paths:
cmd_args.extend(['--path', path])
DoBackupsMain(cmd_args, dry_run=dry_run, expected_success=expected_success,
expected_output=expected_output)
| 40.123028 | 103 | 0.676232 | 0 | 0 | 192 | 0.015096 | 219 | 0.017218 | 0 | 0 | 1,825 | 0.143486 |
759e1233cd5221eb7c3d3a4d3d8e9c2c06bf7609 | 234 | py | Python | backend/templatetags/back_tag.py | h1gfun4/h1gfun4.github.io | e460467cb505b525ecd5b01b9eb3fd73de7ec6e1 | ["MIT"] | null | null | null | backend/templatetags/back_tag.py | h1gfun4/h1gfun4.github.io | e460467cb505b525ecd5b01b9eb3fd73de7ec6e1 | ["MIT"] | null | null | null | backend/templatetags/back_tag.py | h1gfun4/h1gfun4.github.io | e460467cb505b525ecd5b01b9eb3fd73de7ec6e1 | ["MIT"] | null | null | null |
from django import template
from backend.models import Back
register = template.Library()
@register.inclusion_tag('backend/tags/scrollMenuB.html')
def get_back():
scrollB = Back.objects.all()
return {"scrollMenuB": scrollB }
| 26 | 56 | 0.75641 | 0 | 0 | 0 | 0 | 142 | 0.606838 | 0 | 0 | 44 | 0.188034 |
759fec04ca6bf4fd01f099c1761a43c8c03c98c7 | 9,116 | py | Python | ecommerce/views.py | umarmughal824/bootcamp-ecommerce | 681bcc788a66867b8f240790c0ed33680b73932b | ["BSD-3-Clause"] | 2 | 2018-06-20T19:37:03.000Z | 2021-01-06T09:51:40.000Z | ecommerce/views.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | ["BSD-3-Clause"] | 1,226 | 2017-02-23T14:52:28.000Z | 2022-03-29T13:19:54.000Z | ecommerce/views.py | umarmughal824/bootcamp-ecommerce | 681bcc788a66867b8f240790c0ed33680b73932b | ["BSD-3-Clause"] | 3 | 2017-03-20T03:51:27.000Z | 2021-03-19T15:54:31.000Z |
"""Views for ecommerce"""
from decimal import Decimal
import logging
from django.conf import settings
from django.contrib.auth import get_user_model
from django.http.response import Http404
from django.shortcuts import get_object_or_404
from django.urls import reverse
from ipware import get_client_ip
from rest_framework import status as statuses
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import CreateAPIView, GenericAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.validators import ValidationError
from rest_framework.views import APIView
from applications.constants import AppStates
from applications.models import BootcampApplication
from backends.edxorg import EdxOrgOAuth2
from ecommerce.api import (
complete_successful_order,
create_unfulfilled_order,
generate_cybersource_sa_payload,
get_new_order_by_reference_number,
handle_rejected_order,
serialize_user_bootcamp_run,
serialize_user_bootcamp_runs,
)
from ecommerce.constants import CYBERSOURCE_DECISION_ACCEPT, CYBERSOURCE_DECISION_CANCEL
from ecommerce.exceptions import EcommerceException
from ecommerce.models import Line, Order, Receipt
from ecommerce.permissions import IsSignedByCyberSource
from ecommerce.serializers import (
CheckoutDataSerializer,
PaymentSerializer,
OrderSerializer,
)
from hubspot.task_helpers import sync_hubspot_application_from_order
from klasses.models import BootcampRun
from klasses.permissions import CanReadIfSelf
from main.permissions import UserIsOwnerOrAdminPermission
from main.serializers import serialize_maybe_user
log = logging.getLogger(__name__)
User = get_user_model()
class PaymentView(CreateAPIView):
"""
View for payment API. This creates an Order in our system and provides a dictionary to send to Cybersource.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = PaymentSerializer
def post(self, request, *args, **kwargs):
"""
Create an unfulfilled order and return a response for it.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
payment_amount = Decimal(serializer.data["payment_amount"])
application_id = serializer.data["application_id"]
application = get_object_or_404(
BootcampApplication, id=application_id, user=self.request.user
)
if application.state != AppStates.AWAITING_PAYMENT.value:
log.error(
"User attempted to pay for application %d with invalid state %s",
application.id,
application.state,
)
raise ValidationError("Invalid application state")
order = create_unfulfilled_order(
application=application, payment_amount=payment_amount
)
# Sync order data with hubspot
sync_hubspot_application_from_order(order)
redirect_url = self.request.build_absolute_uri(reverse("applications"))
user_ip, _ = get_client_ip(request)
return Response(
{
"payload": generate_cybersource_sa_payload(
order, redirect_url, ip_address=user_ip
),
"url": settings.CYBERSOURCE_SECURE_ACCEPTANCE_URL,
}
)
class OrderFulfillmentView(APIView):
"""
View for order fulfillment API. This API is special in that only CyberSource should talk to it.
Instead of authenticating with OAuth or via session this looks at the signature of the message
to verify authenticity.
"""
authentication_classes = ()
permission_classes = (IsSignedByCyberSource,)
def post(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Confirmation from CyberSource which fulfills an existing Order.
"""
# First, save this information in a receipt
receipt = Receipt.objects.create(data=request.data)
# Link the order with the receipt if we can parse it
reference_number = request.data["req_reference_number"]
order = get_new_order_by_reference_number(reference_number)
receipt.order = order
receipt.save()
decision = request.data["decision"]
if order.status == Order.FAILED and decision == CYBERSOURCE_DECISION_CANCEL:
# This is a duplicate message, ignore since it's already handled
return Response(status=statuses.HTTP_200_OK)
elif order.status != Order.CREATED:
raise EcommerceException(
"Order {} is expected to have status 'created'".format(order.id)
)
if decision != CYBERSOURCE_DECISION_ACCEPT:
handle_rejected_order(order=order, decision=decision)
else:
# import pdb; pdb.set_trace()
complete_successful_order(order)
# Sync order data with hubspot
sync_hubspot_application_from_order(order)
# The response does not matter to CyberSource
return Response(status=statuses.HTTP_200_OK)
class UserBootcampRunDetail(GenericAPIView):
"""
Class based view for user bootcamp run view.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, CanReadIfSelf)
lookup_field = "run_key"
lookup_url_kwarg = "run_key"
queryset = BootcampRun.objects.all()
def get(
self, request, username, *args, **kwargs
): # pylint: disable=unused-argument
"""
Returns a serialized bootcamp run and payment for a user
"""
user = get_object_or_404(
User, social_auth__uid=username, social_auth__provider=EdxOrgOAuth2.name
)
bootcamp_run = self.get_object()
return Response(
serialize_user_bootcamp_run(user=user, bootcamp_run=bootcamp_run)
)
class UserBootcampRunStatement(RetrieveAPIView):
"""
View class for a user's bootcamp run payment statement
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
lookup_field = "run_key"
lookup_url_kwarg = "run_key"
queryset = BootcampRun.objects.all()
renderer_classes = (TemplateHTMLRenderer,)
def get(self, request, *args, **kwargs):
"""
Fetches a user's bootcamp run payment information and renders their statement
(or raises a 404 if they have no payments for the specified bootcamp run)
"""
bootcamp_run = self.get_object()
if Line.for_user_bootcamp_run(request.user, bootcamp_run).count() == 0:
raise Http404
return Response(
{
"user": serialize_maybe_user(request.user),
"bootcamp_run": serialize_user_bootcamp_run(
user=request.user, bootcamp_run=bootcamp_run
),
},
template_name="bootcamp/statement.html",
)
class UserBootcampRunList(APIView):
"""
Class based view for user bootcamp run list view.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, CanReadIfSelf)
def get(
self, request, username, *args, **kwargs
): # pylint: disable=unused-argument
"""
Returns serialized bootcamp runs and payments for all runs that a user can pay for.
"""
user = get_object_or_404(
User, social_auth__uid=username, social_auth__provider=EdxOrgOAuth2.name
)
return Response(serialize_user_bootcamp_runs(user=user))
class CheckoutDataView(RetrieveAPIView):
"""
List application ecommerce data for a user, for payable applications
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = CheckoutDataSerializer
def get_queryset(self):
"""Filter on valid applications for the user"""
return (
BootcampApplication.objects.filter(
user=self.request.user, state=AppStates.AWAITING_PAYMENT.value
)
.select_related("bootcamp_run")
.prefetch_related(
"bootcamp_run__personal_prices",
"bootcamp_run__installment_set",
"orders",
"orders__line_set",
)
.order_by("id")
)
def get_object(self):
"""Get the application given the query parameter"""
application_id = self.request.query_params.get("application")
return get_object_or_404(self.get_queryset(), id=application_id)
class OrderView(RetrieveAPIView):
"""API view for Orders"""
permission_classes = (IsAuthenticated, UserIsOwnerOrAdminPermission)
serializer_class = OrderSerializer
queryset = Order.objects.all()
owner_field = "user"
| 34.793893 | 111 | 0.691751 | 7,267 | 0.79717 | 0 | 0 | 0 | 0 | 0 | 0 | 2,154 | 0.236288 |
75a10f7c8bb2269ffd29a74f44cb282618db5d67
| 3,334 |
py
|
Python
|
sagemaker-python-sdk/pytorch_lstm_word_language_model/source/generate.py
|
BluePilgrim/amazon-sagemaker-examples
|
e20c855dd912331a9380980712f2fef7d05d3d2d
|
[
"Apache-2.0"
] | 7 |
2018-10-25T16:35:54.000Z
|
2022-02-12T15:24:11.000Z
|
sagemaker-python-sdk/pytorch_lstm_word_language_model/source/generate.py
|
vlordier/amazon-sagemaker-examples
|
6c59b6e435f040bdbe6a7c346fc0ce397f7746d8
|
[
"Apache-2.0"
] | 1 |
2019-04-10T20:21:18.000Z
|
2019-04-10T20:21:18.000Z
|
sagemaker-python-sdk/pytorch_lstm_word_language_model/source/generate.py
|
vlordier/amazon-sagemaker-examples
|
6c59b6e435f040bdbe6a7c346fc0ce397f7746d8
|
[
"Apache-2.0"
] | 2 |
2020-02-19T03:10:18.000Z
|
2022-03-16T12:49:31.000Z
|
import json
import logging
import os
import torch
from rnn import RNNModel
import data
JSON_CONTENT_TYPE = 'application/json'
logger = logging.getLogger(__name__)
def model_fn(model_dir):
logger.info('Loading the model.')
model_info = {}
with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as f:
model_info = torch.load(f)
print('model_info: {}'.format(model_info))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info('Current device: {}'.format(device))
model = RNNModel(rnn_type=model_info['rnn_type'], ntoken=model_info['ntoken'],
ninp=model_info['ninp'], nhid=model_info['nhid'], nlayers=model_info['nlayers'],
dropout=model_info['dropout'], tie_weights=model_info['tie_weights'])
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
# after load the rnn params are not a continuous chunk of memory
# this makes them a continuous chunk, and will speed up forward pass
model.rnn.flatten_parameters()
model.to(device).eval()
logger.info('Loading the data.')
corpus = data.Corpus(model_dir)
logger.info('Done loading model and corpus. Corpus dictionary size: {}'.format(len(corpus.dictionary)))
return {'model': model, 'corpus': corpus}
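# Hedged note (SageMaker hosting convention, not spelled out in this file):
# model_dir is expected to contain 'model_info.pth', 'model.pth' and the
# corpus files read by data.Corpus(); the dict returned here is what
# predict_fn() later receives as its `model` argument.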
def input_fn(serialized_input_data, content_type=JSON_CONTENT_TYPE):
logger.info('Deserializing the input data.')
if content_type == JSON_CONTENT_TYPE:
input_data = json.loads(serialized_input_data)
if input_data['temperature'] < 1e-3:
raise Exception('\'temperature\' has to be greater or equal 1e-3')
return input_data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept=JSON_CONTENT_TYPE):
logger.info('Serializing the generated output.')
if accept == JSON_CONTENT_TYPE:
return json.dumps(prediction_output), accept
raise Exception('Requested unsupported ContentType in Accept: ' + accept)
def predict_fn(input_data, model):
logger.info('Generating text based on input parameters.')
corpus = model['corpus']
model = model['model']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info('Current device: {}'.format(device))
torch.manual_seed(input_data['seed'])
ntokens = len(corpus.dictionary)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
hidden = model.init_hidden(1)
logger.info('Generating {} words.'.format(input_data['words']))
result = []
with torch.no_grad(): # no tracking history
for i in range(input_data['words']):
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(input_data['temperature']).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
word = word if type(word) == str else word.decode()
if word == '<eos>':
word = '\n'
elif i % 12 == 11:
word = word + '\n'
else:
word = word + ' '
result.append(word)
return ''.join(result)
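# Hedged example of the deserialized request body these handlers expect
# (values are hypothetical): {"seed": 1, "temperature": 1.0, "words": 50}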
| 39.223529 | 107 | 0.654469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 820 | 0.245951 |
75a2597adcdcae122cb7a9e4d78b3707b95ae319
| 889 |
py
|
Python
|
get_data.py
|
fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation
|
9f30e89e68c25e6fbcf13d84fee561b53ff70d84
|
[
"MIT"
] | null | null | null |
get_data.py
|
fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation
|
9f30e89e68c25e6fbcf13d84fee561b53ff70d84
|
[
"MIT"
] | null | null | null |
get_data.py
|
fromdatavistodatascience/Boston-Airpot-Traffic-Visualisation
|
9f30e89e68c25e6fbcf13d84fee561b53ff70d84
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import json
import requests
# Retrieve the stored API key used to authenticate with the Google Directions API.
def get_keys(path):
with open(path) as f:
return json.load(f)
keys = get_keys("/Users/jjherranzsarrion/.secret/google_blog2_api.json")
api_key = keys['api_key']
url = 'https://maps.googleapis.com/maps/api/directions/json?'
origin = 'Sheepfold+Dog+Park+Fells+Path+Stoneham+MA'
destination = 'Terminal+C+Boston+Logan+International+Airport+Boston+MA+02128'
departure_time = '1566819000'  # Unix time (seconds since 1970-01-01 UTC); corresponds to Monday 26 August 2019, 07:30 AM US/Eastern.
url_params = f"origin={origin}&destination={destination}&departure_time={departure_time}&key={api_key}"
request_url = url + url_params
response = requests.get(request_url)
with open('response.json', 'w') as f:
json.dump(response.json(), f)
| 31.75 | 130 | 0.743532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 506 | 0.569179 |
75a4801e3fd9b2dd8d7fd997f38c4f96f2672de6
| 1,807 |
py
|
Python
|
openslides_protocol/apps.py
|
OpenSlides/openslides-protocol
|
71366a4f251165384dd359a31fdc0fab79a652a1
|
[
"MIT"
] | null | null | null |
openslides_protocol/apps.py
|
OpenSlides/openslides-protocol
|
71366a4f251165384dd359a31fdc0fab79a652a1
|
[
"MIT"
] | 11 |
2017-08-02T10:48:24.000Z
|
2018-10-19T13:53:51.000Z
|
openslides_protocol/apps.py
|
OpenSlides/openslides-protocol
|
71366a4f251165384dd359a31fdc0fab79a652a1
|
[
"MIT"
] | 2 |
2017-05-10T14:11:34.000Z
|
2018-01-10T11:44:10.000Z
|
from django.apps import AppConfig
from openslides.utils.collection import Collection
from . import (
__description__,
__license__,
__url__,
__verbose_name__,
__version__,
)
class ProtocolAppConfig(AppConfig):
name = 'openslides_protocol'
verbose_name = __verbose_name__
description = __description__
version = __version__
license = __license__
url = __url__
angular_site_module = True
js_files = [
'static/js/openslides_protocol/base.js',
'static/js/openslides_protocol/site.js',
'static/js/openslides_protocol/templatehooks.js',
'static/js/openslides_protocol/templates.js'
]
def ready(self):
# Import all required stuff.
from openslides.core.config import config
from openslides.core.signals import post_permission_creation
from openslides.utils.rest_api import router
from .config_variables import get_config_variables
from .signals import add_permissions_to_builtin_groups
from .views import ObjectProtocolViewSet, ProtocolViewSet
# Define config variables
config.update_config_variables(get_config_variables())
# Connect signals.
post_permission_creation.connect(
add_permissions_to_builtin_groups,
dispatch_uid='protocol_add_permissions_to_builtin_groups'
)
# Register viewsets.
router.register(self.get_model('ObjectProtocol').get_collection_string(), ObjectProtocolViewSet)
router.register(self.get_model('Protocol').get_collection_string(), ProtocolViewSet)
def get_startup_elements(self):
yield Collection(self.get_model('ObjectProtocol').get_collection_string())
yield Collection(self.get_model('Protocol').get_collection_string())
| 34.09434 | 104 | 0.722191 | 1,610 | 0.89098 | 191 | 0.1057 | 0 | 0 | 0 | 0 | 378 | 0.209186 |
75a4a4e7e2bdd3feb7c08bb48946dfcce4709039
| 701 |
py
|
Python
|
roster/migrations/0040_auto_20200428_0914.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 15 |
2021-08-28T18:18:37.000Z
|
2022-03-13T07:48:15.000Z
|
roster/migrations/0040_auto_20200428_0914.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 65 |
2021-08-20T02:37:27.000Z
|
2022-02-07T17:19:23.000Z
|
roster/migrations/0040_auto_20200428_0914.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | 31 |
2020-01-09T02:35:29.000Z
|
2022-03-13T07:48:18.000Z
|
# Generated by Django 3.0.3 on 2020-04-28 13:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('roster', '0039_unlock_units'),
]
operations = [
migrations.AlterField(
model_name='student',
name='num_units_done',
field=models.SmallIntegerField(default=0, help_text="The number of completed units. This is set manually for Evan's book-keeping."),
),
migrations.AlterField(
model_name='student',
name='vision',
field=models.SmallIntegerField(default=3, help_text='Deprecated and no longer in use. To be deleted.'),
),
]
| 29.208333 | 144 | 0.621969 | 608 | 0.867332 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.346648 |
75a691a31ac0f85d25914cc8c58acb2e67e97fd0
| 9,700 |
py
|
Python
|
scripts/gen_report.py
|
twjang/korea_apartment_price
|
cd1414dfe6fe46e7d47625d2f65abe07f7c2db75
|
[
"MIT"
] | 1 |
2021-12-14T13:03:38.000Z
|
2021-12-14T13:03:38.000Z
|
scripts/gen_report.py
|
twjang/korea_apartment_price
|
cd1414dfe6fe46e7d47625d2f65abe07f7c2db75
|
[
"MIT"
] | null | null | null |
scripts/gen_report.py
|
twjang/korea_apartment_price
|
cd1414dfe6fe46e7d47625d2f65abe07f7c2db75
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import json
from typing import List, Optional, Tuple
import datetime
import re
import io
import base64
import os
import sys
import argparse
from plotly.missing_ipywidgets import FigureWidget
from tqdm import tqdm
import minify_html
ROOT=os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(ROOT)
import plotly
import plotly.io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import korea_apartment_price
from korea_apartment_price.db import ApartmentId, EntryNotFound
from korea_apartment_price.utils import editdist
def date_serial2date(x:int):
year = x // 10000
month = (x // 100) % 100
date = (x) % 100
return datetime.datetime(year, month, date)
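# Hedged usage sketch (serial value is hypothetical):
#   >>> date_serial2date(20210315)
#   datetime.datetime(2021, 3, 15, 0, 0)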
def render_graph(apts: List[ApartmentId], date_from=20190101) -> Tuple[Tuple[str, str], FigureWidget]:
sizes = set(korea_apartment_price.db.query_trades(apt_ids=apts, filters=[korea_apartment_price.db.pick_size], date_from=date_from, include_canceled=True))
if len(sizes) == 0:
sizes = set([apt['size'] for apt in apts])
favorite_size = apts[0]['size']
chosen_size = list(sorted([(abs(s-favorite_size), s) for s in sizes]))[0][1]
fig = go.Figure()
aptname = re.sub(r'[0-9]+[ ]*단지[ ]*$', '', apts[0]["name"])
title=(f'{apts[0]["address"]}', f'{aptname} (전용 {chosen_size}평)')
fig.update_layout(height = 500, margin=dict(l=10, r=10, b=10, t=10))
fig.update_yaxes(
showline=True,
linecolor='black',
linewidth=1,
mirror=True
)
fig.update_xaxes(
tickformat='%Y-%m-%d',
hoverformat='%Y-%m-%d',
showline=True,
linecolor='black',
linewidth=1,
mirror=True
)
trades = korea_apartment_price.db.query_trades(apt_ids=apts, size_from=chosen_size-0.9, size_to=chosen_size+0.9, date_from=date_from, include_canceled=True)
trades_x = [date_serial2date(t['date_serial']) for t in trades if not t['is_canceled']]
trades_y = [t['price'] / 10000 for t in trades if not t['is_canceled']]
labels = [f'{t["floor"]}층' for t in trades if not t['is_canceled']]
canceled_trades_x = [date_serial2date(t['date_serial']) for t in trades if t['is_canceled']]
canceled_trades_y = [t['price'] / 10000 for t in trades if t['is_canceled']]
canceled_labels = [f'{t["floor"]}층(취소)' for t in trades if t['is_canceled']]
el = go.Scattergl(x=trades_x, y=trades_y, showlegend = False, marker={'color': 'blue', 'size': 10}, mode='markers', hovertext=labels, name='실거래')
el_canceled = go.Scattergl(x=canceled_trades_x, y=canceled_trades_y, showlegend = False, marker={'color': 'orange', 'size': 10, 'symbol': 'x'}, mode='markers', hovertext=canceled_labels, name='취소')
fig.add_trace(el)
fig.add_trace(el_canceled)
for apt in apts:
try:
kb_orderbook = sorted(korea_apartment_price.db.query_kb_orderbook(apt, size_from=chosen_size-1, size_to=chosen_size+1, fetched_from=date_from), key=lambda x: x['fetched_at'])
break
except EntryNotFound:
print(apt)
pass
fetched_date_cnt = {}
fetched_price_date_cnt = {}
fetched_price_date_lbls = {}
for od in kb_orderbook:
date_end = od['fetched_at']
if od['detail']['최소매매가'] is not None:
price = int(od['detail']['최소매매가']) / 10000
else:
price = od['price'] / 10000
fetched_date_cnt[date_end] = fetched_date_cnt.get(date_end, 0) + 1
fetched_price_date_cnt[(date_end, price)] = fetched_price_date_cnt.get((date_end, price), 0) + 1
if not (date_end, price) in fetched_price_date_lbls:
fetched_price_date_lbls[(date_end, price)] = set()
curlbl = ''
if od['apt_dong'] is not None and len(od['apt_dong']) > 0:
curlbl += f'{od["apt_dong"]}동'
if od['apt_ho'] is not None and len(od['apt_ho']) > 0:
curlbl += f'{od["apt_ho"]}호'
elif od['floor'] is not None and len(od['floor']) > 0:
curlbl += f'{od["floor"]}'
if curlbl == '': curlbl='정보없음'
curlbl = curlbl.replace('제', '').replace('T', '')
fetched_price_date_lbls[(date_end, price)].add(curlbl)
fetched_dates = sorted(fetched_date_cnt.keys())
max_cnt = max([1] + list(fetched_price_date_cnt.values()))
for (date_end, price), cnt in sorted(fetched_price_date_cnt.items()):
date_start = None
for trial_date_start in fetched_dates:
if trial_date_start < date_end: date_start = trial_date_start
if date_start is None:
date_start = date_end - datetime.timedelta(2)
opacity = min(1.0, 0.1 + 0.9 * cnt / max_cnt)
fig.add_trace(go.Scattergl(x=[date_start, date_end], y=[price, price], line={'width':2, 'color':'red'}, marker=None, opacity=opacity, showlegend = False, name='', hoverinfo='skip', mode='lines'))
details = sorted(list(fetched_price_date_lbls[(date_end, price)]))
details = '<br>' + '<br>'.join(sorted(details))
marker = go.Scattergl(x=[date_end], y=[price], text=[f'{cnt}개 {details}'], line=None, marker={'color':'red', 'size': 3}, opacity=opacity, showlegend = False, name='', mode='markers')
fig.add_trace(marker)
return title, fig
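# Hedged usage sketch (assumes `apts` is a non-empty list of ApartmentId dicts,
# e.g. as returned by korea_apartment_price.shortcuts.search; values are illustrative):
#   (addr, aptname), fig = render_graph(apts, date_from=20200101)
#   fig.show()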
parser = argparse.ArgumentParser()
parser.add_argument('aptlst', help='a csv file that contains gu and the apartment name')
parser.add_argument('output', help='output html report path')
args = parser.parse_args()
apts = []
print('[+] reading apartment list')
with open(args.aptlst, 'r') as f:
for line in tqdm(f.readlines()):
line = line.strip()
line = line.split(',', 2)
if len(line) not in [2, 3]:
print (f'Warning: ignoring line "{line}"')
continue
if len(line) == 2:
addr, name = [s.strip() for s in line]
size = 18
else:
addr, name, size = [s.strip() for s in line]
size = int(size)
selected=korea_apartment_price.shortcuts.search(addr, name)
best_editdist = None
best_apt = None
for apt in selected:
apt['size'] = size
cur_editdist = editdist(name, apt['name'])
if best_apt is None or best_editdist > cur_editdist:
best_apt = apt
best_editdist = cur_editdist
if best_apt is not None:
apts.append(best_apt)
else:
print(f'[!] couldn\'t find apt entries for query=({addr}, {name})')
uniq_apts = {}
for apt in apts:
uniq_apts[(apt['address'], apt['name'], apt['size'])] = apt
apts = [uniq_apts[k] for k in sorted(uniq_apts.keys())]
uniq_apts = {}
for apt in apts:
aptname = re.sub(r'[0-9]+[ ]*단지[ ]*$', '', apt["name"])
key = apt['address'], aptname, apt['size']
if not key in uniq_apts: uniq_apts[key] = []
uniq_apts[key].append(apt)
apt_keys = sorted(uniq_apts.keys())
print('[+] generating report')
for apt_addr, apt_name, apt_size in apt_keys:
print(f'{apt_addr} {apt_name} [전용 {apt_size}평]')
data = []
data_by_addr = {}
addrlst = []
for aptidx, apt_key in enumerate(tqdm(apt_keys)):
apts = uniq_apts[apt_key]
(addr, aptname), fig = render_graph(apts)
cur_chart = json.loads(plotly.io.to_json(fig))
if 'data' in cur_chart:
for e in cur_chart['data']:
e['type'] = 'scattergl'
data.append({
'addr': addr,
'aptname': aptname,
'fig': cur_chart,
})
if not addr in data_by_addr: data_by_addr[addr] = []
data_by_addr[addr].append(aptidx)
addrlst = sorted(list(data_by_addr.keys()))
datestr = datetime.datetime.now().strftime('%Y-%m-%d')
html = f"""<!DOCTYPE html>
<html lang="kr">
<head>
<meta charset="utf-8" />
<meta http-equiv="x-ua-compatible" content="ie=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>{datestr} 아파트 보고서</title>
<script src="https://code.jquery.com/jquery-3.6.0.js"></script>
<script src="https://code.jquery.com/ui/1.13.0/jquery-ui.js"></script>
<script type="text/javascript" src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<script type="text/javascript" id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/select2.min.css" rel="stylesheet" />
<script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/select2.min.js"></script>
<script src="https://cdn.tailwindcss.com"></script>
<link rel="stylesheet" href="//code.jquery.com/ui/1.13.0/themes/base/jquery-ui.css">
</head>
"""
html += f"""<script>let chartData={json.dumps(data, ensure_ascii=False, separators=(',', ':'))};</script>"""
html += """<script>
function updateChart(idx) {
let chartdiv = document.getElementById('chart');
console.log(idx);
Plotly.react(chart, chartData[idx]['fig']['data'], chartData[idx]['fig']['layout'], {displayModeBar: false});
}
$(document).ready(()=>{
$('#aptselect').select2();
$('#aptselect').on('select2:select', function (e) {
let data = e.params.data;
updateChart(parseInt(data.id));
});
let chartdiv = document.getElementById('chart');
Plotly.newPlot(chart, chartData[0]['fig']['data'], chartData[0]['fig']['layout'], {displayModeBar: false});
});
</script>
"""
options = ""
for cur_addr in addrlst:
options += f'<optgroup label="{cur_addr}">'
for cur_data_idx in data_by_addr[cur_addr]:
cur_data = data[cur_data_idx]
options += f'<option value="{cur_data_idx}" {"selected" if cur_data_idx == 0 else ""}>{cur_data["aptname"]}</option>'
options += '</optgroup>'
html += f"""
<body>
<div class="h-screen m-0 p-0 flex flex-col">
<div class="grow-0">
<h3 class="text-center font-bold text-lg">{datestr} 아파트 보고서</h3>
<div class="m-3">
<select class="w-full p-3" id="aptselect" name="aptselect">
{options}
</select>
</div>
</div>
<div class="grow p-1"><div id="chart"></div></div>
</body>
</html>"""
with open(args.output, 'w') as f:
f.write(html)
print('[+] done')
| 34.767025 | 199 | 0.66299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,403 | 0.347316 |
75a788822e459f07f9a702bbea56e3b4f3a259e6
| 16,405 |
py
|
Python
|
select-utxos.py
|
The-New-Fork/python-blocknotify
|
56733218c0231044e859a967f767a6674b4ac85e
|
[
"Apache-2.0"
] | 1 |
2021-10-01T15:54:51.000Z
|
2021-10-01T15:54:51.000Z
|
select-utxos.py
|
The-New-Fork/python-blocknotify
|
56733218c0231044e859a967f767a6674b4ac85e
|
[
"Apache-2.0"
] | null | null | null |
select-utxos.py
|
The-New-Fork/python-blocknotify
|
56733218c0231044e859a967f767a6674b4ac85e
|
[
"Apache-2.0"
] | 1 |
2021-07-22T08:11:50.000Z
|
2021-07-22T08:11:50.000Z
|
from lib import rpclib
from slickrpc import Proxy
from lib import transaction, bitcoin, util
from lib.util import bfh, bh2u
from lib.transaction import Transaction
import requests
import pytest
import subprocess
import json
import sys
import os
from dotenv import load_dotenv
load_dotenv(verbose=True)
IMPORT_API_HOST = str(os.getenv("IMPORT_API_HOST"))
IMPORT_API_PORT = str(os.getenv("IMPORT_API_PORT"))
IMPORT_API_BASE_URL = IMPORT_API_HOST
rpc_user = os.getenv("IJUICE_KOMODO_NODE_USERNAME")
rpc_password = os.getenv("IJUICE_KOMODO_NODE_PASSWORD")
port = os.getenv("IJUICE_KOMODO_NODE_RPC_PORT")
address = ""
amount = 0
greedy = True
if len(sys.argv) >= 4:
    address = sys.argv[1]
    amount = float(sys.argv[2])
    # bool() of any non-empty string is True, so parse the flag explicitly
    greedy = sys.argv[3].lower() in ('true', '1', 'yes')
#this_node_pubkey = os.getenv("THIS_NODE_PUBKEY")
#this_node_wif = os.getenv("THIS_NODE_WIF")
def get_utxos_api(address):
komodo_node_ip = os.getenv("IJUICE_KOMODO_NODE_IPV4_ADDR")
rpc_connect = rpc_connection = Proxy("http://" + rpc_user + ":" + rpc_password + "@" + komodo_node_ip + ":" + port)
url = "https://blockchain-explorer.thenewfork.staging.do.unchain.io/insight-api-komodo/addrs/"+ address +"/utxo"
try:
res = requests.get(url)
except Exception as e:
print(e)
return res.text
array_of_utxos = []
array_of_utxos_final = []
amount_final = -10000000000
def get_utxos(utxos, amount, greedy):
global array_of_utxos
global array_of_utxos_final
global amount_final
if len(array_of_utxos) >= len(array_of_utxos_final) and len(array_of_utxos_final) > 0:
return False
if amount <= 0 and amount > amount_final:
return True
flag = False
cheap_copy = array_of_utxos
for utxo in utxos:
for uxto_in_array in array_of_utxos:
if uxto_in_array['txid'] == utxo['txid']:
flag = True
if flag == False:
array_of_utxos = array_of_utxos + [utxo]
if get_utxos(utxos, amount - utxo['amount'], greedy) == True:
array_of_utxos_final = array_of_utxos
amount_final = amount
if greedy == True:
return True
flag = False
array_of_utxos = cheap_copy
return False
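# Hedged walk-through (hypothetical UTXOs, amounts in coins): with
#   utxos = [{'txid': 'a', 'amount': 0.03}, {'txid': 'b', 'amount': 0.02}]
# get_utxos(utxos, 0.04, True) leaves both entries in array_of_utxos_final,
# because the greedy search stops at the first combination covering the target.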
string = get_utxos_api(address)
to_python = ""
try:
to_python = json.loads(string)
except Exception as e:
print(e)
exit()
final = []
for utxo in to_python:
if utxo['confirmations'] > 10:
final = final + [utxo]
get_utxos(final, amount, greedy)
print(array_of_utxos_final)
#TESTING
def is_json(myjson):
try:
json_object = json.loads(myjson)
except ValueError as e:
return False
return True
def test_api():
test = get_utxos_api("RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW")
assert is_json(test) == True
def test_get_utxos():
testcase = [{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6d2dbbf64d839bedece788632d6233337494d1d51247823058832a16c1cf1d92","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.01833945,"satoshis":1833945,"confirmations":0,"ts":1602181139},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"ba474f6ddff5883a13bd456570769cd8de54b448cd5baa872fd99d253dc3df79","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.04444815,"satoshis":4444815,"height":104219,"confirmations":1},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"95f7f0a9ccd4256be902d773f884c6b13bff465feaa87b56a61a8773a3cd990b","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.01014884,"satoshis":1014884,"height":104104,"confirmations":116},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"1e562a43ce53a17c1b0cd2f3a7561d943a849d870e0efd4c9f37c8ce750c015b","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":103904,"confirmations":316},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"cad9777cfd1ea164236800506b24ff633702914a87000be019d82523911fdce2","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":103902,"confirmations":318},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"41451a102cfd2780377c33a67d1ed96b3f70fbb616664a7f431115f83f1beb22","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":103901,"confirmations":319},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"7e7390d8176edb9fef91cbd1843c656da7543169baf361971d6bc7eefa498066","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99497,"confirmations":4723},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"19c73bac8031b52b2c3f9f93c3e40f03dc4747a093703907c0e0a8ef09192fac","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99496,"confirmations":4724},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"db60669396d0bb0aa7b81b9325edfe708c879ff0253c9919af1b892efdefac10","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99492,"confirmations":4728},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"2a39930043b87bc3976c6fc39445708103c6c00f88cb8acd18ad24bbaa83a72e","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99486,"confirmations":4734},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"62b29ebbed4e423a72247c116dafe39643c0f6318c4cc435973f1650407a4c06","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99481,"confirmations":4739},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"32a9965986c5922bf9b0de8fbfbac6a9eea70ba8f9a094b084123e97c918631e","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99477,"confirmations":4743},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"8e8ae844ac5a192602031ef0ae1b69aa60900ad73feb3604a0cd2042978c3f80","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99476,"confirmations
":4744},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"9ccc0668d3bd89be852ad45cf51c415c212cf861ca0e7b6622b6d71d139ebfd0","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99476,"confirmations":4744},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"ee2495ab86e04fb7c9a0d051df12621516d86845e72b05bc901e222366b4c8fb","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99476,"confirmations":4744},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"4ce3087fc3e3b3f8d586b2d77b4584d819130d141461a3a23c83d22d35128ecf","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99475,"confirmations":4745},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6edbe4a746e1f84851eda54fc05e7f967367318866a65d73060847ac60497bc9","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99474,"confirmations":4746},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c16f7f55dee528b925489e9ec4979a4a6215c9cf11b7a1db02ff822189956f0a","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99473,"confirmations":4747},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"11bb33a95f3f1c713e801754031ff4b0fa7fe17242b2c74d223dee08c2568ae4","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99473,"confirmations":4747},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"2dc4e28f322a641169afbce755db55d8cc4547771a29a4e75f0af850016f67aa","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99342,"confirmations":4878},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"98604c684398bc399a45168d30f7ff4515da1145d53f7584d4388b3d69053b7f","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99341,"confirmations":4879},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"ba506982f94df57e2e80418e8a7393568b2892f1c01184d1ea46419c21413ee4","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99339,"confirmations":4881},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"7956be14d1e0681bec8cc8528d7fede271254cbc6ca7d34ae413748ce972182b","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99339,"confirmations":4881},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"49444806b8f9d32efd9536578dfd106e56fa5594bda37f772b7c4b5e582f971f","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99324,"confirmations":4896},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"0fbb4254adce7fc38a3391cd695061d05e43bdf2c27bdad0a4ba0ee076a966e1","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99320,"confirmations":4900},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6f0f621ae5b071a1a3ab653ee296c426dfaf099586095606a6dcc11c89893c3a","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99316,"confirmations":4904},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","t
xid":"9b25d4de15729fd11cc8d9b40da4eaa3093186a7c7caf4b991bb7101fb9dd56f","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99312,"confirmations":4908},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c75171dd9737181cde71adf9196f8fddb3710abcd038242a6f99984aba9d1d77","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99307,"confirmations":4913},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"94e047f42834c829fda5f0dc2cdda88d37c67968c180f8e0bb8a61ef812f2934","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99304,"confirmations":4916},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c56cbc57e260b418519cf43c209b90079a47c0fd50aca8671e35597cc5f6c9d7","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99301,"confirmations":4919},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"1b368d690f8f3db7239248d5140b710ea75f6a0b788c61bb434759087df9e884","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99299,"confirmations":4921},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"923ffde12287052acbeda7cd825fcb390db099dcd4a6ef42a503ccbed32aca5d","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":99297,"confirmations":4923},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"8a0f23e3f8230458e299f96996fbce97859b07d6b85bfd83d2610aa8ca159c7a","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":94589,"confirmations":9631},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"5c73c06f2999b00453f5eacbcb60845ba2554a0a540860a051d55ee18a490935","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":0.05,"satoshis":5000000,"height":94442,"confirmations":9778},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"fed38c710ceaf82d0ef54316df7447171d4b1ec6d499a4b231846b8c9dd33a31","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.28400257,"satoshis":28400257,"height":94145,"confirmations":10075},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"bdde13adb442e0a0b2c5b7220957a2e4d3b9fbbbc47ad3523d35cd996495b608","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.0743478,"satoshis":7434780,"height":94144,"confirmations":10076},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"c064d930a22eaa5a73d0b04201abb304d6d2dffab0f11a3f7652a16724c3d484","vout":2,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.3054992,"satoshis":30549920,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"7dcb33a923f7c25fc8738eb5fc7a230455b55b7285281fc6b41dfa42db900e88","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.95629085,"satoshis":95629085,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"a25b4fb86c86c22fa127838496ae35e75c92ae30d1d80e85ed7fd6135371ddb5","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.8299,"satoshis":82990000,"height":9413
7,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"f76a8d2ebdab28f39ac76365c36aaaaa4c7cce36ac12f38f32b27548f9ddc6e4","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.69274518,"satoshis":69274518,"height":94137,"confirmations":10083},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"fa58a094f7de0c816f1a40ab3322afded4ccdf89cbe3b6b2702ac1011062a0d2","vout":0,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":3.979,"satoshis":397900000,"height":91157,"confirmations":13063},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"b27d126a997f960cdc9e4b82aac74c2c26437005e7025c1bdd188d2ea9b561d1","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90011,"confirmations":14209},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"8005ab6aaa009c48a1c43d01b21b09f8a2e6c853a3a197d46f0c0fa1344e14e1","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90011,"confirmations":14209},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"15a307cd75a630718b63a28a7465e01309dc1d5c0542791fc384b35e86f30b2c","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90010,"confirmations":14210},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"5a090d5dd686bed104ae13472262e7cd9d96608f74631351f1252e0d40be70d4","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90010,"confirmations":14210},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"3cdf23999fa1354eded15493bda356d5829cc60a1c0d708a07f2cd8406f47328","vout":1,"scriptPubKey":"76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac","amount":1.11,"satoshis":111000000,"height":90010,"confirmations":14210},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"be25a04b0dc9196cf9b65dff78ec8c57e58114aae398699046680e25d03fa015","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":1.10944966,"satoshis":110944966,"height":89762,"confirmations":14458},{"address":"RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW","txid":"6e975f08b1ee2a3aa02c2b96ebef588b405576acf24f4c81aff1a929085f168b","vout":0,"scriptPubKey":"2102f2cdd772ab57eae35996c0d39ad34fe06304c4d3981ffe71a596634fa26f8744ac","amount":0.97999673,"satoshis":97999673,"height":89762,"confirmations":14458}]
get_utxos(testcase, 0.01, True)
assert array_of_utxos_final == [{'address': 'RLw3bxciVDqY31qSZh8L4EuM2uo3GJEVEW', 'txid': '6d2dbbf64d839bedece788632d6233337494d1d51247823058832a16c1cf1d92', 'vout': 0, 'scriptPubKey': '76a9147fd21d91b20b713c5a73fe77db4c262117b77d2888ac', 'amount': 0.01833945, 'satoshis': 1833945, 'confirmations': 0, 'ts': 1602181139}]
| 132.298387 | 13,219 | 0.820055 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,901 | 0.72545 |
75a9038ac82fdcf9e0ab0266716bd2dfcbf5e311
| 52 |
py
|
Python
|
pynito/__init__.py
|
qdonohue/pynito
|
bcd7eeb3cdcdb63af2b43dca53b2e7899772e518
|
[
"MIT"
] | null | null | null |
pynito/__init__.py
|
qdonohue/pynito
|
bcd7eeb3cdcdb63af2b43dca53b2e7899772e518
|
[
"MIT"
] | null | null | null |
pynito/__init__.py
|
qdonohue/pynito
|
bcd7eeb3cdcdb63af2b43dca53b2e7899772e518
|
[
"MIT"
] | 1 |
2020-11-21T12:32:27.000Z
|
2020-11-21T12:32:27.000Z
|
from pynito.cognitodecryptor import CognitoDecryptor
| 52 | 52 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75ac67c019d243b02047c3a4e50c8d709addc5ed
| 5,241 |
py
|
Python
|
examples/qt/barcode-reader.py
|
claire-chan/python
|
9a22ab20a8d0171f491730199edfd7ce7e4d806c
|
[
"MIT"
] | 12 |
2020-01-08T13:43:19.000Z
|
2022-03-09T08:35:45.000Z
|
examples/qt/barcode-reader.py
|
claire-chan/python
|
9a22ab20a8d0171f491730199edfd7ce7e4d806c
|
[
"MIT"
] | 2 |
2020-09-10T07:06:50.000Z
|
2022-01-04T17:29:54.000Z
|
examples/qt/barcode-reader.py
|
claire-chan/python
|
9a22ab20a8d0171f491730199edfd7ce7e4d806c
|
[
"MIT"
] | 11 |
2020-03-16T18:22:13.000Z
|
2022-01-07T08:23:08.000Z
|
import sys
from PySide2.QtGui import QPixmap, QImage
from PySide2.QtWidgets import QApplication, QLabel, QPushButton, QVBoxLayout, QWidget, QFileDialog, QTextEdit, QSizePolicy, QMessageBox, QHBoxLayout
from PySide2.QtCore import Slot, Qt, QStringListModel, QSize, QTimer
from dbr import DynamsoftBarcodeReader
dbr = DynamsoftBarcodeReader()
import os
import cv2
class UI_Window(QWidget):
def __init__(self):
QWidget.__init__(self)
# The default barcode image.
dir_path = os.path.dirname(os.path.realpath(__file__))
filename = os.path.join(dir_path, 'image.tif')
# Create a timer.
self.timer = QTimer()
self.timer.timeout.connect(self.nextFrameSlot)
# Create a layout.
layout = QVBoxLayout()
# Add a button
self.btn = QPushButton("Load an image")
self.btn.clicked.connect(self.pickFile)
layout.addWidget(self.btn)
# Add a button
button_layout = QHBoxLayout()
btnCamera = QPushButton("Open camera")
btnCamera.clicked.connect(self.openCamera)
button_layout.addWidget(btnCamera)
btnCamera = QPushButton("Stop camera")
btnCamera.clicked.connect(self.stopCamera)
button_layout.addWidget(btnCamera)
layout.addLayout(button_layout)
# Add a label
self.label = QLabel()
self.label.setFixedSize(640, 640)
pixmap = self.resizeImage(filename)
self.label.setPixmap(pixmap)
layout.addWidget(self.label)
# Add a text area
self.results = QTextEdit()
self.readBarcode(filename)
layout.addWidget(self.results)
# Set the layout
self.setLayout(layout)
self.setWindowTitle("Dynamsoft Barcode Reader")
self.setFixedSize(800, 800)
# https://stackoverflow.com/questions/1414781/prompt-on-exit-in-pyqt-application
def closeEvent(self, event):
msg = "Close the app?"
reply = QMessageBox.question(self, 'Message',
msg, QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
self.stopCamera()
else:
event.ignore()
def readBarcode(self, filename):
dbr.initLicense("Your License")
results = dbr.decodeFile(filename, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000)
out = ''
index = 0
for result in results:
out += "Index: " + str(index) + "\n"
out += "Barcode format: " + result[0] + '\n'
out += "Barcode value: " + result[1] + '\n'
out += '-----------------------------------\n'
index += 1
self.results.setText(out)
def resizeImage(self, filename):
pixmap = QPixmap(filename)
lwidth = self.label.maximumWidth()
pwidth = pixmap.width()
lheight = self.label.maximumHeight()
pheight = pixmap.height()
wratio = pwidth * 1.0 / lwidth
hratio = pheight * 1.0 / lheight
if pwidth > lwidth or pheight > lheight:
if wratio > hratio:
lheight = pheight / wratio
else:
lwidth = pwidth / hratio
scaled_pixmap = pixmap.scaled(lwidth, lheight)
return scaled_pixmap
else:
return pixmap
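    # Hedged note: the scaling above keeps the aspect ratio by shrinking the
    # dimension that overflows the 640x640 label the most; e.g. a hypothetical
    # 1280x480 image would be displayed at 640x240.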
def pickFile(self):
self.stopCamera()
# Load an image file.
filename = QFileDialog.getOpenFileName(self, 'Open file',
'E:\\Program Files (x86)\\Dynamsoft\\Barcode Reader 7.2\\Images', "Barcode images (*)")
# Show barcode images
pixmap = self.resizeImage(filename[0])
self.label.setPixmap(pixmap)
# Read barcodes
self.readBarcode(filename[0])
def openCamera(self):
self.vc = cv2.VideoCapture(0)
# vc.set(5, 30) #set FPS
self.vc.set(3, 640) #set width
self.vc.set(4, 480) #set height
if not self.vc.isOpened():
msgBox = QMessageBox()
msgBox.setText("Failed to open camera.")
msgBox.exec_()
return
        self.timer.start(int(1000 / 24))  # ~24 frames per second
def stopCamera(self):
self.timer.stop()
# https://stackoverflow.com/questions/41103148/capture-webcam-video-using-pyqt
def nextFrameSlot(self):
rval, frame = self.vc.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
pixmap = QPixmap.fromImage(image)
self.label.setPixmap(pixmap)
results = dbr.decodeBuffer(frame, 0x3FF | 0x2000000 | 0x4000000 | 0x8000000 | 0x10000000)
out = ''
index = 0
for result in results:
out += "Index: " + str(index) + "\n"
out += "Barcode format: " + result[0] + '\n'
out += "Barcode value: " + result[1] + '\n'
out += '-----------------------------------\n'
index += 1
self.results.setText(out)
def main():
app = QApplication(sys.argv)
ex = UI_Window()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 31.011834 | 148 | 0.577752 | 4,717 | 0.900019 | 0 | 0 | 0 | 0 | 0 | 0 | 838 | 0.159893 |
75af09b693b1a39a86476d750fe6c76d93b99535
| 6,820 |
py
|
Python
|
mdetsims/dbsim/erins_code/util.py
|
kaiwen-kakuiii/metadetect-sims
|
a0fd133ca5bc946c6ce769e8657ef2ce10226953
|
[
"BSD-3-Clause"
] | 2 |
2021-07-12T09:41:51.000Z
|
2022-01-27T08:13:33.000Z
|
mdetsims/dbsim/erins_code/util.py
|
kaiwen-kakuiii/metadetect-sims
|
a0fd133ca5bc946c6ce769e8657ef2ce10226953
|
[
"BSD-3-Clause"
] | 6 |
2019-04-04T23:53:27.000Z
|
2021-07-30T11:35:20.000Z
|
mdetsims/dbsim/erins_code/util.py
|
kaiwen-kakuiii/metadetect-sims
|
a0fd133ca5bc946c6ce769e8657ef2ce10226953
|
[
"BSD-3-Clause"
] | 2 |
2020-10-30T18:14:29.000Z
|
2021-07-22T16:34:56.000Z
|
import sys
import logging
import numpy as np
logger = logging.getLogger(__name__)
class TryAgainError(Exception):
"""
signal to skip this image(s) and try a new one
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
def setup_logging(level):
if level=='info':
l=logging.INFO
elif level=='debug':
l=logging.DEBUG
elif level=='warning':
l=logging.WARNING
elif level=='error':
l=logging.ERROR
else:
l=logging.CRITICAL
logging.basicConfig(stream=sys.stdout, level=l)
def log_pars(pars, fmt='%8.3g',front=None):
"""
    log the parameters at debug level with a uniform width
"""
s = []
if front is not None:
s.append(front)
if pars is not None:
fmt = ' '.join( [fmt+' ']*len(pars) )
s.append( fmt % tuple(pars) )
s = ' '.join(s)
logger.debug(s)
class Namer(object):
"""
    create strings with an optional front prefix and/or back suffix
"""
def __init__(self, front=None, back=None):
if front=='':
front=None
if back=='' or back=='noshear':
back=None
self.front=front
self.back=back
if self.front is None and self.back is None:
self.nomod=True
else:
self.nomod=False
def __call__(self, name):
n = name
if not self.nomod:
if self.front is not None:
n = '%s_%s' % (self.front, n)
if self.back is not None:
n = '%s_%s' % (n, self.back)
return n
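# Hedged usage sketch (prefix/suffix values are hypothetical):
#   Namer(front='mcal')('g') -> 'mcal_g'
#   Namer(back='noshear')('g') -> 'g'   (a 'noshear' back counts as no suffix)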
def convert_run_to_seed(run):
"""
convert the input config file name to an integer for use
as a seed
"""
import hashlib
h = hashlib.sha256(run.encode('utf-8')).hexdigest()
seed = int(h, base=16) % 2**30
logger.info("got seed %d from run %s" % (seed,run))
return seed
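# Hedged example (run name is hypothetical): the same run string always maps to
# the same seed, e.g. convert_run_to_seed("run-bd03") is reproducible because it
# depends only on the SHA-256 digest reduced modulo 2**30.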
def get_trials_nsplit(c):
"""
split into chunks
"""
from math import ceil
ntrials = c['ntrials']
tmsec = c['desired_hours']*3600.0
sec_per = c['sec_per']
ntrials_per = int(round( tmsec/sec_per ) )
nsplit = int(ceil( ntrials/float(ntrials_per) ))
time_hours = ntrials_per*sec_per/3600.0
logger.info("ntrials requested: %s" % (ntrials))
logger.info('seconds per image: %s sec per with rand: %s' % (c['sec_per'],sec_per))
logger.info('nsplit: %d ntrials per: %d time (hours): %s' % (nsplit,ntrials_per,time_hours))
return ntrials_per, nsplit, time_hours
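# Hedged worked example (config values are hypothetical): with
# c = {'ntrials': 100, 'desired_hours': 1.0, 'sec_per': 60.0} this gives
# ntrials_per = round(3600/60) = 60, nsplit = ceil(100/60) = 2, time_hours = 1.0.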
def get_trials_per_job_mpi(njobs, ntrials):
"""
split for mpi
"""
return int(round(float(ntrials)/njobs))
#
# matching by row,col
#
def match_truth(data, truth, radius_arcsec=0.2, pixel_scale=0.263):
"""
get indices in the data that match truth catalog by x,y position
"""
radius_pixels = radius_arcsec/pixel_scale
print("matching")
allow=1
mdata, mtruth = close_match(
data['x'],
data['y'],
truth['x'],
truth['y'],
radius_pixels,
allow,
)
nmatch=mdata.size
ntot=data.size
frac=float(nmatch)/ntot
print(' matched %d/%d %.3f within '
'%.3f arcsec' % (nmatch, ntot, frac,radius_arcsec))
return mdata
def close_match(t1,s1,t2,s2,ep,allow,verbose=False):
"""
Find the nearest neighbors between two arrays of x/y
parameters
----------
    t1, s1: scalar or array
        coordinates of a set of points. Must be same length.
    t2, s2: scalar or array
        coordinates of a second set of points. Must be same length.
ep: scalar
maximum match distance between pairs (pixels)
allow: scalar
maximum number of matches in second array to each element in first array.
verbose: boolean
make loud
Original by Dave Johnston, University of Michigan, 1997
Translated from IDL by Eli Rykoff, SLAC
modified slightly by erin sheldon
"""
t1=np.atleast_1d(t1)
s1=np.atleast_1d(s1)
t2=np.atleast_1d(t2)
s2=np.atleast_1d(s2)
n1=t1.size
n2=t2.size
matcharr=np.zeros([n1,allow],dtype='i8')
matcharr.fill(-1)
ind=np.arange(n2,dtype='i8')
sor=t2.argsort()
t2s=t2[sor]
s2s=s2[sor]
ind=ind[sor]
runi=0
endt=t2s[n2-1]
for i in range(n1):
t=t1[i]
tm=t-ep
tp=t+ep
in1=_binary_search(t2s,tm) # I can improve this?
if in1 == -1:
if (tm < endt) : in1=0
if in1 != -1:
in1=in1+1
in2=in1-1
jj=in2+1
while (jj < n2):
if (t2s[in2+1] < tp):
in2+=1
jj+=1
else :
jj=n2
if (n2 == 1) :
in2=0 # hmmm
if (in1 <= in2):
if (n2 != 1) :
check = s2s[in1:in2+1]
tcheck = t2s[in1:in2+1]
else :
check = s2s[0]
tcheck=t2s[0]
s=s1[i]
t=t1[i]
offby=abs(check-s)
toffby=abs(tcheck-t)
good=np.where(np.logical_and(offby < ep,toffby < ep))[0]+in1
ngood=good.size
if (ngood != 0) :
if (ngood > allow) :
offby=offby[good-in1]
toffby=toffby[good-in1]
dist=np.sqrt(offby**2+toffby**2)
good=good[dist.argsort()]
ngood=allow
good=good[0:ngood]
matcharr[i,0:ngood]=good
runi=runi+ngood
if verbose:
print("total put in bytarr:",runi)
#matches=np.where(matcharr != -1)[0]
matches=np.where(matcharr != -1)
#if (matches.size == 0):
if (matches[0].size == 0):
if verbose:
print("no matches found")
m1=np.array([])
m2=np.array([])
return m1,m2
m1 = matches[0] % n1
m2 = matcharr[matches]
m2 = ind[m2].flatten()
if verbose:
print(m1.size,' matches')
return m1,m2
def _binary_search(arr,x,edgedefault=False,round=False):
n=arr.size
if (x < arr[0]) or (x > arr[n-1]):
if (edgedefault):
if (x < arr[0]): index = 0
elif (x > arr[n-1]): index = n-1
else: index = -1
return index
down=-1
up=n
while (up-down) > 1:
mid=down+(up-down)//2
if x >= arr[mid]:
down=mid
else:
up=mid
index=down
if (round) and (index != n-1):
if (abs(x-arr[index]) >= abs(x-arr[index+1])): index=index+1
return index
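# Hedged example (array values are hypothetical): for x inside the array range,
# _binary_search returns the index of the rightmost element <= x, e.g.
#   _binary_search(np.array([1, 3, 5, 7]), 4) -> 1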
| 23.680556 | 96 | 0.523167 | 929 | 0.136217 | 0 | 0 | 0 | 0 | 0 | 0 | 1,620 | 0.237537 |
75b16b8f307524cf047b1b8450582a6ea17185b4
| 1,470 |
py
|
Python
|
utilities/thumbnail-creation/thumbnail from category.py
|
DASdaNen4f/microsoftw
|
0ff9e052738e0effb9a484210ac27990f0f14f6f
|
[
"CC-BY-4.0",
"MIT"
] | 97 |
2019-05-07T15:43:30.000Z
|
2022-03-30T01:43:47.000Z
|
utilities/thumbnail-creation/thumbnail from category.py
|
DASdaNen4f/microsoftw
|
0ff9e052738e0effb9a484210ac27990f0f14f6f
|
[
"CC-BY-4.0",
"MIT"
] | 7 |
2020-05-05T17:12:08.000Z
|
2022-03-11T23:41:25.000Z
|
utilities/thumbnail-creation/thumbnail from category.py
|
DASdaNen4f/microsoftw
|
0ff9e052738e0effb9a484210ac27990f0f14f6f
|
[
"CC-BY-4.0",
"MIT"
] | 29 |
2019-05-30T22:23:25.000Z
|
2022-02-24T15:13:51.000Z
|
import pandas as pd
from PIL import Image
import requests
from io import BytesIO
import os
import math
df = pd.read_csv('C:\\Users\\v-ngdian\\Documents\\utilities\\thumbnail creator\\MetArtworksAugmented.csv')
size = 512, 512
ids = []
def make_thumbnail(objectID, url, foldername):
try:
response = requests.get(url)
image = Image.open(BytesIO(response.content))
ids.append(objectID)
image.thumbnail(size, Image.ANTIALIAS)
filepath = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(filepath, foldername, str(objectID) + '.jpg')
image.save(filepath, "JPEG")
except Exception as e:
print("Invalid URL: {}".format(url))
return
def run(category, foldername):
df_filtered = df[df['Object Name'] == category]
print("There are {} objects in ".format(df_filtered.shape[0]) + category)
counter = -1
for index, row in df_filtered.iterrows():
counter += 1
objectID = row['Object ID']
url = row['PrimaryImageUrl']
if counter%50==0:
print("Working on object: " + str(counter) + " with id: " + str(objectID))
if isinstance(url, float) and math.isnan(url):
            continue  # bare 'next' was a no-op; skip rows without an image URL
elif not isinstance(objectID, int):
print("Object id: {} not an integer".format(objectID))
            continue  # bare 'next' was a no-op; skip rows with a non-integer id
else:
make_thumbnail(objectID, url, foldername)
run("vase", "vases")
print(ids)
| 29.4 | 106 | 0.622449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.176871 |
75b207a985e8fc2e2ac54f7ef3b3b97efd0e8a7f
| 1,050 |
py
|
Python
|
examples/tcp.py
|
promisedio/uv
|
b2da55e28da4a3185d810055468389822ec94f2b
|
[
"MIT"
] | null | null | null |
examples/tcp.py
|
promisedio/uv
|
b2da55e28da4a3185d810055468389822ec94f2b
|
[
"MIT"
] | null | null | null |
examples/tcp.py
|
promisedio/uv
|
b2da55e28da4a3185d810055468389822ec94f2b
|
[
"MIT"
] | null | null | null |
import ssl
import certifi
from promisedio import loop, ns, promise, timer
async def example1():
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_default_certs()
context.load_verify_locations(
cafile=certifi.where(),
capath=None,
cadata=None
)
for x in range(100):
try:
stream = await ns.open_connection(("209.131.162.45", 443), ssl=context, server_hostname="www.verisign.com",
timeout=0.2)
except timer.TimeoutError:
            continue  # without this, 'stream' may be unbound below when the attempt times out
print(stream.getsockname())
print(stream.getpeername())
await stream.write(b"GET / HTTP 1.1\n\n")
print(await stream.read())
await stream.shutdown()
async def example2():
stream = await ns.open_connection(("192.168.1.99", 8080), timeout=2)
print(stream.getsockname())
print(stream.getpeername())
await stream.shutdown()
promise.exec_async(example1())
loop.run_forever()
| 26.25 | 119 | 0.648571 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.874286 | 69 | 0.065714 |
75b2d9cf0513bac35f38ddd5680c08dee820e7ca
| 3,232 |
py
|
Python
|
sahara/tests/unit/service/validation/edp/test_job.py
|
hortonworksqe/sahara
|
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
|
[
"Apache-2.0"
] | 1 |
2016-04-13T17:07:05.000Z
|
2016-04-13T17:07:05.000Z
|
sahara/tests/unit/service/validation/edp/test_job.py
|
hortonworksqe/sahara
|
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
|
[
"Apache-2.0"
] | null | null | null |
sahara/tests/unit/service/validation/edp/test_job.py
|
hortonworksqe/sahara
|
b8edeaf2b6a475728bf9fd2ddc3a860dc6c23270
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.validations.edp import job as j
from sahara.tests.unit.service.validation import utils as u
from sahara.utils import edp
class TestJobValidation(u.ValidationTestCase):
def setUp(self):
super(TestJobValidation, self).setUp()
self._create_object_fun = j.check_mains_libs
self.scheme = j.JOB_SCHEMA
def test_empty_libs(self):
for job_type in [edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA]:
self._assert_create_object_validation(
data={
"name": "jar.jar",
"type": job_type
},
bad_req_i=(1, "INVALID_DATA",
"%s flow requires libs" % job_type))
self._assert_create_object_validation(
data={
"name": "jar.jar",
"type": edp.JOB_TYPE_MAPREDUCE_STREAMING,
})
def test_mains_unused(self):
for job_type in [edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA]:
self._assert_create_object_validation(
data={
"name": "jar.jar",
"type": job_type,
"mains": ["lib1"],
"libs": ["lib2"]
},
bad_req_i=(1, "INVALID_DATA",
"%s flow does not use mains" % job_type))
def test_empty_pig_mains(self):
data = {
"name": "pig.pig",
"type": edp.JOB_TYPE_PIG,
"libs": ['lib-uuid']
}
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"Pig flow requires main script"))
data.update({"type": edp.JOB_TYPE_HIVE})
self._assert_create_object_validation(
data=data, bad_req_i=(1, "INVALID_DATA",
"Hive flow requires main script"))
def test_overlap_libs(self):
for job_type in [edp.JOB_TYPE_HIVE, edp.JOB_TYPE_PIG]:
self._assert_create_object_validation(
data={
"name": "jar.jar",
"type": job_type,
"libs": ["lib1", "lib2"],
"mains": ["lib1"]
},
bad_req_i=(1, "INVALID_DATA", "'mains' and 'libs' overlap"))
def test_jar_rejected(self):
self._assert_create_object_validation(
data={
"name": "jar.jar",
"type": "Jar",
},
bad_req_i=(1, "VALIDATION_ERROR",
"'Jar' is not one of " + str(edp.JOB_TYPES_ALL)))
| 35.130435 | 76 | 0.552599 | 2,505 | 0.775062 | 0 | 0 | 0 | 0 | 0 | 0 | 1,029 | 0.318379 |
75b2e04cb5f586ec15b752e5cc06367509fd6133
| 1,004 |
py
|
Python
|
RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py
|
bisnupriyasahu/cmssw
|
6cf37ca459246525be0e8a6f5172c6123637d259
|
[
"Apache-2.0"
] | 3 |
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py
|
bisnupriyasahu/cmssw
|
6cf37ca459246525be0e8a6f5172c6123637d259
|
[
"Apache-2.0"
] | 3 |
2018-08-23T13:40:24.000Z
|
2019-12-05T21:16:03.000Z
|
RecoLocalCalo/HGCalRecProducers/python/hgcalLayerClusters_cff.py
|
bisnupriyasahu/cmssw
|
6cf37ca459246525be0e8a6f5172c6123637d259
|
[
"Apache-2.0"
] | 5 |
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
from RecoLocalCalo.HGCalRecProducers.hgcalLayerClusters_cfi import hgcalLayerClusters as hgcalLayerClusters_
from RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi import dEdX, HGCalRecHit
from RecoLocalCalo.HGCalRecProducers.HGCalUncalibRecHit_cfi import HGCalUncalibRecHit
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import fC_per_ele, hgceeDigitizer, hgchebackDigitizer
hgcalLayerClusters = hgcalLayerClusters_.clone()
hgcalLayerClusters.timeOffset = hgceeDigitizer.tofDelay
hgcalLayerClusters.plugin.dEdXweights = cms.vdouble(dEdX.weights)
hgcalLayerClusters.plugin.fcPerMip = cms.vdouble(HGCalUncalibRecHit.HGCEEConfig.fCPerMIP)
hgcalLayerClusters.plugin.thicknessCorrection = cms.vdouble(HGCalRecHit.thicknessCorrection)
hgcalLayerClusters.plugin.fcPerEle = cms.double(fC_per_ele)
hgcalLayerClusters.plugin.noises = cms.PSet(refToPSet_ = cms.string('HGCAL_noises'))
hgcalLayerClusters.plugin.noiseMip = hgchebackDigitizer.digiCfg.noise_MIP
| 50.2 | 110 | 0.878486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.013944 |
75b2efb0dac87ecec2330f57bb9b5abeb2ef6c62
| 1,705 |
py
|
Python
|
modules/AzureBridge/main.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | null | null | null |
modules/AzureBridge/main.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | null | null | null |
modules/AzureBridge/main.py
|
open-edge-insights/eii-azure-bridge
|
346da9d56be78c6e06a470dfbaf808d568427679
|
[
"MIT"
] | 2 |
2022-02-07T09:05:54.000Z
|
2022-03-17T04:32:50.000Z
|
# Copyright (c) 2020 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""EII Message Bus Azure Edge Runtime Bridge
"""
import asyncio
import traceback as tb
from eab.bridge_state import BridgeState
def main():
"""Main method.
"""
    bs = None
    loop = None
    try:
        bs = BridgeState.get_instance()
        loop = asyncio.get_event_loop()
        loop.run_forever()
    except Exception as e:
        print(f'[ERROR] {e}\n{tb.format_exc()}')
        raise
    finally:
        if bs is not None:
            # Fully stop the bridge
            bs.stop()
        if loop is not None:
            # Clean up asyncio
            loop.stop()
            loop.close()
if __name__ == "__main__":
main()
| 34.1 | 78 | 0.70088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,232 | 0.722581 |
75b2fe433461c1164efd99a7fb0d0c61b5a14512
| 8,033 |
py
|
Python
|
src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py
|
spaceone-dev/plugin-google-cloud-inven-collector
|
3e103412e7598ee9fa5f68b6241a831a40e8b9bc
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py
|
spaceone-dev/plugin-google-cloud-inven-collector
|
3e103412e7598ee9fa5f68b6241a831a40e8b9bc
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/manager/bigquery/sql_workspace_manager.py
|
spaceone-dev/plugin-google-cloud-inven-collector
|
3e103412e7598ee9fa5f68b6241a831a40e8b9bc
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from spaceone.inventory.libs.manager import GoogleCloudManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.bigquery.sql_workspace import SQLWorkspaceConnector
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service import BigQueryWorkSpace, SQLWorkSpaceResource, \
SQLWorkSpaceResponse, ProjectModel
from spaceone.inventory.model.bigquery.sql_workspace.cloud_service_type import CLOUD_SERVICE_TYPES
from datetime import datetime
_LOGGER = logging.getLogger(__name__)
class SQLWorkspaceManager(GoogleCloudManager):
connector_name = 'SQLWorkspaceConnector'
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
_LOGGER.debug(f'** Big Query SQL Workspace START **')
start_time = time.time()
"""
Args:
params:
- options
- schema
- secret_data
- filter
- zones
Response:
CloudServiceResponse/ErrorResourceResponse
"""
collected_cloud_services = []
error_responses = []
data_set_id = ""
secret_data = params['secret_data']
project_id = secret_data['project_id']
##################################
# 0. Gather All Related Resources
# List all information through connector
##################################
big_query_conn: SQLWorkspaceConnector = self.locator.get_connector(self.connector_name, **params)
data_sets = big_query_conn.list_dataset()
projects = big_query_conn.list_projects()
update_bq_dt_tables = []
table_schemas = []
for data_set in data_sets:
try:
##################################
# 1. Set Basic Information
##################################
data_refer = data_set.get('datasetReference', {})
data_set_id = data_refer.get('datasetId')
dataset_project_id = data_refer.get('projectId')
bq_dataset = big_query_conn.get_dataset(data_set_id)
creation_time = bq_dataset.get('creationTime', '')
last_modified_time = bq_dataset.get('lastModifiedTime')
region = self._get_region(bq_dataset.get('location', ''))
exp_partition_ms = bq_dataset.get('defaultPartitionExpirationMs')
exp_table_ms = bq_dataset.get('defaultTableExpirationMs')
# skip if dataset id is invisible
if self._get_visible_on_console(data_set_id):
bq_dt_tables = big_query_conn.list_tables(data_set_id)
update_bq_dt_tables, table_schemas = self._get_table_list_with_schema(big_query_conn, bq_dt_tables)
labels = self.convert_labels_format(bq_dataset.get('labels', {}))
##################################
# 2. Make Base Data
##################################
bq_dataset.update({
'name': data_set_id,
'project': project_id,
'tables': update_bq_dt_tables,
'table_schemas': table_schemas,
'region': region,
'visible_on_console': self._get_visible_on_console(data_set_id),
'matching_projects': self._get_matching_project(dataset_project_id, projects),
'creationTime': self._convert_unix_timestamp(creation_time),
'lastModifiedTime': self._convert_unix_timestamp(last_modified_time),
'default_partition_expiration_ms_display': self._convert_milliseconds_to_minutes(exp_partition_ms),
'default_table_expiration_ms_display': self._convert_milliseconds_to_minutes(exp_table_ms),
'labels': labels
})
big_query_data = BigQueryWorkSpace(bq_dataset, strict=False)
##################################
# 3. Make Return Resource
##################################
big_query_work_space_resource = SQLWorkSpaceResource({
'name': data_set_id,
'account': project_id,
'region_code': region,
'tags': labels,
'data': big_query_data,
'reference': ReferenceModel(big_query_data.reference())
})
##################################
# 4. Make Collected Region Code
##################################
self.set_region_code(region)
##################################
# 5. Make Resource Response Object
# List of SQLWorkSpaceResponse Object
##################################
collected_cloud_services.append(SQLWorkSpaceResponse({'resource': big_query_work_space_resource}))
except Exception as e:
_LOGGER.error(f'[collect_cloud_service] => {e}', exc_info=True)
error_response = self.generate_resource_error_response(e, 'BigQuery', 'SQLWorkspace', data_set_id)
error_responses.append(error_response)
_LOGGER.debug(f'** Big Query Finished {time.time() - start_time} Seconds **')
return collected_cloud_services, error_responses
def _get_region(self, location):
matched_info = self.match_region_info(location)
return matched_info.get('region_code') if matched_info else 'global'
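    # Fetch full metadata for each table in the dataset and flatten its schema
    # fields, tagging every field with its table id so schemas can be listed per table.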
def _get_table_list_with_schema(self, big_conn: SQLWorkspaceConnector, bq_dt_tables):
update_bq_dt_tables = []
table_schemas = []
for bq_dt_table in bq_dt_tables:
table_ref = bq_dt_table.get('tableReference')
table_single = big_conn.get_tables(table_ref.get('datasetId'), table_ref.get('tableId'))
if table_single is not None:
creation_time = table_single.get('creationTime')
expiration_time = table_single.get('expirationTime')
last_modified_time = table_single.get('lastModifiedTime')
table_single.update({
'creationTime': self._convert_unix_timestamp(creation_time),
'expirationTime': self._convert_unix_timestamp(expiration_time),
'lastModifiedTime': self._convert_unix_timestamp(last_modified_time)
})
_table_schemas = table_single.get('schema', {})
if _table_schemas != {}:
fields = _table_schemas.get('fields', [])
table_single.update({'schema': fields})
update_bq_dt_tables.append(table_single)
for single_schema in fields:
single_schema.update({'table_id': table_ref.get('tableId')})
table_schemas.append(single_schema)
return update_bq_dt_tables, table_schemas
@staticmethod
def _get_matching_project(project_id, projects):
_projects = []
for project in projects:
if project_id == project.get('id'):
_projects.append(ProjectModel(project, strict=False))
return _projects
@staticmethod
def _get_visible_on_console(dataset_id):
return False if dataset_id.startswith('_') else True
@staticmethod
def _convert_milliseconds_to_minutes(milliseconds):
if milliseconds:
minutes = (int(milliseconds)/1000)/60
return minutes
else:
return None
@staticmethod
def _convert_unix_timestamp(unix_timestamp):
try:
return datetime.fromtimestamp(int(unix_timestamp) / 1000)
except Exception as e:
_LOGGER.error(f'[_convert_unix_timestamp] {e}')
return
| 42.957219 | 119 | 0.578115 | 7,465 | 0.929292 | 0 | 0 | 848 | 0.105565 | 0 | 0 | 1,794 | 0.223329 |
75b72dca2e43b5612d13506d6b92693bca1eea41
| 192 |
py
|
Python
|
1_Python/Aulas/Aula13a.py
|
guilhermebaos/Curso-em-Video-Python
|
0e67f6f59fa3216889bd2dde4a26b532c7c545fd
|
[
"MIT"
] | null | null | null |
1_Python/Aulas/Aula13a.py
|
guilhermebaos/Curso-em-Video-Python
|
0e67f6f59fa3216889bd2dde4a26b532c7c545fd
|
[
"MIT"
] | null | null | null |
1_Python/Aulas/Aula13a.py
|
guilhermebaos/Curso-em-Video-Python
|
0e67f6f59fa3216889bd2dde4a26b532c7c545fd
|
[
"MIT"
] | null | null | null |
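# Three range() variations: counting up, counting down, and stepping by two.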
for a in range(0,6):
print('Olá!', a)
print('Parei. \n')
for b in range(6, 0, -1):
    print('Olá!', b)
print('Parei. \n')
for c in range(0, 6, 2):
print('Olá!', c)
print('Parei. \n')
| 19.2 | 25 | 0.53125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.276923 |
75b763c3212f1f5ddcadc048b167842b24fdff2e
| 1,732 |
py
|
Python
|
worker_zeromq/resource.py
|
espang/projects
|
3a4d93592bc3427a6abd8d2170081155862754a8
|
[
"MIT"
] | null | null | null |
worker_zeromq/resource.py
|
espang/projects
|
3a4d93592bc3427a6abd8d2170081155862754a8
|
[
"MIT"
] | null | null | null |
worker_zeromq/resource.py
|
espang/projects
|
3a4d93592bc3427a6abd8d2170081155862754a8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 09:11:06 2016
@author: eikes
"""
import ConfigParser
from components import Component
from result import VariableResult
_config = ConfigParser.ConfigParser()
_config.read('scenario.cfg')
_section = 'MySection'
_results = 'results'
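# Components are described in the scenario file by indexed keys
# (comp.<i>.name, comp.<i>.type, ...); connections are a comma-separated list.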
def _create_comp(index):
global _config, _section
connections = map(str.strip, _config.get(
_section,
'comp.{0}.connections'.format(index),
).split(','))
return Component(
_config.get(_section, 'comp.{0}.name'.format(index)),
_config.get(_section, 'comp.{0}.type'.format(index)),
_config.get(_section, 'comp.{0}.reference_values'.format(index)),
connections,
_config.get(_section, 'comp.{0}.replace_values'.format(index)),
_config.getfloat(_section, 'comp.{0}.factor'.format(index)),
)
def _create_results():
global _config, _results
quantity = _config.getint(_results, 'quantity')
results = []
for i in range(1, quantity+1):
label = _config.get(_results, 'result.{0}.name'.format(i))
comp = _config.get(_results, 'result.{0}.comp'.format(i))
calc_type = _config.getint(_results, 'result.{0}.type'.format(i))
results.append(
VariableResult(pk=i, label=label, comp_name=comp, calc_type=calc_type)
)
return results
LP_FILE_PATH = _config.get(_section, 'lp')
TRC_FILE_PATH = _config.get(_section, 'trc')
QUANTITY = _config.getint(_section, 'quantity')
COMPONENTS = [ _create_comp(i) for i in range(1, QUANTITY+1) ]
RESULTS = _create_results()
SIMULATIONS = _config.getint(_section, 'simulations')
WORKER = _config.getint(_section, 'worker')
S_VALUE = float(1.5855e+07)
| 27.0625 | 90 | 0.663972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.196882 |
75b8a1f71cb2c99f52c326ad6e518a675e652f84
| 466 |
py
|
Python
|
sub-array-algorithm-frustated-coders.py
|
annukamat/My-Competitive-Journey
|
adb13a5723483cde13e5f3859b3a7ad840b86c97
|
[
"MIT"
] | 7 |
2018-11-08T11:39:27.000Z
|
2020-09-10T17:50:57.000Z
|
sub-array-algorithm-frustated-coders.py
|
annukamat/My-Competitive-Journey
|
adb13a5723483cde13e5f3859b3a7ad840b86c97
|
[
"MIT"
] | null | null | null |
sub-array-algorithm-frustated-coders.py
|
annukamat/My-Competitive-Journey
|
adb13a5723483cde13e5f3859b3a7ad840b86c97
|
[
"MIT"
] | 2 |
2019-09-16T14:34:03.000Z
|
2019-10-12T19:24:00.000Z
|
ncoders = int(input("enter no. of coders : "))
l=map(int,input().split(" "))
sl=[]
l = sorted(list(l))
top = 1
for rotator in range(1,ncoders):
sl = l[:rotator]
if(top != ncoders):
if(max(sl) < l[top]):
l[l.index(max(sl))] = 0
top = top +1
elif(max(sl) == l[top]):
l[l.index(max(sl[:len(sl)-1]))] = 0
top = top+1
else:
break
print(l)
print(sum(l))
| 18.64 | 47 | 0.44206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.05794 |
75ba91add5ced077993a147299ed8098ccb69a59
| 8,081 |
py
|
Python
|
source/soca/cluster_web_ui/api/v1/dcv/image.py
|
cfsnate/scale-out-computing-on-aws
|
1cc316e988dca3200811ff5527a088a1706901e5
|
[
"Apache-2.0"
] | 77 |
2019-11-14T22:54:48.000Z
|
2022-02-09T06:06:39.000Z
|
source/soca/cluster_web_ui/api/v1/dcv/image.py
|
cfsnate/scale-out-computing-on-aws
|
1cc316e988dca3200811ff5527a088a1706901e5
|
[
"Apache-2.0"
] | 47 |
2020-01-15T18:51:32.000Z
|
2022-03-08T19:46:39.000Z
|
source/soca/cluster_web_ui/api/v1/dcv/image.py
|
cfsnate/scale-out-computing-on-aws
|
1cc316e988dca3200811ff5527a088a1706901e5
|
[
"Apache-2.0"
] | 50 |
2019-11-14T22:51:28.000Z
|
2022-03-14T22:49:53.000Z
|
######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import config
from flask_restful import Resource, reqparse
import logging
from decorators import admin_api, restricted_api, private_api
import botocore
import datetime
from models import db, AmiList
import boto3
import errors
from sqlalchemy import exc
from sqlalchemy.exc import SQLAlchemyError
logger = logging.getLogger("api")
session = boto3.session.Session()
aws_region = session.region_name
ec2_client = boto3.client('ec2', aws_region, config=config.boto_extra_config())
def get_ami_info():
ami_info = {}
for session_info in AmiList.query.filter_by(is_active=True).all():
ami_info[session_info.ami_label] = session_info.ami_id
return ami_info
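# REST resource for DCV images: POST registers an EC2 AMI under a friendly label,
# DELETE soft-deletes it by marking the AmiList row inactive.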
class ManageImage(Resource):
@admin_api
def post(self):
"""
Register a new EC2 AMI as DCV image on SOCA
---
tags:
- DCV
parameters:
- in: body
name: body
schema:
required:
- os
- ami_id
- ami_label
- root_size
properties:
ami_id:
type: string
description: EC2 ID of the AMI
os:
type: string
description: Windows or Linux
ami_label:
type: string
description: Friendly name for your image
root_size:
type: string
description: Minimum size of your EC2 AMI
responses:
200:
description: Pair of user/token is valid
401:
description: Invalid user/token pair
"""
parser = reqparse.RequestParser()
parser.add_argument('ami_id', type=str, location='form')
parser.add_argument('os', type=str, location='form')
parser.add_argument('ami_label', type=str, location='form')
parser.add_argument('root_size', type=str, location='form')
args = parser.parse_args()
ami_id = args["ami_id"]
ami_label = str(args["ami_label"])
os = args["os"]
if args["os"] is None or args["ami_label"] is None or args["ami_id"] is None or args["root_size"] is None:
return errors.all_errors('CLIENT_MISSING_PARAMETER', "os (str), ami_id (str), ami_label (str) and root_size (str) are required.")
if args["os"].lower() not in ["centos7", "rhel7", "amazonlinux2", "windows"]:
return errors.all_errors('CLIENT_MISSING_PARAMETER', "os must be centos7, rhel7, amazonlinux2, or windows")
try:
root_size = int(args["root_size"])
except ValueError:
            return errors.all_errors('IMAGE_REGISTER_ERROR', f"{args['root_size']} must be a valid integer")
soca_labels = get_ami_info()
# Register AMI to SOCA
if ami_label not in soca_labels.keys():
try:
ec2_response = ec2_client.describe_images(ImageIds=[ami_id],
Filters=[{'Name': 'state', 'Values': ['available']}])
if (len(ec2_response["Images"]) != 0):
new_ami = AmiList(ami_id=ami_id,
ami_type=os.lower(),
ami_label=ami_label,
is_active=True,
ami_root_disk_size=root_size,
created_on=datetime.datetime.utcnow())
try:
db.session.add(new_ami)
db.session.commit()
return {"success": True, "message": f"{ami_id} registered successfully in SOCA as {ami_label}"}, 200
except SQLAlchemyError as e:
db.session.rollback()
logger.error(f"Failed Creating AMI {ami_label} {ami_id} {e}")
return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} registration not successful")
else:
logger.error(f"{ami_id} is not available in AWS account")
return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} is not available in AWS account. If you just created it, make sure the state of the image is 'available' on the AWS console")
except botocore.exceptions.ClientError as error:
logger.error(f"Failed Creating AMI {ami_label} {ami_id} {error}")
return errors.all_errors('IMAGE_REGISTER_ERROR', f"{ami_id} Couldn't locate {ami_id} in AWS account. Make sure you do have permission to view it")
else:
logger.error(f"Label already in use {ami_label}")
return errors.all_errors('IMAGE_REGISTER_ERROR', f"Label {ami_label} already in use. Please enter a unique label")
@admin_api
def delete(self):
"""
Delete an EC2 AMI registered as DCV image on SOCA
---
tags:
- DCV
parameters:
- in: body
name: body
schema:
required:
- ami_label
properties:
ami_label:
type: string
description: Friendly name for your image
responses:
200:
description: Pair of user/token is valid
401:
description: Invalid user/token pair
"""
parser = reqparse.RequestParser()
parser.add_argument('ami_label', type=str, location='form')
args = parser.parse_args()
if args["ami_label"] is None:
return errors.all_errors('CLIENT_MISSING_PARAMETER', "ami_label (str) is required.")
check_session = AmiList.query.filter_by(ami_label=args["ami_label"], is_active=True).first()
if check_session:
check_session.is_active = False
check_session.deactivated_on = datetime.datetime.utcnow()
try:
db.session.commit()
logger.info(f"AMI Label {args['ami_label']} deleted from SOCA")
return {"success": True, "message": f"{args['ami_label']} deleted from SOCA successfully"}, 200
except exc.SQLAlchemyError as e:
db.session.rollback()
logger.error(f"AMI Label {args['ami_label']} delete failed {e}")
return errors.all_errors('IMAGE_DELETE_ERROR', f"{args['ami_label']} could not have been deleted because of {e}")
else:
return errors.all_errors('IMAGE_DELETE_ERROR', f"{args['ami_label']} could not be found")
| 45.655367 | 205 | 0.516891 | 5,978 | 0.73976 | 0 | 0 | 5,939 | 0.734934 | 0 | 0 | 4,471 | 0.553273 |
75bb6e08d53656c02653379a24d3bf7833708bba
| 807 |
py
|
Python
|
Day 5/python/main.py
|
BenBMoore/leetcode-challenges
|
97359abbeb24daf8cc33fe2bf1d5748ac824aab4
|
[
"MIT"
] | null | null | null |
Day 5/python/main.py
|
BenBMoore/leetcode-challenges
|
97359abbeb24daf8cc33fe2bf1d5748ac824aab4
|
[
"MIT"
] | null | null | null |
Day 5/python/main.py
|
BenBMoore/leetcode-challenges
|
97359abbeb24daf8cc33fe2bf1d5748ac824aab4
|
[
"MIT"
] | null | null | null |
import argparse
from typing import List
class Solution:
def max_profit(self, prices: List[int]) -> int:
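        # Greedy strategy: summing every positive day-to-day increase gives the
        # best total profit with unlimited buy/sell transactions,
        # e.g. [7, 1, 5, 3, 6, 4] -> (5 - 1) + (6 - 3) = 7.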
best_profit = 0
for idx in range(0, len(prices) - 1):
# If the price is not greater, then "sell at the peak", else buy/hold
if prices[idx + 1] > prices[idx]:
best_profit += prices[idx + 1] - prices[idx]
return best_profit
def main():
    parser = argparse.ArgumentParser(description='Compute the maximum stock trading profit.')
    parser.add_argument('integers', metavar='N', type=int, nargs='+',
                        help='Daily stock prices to evaluate, e.g. 7 1 5 3 6 4')
    args = parser.parse_args()
    prices = args.integers
    max_profit = Solution().max_profit(prices)
    print(max_profit)
if __name__ == "__main__":
main()
| 27.827586 | 85 | 0.619579 | 353 | 0.437423 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.215613 |
75bdd147dbc8647c0747f11af9d4431656daa233
| 947 |
py
|
Python
|
ex2.py
|
timwuu/AnaPoker
|
7cb125c4639a5cd557a6b45c92b5793dcc39def8
|
[
"MIT"
] | null | null | null |
ex2.py
|
timwuu/AnaPoker
|
7cb125c4639a5cd557a6b45c92b5793dcc39def8
|
[
"MIT"
] | null | null | null |
ex2.py
|
timwuu/AnaPoker
|
7cb125c4639a5cd557a6b45c92b5793dcc39def8
|
[
"MIT"
] | null | null | null |
import calcWinRate as cwr
def pp( a, b, table, k):
result = cwr.calc_win_rate( a, b, table, k)
print( "{} vs {} with {}".format( cwr.card_lst(a), cwr.card_lst(b), cwr.card_lst(table)))
print( "{:2.2%} vs {:2.2%}\n".format(result[0], result[1]))
k= 10000 # simulate k times
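# Each scenario below runs a k-trial Monte Carlo estimate of the two hands'
# win rates on the given board; cards are integer codes decoded by cwr.card_lst.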
# --- example 0 ---
# --- 1-draw straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28] #K,J,8
pp( player_a, player_b, table_cards, k)
# --- straight vs 4-card flush
player_a = [51,43] #AQ
player_b = [52,48] #AKs
table_cards = [47,40,28,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
# --- straight vs three of kind
player_a = [51,43] #AQ
player_b = [47,46] #KK
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
# --- straight vs two pairs
player_a = [51,43] #AQ
player_b = [47,39] #KJs
table_cards = [48,40,26,33] #K,J,8,T
pp( player_a, player_b, table_cards, k)
| 22.023256 | 93 | 0.62302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.273495 |
75bf78052e28e2d4673d9f69709a11b7958bfff3
| 1,085 |
py
|
Python
|
Utils/Permission.py
|
koi312500/Koi_Bot_Discord
|
9d7a70f42cdb1110e6382125ade39d3aec21b3b9
|
[
"MIT"
] | null | null | null |
Utils/Permission.py
|
koi312500/Koi_Bot_Discord
|
9d7a70f42cdb1110e6382125ade39d3aec21b3b9
|
[
"MIT"
] | 1 |
2021-06-23T01:16:36.000Z
|
2021-06-23T01:16:36.000Z
|
Utils/Permission.py
|
koi312500/Koi_Bot_Discord
|
9d7a70f42cdb1110e6382125ade39d3aec21b3b9
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from Utils.UserClass import UserClass as User
permission_message = ["Guest [Permission Level : 0]", "User [Permission Level : 1]", "Developer [Permission Level : 2]", "Owner [Permission Level : 3]"]
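# Returns True (after sending an explanatory embed) when the command must be
# blocked because the user's level is below the required one; returns False
# when the command may proceed.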
async def check_permission(ctx, level):
now_user = User(ctx.author)
if now_user.permission >= level:
return False
else:
embed = discord.Embed(title=f"User Permission Error", color=0xff0000)
embed.set_footer(text = "Sented by Koi_Bot#4999ㆍUser Permission Error")
if now_user.permission == 0 and level == 1:
embed.add_field(name = "Suggestion", value = "/accept_term으로 약관 동의를 하시면, 'User [Permission Level : 1]' 권한을 얻어, 이 명령어를 실행 하실 수 있습니다.", inline = False)
embed.add_field(name = "Your Permission", value = f"{str(permission_message[int(now_user.permission)])}", inline = True)
embed.add_field(name = "Command Executable Permission", value = f"{str(permission_message[int(level)])}", inline = True)
await ctx.respond(embed=embed)
return True
| 57.105263 | 161 | 0.682028 | 0 | 0 | 0 | 0 | 0 | 0 | 896 | 0.782533 | 494 | 0.431441 |
75bfcbaef981a9d2b8f3eecff56d9741a7a40637
| 436 |
py
|
Python
|
10.py
|
seanmanson/euler
|
b01418cf44c1113a0c574b5158aa5b89d725cca2
|
[
"MIT"
] | null | null | null |
10.py
|
seanmanson/euler
|
b01418cf44c1113a0c574b5158aa5b89d725cca2
|
[
"MIT"
] | null | null | null |
10.py
|
seanmanson/euler
|
b01418cf44c1113a0c574b5158aa5b89d725cca2
|
[
"MIT"
] | null | null | null |
import math
test = []
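# 'test' collects every prime found so far; trial division only needs to try
# primes up to sqrt(num).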
def testPrime(num):
sq = int(math.sqrt(num))
    for factor in test:
        if factor > sq:
            break
if (num % factor == 0):
return False
test.append(num)
return True
sumPrimes = 2
for i in range(3, 2000000, 2):
if not testPrime(i):
continue
sumPrimes+=i
if (i % 10000 == 1):
print("progress : ", i, sumPrimes)
print (sumPrimes)
| 18.166667 | 42 | 0.538991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.029817 |
75c33edb1fb71d6cd1c893b5ce0674035ed9e6dd
| 37,403 |
py
|
Python
|
clangelscript.py
|
gwihlidal/Clangelscript
|
e83f77d78bf57c25f67922b65aad2f8e74ce2699
|
[
"MIT"
] | 1 |
2019-06-21T06:37:16.000Z
|
2019-06-21T06:37:16.000Z
|
clangelscript.py
|
gwihlidal/clangelscript
|
e83f77d78bf57c25f67922b65aad2f8e74ce2699
|
[
"MIT"
] | null | null | null |
clangelscript.py
|
gwihlidal/clangelscript
|
e83f77d78bf57c25f67922b65aad2f8e74ce2699
|
[
"MIT"
] | null | null | null |
import sys
import re
import json
import os.path
import copy
from mako.template import Template
from clang import cindex
configfile = "clangelscript.json"
f = open(configfile)
data = f.read()
data = re.sub(r"//[^n]*n", "\n", data)
config = json.loads(data)
f.close()
if "ObjectTypes" in config:
arr = config["ObjectTypes"]
config["ObjectTypes"] = {}
for name in arr:
config["ObjectTypes"][re.compile(name)] = arr[name]
def get(name, default=None, conf=config):
if name in conf:
return conf[name]
else:
return default
fir = get("FileIncludeRegex", None)
fer = get("FileExcludeRegex", None)
mir = get("MethodIncludeRegex", None)
mer = get("MethodExcludeRegex", None)
oir = get("ObjectIncludeRegex", None)
oer = get("ObjectExcludeRegex", None)
mfir = get("FieldIncludeRegex", None)
mfer = get("FieldExcludeRegex", None)
generic_regex = get("GenericWrapperRegex", None)
maahr = get("MethodArgumentAutoHandleRegex", None)
mrahr = get("MethodReturnAutoHandleRegex", None)
fir = re.compile(fir) if fir else fir
fer = re.compile(fer) if fer else fer
mir = re.compile(mir) if mir else mir
mer = re.compile(mer) if mer else mer
oir = re.compile(oir) if oir else oir
oer = re.compile(oer) if oer else oer
mfir = re.compile(mfir) if mfir else mfir
mfer = re.compile(mfer) if mfer else mfer
maahr = re.compile(maahr) if maahr else maahr
mrahr = re.compile(mrahr) if mrahr else mrahr
generic_regex = re.compile(generic_regex) if generic_regex else generic_regex
verbose = get("Verbose", False)
doassert = get("Assert", True)
keep_unknowns = get("KeepUnknowns", False)
output_filename = get("OutputFile", None)
funcname = get("FunctionName", "registerScripting")
generic_wrappers = []
index = cindex.Index.create()
clang_args = get("ClangArguments", [])
#clang_args.insert(0, "-I%s/clang/include" % os.path.dirname(os.path.abspath(__file__)))
new_args = []
for arg in clang_args:
new_args.append(arg.replace("${ConfigFilePath}", os.path.dirname(os.path.abspath(configfile))))
clang_args = new_args
tu = index.parse(None, clang_args, [], 13)
warn_count = 0
def logWarning(msg):
global warn_count
warn_count += 1
if verbose:
sys.stderr.write(msg + "\n")
def get_type(type, cursor=None):
pointer = type.kind == cindex.TypeKind.POINTER
typename = ""
ref = type.kind == cindex.TypeKind.LVALUEREFERENCE
if type.kind == cindex.TypeKind.TYPEDEF or type.kind == cindex.TypeKind.RECORD or type.kind == cindex.TypeKind.ENUM:
typename = type.get_declaration()
elif pointer or ref:
t2 = type.get_pointee()
typename = t2.get_declaration()
if typename is None or typename.kind.is_invalid():
typename = get_type(t2)
elif type.kind == cindex.TypeKind.ULONG:
typename = "unsigned long"
elif type.kind == cindex.TypeKind.UINT:
typename = "unsigned int"
elif type.kind == cindex.TypeKind.USHORT:
typename = "unsigned short"
elif type.kind == cindex.TypeKind.CONSTANTARRAY:
if cursor is None:
raise Exception("Constant array, but cursor not provided so can't solve the type")
typename = get_type(type.get_array_element_type())
else:
typename = type.kind.name.lower()
if typename is None:
raise Exception("Typename was None %s" % type.kind)
elif isinstance(typename, cindex.Cursor):
if typename.spelling == None:
raise Exception("Typename was None %s" % type.kind)
fullname = [typename.spelling]
cursor = typename.lexical_parent
while not cursor is None and (cursor.kind == cindex.CursorKind.NAMESPACE or cursor.kind == cindex.CursorKind.CLASS_DECL):
fullname.insert(0, cursor.displayname)
cursor = cursor.lexical_parent
typename = "::".join(fullname)
elif typename == "unexposed":
raise Exception("Typename is unexposed")
return "%s%s" % (typename, "*" if pointer else "&" if ref else "")
def is_int(literal):
try:
i = int(literal)
return True
except:
try:
i = int(literal, 16)
return True
except:
pass
return False
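# Count how often each object type is used as a pointer vs. as a value/reference;
# the majority later decides whether it is registered as asOBJ_REF or asOBJ_VALUE.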
objecttype_scoreboard = {}
def add_use(typename):
val = (0, 0)
p = 0
if "*" in typename:
p = 1
typename = typename[:-1]
if typename in objecttype_scoreboard:
val = objecttype_scoreboard[typename]
objecttype_scoreboard[typename] = (val[0]+p, val[1]+1-p)
typedef = {}
def get_real_type(name):
ptr = "*" in name
ref = "&" in name
if ptr or ref:
name = name[:-1]
while name in typedef:
name = typedef[name]
if ptr:
return name + "*"
if ref:
return name + "&"
return name
def is_const(cursor):
#tokens = cindex.tokenize(tu, cursor.extent)
tokens = list(cindex.TokenGroup.get_tokens(tu, cursor.extent))
for token in tokens:
if token.spelling == "const":
return True
return False
as_builtins = {
"unsigned long": "uint64",
"unsigned int": "uint",
"unsigned short": "uint16",
"unsigned char": "uint8",
"long": "int64",
"void": "void",
"double": "double",
"float": "float",
"char": "int8",
"short": "int16",
"int": "int",
"long": "int64",
"bool": "bool"
}
def get_as_type(name):
ptr = "*" in name
ref = "&" in name
name = name.replace("*", "").replace("&", "")
if name in as_builtins:
if ptr:
raise Exception("Built-in value type %s used as a reference type" % (as_builtins[name]))
name = as_builtins[name]
return "%s%s%s" % (name, "@" if ptr else "", "&" if ref else "")
class Type:
def __init__(self, kind):
typename = get_type(kind)
self.cname = typename
typename = get_real_type(typename)
self.resolved = typename
add_use(typename)
self.const = kind.is_const_qualified()
get_as_type(self.resolved)
def __repr__(self):
return self.cname
def get_as_type(self):
as_type = None
if "ObjectTypes" in config:
for regex in config["ObjectTypes"]:
if regex.search(self.cname) != None:
conf = config["ObjectTypes"][regex]
if "AngelScriptType" in conf:
as_type = regex.sub(conf["AngelScriptType"], self.cname)
break
if as_type == None:
as_type = get_as_type(self.resolved)
return "%s%s" % ("const " if self.const else "", as_type)
def is_known(self):
name = self.resolved.replace("*", "").replace("&", "")
if name in objecttypes:
return True
if name in as_builtins:
return True
if "ObjectTypes" in config:
for regex in config["ObjectTypes"]:
if regex.search(self.cname) != None:
return True
return False
def get_c_type(self):
return "%s%s" % ("const " if self.const else "", self.cname)
def is_reference_type(name):
if "ObjectTypes" in config:
for regex in config["ObjectTypes"]:
if regex.search(name) and "Reference" in config["ObjectTypes"][regex]:
return config["ObjectTypes"][regex]["Reference"]
if name in objecttypes:
ot = objecttypes[name]
for p in ot.parents:
v = is_reference_type(p)
if not v is None:
return v
if name in objecttype_scoreboard:
score = objecttype_scoreboard[name]
return score[0] > score[1]
return None
operatornamedict = {
"-operator": "opNeg",
"~operator": "opCom",
"++operator": "opPreInc",
"--operator": "opPreDec",
"operator==": "opEquals",
#"operator!=": "opEquals",
"operator<": "opCmp",
# "operator<=": "opCmp",
# "operator>": "opCmp",
# "operator>=": "opCmp",
"operator++": "opPostInc",
"operator--": "opPostDec",
"operator+": "opAdd",
"operator-": "opSub",
"operator*": "opMul",
"operator/": "opDiv",
"operator%": "opMod",
"operator&": "opAnd",
"operator|": "opOr",
"operator^": "opXor",
"operator<<": "opShl",
"operator>>": "opShr",
"operator>>>": "opUShr",
"operator[]": "opIndex",
"operator=": "opAssign",
"operator+=": "opAddAssign",
"operator-=": "opSubAssign",
"operator*=": "opMulAssign",
"operator/=": "opDivAssign",
"operator%=": "opModAssign",
"operator&=": "opAndAssign",
"operator|=": "opOrAssign",
"operator^=": "opXorAssign",
"operator<<=": "opShlAssign",
"operator>>=": "opShrAssign",
"operator>>>=": "opUShrAssign",
}
class Function(object):
def __init__(self, cursor, clazz=None, behaviour=None):
self.args = []
if cursor is None:
return
children = list(cursor.get_children())
for child in children:
if child.kind == cindex.CursorKind.PARM_DECL:
t = Type(child.type)
t.const = is_const(child)
self.args.append(t)
self.name = cursor.spelling
self.return_type = Type(cursor.result_type)
self.clazz = clazz
self.const = False
self.behaviour = behaviour
if self.clazz and not behaviour:
start = cursor.extent.start
end = cursor.extent.end
i = 0
while i < len(children):
if children[i].kind == cindex.CursorKind.PARM_DECL:
start = children[i].extent.end
if children[i].kind == cindex.CursorKind.COMPOUND_STMT:
if i > 0:
start = children[i-1].extent.end
end = children[i].extent.start
break
i += 1
if i == len(children):
break
start = children[i-1].extent.end
r = cindex.SourceRange.from_locations(start, end)
f = open(cursor.location.file.name)
f.seek(start.offset)
length = end.offset-start.offset
data = f.read(length)
f.close()
self.const = re.search(r"\s*const\s*(=\s*0)?$", data) != None
if len(children) > 0 and children[0].kind != cindex.CursorKind.PARM_DECL:
f = open(cursor.location.file.name)
f.seek(cursor.extent.start.offset)
length = children[0].extent.start.offset-cursor.extent.start.offset
data = f.read(length)
f.close()
data = re.sub(r"%s.*" % self.name, "", data)
self.return_type.const = re.search(r"\s*const\s*$", data) != None
self.asname()
if mir or mer:
pn = self.pretty_name()
if mer and mer.search(pn):
raise Exception("Function matches exclusion pattern. %s" % pn)
if mir and not mir.search(pn):
raise Exception("Function does not match inclusion pattern. %s" % pn)
def uses(self, typename):
if self.return_type.resolved == typename:
return True
for t in self.args:
if t.resolved == typename:
return True
return False
def pretty_name(self):
cargs = ", ".join([t.get_c_type() for t in self.args])
if self.clazz:
return "%s %s::%s(%s)" % (self.return_type, self.clazz, self.name, cargs)
else:
return "%s %s(%s)" % (self.return_type, self.name, cargs)
def asname(self):
name = self.name
if "operator" in name:
if name not in operatornamedict:
raise Exception("Operator not supported in AngelScript %s" % self.pretty_name())
name = operatornamedict[name]
asargs = []
auto_handle_args = False
auto_handle_return = False
if maahr and maahr.search(self.pretty_name()) != None:
auto_handle_args = True
if mrahr and mrahr.search(self.pretty_name()) != None:
auto_handle_return = True
for a in self.args:
asname = a.get_as_type()
ref = "&" in asname
if ref:
asname2 = get_as_type(a.resolved)[:-1]
extra = ""
if not is_reference_type(asname2):
# Value types can only be in or out references. Defaulting to in
asname += "in"
if "@" in asname and auto_handle_args:
asname2 = asname[:-1]
add = True
if asname2 in objecttypes:
ot = objecttypes[asname2]
if "asOBJ_NOCOUNT" in ot.get_flags():
add = False
if add:
asname += "+"
asargs.append(asname)
asargs = ", ".join(asargs)
if self.behaviour == "asBEHAVE_CONSTRUCT" or self.behaviour == "asBEHAVE_FACTORY":
name = "void f(%s)" % (asargs)
if is_reference_type(self.clazz):
add = auto_handle_return
if self.clazz in objecttypes:
ot = objecttypes[self.clazz]
if "asOBJ_NOCOUNT" in ot.get_flags():
add = False
name = "%s@%s %s(%s)" % (self.clazz, "+" if add else "", self.clazz, asargs)
self.behaviour = "asBEHAVE_FACTORY"
elif self.behaviour == "asBEHAVE_DESTRUCT":
name = "void f()"
else:
asname = self.return_type.get_as_type()
if "@" in asname and auto_handle_return:
asname2 = asname[:-1]
add = True
if asname2 in objecttypes:
ot = objecttypes[asname2]
if "asOBJ_NOCOUNT" in ot.get_flags():
add = False
if add:
asname += "+"
name = "%s %s(%s)" % (asname, name, asargs)
if self.clazz and self.const:
name += " const"
return name
def get_generic(self):
lut = {
"double": "Double",
"float": "Float",
"uint": "DWord",
"int": "DWord",
"uint16": "Word",
"int16": "Word",
"uint8": "Byte",
"int8": "Byte",
"bool": "Byte"
}
name = self.name
if "operator" in name:
name = operatornamedict[name]
name = name.replace("~", "tilde") + "_generic"
for arg in self.args:
name += "_" + arg.get_c_type().replace("&", "amp").replace("*", "star").replace(" ", "space").replace(":", "colon")
if self.clazz:
name = self.clazz + "_" + name
func = "void %s(asIScriptGeneric *gen)\n{\n" % name
asret = self.return_type.get_as_type()
call = "%s(" % self.name
if self.clazz:
if is_reference_type(self.clazz) and self.behaviour == "asBEHAVE_CONSTRUCT":
self.behaviour = "asBEHAVE_FACTORY"
if self.behaviour == "asBEHAVE_FACTORY":
call = "gen->SetReturnAddress(new %s(" % (self.name)
elif self.behaviour == "asBEHAVE_CONSTRUCT":
call = "new(gen->GetObject()) %s(" % self.name
else:
call = "static_cast<%s*>(gen->GetObject())->%s" % (self.clazz, call)
for i in range(len(self.args)):
if i > 0:
call += ", "
arg = self.args[i]
t = arg.get_as_type()
if t in lut:
call += "gen->GetArg%s(%d)" % (lut[t], i)
else:
ct = arg.get_c_type()
pt = "*" in ct
star = "*" if not pt else ""
if "&" in ct:
call += "%sstatic_cast<%s%s>(gen->GetArgAddress(%d))" % (star, arg.get_c_type().replace("&", ""), star, i)
else:
call += "%sstatic_cast<%s%s>(gen->GetArgObject(%d))" % (star, arg.get_c_type(), star, i)
call += ")"
if self.behaviour == "asBEHAVE_FACTORY":
call += ")"
asret2 = asret.replace("const ", "").strip()
if asret2 in lut:
func += "\tgen->SetReturn%s(%s);\n" % (lut[asret2], call)
elif asret == "void":
func += "\t" + call + ";\n"
else:
ct = self.return_type.get_c_type()
pt = "*" in ct
star = "*" if not pt else ""
if pt:
func += "\tgen->SetReturnAddress(%s);\n" % (call)
elif "&" in ct:
func += "\tgen->SetReturnAddress((void*)&%s);\n" % (call)
else:
func += "\t" + self.return_type.get_c_type().replace("&", "").replace("const ", "") + " ret = %s;\n" % call
func += "\tgen->SetReturnObject(&ret);\n"
#func += "\t" + self.return_type.get_c_type() + " ret = %s;\n" % call
#func += "\tnew(gen->GetAddressOfReturnLocation()) %s(ret);\n" % self.return_type.get_c_type().replace("&", "")
func += "}\n"
if func not in generic_wrappers:
generic_wrappers.append(func)
return "asFUNCTION(%s), asCALL_GENERIC" % (name)
def get_register_string(self):
global generic_wrappers
cargs = ", ".join([at.get_c_type() for at in self.args])
if self.clazz == None:
callconv = "asCALL_CDECL"
call = "asFUNCTIONPR(%s, (%s), %s), %s" % (self.name, cargs, self.return_type.get_c_type(), callconv)
if generic_regex and generic_regex.search(self.pretty_name()):
call = self.get_generic()
return _assert("engine->RegisterGlobalFunction(\"%s\", %s)" % (self.asname(), call))
else:
const = " const" if self.const else ""
call = "asMETHODPR(%s, %s, (%s)%s, %s), asCALL_THISCALL" % (self.clazz, self.name, cargs, const, self.return_type.get_c_type())
if (generic_regex and generic_regex.search(self.pretty_name())) or \
self.behaviour == "asBEHAVE_CONSTRUCT" or \
self.behaviour == "asBEHAVE_DESTRUCT" or \
self.behaviour == "asBEHAVE_FACTORY":
call = self.get_generic()
if self.behaviour == None:
return _assert("engine->RegisterObjectMethod(\"%s\", \"%s\", %s)" % (self.clazz, self.asname(), call))
else:
name = self.asname()
return _assert("engine->RegisterObjectBehaviour(\"%s\", %s, \"%s\", %s)" % (self.clazz, self.behaviour, name, call))
def is_pure_virtual(cursor):
# TODO: Use iterator here
children = list(cursor.get_children())
start = cursor.extent.start
end = cursor.extent.end
while len(children) != 0:
child = children[-1]
children = list(child.get_children())
start = child.extent.end
f = open(cursor.location.file.name)
f.seek(start.offset)
length = end.offset-start.offset
data = f.read(length)
f.close()
return re.search(r"=\s*0\s*$", data) != None
objectindex = 0
class ObjectType:
def add_field(self, children, array):
for child in children:
if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
self.add_fields(child.get_reference().get_children(), array)
if child.kind == cindex.CursorKind.FIELD_DECL:
array.append(child)
def __init__(self, cursor, children, name):
global objectindex
self.cursor = cursor
self.name = name
self.flags = {"asOBJ_APP_CLASS": True}
fields = []
self.parents = []
self.index = objectindex
objectindex += 1
self.has_pure_virtuals = False
access = cindex.AccessSpecifier.PRIVATE if cursor.kind == cindex.CursorKind.CLASS_DECL else cindex.AccessSpecifier.PUBLIC
idx = access.from_param;
for child in children:
if child.kind == cindex.CursorKind.CXX_BASE_SPECIFIER:
c = child.get_resolved_cursor()
parentname = c.spelling
if parentname in objecttypes:
ot = objecttypes[parentname]
self.parents.extend(ot.parents)
self.parents.append(parentname)
toadd = []
for om in objectmethods:
if om.clazz == parentname:
f = copy.deepcopy(om)
f.clazz = self.name
toadd.append(f)
objectmethods.extend(toadd)
toadd = []
for of in objectfields:
if of.clazz == parentname:
f = copy.deepcopy(of)
f.clazz = self.name
toadd.append(f)
objectfields.extend(toadd)
continue
if child.kind == cindex.CursorKind.CXX_ACCESS_SPEC_DECL:
access = child.access_specifier
continue
if not access == cindex.AccessSpecifier.PUBLIC:
continue
if child.kind == cindex.CursorKind.CXX_METHOD:
if child.spelling == "operator=":
self.flags["asOBJ_APP_CLASS_ASSIGNMENT"] = True
if child.is_static_method():
# TODO
logWarning("Skipping member method %s::%s as it's static" % (self.name, child.spelling))
continue
try:
objectmethods.append(Function(child, self.name))
except Exception as e:
logWarning("Skipping member method %s::%s - %s" % (self.name, child.spelling, e))
if is_pure_virtual(child):
self.has_pure_virtuals = True
elif child.kind == cindex.CursorKind.CONSTRUCTOR:
self.flags["asOBJ_APP_CLASS_CONSTRUCTOR"] = True
try:
f = Function(child, self.name, "asBEHAVE_CONSTRUCT")
behaviours.append(f)
except Exception as e:
logWarning("Skipping constructor %s::%s - %s" % (self.name, child.spelling, e))
elif child.kind == cindex.CursorKind.DESTRUCTOR:
self.flags["asOBJ_APP_CLASS_DESTRUCTOR"] = True
try:
f = Function(child, self.name, "asBEHAVE_DESTRUCT")
behaviours.append(f)
except Exception as e:
logWarning("Skipping destructor %s::%s - %s" % (self.name, child.spelling, e))
elif child.kind == cindex.CursorKind.FIELD_DECL:
try:
type = Type(child.type)
objectfields.append(ObjectField(self.name, child.spelling, type))
except Exception as e:
logWarning("Skipping member field %s::%s - %s" % (self.name, child.spelling, e))
elif child.kind == cindex.CursorKind.TYPEDEF_DECL:
name, kind = get_typedef(child)
if name:
typedef[name] = kind
logWarning("Typedefs within classes are not supported by AngelScript")
else:
logWarning("Unhandled cursor: %s, %s" % (child.displayname, child.kind))
if "asOBJ_APP_CLASS_DESTRUCTOR" not in self.flags:
self.flags["asOBJ_POD"] = True
self.add_field(children, fields)
if len(fields):
try:
child = fields.pop(0)
t = get_real_type(get_type(child.type, child))
allEqual = True
for field in fields:
t2 = get_real_type(get_type(field.type, field))
                    if t2 != t:
                        allEqual = False
                        break
if allEqual:
if t == "float":
self.flags["asOBJ_APP_CLASS_ALLFLOATS"] = True
elif t == "int" or t == "unsigned int":
self.flags["asOBJ_APP_CLASS_ALLINTS"] = True
else:
logWarning("%s does not have all fields of equal type. Trying ALLINTS anyway" % (self.name, t))
self.flags["asOBJ_APP_CLASS_ALLINTS"] = True
except:
pass
def get_flags(self):
flags = [] if is_reference_type(self.name) else list(self.flags)
if "ObjectTypes" in config:
for regex in config["ObjectTypes"]:
if regex.search(self.name):
conf = config["ObjectTypes"][regex]
if "Flags" in conf:
flags = conf["Flags"]
if "ExtraFlags" in conf:
flags.extend(conf["ExtraFlags"])
if not is_reference_type(self.name):
if "asOBJ_NOCOUNT" in flags:
flags.remove("asOBJ_NOCOUNT")
return flags
def get_register_string(self):
flags = self.get_flags()
f = "%s%s%s" % ("asOBJ_REF" if is_reference_type(self.name) else "asOBJ_VALUE", "|" if len(flags) else "", "|".join(flags))
if not is_reference_type(self.name):
return _assert("engine->RegisterObjectType(\"%s\", sizeof(%s), %s)" % (self.name, self.name, f))
ret = _assert("engine->RegisterObjectType(\"%s\", 0, %s)" % (self.name, f))
for parent in self.parents:
extra = "_nocount" if "asOBJ_NOCOUNT" in flags else ""
ret += "\n\t" + _assert("engine->RegisterObjectBehaviour(\"%s\", asBEHAVE_REF_CAST, \"%s@ f()\", asFUNCTION((refCast%s<%s,%s>)), asCALL_CDECL_OBJLAST)" % (parent, self.name, extra, parent, self.name))
ret += "\n\t" + _assert("engine->RegisterObjectBehaviour(\"%s\", asBEHAVE_IMPLICIT_REF_CAST, \"%s@ f()\", asFUNCTION((refCast%s<%s,%s>)), asCALL_CDECL_OBJLAST)" % (self.name, parent, extra, self.name, parent))
if not "asOBJ_NOCOUNT" in flags:
f = Function(None)
f.name = "AddRef"
f.clazz = self.name
f.const = False
t = cindex.Type(cindex.TypeKind.VOID.from_param())
f.behaviour = "asBEHAVE_ADDREF"
f.return_type = Type(t)
behaviours.append(f)
f = copy.deepcopy(f)
f.name = "DelRef"
f.behaviour = "asBEHAVE_RELEASE"
behaviours.append(f)
return ret
class ObjectField:
def __init__(self, clazz, name, type):
self.clazz = clazz
self.name = name
self.type = type
pn = self.pretty_name()
if mfer and mfer.search(pn):
raise Exception("Matches exclude pattern")
if mfir and not mfir.search(pn):
raise Exception("Doesn't match include pattern")
def uses(self, typename):
return self.type.resolved == typename
def pretty_name(self):
return "%s %s::%s" % (self.type, self.clazz, self.name)
def get_register_string(self):
return _assert("engine->RegisterObjectProperty(\"%s\", \"%s %s\", asOFFSET(%s,%s))" % (self.clazz, self.type, self.name, self.clazz, self.name))
typedefs = []
enums = []
objecttypes = {}
functions = []
objectmethods = []
objectfields = []
includes = []
behaviours = []
def _assert(line):
if doassert:
return "RegisterVerifyAPI(%s);" % line
else:
return "%s;" % line
def get_typedef(cursor):
#tokens = cindex.tokenize(tu, cursor.extent)
tokens = list(cindex.TokenGroup.get_tokens(tu, cursor.extent))
good = True
if len(tokens) >= 4:
for x in tokens[1:-2]:
if x.kind != cindex.TokenKind.IDENTIFIER and x.kind != cindex.TokenKind.KEYWORD:
good = False
break
else:
good = False
if good:
kind = " ".join([t.spelling for t in tokens[1:len(tokens)-2]])
name = tokens[len(tokens)-2].spelling
else:
data = ""
for token in tokens:
data += token.spelling + " "
return None, data
return name, kind
def add_include(filename):
if not filename in includes and filename.endswith(".h"):
includes.append(filename)
def walk(cursor):
global typedefs
global enums
global objecttypes
global functions
global objectmethods
for child in cursor.get_children():
if not child.location.file:
continue
filename = child.location.file.name
if child.kind == cindex.CursorKind.TYPEDEF_DECL:
name, kind = get_typedef(child)
if name:
typedef[name] = kind
if fer and fer.search(filename):
continue
if fir and not fir.search(filename):
continue
if child.kind == cindex.CursorKind.MACRO_DEFINITION:
tokens = list(cindex.TokenGroup.get_tokens(tu, child.extent))
if tokens[0].kind == cindex.TokenKind.IDENTIFIER and tokens[1].kind == cindex.TokenKind.LITERAL and is_int(tokens[1].spelling):
define = _assert("engine->RegisterEnumValue(\"HASH_DEFINES\", \"%s\", %s)" % (tokens[0].spelling, tokens[1].spelling))
if define not in enums:
enums.append(define)
elif child.kind == cindex.CursorKind.FUNCTION_DECL:
try:
f = Function(child)
if "operator" in f.name:
raise Exception("Non member operator functions not supported currently")
else:
functions.append(f)
add_include(filename)
except Exception as e:
logWarning("Skipping function %s - %s" % (child.spelling, e))
elif child.kind == cindex.CursorKind.TYPEDEF_DECL:
name, kind = get_typedef(child)
if name:
typedef[name] = kind
if get_real_type(kind) not in as_builtins:
logWarning("Typedef %s = %s can't be registered as it doesn't resolve to an AngelScript builtin type" % (name, kind))
else:
typedefs.append(_assert("engine->RegisterTypedef(\"%s\", \"%s\")" % (name, get_real_type(kind))))
else:
logWarning("Typedef too complex, skipping: %s" % name)
elif child.kind == cindex.CursorKind.CLASS_DECL or child.kind == cindex.CursorKind.STRUCT_DECL:
children = list(child.get_children())
if len(children) == 0:
continue
if oer and oer.search(child.spelling):
continue
if oir and not oir.search(child.spelling):
continue
classname = child.spelling
if len(classname) == 0:
classname = child.displayname
if len(classname) == 0:
logWarning("Skipping class or struct defined at %s" % cursor.extent)
continue
if classname in objecttypes:
# TODO: different namespaces
logWarning("Skipping type %s, as it is already defined" % classname)
o = ObjectType(child, children, classname)
objecttypes[classname] = o
add_include(filename)
elif child.kind == cindex.CursorKind.MACRO_INSTANTIATION or \
child.kind == cindex.CursorKind.CONVERSION_FUNCTION or \
child.kind == cindex.CursorKind.INCLUSION_DIRECTIVE or \
child.kind == cindex.CursorKind.UNEXPOSED_DECL:
continue
# TODO: Make sure this is what we want
elif child.kind == cindex.CursorKind.CONSTRUCTOR or \
child.kind == cindex.CursorKind.CXX_METHOD:
continue
else:
logWarning("Unhandled cursor: %s, %s" % (child.displayname, child.kind))
# Removes usage of object types that are used both as a reference and a value type
def mismatch_filter(source, toremove):
    toadd = source
ret = []
while len(toadd):
curr = toadd.pop(0)
if curr.uses(toremove):
logWarning("\t%s" % curr.pretty_name())
else:
ret.append(curr)
return ret
def remove_ref_val_mismatches():
global functions
global objectmethods
global behaviours
for key in objecttype_scoreboard:
isref = is_reference_type(key)
ref, val = objecttype_scoreboard[key]
if (isref and val == 0) or (not isref and ref == 0):
continue
logWarning("\"%s\" is used both as a reference type (%d) and a value type (%d). The following will be removed:" % (key, ref, val))
toremove = "%s%s" % (key, "*" if not isref else "")
functions = mismatch_filter(functions, toremove)
objectmethods = mismatch_filter(objectmethods, toremove)
behaviours = mismatch_filter(behaviours, toremove)
def unknown_filter(source):
toadd = source
ret = []
while len(toadd):
keep = True
curr = toadd.pop(0)
broken = None
for t in curr.args:
if not t.is_known():
broken = t.resolved
keep = False
if not curr.return_type.is_known():
broken = curr.return_type.resolved
keep = False
if not keep:
logWarning("Removing %s as it's using an unknown type %s [disable with -ku]" % (curr.pretty_name(), broken))
else:
ret.append(curr)
return ret
def remove_unknowns():
global functions
global objectmethods
global behaviours
functions = unknown_filter(functions)
objectmethods = unknown_filter(objectmethods)
behaviours = unknown_filter(behaviours)
def dup_filter(source):
toadd = source
ret = []
names = []
while len(toadd):
keep = True
curr = toadd.pop(0)
pn = curr.pretty_name()
if pn in names:
logWarning("Removing duplicate function %s" % pn)
else:
ret.append(curr)
names.append(pn)
return ret
def remove_duplicates():
global functions
global objectmethods
global behaviours
functions = dup_filter(functions)
objectmethods = dup_filter(objectmethods)
behaviours = dup_filter(behaviours)
def remove_reference_destructors():
global behaviours
toadd = behaviours
behaviours = []
while len(toadd):
curr = toadd.pop(0)
if is_reference_type(curr.clazz) and curr.behaviour == "asBEHAVE_DESTRUCT":
logWarning("Removing destructor for reference type %s" % curr.clazz)
else:
behaviours.append(curr)
def remove_pure_virtual_constructors():
global behaviours
toadd = behaviours
behaviours = []
while len(toadd):
curr = toadd.pop(0)
virt = False
if curr.clazz in objecttypes:
virt = objecttypes[curr.clazz].has_pure_virtuals
if virt and (curr.behaviour == "asBEHAVE_CONSTRUCT" or curr.behaviour == "asBEHAVE_FACTORY"):
logWarning("Removing constructor for type %s which has pure virtual members" % curr.clazz)
else:
behaviours.append(curr)
walk(tu.cursor)
# File processed, do some post processing
remove_ref_val_mismatches()
if not keep_unknowns:
remove_unknowns()
remove_duplicates()
remove_reference_destructors()
remove_pure_virtual_constructors()
if output_filename != None:
output_filename = output_filename.replace("${this_file_path}", os.path.dirname(os.path.abspath(configfile)))
ot = [objecttypes[o] for o in objecttypes]
ot.sort(cmp=lambda a, b: cmp(a.index, b.index))
for diag in tu.diagnostics:
logWarning("clang had the following to say: %s" % (diag.spelling))
objectTypeStrings = []
for o in ot:
objectTypeStrings.append(o.get_register_string())
# walk() already stores fully formatted register strings in typedefs,
# so they can be passed through unchanged.
typeDefStrings = list(typedefs)
functionStrings = []
for o in functions:
functionStrings.append(o.get_register_string())
behaviourStrings = []
for o in behaviours:
behaviourStrings.append(o.get_register_string())
objectMethodStrings = []
for o in objectmethods:
objectMethodStrings.append(o.get_register_string())
objectFieldStrings = []
for o in objectfields:
objectFieldStrings.append(o.get_register_string())
tpl = Template(filename='ScriptBind.mako')
rendered = tpl.render(
genericWrappers=generic_wrappers,
funcName=funcname,
includes=includes,
objectTypes=objectTypeStrings,
typeDefs=typeDefStrings,
hashDefines=_assert("engine->RegisterEnum(\"HASH_DEFINES\")"),
enums="",
functions=functionStrings,
behaviours=behaviourStrings,
objectMethods=objectMethodStrings,
objectFields=objectFieldStrings)
with open(output_filename, "w") as f:
f.write(rendered)
sys.stderr.write("Finished with %d warnings\n" % warn_count)
| 35.252592 | 221 | 0.558859 | 19,502 | 0.521402 | 0 | 0 | 0 | 0 | 0 | 0 | 6,529 | 0.174558 |
75c4b53c5b63ac8649c46b0cdcaee35a32ddb87c
| 573 |
py
|
Python
|
www/test.py
|
Lneedy/pyBlobDemo
|
19ff1d9b5478f62bbc7f510bffa81adc7915a73b
|
[
"MIT"
] | null | null | null |
www/test.py
|
Lneedy/pyBlobDemo
|
19ff1d9b5478f62bbc7f510bffa81adc7915a73b
|
[
"MIT"
] | null | null | null |
www/test.py
|
Lneedy/pyBlobDemo
|
19ff1d9b5478f62bbc7f510bffa81adc7915a73b
|
[
"MIT"
] | null | null | null |
'''
Demo that tests storing data in the database
>> mysql -u root -p < schema.sql
'''
import orm
from models import User, Blog, Comment
import asyncio
async def test (loop):
await orm.create_pool(loop, user='www-data', password='www-data', db='awesome')
# u = User(id='2', name='Test1', email='[email protected]', passwd='1234567890', image='about:blank')
# await u.save()
u = await User.findAll()
print('sql save success! %s' % u)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(test(loop))
print('Test finished')
loop.close()
| 23.875 | 105 | 0.65445 | 0 | 0 | 0 | 0 | 0 | 0 | 300 | 0.507614 | 266 | 0.450085 |
75c58beec52cc06cb6843a182d38d84b973164ec
| 1,358 |
py
|
Python
|
serializers_test/avro_avg.py
|
lioritan/Side-Projects
|
647bdbf0d3b71ea113739fb7ad2b299aea28c653
|
[
"MIT"
] | null | null | null |
serializers_test/avro_avg.py
|
lioritan/Side-Projects
|
647bdbf0d3b71ea113739fb7ad2b299aea28c653
|
[
"MIT"
] | null | null | null |
serializers_test/avro_avg.py
|
lioritan/Side-Projects
|
647bdbf0d3b71ea113739fb7ad2b299aea28c653
|
[
"MIT"
] | null | null | null |
import avro.schema
import json
import fastavro
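# Avro record schema for a "Meme" post; optional fields are ["null", ...] unions.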
SCHEMA = {
    "namespace": "avg_obj",
    "type": "record",
    "name": "Meme",
    "fields": [
        {"name": "user", "type": {
            "type": "record",
            "name": "PostUser",
            "fields": [
                {"name": "user_id", "type": "string"},
                # For a ["null", ...] union the default must be JSON null
                # (None in Python), not the string "null".
                {"name": "first_name", "type": ["null", "string"], "default": None},
                {"name": "last_name", "type": ["null", "string"], "default": None},
                {"name": "user_type", "type": ["null",
                                               {"type": "enum",
                                                "name": "UserType",
                                                "symbols": ["FREE", "REGULAR", "PREMIUM"]
                                                }], "default": None},
            ]}},
        {"name": "title", "type": ["null", "string"], "default": None},
        {"name": "content", "type": ["null", "bytes"], "default": None},
        {"name": "top_string", "type": ["null", "string"], "default": None},
        {"name": "botom_string", "type": ["null", "string"], "default": None},
        # The default must match the first branch of the union, so "long" comes first here.
        {"name": "likes", "type": ["long", "null"], "default": 0},
        {"name": "hates", "type": ["long", "null"], "default": 0},
    ]
}
avro_schema = fastavro.parse_schema(SCHEMA)
| 38.8 | 89 | 0.401325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.466863 |
75c61bdb0e5516f5f220ea06ae4eb78827a719a4
| 210 |
py
|
Python
|
naloga002.py
|
pzi-si/pzi-src-2
|
819069db98873becf8c8ff93bb1e8fb9dca3036c
|
[
"CC0-1.0"
] | null | null | null |
naloga002.py
|
pzi-si/pzi-src-2
|
819069db98873becf8c8ff93bb1e8fb9dca3036c
|
[
"CC0-1.0"
] | null | null | null |
naloga002.py
|
pzi-si/pzi-src-2
|
819069db98873becf8c8ff93bb1e8fb9dca3036c
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Program, ki vas vpraša po imenu, nato pa vas pozdravi. """
# povprašamo po imenu
ime = input("Kako ti je ime? ")
# pozdravimo
print(f"Pozdravljen_a, {ime}!")
| 21 | 62 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.863208 |
75ca90abf615365ec5eda2bc92c9c7ddc159748c
| 3,699 |
py
|
Python
|
cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py
|
keetsky/tensorflow_learn
|
77205434c2e3d70d482a756f5f679622d10f49b2
|
[
"Apache-2.0"
] | null | null | null |
cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py
|
keetsky/tensorflow_learn
|
77205434c2e3d70d482a756f5f679622d10f49b2
|
[
"Apache-2.0"
] | null | null | null |
cookbook/3 Linear Regression/lin_reg_l1_l2_loss.py
|
keetsky/tensorflow_learn
|
77205434c2e3d70d482a756f5f679622d10f49b2
|
[
"Apache-2.0"
] | null | null | null |
'''
# Linear Regression: understanding loss function in linear regression
#----------------------------------
#
# This function shows how to use Tensorflow to
# solve linear regression.
# y = Ax + b
#
# We will use the iris data, specifically:
# y = Sepal Length
# x = Petal Width
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
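# TF1 graph-mode script: fits y = A*x + b on the iris data twice, first with an
# L2 (squared-error) loss and then with an L1 (absolute-error) loss, and plots
# both loss curves for comparison at the end.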
#%%
#L2 Loss
ops.reset_default_graph()
sess=tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris=datasets.load_iris()
x_vals=np.array([x[3] for x in iris.data])
y_vals=np.array([y[0] for y in iris.data])
# Declare batch size
batch_size = 25
# Initialize placeholders
x_data=tf.placeholder(shape=[None,1],dtype=tf.float32)
y_=tf.placeholder(shape=[None,1], dtype=tf.float32)
#create variable for linear regression
A=tf.Variable(tf.random_normal(shape=[1,1]))
b=tf.Variable(tf.random_normal(shape=[1,1]))
#declare model operations
y=tf.add(tf.matmul(x_data,A),b)
#declare L2 loss: mean of (y_ - y)^2
loss=tf.reduce_mean(tf.square(y_- y))
#Declare optimizer
op=tf.train.GradientDescentOptimizer(0.4)
train_step=op.minimize(loss)
#initialize variables
init=tf.global_variables_initializer()
sess.run(init)
#training loop
loss_vec_l2=[]
for i in range(100):
    rand_index=np.random.choice(len(x_vals),size=batch_size)  # randomly pick batch_size (25) indices from range(len(x_vals))
rand_x=np.transpose([x_vals[rand_index]])
rand_y=np.transpose([y_vals[rand_index]])
sess.run(train_step,feed_dict={x_data:rand_x,y_:rand_y})
temp_loss=sess.run(loss,feed_dict={x_data:rand_x,y_:rand_y})
loss_vec_l2.append(temp_loss)
if (i+1)%25==0:
print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
print('Loss = ' + str(temp_loss))
#%%
#L1 Loss
ops.reset_default_graph()
# Create graph
sess = tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([x[3] for x in iris.data])
y_vals = np.array([y[0] for y in iris.data])
# Declare batch size and number of iterations
batch_size = 25
learning_rate = 0.4 # Will not converge with learning rate at 0.4
iterations = 100
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[1,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Declare model operations
model_output = tf.add(tf.matmul(x_data, A), b)
# Declare loss functions
loss_l1 = tf.reduce_mean(tf.abs(y_target - model_output))
# Initialize variables
init = tf.initialize_all_variables()
sess.run(init)
# Declare optimizers
my_opt_l1 = tf.train.GradientDescentOptimizer(learning_rate)
train_step_l1 = my_opt_l1.minimize(loss_l1)
# Training loop
loss_vec_l1 = []
for i in range(iterations):
rand_index = np.random.choice(len(x_vals), size=batch_size)
rand_x = np.transpose([x_vals[rand_index]])
rand_y = np.transpose([y_vals[rand_index]])
sess.run(train_step_l1, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss_l1 = sess.run(loss_l1, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec_l1.append(temp_loss_l1)
if (i+1)%25==0:
print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
#%%
#plot loss over time(steps)
plt.plot(loss_vec_l1, 'k-', label='L1 Loss')
plt.plot(loss_vec_l2, 'r--', label='L2 Loss')
plt.title('L1 and L2 Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('L1 Loss')
plt.legend(loc='upper right')
plt.show()
| 30.073171 | 92 | 0.711544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,174 | 0.315846 |
75caae991e7575297539a0a5755bf9b4493ee335
| 3,258 |
py
|
Python
|
pysh.py
|
tri-llionaire/tri-llionaire.github.io
|
5134d3ec0ff1e3b7eab469ea05300b505895212f
|
[
"MIT"
] | 1 |
2018-04-24T14:53:23.000Z
|
2018-04-24T14:53:23.000Z
|
pysh.py
|
tri-llionaire/tri-llionaire.github.io
|
5134d3ec0ff1e3b7eab469ea05300b505895212f
|
[
"MIT"
] | null | null | null |
pysh.py
|
tri-llionaire/tri-llionaire.github.io
|
5134d3ec0ff1e3b7eab469ea05300b505895212f
|
[
"MIT"
] | 1 |
2018-08-25T21:15:07.000Z
|
2018-08-25T21:15:07.000Z
|
#pysh: shell in python
import sys
cmdlist = ['start','exit','cd','md','ls','pd','cf','cl']
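# Command meanings, as inferred from the handlers below: start/exit enter and
# leave the shell, cd changes directory, md makes a directory, ls lists the
# current directory, pd prints it, cf creates a file, cl clears the screen.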
convert = []
waiting = 0
print 'pysh 1.0.5 19.03.11 #6. type start to enter, exit to leave.'
paths = ['pysh/']
direct = 'pysh/'
added = []
entered = raw_input(': ')
if entered == 'start':
while entered != ['exit']:
entered = raw_input('{} '.format(direct))
entered = entered.split()
for x in entered:
if x in cmdlist:
if waiting == 0:
if x == 'ls':
for i in paths:
if i.startswith(direct) and len(i) > len(direct):
temp = len(direct)
splitted = i[temp:].split('/')
if len(splitted) > 1 and (splitted[0] + '/') not in added:
print splitted[0] + '/'
added.append(splitted[0] + '/')
elif len(splitted) < 2 and splitted[0] not in added:
print splitted[0]
added.append(splitted[0])
else:
pass
else:
pass
elif x == 'pd':
print direct
elif x == 'cd':
waiting = 1
elif x == 'md':
waiting = 2
elif x == 'cf':
waiting = 3
elif x == 'start':
print 'already in pysh'
elif x == 'cl':
sys.stdout.write('\x1b[2J\x1b[H')
else:
break
else:
print 'pysh: consecutive cmd {}'.format(x)
else:
if waiting == 1:
if x == '..':
direct = direct[:-1].rsplit('/',1)[0] + '/'
else:
if direct + x + '/' in paths:
direct = direct + x + '/'
elif x.endswith('/'):
if direct + x in paths:
direct = direct + x
else:
print 'pysh: directory \'{}\' not found'.format(x)
else:
print 'pysh: can\'t cd to file \'{}\''.format(x)
waiting = 0
elif waiting == 2:
if x.endswith('/'):
paths.append(direct + x)
else:
paths.append(direct + x + '/')
waiting = 0
elif waiting == 3:
if x.endswith('/'):
            paths.append(direct + x[:-1])  # strip the trailing '/' so cf stores a file, not a directory
else:
paths.append(direct + x)
waiting = 0
else:
print 'pysh: {} not found.'.format(x)
break
else:
print 'startup: {} not found'.format(entered)
| 40.222222 | 90 | 0.329343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.122161 |
75cd39985df9ba1fac685c50b84e7d3ed1571cd1
| 3,813 |
py
|
Python
|
Scripts/IDA_SyncDecompiledFuncs.py
|
THEONLYDarkShadow/alive_reversing
|
680d87088023f2d5f2a40c42d6543809281374fb
|
[
"MIT"
] | 208 |
2018-06-06T13:14:03.000Z
|
2022-03-30T02:21:27.000Z
|
Scripts/IDA_SyncDecompiledFuncs.py
|
THEONLYDarkShadow/alive_reversing
|
680d87088023f2d5f2a40c42d6543809281374fb
|
[
"MIT"
] | 537 |
2018-06-06T16:50:45.000Z
|
2022-03-31T16:41:15.000Z
|
Scripts/IDA_SyncDecompiledFuncs.py
|
THEONLYDarkShadow/alive_reversing
|
680d87088023f2d5f2a40c42d6543809281374fb
|
[
"MIT"
] | 42 |
2018-06-06T00:40:08.000Z
|
2022-03-23T08:38:55.000Z
|
from idautils import *
from idaapi import *
from idc import *
import urllib2
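# IDAPython helper (Python 2): colours functions in the open IDB according to
# whether they are decompiled, stubbed and/or covered, based on the address
# lists read in main() below.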
def ida_set_function_colour(function_address, colour):
idc.set_color(function_address, CIC_FUNC, colour)
def ida_get_function_colour(function_address):
function = idaapi.get_func(function_address)
if not function:
return 0
return function.color
def ida_func_exists(function_address):
for segment in Segments():
# get all functions
for function_ea in Functions(segment, SegEnd(segment)):
if function_address == function_ea:
return True
return False
def downloadFile(url):
response = urllib2.urlopen(url)
html = response.read()
return html
def toAddressList(html):
lines = html.split('\n')
ret = []
for line in lines:
addr = line.split(" ")[0].strip()
if len(addr) > 0:
ret.append(addr)
return ret
class FunctionData():
def __init__(self):
self.bIsImpl = False
self.bIsStub = False
self.bIsCovered = False
def ColourName(self):
if self.bIsImpl:
return "Decompiled"
elif self.bIsStub and self.bIsCovered:
return "Covered stub"
elif self.bIsStub:
return "Stub"
else:
return "Covered"
def Colour(self):
if self.bIsImpl:
return 0xEEFFF0 #0xB4DED2
elif self.bIsStub and self.bIsCovered:
# Covered and stubbed
return 0xC57AAF
elif self.bIsStub:
# None covered stub
return 0xD2B4DE
else:
# Coverage only case
return 0xA569BD
def LineToInt(line):
    line = line.strip()
return long(line)
def EnsureKey(address, dict):
if not dict.has_key(address):
dict[address] = FunctionData()
def AddDecompiled(address, dict):
EnsureKey(address, dict)
dict[address].bIsImpl = True
def AddStubbed(address, dict):
EnsureKey(address, dict)
dict[address].bIsStub = True
def AddCovered(address, dict):
print "Add covered " + asHex(address)
EnsureKey(address, dict)
dict[address].bIsCovered = True
def asHex(value):
return (hex(value).rstrip("L") or "0").upper()
def sync_function_colour(address, functionData):
if (ida_func_exists(address)):
# Everything else is open season
colourToSet = functionData.Colour()
if ida_get_function_colour(address) == colourToSet:
print asHex(address) + " already set to " + functionData.ColourName() + "(" + asHex(colourToSet) + ")"
else:
print "Set " + asHex(address) + " to " + functionData.ColourName() + "(" + asHex(colourToSet) + ")"
ida_set_function_colour(address, colourToSet)
else:
print asHex(address) + " function not found in IDB!"
def main():
functionDataDict = {}
with open('C:\GOG Games\Abes Oddysee\decompiled_functions.txt', 'r') as f:
for line in f:
AddDecompiled(LineToInt(line), functionDataDict)
with open('C:\GOG Games\Abes Oddysee\stubbed_functions.txt', 'r') as f:
for line in f:
AddStubbed(LineToInt(line), functionDataDict)
#funcsWithCoverage = toAddressList(downloadFile("https://gist.githubusercontent.com/paulsapps/ea894a929f02c7bb7c931af12ad08151/raw/38cf5fcd0f8ba6b27a2a08043f81be7f8b34b4e4/gistfile1.txt"))
#for func in funcsWithCoverage:
# print "func is " + func
# AddCovered(int(func, 16), functionDataDict)
for address in functionDataDict.iterkeys():
data = functionDataDict[address]
sync_function_colour(address, data)
if __name__ == '__main__':
main()
| 30.504 | 193 | 0.620771 | 780 | 0.204563 | 0 | 0 | 0 | 0 | 0 | 0 | 681 | 0.1786 |
75cdec8d921818ac60703e7cb57923284eb229e2
| 2,499 |
py
|
Python
|
alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayCommerceEducateTuitioncodeMonitorCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateTuitioncodeMonitorCreateModel(object):
def __init__(self):
self._bank_type = None
self._login_account = None
self._out_apply_id = None
self._parent_no = None
@property
def bank_type(self):
return self._bank_type
@bank_type.setter
def bank_type(self, value):
self._bank_type = value
@property
def login_account(self):
return self._login_account
@login_account.setter
def login_account(self, value):
self._login_account = value
@property
def out_apply_id(self):
return self._out_apply_id
@out_apply_id.setter
def out_apply_id(self, value):
self._out_apply_id = value
@property
def parent_no(self):
return self._parent_no
@parent_no.setter
def parent_no(self, value):
self._parent_no = value
def to_alipay_dict(self):
params = dict()
if self.bank_type:
if hasattr(self.bank_type, 'to_alipay_dict'):
params['bank_type'] = self.bank_type.to_alipay_dict()
else:
params['bank_type'] = self.bank_type
if self.login_account:
if hasattr(self.login_account, 'to_alipay_dict'):
params['login_account'] = self.login_account.to_alipay_dict()
else:
params['login_account'] = self.login_account
if self.out_apply_id:
if hasattr(self.out_apply_id, 'to_alipay_dict'):
params['out_apply_id'] = self.out_apply_id.to_alipay_dict()
else:
params['out_apply_id'] = self.out_apply_id
if self.parent_no:
if hasattr(self.parent_no, 'to_alipay_dict'):
params['parent_no'] = self.parent_no.to_alipay_dict()
else:
params['parent_no'] = self.parent_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceEducateTuitioncodeMonitorCreateModel()
if 'bank_type' in d:
o.bank_type = d['bank_type']
if 'login_account' in d:
o.login_account = d['login_account']
if 'out_apply_id' in d:
o.out_apply_id = d['out_apply_id']
if 'parent_no' in d:
o.parent_no = d['parent_no']
return o
| 29.05814 | 77 | 0.605442 | 2,382 | 0.953181 | 0 | 0 | 1,086 | 0.434574 | 0 | 0 | 312 | 0.12485 |
75d0eb05faa1f187e229cf597a3a8352882ca242
| 2,888 |
py
|
Python
|
tests/tests_tabu.py
|
Antash696/VRP
|
386b84adbe34be37aabc1e638515ce722849a952
|
[
"MIT"
] | 33 |
2017-10-18T01:18:27.000Z
|
2021-10-04T14:17:52.000Z
|
tests/tests_tabu.py
|
dj-boy/VRP
|
386b84adbe34be37aabc1e638515ce722849a952
|
[
"MIT"
] | 1 |
2020-12-21T01:59:21.000Z
|
2020-12-21T01:59:21.000Z
|
tests/tests_tabu.py
|
dj-boy/VRP
|
386b84adbe34be37aabc1e638515ce722849a952
|
[
"MIT"
] | 19 |
2017-06-26T15:02:00.000Z
|
2022-03-31T08:44:20.000Z
|
import unittest
from code import instance as i
from code import datamapping as dm
from code import greedyfirst as gf
from code import algorithm as a
from code import baseobjects as bo
from code import tabu
class TestTabuSpecific(unittest.TestCase):
def setUp(self):
raw_data = dm.Importer()
# raw_data.import_data("./tests/cvrp2.test")
# raw_data.import_data("./tests/ulysses-n16-k3.vrp")
raw_data.import_data("./tests/E-n23-k3.vrp")
# raw_data.import_data("./tests/cvrp3.test")
# raw_data.import_data("./tests/P-n19-k2.vrp")
#raw_data.import_data("./tests/E-n101-k14.vrp")
data = dm.DataMapper(raw_data)
self.instance = i.ProblemInstance(data)
self.solution = a.Solution(self.instance)
greedy = gf.GreedyFirst(self.solution.solution)
greedy.run(sort=True)
self.solution.value = self.solution.eval()
self.tabu_search = tabu.TabuSearch(self.solution, 100)
def test_deep_copy(self):
self.assertEqual(self.tabu_search.instance.solution.fleet[0].route[0].id, self.tabu_search.best_instance.solution.fleet[0].route[0].id)
self.tabu_search.instance.solution.fleet[0].route[0].id = 666
self.assertNotEqual(self.tabu_search.instance.solution.fleet[0].route[0].id, self.tabu_search.best_instance.solution.fleet[0].route[0].id)
def test_get_sorted_edges(self):
edges = self.tabu_search.get_sorted_edges(self.tabu_search.instance.solution.fleet[0])
self.assertTrue(self.tabu_search.instance.distance_between(edges[0][0], edges[0][1]) <
self.tabu_search.instance.distance_between(edges[-1][0], edges[-1][1]))
def test_best_neighbours(self):
neighbours = self.tabu_search.best_neighbours(2)
self.assertTrue(neighbours[0][1]>neighbours[-1][1])
class TestTabuGeneral(unittest.TestCase):
def setUp(self):
raw_data = dm.Importer()
# raw_data.import_data("./tests/cvrp2.test")
# raw_data.import_data("./tests/ulysses-n16-k3.vrp")
# raw_data.import_data("./tests/E-n23-k3.vrp")
# raw_data.import_data("./tests/cvrp3.test")
# raw_data.import_data("./tests/P-n19-k2.vrp")
raw_data.import_data("./tests/E-n101-k14.vrp")
data = dm.DataMapper(raw_data)
self.instance = i.ProblemInstance(data)
self.solution = a.Solution(self.instance)
greedy = gf.GreedyFirst(self.solution.solution)
greedy.run(sort=False)
self.solution.value = self.solution.eval()
self.tabu_search = tabu.TabuSearch(self.solution, 100)
# def test_general(self):
# print("value before: " + str(self.tabu_search.best_instance.eval()))
# self.tabu_search.run()
# print("value after: " + str(self.tabu_search.best_instance.eval()))
if __name__ == "__main__":
unittest.main()
| 39.027027 | 146 | 0.674169 | 2,625 | 0.908934 | 0 | 0 | 0 | 0 | 0 | 0 | 721 | 0.249654 |
75d24f00bd3d394aa053d2de0806888649ac3eca
| 381 |
py
|
Python
|
hedge_hog/metric/__init__.py
|
otivedani/hedge_hog
|
62026e63b6bdc72cc4f0c984136712e6ee090f68
|
[
"MIT"
] | null | null | null |
hedge_hog/metric/__init__.py
|
otivedani/hedge_hog
|
62026e63b6bdc72cc4f0c984136712e6ee090f68
|
[
"MIT"
] | null | null | null |
hedge_hog/metric/__init__.py
|
otivedani/hedge_hog
|
62026e63b6bdc72cc4f0c984136712e6ee090f68
|
[
"MIT"
] | null | null | null |
"""
examples on scikit-image :
call :
from skimage.feature import blob_dog, blob_log, blob_doh
structure :
skimage
feature
__init__.py (from .blob import blob_dog, blob_log, blob_doh)
blob.py (contains blob_dog, blob_log, blob_doh)
conclusion :
    the name is importable from the package because __init__.py re-exports it from the module
"""
from .timemeter import timemeter
| 20.052632 | 69 | 0.692913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.900262 |
75d35dd0f6991525937dfc89b52855e73f47aaa9
| 1,941 |
py
|
Python
|
Chapter07/python/com/sparksamples/gradientboostedtrees/GradientBoostedTreesUtil.py
|
quguiliang/Machine-Learning-with-Spark-Second-Edition
|
0ba131e6c15a3de97609c6cb5d976806ccc14f09
|
[
"MIT"
] | 112 |
2017-05-13T15:44:29.000Z
|
2022-02-19T20:14:14.000Z
|
Chapter07/python/com/sparksamples/gradientboostedtrees/GradientBoostedTreesUtil.py
|
tophua/Machine-Learning-with-Spark-Second-Edition
|
0d93e992f6c79d55ad5cdcab735dbe6674143974
|
[
"MIT"
] | 1 |
2017-05-25T00:10:43.000Z
|
2017-05-25T00:10:43.000Z
|
Chapter07/python/com/sparksamples/gradientboostedtrees/GradientBoostedTreesUtil.py
|
tophua/Machine-Learning-with-Spark-Second-Edition
|
0d93e992f6c79d55ad5cdcab735dbe6674143974
|
[
"MIT"
] | 115 |
2017-05-06T10:49:00.000Z
|
2022-03-08T07:48:54.000Z
|
import numpy as np
from com.sparksamples.util import get_records
from com.sparksamples.util import get_mapping
from com.sparksamples.util import extract_features
from com.sparksamples.util import extract_label
from com.sparksamples.util import extract_features_dt
#from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.tree import GradientBoostedTrees
from pyspark.mllib.regression import LabeledPoint
from com.sparksamples.util import squared_log_error
__author__ = 'Rajdeep Dua'
def evaluate_gbt(train, test, numItr, lrRate, mxDepth, mxBins):
# def trainRegressor(cls, data, categoricalFeaturesInfo,
# loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
# maxBins=32):
gbt_model = GradientBoostedTrees.trainRegressor(train,categoricalFeaturesInfo={}, numIterations=numItr,
maxDepth=mxDepth, maxBins=mxBins, learningRate=lrRate)
predictions = gbt_model.predict(test.map(lambda x: x.features))
tp = test.map(lambda lp: lp.label).zip(predictions)
rms_le = np.sqrt(tp.map(lambda (t, p): squared_log_error(t, p)).mean())
return rms_le
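# evaluate_gbt above returns the root mean squared log error (RMSLE) of the
# trained GBT regressor on the test RDD; the tuple-unpacking lambdas are
# Python 2-only syntax.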
def get_train_test_data():
records = get_records()
records.cache()
    # extract all the categorical mappings
mappings = [get_mapping(records, i) for i in range(2,10)]
cat_len = sum(map(len, mappings))
num_len = len(records.first()[11:15])
data = records.map(lambda r: LabeledPoint(extract_label(r), extract_features(r, cat_len, mappings)))
#data_dt = records.map(lambda r: LabeledPoint(extract_label(r), extract_features_dt(r)))
data_with_idx = data.zipWithIndex().map(lambda (k, v): (v, k))
test = data_with_idx.sample(False, 0.2, 42)
train = data_with_idx.subtractByKey(test)
train_data = train.map(lambda (idx, p): p)
test_data = test.map(lambda (idx, p) : p)
return train_data, test_data
| 41.297872 | 107 | 0.714065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.187532 |
75d4809609a0cd8b60448ab7ac5fccbe7bba640b
| 5,010 |
py
|
Python
|
maze.py
|
vcxsd/muck-builder
|
12c1defbb816395a119da1992c1352d614d5507b
|
[
"MIT"
] | null | null | null |
maze.py
|
vcxsd/muck-builder
|
12c1defbb816395a119da1992c1352d614d5507b
|
[
"MIT"
] | null | null | null |
maze.py
|
vcxsd/muck-builder
|
12c1defbb816395a119da1992c1352d614d5507b
|
[
"MIT"
] | null | null | null |
import random
import yaml
class Grammar:
""" A simpler version of Tracery's ideas. """
def __init__( self, rules = None ):
self.rules = rules or { }
# To be pop()'d off by the caller.
self.saved = [ ]
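    # parse() expands [rule] references recursively, substituting a random
    # alternative from self.rules; e.g. with rules {'x': ['a']}, parse("[x]b")
    # yields "ab" (illustrative example, not taken from the source).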
def parse( self, string ):
if "[" in string or "]" in string:
fragments = [ ]
buffer = ''
brackets = False
for char in string:
if char == '[':
fragments += [ buffer ]
buffer = ''
if brackets:
raise Exception( "Grammar.parse: can't nest brackets" )
brackets = True
elif char == ']':
if not brackets:
raise Exception( "Grammar.parse: unmatched bracket" )
brackets = False
# Mechanism for saving what result we got: put a ! somewhere in the [ ]-surrounded text.
if buffer.replace( "!", "" ) in self.rules:
fragments += [ self.parse( random.choice( self.rules[buffer.replace( "!", "" )] ) ) ]
if "!" in buffer:
self.saved += [ fragments[-1] ]
buffer = ''
else:
raise Exception( "Grammar.parse: no such rule '" + buffer + "'." )
else:
buffer += char
if buffer != '':
fragments += [ buffer ]
return "".join( fragments )
else:
return string
def rule( self, rule, new = None ):
if new:
self.rules[rule] = new
else:
if rule in self.rules:
return self.rules[rule]
else:
return None
wallMaker = Grammar({
'wallMat': [ 'stone', 'rock', 'wood', 'paper', 'earth', 'crystal', 'leafy vagueness', 'sand', 'skin', 'bark', 'foliage', 'needles', 'delicate tiles', 'agate', 'quartz', 'glass', 'iron', 'copper' ],
'wallCond': [ 'dark', 'heavy', 'slick', 'moss-clung', 'twisted', 'fluted', 'greenish', 'dark', 'hot', 'lumpy', 'unsteady', 'slippery', 'geometrically flanged', 'sigil-eaten', 'consuming', 'blue', 'reddish', 'translucent', 'ultramarine', 'sky-blue', 'delicate pink', 'fuligin' ],
'walls': [ 'walls of [wallMat] close in; the way is [width].',
'[wallCond] walls of [wallMat] close in.',
'the walls are [wallCond] [wallMat]... the tunnels, [width].',
'all around, [wallCond] [wallMat].',
'all around, [wallMat].',
'there\'s [wallMat] everywhere here.',
'there\'s [wallMat] everywhere here. it\'s [wallCond].',
'[wallCond] [wallMat] all around.',
'the walls are made of [wallMat] here.',
'this place is built entirely of [wallMat].',
'it\'s very [wallCond] here.',
'[width], [wallCond].',
'[wallMat].',
'[wallCond].'],
'width': [ 'suffocatingly close', 'echoing', 'massive', 'wide', 'barely large enough to pass crawling', 'thin and straight', 'tall and narrow', 'tiny', 'spacious', 'vast' ],
'door': [ 'door', 'hatch', 'gate', 'opening', 'incision', 'grating', 'well', 'oubliette', 'tunnel', 'arch' ],
'doorMat': [ 'rock', 'oaken', 'papery', 'crystal', 'glass', 'iron', 'silver' ],
'hidden': [ 'half-hidden', 'in plain view', 'almost impossible to spot', 'staring you in the face', 'which can only be found by touch' ]
})
if __name__ == '__main__':
linkNames = [ "[N]orth;north;n", "[S]outh;south;s", "[E]ast;east;e", "[W]est;west;w", "[U]p;up;u" ]
project = { "projectName": "maze", "rooms": { } }
roomCount = 25
for i in range(0, roomCount):
desc = wallMaker.parse("[walls]\n\na [doorMat] [!door], [hidden].")
door = wallMaker.saved.pop( )
ID = "room-" + i.__str__()
project["rooms"][ ID ] = { "NAME": "Maze" }
project["rooms"][ ID ][ "LINKS" ] = { }
project["rooms"][ ID ][ "_/de" ] = desc
project["rooms"][ ID ][ "POSTSCRIPT" ] = { "BUILD": [ "@set here=D", "@tel here=#63" ] }
        # Each room shall have 2-4 links to other random rooms. Don't try to be consistent.
ln = linkNames.copy( )
random.shuffle(ln)
        for _ in range( 0, random.choice([ 2, 3, 3, 3, 3, 4, 4, 4 ]) ):
project["rooms"][ ID ][ "LINKS" ][ "room-" + random.choice( range(0, roomCount) ).__str__() ] = {
"NAME": ln.pop( ),
"succ": "You force your way through the " + door + ".",
"osucc": "forces their way through the " + door + ".",
"odrop": "emerges through an obscure way from some other part of the maze." }
with open("maze.gen.yaml", "w") as fh:
fh.write( yaml.dump( project ) )
print( "write: maze.gen.yaml (probably.)" )
| 35.531915 | 282 | 0.48982 | 1,831 | 0.365469 | 0 | 0 | 0 | 0 | 0 | 0 | 2,188 | 0.436727 |
75d6d6a2674761306d18d16bf5fb2a0d2ba911d3
| 543 |
py
|
Python
|
kratos/apps/trigger/views.py
|
cipher-ops/backend-kts
|
7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319
|
[
"MIT"
] | 1 |
2020-11-30T09:53:40.000Z
|
2020-11-30T09:53:40.000Z
|
kratos/apps/trigger/views.py
|
cipher-ops/backend-kts
|
7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319
|
[
"MIT"
] | null | null | null |
kratos/apps/trigger/views.py
|
cipher-ops/backend-kts
|
7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from kratos.apps.trigger import models, serializers
class TriggerViewSet(viewsets.GenericViewSet):
'''
    Trigger information
'''
serializer_class = serializers.TriggerRecordSerializer
queryset = models.TriggerRecord.objects.all()
def list(self, request):
'''
        List of trigger invocation records
'''
records = self.paginator.paginate_queryset(self.get_queryset(), self.request, view=self)
serializer = self.get_serializer(records, many=True)
return self.paginator.get_paginated_response(serializer.data)
| 30.166667 | 92 | 0.762431 | 469 | 0.838998 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.118068 |
75d7637d4de985450afeaf8267ea59deab8e6e61
| 478 |
py
|
Python
|
Module_04/ex00/test.py
|
CristinaFdezBornay/PythonPiscine
|
143968c2e26f5ddddb5114f3bcdddd0b1f00d153
|
[
"MIT"
] | 1 |
2021-11-17T10:04:30.000Z
|
2021-11-17T10:04:30.000Z
|
Module_04/ex00/test.py
|
CristinaFdezBornay/PythonPiscine
|
143968c2e26f5ddddb5114f3bcdddd0b1f00d153
|
[
"MIT"
] | null | null | null |
Module_04/ex00/test.py
|
CristinaFdezBornay/PythonPiscine
|
143968c2e26f5ddddb5114f3bcdddd0b1f00d153
|
[
"MIT"
] | null | null | null |
from FileLoader import FileLoader
tests = [
"non_existing_file.csv",
"empty_file.csv",
"../data/athlete_events.csv",
]
if __name__=="__main__":
for test in tests:
print(f"==> TESTING {test}")
fl = FileLoader()
print(f"\n=> Loading file")
df = fl.load(test)
print(f"\n=> Display first 3 rows")
fl.display(df, 3)
print(f"\n=> Display lasts 3 rows")
fl.display(df, -3)
input("====>\n\n")
| 20.782609 | 43 | 0.541841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.387029 |
75d9805219c61b5aa264d0f163f779ea93a814b4
| 492 |
py
|
Python
|
lib/watchlists.py
|
nickcamel/IgApi
|
19717cb8f3aea88adf060d8dad4762f8cd81e584
|
[
"MIT"
] | 1 |
2021-10-02T00:30:17.000Z
|
2021-10-02T00:30:17.000Z
|
lib/watchlists.py
|
nickcamel/IgApi
|
19717cb8f3aea88adf060d8dad4762f8cd81e584
|
[
"MIT"
] | null | null | null |
lib/watchlists.py
|
nickcamel/IgApi
|
19717cb8f3aea88adf060d8dad4762f8cd81e584
|
[
"MIT"
] | null | null | null |
# REF: https://labs.ig.com/rest-trading-api-reference
class Watchlists:
"""
DO NOT CHANGE
Adding is ok ... and encouraged ;)
"""
base = {
'path': 'watchlists',
'GET': {
'version': '1',
'tokens': True,
}
# Not supported yet: 'POST'
}
id = {
'path': 'watchlists/',
'GET': {
'version': '1',
'tokens': True,
}
# Not supported yet: 'PUT', 'DELETE'
}
| 18.923077 | 53 | 0.422764 | 435 | 0.884146 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.550813 |
75d9f90c2f975ea4a2ae0b3fd9a26571c68ea1e6
| 21,236 |
py
|
Python
|
MBC_ER_status/Ulz_pipeline/downloads/run_tf_analyses_from_bam.py
|
adoebley/Griffin_analyses
|
94a8246b45c3ebbf255cffaa60b97e7e05d5de78
|
[
"BSD-3-Clause-Clear"
] | 6 |
2021-10-05T10:32:32.000Z
|
2022-03-03T15:38:38.000Z
|
MBC_ER_status/Ulz_pipeline/downloads/run_tf_analyses_from_bam.py
|
adoebley/Griffin_analyses
|
94a8246b45c3ebbf255cffaa60b97e7e05d5de78
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
MBC_ER_status/Ulz_pipeline/downloads/run_tf_analyses_from_bam.py
|
adoebley/Griffin_analyses
|
94a8246b45c3ebbf255cffaa60b97e7e05d5de78
|
[
"BSD-3-Clause-Clear"
] | 1 |
2021-11-03T07:19:16.000Z
|
2021-11-03T07:19:16.000Z
|
#!/usr/bin/env python
# coding: utf-8
#AL - the above code is new for the griffin paper version
#modified print commands for python3
# Analyze all possible things from BAM-file
import sys
import argparse
from subprocess import call
import numpy
import scipy
import scipy.stats
import os.path
import os
import glob
# Parse command line arguments ###################################################################################
parser = argparse.ArgumentParser(description='Analyze epigenetic traces in cfDNA')
parser.add_argument('-b','--bam', dest='bam_file',
help='BAM file',required=True)
parser.add_argument('-o','--output', dest='name',
help='Output name for files and directory',required=True)
parser.add_argument('-cov','--mean-coverage', dest='mean_coverage',
help='Mean coverage along the genome [default:1]',default=1,type=float)
parser.add_argument('-ylimit','--plot-y-limit', dest='ylimit',
help='Plotting until this limit on y-axis [default:1.5]',default=1.5,type=float)
parser.add_argument('-norm-file','--normalize-file', dest='norm_log2',
help='Normalize by local copynumber from this file')
parser.add_argument('-calccov','--calculate-mean-coverage', dest='calc_cov',
help='Specify whether genome read depths should be calculated',action="store_true")
parser.add_argument('-hg38','--hg38', dest='hg38',
help='Use hg38 coordinates [default: hg19]',action="store_true")
parser.add_argument('-a','--analysis', dest='analysis',
help='Specify type of analysis (all|enhancer|histone|tf|ctcf|...)',required=True)
parser.add_argument('-tf','--trans-factor', dest='tf',
help='Specify transcription factor for VirChip data')
args = parser.parse_args()
####################################################################################################
# setup structure
print ("Setup structure") # AL mod
if not os.path.isdir(args.name):
os.mkdir(args.name)
####################################################################################################
# get genomewide coverage from bedtools genomecoverage
if args.calc_cov:
#AL added if/else
if os.path.isfile(args.name.rstrip("/")+"/"+args.name+".coverage"):
print('cov already complete')
else:
#AL tabbed over this section to add it to the if/else statement but did not change it
print ("Calc avg. coverage") # AL mod
OUTPUT=open(args.name.rstrip("/")+"/"+args.name+".coverage","w")
if args.hg38:
call(["./Software/bedtools","genomecov","-ibam",args.bam_file,"-g","./Ref/hg38.chrom_sizes.txt"],stdout=OUTPUT)
else:
call(["./Software/bedtools","genomecov","-ibam",args.bam_file,"-g","./Ref/hg19.chrom_sizes.txt"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT=open(args.name.rstrip("/")+"/"+args.name+".short_coverage","w")
call(["./Scripts/get_avg_coverage.py",args.name.rstrip("/")+"/"+args.name+".coverage"],stdout=OUTPUT)
OUTPUT.close()
#end AL edits
INPUT = open(args.name.rstrip("/")+"/"+args.name+".short_coverage","r")
avg_coverage = 1
for line in INPUT.readlines():
chrom,cov = line.rstrip().split("\t")
if chrom == "genome":
avg_coverage = cov
INPUT.close()
else:
print ("Skipping genomewide-coverage calculation using mean coverage: "+str(args.mean_coverage)) # AL mod
avg_coverage = args.mean_coverage
####################################################################################################
# print statistics:
print ("Write Logs") # AL mod
OUT=open(args.name.rstrip("/")+"/log.txt","w")
OUT.write("BAM:\t"+args.bam_file+"\n")
OUT.write("Norm File:\t"+args.norm_log2+"\n")
OUT.write("cov:\t"+str(avg_coverage)+"\n")
OUT.write("analysis:\t"+args.analysis+"\n")
OUT.close()
####################################################################################################
# get chromosome coverage from output of bedtools genomecoverage
def getChromCoverage(chromosome,args):
print (args.name.rstrip("/")+"/"+args.name+".short_coverage") # AL mod
if not os.path.isfile(args.name.rstrip("/")+"/"+args.name+".short_coverage"):
print ("Coverage file not found") # AL mod
sys.exit(1)
INPUT = open(args.name.rstrip("/")+"/"+args.name+".short_coverage","r")
avg_coverage = 1
found = False
for line in INPUT.readlines():
chrom,cov = line.rstrip().split("\t")
if chrom == chromosome:
avg_coverage = cov
found = True
INPUT.close()
if found:
return avg_coverage
else:
print ("Chromosome not found") # AL mod
sys.exit(1)
####################################################################################################
# CTCF analysis
def ctcf(args,avg_coverage):
print ("Analyze CTCF sites") # AL mod
if not os.path.isdir(args.name.rstrip("/")+"/CTCF"):
os.mkdir(args.name.rstrip("/")+"/CTCF")
#OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_In_Insulated_Neighbourhoods.tss","w")
#call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/FIMO_ChIP_CTCF_at_Insulated_Neighbourhoods.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
#OUTPUT.close()
#OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_Outside_Insulated_Neighbourhoods.tss","w")
#call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/FIMO_ChIP_CTCF_outside_Insulated_Neighbourhoods.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
#OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_In_Insulated_Neighbourhoods.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.Insulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_Outside_Insulated_Neighbourhoods.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.NonInsulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_In_Insulated_Neighbourhoods.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.50perc.Insulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_Outside_Insulated_Neighbourhoods.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.GTRD.50perc.NonInsulated.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_ultraconserved.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/Ultraconserved_CTCF.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_proximalTSS.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.hg19.sorted.bed.proximal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_distalTSS.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.hg19.sorted.bed.distal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_proximalTSS.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.50perc_hg19.sorted.bed.proximal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/CTCF"+"/CTCF_GTRD_50perc_distalTSS.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/CTCF/CTCF.50perc_hg19.sorted.bed.distal.bed","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000","-m","100000"],stdout=OUTPUT)
OUTPUT.close()
#call(["Rscript","./Scripts/plot_MotifCoverage.R",args.name.rstrip("/")+"/CTCF"+"/CTCF_In_Insulated_Neighbourhoods.tss",args.name.rstrip("/")+"/CTCF"+"/CTCF_In_Insulated_Neighbourhoods.png","TADs","0",str(args.ylimit)])
#call(["Rscript","./Scripts/plot_MotifCoverage.R",args.name.rstrip("/")+"/CTCF"+"/CTCF_Outside_Insulated_Neighbourhoods.tss",args.name.rstrip("/")+"/CTCF"+"/CTCF_Outside_Insulated_Neighbourhoods.png",
# "NonTADs","0",str(args.ylimit)])
#call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/CTCF"+"/CTCF_In_Insulated_Neighbourhoods.tss",args.name.rstrip("/")+"/CTCF"+"/CTCF_Outside_Insulated_Neighbourhoods.tss",
# args.name.rstrip("/")+"/CTCF"+"/CTCF_TADs.png","CTCF sites in TAD boundaries","CTCF sites outside TAD boundaries","0",str(args.ylimit)])
#########################################################################
def tf_gtrd_1000sites(args,avg_coverage):
print("Analyze Transcription factors GTRD")
if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors"):
os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors")
if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"):
os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites")
target_list = glob.glob("./Ref/GTRD_1000sites/*.bed")
for tf in target_list:
tf_name = os.path.basename(tf[:-4])
if os.path.isfile(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".tss"):
print("Skip "+tf_name)
continue
OUTPUT = open(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-m","100000","-limit","30","-bed",tf,"-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
call(["Rscript","./Scripts/plot_MotifCoverage.R",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".tss",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only_1000sites"+"/"+tf_name+".png",tf_name,"0",str(args.ylimit)])
#call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/TranscriptionFactors/ENCODE_ChIP"+"/"+tf+".tss","./Ref/TranscriptionFactors/MergedMaleProfiles/"+tf+".tss",
# args.name.rstrip("/")+"/TranscriptionFactors/ENCODE_ChIP/"+tf+"_control.png",tf+" ("+args.name+")",tf+" (MergedMale)","0",str(args.ylimit)])
#########################################################################
def tf_gtrd(args,avg_coverage):
print ("Analyze Transcription factors GTRD") # AL mod
if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors"):
os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors")
if not os.path.isdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"):
os.mkdir(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only")
if args.hg38:
target_list = glob.glob("./Ref/GTRD/hg38/*hg38.bed")
else:
target_list = glob.glob("./Ref/GTRD/*hg19.bed")
for tf in target_list:
tf_name = os.path.basename(tf[:-4])
if os.path.isfile(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".tss"):
print ("Skip "+tf_name) # AL mod
continue
OUTPUT = open(args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-m","100000","-limit","30","-bed",tf,"-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
call(["Rscript","./Scripts/plot_MotifCoverage.R",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".tss",args.name.rstrip("/")+"/TranscriptionFactors/GTRD_ChIP_Only"+"/"+tf_name+".png",tf_name,"0",str(args.ylimit)])
#call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/TranscriptionFactors/ENCODE_ChIP"+"/"+tf+".tss","./Ref/TranscriptionFactors/MergedMaleProfiles/"+tf+".tss",
# args.name.rstrip("/")+"/TranscriptionFactors/ENCODE_ChIP/"+tf+"_control.png",tf+" ("+args.name+")",tf+" (MergedMale)","0",str(args.ylimit)])
#########################################################################
# TSS
def tss(args,avg_coverage):
print ("Analyze HK vs. Unexpr. TSS") # AL mod
if not os.path.isdir(args.name.rstrip("/")+"/TSS"):
os.mkdir(args.name.rstrip("/")+"/TSS")
OUTPUT = open(args.name.rstrip("/")+"/TSS"+"/HK_APPRIS_isoforms.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TSS/Housekeeping_APPRIS_isos_hg19_positions.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/TSS"+"/HK.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TSS/Housekeeping.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/TSS"+"/FANTOM_lower01.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TSS/Fantomlower01.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/TSS/HK.tss",args.name.rstrip("/")+"/TSS/FANTOM_lower01.tss",
args.name.rstrip("/")+"/TSS/HK_vs_Unexpr.png","Housekeeping TSS","Unexpressed TSS","0",str(args.ylimit)])
####################################################################################################
# AndrogenReceptor
def androgen(args,avg_coverage):
print ("Analyze Androgen Receptor Binding sites") # AL mod
if not os.path.isdir(args.name.rstrip("/")+"/AR"):
os.mkdir(args.name.rstrip("/")+"/AR")
OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_TARBS_All.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/TARBS.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_NARBS_All.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/NARBS.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_TARBS_GTRD_All.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_TARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_NARBS_GTRD_All.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_NARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_TARBS_GTRD_50perc.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_50perc_TARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
OUTPUT = open(args.name.rstrip("/")+"/AR"+"/AR_NARBS_GTRD_50perc.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed","./Ref/TranscriptionFactors/AndrogenReceptor/AR_GTRD_50perc_NARBS_intersect.bed","-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/AR/AR_TARBS_All.tss",args.name.rstrip("/")+"/AR/AR_NARBS_All.tss",
args.name.rstrip("/")+"/TSS/TARBS_vs_ARBS.png","T-AR binding sites","N-AR binding sites","0",str(args.ylimit)])
call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/AR/AR_TARBS_GTRD_All.tss",args.name.rstrip("/")+"/AR/AR_TARBS_GTRD_All.tss",
args.name.rstrip("/")+"/TSS/TARBS_vs_ARBS_GTRDintersect.png","T-AR binding sites (GTRD intersect)","N-AR binding sites (GTRD intersect)","0",str(args.ylimit)])
call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/AR/AR_TARBS_GTRD_50perc.tss",args.name.rstrip("/")+"/AR/AR_NARBS_GTRD_50perc.tss",
args.name.rstrip("/")+"/TSS/TARBS_vs_ARBS_GTRDintersect_50perc.png","T-AR binding sites (GTRD intersect,50perc)","N-AR binding sites (GTRD intersect,50perc)","0",str(args.ylimit)])
####################################################################################################
# Check for binding sites proximal and distal to Transcription start sites
def tf_tss(args,avg_coverage):
print ("Analyze distal and proximal TF binding sites") # AL mod
if not os.path.isdir(args.name.rstrip("/")+"/TSS_TF"):
os.mkdir(args.name.rstrip("/")+"/TSS_TF")
target_list = glob.glob("./Ref/TranscriptionFactors/TSS_intersects/*distal.bed")
for tf in target_list:
tf_name = os.path.basename(tf[:-10])
proximal_tf = tf[:-10]+"proximal.bed"
if os.path.isfile(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"distal.tss"):
print ("Skip "+tf_name) # AL mod
continue
OUTPUT = open(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"distal.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed",tf,"-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
if os.path.isfile(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"proximal.tss"):
print ("Skip "+tf_name) # AL mod
continue
OUTPUT = open(args.name.rstrip("/")+"/TSS_TF/"+tf_name+"proximal.tss","w")
call(["./Scripts/analyze_coverage_around_position_log2_pysam.py","-bed",proximal_tf,"-m","100000","-cov",str(avg_coverage),"-norm","-norm-file",args.norm_log2,"-b",args.bam_file,"-s","1000","-e","1000"],stdout=OUTPUT)
OUTPUT.close()
call(["Rscript","./Scripts/plot_MotifCoverage_2sample.R",args.name.rstrip("/")+"/TSS_TF/"+tf_name+"distal.tss",args.name.rstrip("/")+"/TSS_TF/"+tf_name+"proximal.tss",
args.name.rstrip("/")+"/TSS_TF/"+tf_name+".png",tf_name+" distal to TSS (>2kbp)",tf_name+" proximal to TSS (<2kbp)","0",str(args.ylimit)])
####################################################################################################
if args.analysis == "all":
ctcf(args,avg_coverage)
    tf_gtrd(args,avg_coverage)
tss(args,avg_coverage)
elif args.analysis == "tss":
tss(args,avg_coverage)
elif args.analysis == "androgen":
androgen(args,avg_coverage)
elif args.analysis == "ctcf":
ctcf(args,avg_coverage)
elif args.analysis == "tf_gtrd":
tf_gtrd(args,avg_coverage)
elif args.analysis == "tf_gtrd_1000sites":
tf_gtrd_1000sites(args,avg_coverage)
else:
print ("Unknown analysis type") # AL mod
print (" Use any of:") # AL mod
print (" -) all") # AL mod
print (" -) ctcf") # AL mod
print (" -) androgen") # AL mod
print (" -) tf_gtrd") # AL mod
print (" -) tf_gtrd_1000sites") # AL mod
print (" -) tf_tss") # AL mod
| 71.743243 | 290 | 0.638915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,425 | 0.585091 |
75da6f1e542a2683ea21908b7c192b05e4167dbd
| 1,094 |
py
|
Python
|
Day 3/login-registrasi.py
|
adamsaparudin/python-datascience
|
1b4164bb8a091f88def950f07108fe023737399c
|
[
"MIT"
] | null | null | null |
Day 3/login-registrasi.py
|
adamsaparudin/python-datascience
|
1b4164bb8a091f88def950f07108fe023737399c
|
[
"MIT"
] | null | null | null |
Day 3/login-registrasi.py
|
adamsaparudin/python-datascience
|
1b4164bb8a091f88def950f07108fe023737399c
|
[
"MIT"
] | null | null | null |
import sys
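# Simple console registration/login demo; prompts are in Indonesian
# ("Masukan username/password" = enter username/password, "Pilih menu" =
# choose a menu option, "Anda berhasil login" = you logged in successfully).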
def registrasi():
username = input("Masukan username: ")
password = input("Masukan password: ")
return username, password
def login(username, password): # list_user,
input_user = input("Masukan username: ")
input_pass = input("Masukan password: ")
if username == input_user and password == input_pass:
print ("Anda berhasil login")
else:
print("Salah username dan password")
def print_menu():
print("Pilih menu untuk melakukan aksi")
print("1. Registrasi")
print("2. Login")
print("3. Keluar")
def main():
# list_user = []
username = ""
password = ""
while True:
print_menu()
menu = int(input("Pilihan menu: "))
if (menu == 1):
username, password = registrasi()
# "skks".capitalize() # SKKS
# list_user.append({'username': username, 'password': password})
elif (menu == 2):
login(username, password)
elif (menu == 3):
sys.exit()
else:
print("Masukan pilihan menu yang benar")
main()
| 27.35 | 76 | 0.576782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.340037 |
75db1e4b6ac368d1004f97e5c6edf9221b06b01a
| 7,631 |
py
|
Python
|
lagen/nu/regeringenlegacy.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 18 |
2015-03-12T17:42:44.000Z
|
2021-12-27T10:32:22.000Z
|
lagen/nu/regeringenlegacy.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 13 |
2016-01-27T10:19:07.000Z
|
2021-12-13T20:24:36.000Z
|
lagen/nu/regeringenlegacy.py
|
redhog/ferenda
|
6935e26fdc63adc68b8e852292456b8d9155b1f7
|
[
"BSD-2-Clause"
] | 6 |
2016-11-28T15:41:29.000Z
|
2022-01-08T11:16:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
# this repo overrides ferenda.sources.legal.se.Regeringen to work
# against old downloaded
import re
import codecs
# from urllib.parse import urljoin
from rdflib import URIRef
from rdflib.namespace import SKOS
from ferenda.sources.legal.se import Regeringen, RPUBL
from ferenda.sources.legal.se.direktiv import DirRegeringen
from ferenda.sources.legal.se.sou import SOURegeringen
from ferenda.sources.legal.se.ds import Ds
from ferenda.sources.legal.se.propositioner import PropRegeringen
from ferenda.compat import urljoin
from . import SameAs
class RegeringenLegacy(Regeringen):
source_encoding = "iso-8859-1"
def download(self, basefile=None):
return False
def downloaded_to_intermediate(self, basefile, attachment=None):
return codecs.open(self.store.downloaded_path(basefile), encoding=self.source_encoding)
# override just some of the methods to parse the HTML index page
def extract_metadata(self, rawhead, basefile):
content = rawhead
title = content.find("h1").string
identifier_node = content.find("p", "lead")
if identifier_node:
identifier = identifier_node.text
else:
identifier = "" # infer_metadata calls infer_identifier
# if this is falsy, which will be good
# enough. No need to warn.
definitions = content.find("dl", "definitions")
ansvarig = None
if definitions:
for dt in definitions.find_all("dt"):
key = dt.get_text(strip=True)
value = dt.find_next_sibling("dd").get_text(strip=True)
if key in ("Utgiven:", "Publication date:"):
utgiven = self.parse_swedish_date(value)
elif key in ("Avsändare:",):
ansvarig = value
sammanfattning = None
if content.find("h2", text="Sammanfattning"):
sums = content.find("h2", text="Sammanfattning").find_next_siblings("p")
# "\n\n" doesn't seem to survive being stuffed in a rdfa
# content attribute. Replace with simple space.
sammanfattning = " ".join([x.get_text(strip=True) for x in sums])
# find related documents
re_basefile = re.compile(r'\d{4}(|/\d{2,4}):\d+')
# legStep1=Kommittedirektiv, 2=Utredning, 3=lagrådsremiss,
# 4=proposition. Assume that relationships between documents
# are reciprocal (ie if the page for a Kommittedirektiv
# references a Proposition, the page for that Proposition
# references the Kommittedirektiv.
elements = {self.KOMMITTEDIREKTIV: [],
self.DS: ["legStep1"],
self.PROPOSITION: ["legStep1", "legStep2"],
self.SOU: ["legStep1"]}[self.document_type]
utgarFran = []
for elementid in elements:
box = content.find(id=elementid)
if not box:
continue
for listitem in box.find_all("li"):
if not listitem.find("span", "info"):
continue
infospans = [x.text.strip(
) for x in listitem.find_all("span", "info")]
rel_basefile = None
rel_identifier = None
for infospan in infospans:
if re_basefile.search(infospan):
# scrub rel_identifier ("Dir. 2008:50" -> "2008:50" etc)
rel_basefile = re_basefile.search(infospan).group()
rel_identifier = infospan
if not rel_basefile:
# this often means that a non-standard document
# type is used as preparatory work for this
# document (eg department memos not published in
# Ds, like "S2013/8074/PBB" -- seems to be common
# in Socialdepartementet and Finansdepartementet)
self.log.warning(
"%s: Couldn't find rel_basefile (elementid #%s) among %r" % (basefile, elementid, infospans))
continue
attribs = {"rpubl:arsutgava": basefile.split(":")[0],
"rpubl:lopnummer": basefile.split(":")[1]}
if elementid == "legStep1":
attribs["rdf:type"] = RPUBL.Kommittedirektiv
elif elementid == "legStep2":
attribs["rdf:type"] = RPUBL.Utredningsbetankande
if rel_identifier.startswith("SOU"):
altlabel = "SOU"
elif rel_identifier.startswith(("Ds", "DS")):
altlabel = "Ds"
else:
self.log.warning(
"%s: Cannot find out what type of document the linked %s is (#%s)" % (basefile, rel_identifier, elementid))
continue
attribs["rpubl:utrSerie"] = self.lookup_resource(altlabel, SKOS.altLabel)
elif elementid == "legStep3":
attribs["rdf:type"] = RPUBL.Proposition
uri = self.minter.space.coin_uri(self.attributes_to_resource(attribs))
utgarFran.append(uri)
# find related pages
related = content.find("h2", text="Relaterat")
seealso = []
if related:
for link in related.findParent("div").find_all("a"):
r = urljoin("http://www.regeringen.se/", link["href"])
seealso.append(URIRef(r))
a = self.metadata_from_basefile(basefile)
a.update({'dcterms:title': title,
'dcterms:identifier': identifier,
'dcterms:issued': utgiven,
'rpubl:utgarFran': utgarFran
})
if ansvarig:
a["rpubl:departement"] = ansvarig
if seealso:
a["rdfs:seeAlso"] = seealso
if sammanfattning:
a['dcterms:abstract'] = sammanfattning
return a
def find_doc_links(self, soup, basefile):
files = []
docsection = soup.find('div', 'doc')
if docsection:
for li in docsection.find_all("li", "pdf"):
link = li.find('a')
m = re.match(r'/download/(\w+\.pdf).*', link['href'], re.IGNORECASE)
if not m:
continue
pdfbasefile = m.group(1)
files.append((pdfbasefile, link.string))
selected = self.select_files(files)
self.log.debug("selected %s out of %d pdf files" % (", ".join([x[0] for x in selected]), len(files)))
return selected
def source_url(self, basefile):
# as the old site is gone, there is no possible URL we can
# return here.
return None
class DirRegeringenLegacy(RegeringenLegacy, SameAs, DirRegeringen):
alias = "dirregeringen.legacy"
class SOURegeringenLegacy(RegeringenLegacy, SameAs, SOURegeringen):
alias = "souregeringen.legacy"
def sanitize_identifier(self, identifier):
from ferenda.sources.legal.se.sou import sou_sanitize_identifier
return sou_sanitize_identifier(identifier)
class DsRegeringenLegacy(RegeringenLegacy, SameAs, Ds):
alias = "dsregeringen.legacy"
class PropRegeringenLegacy(RegeringenLegacy, SameAs, PropRegeringen):
alias = "propregeringen.legacy"
| 40.375661 | 135 | 0.576727 | 6,912 | 0.905542 | 0 | 0 | 0 | 0 | 0 | 0 | 1,934 | 0.253374 |
75de5bb6fec2ff1e86bf17bc2c0b9b36441cdf30
| 211 |
py
|
Python
|
reverse_geocode_test.py
|
falcaopetri/trajectory-data
|
7f81343086ccd00d3d9f52899a7032d987fc0a66
|
[
"MIT"
] | 1 |
2019-05-21T15:52:28.000Z
|
2019-05-21T15:52:28.000Z
|
reverse_geocode_test.py
|
falcaopetri/trajectory-data
|
7f81343086ccd00d3d9f52899a7032d987fc0a66
|
[
"MIT"
] | null | null | null |
reverse_geocode_test.py
|
falcaopetri/trajectory-data
|
7f81343086ccd00d3d9f52899a7032d987fc0a66
|
[
"MIT"
] | 1 |
2020-08-18T14:38:52.000Z
|
2020-08-18T14:38:52.000Z
|
import reverse_geocode
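# Each search() call takes a list of (latitude, longitude) tuples and returns
# the nearest known place for each, using the library's built-in offline
# city database.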
reverse_geocode.search([(35.6963860567411,139.686436661882)])
reverse_geocode.search([(-33.8236171057086,151.021885871887)])
reverse_geocode.search([(47.3111740195794,8.52681624913163)])
| 35.166667 | 62 | 0.824645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75e1a861f6479f18e30cc2201832823b09eb3ea9
| 803 |
py
|
Python
|
src/data.py
|
voschezang/ABM
|
523fcf30000057e73ba93f5a500d8896c945a35f
|
[
"MIT"
] | null | null | null |
src/data.py
|
voschezang/ABM
|
523fcf30000057e73ba93f5a500d8896c945a35f
|
[
"MIT"
] | null | null | null |
src/data.py
|
voschezang/ABM
|
523fcf30000057e73ba93f5a500d8896c945a35f
|
[
"MIT"
] | null | null | null |
from mesa.datacollection import DataCollector
### datacollection functions
def density(model):
"""Density: number of cars per unit length of road."""
return len(model.schedule.agents) / model.space.length
def flow(model):
"""Flow: number of cars passing a reference point per unit of time."""
# get the flow in the current timestep
flow_in_timestep = model.data.flow
# reset flow counter
model.data.flow = 0
return flow_in_timestep / model.space.n_lanes
class Data(DataCollector):
def __init__(self, flow_reference_point):
super().__init__(model_reporters={
#"Density": density,
"Flow": flow
})
        # set up data collection variables
self.flow_reference_point = flow_reference_point
self.flow = 0
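# --- Hedged usage sketch, not part of the original module ---
# A minimal illustration of how this collector could be attached to a mesa
# model (classic, pre-3.0 mesa API assumed). RoadModel below is a hypothetical
# stand-in: only the pieces the collector touches (space.length, space.n_lanes,
# schedule, data.flow) are stubbed out.
if __name__ == "__main__":
    from types import SimpleNamespace
    from mesa import Model
    from mesa.time import SimultaneousActivation
    class RoadModel(Model):
        def __init__(self, flow_reference_point):
            super().__init__()
            self.space = SimpleNamespace(length=100, n_lanes=2)
            self.schedule = SimultaneousActivation(self)
            self.data = Data(flow_reference_point)
        def step(self):
            self.schedule.step()
            self.data.collect(self)  # records "Flow" for this step
    model = RoadModel(flow_reference_point=50)
    model.step()
    print(model.data.get_model_vars_dataframe())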
| 25.903226 | 74 | 0.674969 | 308 | 0.383562 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.337484 |
75e2178969612f0c7284d059eb5edd0c7915d7e5
| 2,850 |
py
|
Python
|
lambda_assistant/mysql/client_handler.py
|
matiasvallejosdev/py-aws-lambda-handlers
|
4643042bc02e557bb4a2953118de5f4eb5320d70
|
[
"Apache-2.0"
] | null | null | null |
lambda_assistant/mysql/client_handler.py
|
matiasvallejosdev/py-aws-lambda-handlers
|
4643042bc02e557bb4a2953118de5f4eb5320d70
|
[
"Apache-2.0"
] | null | null | null |
lambda_assistant/mysql/client_handler.py
|
matiasvallejosdev/py-aws-lambda-handlers
|
4643042bc02e557bb4a2953118de5f4eb5320d70
|
[
"Apache-2.0"
] | null | null | null |
import pymysql
import logging
from lambda_assistant.handlers.event_handler import EventHandler
from lambda_assistant.errors import *
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Select():
def Select(self, handler: EventHandler, conn: pymysql.connections.Connection, sql: str):
try:
result = {}
# Execute SQL command
with conn.cursor() as cur:
cur.execute(sql)
row_headers=[x[0] for x in cur.description] #this will extract row headers
for index, row in enumerate(cur):
result[index] = dict(zip(row_headers, row))
# Commit changes
conn.commit()
return result
except Exception as e:
handler.performError(GetDataFailedError())
logger.error(handler.lambdaError.toPrint())
logger.error(e)
return handler.lambdaError.toDict()
class Delete():
def Delete(self, handler: EventHandler, conn:pymysql.connections.Connection, sql: str, sql_recheckidentity: str):
try:
result = {}
# Execute SQL command
with conn.cursor() as cur:
cur.execute(sql)
cur.execute(sql_recheckidentity)
# Commit changes
conn.commit()
return result
except Exception as e:
handler.performError(DeleteDataFailedError())
logger.error(handler.lambdaError.toPrint())
logger.error(e)
return handler.lambdaError.toDict()
class Insert():
def Insert(self, handler: EventHandler, conn: pymysql.connections.Connection, sql: str, get_id=False):
try:
result = {}
# Execute SQL command
with conn.cursor() as cur:
cur.execute(sql)
if get_id:
id = int(cur.lastrowid)
result['id_inserted'] = id
# Commit changes
conn.commit()
return result
except Exception as e:
handler.performError(PutDataFailedError())
logger.error(handler.lambdaError.toPrint())
logger.error(e)
return handler.lambdaError.toDict()
class MySqlHandler(Select, Delete, Insert):
def __init__(self, db_name, rds_host, db_username, db_password):
self.rds_host = rds_host
self.db_name = db_name
self.db_username = db_username
self.db_password = db_password
def Connect(self):
conn = pymysql.connect(host=self.rds_host, user=self.db_username, passwd=self.db_password, db=self.db_name, connect_timeout=5)
return conn
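# --- Hedged usage sketch, not part of the original module ---
# Intended call pattern for MySqlHandler given an already constructed
# EventHandler (its construction is project-specific and omitted here).
# The environment variable names and the SQL text are illustrative only.
import os
def example_query(handler: EventHandler):
    db = MySqlHandler(
        db_name=os.environ.get("DB_NAME", "mydb"),
        rds_host=os.environ.get("RDS_HOST", "localhost"),
        db_username=os.environ.get("DB_USER", "admin"),
        db_password=os.environ.get("DB_PASS", ""),
    )
    conn = db.Connect()
    try:
        return db.Select(handler, conn, "SELECT id, name FROM customers")
    finally:
        conn.close()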
| 35.185185 | 135 | 0.565965 | 2,630 | 0.922807 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.063158 |
75e66488020f917b36c64a0fe8d0a2a1ac18f43c
| 1,280 |
py
|
Python
|
yatube/posts/models.py
|
PabloKor/Yatube
|
5c835e9d66a29e95781f08a87a102ec017fbc91b
|
[
"MIT"
] | null | null | null |
yatube/posts/models.py
|
PabloKor/Yatube
|
5c835e9d66a29e95781f08a87a102ec017fbc91b
|
[
"MIT"
] | null | null | null |
yatube/posts/models.py
|
PabloKor/Yatube
|
5c835e9d66a29e95781f08a87a102ec017fbc91b
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
User = get_user_model()
class Post(models.Model):
text = models.TextField(verbose_name='Текст')
pub_date = models.DateTimeField(auto_now_add=True, verbose_name="date published")
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='posts')
group = models.ForeignKey('Group', on_delete=models.CASCADE,
blank=True,
null=True,
related_name='posts',
verbose_name='Группа')
image = models.ImageField(upload_to='posts/', blank=True, null=True)
def __str__(self):
return self.text
class Group(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=100, unique=True)
description = models.TextField(blank=True, null=True)
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments')
text = models.TextField()
created = models.DateTimeField(auto_now_add=True)
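# --- Hedged usage sketch, not part of the original module ---
# Illustrates how these models relate to each other; meant for a Django shell
# (`python manage.py shell`) after migrations, not for import time. The
# "posts" import path is inferred from the app layout and may differ.
#
#   from django.contrib.auth import get_user_model
#   from posts.models import Post, Group, Comment
#
#   author = get_user_model().objects.create_user(username="demo")
#   group = Group.objects.create(title="Demo group", slug="demo")
#   post = Post.objects.create(text="Hello", author=author, group=group)
#   Comment.objects.create(post=post, author=author, text="First!")
#   post.comments.count()  # -> 1, via the related_name on Comment.post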
| 34.594595 | 87 | 0.670313 | 1,154 | 0.893881 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.090627 |
75e695aeba900a9af2ded444426e995f02d6bb1e
| 1,508 |
py
|
Python
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/AMD/blend_minmax_factor.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 210 |
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/AMD/blend_minmax_factor.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 72 |
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/AMD/blend_minmax_factor.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 64 |
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension AMD.blend_minmax_factor
This module customises the behaviour of the
OpenGL.raw.GL.AMD.blend_minmax_factor to provide a more
Python-friendly API
Overview (from the spec)
The EXT_blend_minmax extension extended the GL's blending functionality
to allow the blending equation to be specified by the application. That
extension introduced the MIN_EXT and MAX_EXT blend equations, which caused the
result of the blend equation to become the minimum or maximum of the source
color and destination color, respectively.
The MIN_EXT and MAX_EXT blend equations, however, do not include the source
or destination blend factors in the arguments to the min and max functions.
This extension provides two new blend equations that produce the minimum
or maximum of the products of the source color and source factor, and the
destination color and destination factor.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/AMD/blend_minmax_factor.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.AMD.blend_minmax_factor import *
from OpenGL.raw.GL.AMD.blend_minmax_factor import _EXTENSION_NAME
def glInitBlendMinmaxFactorAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
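# --- Hedged usage sketch, not part of the original module ---
# Guard on the extension before selecting a factor-aware min blend equation.
# Requires a current GL context; the GL_FACTOR_MIN_AMD token name is taken
# from the extension spec and should be exposed by the raw import above.
#
#   from OpenGL.GL import glEnable, glBlendFunc, glBlendEquation, \
#       GL_BLEND, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
#
#   if glInitBlendMinmaxFactorAMD():
#       glEnable(GL_BLEND)
#       glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#       glBlendEquation(GL_FACTOR_MIN_AMD)  # min(src*srcFactor, dst*dstFactor)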
| 40.756757 | 79 | 0.812334 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,116 | 0.740053 |
75e72372c73d69ec71d6ae230b03dd3710c4e2a3
| 2,506 |
py
|
Python
|
examples/building.py
|
jbermudezcabrera/campos
|
df34f93dd37b435a82663fb72ef37f669832af22
|
[
"MIT"
] | null | null | null |
examples/building.py
|
jbermudezcabrera/campos
|
df34f93dd37b435a82663fb72ef37f669832af22
|
[
"MIT"
] | null | null | null |
examples/building.py
|
jbermudezcabrera/campos
|
df34f93dd37b435a82663fb72ef37f669832af22
|
[
"MIT"
] | null | null | null |
"""This example demonstrates the basics on building complete forms using campos.
It creates several fields, marking some of them as required and adding some
custom validation.
Finally fields are added to a CreationForm which have several buttons and a
custom callback connected to one of them. After added, some related fields
are grouped.
"""
__author__ = 'Juan Manuel Bermúdez Cabrera'
def fake_create_person():
if form.valid:
msg = 'ID: {}<br/>'.format(form.id)
msg += 'Name: {}<br/>'.format(form.name)
msg += 'Last name: {}<br/>'.format(form.last_name)
msg += 'Phone: {}<br/>'.format(form.phone)
msg += 'Address: {}<br/>'.format(form.address)
msg += 'Country: {}<br/>'.format(form.country[0])
msg = 'New person created correctly with values:<br/>{}'.format(msg)
msg = '<html>{}</html>'.format(msg)
QMessageBox.information(None, 'Created', msg)
form.close()
def create_form():
id = campos.StringField(name='id', text='Personal ID', max_length=11,
required=True)
name = campos.StringField(name='name', text='Name', required=True)
last = campos.StringField(name='last_name', text='Last name', required=True)
val = campos.RegExp(r'\+?\d+', message='Invalid phone number')
phone = campos.StringField(name='phone', text='Phone number',
validators=[val])
address = campos.StringField(name='address', text='Home address')
country = campos.SelectField(name='country', text='Country', blank=True,
blank_text='Other', choices=['Cuba', 'EE.UU'],
default='Cuba')
fields = (id, name, last, phone, address, country)
global form
form = campos.CreationForm(on_save=fake_create_person, fields=fields)
form.setWindowTitle('Create Person')
# group some fields
form.group('Very personal info', ('phone', 'address'), layout='grid')
form.group('Identification', ['id', 'name', 'last_name'])
return form
if __name__ == '__main__':
import os
import sys
# set gui api to use
os.environ['QT_API'] = 'pyside'
from qtpy.QtWidgets import QMessageBox, QApplication
import campos
# set global settings for validation type and label positions
campos.Validation.set_current('instant')
campos.Labelling.set_current('top')
app = QApplication(sys.argv)
dialog = create_form()
sys.exit(dialog.exec_())
| 33.413333 | 80 | 0.634876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.381332 |
75e9882a624cfcf705ab7744f64aca22cda52bfb
| 8,452 |
py
|
Python
|
ai.py
|
LHGames-2017/nospace
|
1f36fb980ee51cdc576b765eff2c4ad5533ea0e3
|
[
"MIT"
] | null | null | null |
ai.py
|
LHGames-2017/nospace
|
1f36fb980ee51cdc576b765eff2c4ad5533ea0e3
|
[
"MIT"
] | null | null | null |
ai.py
|
LHGames-2017/nospace
|
1f36fb980ee51cdc576b765eff2c4ad5533ea0e3
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
from structs import *
import json
import numpy as np
import sys
import random, time
app = Flask(__name__)
dx=0
dy=0
def create_action(action_type, target):
actionContent = ActionContent(action_type, target.__dict__)
#print(actionContent)
return json.dumps(actionContent.__dict__)
def create_move_action(target):
return create_action("MoveAction", Point(target.X-dx,target.Y-dy))
def create_attack_action(target):
return create_action("AttackAction", Point(target.X-dx,target.Y-dy))
def create_collect_action(target):
return create_action("CollectAction", Point(target.X-dx,target.Y-dy))
def create_steal_action(target):
return create_action("StealAction", Point(target.X-dx,target.Y-dy))
def create_heal_action():
return create_action("HealAction", "")
def create_purchase_action(item):
return create_action("PurchaseAction", item)
def deserialize_map(serialized_map):
"""
    Utility function to parse the serialized map
"""
serialized_map = serialized_map[1:]
rows = serialized_map.split('[')
column = rows[0].split('{')
deserialized_map = [[Tile() for x in range(40)] for y in range(40)]
for i in range(len(rows) - 1):
column = rows[i + 1].split('{')
for j in range(len(column) - 1):
infos = column[j + 1].split(',')
end_index = infos[2].find('}')
content = int(infos[0])
x = int(infos[1])
y = int(infos[2][:end_index])
deserialized_map[i][j] = Tile(content, x, y)
return deserialized_map
#customs
def visual(lines,x,y):
for i in lines:
line = ''
for j in i[:20]:
#Empty, Wall, House, Lava, Resource, Shop, Player
#0 1 2 3 4 5 6
line+=str(j.Content).replace('None','N').replace('0', ' ').replace('1','#').replace('2','^').replace('3','L').replace('4','$').replace('5','S').replace('6','o')
print(line)
def distance(p1, p2):
return math.sqrt((p2[0]-p1[0])**2+(p2[1]-p1[1])**2)
'''
def searchg(x,y,grid,target, at):
if grid[x][y] == target:
at.append([x,y]) #found
return True
elif grid[x][y] == 1 or grid[x][y] == 3:
return False #wall or lava
elif grid[x][y] == 9:
return False #been here
at.append([x,y])
grid[x][y] == 9
if ((x<len(grid)-1 and search(x+1,y,grid,target, at))
or (y > 0 and search(x, y-1,grid,target, at))
or (x > 0 and search(x-1,y,grid,target, at))
or (y < len(grid)-1 and search(x, y+1,grid,target, at))):
return True
return False
'''
def search_next(me, target,m,dx,dy):
x=me.Position.X
y=me.Position.Y
if me.CarriedRessources==me.CarryingCapacity:
print('resource')
target=me.HouseLocation
#if distance([target.X,target.Y],[x,y])==0:
# return create_collect_action(Point(x+dx, x+dy))
neighbors = [[x+1,y],[x-1,y],[x,y+1],[x,y-1]]
tNeighbors = []
for neighbor in neighbors:
tNeighbors.append([distance(neighbor,[target.X, target.Y]),neighbor])
sortedNeighbors=sorted(tNeighbors, key=lambda x:x[0])
print(sortedNeighbors)
for n in sortedNeighbors:
#print(target.__dict__)
#print(x,y)
#print('----------',n,'--------')
#Empty, Wall, House, Lava, Resource, Shop, Player
#0 1 2 3 4 5 6
tile = m[n[1][0]-dx][n[1][1]-dy]
#print(tile.__dict__)
content = tile.Content
point = Point(n[1][0],n[1][1])
if content==0 or content==2:
print('----move----',point)
return create_move_action(point)
elif content==1 or content == 6:
print('attack',point)
return create_attack_action(point)
elif content==4:
return create_collect_action(point)
else:# content==3:
print('skip')
continue
def route(start, end, at, best=None):
    # avoid sharing a mutable default list across calls
    if best is None:
        best = []
    best.append(end)
for i in range(len(at)-1,-1,-1):
if compare(at[i], best[-1]):
best.append(at[i])
best=best[::-1]
return best
def compare(a,b):
if (a[0]==b[0]) and (abs(a[1]-b[1])==1):
return True
elif (a[1]==b[1]) and (abs(a[0]-b[0])==1):
return True
else:
return False
def arr2action(c,d):
if c[0]==d[0]:
if c[1]<d[1]:
return create_move_action(Point(x+1,y))
else:
return create_move_action(Point(x-1,y))
elif c[0]<d[0]:
return create_move_action(Point(x,y-1))
else:
return create_move_action(Point(x,y+1))
def findTargets(mapmatrix, me):
resources = []
enemyhouses = []
shops = []
for row in mapmatrix:
for tile in row:
if tile.Content==4:
resources.append(tile)
elif tile.Content==2 and tile.Content!=me.HouseLocation:
enemyhouses.append(tile)
elif tile.Content==5:
shops.append(tile)
else:
continue
return [resources, enemyhouses, shops]
def decide(me, closestEnemies, targets, grid):
try:
        distEn = closestEnemies[0][0]
        enemy = closestEnemies[0][1]
except:
distEn=0
enemy = []
distTarget = targets[0][0]
target = targets[0][1]
best=[]
at=[]
if distEn==1:
#print('------1-------')
return create_attack_action(Point(enemy.X,enemy.Y))
elif distTarget==1 and target.Content==2:
#print('------2-------')
return create_collect_action(Point(target.X,target.Y))
elif distTarget==0 and target.Content==4:
#print('------3-------')
return create_collect_action(Point(target.X,target.Y))
else:
#print('------4-------')
#t = random.choice([1,0])
#u = (t+1)%2
#return create_move_action(Point(me.Position.X+t,me.Position.Y+u))
return search_next(me, target, grid)
def bot():
"""
    Main function of your bot.
"""
map_json = request.form["map"]
# Player info
encoded_map = map_json.encode()
map_json = json.loads(encoded_map)
p = map_json["Player"]
pos = p["Position"]
x = pos["X"]
y = pos["Y"]
house = p["HouseLocation"]
player = Player(p["Health"], p["MaxHealth"], Point(x,y),
Point(house["X"], house["Y"]), p["Score"],
p["CarriedResources"], p["CarryingCapacity"])
# Map
serialized_map = map_json["CustomSerializedMap"]
deserialized_map=deserialize_map(serialized_map)
transposed=np.transpose(deserialized_map)
targets = findTargets(deserialized_map, player)
visual(transposed[:20][::-1],x,y)
otherPlayers = []
'''
#print(map_json)
for player_dict in map_json["OtherPlayers"]:
#print(player_dict)
for player_name in player_dict.keys():
player_info = player_dict[player_name]
#print('---------')
#print(player_info)
#print('---------')
p_pos = player_info["Position"]
player_info = PlayerInfo(player_info["Health"],
player_info["MaxHealth"],
Point(p_pos["X"], p_pos["Y"]))
otherPlayers.append({player_name: player_info })
'''
# return decision
#targets =
tTargets = []
for target in targets[0]:#+targets[1]:
tTargets.append([distance([x,y],[target.X,target.Y]),target])
sortedTargets = sorted(tTargets, key=lambda x:x[0])
tEnemies = []
for enemy in otherPlayers:
tEnemies.append([distance([x,y],[enemy.X,enemy.Y]),enemy])
sortedEnemies = sorted(tEnemies, key=lambda x:x[0])
dx,dy=0,0
for i,line in enumerate(deserialized_map):
for j,tile in enumerate(line):
if tile.X==x and tile.Y==y:
dx = x-i
dy = y-j
#return decide(player, sortedEnemies, sortedTargets, deserialized_map)
print(player.__dict__,player.Position.__dict__)
return search_next(player, sortedTargets[0][1], deserialized_map,dx,dy)
@app.route("/", methods=["POST"])
def reponse():
"""
    Entry point called by the GameServer
"""
sys.stdout.flush()
return bot()
if __name__ == "__main__":
app.run(host="0.0.0.0", port=3000)
| 30.959707 | 172 | 0.565192 | 0 | 0 | 0 | 0 | 149 | 0.017629 | 0 | 0 | 2,417 | 0.285968 |
75ecad4259bc7591e4a570004208ede9470250fd
| 92 |
py
|
Python
|
src/__init__.py
|
jmknoll/ig-autofriend
|
8de322b59c13346d21d6b11775cbad51b4e4920f
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
jmknoll/ig-autofriend
|
8de322b59c13346d21d6b11775cbad51b4e4920f
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
jmknoll/ig-autofriend
|
8de322b59c13346d21d6b11775cbad51b4e4920f
|
[
"MIT"
] | null | null | null |
from InstaFriend import InstaFriend
friend = InstaFriend('bonesaw')
friend.say_something()
| 18.4 | 35 | 0.815217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.097826 |
75ee6ab2f29331c5f95dba4b6e05f4612d407042
| 3,004 |
py
|
Python
|
sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py
|
wellcomecollection/catalogue-pipeline
|
360fa432a006f5e197a5b22d72cced7d6735d222
|
[
"MIT"
] | 8 |
2019-08-02T09:48:40.000Z
|
2019-12-20T14:06:58.000Z
|
sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py
|
wellcomecollection/catalogue
|
17dcf7f1977f953fbaf35c60aa166aaa1413fdd2
|
[
"MIT"
] | 329 |
2020-02-18T07:43:08.000Z
|
2021-04-23T10:45:33.000Z
|
sierra_adapter/sierra_progress_reporter/src/interval_arithmetic.py
|
wellcomecollection/catalogue-pipeline
|
360fa432a006f5e197a5b22d72cced7d6735d222
|
[
"MIT"
] | 1 |
2019-08-22T11:44:34.000Z
|
2019-08-22T11:44:34.000Z
|
import datetime as dt
import os
import attr
@attr.s(repr=False)
class Interval:
start = attr.ib()
end = attr.ib()
key = attr.ib()
def __repr__(self):
return "%s(start=%r, end=%r, key=%r)" % (
type(self).__name__,
self.start.isoformat(),
self.end.isoformat(),
self.key,
)
__str__ = __repr__
def strip_timestamp(timestamp):
# The timezone offset may or may not be present, remove it if it's there
return timestamp.strip("Z").replace("+00-00", "")
def get_intervals(keys):
"""
Generate the intervals completed for a particular resource type.
:param keys: A generator of S3 key names.
"""
for k in keys:
name = os.path.basename(k)
start, end = name.split("__")
start = strip_timestamp(start)
end = strip_timestamp(end)
try:
yield Interval(
start=dt.datetime.strptime(start, "%Y-%m-%dT%H-%M-%S.%f"),
end=dt.datetime.strptime(end, "%Y-%m-%dT%H-%M-%S.%f"),
key=k,
)
except ValueError:
yield Interval(
start=dt.datetime.strptime(start, "%Y-%m-%dT%H-%M-%S"),
end=dt.datetime.strptime(end, "%Y-%m-%dT%H-%M-%S"),
key=k,
)
def combine_overlapping_intervals(sorted_intervals):
"""
Given a generator of sorted open intervals, generate the covering set.
It produces a series of 2-tuples: (interval, running), where ``running``
is the set of sub-intervals used to build the overall interval.
:param sorted_intervals: A generator of ``Interval`` instances.
"""
lower = None
running = []
for higher in sorted_intervals:
if not lower:
lower = higher
running.append(higher)
else:
# We treat these as open intervals. This first case is for the
# two intervals being wholly overlapping, for example:
#
# ( -- lower -- )
# ( -- higher -- )
#
if higher.start < lower.end:
upper_bound = max(lower.end, higher.end)
lower = Interval(start=lower.start, end=upper_bound, key=None)
running.append(higher)
# Otherwise the two intervals are disjoint. Note that this
# includes the case where lower.end == higher.start, because
# we can't be sure that point has been included.
#
# ( -- lower -- )
# ( -- higher -- )
#
# or
#
# ( -- lower -- )
# ( -- higher -- )
#
else:
yield (lower, running)
lower = higher
running = [higher]
# And spit out the final interval
if lower is not None:
yield (lower, running)
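# --- Hedged usage sketch, not part of the original module ---
# Demonstrates the two helpers on made-up S3-style key names; the "records/"
# prefix is illustrative only.
if __name__ == "__main__":
    example_keys = [
        "records/2001-01-01T00-00-00__2001-01-01T01-00-00",
        "records/2001-01-01T00-30-00__2001-01-01T02-00-00",
        "records/2001-01-01T03-00-00__2001-01-01T04-00-00",
    ]
    sorted_intervals = sorted(get_intervals(example_keys), key=lambda i: i.start)
    for covering, parts in combine_overlapping_intervals(sorted_intervals):
        print(covering, "built from", len(parts), "sub-interval(s)")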
| 29.165049 | 78 | 0.508655 | 311 | 0.103529 | 2,454 | 0.816911 | 331 | 0.110186 | 0 | 0 | 1,141 | 0.379827 |
75eea75e6047e89d14c6be50606878240e707caf
| 41 |
py
|
Python
|
tests/components/mqtt_json/__init__.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023 |
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/mqtt_json/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101 |
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/mqtt_json/__init__.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956 |
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Tests for the mqtt_json component."""
| 20.5 | 40 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.97561 |
75efb0136dccedb3a2615588ac4efa68a29d7748
| 414 |
py
|
Python
|
wxpy/utils/__init__.py
|
frkhit/bl_wxpy
|
b03bc63d51592d32ee218ef6fd1022df6ef75069
|
[
"MIT"
] | 3 |
2019-06-24T02:19:19.000Z
|
2021-02-14T05:27:16.000Z
|
wxpy/utils/__init__.py
|
frkhit/bl_wxpy
|
b03bc63d51592d32ee218ef6fd1022df6ef75069
|
[
"MIT"
] | null | null | null |
wxpy/utils/__init__.py
|
frkhit/bl_wxpy
|
b03bc63d51592d32ee218ef6fd1022df6ef75069
|
[
"MIT"
] | 1 |
2021-02-08T03:50:05.000Z
|
2021-02-08T03:50:05.000Z
|
from .console import embed, shell_entry
from .misc import decode_webwx_emoji, enhance_connection, ensure_list, get_raw_dict, get_receiver, \
get_text_without_at_bot, get_username, match_attributes, match_name, match_text, new_local_msg_id, repr_message, \
smart_map, start_new_thread
from .puid_map import PuidMap
from .tools import detect_freq_limit, dont_raise_response_error, ensure_one, mutual_friends
| 59.142857 | 118 | 0.84058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75efc068bea2e00c99851509afc17f84c017599e
| 123 |
py
|
Python
|
src/compiler/handler.py
|
neolang/neopy
|
92f328ed6c2f3c40ec04b889477aa8fe916711f1
|
[
"MIT"
] | 1 |
2018-11-18T08:01:28.000Z
|
2018-11-18T08:01:28.000Z
|
src/compiler/handler.py
|
neolang/neopy
|
92f328ed6c2f3c40ec04b889477aa8fe916711f1
|
[
"MIT"
] | 2 |
2018-11-18T08:02:07.000Z
|
2018-11-19T06:20:35.000Z
|
src/compiler/handler.py
|
neolang/neopy
|
92f328ed6c2f3c40ec04b889477aa8fe916711f1
|
[
"MIT"
] | null | null | null |
def compile_files(fp_list):
    # Compile every file referenced in the given list of file paths.
    for fp in fp_list:
        __compile_file(fp)
def __compile_file(file_pointer):
    # Placeholder: compilation of a single file is not implemented yet.
    pass
| 15.375 | 33 | 0.699187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75f1634c6e371274f9060f7f9a480ee9c930fa89
| 1,082 |
py
|
Python
|
userbot/plugins/hpdiwali.py
|
yu9ohde/Marshmellow
|
145c90470701c972ab458483ac1b9320d1a44e8e
|
[
"MIT"
] | 2 |
2020-12-06T03:46:08.000Z
|
2022-02-19T20:34:52.000Z
|
userbot/plugins/hpdiwali.py
|
pro-boy/Marshmello
|
4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c
|
[
"MIT"
] | 4 |
2020-11-07T07:39:51.000Z
|
2020-11-10T03:46:41.000Z
|
userbot/plugins/hpdiwali.py
|
pro-boy/Marshmello
|
4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c
|
[
"MIT"
] | 9 |
2020-11-28T11:30:44.000Z
|
2021-06-01T07:11:57.000Z
|
# Plugin made by Dark cobra
# For Dark cobra
# Made by Shivam Patel(Team Cobra)
# Kang with credits..
import random
from userbot import CMD_HELP
from userbot.events import register
from userbot.utils import admin_cmd
from telethon import events, types, functions, utils
import asyncio
def choser(cmd, pack, blacklist={}):
docs = None
@borg.on(events.NewMessage(pattern=rf'\.{cmd}', outgoing=True))
async def handler(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0,8)
nonlocal docs
for i in animation_ttl:
await asyncio.sleep(animation_interval)
if docs is None:
docs = [
utils.get_input_document(x)
for x in (await borg(functions.messages.GetStickerSetRequest(types.InputStickerSetShortName(pack)))).documents
]
await event.respond(file=random.choice(docs))
choser('hpdiwali', 'a929138153_by_Shivam_Patel_1_anim')
| 28.473684 | 134 | 0.621072 | 0 | 0 | 0 | 0 | 677 | 0.625693 | 609 | 0.562847 | 155 | 0.143253 |
75f227cf59ba67118be0d4f419b2d0cc15fd93df
| 1,024 |
py
|
Python
|
scripts/parse-weka-results.py
|
jholewinski/ics-12-overlapped-tiling
|
af2b39bc957d33f68d4617865431ca731b18430a
|
[
"MIT"
] | 3 |
2015-12-31T11:19:50.000Z
|
2017-11-30T03:14:56.000Z
|
scripts/parse-weka-results.py
|
jholewinski/ics-12-overlapped-tiling
|
af2b39bc957d33f68d4617865431ca731b18430a
|
[
"MIT"
] | null | null | null |
scripts/parse-weka-results.py
|
jholewinski/ics-12-overlapped-tiling
|
af2b39bc957d33f68d4617865431ca731b18430a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
maximum = 0.0
selected = 0.0
results = []
for line in sys.stdin.readlines()[5:]:
line = line.strip()
if len(line) == 0:
continue
(inst, actual, predicted, error) = line.split()
results.append([inst, actual, predicted, error])
predicted = float(predicted)
if predicted > maximum:
maximum = predicted
selected = float(actual)
by_predicted = sorted(results, key=lambda entry: float(entry[2]))
by_predicted.reverse()
by_actual = sorted(results, key=lambda entry: float(entry[1]))
by_actual.reverse()
best_of_actuals = float(by_actual[0][1])
sys.stdout.write('Best of Actuals: %f\n' % best_of_actuals)
sys.stdout.write('Maximum Prediction: %s\n' %
str([x[2] for x in by_predicted[0:5]]))
sys.stdout.write('Selected Actual: %s\n' %
str([x[1] for x in by_predicted[0:5]]))
sys.stdout.write('Percentages: %s\n' %
str([float(x[1])/best_of_actuals for x in by_predicted[0:5]]))
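# --- Hedged note, not part of the original script ---
# The loop above expects Weka's per-instance prediction output on stdin:
# five header lines, then whitespace-separated rows of
#   <inst#> <actual> <predicted> <error>
# A made-up invocation (the exact Weka command line is illustrative only):
#   java weka.classifiers.functions.LinearRegression -T test.arff -p 0 \
#       | python parse-weka-results.py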
| 26.947368 | 79 | 0.630859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.12207 |
75f3476923aa5142454f8d9f4ed05a21bd8875d9
| 941 |
py
|
Python
|
symtuner/logger.py
|
audxo14/symtuner
|
741e4e14cfcf09b7c7a71ce34cf28f1858f1f476
|
[
"MIT"
] | null | null | null |
symtuner/logger.py
|
audxo14/symtuner
|
741e4e14cfcf09b7c7a71ce34cf28f1858f1f476
|
[
"MIT"
] | 1 |
2022-01-26T12:51:32.000Z
|
2022-01-26T12:51:32.000Z
|
symtuner/logger.py
|
audxo14/symtuner
|
741e4e14cfcf09b7c7a71ce34cf28f1858f1f476
|
[
"MIT"
] | 1 |
2022-01-26T12:42:24.000Z
|
2022-01-26T12:42:24.000Z
|
'''Logging module for symtuner library
Logging module for symtuner library. All loggings in symtuner library use this module.
'''
import logging as _logging
_LOGGER = None
def get_logger():
'''Get a logger.
    Get a singleton `Logger`. If no `Logger` has been created yet, create one and return it.
    If `get_logger` was called previously, return the `Logger` object created at that time.
Returns:
A `Logger` object.
'''
global _LOGGER
if not _LOGGER:
_LOGGER = _logging.getLogger('symtuner')
if not _logging.getLogger().handlers:
formatter = _logging.Formatter(fmt='%(asctime)s symtuner [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
stderr_handler = _logging.StreamHandler()
stderr_handler.setFormatter(formatter)
_LOGGER.addHandler(stderr_handler)
_LOGGER.setLevel('INFO')
return _LOGGER
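# --- Hedged usage sketch, not part of the original module ---
# Typical call pattern: grab the shared logger and log through it; repeated
# calls return the same configured instance.
if __name__ == "__main__":
    log = get_logger()
    log.info("symtuner logger initialised")
    assert log is get_logger()  # singleton: same object on every call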
| 28.515152 | 98 | 0.631243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.46865 |
75f57c3ebdfa5b1c58a1a40cbcfe56a933e80e69
| 3,326 |
py
|
Python
|
config/eval.py
|
XiaLiPKU/RESCAN-for-Deraining
|
e28d1d7cd3d8b276ce88de730de1603bafa30e23
|
[
"MIT"
] | 292 |
2018-07-17T01:11:53.000Z
|
2022-03-31T13:06:50.000Z
|
config/eval.py
|
XiaLiPKU/RESCAN-for-Deraining
|
e28d1d7cd3d8b276ce88de730de1603bafa30e23
|
[
"MIT"
] | 18 |
2018-08-02T13:33:06.000Z
|
2022-01-26T15:54:27.000Z
|
config/eval.py
|
XiaLiPKU/RESCAN-for-Deraining
|
e28d1d7cd3d8b276ce88de730de1603bafa30e23
|
[
"MIT"
] | 87 |
2018-07-17T18:02:09.000Z
|
2021-12-19T08:21:57.000Z
|
import os
import sys
import cv2
import argparse
import numpy as np
import torch
from torch import nn
from torch.nn import MSELoss
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import settings
from dataset import TestDataset
from model import RESCAN
from cal_ssim import SSIM
logger = settings.logger
torch.cuda.manual_seed_all(66)
torch.manual_seed(66)
torch.cuda.set_device(settings.device_id)
def ensure_dir(dir_path):
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
class Session:
def __init__(self):
self.log_dir = settings.log_dir
self.model_dir = settings.model_dir
ensure_dir(settings.log_dir)
ensure_dir(settings.model_dir)
logger.info('set log dir as %s' % settings.log_dir)
logger.info('set model dir as %s' % settings.model_dir)
self.net = RESCAN().cuda()
self.crit = MSELoss().cuda()
self.ssim = SSIM().cuda()
self.dataloaders = {}
def get_dataloader(self, dataset_name):
dataset = TestDataset(dataset_name)
if not dataset_name in self.dataloaders:
self.dataloaders[dataset_name] = \
DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=1, drop_last=False)
return self.dataloaders[dataset_name]
def load_checkpoints(self, name):
ckp_path = os.path.join(self.model_dir, name)
try:
obj = torch.load(ckp_path)
logger.info('Load checkpoint %s' % ckp_path)
except FileNotFoundError:
logger.info('No checkpoint %s!!' % ckp_path)
return
self.net.load_state_dict(obj['net'])
def inf_batch(self, name, batch):
O, B = batch['O'].cuda(), batch['B'].cuda()
O, B = Variable(O, requires_grad=False), Variable(B, requires_grad=False)
R = O - B
with torch.no_grad():
O_Rs = self.net(O)
loss_list = [self.crit(O_R, R) for O_R in O_Rs]
ssim_list = [self.ssim(O - O_R, O - R) for O_R in O_Rs]
losses = {
'loss%d' % i: loss.item()
for i, loss in enumerate(loss_list)
}
ssimes = {
'ssim%d' % i: ssim.item()
for i, ssim in enumerate(ssim_list)
}
losses.update(ssimes)
return losses
def run_test(ckp_name):
sess = Session()
sess.net.eval()
sess.load_checkpoints(ckp_name)
dt = sess.get_dataloader('test')
all_num = 0
all_losses = {}
for i, batch in enumerate(dt):
losses = sess.inf_batch('test', batch)
batch_size = batch['O'].size(0)
all_num += batch_size
for key, val in losses.items():
if i == 0:
all_losses[key] = 0.
all_losses[key] += val * batch_size
logger.info('batch %d mse %s: %f' % (i, key, val))
for key, val in all_losses.items():
logger.info('total mse %s: %f' % (key, val / all_num))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', default='latest')
args = parser.parse_args(sys.argv[1:])
run_test(args.model)
| 28.672414 | 81 | 0.613049 | 1,834 | 0.551413 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.057727 |
75f5ca0e1019fe3f64db390c86a601c2f8792420
| 6,371 |
py
|
Python
|
FastEMRIWaveforms/few/utils/modeselector.py
|
basuparth/ICERM_Workshop
|
ebabce680fc87e90ff1de30246dcda9beb384bb4
|
[
"MIT"
] | null | null | null |
FastEMRIWaveforms/few/utils/modeselector.py
|
basuparth/ICERM_Workshop
|
ebabce680fc87e90ff1de30246dcda9beb384bb4
|
[
"MIT"
] | null | null | null |
FastEMRIWaveforms/few/utils/modeselector.py
|
basuparth/ICERM_Workshop
|
ebabce680fc87e90ff1de30246dcda9beb384bb4
|
[
"MIT"
] | null | null | null |
# Online mode selection for FastEMRIWaveforms Packages
# Copyright (C) 2020 Michael L. Katz, Alvin J.K. Chua, Niels Warburton, Scott A. Hughes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from few.utils.citations import *
# check for cupy
try:
import cupy as xp
except (ImportError, ModuleNotFoundError) as e:
import numpy as xp
class ModeSelector:
"""Filter teukolsky amplitudes based on power contribution.
This module takes teukolsky modes, combines them with their associated ylms,
and determines the power contribution from each mode. It then filters the
    modes based on the fractional accuracy on the total power (eps) parameter.
    The mode filtering is a major contributing factor to the speed of these
    waveforms as it removes large numbers of useless modes from the final
    summation calculation.
    Be careful, as this is built on the assumption that input mode arrays
    will be in the order :math:`m=0`, :math:`m>0`, and then :math:`m<0`.
args:
m0mask (1D bool xp.ndarray): This mask highlights which modes have
:math:`m=0`. Value is False if :math:`m=0`, True if not.
This only includes :math:`m\geq0`.
use_gpu (bool, optional): If True, allocate arrays for usage on a GPU.
Default is False.
"""
def __init__(self, m0mask, use_gpu=False):
if use_gpu:
self.xp = xp
else:
self.xp = np
# store information releated to m values
# the order is m = 0, m > 0, m < 0
self.m0mask = m0mask
self.num_m_zero_up = len(m0mask)
self.num_m_1_up = len(self.xp.arange(len(m0mask))[m0mask])
self.num_m0 = len(self.xp.arange(len(m0mask))[~m0mask])
def attributes_ModeSelector(self):
"""
attributes:
xp: cupy or numpy depending on GPU usage.
num_m_zero_up (int): Number of modes with :math:`m\geq0`.
num_m_1_up (int): Number of modes with :math:`m\geq1`.
num_m0 (int): Number of modes with :math:`m=0`.
"""
pass
@property
def citation(self):
"""Return citations related to this class."""
return few_citation + few_software_citation
def __call__(self, teuk_modes, ylms, modeinds, eps=1e-5):
"""Call to sort and filer teukolsky modes.
This is the call function that takes the teukolsky modes, ylms,
mode indices and fractional accuracy of the total power and returns
filtered teukolsky modes and ylms.
args:
teuk_modes (2D complex128 xp.ndarray): Complex teukolsky amplitudes
from the amplitude modules.
Shape: (number of trajectory points, number of modes).
ylms (1D complex128 xp.ndarray): Array of ylm values for each mode,
including m<0. Shape is (num of m==0,) + (num of m>0,)
+ (num of m<0). Number of m<0 and m>0 is the same, but they are
ordered as (m==0 first then) m>0 then m<0.
modeinds (list of int xp.ndarrays): List containing the mode index arrays. If in an
equatorial model, need :math:`(l,m,n)` arrays. If generic,
:math:`(l,m,k,n)` arrays. e.g. [l_arr, m_arr, n_arr].
eps (double, optional): Fractional accuracy of the total power used
to determine the contributing modes. Lowering this value will
                include more modes, slowing the waveform down, but generally
                improving accuracy. Increasing this value removes modes from
                consideration and can have a considerable effect on the speed of
the waveform, albeit at the cost of some accuracy (usually an
acceptable loss). Default that gives good mismatch qualities is
1e-5.
"""
# get the power contribution of each mode including m < 0
power = (
self.xp.abs(
self.xp.concatenate(
[teuk_modes, self.xp.conj(teuk_modes[:, self.m0mask])], axis=1
)
* ylms
)
** 2
)
# sort the power for a cumulative summation
inds_sort = self.xp.argsort(power, axis=1)[:, ::-1]
power = self.xp.sort(power, axis=1)[:, ::-1]
cumsum = self.xp.cumsum(power, axis=1)
# initialize and indices array for keeping modes
inds_keep = self.xp.full(cumsum.shape, True)
# keep modes that add to within the fractional power (1 - eps)
inds_keep[:, 1:] = cumsum[:, :-1] < cumsum[:, -1][:, self.xp.newaxis] * (
1 - eps
)
# finds indices of each mode to be kept
temp = inds_sort[inds_keep]
# adjust the index arrays to make -m indices equal to +m indices
# if +m or -m contributes, we keep both because of structure of CUDA kernel
temp = temp * (temp < self.num_m_zero_up) + (temp - self.num_m_1_up) * (
temp >= self.num_m_zero_up
)
# if +m or -m contributes, we keep both because of structure of CUDA kernel
keep_modes = self.xp.unique(temp)
# set ylms
        # adjust temp arrays specific to ylm setup
temp2 = keep_modes * (keep_modes < self.num_m0) + (
keep_modes + self.num_m_1_up
) * (keep_modes >= self.num_m0)
# ylm duplicates the m = 0 unlike teuk_modes
ylmkeep = self.xp.concatenate([keep_modes, temp2])
# setup up teuk mode and ylm returns
out1 = (teuk_modes[:, keep_modes], ylms[ylmkeep])
# setup up mode values that have been kept
out2 = tuple([arr[keep_modes] for arr in modeinds])
return out1 + out2
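# --- Hedged usage sketch, not part of the original module ---
# Exercises the selector on random stand-in data (two m=0 modes and two m>0
# modes) rather than real Teukolsky amplitudes, just to show the expected
# array shapes and call signature; the (l, m, n) values are made up.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    m0mask = np.array([False, False, True, True])  # True where m != 0
    ntraj, nmodes = 10, len(m0mask)
    teuk_modes = rng.normal(size=(ntraj, nmodes)) + 1j * rng.normal(size=(ntraj, nmodes))
    # ylms hold the m=0 modes once and the m>0 modes twice (for +m and -m)
    ylms = rng.normal(size=nmodes + int(m0mask.sum())) + 0j
    l_arr = np.array([2, 3, 2, 3])
    m_arr = np.array([0, 0, 1, 2])
    n_arr = np.array([0, 0, 0, 0])
    selector = ModeSelector(m0mask, use_gpu=False)
    teuk_kept, ylm_kept, l_kept, m_kept, n_kept = selector(
        teuk_modes, ylms, [l_arr, m_arr, n_arr], eps=1e-2
    )
    print("kept", teuk_kept.shape[1], "of", nmodes, "modes")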
| 38.149701 | 95 | 0.622822 | 5,414 | 0.849788 | 0 | 0 | 139 | 0.021818 | 0 | 0 | 4,330 | 0.679642 |
75f7e09f370f1d9746b214f2177f7c3fe2f5c339
| 81 |
py
|
Python
|
coreapi/models/__init__.py
|
recentfahim/smartbusinessbd
|
61a74ae629f2c6e2317c41da23476c8780446e84
|
[
"Apache-2.0"
] | null | null | null |
coreapi/models/__init__.py
|
recentfahim/smartbusinessbd
|
61a74ae629f2c6e2317c41da23476c8780446e84
|
[
"Apache-2.0"
] | null | null | null |
coreapi/models/__init__.py
|
recentfahim/smartbusinessbd
|
61a74ae629f2c6e2317c41da23476c8780446e84
|
[
"Apache-2.0"
] | null | null | null |
from .city import City
from .company import Company
from .country import Country
| 20.25 | 28 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
75f9dd3819053deb8e0d3dbd4dc28b348322030d
| 2,113 |
py
|
Python
|
shared-data/python/opentrons_shared_data/deck/dev_types.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
shared-data/python/opentrons_shared_data/deck/dev_types.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
shared-data/python/opentrons_shared_data/deck/dev_types.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | null | null | null |
"""
opentrons_shared_data.deck.dev_types: types for deck defs
This should only be imported if typing.TYPE_CHECKING is True
"""
from typing import Any, Dict, List, NewType, Union
from typing_extensions import Literal, TypedDict
from ..module.dev_types import ModuleType
DeckSchemaVersion3 = Literal[3]
DeckSchemaVersion2 = Literal[2]
DeckSchemaVersion1 = Literal[1]
DeckSchema = NewType("DeckSchema", Dict[str, Any])
RobotModel = Union[Literal["OT-2 Standard"], Literal["OT-3 Standard"]]
class Metadata(TypedDict, total=False):
displayName: str
tags: List[str]
class Robot(TypedDict):
model: RobotModel
class BoundingBox(TypedDict):
xDimension: float
yDimension: float
zDimension: float
class SlotDefV3(TypedDict, total=False):
id: str
position: List[float]
boundingBox: BoundingBox
displayName: str
compatibleModuleTypes: List[ModuleType]
matingSurfaceUnitVector: List[Union[Literal[1], Literal[-1]]]
class CalibrationPoint(TypedDict):
id: str
position: List[float]
displayName: str
class Feature(TypedDict):
footprint: str
class FixedLabwareBySlot(TypedDict):
id: str
displayName: str
labware: str
slot: str
class FixedLabwareByPosition(TypedDict):
id: str
displayName: str
labware: str
position: List[float]
class FixedVolumeBySlot(TypedDict):
id: str
displayName: str
boundingBox: BoundingBox
slot: str
class FixedVolumeByPosition(TypedDict):
id: str
displayName: str
boundingBox: BoundingBox
position: List[float]
Fixture = Union[
FixedLabwareBySlot, FixedLabwareByPosition, FixedVolumeBySlot, FixedVolumeByPosition
]
class LocationsV3(TypedDict):
orderedSlots: List[SlotDefV3]
calibrationPoints: List[CalibrationPoint]
fixtures: List[Fixture]
class DeckDefinitionV3(TypedDict):
otId: str
schemaVersion: Literal[3]
cornerOffsetFromOrigin: List[float]
dimensions: List[float]
metadata: Metadata
robot: Robot
locations: LocationsV3
layers: Dict[str, List[Feature]]
DeckDefinition = DeckDefinitionV3
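# --- Hedged usage sketch, not part of the original module ---
# Shows how the TypedDicts are meant to be populated as plain dicts; the slot
# and calibration-point values are made up, not taken from a real deck
# definition file.
if __name__ == "__main__":
    example_slot: SlotDefV3 = {
        "id": "1",
        "position": [0.0, 0.0, 0.0],
        "displayName": "Slot 1",
        "compatibleModuleTypes": [],
    }
    example_point: CalibrationPoint = {
        "id": "1BottomLeft",
        "position": [12.13, 9.0, 0.0],
        "displayName": "left calibration point",
    }
    print(example_slot["displayName"], example_point["position"])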
| 19.564815 | 88 | 0.730715 | 1,436 | 0.679602 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.079981 |
75f9e56ae6c6a091caa3997bff09abbf8201e9db
| 2,803 |
py
|
Python
|
source/hsicbt/model/vgg.py
|
tongjian121/PK-HBaR
|
c564e0f08c2c09e0023384adecfcf25e2d53a8a3
|
[
"MIT"
] | 9 |
2021-11-04T16:53:04.000Z
|
2022-03-28T10:27:44.000Z
|
source/hsicbt/model/vgg.py
|
tongjian121/PK-HBaR
|
c564e0f08c2c09e0023384adecfcf25e2d53a8a3
|
[
"MIT"
] | null | null | null |
source/hsicbt/model/vgg.py
|
tongjian121/PK-HBaR
|
c564e0f08c2c09e0023384adecfcf25e2d53a8a3
|
[
"MIT"
] | null | null | null |
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
defaultcfg = {
11 : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
13 : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512],
16 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512],
19 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512],
}
def conv_layer(chann_in, chann_out, k_size, p_size):
layer = nn.Sequential(
nn.Conv2d(chann_in, chann_out, kernel_size=k_size, padding=p_size),
nn.BatchNorm2d(chann_out),
nn.ReLU()
)
return layer
def vgg_conv_block(in_list, out_list, k_list, p_list, pooling_k, pooling_s):
layers = [ conv_layer(in_list[i], out_list[i], k_list[i], p_list[i]) for i in range(len(in_list)) ]
layers += [ nn.MaxPool2d(kernel_size = pooling_k, stride = pooling_s)]
return nn.Sequential(*layers)
def vgg_fc_layer(size_in, size_out):
layer = nn.Sequential(
nn.Linear(size_in, size_out),
nn.BatchNorm1d(size_out),
nn.ReLU()
)
return layer
class VGG16(nn.Module):
def __init__(self, **kwargs):
super(VGG16, self).__init__()
self.rob = kwargs['robustness'] if 'robustness' in kwargs else False
# Conv blocks (BatchNorm + ReLU activation added in each block)
self.layer1 = vgg_conv_block([3,64], [64,64], [3,3], [1,1], 2, 2)
self.layer2 = vgg_conv_block([64,128], [128,128], [3,3], [1,1], 2, 2)
self.layer3 = vgg_conv_block([128,256,256], [256,256,256], [3,3,3], [1,1,1], 2, 2)
self.layer4 = vgg_conv_block([256,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
self.layer5 = vgg_conv_block([512,512,512], [512,512,512], [3,3,3], [1,1,1], 2, 2)
# FC layers
self.layer6 = vgg_fc_layer(512, 4096)
self.layer7 = vgg_fc_layer(4096, 4096)
# Final layer
self.layer8 = nn.Linear(4096, 10)
def forward(self, x):
output_list = []
out = self.layer1(x)
output_list.append(out)
out = self.layer2(out)
output_list.append(out)
out = self.layer3(out)
output_list.append(out)
out = self.layer4(out)
output_list.append(out)
vgg16_features = self.layer5(out)
out = vgg16_features.view(out.size(0), -1)
#print(out.shape)
out = self.layer6(out)
output_list.append(out)
out = self.layer7(out)
output_list.append(out)
out = self.layer8(out)
if self.rob:
return out
else:
return out, output_list
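# --- Hedged usage sketch, not part of the original module ---
# Runs a dummy CIFAR-sized batch through the network on CPU just to confirm
# the output shapes; batch size 2 keeps the BatchNorm1d layers in the FC head
# happy in training mode.
if __name__ == "__main__":
    net = VGG16(robustness=False)
    dummy = torch.randn(2, 3, 32, 32)
    logits, intermediates = net(dummy)
    print(logits.shape)                      # -> torch.Size([2, 10])
    print([t.shape for t in intermediates])  # conv feature maps + FC activations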
| 32.593023 | 108 | 0.567249 | 1,615 | 0.576168 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.06279 |
75faae9bc5c91a63ded3c9f4f2e51213df5e1730
| 11,555 |
py
|
Python
|
src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py
|
faradaym/Lantern
|
536e48da79ee374527c669f77ad9e0a0776a0bb8
|
[
"BSD-3-Clause"
] | 158 |
2018-03-28T21:58:07.000Z
|
2022-02-22T00:49:46.000Z
|
src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py
|
douxiansheng/Lantern
|
f453de532da638c1f467953b32bbe49a3dedfa45
|
[
"BSD-3-Clause"
] | 35 |
2018-09-03T21:27:15.000Z
|
2019-05-11T02:17:49.000Z
|
src/out/ICFP18evaluation/evaluationTreeLSTM/PyTorch/scripts/preprocess-sst.py
|
douxiansheng/Lantern
|
f453de532da638c1f467953b32bbe49a3dedfa45
|
[
"BSD-3-Clause"
] | 36 |
2017-06-30T00:28:59.000Z
|
2022-01-24T12:20:42.000Z
|
"""
Preprocessing script for Stanford Sentiment Treebank data.
"""
import os
import glob
#
# Trees and tree loading
#
class ConstTree(object):
def __init__(self):
self.left = None
self.right = None
def size(self):
self.size = 1
if self.left is not None:
self.size += self.left.size()
if self.right is not None:
self.size += self.right.size()
return self.size
def set_spans(self):
if self.word is not None:
self.span = self.word
return self.span
self.span = self.left.set_spans()
if self.right is not None:
self.span += ' ' + self.right.set_spans()
return self.span
def get_labels(self, spans, labels, dictionary):
if self.span in dictionary:
spans[self.idx] = self.span
labels[self.idx] = dictionary[self.span]
if self.left is not None:
self.left.get_labels(spans, labels, dictionary)
if self.right is not None:
self.right.get_labels(spans, labels, dictionary)
class DepTree(object):
def __init__(self):
self.children = []
self.lo, self.hi = None, None
def size(self):
self.size = 1
for c in self.children:
self.size += c.size()
return self.size
def set_spans(self, words):
self.lo, self.hi = self.idx, self.idx + 1
if len(self.children) == 0:
self.span = words[self.idx]
return
for c in self.children:
c.set_spans(words)
self.lo = min(self.lo, c.lo)
self.hi = max(self.hi, c.hi)
self.span = ' '.join(words[self.lo : self.hi])
def get_labels(self, spans, labels, dictionary):
if self.span in dictionary:
spans[self.idx] = self.span
labels[self.idx] = dictionary[self.span]
for c in self.children:
c.get_labels(spans, labels, dictionary)
def load_trees(dirpath):
const_trees, dep_trees, toks = [], [], []
with open(os.path.join(dirpath, 'parents.txt')) as parentsfile, \
open(os.path.join(dirpath, 'dparents.txt')) as dparentsfile, \
open(os.path.join(dirpath, 'sents.txt')) as toksfile:
parents, dparents = [], []
for line in parentsfile:
parents.append(map(int, line.split()))
for line in dparentsfile:
dparents.append(map(int, line.split()))
for line in toksfile:
toks.append(line.strip().split())
for i in xrange(len(toks)):
const_trees.append(load_constituency_tree(parents[i], toks[i]))
dep_trees.append(load_dependency_tree(dparents[i]))
return const_trees, dep_trees, toks
def load_constituency_tree(parents, words):
trees = []
root = None
size = len(parents)
for i in xrange(size):
trees.append(None)
word_idx = 0
for i in xrange(size):
if not trees[i]:
idx = i
prev = None
prev_idx = None
word = words[word_idx]
word_idx += 1
while True:
tree = ConstTree()
parent = parents[idx] - 1
tree.word, tree.parent, tree.idx = word, parent, idx
word = None
if prev is not None:
if tree.left is None:
tree.left = prev
else:
tree.right = prev
trees[idx] = tree
if parent >= 0 and trees[parent] is not None:
if trees[parent].left is None:
trees[parent].left = tree
else:
trees[parent].right = tree
break
elif parent == -1:
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root
def load_dependency_tree(parents):
trees = []
root = None
size = len(parents)
for i in xrange(size):
trees.append(None)
for i in xrange(size):
if not trees[i]:
idx = i
prev = None
prev_idx = None
while True:
tree = DepTree()
parent = parents[idx] - 1
# node is not in tree
if parent == -2:
break
tree.parent, tree.idx = parent, idx
if prev is not None:
tree.children.append(prev)
trees[idx] = tree
if parent >= 0 and trees[parent] is not None:
trees[parent].children.append(tree)
break
elif parent == -1:
root = tree
break
else:
prev = tree
prev_idx = idx
idx = parent
return root
#
# Various utilities
#
def make_dirs(dirs):
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
def load_sents(dirpath):
sents = []
with open(os.path.join(dirpath, 'SOStr.txt')) as sentsfile:
for line in sentsfile:
sent = ' '.join(line.split('|'))
sents.append(sent.strip())
return sents
def load_splits(dirpath):
splits = []
with open(os.path.join(dirpath, 'datasetSplit.txt')) as splitfile:
splitfile.readline()
for line in splitfile:
idx, split = line.split(',')
splits.append(int(split))
return splits
def load_parents(dirpath):
parents = []
with open(os.path.join(dirpath, 'STree.txt')) as parentsfile:
for line in parentsfile:
p = ' '.join(line.split('|'))
parents.append(p.strip())
return parents
def load_dictionary(dirpath):
labels = []
with open(os.path.join(dirpath, 'sentiment_labels.txt')) as labelsfile:
labelsfile.readline()
for line in labelsfile:
idx, rating = line.split('|')
idx = int(idx)
rating = float(rating)
if rating <= 0.2:
label = -2
elif rating <= 0.4:
label = -1
elif rating > 0.8:
label = +2
elif rating > 0.6:
label = +1
else:
label = 0
labels.append(label)
d = {}
with open(os.path.join(dirpath, 'dictionary.txt')) as dictionary:
for line in dictionary:
s, idx = line.split('|')
d[s] = labels[int(idx)]
return d
def build_vocab(filepaths, dst_path, lowercase=True):
vocab = set()
for filepath in filepaths:
with open(filepath) as f:
for line in f:
if lowercase:
line = line.lower()
vocab |= set(line.split())
with open(dst_path, 'w') as f:
for w in sorted(vocab):
f.write(w + '\n')
def split(sst_dir, train_dir, dev_dir, test_dir):
sents = load_sents(sst_dir)
splits = load_splits(sst_dir)
parents = load_parents(sst_dir)
with open(os.path.join(train_dir, 'sents.txt'), 'w') as train, \
open(os.path.join(dev_dir, 'sents.txt'), 'w') as dev, \
open(os.path.join(test_dir, 'sents.txt'), 'w') as test, \
open(os.path.join(train_dir, 'parents.txt'), 'w') as trainparents, \
open(os.path.join(dev_dir, 'parents.txt'), 'w') as devparents, \
open(os.path.join(test_dir, 'parents.txt'), 'w') as testparents:
for sent, split, p in zip(sents, splits, parents):
if split == 1:
train.write(sent)
train.write('\n')
trainparents.write(p)
trainparents.write('\n')
elif split == 2:
test.write(sent)
test.write('\n')
testparents.write(p)
testparents.write('\n')
else:
dev.write(sent)
dev.write('\n')
devparents.write(p)
devparents.write('\n')
def get_labels(tree, dictionary):
size = tree.size()
spans, labels = [], []
for i in xrange(size):
labels.append(None)
spans.append(None)
tree.get_labels(spans, labels, dictionary)
return spans, labels
def write_labels(dirpath, dictionary):
print('Writing labels for trees in ' + dirpath)
with open(os.path.join(dirpath, 'labels.txt'), 'w') as labels, \
open(os.path.join(dirpath, 'dlabels.txt'), 'w') as dlabels:
# load constituency and dependency trees
const_trees, dep_trees, toks = load_trees(dirpath)
# write span labels
for i in xrange(len(const_trees)):
const_trees[i].set_spans()
dep_trees[i].set_spans(toks[i])
# const tree labels
s, l = [], []
for j in xrange(const_trees[i].size()):
s.append(None)
l.append(None)
const_trees[i].get_labels(s, l, dictionary)
labels.write(' '.join(map(str, l)) + '\n')
# dep tree labels
dep_trees[i].span = const_trees[i].span
s, l = [], []
for j in xrange(len(toks[i])):
s.append(None)
l.append('#')
dep_trees[i].get_labels(s, l, dictionary)
dlabels.write(' '.join(map(str, l)) + '\n')
def dependency_parse(filepath, cp='', tokenize=True):
print('\nDependency parsing ' + filepath)
dirpath = os.path.dirname(filepath)
filepre = os.path.splitext(os.path.basename(filepath))[0]
tokpath = os.path.join(dirpath, filepre + '.toks')
parentpath = os.path.join(dirpath, 'dparents.txt')
relpath = os.path.join(dirpath, 'rels.txt')
tokenize_flag = '-tokenize - ' if tokenize else ''
cmd = ('java -cp %s DependencyParse -tokpath %s -parentpath %s -relpath %s %s < %s'
% (cp, tokpath, parentpath, relpath, tokenize_flag, filepath))
os.system(cmd)
if __name__ == '__main__':
print('=' * 80)
print('Preprocessing Stanford Sentiment Treebank')
print('=' * 80)
base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
data_dir = os.path.join(base_dir, 'data')
lib_dir = os.path.join(base_dir, 'lib')
sst_dir = os.path.join(data_dir, 'sst')
train_dir = os.path.join(sst_dir, 'train')
dev_dir = os.path.join(sst_dir, 'dev')
test_dir = os.path.join(sst_dir, 'test')
make_dirs([train_dir, dev_dir, test_dir])
# produce train/dev/test splits
split(sst_dir, train_dir, dev_dir, test_dir)
sent_paths = glob.glob(os.path.join(sst_dir, '*/sents.txt'))
# produce dependency parses
classpath = ':'.join([
lib_dir,
os.path.join(lib_dir, 'stanford-parser/stanford-parser.jar'),
os.path.join(lib_dir, 'stanford-parser/stanford-parser-3.5.1-models.jar')])
for filepath in sent_paths:
dependency_parse(filepath, cp=classpath, tokenize=False)
# get vocabulary
build_vocab(sent_paths, os.path.join(sst_dir, 'vocab.txt'))
build_vocab(sent_paths, os.path.join(sst_dir, 'vocab-cased.txt'), lowercase=False)
# write sentiment labels for nodes in trees
dictionary = load_dictionary(sst_dir)
write_labels(train_dir, dictionary)
write_labels(dev_dir, dictionary)
write_labels(test_dir, dictionary)
| 32.457865 | 87 | 0.539766 | 1,864 | 0.161315 | 0 | 0 | 0 | 0 | 0 | 0 | 1,061 | 0.091822 |
75fc997c30736fa87f40fddc061010fa3c1f2c9f
| 12,703 |
py
|
Python
|
models/relevance/relevance_google_net.py
|
sanglee/XAI-threshold-calibration
|
24ddd5213b02d4fb919bca191392fe8b1a30aa88
|
[
"Apache-2.0"
] | null | null | null |
models/relevance/relevance_google_net.py
|
sanglee/XAI-threshold-calibration
|
24ddd5213b02d4fb919bca191392fe8b1a30aa88
|
[
"Apache-2.0"
] | null | null | null |
models/relevance/relevance_google_net.py
|
sanglee/XAI-threshold-calibration
|
24ddd5213b02d4fb919bca191392fe8b1a30aa88
|
[
"Apache-2.0"
] | null | null | null |
import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import torch.utils.model_zoo as model_zoo
from typing import Optional, Tuple, List, Callable, Any
from modules.layers import *
__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"]
model_urls = {
# GoogLeNet ported from TensorFlow
'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}
GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor],
'aux_logits1': Optional[Tensor]}
# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs
def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "GoogLeNet":
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = GoogLeNet(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['googlenet']))
return model
return GoogLeNet(**kwargs)
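# --- Hedged usage sketch, not part of the original module ---
# One sensible way to build this relevance-aware GoogLeNet for inference; the
# argument values are illustrative, not the project's prescribed configuration.
#
#   net = googlenet(pretrained=False, aux_logits=False, init_weights=True)
#   net.eval()
#   with torch.no_grad():
#       logits = net(torch.randn(1, 3, 224, 224))   # -> shape [1, 1000]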
class GoogLeNet(nn.Module):
__constants__ = ['aux_logits', 'transform_input']
def __init__(
self,
num_classes: int = 1000,
aux_logits: bool = True,
transform_input: bool = False,
init_weights: Optional[bool] = None,
blocks: Optional[List[Callable[..., nn.Module]]] = None
) -> None:
super(GoogLeNet, self).__init__()
if blocks is None:
blocks = [BasicConv2d, Inception, InceptionAux]
if init_weights is None:
warnings.warn('The default weight initialization of GoogleNet will be changed in future releases of '
'torchvision. If you wish to keep the old behavior (which leads to long initialization times'
' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning)
init_weights = True
assert len(blocks) == 3
conv_block = blocks[0]
inception_block = blocks[1]
inception_aux_block = blocks[2]
self.aux_logits = aux_logits
self.transform_input = transform_input
self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
self.maxpool1 = MaxPool2d(3, stride=2, ceil_mode=True)
self.conv2 = conv_block(64, 64, kernel_size=1)
self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
self.maxpool2 = MaxPool2d(3, stride=2, ceil_mode=True)
self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
self.maxpool3 = MaxPool2d(3, stride=2, ceil_mode=True)
self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
self.maxpool4 = MaxPool2d(2, stride=2, ceil_mode=True)
self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
if aux_logits:
self.aux1 = inception_aux_block(512, num_classes)
self.aux2 = inception_aux_block(528, num_classes)
else:
self.aux1 = None # type: ignore[assignment]
self.aux2 = None # type: ignore[assignment]
self.avgpool = AdaptiveAvgPool2d((1, 1))
self.dropout = Dropout(0.2)
self.fc = Linear(1024, num_classes)
self.gradients = dict()
self.activations = dict()
        def forward_hook(module, input, output):
            # Cache the inception3b activation for later inspection (e.g. Grad-CAM).
            self.activations['value'] = output
            return None
        def backward_hook(module, grad_input, grad_output):
            # Cache the gradient w.r.t. the inception3b output.
            self.gradients['value'] = grad_output[0]
        self.inception3b.register_forward_hook(forward_hook)
        self.inception3b.register_backward_hook(backward_hook)
def forward(self, x):
# N x 3 x 224 x 224
x = self.conv1(x)
# N x 64 x 112 x 112
x = self.maxpool1(x)
# N x 64 x 56 x 56
x = self.conv2(x)
# N x 64 x 56 x 56
x = self.conv3(x)
# N x 192 x 56 x 56
x = self.maxpool2(x)
# N x 192 x 28 x 28
x = self.inception3a(x)
# N x 256 x 28 x 28
x = self.inception3b(x)
# N x 480 x 28 x 28
x = self.maxpool3(x)
# N x 480 x 14 x 14
x = self.inception4a(x)
# N x 512 x 14 x 14
x = self.inception4b(x)
# N x 512 x 14 x 14
x = self.inception4c(x)
# N x 512 x 14 x 14
x = self.inception4d(x)
# N x 528 x 14 x 14
x = self.inception4e(x)
# N x 832 x 14 x 14
x = self.maxpool4(x)
# N x 832 x 7 x 7
x = self.inception5a(x)
# N x 832 x 7 x 7
x = self.inception5b(x)
# N x 1024 x 7 x 7
x = self.avgpool(x)
# N x 1024 x 1 x 1
x = torch.flatten(x, 1)
# N x 1024
x = self.dropout(x)
x = self.fc(x)
# N x 1000 (num_classes)
# R = self.CLRP(x)
#
# logit = x[:, x.max(1)[-1]].sum()
# logit.backward()
# R = self.fc.relprop(R)
# R = self.dropout.relprop(R)
# R = R.reshape_as(self.avgpool.Y)
# R = self.avgpool.relprop(R)
# R = self.inception5b.relprop(R)
# R = self.inception5a.relprop(R)
# R = self.maxpool4.relprop(R)
# R = self.inception4e.relprop(R)
# R = self.inception4d.relprop(R)
# R = self.inception4c.relprop(R)
# R = self.inception4b.relprop(R)
# R = self.inception4a.relprop(R)
# R = self.maxpool3.relprop(R)
# R = self.inception3b.relprop(R)
# R = self.inception3a.relprop(R)
#
# r_weight = torch.mean(R,dim=(2,3),keepdim=True)
# r_cam = t*r_weight
# r_cam = torch.sum(r_cam,dim=(0,1))
#
# a = self.activations['value']
# g = self.gradients['value']
# g_ = torch.mean(g,dim=(2,3),keepdim=True)
# grad_cam = a * g_
# grad_cam = torch.sum(grad_cam,dim=(0,1))
#
# g_2 = g ** 2
# g_3 = g ** 3
# alpha_numer = g_2
# alpha_denom = 2 * g_2 + torch.sum(a * g_3, dim=(0, 1), keepdim=True) # + 1e-2
#
# alpha = alpha_numer / alpha_denom
#
# w = torch.sum(alpha * torch.clamp(g, min =0), dim=(0, 1), keepdim=True)
#
# grad_cam_pp = torch.clamp(w * a, min=0)
# grad_cam_pp = torch.sum(grad_cam_pp, dim=-1)
return x
def CLRP(self,x):
maxindex = torch.argmax(x)
R = torch.ones(x.shape).cuda()
R /= -1000
R[:, maxindex] = 1
return R
def relprop(self,R):
R = self.fc.relprop(R)
R = self.dropout.relprop(R)
R = R.reshape_as(self.avgpool.Y)
R = self.avgpool.relprop(R)
R = self.inception5b.relprop(R)
R = self.inception5a.relprop(R)
R = self.maxpool4.relprop(R)
R = self.inception4e.relprop(R)
R = self.inception4d.relprop(R)
R = self.inception4c.relprop(R)
R = self.inception4b.relprop(R)
R = self.inception4a.relprop(R)
# R = self.maxpool3.relprop(R)
# R = self.inception3b.relprop(R)
# R = self.inception3a.relprop(R)
# R = self.maxpool2.relprop(R)
# R = self.conv3.relprop(R)
# R = self.conv2.relprop(R)
# R = self.maxpool1.relprop(R)
# R = self.conv1.relprop(R)
return R
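# Sketch of how CLRP/relprop are meant to be chained (mirrors the commented-out code in
# forward(); assumes the wrapped layers cache their outputs during the forward pass,
# which `R.reshape_as(self.avgpool.Y)` relies on, and that the model runs on CUDA since
# CLRP allocates its initial relevance with `.cuda()`):
#
#     logits = model(x)
#     R = model.CLRP(logits)    # contrastive initial relevance for the top class
#     R = model.relprop(R)      # propagated back to the input of inception4a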
class InceptionAux(nn.Module):
def __init__(
self,
in_channels: int,
num_classes: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(InceptionAux, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.conv = conv_block(in_channels, 128, kernel_size=1)
self.fc1 = nn.Linear(2048, 1024)
self.fc2 = nn.Linear(1024, num_classes)
def forward(self, x: Tensor) -> Tensor:
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = F.relu(self.fc1(x), inplace=True)
# N x 1024
x = F.dropout(x, 0.7, training=self.training)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class Inception(nn.Module):
def __init__(
self,
in_channels: int,
ch1x1: int,
ch3x3red: int,
ch3x3: int,
ch5x5red: int,
ch5x5: int,
pool_proj: int,
conv_block: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Inception, self).__init__()
if conv_block is None:
conv_block = BasicConv2d
self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
self.channel1 = ch1x1
self.branch2 = Sequential(
conv_block(in_channels, ch3x3red, kernel_size=1),
conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)
)
self.channel2 = ch3x3
self.branch3 = Sequential(
conv_block(in_channels, ch5x5red, kernel_size=1),
# Here, kernel_size=3 instead of kernel_size=5 is a known bug.
# Please see https://github.com/pytorch/vision/issues/906 for details.
conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1)
)
self.channel3 = ch5x5
self.branch4 = Sequential(
MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
conv_block(in_channels, pool_proj, kernel_size=1)
)
self.channel4 = pool_proj
def _forward(self, x: Tensor) -> List[Tensor]:
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
outputs = [branch1, branch2, branch3, branch4]
return outputs
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return torch.cat(outputs, 1)
def relprop(self,R):
R1 = R[:,:self.channel1]
R2 = R[:, self.channel1:self.channel1+self.channel2]
R3 = R[:, self.channel1+self.channel2:self.channel1+self.channel2+self.channel3]
R4 = R[:, self.channel1+self.channel2+self.channel3:]
R1 = self.branch1.relprop(R1)
R2 = self.branch2.relprop(R2)
R3 = self.branch3.relprop(R3)
R4 = self.branch4.relprop(R4)
R = R1+R2+R3+R4
return R
class BasicConv2d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
**kwargs: Any
) -> None:
super(BasicConv2d, self).__init__()
self.conv = Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
def relprop(self,R):
R = self.bn.relprop(R)
R = self.conv.relprop(R)
return R
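if __name__ == "__main__":
    # Minimal smoke test (sketch): assumes the LRP-aware layer wrappers used above
    # (Conv2d, Linear, MaxPool2d, ...) mirror the torch.nn constructor signatures,
    # so the network can be built and run like a stock torchvision GoogLeNet.
    model = GoogLeNet(num_classes=1000, aux_logits=False, init_weights=True)
    model.eval()
    x = torch.randn(1, 3, 224, 224)
    logits = model(x)                      # N x 1000
    logits[0, logits.argmax()].backward()  # trigger the backward hook on inception3b
    # The hooks registered in __init__ cache the inception3b activation/gradient,
    # which is enough for a Grad-CAM style map:
    a = model.activations['value']         # N x 480 x 28 x 28
    g = model.gradients['value']
    cam = (a * g.mean(dim=(2, 3), keepdim=True)).sum(dim=1)
    print(logits.shape, cam.shape)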
| 32.994805 | 119 | 0.581831 | 10,414 | 0.819806 | 0 | 0 | 0 | 0 | 0 | 0 | 3,597 | 0.283161 |
75fe3189c125c3919b270ac5067d6ecc73d03252
| 375 |
py
|
Python
|
University Codesprint Contest/seperaenos.py
|
ukirderohit/Python-Hacker-rank-solutions
|
de3b60b00d864c15a452977225b33ead19c878a5
|
[
"MIT"
] | null | null | null |
University Codesprint Contest/seperaenos.py
|
ukirderohit/Python-Hacker-rank-solutions
|
de3b60b00d864c15a452977225b33ead19c878a5
|
[
"MIT"
] | null | null | null |
University Codesprint Contest/seperaenos.py
|
ukirderohit/Python-Hacker-rank-solutions
|
de3b60b00d864c15a452977225b33ead19c878a5
|
[
"MIT"
] | null | null | null |
q = int(raw_input().strip())
for a0 in xrange(q):
s=raw_input().strip()
# if s.startswith('0'):
# print "No"
# print s.find('1')
# print s.rfind(s,a0,a0-1)
# posof1 = s.find('1')
    # Compare digits as integers so consecutive values can be detected.
    digits = [int(x) for x in str(s)]
    print digits
    for digit in xrange(1, len(digits)):
        if digits[digit] - digits[digit - 1] == 1:
            print "yes"
| 26.785714 | 46 | 0.512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.285333 |
75fe4ffed842895823f432c3592116337d923fac
| 8,457 |
py
|
Python
|
polyglotdb/client/client.py
|
michaelhaaf/PolyglotDB
|
7640212c7062cf44ae911081241ce83a26ced2eb
|
[
"MIT"
] | 25 |
2016-01-28T20:47:07.000Z
|
2021-11-29T16:13:07.000Z
|
polyglotdb/client/client.py
|
michaelhaaf/PolyglotDB
|
7640212c7062cf44ae911081241ce83a26ced2eb
|
[
"MIT"
] | 120 |
2016-04-07T17:55:09.000Z
|
2022-03-24T18:30:10.000Z
|
polyglotdb/client/client.py
|
PhonologicalCorpusTools/PolyglotDB
|
7640212c7062cf44ae911081241ce83a26ced2eb
|
[
"MIT"
] | 10 |
2015-12-03T20:06:58.000Z
|
2021-02-11T03:02:48.000Z
|
import requests
from ..exceptions import ClientError
class PGDBClient(object):
"""
Simple client for interacting with ISCAN servers.
"""
def __init__(self, host, token=None, corpus_name=None):
self.host = host
self.token = token
if self.host.endswith('/'):
self.host = self.host[:-1]
self.corpus_name = corpus_name
self.query_behavior = 'speaker'
def login(self, user_name, password):
"""
Get an authentication token from the ISCAN server using the specified credentials
Parameters
----------
user_name : str
User name
password : str
Password
Returns
-------
str
Authentication token to use in future requests
"""
end_point = '/'.join([self.host, 'api', 'rest-auth', 'login', ''])
resp = requests.post(end_point, {'username': user_name, 'password': password})
        if resp.status_code != 200:
            raise ClientError('Could not login: {}'.format(resp.text))
        token = resp.json()['key']
self.token = token
return token
def create_database(self, database_name):
"""
Create a new database with the specified name
Parameters
----------
database_name : str
Name of the database to be created
Returns
-------
dict
Database information
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
raise ClientError('Could not create database, already exists.')
end_point = '/'.join([self.host, 'api', 'databases', ''])
data = {'name': database_name}
resp = requests.post(end_point, data=data, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not create database: {}'.format(resp.text))
return resp.json()
def delete_database(self, database_name):
"""
Delete a database and all associated content
Parameters
----------
database_name : str
Name of database to be deleted
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not delete database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
resp = requests.delete(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code != 204:
raise ClientError('Could not delete database.')
def database_status(self, database_name=None):
"""
Get the current status of a specified database, or all databases on the server.
Parameters
----------
database_name : str
Name of database to get status of, if not specified, will get status of all databases
Returns
-------
dict
Database status JSON
"""
if database_name is not None:
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
else:
end_point = '/'.join([self.host, 'api', 'databases', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def get_directory(self, database_name):
"""
Get the directory of a local database
Parameters
----------
database_name : str
Name of database
Returns
-------
str
Database data directory
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'data_directory', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def get_ports(self, database_name):
"""
Get the ports of a locally running database
Parameters
----------
database_name : str
Name of database
Returns
-------
dict
Ports of the database
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'ports', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def list_databases(self):
"""
Get a list of all databases
Returns
-------
list
Database information
"""
end_point = '/'.join([self.host, 'api', 'databases', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code != 200:
raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))
return resp.json()
def list_corpora(self, database_name=None):
"""
Get a list of all corpora
Parameters
----------
database_name : str
Name of the database to restrict corpora list to, optional
Returns
-------
list
Corpora information
"""
if database_name is not None:
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'corpora', ''])
else:
end_point = '/'.join([self.host, 'api', 'corpora', ''])
resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})
return resp.json()
def start_database(self, database_name):
"""
Start a database
Parameters
----------
database_name : str
Database to start
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'start', ''])
resp = requests.post(end_point, data={}, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not start database: {}'.format(resp.text))
def stop_database(self, database_name):
"""
Stop a database
Parameters
----------
database_name : str
Database to stop
"""
databases = self.list_databases()
for d in databases:
if d['name'] == database_name:
database_id = d['id']
break
else:
raise ClientError('Could not find database, does not exist.')
end_point = '/'.join([self.host, 'api', 'databases', str(database_id), 'stop', ''])
resp = requests.post(end_point, data={}, headers={'Authorization': 'Token {}'.format(self.token)})
if resp.status_code not in [200, 201, 202]:
raise ClientError('Could not stop database: {}'.format(resp.text))
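if __name__ == '__main__':
    # Usage sketch (assumptions: an ISCAN server is reachable at the URL below and the
    # credentials are valid; the host, user name and password are placeholders).
    client = PGDBClient('http://localhost:8080')
    client.login('user', 'password')
    for database in client.list_databases():
        print(database['name'], client.database_status(database['name']))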
| 33.295276 | 108 | 0.537898 | 8,401 | 0.993378 | 0 | 0 | 0 | 0 | 0 | 0 | 3,516 | 0.41575 |
75fefd40d863da1697a3900b9bc8d32e531394bf
| 2,745 |
py
|
Python
|
python/plotCSV.py
|
lrquad/LoboScripts
|
04d2de79d2d83e781e3f4a3de2531dc48e4013a6
|
[
"MIT"
] | null | null | null |
python/plotCSV.py
|
lrquad/LoboScripts
|
04d2de79d2d83e781e3f4a3de2531dc48e4013a6
|
[
"MIT"
] | null | null | null |
python/plotCSV.py
|
lrquad/LoboScripts
|
04d2de79d2d83e781e3f4a3de2531dc48e4013a6
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
from matplotlib import rcParams
import matplotlib.patches as patches
rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 20
rcParams['axes.edgecolor'] = (0.0, 0.0, 0.0)
rcParams['axes.linewidth'] = 2
hfont = {'fontname': 'Times New Roman'}
folderpath = "./testdata/"
def format_e(n):
    # Compact scientific notation for tick labels; intended for exact powers of ten
    # (as produced below), e.g. 1e-05 -> "1.0E-05".
    a = '%E' % n
    return a.split('E')[0].rstrip('0').rstrip('.') + '.0E' + a.split('E')[1]
def loadData(path, logscale=True, min=1e-16):
    data = np.array(np.loadtxt(path, delimiter=',', unpack=True))
    # Clamp tiny values so zeros stay plottable, then apply the log scale if requested.
    data[data < min] = min
    if logscale:
        data = np.log10(data)
    return data
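# Quick illustration of loadData's clamping + log scaling (pure numpy, no file needed):
#     np.log10(np.maximum(np.array([1e-3, 0.0, 0.5]), 1e-16))
#     # -> approximately [-3., -16., -0.30103]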
def loadlabel(path):
data = np.array(np.loadtxt(path, delimiter=' ', unpack=True))
return data
def init(ax, xlabel_list, y_min, y_max, num_x, x_labels_num=16, y_labels_num=18):
ax.set_xlim(0, num_x)
print(y_min,y_max)
ax.set_ylim(y_min, y_max)
ax.set_xlabel("Perturbation size",**hfont)
ax.set_ylabel("Relative error ", **hfont)
y_labels_tuples = ()
ax.yaxis.set_major_locator(plt.MaxNLocator(y_labels_num))
x_labels_tuples = ()
ax.xaxis.set_major_locator(plt.MaxNLocator(x_labels_num+1))
for i in range(0,y_labels_num):
y_value = i/(y_labels_num-1)*(y_max-y_min)+y_min
y_value = format_e(10**int(y_value))
y_labels_tuples = y_labels_tuples+(y_value,)
ax.set_yticklabels(y_labels_tuples,size = 10)
for i in range(0,x_labels_num):
index = int(i/(x_labels_num-1)*(num_x-1))
x_labels_tuples = x_labels_tuples + (format_e(xlabel_list[index]),)
ax.set_xticklabels(x_labels_tuples,size = 15)
plt.xticks(rotation=45)
return
def plotData(data, labels,names):
num_lines = data.shape[0]
num_x = data.shape[1]
y_max = np.max(data)
y_min = np.min(data)
fig, ax = plt.subplots()
init(ax,labels,y_min,y_max,num_x,x_labels_num=16,y_labels_num=18)
fig.set_figheight(8)
fig.set_figwidth(8)
plt.grid(True)
ydata = np.arange(num_x)
for i in range(num_lines):
print(ydata.tolist())
print(data[i,:].tolist())
plt.plot(ydata.tolist(),data[i,:].tolist(),'-', animated=False,antialiased=True,markersize=5,color = '#FF5C5C',label = names[i],linewidth = 6)
#ln, = plt.plot(ydata.tolist(),data[0,:].tolist(),'-', animated=True,antialiased=True,markersize=5,color = '#FF5C5C',label = "te",linewidth = 6)
plt.subplots_adjust(bottom=0.22)
plt.show()
return
if __name__ == "__main__":
data = loadData(folderpath+"ttmath_error.csv")
labels = loadlabel(folderpath+"h_list.csv")
plotData(data, labels,["ttmath","FD","CD","CFD"])
| 30.5 | 151 | 0.665938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.143534 |
2f01f5d13c019c855d7b51b2b4f48b63f6f7275b
| 12,327 |
py
|
Python
|
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py
|
odidev/virgil-crypto-c
|
3d5d5cb19fdcf81eab08cdc63647f040117ecbd8
|
[
"BSD-3-Clause"
] | 26 |
2018-12-17T13:45:25.000Z
|
2022-01-16T20:00:04.000Z
|
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py
|
odidev/virgil-crypto-c
|
3d5d5cb19fdcf81eab08cdc63647f040117ecbd8
|
[
"BSD-3-Clause"
] | 4 |
2019-01-03T12:08:52.000Z
|
2021-12-02T05:21:13.000Z
|
wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_rsa.py
|
odidev/virgil-crypto-c
|
3d5d5cb19fdcf81eab08cdc63647f040117ecbd8
|
[
"BSD-3-Clause"
] | 8 |
2019-01-24T08:22:06.000Z
|
2022-02-07T11:37:00.000Z
|
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <[email protected]>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from ._vscf_error import vscf_error_t
from ._vscf_raw_public_key import vscf_raw_public_key_t
from ._vscf_raw_private_key import vscf_raw_private_key_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
class vscf_rsa_t(Structure):
pass
class VscfRsa(object):
"""RSA implementation."""
# Defines whether a public key can be imported or not.
CAN_IMPORT_PUBLIC_KEY = True
# Define whether a public key can be exported or not.
CAN_EXPORT_PUBLIC_KEY = True
# Define whether a private key can be imported or not.
CAN_IMPORT_PRIVATE_KEY = True
# Define whether a private key can be exported or not.
CAN_EXPORT_PRIVATE_KEY = True
def __init__(self):
"""Create underlying C context."""
self._ll = LowLevelLibs()
self._lib = self._ll.foundation
def vscf_rsa_new(self):
vscf_rsa_new = self._lib.vscf_rsa_new
vscf_rsa_new.argtypes = []
vscf_rsa_new.restype = POINTER(vscf_rsa_t)
return vscf_rsa_new()
def vscf_rsa_delete(self, ctx):
vscf_rsa_delete = self._lib.vscf_rsa_delete
vscf_rsa_delete.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_delete.restype = None
return vscf_rsa_delete(ctx)
def vscf_rsa_use_random(self, ctx, random):
vscf_rsa_use_random = self._lib.vscf_rsa_use_random
vscf_rsa_use_random.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_use_random.restype = None
return vscf_rsa_use_random(ctx, random)
def vscf_rsa_generate_ephemeral_key(self, ctx, key, error):
"""Generate ephemeral private key of the same type.
Note, this operation might be slow."""
vscf_rsa_generate_ephemeral_key = self._lib.vscf_rsa_generate_ephemeral_key
vscf_rsa_generate_ephemeral_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_rsa_generate_ephemeral_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_generate_ephemeral_key(ctx, key, error)
def vscf_rsa_import_public_key(self, ctx, raw_key, error):
"""Import public key from the raw binary format.
Return public key that is adopted and optimized to be used
with this particular algorithm.
Binary format must be defined in the key specification.
For instance, RSA public key must be imported from the format defined in
RFC 3447 Appendix A.1.1."""
vscf_rsa_import_public_key = self._lib.vscf_rsa_import_public_key
vscf_rsa_import_public_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_raw_public_key_t), POINTER(vscf_error_t)]
vscf_rsa_import_public_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_import_public_key(ctx, raw_key, error)
def vscf_rsa_export_public_key(self, ctx, public_key, error):
"""Export public key to the raw binary format.
Binary format must be defined in the key specification.
For instance, RSA public key must be exported in format defined in
RFC 3447 Appendix A.1.1."""
vscf_rsa_export_public_key = self._lib.vscf_rsa_export_public_key
vscf_rsa_export_public_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_rsa_export_public_key.restype = POINTER(vscf_raw_public_key_t)
return vscf_rsa_export_public_key(ctx, public_key, error)
def vscf_rsa_import_private_key(self, ctx, raw_key, error):
"""Import private key from the raw binary format.
Return private key that is adopted and optimized to be used
with this particular algorithm.
Binary format must be defined in the key specification.
For instance, RSA private key must be imported from the format defined in
RFC 3447 Appendix A.1.2."""
vscf_rsa_import_private_key = self._lib.vscf_rsa_import_private_key
vscf_rsa_import_private_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_raw_private_key_t), POINTER(vscf_error_t)]
vscf_rsa_import_private_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_import_private_key(ctx, raw_key, error)
def vscf_rsa_export_private_key(self, ctx, private_key, error):
"""Export private key in the raw binary format.
Binary format must be defined in the key specification.
For instance, RSA private key must be exported in format defined in
RFC 3447 Appendix A.1.2."""
vscf_rsa_export_private_key = self._lib.vscf_rsa_export_private_key
vscf_rsa_export_private_key.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_rsa_export_private_key.restype = POINTER(vscf_raw_private_key_t)
return vscf_rsa_export_private_key(ctx, private_key, error)
def vscf_rsa_can_encrypt(self, ctx, public_key, data_len):
"""Check if algorithm can encrypt data with a given key."""
vscf_rsa_can_encrypt = self._lib.vscf_rsa_can_encrypt
vscf_rsa_can_encrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_can_encrypt.restype = c_bool
return vscf_rsa_can_encrypt(ctx, public_key, data_len)
def vscf_rsa_encrypted_len(self, ctx, public_key, data_len):
"""Calculate required buffer length to hold the encrypted data."""
vscf_rsa_encrypted_len = self._lib.vscf_rsa_encrypted_len
vscf_rsa_encrypted_len.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_encrypted_len.restype = c_size_t
return vscf_rsa_encrypted_len(ctx, public_key, data_len)
def vscf_rsa_encrypt(self, ctx, public_key, data, out):
"""Encrypt data with a given public key."""
vscf_rsa_encrypt = self._lib.vscf_rsa_encrypt
vscf_rsa_encrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), vsc_data_t, POINTER(vsc_buffer_t)]
vscf_rsa_encrypt.restype = c_int
return vscf_rsa_encrypt(ctx, public_key, data, out)
def vscf_rsa_can_decrypt(self, ctx, private_key, data_len):
"""Check if algorithm can decrypt data with a given key.
However, success result of decryption is not guaranteed."""
vscf_rsa_can_decrypt = self._lib.vscf_rsa_can_decrypt
vscf_rsa_can_decrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_can_decrypt.restype = c_bool
return vscf_rsa_can_decrypt(ctx, private_key, data_len)
def vscf_rsa_decrypted_len(self, ctx, private_key, data_len):
"""Calculate required buffer length to hold the decrypted data."""
vscf_rsa_decrypted_len = self._lib.vscf_rsa_decrypted_len
vscf_rsa_decrypted_len.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_size_t]
vscf_rsa_decrypted_len.restype = c_size_t
return vscf_rsa_decrypted_len(ctx, private_key, data_len)
def vscf_rsa_decrypt(self, ctx, private_key, data, out):
"""Decrypt given data."""
vscf_rsa_decrypt = self._lib.vscf_rsa_decrypt
vscf_rsa_decrypt.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), vsc_data_t, POINTER(vsc_buffer_t)]
vscf_rsa_decrypt.restype = c_int
return vscf_rsa_decrypt(ctx, private_key, data, out)
def vscf_rsa_can_sign(self, ctx, private_key):
"""Check if algorithm can sign data digest with a given key."""
vscf_rsa_can_sign = self._lib.vscf_rsa_can_sign
vscf_rsa_can_sign.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_can_sign.restype = c_bool
return vscf_rsa_can_sign(ctx, private_key)
def vscf_rsa_signature_len(self, ctx, private_key):
"""Return length in bytes required to hold signature.
Return zero if a given private key can not produce signatures."""
vscf_rsa_signature_len = self._lib.vscf_rsa_signature_len
vscf_rsa_signature_len.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_signature_len.restype = c_size_t
return vscf_rsa_signature_len(ctx, private_key)
def vscf_rsa_sign_hash(self, ctx, private_key, hash_id, digest, signature):
"""Sign data digest with a given private key."""
vscf_rsa_sign_hash = self._lib.vscf_rsa_sign_hash
vscf_rsa_sign_hash.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_int, vsc_data_t, POINTER(vsc_buffer_t)]
vscf_rsa_sign_hash.restype = c_int
return vscf_rsa_sign_hash(ctx, private_key, hash_id, digest, signature)
def vscf_rsa_can_verify(self, ctx, public_key):
"""Check if algorithm can verify data digest with a given key."""
vscf_rsa_can_verify = self._lib.vscf_rsa_can_verify
vscf_rsa_can_verify.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t)]
vscf_rsa_can_verify.restype = c_bool
return vscf_rsa_can_verify(ctx, public_key)
def vscf_rsa_verify_hash(self, ctx, public_key, hash_id, digest, signature):
"""Verify data digest with a given public key and signature."""
vscf_rsa_verify_hash = self._lib.vscf_rsa_verify_hash
vscf_rsa_verify_hash.argtypes = [POINTER(vscf_rsa_t), POINTER(vscf_impl_t), c_int, vsc_data_t, vsc_data_t]
vscf_rsa_verify_hash.restype = c_bool
return vscf_rsa_verify_hash(ctx, public_key, hash_id, digest, signature)
def vscf_rsa_setup_defaults(self, ctx):
"""Setup predefined values to the uninitialized class dependencies."""
vscf_rsa_setup_defaults = self._lib.vscf_rsa_setup_defaults
vscf_rsa_setup_defaults.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_setup_defaults.restype = c_int
return vscf_rsa_setup_defaults(ctx)
def vscf_rsa_generate_key(self, ctx, bitlen, error):
"""Generate new private key.
Note, this operation might be slow."""
vscf_rsa_generate_key = self._lib.vscf_rsa_generate_key
vscf_rsa_generate_key.argtypes = [POINTER(vscf_rsa_t), c_size_t, POINTER(vscf_error_t)]
vscf_rsa_generate_key.restype = POINTER(vscf_impl_t)
return vscf_rsa_generate_key(ctx, bitlen, error)
def vscf_rsa_shallow_copy(self, ctx):
vscf_rsa_shallow_copy = self._lib.vscf_rsa_shallow_copy
vscf_rsa_shallow_copy.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_shallow_copy.restype = POINTER(vscf_rsa_t)
return vscf_rsa_shallow_copy(ctx)
def vscf_rsa_impl(self, ctx):
vscf_rsa_impl = self._lib.vscf_rsa_impl
vscf_rsa_impl.argtypes = [POINTER(vscf_rsa_t)]
vscf_rsa_impl.restype = POINTER(vscf_impl_t)
return vscf_rsa_impl(ctx)
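if __name__ == "__main__":
    # Minimal lifecycle sketch (assumption: the native foundation library bundled with
    # virgil_crypto_lib loads on this platform). Only wrappers defined above are used.
    rsa = VscfRsa()
    ctx = rsa.vscf_rsa_new()
    status = rsa.vscf_rsa_setup_defaults(ctx)   # 0 indicates success in the C API
    print('setup_defaults status:', status)
    rsa.vscf_rsa_delete(ctx)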
| 49.705645 | 124 | 0.736108 | 10,343 | 0.839052 | 0 | 0 | 0 | 0 | 0 | 0 | 3,969 | 0.321976 |
2f028c07302e47df287d4dc5d37f771ec2181806
| 30,394 |
py
|
Python
|
tb_api_client/swagger_client/apis/user_controller_api.py
|
MOSAIC-LoPoW/oss7-thingsboard-backend-example
|
9b289dd7fdbb6e932ca338ad497a7bb1fc84d010
|
[
"Apache-2.0"
] | 5 |
2017-11-27T15:48:16.000Z
|
2020-09-21T04:18:47.000Z
|
tb_api_client/swagger_client/apis/user_controller_api.py
|
MOSAIC-LoPoW/oss7-thingsboard-backend-example
|
9b289dd7fdbb6e932ca338ad497a7bb1fc84d010
|
[
"Apache-2.0"
] | null | null | null |
tb_api_client/swagger_client/apis/user_controller_api.py
|
MOSAIC-LoPoW/oss7-thingsboard-backend-example
|
9b289dd7fdbb6e932ca338ad497a7bb1fc84d010
|
[
"Apache-2.0"
] | 6 |
2018-01-14T17:23:46.000Z
|
2019-06-24T13:38:54.000Z
|
# coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>.
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class UserControllerApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_user_using_delete(self, user_id, **kwargs):
"""
deleteUser
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_user_using_delete(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: userId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_user_using_delete_with_http_info(user_id, **kwargs)
else:
(data) = self.delete_user_using_delete_with_http_info(user_id, **kwargs)
return data
def delete_user_using_delete_with_http_info(self, user_id, **kwargs):
"""
deleteUser
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_user_using_delete_with_http_info(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: userId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_user_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `delete_user_using_delete`")
collection_formats = {}
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/user/{userId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_activation_link_using_get(self, user_id, **kwargs):
"""
getActivationLink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_activation_link_using_get(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: userId (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_activation_link_using_get_with_http_info(user_id, **kwargs)
else:
(data) = self.get_activation_link_using_get_with_http_info(user_id, **kwargs)
return data
def get_activation_link_using_get_with_http_info(self, user_id, **kwargs):
"""
getActivationLink
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_activation_link_using_get_with_http_info(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: userId (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_activation_link_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_activation_link_using_get`")
collection_formats = {}
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/plain'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/user/{userId}/activationLink', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_customer_users_using_get(self, customer_id, limit, **kwargs):
"""
getCustomerUsers
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_customer_users_using_get(customer_id, limit, async=True)
>>> result = thread.get()
:param async bool
:param str customer_id: customerId (required)
:param str limit: limit (required)
:param str text_search: textSearch
:param str id_offset: idOffset
:param str text_offset: textOffset
:return: TextPageDataUser
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_customer_users_using_get_with_http_info(customer_id, limit, **kwargs)
else:
(data) = self.get_customer_users_using_get_with_http_info(customer_id, limit, **kwargs)
return data
def get_customer_users_using_get_with_http_info(self, customer_id, limit, **kwargs):
"""
getCustomerUsers
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_customer_users_using_get_with_http_info(customer_id, limit, async=True)
>>> result = thread.get()
:param async bool
:param str customer_id: customerId (required)
:param str limit: limit (required)
:param str text_search: textSearch
:param str id_offset: idOffset
:param str text_offset: textOffset
:return: TextPageDataUser
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_id', 'limit', 'text_search', 'id_offset', 'text_offset']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_customer_users_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_users_using_get`")
# verify the required parameter 'limit' is set
if ('limit' not in params) or (params['limit'] is None):
raise ValueError("Missing the required parameter `limit` when calling `get_customer_users_using_get`")
collection_formats = {}
path_params = {}
if 'customer_id' in params:
path_params['customerId'] = params['customer_id']
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search']))
if 'id_offset' in params:
query_params.append(('idOffset', params['id_offset']))
if 'text_offset' in params:
query_params.append(('textOffset', params['text_offset']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/customer/{customerId}/users', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TextPageDataUser',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tenant_admins_using_get(self, tenant_id, limit, **kwargs):
"""
getTenantAdmins
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_tenant_admins_using_get(tenant_id, limit, async=True)
>>> result = thread.get()
:param async bool
:param str tenant_id: tenantId (required)
:param str limit: limit (required)
:param str text_search: textSearch
:param str id_offset: idOffset
:param str text_offset: textOffset
:return: TextPageDataUser
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_tenant_admins_using_get_with_http_info(tenant_id, limit, **kwargs)
else:
(data) = self.get_tenant_admins_using_get_with_http_info(tenant_id, limit, **kwargs)
return data
def get_tenant_admins_using_get_with_http_info(self, tenant_id, limit, **kwargs):
"""
getTenantAdmins
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_tenant_admins_using_get_with_http_info(tenant_id, limit, async=True)
>>> result = thread.get()
:param async bool
:param str tenant_id: tenantId (required)
:param str limit: limit (required)
:param str text_search: textSearch
:param str id_offset: idOffset
:param str text_offset: textOffset
:return: TextPageDataUser
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['tenant_id', 'limit', 'text_search', 'id_offset', 'text_offset']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tenant_admins_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'tenant_id' is set
if ('tenant_id' not in params) or (params['tenant_id'] is None):
raise ValueError("Missing the required parameter `tenant_id` when calling `get_tenant_admins_using_get`")
# verify the required parameter 'limit' is set
if ('limit' not in params) or (params['limit'] is None):
raise ValueError("Missing the required parameter `limit` when calling `get_tenant_admins_using_get`")
collection_formats = {}
path_params = {}
if 'tenant_id' in params:
path_params['tenantId'] = params['tenant_id']
query_params = []
if 'text_search' in params:
query_params.append(('textSearch', params['text_search']))
if 'id_offset' in params:
query_params.append(('idOffset', params['id_offset']))
if 'text_offset' in params:
query_params.append(('textOffset', params['text_offset']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/tenant/{tenantId}/users', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TextPageDataUser',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_by_id_using_get(self, user_id, **kwargs):
"""
getUserById
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_user_by_id_using_get(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: userId (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_user_by_id_using_get_with_http_info(user_id, **kwargs)
else:
(data) = self.get_user_by_id_using_get_with_http_info(user_id, **kwargs)
return data
def get_user_by_id_using_get_with_http_info(self, user_id, **kwargs):
"""
getUserById
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_user_by_id_using_get_with_http_info(user_id, async=True)
>>> result = thread.get()
:param async bool
:param str user_id: userId (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user_id']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_user_by_id_using_get`")
collection_formats = {}
path_params = {}
if 'user_id' in params:
path_params['userId'] = params['user_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/user/{userId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_user_using_post(self, user, **kwargs):
"""
saveUser
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.save_user_using_post(user, async=True)
>>> result = thread.get()
:param async bool
:param User user: user (required)
:param bool send_activation_mail: sendActivationMail
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.save_user_using_post_with_http_info(user, **kwargs)
else:
(data) = self.save_user_using_post_with_http_info(user, **kwargs)
return data
def save_user_using_post_with_http_info(self, user, **kwargs):
"""
saveUser
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.save_user_using_post_with_http_info(user, async=True)
>>> result = thread.get()
:param async bool
:param User user: user (required)
:param bool send_activation_mail: sendActivationMail
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['user', 'send_activation_mail']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_user_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'user' is set
if ('user' not in params) or (params['user'] is None):
raise ValueError("Missing the required parameter `user` when calling `save_user_using_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'send_activation_mail' in params:
query_params.append(('sendActivationMail', params['send_activation_mail']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'user' in params:
body_params = params['user']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/user', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def send_activation_email_using_post(self, email, **kwargs):
"""
sendActivationEmail
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.send_activation_email_using_post(email, async=True)
>>> result = thread.get()
:param async bool
:param str email: email (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.send_activation_email_using_post_with_http_info(email, **kwargs)
else:
(data) = self.send_activation_email_using_post_with_http_info(email, **kwargs)
return data
def send_activation_email_using_post_with_http_info(self, email, **kwargs):
"""
sendActivationEmail
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.send_activation_email_using_post_with_http_info(email, async=True)
>>> result = thread.get()
:param async bool
:param str email: email (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method send_activation_email_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email' is set
if ('email' not in params) or (params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `send_activation_email_using_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'email' in params:
query_params.append(('email', params['email']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['*/*'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['X-Authorization']
return self.api_client.call_api('/api/user/sendActivationMail', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
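# Usage sketch (assumptions: the generated ApiClient has been configured with a valid
# `X-Authorization` JWT for the ThingsBoard instance; the user id and e-mail below are
# placeholders, not real values):
#
#     api = UserControllerApi()                     # wraps a default ApiClient()
#     user = api.get_user_by_id_using_get('1e8a7c60-0000-0000-0000-000000000000')
#     api.send_activation_email_using_post('[email protected]')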
| 40.471372 | 149 | 0.560012 | 29,875 | 0.982924 | 0 | 0 | 0 | 0 | 0 | 0 | 13,287 | 0.437159 |
2f03da5c972d890701aa5588b07be7bd754ca560
| 5,268 |
py
|
Python
|
bulk-image-optimizer.py
|
carzam87/python-bulk-image-optimizer
|
1e9e9396de84de3651b963fc3b8b569893296dde
|
[
"MIT"
] | 8 |
2020-01-28T10:33:28.000Z
|
2022-01-28T12:51:50.000Z
|
bulk-image-optimizer.py
|
carzam87/python-bulk-image-optimizer
|
1e9e9396de84de3651b963fc3b8b569893296dde
|
[
"MIT"
] | null | null | null |
bulk-image-optimizer.py
|
carzam87/python-bulk-image-optimizer
|
1e9e9396de84de3651b963fc3b8b569893296dde
|
[
"MIT"
] | 5 |
2020-09-29T08:26:35.000Z
|
2021-11-15T20:07:20.000Z
|
import os
import shutil
from pathlib import Path
from PIL import Image
import errno
import time
from re import search
CONVERT_PNG_TO_JPG = False
TOTAL_ORIGINAL = 0
TOTAL_COMPRESSED = 0
TOTAL_GAIN = 0
TOTAL_FILES = 0
QUALITY = 85
def compress(location):
for r, d, f in os.walk(location):
for item in d:
compress(location + os.sep + item)
for image in f:
path = location
input_path = path + os.sep + image
out_path = path.replace(r'input', r'output')
if image.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif', 'webp')):
if os.path.isfile(input_path):
global TOTAL_GAIN
global TOTAL_ORIGINAL
global TOTAL_COMPRESSED
global TOTAL_FILES
global QUALITY
opt = None
try:
opt = Image.open(input_path)
except:
#do nothing just print the file skipping
print(f'skipping file cannot open: {input_path}')
continue
original_size = os.stat(input_path).st_size / 1024 / 1024
TOTAL_ORIGINAL += original_size
print(input_path)
print("Original size: " + f'{original_size:,.2f}' + ' Megabytes')
if not os.path.exists(out_path):
try:
os.makedirs(out_path, exist_ok=True)
except OSError as e:
#wait for race condition to settle
time.sleep(1)
# try to create the folder again
os.makedirs(out_path, exist_ok=True)
if e.errno != errno.EEXIST:
raise
out_file= out_path + os.sep + image
# Convert .pgn to .jpg
if CONVERT_PNG_TO_JPG and image.lower().endswith('.png'):
im = opt
rgb_im = im.convert('RGB')
out_file = out_file.replace(".png", ".jpg")
rgb_im.save(out_file)
opt = Image.open(out_file)
opt.save(out_file, optimize=True, quality=QUALITY)
opt = Image.open(out_file)
compressed_size = os.stat(out_file).st_size / 1024 / 1024
TOTAL_COMPRESSED += compressed_size
gain = original_size - compressed_size
TOTAL_GAIN += gain
TOTAL_FILES +=1
print("Compressed size: " + f'{compressed_size:,.2f}' + " megabytes")
print("Gain : " + f'{gain:,.2f}' + " megabytes")
opt.close()
else:
if os.path.isdir(out_path) and not os.path.exists(out_path):
try:
os.makedirs(out_path, exist_ok=True)
except OSError as e:
#wait for race condition to settle
time.sleep(1)
# try to create the folder again
os.makedirs(out_path, exist_ok=True)
if e.errno != errno.EEXIST:
raise
if os.path.isfile(input_path):
if not os.path.exists(out_path):
try:
os.makedirs(out_path, exist_ok=True)
except OSError as e:
#wait for race condition to settle
time.sleep(1)
# try to create the folder again
os.makedirs(out_path, exist_ok=True)
if e.errno != errno.EEXIST:
raise
input_file = input_path
            output_file = input_file.replace('input', 'output')
            print('File not image, copying instead: ' + input_path)
            # shutil.copy2 preserves metadata and handles paths with spaces, unlike
            # building a shell `cp` command by string concatenation.
            shutil.copy2(input_file, output_file)
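# Folder layout assumed by compress(): images are read from an `input` directory next
# to this script and written to a mirrored `output` tree, e.g.
#     input/photos/cat.png  ->  output/photos/cat.jpg  (when PNG->JPG conversion is on)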
if __name__ == '__main__':
start_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + r"input"
# ask if .pgn images should automatically converted to .jpg
CONVERT_PNG_TO_JPG = input('Would you like to convert .png images to .jpg? (y/n): ') == 'y'
TOTAL_GAIN = 0
compress(start_path)
print("---------------------------------------------------------------------------------------------")
print('-------------------------------------------SUMMARY-------------------------------------------')
print('Files: ' + f'{TOTAL_FILES}')
print(
"Original: " + f'{TOTAL_ORIGINAL:,.2f}' + " megabytes || " + "New Size: " + f'{TOTAL_COMPRESSED:,.2f}' +
" megabytes" + " || Gain: " + f'{TOTAL_GAIN:,.2f}' + " megabytes ~" + f'{(TOTAL_GAIN / TOTAL_ORIGINAL) * 100:,.2f}'
+ "% reduction")
| 44.644068 | 123 | 0.44609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,144 | 0.21716 |
2f03ebf048e5859cb54e5897517da48e3b0f38d0
| 16,968 |
py
|
Python
|
interpretdl/interpreter/lime.py
|
Tyihou/InterpretDL
|
df8894f8703634df4bfcbdcc495a3d12b220028c
|
[
"Apache-2.0"
] | 1 |
2021-03-11T02:38:51.000Z
|
2021-03-11T02:38:51.000Z
|
interpretdl/interpreter/lime.py
|
Tyihou/InterpretDL
|
df8894f8703634df4bfcbdcc495a3d12b220028c
|
[
"Apache-2.0"
] | null | null | null |
interpretdl/interpreter/lime.py
|
Tyihou/InterpretDL
|
df8894f8703634df4bfcbdcc495a3d12b220028c
|
[
"Apache-2.0"
] | null | null | null |
import os
import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
from ..data_processor.readers import preprocess_image, read_image, restore_image
from ..data_processor.visualizer import show_important_parts, visualize_image, save_image
from ..common.paddle_utils import init_checkpoint, to_lodtensor
from ._lime_base import LimeBase
from .abc_interpreter import Interpreter
class LIMECVInterpreter(Interpreter):
"""
LIME Interpreter for CV tasks.
More details regarding the LIME method can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(self,
paddle_model: Callable,
trained_model_path: str,
model_input_shape=[3, 224, 224],
use_cuda=True) -> None:
"""
Initialize the LIMECVInterpreter.
Args:
paddle_model (callable): A user-defined function that gives access to model predictions.
It takes the following arguments:
- data: Data inputs.
and outputs predictions. See the example at the end of ``interpret()``.
trained_model_path (str): The pretrained model directory.
model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
use_cuda (bool, optional): Whether or not to use cuda. Default: True
"""
Interpreter.__init__(self)
self.paddle_model = paddle_model
self.trained_model_path = trained_model_path
self.model_input_shape = model_input_shape
self.use_cuda = use_cuda
self.paddle_prepared = False
# use the default LIME setting
self.lime_base = LimeBase()
self.lime_intermediate_results = {}
def interpret(self,
data,
interpret_class=None,
num_samples=1000,
batch_size=50,
visual=True,
save_path=None):
"""
Main function of the interpreter.
Args:
data (str): The input file path.
interpret_class (int, optional): The index of class to interpret. If None, the most likely label will be used. Default: None
num_samples (int, optional): LIME sampling numbers. Larger number of samples usually gives more accurate interpretation. Default: 1000
batch_size (int, optional): Number of samples to forward each time. Default: 50
visual (bool, optional): Whether or not to visualize the processed image. Default: True
save_path (str, optional): The path to save the processed image. If None, the image will not be saved. Default: None
:return: LIME Prior weights: {interpret_label_i: weights on features}
:rtype: dict
Example::
import interpretdl as it
def paddle_model(data):
import paddle.fluid as fluid
class_num = 1000
model = ResNet50()
logits = model.net(input=image_input, class_dim=class_num)
probs = fluid.layers.softmax(logits, axis=-1)
return probs
lime = it.LIMECVInterpreter(paddle_model, "assets/ResNet50_pretrained")
lime_weights = lime.interpret(
'assets/catdog.png',
num_samples=1000,
batch_size=100,
save_path='assets/catdog_lime.png')
"""
if isinstance(data, str):
data_instance = read_image(
data, crop_size=self.model_input_shape[1])
else:
if len(data.shape) == 3:
data = np.expand_dims(data, axis=0)
if np.issubdtype(data.dtype, np.integer):
data_instance = data
else:
data_instance = restore_image(data.copy())
self.input_type = type(data_instance)
self.data_type = np.array(data_instance).dtype
if not self.paddle_prepared:
self._paddle_prepare()
# only one example here
probability = self.predict_fn(data_instance)[0]
# only interpret top 1
if interpret_class is None:
pred_label = np.argsort(probability)
interpret_class = pred_label[-1:]
interpret_class = np.array(interpret_class)
lime_weights, r2_scores = self.lime_base.interpret_instance(
data_instance[0],
self.predict_fn,
interpret_class,
num_samples=num_samples,
batch_size=batch_size)
interpretation = show_important_parts(
data_instance[0],
lime_weights,
interpret_class[0],
self.lime_base.segments,
visual=visual,
save_path=save_path)
self.lime_intermediate_results['probability'] = probability
self.lime_intermediate_results['input'] = data_instance[0]
self.lime_intermediate_results[
'segmentation'] = self.lime_base.segments
self.lime_intermediate_results['r2_scores'] = r2_scores
return lime_weights
def _paddle_prepare(self, predict_fn=None):
if predict_fn is None:
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_prog):
with fluid.unique_name.guard():
data_op = fluid.data(
name='data',
shape=[None] + self.model_input_shape,
dtype='float32')
probs = self.paddle_model(data_op)
if isinstance(probs, tuple):
probs = probs[0]
main_program = main_program.clone(for_test=True)
if self.use_cuda:
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id)
else:
place = fluid.CPUPlace()
self.place = place
exe = fluid.Executor(place)
fluid.io.load_persistables(exe, self.trained_model_path,
main_program)
def predict_fn(data_instance):
data = preprocess_image(
data_instance
) # transpose to [N, 3, H, W], scaled to [0.0, 1.0]
[result] = exe.run(main_program,
fetch_list=[probs],
feed={'data': data})
return result
self.predict_fn = predict_fn
self.paddle_prepared = True
class LIMENLPInterpreter(Interpreter):
"""
LIME Interpreter for NLP tasks.
More details regarding the LIME method can be found in the original paper:
https://arxiv.org/abs/1602.04938
"""
def __init__(self,
paddle_model: Callable,
trained_model_path: str,
use_cuda=True) -> None:
"""
Initialize the LIMENLPInterpreter.
Args:
paddle_model (callable): A user-defined function that gives access to model predictions.
It takes the following arguments:
- data: Data inputs.
and outputs predictions. See the example at the end of ``interpret()``.
trained_model_path (str): The pretrained model directory.
use_cuda (bool, optional): Whether or not to use cuda. Default: True
"""
Interpreter.__init__(self)
self.paddle_model = paddle_model
self.trained_model_path = trained_model_path
self.use_cuda = use_cuda
self.paddle_prepared = False
# use the default LIME setting
self.lime_base = LimeBase()
self.lime_intermediate_results = {}
def interpret(self,
data,
preprocess_fn,
unk_id,
pad_id=None,
interpret_class=None,
num_samples=1000,
batch_size=50,
lod_levels=None,
return_pred=False,
visual=True):
"""
Main function of the interpreter.
Args:
data (str): The raw string for analysis.
            preprocess_fn (Callable): A user-defined function that takes a raw string and outputs a tuple of inputs to feed into the NLP model.
unk_id (int): The word id to replace occluded words. Typical choices include "", <unk>, and <pad>.
pad_id (int or None): The word id used to pad the sequences. If None, it means there is no padding. Default: None.
interpret_class (list or numpy.ndarray, optional): The index of class to interpret. If None, the most likely label will be used. Default: None
num_samples (int, optional): LIME sampling numbers. Larger number of samples usually gives more accurate interpretation. Default: 1000
batch_size (int, optional): Number of samples to forward each time. Default: 50
lod_levels (list or tuple or numpy.ndarray or None, optional): The lod levels for model inputs. It should have the length equal to number of outputs given by preprocess_fn.
If None, lod levels are all zeros. Default: None.
visual (bool, optional): Whether or not to visualize. Default: True
:return: LIME Prior weights: {interpret_label_i: weights on features}
:rtype: dict
Example::
from assets.bilstm import bilstm
import io
from interpretdl.data_processor.visualizer import VisualizationTextRecord, visualize_text
def load_vocab(file_path):
vocab = {}
with io.open(file_path, 'r', encoding='utf8') as f:
wid = 0
for line in f:
if line.strip() not in vocab:
vocab[line.strip()] = wid
wid += 1
vocab["<unk>"] = len(vocab)
return vocab
MODEL_PATH = "assets/senta_model/bilstm_model"
VOCAB_PATH = os.path.join(MODEL_PATH, "word_dict.txt")
PARAMS_PATH = os.path.join(MODEL_PATH, "params")
DICT_DIM = 1256606
def paddle_model(data, seq_len):
probs = bilstm(data, seq_len, None, DICT_DIM, is_prediction=True)
return probs
MAX_SEQ_LEN = 256
def preprocess_fn(data):
word_ids = []
sub_word_ids = [word_dict.get(d, unk_id) for d in data.split()]
seq_lens = [len(sub_word_ids)]
if len(sub_word_ids) < MAX_SEQ_LEN:
sub_word_ids += [0] * (MAX_SEQ_LEN - len(sub_word_ids))
word_ids.append(sub_word_ids[:MAX_SEQ_LEN])
return word_ids, seq_lens
#https://baidu-nlp.bj.bcebos.com/sentiment_classification-dataset-1.0.0.tar.gz
word_dict = load_vocab(VOCAB_PATH)
unk_id = word_dict[""] #word_dict["<unk>"]
lime = it.LIMENLPInterpreter(paddle_model, PARAMS_PATH)
reviews = [
'交通 方便 ;环境 很好 ;服务态度 很好 房间 较小',
'这本书 实在 太烂 了 , 什么 朗读 手册 , 一点 朗读 的 内容 都 没有 . 看 了 几页 就 不 想 看 下去 了 .'
]
true_labels = [1, 0]
recs = []
for i, review in enumerate(reviews):
pred_class, pred_prob, lime_weights = lime.interpret(
review,
preprocess_fn,
num_samples=200,
batch_size=10,
unk_id=unk_id,
pad_id=0,
return_pred=True)
id2word = dict(zip(word_dict.values(), word_dict.keys()))
for y in lime_weights:
print([(id2word[t[0]], t[1]) for t in lime_weights[y]])
words = review.split()
interp_class = list(lime_weights.keys())[0]
word_importances = [t[1] for t in lime_weights[interp_class]]
word_importances = np.array(word_importances) / np.linalg.norm(
word_importances)
true_label = true_labels[i]
if interp_class == 0:
word_importances = -word_importances
rec = VisualizationTextRecord(words, word_importances, true_label,
pred_class[0], pred_prob[0],
interp_class)
recs.append(rec)
visualize_text(recs)
"""
model_inputs = preprocess_fn(data)
if not isinstance(model_inputs, tuple):
self.model_inputs = (np.array(model_inputs), )
else:
self.model_inputs = tuple(np.array(inp) for inp in model_inputs)
if lod_levels is None:
lod_levels = [0] * len(self.model_inputs)
self.lod_levels = lod_levels
if not self.paddle_prepared:
self._paddle_prepare()
# only one example here
probability = self.predict_fn(*self.model_inputs)[0]
# only interpret top 1
if interpret_class is None:
pred_label = np.argsort(probability)
interpret_class = pred_label[-1:]
lime_weights, r2_scores = self.lime_base.interpret_instance_text(
self.model_inputs,
classifier_fn=self.predict_fn,
interpret_labels=interpret_class,
unk_id=unk_id,
pad_id=pad_id,
num_samples=num_samples,
batch_size=batch_size)
data_array = self.model_inputs[0]
data_array = data_array.reshape((np.prod(data_array.shape), ))
for c in lime_weights:
weights_c = lime_weights[c]
weights_new = [(data_array[tup[0]], tup[1]) for tup in weights_c]
lime_weights[c] = weights_new
if return_pred:
return (interpret_class, probability[interpret_class],
lime_weights)
return lime_weights
def _paddle_prepare(self, predict_fn=None):
if predict_fn is None:
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_prog):
with fluid.unique_name.guard():
data_ops = ()
for i, inp in enumerate(self.model_inputs):
if self.lod_levels[i] > 0:
op_ = fluid.data(
name='op_%d' % i,
shape=[None],
dtype=inp.dtype,
lod_level=self.lod_levels[i])
else:
op_ = fluid.data(
name='op_%d' % i,
shape=(None, ) + inp.shape[1:],
dtype=inp.dtype)
data_ops += (op_, )
probs = self.paddle_model(*data_ops)
if isinstance(probs, tuple):
probs = probs[0]
main_program = main_program.clone(for_test=True)
if self.use_cuda:
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
place = fluid.CUDAPlace(gpu_id)
else:
place = fluid.CPUPlace()
self.place = place
exe = fluid.Executor(self.place)
#exe.run(startup_prog)
#fluid.io.load_persistables(exe, self.trained_model_path,
# main_program)
init_checkpoint(exe, self.trained_model_path, main_program)
#fluid.load(main_program, self.trained_model_path, exe)
def predict_fn(*params):
params = self._format_model_inputs(params)
[result] = exe.run(
main_program,
fetch_list=[probs],
feed={'op_%d' % i: d
for i, d in enumerate(params)})
return result
self.predict_fn = predict_fn
self.paddle_prepared = True
def _format_model_inputs(self, model_inputs):
out = ()
for i, inp in enumerate(model_inputs):
if self.lod_levels[i] == 0:
out += (inp, )
else:
out += (to_lodtensor(inp, self.place), )
return out
| 39.277778 | 184 | 0.552393 | 16,665 | 0.975817 | 0 | 0 | 0 | 0 | 0 | 0 | 8,216 | 0.481087 |
2f042e06fed341e6137967c14ffb3b319a432271
| 2,106 |
py
|
Python
|
opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py
|
Koen1999/opendc
|
f9b43518d2d50f33077734537a477539fca9f5b7
|
[
"MIT"
] | null | null | null |
opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py
|
Koen1999/opendc
|
f9b43518d2d50f33077734537a477539fca9f5b7
|
[
"MIT"
] | 4 |
2020-11-27T16:27:58.000Z
|
2020-12-28T23:00:08.000Z
|
opendc-web/opendc-web-api/opendc/api/v2/portfolios/portfolioId/scenarios/endpoint.py
|
Koen1999/opendc
|
f9b43518d2d50f33077734537a477539fca9f5b7
|
[
"MIT"
] | null | null | null |
from opendc.models.portfolio import Portfolio
from opendc.models.scenario import Scenario
from opendc.models.topology import Topology
from opendc.util.rest import Response
def POST(request):
"""Add a new Scenario for this Portfolio."""
request.check_required_parameters(path={'portfolioId': 'string'},
body={
'scenario': {
'name': 'string',
'trace': {
'traceId': 'string',
'loadSamplingFraction': 'float',
},
'topology': {
'topologyId': 'string',
},
'operational': {
'failuresEnabled': 'bool',
'performanceInterferenceEnabled': 'bool',
'schedulerName': 'string',
},
}
})
portfolio = Portfolio.from_id(request.params_path['portfolioId'])
portfolio.check_exists()
portfolio.check_user_access(request.google_id, True)
scenario = Scenario(request.params_body['scenario'])
topology = Topology.from_id(scenario.obj['topology']['topologyId'])
topology.check_exists()
topology.check_user_access(request.google_id, True)
scenario.set_property('portfolioId', portfolio.get_id())
scenario.set_property('simulation', {'state': 'QUEUED'})
scenario.set_property('topology.topologyId', topology.get_id())
scenario.insert()
portfolio.obj['scenarioIds'].append(scenario.get_id())
portfolio.update()
return Response(200, 'Successfully added Scenario.', scenario.obj)
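# Illustrative request body for the POST handler above. Every value below is a
# placeholder chosen for this sketch, not data taken from this repository:
EXAMPLE_SCENARIO_BODY = {
    "scenario": {
        "name": "baseline",
        "trace": {"traceId": "<trace-id>", "loadSamplingFraction": 1.0},
        "topology": {"topologyId": "<topology-id>"},
        "operational": {
            "failuresEnabled": False,
            "performanceInterferenceEnabled": False,
            "schedulerName": "<scheduler-name>",
        },
    }
}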
| 42.12 | 91 | 0.45679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.198481 |
f92e0d9330578dc947fa3c0cecc40a9523ecca24
| 1,906 |
py
|
Python
|
Python/pymd/md/core/box.py
|
ryanlopezzzz/ABPTutorial
|
923fa89f1959cd71b28ecf4628ecfbfce6a6206c
|
[
"MIT"
] | 8 |
2020-05-05T00:41:50.000Z
|
2021-11-04T20:54:43.000Z
|
Python/pymd/md/core/box.py
|
ryanlopezzzz/ABPTutorial
|
923fa89f1959cd71b28ecf4628ecfbfce6a6206c
|
[
"MIT"
] | null | null | null |
Python/pymd/md/core/box.py
|
ryanlopezzzz/ABPTutorial
|
923fa89f1959cd71b28ecf4628ecfbfce6a6206c
|
[
"MIT"
] | 5 |
2020-05-04T16:37:13.000Z
|
2021-08-18T07:53:58.000Z
|
# Copyright 2020 Rastko Sknepnek, University of Dundee, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Class handling the simulation box
class Box:
def __init__(self, Lx, Ly = None):
"""
Construct simulation box.
Parameters
----------
Lx : float
Size of the simulation box in x direction
Ly : float
Size of the simulation box in y direction (if None, same as Lx, i.e., square box)
Note
----
        Simulation box is centred at (0,0), i.e., x is in (-Lx/2,Lx/2] and y is in (-Ly/2,Ly/2]
"""
if Lx < 0.0:
raise ValueError('Simulation box has to have length larger than 0.')
self.Lx = Lx
        self.Ly = Lx if (Ly is None or Ly < 0.0) else Ly
self.xmin = -0.5*self.Lx
self.xmax = 0.5*self.Lx
self.ymin = -0.5*self.Ly
self.ymax = 0.5*self.Ly
self.A = self.Lx*self.Ly
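# Minimal usage sketch (illustrative only): exercises the constructor defined
# above and prints the derived bounds and area.
if __name__ == "__main__":
    box = Box(10.0, 5.0)
    print(box.xmin, box.xmax, box.ymin, box.ymax)  # -5.0 5.0 -2.5 2.5
    print(box.A)                                   # 50.0
    square = Box(4.0)                              # Ly defaults to Lx
    print(square.Lx == square.Ly)                  # True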
| 45.380952 | 114 | 0.693599 | 732 | 0.38405 | 0 | 0 | 0 | 0 | 0 | 0 | 1,575 | 0.826338 |
f92f4eea713aeec6532cc3eed5da737cef8d020e
| 884 |
py
|
Python
|
dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py
|
gabefgonc/san-francisco-rice-dotfiles
|
60ff3539f34ecfff6d7bce895497e2a3805910d4
|
[
"MIT"
] | 4,897 |
2015-07-12T17:52:02.000Z
|
2022-03-31T16:07:01.000Z
|
dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py
|
gabefgonc/san-francisco-rice-dotfiles
|
60ff3539f34ecfff6d7bce895497e2a3805910d4
|
[
"MIT"
] | 337 |
2015-07-12T17:14:35.000Z
|
2022-03-05T17:27:24.000Z
|
dot_vim/plugged/vim-devicons/pythonx/vim_devicons/powerline/segments.py
|
gabefgonc/san-francisco-rice-dotfiles
|
60ff3539f34ecfff6d7bce895497e2a3805910d4
|
[
"MIT"
] | 365 |
2015-07-20T07:51:11.000Z
|
2022-02-22T05:00:56.000Z
|
# -*- coding: utf-8 -*-
# vim:se fenc=utf8 noet:
from __future__ import (unicode_literals, division, absolute_import, print_function)
try:
import vim
except ImportError:
vim = {}
from powerline.bindings.vim import (vim_get_func, buffer_name)
from powerline.theme import requires_segment_info
@requires_segment_info
def webdevicons(pl, segment_info):
webdevicons = vim_get_func('WebDevIconsGetFileTypeSymbol')
name = buffer_name(segment_info)
return [] if not webdevicons else [{
'contents': webdevicons(name),
'highlight_groups': ['webdevicons', 'file_name'],
}]
@requires_segment_info
def webdevicons_file_format(pl, segment_info):
webdevicons_file_format = vim_get_func('WebDevIconsGetFileFormatSymbol')
return [] if not webdevicons_file_format else [{
'contents': webdevicons_file_format(),
'highlight_groups': ['webdevicons_file_format', 'file_format'],
}]
| 30.482759 | 84 | 0.777149 | 0 | 0 | 0 | 0 | 584 | 0.660633 | 0 | 0 | 227 | 0.256787 |
f93060fe13dca91fd46628410cdb2477c1e8f235
| 2,844 |
py
|
Python
|
app/api/auth.py
|
ergo-pad/paideia-api
|
7ffc78366567c72722d107f06ad37aa7557b05be
|
[
"MIT"
] | null | null | null |
app/api/auth.py
|
ergo-pad/paideia-api
|
7ffc78366567c72722d107f06ad37aa7557b05be
|
[
"MIT"
] | null | null | null |
app/api/auth.py
|
ergo-pad/paideia-api
|
7ffc78366567c72722d107f06ad37aa7557b05be
|
[
"MIT"
] | null | null | null |
from fastapi.security import OAuth2PasswordRequestForm
from fastapi import APIRouter, Depends, HTTPException, status
from datetime import timedelta
from starlette.responses import JSONResponse
from db.crud.users import blacklist_token
from db.session import get_db
from core import security
from core.auth import authenticate_user, get_current_active_user, sign_up_new_user
auth_router = r = APIRouter()
@r.post("/token")
async def login(
db=Depends(get_db), form_data: OAuth2PasswordRequestForm = Depends()
):
try:
user = authenticate_user(db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(
minutes=security.ACCESS_TOKEN_EXPIRE_MINUTES
)
if user.is_superuser:
permissions = "admin"
else:
permissions = "user"
access_token = security.create_access_token(
data={"sub": user.alias, "permissions": permissions},
expires_delta=access_token_expires,
)
return {"access_token": access_token, "token_type": "bearer", "permissions": permissions}
except HTTPException as e:
raise e
except Exception as e:
return JSONResponse(status_code=400, content=f"ERR::login::{str(e)}")
@r.post("/signup")
async def signup(
db=Depends(get_db), form_data: OAuth2PasswordRequestForm = Depends()
):
try:
user = sign_up_new_user(db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail="Account already exists",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(
minutes=security.ACCESS_TOKEN_EXPIRE_MINUTES
)
if user.is_superuser:
permissions = "admin"
else:
permissions = "user"
access_token = security.create_access_token(
data={"sub": user.alias, "permissions": permissions},
expires_delta=access_token_expires,
)
return {"access_token": access_token, "token_type": "bearer"}
except HTTPException as e:
raise e
except Exception as e:
return JSONResponse(status_code=400, content=f"ERR::signup::{str(e)}")
@r.post("/logout")
async def logout(db=Depends(get_db), token: str = Depends(security.oauth2_scheme), current_user=Depends(get_current_active_user)):
try:
return blacklist_token(db, token)
except Exception as e:
        return JSONResponse(status_code=400, content=f"ERR::logout::{str(e)}")
| 33.857143 | 130 | 0.654008 | 0 | 0 | 0 | 0 | 2,428 | 0.853727 | 2,372 | 0.834037 | 348 | 0.122363 |
f9306dd6abfdca80dd6982ef5b08247263dd7576
| 5,530 |
py
|
Python
|
src/gui_occluder/custom/sr_occluder.py
|
hgiesel/anki-multiple-choice
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
[
"MIT"
] | 5 |
2019-12-26T08:08:52.000Z
|
2021-11-21T03:34:27.000Z
|
src/gui_occluder/custom/sr_occluder.py
|
hgiesel/anki-set-randomizer
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
[
"MIT"
] | 84 |
2019-08-01T20:36:17.000Z
|
2019-10-26T16:16:33.000Z
|
src/gui_occluder/custom/sr_occluder.py
|
hgiesel/anki_set_randomizer
|
1a9a22480eb6c0e7f421dc08d36d14920e43dd3e
|
[
"MIT"
] | null | null | null |
import os
import enum
from aqt.qt import QDialog, QGraphicsScene, QGraphicsRectItem, QGraphicsEllipseItem, QApplication
from aqt.qt import Qt, QPen, QGraphicsItem, QPixmap, QRectF, QPainter
from aqt.qt import QPointF, QBrush, QColor, QPainterPath, QIcon, QSize, QPalette
from aqt.utils import showInfo
from ..sr_occluder_ui import Ui_SROccluder
from .sr_rect import SRRect
from .sr_occlusion_view import SROcclusionView
from .sr_occlusion_scene import SROcclusionScene
class ToolMode(enum.Enum):
Select = 1
Move = 2
Zoom = 3
Rect = 4
Ellipse = 5
Polygon = 6
Line = 7
Arrow = 8
Darrow = 9
Text = 10
class SROccluder(QDialog):
def __init__(self, parent):
super().__init__(parent=parent)
self.ui = Ui_SROccluder()
self.ui.setupUi(self)
self.toolMode = ToolMode.Select
self.setupButtons()
def setupButtons(self):
main_path = f'{os.path.dirname(os.path.realpath(__file__))}/../icons'
self.ui.selectButton.setIcon(QIcon(f"{main_path}/select.png"))
self.ui.moveButton.setIcon(QIcon(f"{main_path}/move.png"))
self.ui.zoomButton.setIcon(QIcon(f"{main_path}/zoom.png"))
self.ui.rectButton.setIcon(QIcon(f"{main_path}/rect.png"))
self.ui.ellipseButton.setIcon(QIcon(f"{main_path}/ellipse.png"))
self.ui.polygonButton.setIcon(QIcon(f"{main_path}/polygon.png"))
self.ui.lineButton.setIcon(QIcon(f"{main_path}/line.png"))
self.ui.arrowButton.setIcon(QIcon(f"{main_path}/arrow.png"))
self.ui.darrowButton.setIcon(QIcon(f"{main_path}/darrow.png"))
self.ui.textButton.setIcon(QIcon(f"{main_path}/text.png"))
self.ui.selectButton.clicked.connect(self.selectTool)
self.ui.moveButton.clicked.connect(self.moveTool)
self.ui.zoomButton.clicked.connect(self.zoomTool)
self.ui.rectButton.clicked.connect(self.rectTool)
self.ui.ellipseButton.clicked.connect(self.ellipseTool)
self.ui.polygonButton.clicked.connect(self.polygonTool)
self.ui.lineButton.clicked.connect(self.lineTool)
self.ui.arrowButton.clicked.connect(self.arrowTool)
self.ui.darrowButton.clicked.connect(self.darrowTool)
self.ui.textButton.clicked.connect(self.textTool)
def selectTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Select)
def moveTool(self):
QApplication.setOverrideCursor(Qt.SizeAllCursor)
self.changeMode(ToolMode.Move)
def zoomTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Zoom)
def rectTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Rect)
def ellipseTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Ellipse)
def polygonTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Polygon)
def lineTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Line)
def arrowTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Arrow)
def darrowTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Darrow)
def textTool(self):
QApplication.setOverrideCursor(Qt.ArrowCursor)
self.changeMode(ToolMode.Text)
def changeMode(self, mode):
self.resetButton(mode, True)
self.resetButton(self.toolMode, False)
self.toolMode = mode
def resetButton(self, mode, state):
if mode == ToolMode.Select:
self.ui.selectButton.setChecked(state)
self.ui.selectButton.repaint()
elif mode == ToolMode.Move:
self.ui.moveButton.setChecked(state)
self.ui.moveButton.repaint()
elif mode == ToolMode.Zoom:
self.ui.zoomButton.setChecked(state)
self.ui.zoomButton.repaint()
elif mode == ToolMode.Rect:
self.ui.rectButton.setChecked(state)
self.ui.rectButton.repaint()
elif mode == ToolMode.Ellipse:
self.ui.ellipseButton.setChecked(state)
self.ui.ellipseButton.repaint()
elif mode == ToolMode.Polygon:
self.ui.polygonButton.setChecked(state)
self.ui.polygonButton.repaint()
elif mode == ToolMode.Line:
self.ui.lineButton.setChecked(state)
self.ui.lineButton.repaint()
elif mode == ToolMode.Arrow:
self.ui.arrowButton.setChecked(state)
self.ui.arrowButton.repaint()
elif mode == ToolMode.Darrow:
self.ui.darrowButton.setChecked(state)
self.ui.darrowButton.repaint()
elif mode == ToolMode.Text:
self.ui.textButton.setChecked(state)
self.ui.textButton.repaint()
def setupUi(self):
theScene = SROcclusionScene(self, 'skull.jpg')
self.ui.graphicsView.setScene(theScene)
outlinePen = QPen()
rect = theScene.addRect(10, 10, 50, 50, outlinePen, Qt.green)
rect.setFlag(QGraphicsItem.ItemIsMovable)
rect.setFlag(QGraphicsItem.ItemIsSelectable)
rect.setFlag(QGraphicsItem.ItemIsFocusable)
rect2 = SRRect(0, 0, 50, 30)
rect2.setFlag(QGraphicsItem.ItemIsMovable)
rect2.setFlag(QGraphicsItem.ItemIsSelectable)
theScene.addItem(rect2)
| 34.5625 | 97 | 0.674141 | 5,047 | 0.912658 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.055877 |
f930eb0037c9a1f7c847f03ac1f6289fad3453d4
| 13,371 |
py
|
Python
|
gen3config/config.py
|
uc-cdis/gen3config
|
fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c
|
[
"Apache-2.0"
] | null | null | null |
gen3config/config.py
|
uc-cdis/gen3config
|
fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c
|
[
"Apache-2.0"
] | null | null | null |
gen3config/config.py
|
uc-cdis/gen3config
|
fe340c0ce8ef3367f13c4f6040ec605e5fa7bc0c
|
[
"Apache-2.0"
] | null | null | null |
"""
Configuration class for handling configs with a given default.
If you need custom functionality or need to apply post_processing to parsed config,
simply extend this class.
Example:
```
class FenceConfig(Config):
def __init__(self, *args, **kwargs):
super(FenceConfig, self).__init__(*args, **kwargs)
def post_process(self):
# allow authlib traffic on http for development if enabled. By default
# it requires https.
#
# NOTE: use when fence will be deployed in such a way that fence will
# only receive traffic from internal clients, and can safely use HTTP
if (
self._configs.get("AUTHLIB_INSECURE_TRANSPORT")
and "AUTHLIB_INSECURE_TRANSPORT" not in os.environ
):
os.environ["AUTHLIB_INSECURE_TRANSPORT"] = "true"
# if we're mocking storage, ignore the storage backends provided
# since they'll cause errors if misconfigured
if self._configs.get("MOCK_STORAGE", False):
self._configs["STORAGE_CREDENTIALS"] = {}
cirrus.config.config.update(**self._configs.get("CIRRUS_CFG", {}))
```
Recommended use:
    - Create a `config-default.yaml` and `config.py` in the top-level folder of your app
- Inside `config-default.yaml` add keys and reasonable default values
- Inside `config.py`, create a class that inherits from this Config class
- See above example
- Add a final line to your `config.py` that instantiates your custom class:
- Ensure that you provide the default config path
- If placed in same directory as `config.py` you can use something like:
```
default_cfg_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "config-default.yaml"
)
config = FenceConfig(default_cfg_path)
```
    - Import your instantiated object whenever you need to get configuration
- Example: `from fence.config import config`
- Load in application configuration during init of your app
- Example: `config.load('path/to/fence-config.yaml')`
- Now you can safely access anything that was in your `config-default.yaml` from this
object as if it were a dictionary
- Example: `storage_creds = config["STORAGE_CREDENTIALS"]`
- Example: `if config["SOME_BOOLEAN"]: ...`
        - Example: `nested_value = config["TOP_LEVEL"]["nested"]`
- And of course you can import that into any file you want and will have access to
keys/values
- Example: `from fence.config import config`
"""
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import glob
from yaml import safe_load as yaml_load
from yaml.scanner import ScannerError
from jinja2 import Template, TemplateSyntaxError
import six
from cdislogging import get_logger
from gen3config.errors import NotFoundError, ParsingError
logger = get_logger(__name__, log_level="info")
class Config(dict):
"""
Configuration singleton that's instantiated on module load.
Allows updating from a config file by using .update()
"""
def __init__(self, default_cfg_path):
self._configs = {}
self.default_cfg_path = default_cfg_path
logger.debug("Checking if provided cfg path is an actual file...")
if not os.path.isfile(default_cfg_path):
raise FileNotFoundError(
"Default configuration file provided {} does not exist.".format(
default_cfg_path
)
)
logger.debug("Attempting to parse provided cfg as yaml file...")
try:
yaml_load(open(self.default_cfg_path))
except Exception as exc:
logger.exception(exc)
raise ParsingError(
"Could not parse provided file {} as YAML. See logs for details.".format(
default_cfg_path
)
)
def get(self, key, default=None):
return self._configs.get(key, default)
def set(self, key, value):
self._configs.__setitem__(key, value)
def setdefault(self, key, default=None):
self._configs.setdefault(key, default)
def __setitem__(self, key, value):
self._configs.__setitem__(key, value)
def __contains__(self, key):
return key in self._configs
def __iter__(self):
for key, value in six.iteritems(self._configs):
yield key, value
def __getitem__(self, key):
return self._configs[key]
def __delitem__(self, key):
del self._configs[key]
def __len__(self):
return len(self._configs)
def __str__(self):
return str(self._configs)
def update(self, *args, **kwargs):
"""
update configuration properties
support passing dictionary or keyword args
"""
if len(args) > 1:
raise TypeError(
"update expected at most 1 arguments, got {}".format(len(args))
)
if args:
self._configs.update(dict(args[0]))
self._configs.update(kwargs)
def load(self, config_path=None, search_folders=None, file_name=None):
if not config_path and not search_folders:
raise AttributeError(
"Cannot find configuration with given information. "
"You must either provide `search_folders` arg so load knows where to "
"look OR provide `config_path` as full path to config."
)
config_path = config_path or get_config_path(search_folders, file_name)
if config_path:
self.load_configuration_file(config_path)
self.post_process()
return self
def load_configuration_file(self, provided_cfg_path):
logger.info("Opening default configuration...")
# treat default cfg as template and replace nested vars, returning an updated dict
config = nested_render(
yaml_load(open(self.default_cfg_path)), {}, {}
)
logger.info("Applying configuration: {}".format(provided_cfg_path))
# treat provided cfg as template and replace nested vars, returning an updated dict
provided_configurations = nested_render(
yaml_load(open(provided_cfg_path)), {}, {}
)
# only update known configuration values. In the situation
# where the provided config does not have a certain value,
# the default will be used.
common_keys = {
key: value
for (key, value) in six.iteritems(config)
if key in provided_configurations
}
keys_not_provided = {
key: value
for (key, value) in six.iteritems(config)
if key not in provided_configurations
}
keys_to_update = {
key: value
for (key, value) in six.iteritems(provided_configurations)
if key in common_keys
}
unknown_keys = {
key: value
for (key, value) in six.iteritems(provided_configurations)
if key not in common_keys
}
config.update(keys_to_update)
if keys_not_provided:
logger.warning(
"Did not provide key(s) {} in {}. Will be set to default value(s) from {}.".format(
keys_not_provided.keys(), provided_cfg_path, self.default_cfg_path
)
)
if unknown_keys:
logger.warning(
"Unknown key(s) {} found in {}. Will be ignored.".format(
unknown_keys.keys(), provided_cfg_path
)
)
self._configs.update(config)
def post_process(self):
"""
Do some post processing to the configuration (set env vars if necessary,
do more complex modifications/changes to vars, etc.)
Called after loading the configuration and doing the template-replace.
"""
pass
def force_default_if_none(self, key, default_cfg=None, default_cfg_path=None):
"""
Set the key in the configuration to the default value if it either
1) doesn't exist (this is mostly for backwards-compatibility with previous
configuration methods)
2) is None
"""
default_cfg = default_cfg or yaml_load(open(default_cfg_path))
if key not in self._configs or self._configs[key] is None:
self._configs[key] = default_cfg.get(key)
def nested_render(cfg, fully_rendered_cfgs, replacements):
"""
    Template render the provided cfg by recursively replacing {{var}}'s with values
from the current "namespace".
The nested config is treated like nested namespaces where the inner variables
are only available in current block and further nested blocks.
Said the opposite way: the namespace with available vars that can be used
includes the current block's vars and parent block vars.
This means that you can do replacements for top-level
(global namespaced) config vars anywhere, but you can only use inner configs within
that block or further nested blocks.
An example is worth a thousand words:
---------------------------------------------------------------------------------
fence-config.yaml
--------------------------------------------------------------------------------
BASE_URL: 'http://localhost/user'
OPENID_CONNECT:
fence:
api_base_url: 'http://other_fence/user'
client_kwargs:
redirect_uri: '{{BASE_URL}}/login/fence/login'
authorize_url: '{{api_base_url}}/oauth2/authorize'
THIS_WONT_WORK: '{{api_base_url}}/test'
--------------------------------------------------------------------------------
"redirect_uri" will become "http://localhost/user/login/fence/login"
- BASE_URL is in the global namespace so it can be used in this nested cfg
"authorize_url" will become "http://other_fence/user/oauth2/authorize"
- api_base_url is in the current namespace, so it is available
"THIS_WONT_WORK" will become "/test"
- Why? api_base_url is not in the current namespace and so we cannot use that
as a replacement. the configuration (instead of failing) will replace with
an empty string
Args:
cfg (TYPE): Description
fully_rendered_cfgs (TYPE): Description
replacements (TYPE): Description
Returns:
dict: Configurations with template vars replaced
"""
if isinstance(cfg, dict):
for key, value in six.iteritems(cfg):
replacements.update(cfg)
fully_rendered_cfgs[key] = {}
fully_rendered_cfgs[key] = nested_render(
value,
fully_rendered_cfgs=fully_rendered_cfgs[key],
replacements=replacements,
)
# new namespace, remove current vars (no longer available as replacements)
for old_cfg, value in six.iteritems(cfg):
replacements.pop(old_cfg, None)
return fully_rendered_cfgs
else:
# it's not a dict, so lets try to render it. But only if it's
# truthy (which means there's actually something to replace)
if cfg:
try:
t = Template(str(cfg))
rendered_value = t.render(**replacements)
except TemplateSyntaxError:
rendered_value = cfg
try:
cfg = yaml_load(rendered_value)
except ScannerError:
# it's not loading into yaml, so let's assume it's a string with special
# chars such as: {}[],&*#?|:-<>=!%@\)
#
# in YAML, we have to "quote" a string with special chars.
#
# since yaml_load isn't loading from a file, we need to wrap the Python
# str in actual quotes.
cfg = yaml_load('"{}"'.format(rendered_value))
return cfg
def get_config_path(search_folders, file_name="*config.yaml"):
"""
Return the path of a single configuration file ending in config.yaml
from one of the search folders.
NOTE: Will return the first match it finds. If multiple are found,
this will error out.
"""
possible_configs = []
file_name = file_name or "*config.yaml"
for folder in search_folders:
config_path = os.path.join(folder, file_name)
possible_files = glob.glob(config_path)
possible_configs.extend(possible_files)
if len(possible_configs) == 1:
return possible_configs[0]
elif len(possible_configs) > 1:
raise IOError(
"Multiple config.yaml files found: {}. Please specify which "
"configuration to use by providing `config_path` instead of "
"`search_folders` to Config.load(). Alternatively, ensure that only a "
"single valid *config.yaml exists in the search folders: {}.".format(
str(possible_configs), search_folders
)
)
else:
raise NotFoundError(
"Could not find config file {}. Searched in the following locations: "
"{}".format(file_name, str(search_folders))
)
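# Minimal sketch of the namespace behaviour documented in nested_render above,
# reusing the illustrative values from its docstring (assumes this module is
# run standalone):
if __name__ == "__main__":
    example_cfg = {
        "BASE_URL": "http://localhost/user",
        "OPENID_CONNECT": {
            "fence": {
                "api_base_url": "http://other_fence/user",
                "client_kwargs": {"redirect_uri": "{{BASE_URL}}/login/fence/login"},
                "authorize_url": "{{api_base_url}}/oauth2/authorize",
            },
            "THIS_WONT_WORK": "{{api_base_url}}/test",
        },
    }
    rendered = nested_render(example_cfg, {}, {})
    # "redirect_uri" renders to "http://localhost/user/login/fence/login",
    # "authorize_url" renders to "http://other_fence/user/oauth2/authorize",
    # and "THIS_WONT_WORK" cannot see api_base_url (out of namespace) and becomes "/test".
    print(rendered)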
| 35.943548 | 99 | 0.615362 | 5,598 | 0.418667 | 104 | 0.007778 | 0 | 0 | 0 | 0 | 7,264 | 0.543265 |
f931949a583110bdf77e537bf67ef0dfdd9aeae4
| 8,150 |
py
|
Python
|
src/ReinforcementLearning/Modules/carlaUtils.py
|
B-C-WANG/ReinforcementLearningInAutoPilot
|
8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc
|
[
"MIT"
] | 27 |
2019-05-14T01:06:05.000Z
|
2022-03-06T03:12:40.000Z
|
src/ReinforcementLearning/Modules/carlaUtils.py
|
B-C-WANG/ReinforcementLearningInAutoPilot
|
8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc
|
[
"MIT"
] | null | null | null |
src/ReinforcementLearning/Modules/carlaUtils.py
|
B-C-WANG/ReinforcementLearningInAutoPilot
|
8d3c0b81e3db2fb4be0e52e25b700c54f5e569dc
|
[
"MIT"
] | 10 |
2020-01-20T09:39:51.000Z
|
2022-03-31T18:30:53.000Z
|
# coding:utf-8
# Type: Public
import numpy as np
import common.Math as cMath
import math
class CarlrUtils(object):
Author = "BaoChuan Wang"
AllowImport = False
@staticmethod
def get_direction_vector_series_and_car_to_next_waypoint_ratio(
carla_engine,
start_waypoint_xy_array,
target_waypoint_xy_array,
draw_in_UE=False
):
'''
        State computation for the WaypointsTarget environment.
        # The code below is kept only as a reference.
        Take the waypoint closest to the vehicle plus the next n waypoints (currently changed to the final two waypoints, which do NOT update with the vehicle position!), then return the parameters related to these two waypoints:
        1. distance from the vehicle to the midpoint of the two waypoints
        2. heading of the waypoint segment
        3. heading from the vehicle to the waypoint midpoint
        4. heading of the vehicle itself
        # Also, updating the waypoints in real time like this is not appropriate: the resulting rewards are not continuous with respect to the actions.
        # The original method let the vehicle pick its nearest waypoint and then derive the next waypoints; now the waypoints are fixed at the start,
        because the nearest-waypoint approach may cause lane changes.
        Original method code:
        # # get the xy coordinates of the vehicle's next two waypoints
# next_center_waypoints = self.engine.map.get_waypoint(
# # location
# self.engine.vehicle.get_location()
# )
        # # take the point 5 m ahead as the next waypoint
# next_next_center_waypoints = next_center_waypoints.next(5)[0]
#
# waypoint_list =((
# next_center_waypoints.transform.location.x,
# next_center_waypoints.transform.location.y
# ), (
# next_next_center_waypoints.transform.location.x,
# next_next_center_waypoints.transform.location.y
# ))
#
        # # draw the waypoints in Carla
# self.engine.draw_waypoint_list(
# [next_center_waypoints,next_next_center_waypoints],life_time=1)
#
# return waypoint_list
        # Notes:
        Because the final computation needs the two waypoints to obtain the distance to the vehicle,
        as well as the angle between the vehicle-to-waypoint-midpoint direction and the two-waypoint direction,
        the waypoint midpoint must be guaranteed to lie in front of the vehicle (otherwise the car would drive backwards).
        Making the waypoint spacing large enough is sufficient! Alternatively, take the two following points instead of one when sampling here!
        # The code here used to find the point closest to the vehicle and then look 3 points ahead; it has been updated to use the points fixed at the start!
        # # get the index of the nearest waypoint, then take the next one! What happens at the end of the waypoint list?
# distance = np.sqrt(
# np.sum(np.square(self.car_waypoints_xy_array - np.array([self.engine.vehicle.get_location().x,
# self.engine.vehicle.get_location().y])), axis=1))
#
# # print(distance)
        # # largest index
# index_max = distance.shape[0] - 1
        # # find the index of the waypoint closest to the vehicle
# index = int(np.argmin(distance))
#
#
# index = index_max - 1
#
        # # take the point slightly further ahead here
# next_point_index = index + 3
# if next_point_index > index_max: next_point_index = index_max
# if draw_in_UE:
        #     # draw the segment between the two waypoints
# start = self.car_waypoints_list[index]
# end = self.car_waypoints_list[next_point_index]
# self.engine.draw_line(start, end, life_time=1, color=(0, 255, 0))
# center_point = (self.car_waypoints_xy_array[index, :].reshape(-1) +
# self.car_waypoints_xy_array[next_point_index, :].reshape(-1)) / 2
'''
        # vehicle position
vehicle_location = carla_engine.vehicle.get_location()
car_point = np.array([vehicle_location.x, vehicle_location.y])
if draw_in_UE:
            # waypoint midpoint
center_point = (start_waypoint_xy_array + target_waypoint_xy_array) / 2
center_point_transform = carla_engine.make_transform(
x=center_point[0],
y=center_point[1],
z=vehicle_location.z
)
carla_engine.draw_point_xyz(center_point[0], center_point[1], carla_engine.vehicle.get_location().z + 0.25,
color=(0, 255, 255), thickness=0.1)
carla_engine.draw_line_location(
vehicle_location,
center_point_transform.location,
life_time=1, color=(0, 0, 255)
)
        # unit direction vector of the waypoint segment
way_unit_direction = target_waypoint_xy_array - start_waypoint_xy_array
way_unit_direction /= np.linalg.norm(way_unit_direction, 2)
        # unit direction vector from the vehicle to the target waypoint
car_to_way_unit_direction = (target_waypoint_xy_array - car_point)
car_to_way_unit_direction /= np.linalg.norm(car_to_way_unit_direction, 2)
        # unit direction vector of the vehicle itself
car_unit_direction = carla_engine.vehicle.get_transform().get_forward_vector()
car_unit_direction = np.array([car_unit_direction.x, car_unit_direction.y])
        # ratio of the vehicle-to-target distance to the total distance
total_distance = np.linalg.norm(target_waypoint_xy_array - start_waypoint_xy_array, 2)
now_distance = np.linalg.norm(target_waypoint_xy_array - car_point, 2)
car_to_target_distance_ratio = now_distance / total_distance
        # vehicle yaw angle (in radians)
car_yaw = math.radians(carla_engine.vehicle_yaw)
        # added: x and y of the target waypoint in the vehicle's local coordinate frame
target_xy_array_relate_to_car = cMath.convert_point_into_relative_coordinate(
target_waypoint_xy_array,
car_point,
original_yaw_radius=car_yaw)
return way_unit_direction, car_to_way_unit_direction, car_unit_direction, car_to_target_distance_ratio, target_xy_array_relate_to_car
@staticmethod
def get_car_target_waypoints(engine, vehicle, n_waypoint=2, waypoint_spacing=15, draw_waypoints=True):
if n_waypoint < 2:
raise ValueError("At least 2 waypoints will return!")
# List<Waypoints>
car_waypoints_list = []
# Array2D
car_waypoints_xy_array = None
# List<List>
car_waypoints_xy_list = []
        # starting waypoint
next_center_waypoints = engine.map.get_waypoint(vehicle.get_location())
        # vehicle start point
start_waypoint_xy_array = np.array([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
car_waypoints_list.append(next_center_waypoints)
car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
if n_waypoint == 2:
next_center_waypoints = next_center_waypoints.next(waypoint_spacing)[0]
car_waypoints_list.append(next_center_waypoints)
car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
else:
for i in range(n_waypoint - 1):
next_center_waypoints = next_center_waypoints.next(waypoint_spacing)[0]
car_waypoints_list.append(next_center_waypoints)
car_waypoints_xy_list.append([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
car_waypoints_xy_array = np.array(car_waypoints_xy_list)
        # end point (target)
target_waypoint_xy_array = np.array([next_center_waypoints.transform.location.x,
next_center_waypoints.transform.location.y])
        # draw the waypoints
if draw_waypoints:
engine.draw_waypoint_list(car_waypoints_list, life_time=99999)
return car_waypoints_list, car_waypoints_xy_list, car_waypoints_xy_array, target_waypoint_xy_array
@staticmethod
def get_velocity_accel_relative_to_car_and_their_scalar(engine):
velocity_vector = engine.get_velocity()
velocity_to_car_x, velocity_to_car_y = cMath.convert_point_into_relative_coordinate(
target_xy=[velocity_vector.x, velocity_vector.y],
original_xy=[0, 0],
original_yaw_radius=math.radians(engine.vehicle_yaw))
velocity = engine.get_velocity_scalar()
accel_vector = engine.get_accel()
accel_to_car_x, accel_to_car_y = cMath.convert_point_into_relative_coordinate(
target_xy=[accel_vector.x, accel_vector.y],
original_xy=[0, 0],
original_yaw_radius=math.radians(engine.vehicle_yaw))
accel = engine.get_velocity_scalar()
return velocity, velocity_to_car_x, velocity_to_car_y, accel, accel_to_car_x, accel_to_car_y
| 40.346535 | 141 | 0.640613 | 9,008 | 0.989673 | 0 | 0 | 8,912 | 0.979125 | 0 | 0 | 3,840 | 0.421885 |
f932dbe3d5afcee0aae3f946f59a3b66e3f2fb59
| 2,413 |
py
|
Python
|
models.py
|
abhishekyana/CycleGANs-PyTorch
|
ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a
|
[
"MIT"
] | 12 |
2019-07-27T09:54:57.000Z
|
2021-04-23T23:34:25.000Z
|
models.py
|
abhishekyana/CycleGANs-PyTorch
|
ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a
|
[
"MIT"
] | 5 |
2020-11-13T15:40:12.000Z
|
2022-03-11T23:53:51.000Z
|
models.py
|
abhishekyana/CycleGANs-PyTorch
|
ebbd7d6dbed642577cc37a3e741f4233b9cbbd7a
|
[
"MIT"
] | 2 |
2021-03-11T10:45:33.000Z
|
2021-04-23T23:34:29.000Z
|
import torch.nn as nn
import torch.nn.functional as F
class ResBlock(nn.Module):
def __init__(self, inFeatures):
super(ResBlock, self).__init__()
self.conv = nn.Sequential(nn.ReflectionPad2d(1),
nn.Conv2d(inFeatures, inFeatures, 3),
nn.InstanceNorm2d(inFeatures),
nn.ReLU(inplace=True),
nn.ReflectionPad2d(1),
nn.Conv2d(inFeatures, inFeatures, 3),
nn.InstanceNorm2d(inFeatures))
def forward(self, X):
out = X + self.conv(X)
return out
class Generator(nn.Module):
def __init__(self, inputnc, outputnc, nResBlocks=9):
super(Generator, self).__init__()
layers = [nn.ReflectionPad2d(3),
nn.Conv2d(inputnc, 64, 7),
nn.InstanceNorm2d(64),
nn.ReLU(inplace=True)]
#To downsample the Image
inFeatures = 64
outFeatures = 2*inFeatures
for i in range(2):
layers += [nn.Conv2d(inFeatures, outFeatures, 3, stride=2, padding=1),
nn.InstanceNorm2d(outFeatures),
nn.ReLU(inplace=True)]
inFeatures = outFeatures
outFeatures = 2*inFeatures
for i in range(nResBlocks):
layers += [ResBlock(inFeatures)]
#To upsample the Image
outFeatures = inFeatures//2
for i in range(2):
layers += [nn.ConvTranspose2d(inFeatures, outFeatures, 3, stride=2, padding=1, output_padding=1),
nn.InstanceNorm2d(outFeatures),
nn.ReLU(inplace=True)]
inFeatures = outFeatures
outFeatures = inFeatures//2
layers += [nn.ReflectionPad2d(3),
nn.Conv2d(64, outputnc, 7),
nn.Tanh()]
self.model = nn.Sequential(*layers)
def forward(self, X):
out=self.model(X)
return out
class Discriminator(nn.Module):
def __init__(self, inputnc):
super(Discriminator, self).__init__()
layers = [nn.Conv2d(inputnc, 64, 4, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 128, 4, stride=2, padding=1),
nn.InstanceNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(128, 256, 4, stride=2, padding=1),
nn.InstanceNorm2d(256),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(256, 512, 4, padding=1),
nn.InstanceNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(512, 1, 4, padding=1)]
self.model = nn.Sequential(*layers)
def forward(self, X):
out = self.model(X)
out = F.avg_pool2d(out, out.size()[2:]).view(out.size()[0], -1)
return out
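# Minimal smoke-test sketch (illustrative only; assumes the usual CycleGAN
# setting of 3-channel 256x256 images):
if __name__ == "__main__":
    import torch
    G = Generator(inputnc=3, outputnc=3)
    D = Discriminator(inputnc=3)
    x = torch.randn(1, 3, 256, 256)
    fake = G(x)      # translated image, same spatial size as the input
    score = D(fake)  # PatchGAN map averaged to one score per sample
    print(fake.shape, score.shape)  # torch.Size([1, 3, 256, 256]) torch.Size([1, 1])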
| 30.544304 | 100 | 0.642768 | 2,353 | 0.975135 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.019063 |
f933130df8fe669a9af1c9efd51088775e210fbc
| 99 |
py
|
Python
|
stable_baselines3/bear/__init__.py
|
mjyoo2/stable-baselines3
|
ef7a580219df6d977b56fb99e503890bd5211195
|
[
"MIT"
] | null | null | null |
stable_baselines3/bear/__init__.py
|
mjyoo2/stable-baselines3
|
ef7a580219df6d977b56fb99e503890bd5211195
|
[
"MIT"
] | null | null | null |
stable_baselines3/bear/__init__.py
|
mjyoo2/stable-baselines3
|
ef7a580219df6d977b56fb99e503890bd5211195
|
[
"MIT"
] | null | null | null |
from stable_baselines3.bear.policies import BearPolicy
from stable_baselines3.bear.bear import BEAR
| 49.5 | 54 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f9347e37b52fec0692880a203b911075b279ecba
| 5,194 |
py
|
Python
|
file-io-and-other-io/modfile/test_ip_gen.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
file-io-and-other-io/modfile/test_ip_gen.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
file-io-and-other-io/modfile/test_ip_gen.py
|
eda-ricercatore/python-sandbox
|
741d23e15f22239cb5df8af6e695cd8e3574be50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
This is written by Zhiyang Ong to modify text (non-binary) files.
Synopsis:
Script to modify text (non-binary) files.
Revision History:
1) November 11, 2014. Initial working version.
The MIT License (MIT)
Copyright (c) <2014> <Zhiyang Ong>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Email address: echo "cukj -wb- 23wU4X5M589 TROJANS cqkH wiuz2y 0f Mw Stanford" | awk '{ sub("23wU4X5M589","F.d_c_b. ") sub("Stanford","d0mA1n"); print $5, $2, $8; for (i=1; i<=1; i++) print "6\b"; print $9, $7, $6 }' | sed y/kqcbuHwM62z/gnotrzadqmC/ | tr 'q' ' ' | tr -d [:cntrl:] | tr -d 'ir' | tr y "\n"
"""
# Import packages and functions from the Python Standard Library.
#from os import listdir, system
from os import system
#from os.path import isfile, join, splitext
#from os.subprocess import call
#import subprocess
# ============================================================
"""
Create an output file object.
Assume that the specified filename does not belong to an important file.
Assume that the specified file can be overwritten.
"""
f_object = open("input-file.txt", "w");
# Lists to generate data for the input test file.
# List of universities that are good in EDA.
universities = ["Berkeley", "Stanford", "MIT", "UT Austin", "Carnegie Mellon", "Georgia Tech", "Columbia", "Northwestern", "Purdue", "UCSD", "UCLA"]
# List of other universities in EDA.
other_unis = ["UIUC", "Brown", "Boston University", "UC Irvine", "UC Riverside", "UCSB", "USC", "University of Minnesota at Twin Cities", "Utah", "University of Wisconsin-Madison"]
# List of VLSI topics.
vlsi_topics = ["RTL design", "TLM design", "processor design", "SRAM design", "DRAM design", "low-power VLSI design", "decoder design", "DFM", "VLSI verification", "VLSI design flow", "NoC", "asynchronous VLSI design", "VLSI architecture", "digitally-assisted analog IC design", "VLSI signal processing", "microarchitecture"]
# List of EDA topics.
eda_topics = ["model checking", "equivalence checking", "high-level synthesis", "hardware/software partitioning", "hardware-accelerated emulation", "logic synthesis", "RTL synthesis", "static timing analysis", "statistical STA", "power optimization", "DVFS", "logic simulation", "fault saimulation", "ATPG", "DFT", "BIST", "memory compiler", "gate sizing", "threshold voltage assignment", "buffer insertion", "crosstalk analysis", "signal integrity analysis", "noise analysis", "thermal analysis", "floorplanning", "partitioning", "detailed placement", "detailed routing", "global placement", "global routing", "clock network synthesis", "power and ground routing", "layout compaction", "layout extraction", "parasitic extraction", "interconnect modeling", "design rule check", "layout versus schematic check", "electric rule check", "computational lithography", "optical proximity correction", "resolution enhancement technologies", "mask data preparation", "circuit simulation"]
# Lists of numbers to be fixed.
list_of_hundreds = range(1500, 5000, 100)
list_of_10s = range(1234560, 1234767, 10)
# References:
# http://eecs_ece-and-cs.quora.com/Choosing-a-Graduate-Program-in-VLSI-Design-Related-Areas-Things-to-Consider
# http://www.quora.com/What-are-the-best-VLSI-CAD-research-groups-in-US-universities
# Write text to the input test file.
#f_object.write("Ciao Mondo")
# Pointer to currently enumerated index of EDA topics.
ptr = 0
# ============================================================
# Generate test data for the test input file.
# Enumerate all universities that are good in EDA.
for gd_uni in universities:
#temp_str = "%S %S %S", gd_uni, eda_topics[ptr], eda_topics[ptr+1]
temp_str = gd_uni + "; " + str(list_of_hundreds[ptr]) + "; " + eda_topics[ptr]
ptr = ptr + 1
temp_str = temp_str + "; " + str(list_of_10s[ptr]) + "; " + eda_topics[ptr] + ".\n"
if ptr < len(universities):
ptr = ptr + 1
f_object.write(temp_str)
temp_str = "Stanford" + "; " + "326748027" + "; " + "statistical STA"
temp_str = temp_str + "; " + "7289" + "; " + "hardware-accelerated emulation" + ".\n"
f_object.write(temp_str)
# ============================================================
# Close the file object
f_object.close()
| 57.076923 | 980 | 0.701579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,435 | 0.85387 |
f934993945194bcd3e81f89c7b932f03bda5ad14
| 8,771 |
py
|
Python
|
aux_lib.py
|
paulokuriki/dcmtag2table
|
e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f
|
[
"Apache-2.0"
] | null | null | null |
aux_lib.py
|
paulokuriki/dcmtag2table
|
e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f
|
[
"Apache-2.0"
] | null | null | null |
aux_lib.py
|
paulokuriki/dcmtag2table
|
e9f7f366ffe64653aa2fab9bffd88669f1ed7f3f
|
[
"Apache-2.0"
] | null | null | null |
import pydicom
from tqdm import tqdm
import pandas as pd
import os
import time
import glob
import numpy as np
from pydicom import _dicom_dict as dc
from constants import *
import string
def dcmtag2df(folder: str, list_of_tags: list):
"""
# Create a Pandas DataFrame with the <list_of_tags> DICOM tags
# from the DICOM files in <folder>
# Parameters:
# folder (str): folder to be recursively walked looking for DICOM files.
# list_of_tags (list of strings): list of DICOM tags with no whitespaces.
# Returns:
# df (DataFrame): table of DICOM tags from the files in folder.
"""
list_of_tags = list_of_tags.copy()
table = []
start = time.time()
# checks if folder exists
if not os.path.isdir(folder):
print(f'{folder} is not a valid folder.')
return None
    # join ** to the folder name for use with the glob function
print("Searching files recursively...")
search_folder = os.path.join(folder, '**')
try:
filelist = glob.glob(search_folder, recursive=True)
print(f"{len(list(filelist))} files/folders found ")
except Exception as e:
print(e)
return None
time.time()
print("Reading files...")
for _f in tqdm(filelist):
try:
dataset = pydicom.dcmread(_f, stop_before_pixels=True)
items = []
items.append(_f)
for _tag in list_of_tags:
if _tag in dataset:
if dataset.data_element(_tag) is not None:
items.append(str(dataset.data_element(_tag).value))
                    else:
                        # data element is present but empty; avoid referencing
                        # an undefined tag_number here
                        items.append("NaN")
else:
series_description = dataset.get('SeriesDescription')
if _tag == 'IOP_Plane':
IOP = dataset.get('ImageOrientationPatient')
_plano = IOP_Plane(IOP)
items.append(_plano)
elif _tag == "Primary":
try:
image_type = ' '.join(dataset.get('ImageType'))
except:
image_type = ''
found_word = search_words_in_serie(image_type, PRIMARY)
items.append(found_word)
elif _tag == "Gad":
found_word = search_words_in_serie(series_description, GAD, GAD_EXCLUSION)
items.append(found_word)
elif _tag == "T1":
found_word = search_words_in_serie(series_description, T1, FLAIR + T2)
items.append(found_word)
elif _tag == "T2":
found_word = search_words_in_serie(series_description, T2)
items.append(found_word)
elif _tag == "FLAIR":
found_word = search_words_in_serie(series_description, FLAIR, T1)
items.append(found_word)
elif _tag == "SWI":
found_word = search_words_in_serie(series_description, SWI)
items.append(found_word)
elif _tag == "FIESTA":
found_word = search_words_in_serie(series_description, FIESTA)
items.append(found_word)
elif _tag == "TOF":
found_word = search_words_in_serie(series_description, TOF)
items.append(found_word)
elif _tag == "DWI":
found_word = search_words_in_serie(series_description, DWI, DWI_EXCLUSION)
items.append(found_word)
elif _tag == "Angio":
found_word = search_words_in_serie(series_description, ANGIO)
items.append(found_word)
elif _tag == "MPR":
found_word = search_words_in_serie(series_description, MPR)
items.append(found_word)
elif _tag == "Others":
found_word = search_words_in_serie(series_description, OTHERS)
items.append(found_word)
else:
                        # check whether a DICOM tag number (rather than a name) was provided
tag_number = tag_number_to_base_16(_tag)
if tag_number in dataset:
if dataset[tag_number] is not None:
items.append(str(dataset[tag_number].value))
else:
items.append("NaN")
else:
items.append("NaN")
table.append((items))
except (FileNotFoundError, PermissionError):
pass
except Exception as e:
pass
list_of_tags.insert(0, "Filename")
test = list(map(list, zip(*table)))
dictone = {}
if len(table) == 0:
print(f'0 DICOM files found at folder: {folder}')
return None
for i, _tag in enumerate(list_of_tags):
dictone[_tag] = test[i]
df = pd.DataFrame(dictone)
time.sleep(2)
print("Finished.")
return df
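# --- Added usage sketch (not part of the original module) ---------------------------
# The folder path and tag list below are placeholders. dcmtag2df returns a DataFrame
# with one row per readable DICOM file and one column per requested tag, or None when
# the folder does not exist or contains no DICOM files, so this call is safe to run.
if __name__ == "__main__":
    example_df = dcmtag2df("/path/to/dicom_study", ["PatientID", "SeriesDescription", "T1"])
    if example_df is not None:
        print(example_df.head())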
def IOP_Plane(IOP: list) -> str:
"""
This function takes IOP of an image and returns its plane (Sagittal, Coronal, Transverse)
['1', '0', '0', '0', '0', '-1'] you are dealing with Coronal plane view
['0', '1', '0', '0', '0', '-1'] you are dealing with Sagittal plane view
['1', '0', '0', '0', '1', '0'] you are dealing with Axial plane view
"""
try:
IOP_round = [round(x) for x in IOP]
plane = np.cross(IOP_round[0:3], IOP_round[3:6])
plane = [abs(x) for x in plane]
if plane[0] == 1:
return "SAG"
elif plane[1] == 1:
return "COR"
elif plane[2] == 1:
return "AXI"
else:
return "UNK"
except:
return "UNK"
def dicomtagnumber_to_tagname(dicom_tag_number: str) -> str:
    # converts a DICOM tag number (e.g. "00100020") into its dictionary keyword;
    # falls back to returning the tag number itself if the lookup fails
    dicom_tag_name = dicom_tag_number
    dicom_tag_base_16 = tag_number_to_base_16(dicom_tag_number)
    try:
        dicom_tag_name = dc.DicomDictionary.get(dicom_tag_base_16, (0, 0, 0, 0, dicom_tag_number))[4]
        if dicom_tag_name == "0008103E":
            dicom_tag_name = "SeriesDescription"
    except Exception as e:
        print(f'Error converting DICOM tag {dicom_tag_number}\n{e}')
    return dicom_tag_name
def dicomtagname_to_tagnumber(dicom_tag_name: str) -> str:
    tag_number_8_digits = dicom_tag_name
    try:
        tag_number = None
        # searches for the contracted (keyword) name first
        for key, value in dc.DicomDictionary.items():
            if dicom_tag_name == value[4]:
                tag_number = key
                break
        # searches for the expanded name if the contracted form was not found
        if not tag_number:
            for key, value in dc.DicomDictionary.items():
                if dicom_tag_name == value[2]:
                    tag_number = key
                    break
        hex_number = hex(tag_number)[2:]
        tag_number_8_digits = f"{hex_number:>08}"
    except Exception as e:
        print(f'Error converting DICOM tag {dicom_tag_name}\n{e}')
    return tag_number_8_digits
def tag_number_to_base_16(dicom_tag_number: str) -> str:
# if receives int, convert to str
hx = string.hexdigits
if type(dicom_tag_number) == int:
dicom_tag_number = str(dicom_tag_number)
only_hexdigits_tag = ''.join(i for i in dicom_tag_number if i in hx)
dicom_tag_base_16 = int(only_hexdigits_tag, 16)
return dicom_tag_base_16
def search_words_in_serie(series_description: str, search_words: list, exclusion_words: list = []) -> bool:
try:
search_flag = False
for word in search_words:
if word.upper() in series_description.upper():
search_flag = True
break
    except Exception as e:
        print(f"Error while searching the inclusion word list {search_words} in description {series_description}: {e}")
        return "NaN"
try:
exclusion_flag = False
for word in exclusion_words:
if word.upper() in series_description.upper():
exclusion_flag = True
break
    except Exception as e:
        print(f"Error while searching the exclusion word list {exclusion_words} in description {series_description}: {e}")
        return "NaN"
found = search_flag and exclusion_flag is False
return found
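# --- Added illustration (not in the original module) --------------------------------
# The series description below is made up. search_words_in_serie returns True only when
# at least one inclusion word matches and no exclusion word does (case-insensitive).
if __name__ == "__main__":
    assert search_words_in_serie("AX T1 MPRAGE POST GAD", ["GAD"], ["NO CONTRAST"])
    assert not search_words_in_serie("AX T2 FLAIR", ["GAD"])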
| 37.165254 | 115 | 0.551248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,734 | 0.197697 |
f9368f4ecdf91a5437237dc760bad64780ffdbe1
| 430 |
py
|
Python
|
eve_swagger/__init__.py
|
Annakan/eve-swagger
|
34b91a335e0e2c471fc552800751e9d702a7f260
|
[
"BSD-3-Clause"
] | null | null | null |
eve_swagger/__init__.py
|
Annakan/eve-swagger
|
34b91a335e0e2c471fc552800751e9d702a7f260
|
[
"BSD-3-Clause"
] | null | null | null |
eve_swagger/__init__.py
|
Annakan/eve-swagger
|
34b91a335e0e2c471fc552800751e9d702a7f260
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
eve-swagger
~~~~~~~~~~~
swagger.io extension for Eve-powered REST APIs.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # noqa: F401
from .swagger import swagger, add_documentation # noqa
INFO = 'SWAGGER_INFO'
HOST = 'SWAGGER_HOST'
| 23.888889 | 55 | 0.676744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 254 | 0.590698 |
f936f234615d9bcf0427f9ba2eb780c873f4aa17
| 9,005 |
py
|
Python
|
2018_Epoch_Spectra.py
|
chunders/EpochAnalysis
|
b5d83d9608692e3bf5f9947bb3627e04a54a312f
|
[
"MIT"
] | null | null | null |
2018_Epoch_Spectra.py
|
chunders/EpochAnalysis
|
b5d83d9608692e3bf5f9947bb3627e04a54a312f
|
[
"MIT"
] | null | null | null |
2018_Epoch_Spectra.py
|
chunders/EpochAnalysis
|
b5d83d9608692e3bf5f9947bb3627e04a54a312f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
_
/ | | __ _ __ _
/ | / |_||_|| ||
/ | / | |\ | ||_
/____ |__/\ . | | \|_|\_|
__________________________ .
███████╗██████╗ ██████╗ ██████╗██╗ ██╗
██╔════╝██╔══██╗██╔═══██╗██╔════╝██║ ██║
█████╗ ██████╔╝██║ ██║██║ ███████║
██╔══╝ ██╔═══╝ ██║ ██║██║ ██╔══██║
███████╗██║ ╚██████╔╝╚██████╗██║ ██║
╚══════╝╚═╝ ╚═════╝ ╚═════╝╚═╝ ╚═╝
Created on Wed May 30 15:34:05 2018
@author: chrisunderwood
To compare the output electron spectra,
as part of a parameter scan
"""
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
#==============================================================================
# A function that replicates os.walk with a max depth level
#==============================================================================
def walklevel(some_dir, level=1):
some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
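# --- Added illustration (not part of the original script) ---------------------------
# walklevel behaves like os.walk but prunes anything deeper than `level`. With level=0
# on the current directory (a stand-in path) only the top level itself is yielded.
if __name__ == "__main__":
    _top = [root for root, dirs, files in walklevel('.', level=0)]
    assert _top == ['.']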
#==============================================================================
# Creates a list of the folders of interest
#==============================================================================
def listFolders(mainDir):
listSubFolders = [x[0] for x in walklevel(mainDir)][1:]
folderNames = []
#Modify so useable path
for i in range(len(listSubFolders)):
listSubFolders[i] += '/'
#Add the folder that I am looking into here too!
listSubFolders[i] += 'Dist_evo/'
folderNames.append(listSubFolders[i].split('/')[-3])
listSubFolders = np.array(listSubFolders)
folderNames = np.array(folderNames)
return listSubFolders, folderNames
def nearposn(array,value):
#Find array position of value
posn = (abs(array-value)).argmin()
return posn
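# --- Added illustration (not part of the original script) ---------------------------
# nearposn returns the index of the array element closest to the requested value.
if __name__ == "__main__":
    assert nearposn(np.array([0.0, 1.0, 2.0, 3.0]), 2.2) == 2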
def subplotPerSpectra(data, Crop):
sns.set_palette(sns.color_palette("Set1", len(folderNames)))
sns.set_context("talk")
sns.set_style('darkgrid')
fig, axes = plt.subplots(nrows = len(data), sharex = True, figsize = (7,8))
for d, names, ax in zip(data, folderNames, axes):
yLims = [1e50, 0]
px = d[:,0]
Energy_J = (px ** 2) / (2 * 9.11e-31)
Energy_eV = Energy_J / 1.6e-19
Energy_MeV = Energy_eV * 1e-6
xlow = nearposn(Energy_MeV, Crop[0])
xhigh = nearposn(Energy_MeV, Crop[1])
# print xlow, xhigh
# xlow = 50; xhigh = 400
intensity = d[:,1]
cropI = intensity[xlow:xhigh]
if cropI.min() < yLims[0]:
yLims[0] = cropI.min()
if cropI.max() > yLims[1]:
yLims[1] = cropI.max()
# print fp
if plot_MeV:
xAxis = Energy_MeV
else:
xAxis = Energy_J
ax.plot(xAxis, intensity)
ax.set_title('Blade Translation ' + names[1:] + 'mm')
ax.set_ylim(yLims)
# ax.set_ylabel('Intensity (# of electrons)')
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useOffset=False)
if plot_MeV:
plt.xlabel('Electron Energy (MeV)')
else:
plt.xlabel('Electron Energy (J)')
# plt.ylabel('Intensity (# of electrons)')
fig.text(0.02, 0.5, 'Intensity (# of electrons)', ha='center', va='center', rotation='vertical')
#==============================================================================
# Apply the plotting limits
#==============================================================================
#plt.xlim([-1e-14, 1e-13])
#plt.yscale('log')
#
# if logPlot:
# plt.ylim([yLims[1]/1e5, yLims[1]])
# plt.yscale('log')
# else:
# plt.ylim(yLims)
#
plt.xlim([xAxis[xlow],xAxis[xhigh]])
plt.legend()
def createPlotOfAll_e_spectra(folderPaths, folderNames, Crop_X, Crop_Y = False):
sns.set_palette(sns.color_palette("Set1", len(folderNames)))
sns.set_context("talk")
sns.set_style('darkgrid')
yLims = [1e50, 0]
data = []
plt.figure(figsize = (10,7))
for fp, names in zip(folderPaths, folderNames):
fp += 'Electron_Spectrum.txt'
try:
#Assuming that the first row is currently px
d = np.loadtxt(fp)
data.append(d)
px = d[:,0]
Energy_J = (px ** 2) / (2 * 9.11e-31)
Energy_eV = Energy_J / 1.6e-19
Energy_MeV = Energy_eV * 1e-6
xlow = nearposn(Energy_MeV, Crop_X[0])
xhigh = nearposn(Energy_MeV, Crop_X[1])
# print xlow, xhigh
# xlow = 50; xhigh = 400
intensity = d[:,1]
if not Crop_Y:
cropI = intensity[xlow:xhigh]
if cropI.min() < yLims[0]:
yLims[0] = cropI.min()
if cropI.max() > yLims[1]:
yLims[1] = cropI.max()
else:
yLims = Crop_Y
# print fp
if plot_MeV:
xAxis = Energy_MeV
else:
xAxis = Energy_J
plt.plot(xAxis, intensity, label = names)
plt.xlim([xAxis[xlow],xAxis[xhigh]])
except:
print 'Error Reading File'
print ' ' + fp
if plot_MeV:
plt.xlabel('Electron Energy (MeV)')
else:
plt.xlabel('Electron Energy (J)')
plt.ylabel('Intensity (# of electrons)')
#==============================================================================
# Apply the plotting limits
#==============================================================================
#plt.xlim([-1e-14, 1e-13])
#plt.yscale('log')
#
if logPlot:
plt.ylim([yLims[1]/1e5, yLims[1]])
plt.yscale('log')
else:
plt.ylim(yLims)
plt.legend()
print 'Crop corresponds to: ', [xAxis[xlow],xAxis[xhigh]], ' MeV'
print 'Range of inputed data is: ', Energy_MeV[0], Energy_MeV[-1]
return data
hdrive = '/Volumes/CIDU_passport/2018_Epoch_vega_1/'
gdrive = '/Volumes/GoogleDrive/My Drive/'
gdrive += '2018_Epoch_vega_1/'
#hdrive += '0601_Gaus_for_wavebreak/'
#fileSplice = [8,None]
#hdrive += '0607_Intensity_Scan/'
#fileSplice = [1,-11]
#hdrive += '0612_profileScan/'
#fileSplice = [2,None]
#hdrive = gdrive + '0711_highRes_selfInjection/'
#fileSplice = [-4,None]
#hdrive = gdrive + '0721_HR_Jump/'
#fileSplice = [-4,None]
hdrive = hdrive + '1010_SlurmJob/'
fileSplice = [10,12]
#hdrive = gdrive + '1018_vega1_Jump/'
#fileSplice = [2,None]
folderPaths, folderNames = listFolders(hdrive)
logPlot = False
plot_MeV = True
#==============================================================================
# Search for the set of folders to look at!
#==============================================================================
starts = ''
#starts = ''
fins = 'FS'
#Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].endswith(fins)]
Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].startswith(starts)]
#Index_to_save = [i for i in xrange(len(folderNames)) if folderNames[i].startswith(starts) and folderNames[i].endswith('23')]
#Modify the both arrays to just be the ones of interest
folderPaths = folderPaths[Index_to_save]
folderNames = folderNames[Index_to_save]
print folderNames
#==============================================================================
# Crop the axis to the interesting data
#==============================================================================
Energy_Crop = [1, 5] # In MeV
IntensityCrop = [0, 0.5e8]
#==============================================================================
# Slice name for number to sort by
#==============================================================================
Num = []
for f in folderNames:
Num.append(float(f[fileSplice[0]:fileSplice[1]]))
print Num
sort = sorted(zip(Num, folderNames, folderPaths))
folderNames = [x[1] for x in sort]
folderPaths = [x[2] for x in sort]
print 'Sorted'
print folderNames
#folderNames = folderNames[:-1]
data = createPlotOfAll_e_spectra(folderPaths, folderNames, Energy_Crop, IntensityCrop)
plt.savefig(hdrive + 'Electron_spectrum.png')
plt.show()
#data = data[:4]
subplotPerSpectra(data, Energy_Crop)
plt.tight_layout()
plt.savefig(hdrive + 'Electron_spectrums_in_subplot.png', dpi = 300)
| 31.596491 | 125 | 0.4804 | 0 | 0 | 349 | 0.037108 | 0 | 0 | 0 | 0 | 4,434 | 0.471451 |
f937303cbe2bd1ca99e6bfd681984ef1eb1f4844
| 35 |
py
|
Python
|
first-homework.py
|
Hexotical/Astr119
|
34a638d29f33c8fde9245cd7c5869bf3f9e7366b
|
[
"MIT"
] | null | null | null |
first-homework.py
|
Hexotical/Astr119
|
34a638d29f33c8fde9245cd7c5869bf3f9e7366b
|
[
"MIT"
] | 2 |
2020-10-01T18:51:01.000Z
|
2020-10-06T14:15:37.000Z
|
first-homework.py
|
Hexotical/astr-119
|
34a638d29f33c8fde9245cd7c5869bf3f9e7366b
|
[
"MIT"
] | null | null | null |
print("Lukas Ho, pronouns: he/him")
| 35 | 35 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.8 |
f93933ebd7cddbd101cc7daf0772e4787528a6a9
| 2,965 |
py
|
Python
|
server/swagger_server/models/people_patch.py
|
fabric-testbed/fabric-core-api
|
8ce79fd16e1020271487967743a89b7a2346bf45
|
[
"MIT"
] | null | null | null |
server/swagger_server/models/people_patch.py
|
fabric-testbed/fabric-core-api
|
8ce79fd16e1020271487967743a89b7a2346bf45
|
[
"MIT"
] | null | null | null |
server/swagger_server/models/people_patch.py
|
fabric-testbed/fabric-core-api
|
8ce79fd16e1020271487967743a89b7a2346bf45
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.preferences import Preferences # noqa: F401,E501
from swagger_server import util
class PeoplePatch(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, email: str=None, name: str=None, preferences: Preferences=None): # noqa: E501
"""PeoplePatch - a model defined in Swagger
:param email: The email of this PeoplePatch. # noqa: E501
:type email: str
:param name: The name of this PeoplePatch. # noqa: E501
:type name: str
:param preferences: The preferences of this PeoplePatch. # noqa: E501
:type preferences: Preferences
"""
self.swagger_types = {
'email': str,
'name': str,
'preferences': Preferences
}
self.attribute_map = {
'email': 'email',
'name': 'name',
'preferences': 'preferences'
}
self._email = email
self._name = name
self._preferences = preferences
@classmethod
def from_dict(cls, dikt) -> 'PeoplePatch':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The people_patch of this PeoplePatch. # noqa: E501
:rtype: PeoplePatch
"""
return util.deserialize_model(dikt, cls)
@property
def email(self) -> str:
"""Gets the email of this PeoplePatch.
:return: The email of this PeoplePatch.
:rtype: str
"""
return self._email
@email.setter
def email(self, email: str):
"""Sets the email of this PeoplePatch.
:param email: The email of this PeoplePatch.
:type email: str
"""
self._email = email
@property
def name(self) -> str:
"""Gets the name of this PeoplePatch.
:return: The name of this PeoplePatch.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this PeoplePatch.
:param name: The name of this PeoplePatch.
:type name: str
"""
self._name = name
@property
def preferences(self) -> Preferences:
"""Gets the preferences of this PeoplePatch.
:return: The preferences of this PeoplePatch.
:rtype: Preferences
"""
return self._preferences
@preferences.setter
def preferences(self, preferences: Preferences):
"""Sets the preferences of this PeoplePatch.
:param preferences: The preferences of this PeoplePatch.
:type preferences: Preferences
"""
self._preferences = preferences
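# --- Added usage sketch (not part of the generated model) ---------------------------
# The values below are placeholders; the generated model can be built directly with
# keyword arguments, or from a plain dict via PeoplePatch.from_dict().
if __name__ == "__main__":
    _patch = PeoplePatch(email="[email protected]", name="Jane Doe")
    assert _patch.email == "[email protected]" and _patch.name == "Jane Doe"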
| 25.560345 | 101 | 0.601349 | 2,649 | 0.893423 | 0 | 0 | 1,624 | 0.547723 | 0 | 0 | 1,620 | 0.546374 |
f9393f537340aad0fcc03fb7b4478b7455578c86
| 14,649 |
py
|
Python
|
Code/src/models/optim/DMSAD_trainer.py
|
antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection
|
e84c28ce4dd28671d39752a7d21c674e05fcb495
|
[
"MIT"
] | 8 |
2021-02-19T17:30:00.000Z
|
2022-02-21T05:55:06.000Z
|
Code/src/models/optim/DMSAD_trainer.py
|
antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection
|
e84c28ce4dd28671d39752a7d21c674e05fcb495
|
[
"MIT"
] | 1 |
2021-05-03T14:04:53.000Z
|
2021-05-03T14:48:01.000Z
|
Code/src/models/optim/DMSAD_trainer.py
|
antoine-spahr/Contrastive-Deep-Semi-Supervised-Anomaly-Detection
|
e84c28ce4dd28671d39752a7d21c674e05fcb495
|
[
"MIT"
] | 5 |
2021-02-18T22:43:40.000Z
|
2021-05-03T14:01:49.000Z
|
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
import logging
from sklearn.metrics import roc_auc_score
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from src.models.optim.Loss_Functions import DMSADLoss
from src.utils.utils import print_progessbar
class DMSAD_trainer:
"""
Trainer for the DMSAD.
"""
def __init__(self, c, R, eta=1.0, gamma=0.05, n_sphere_init=100, n_epoch=150,
lr=1e-4, lr_milestone=(), batch_size=64, weight_decay=1e-6,
device='cuda', n_job_dataloader=0, print_batch_progress=False):
"""
Constructor of the DMSAD trainer.
----------
INPUT
|---- c (array like N_sphere x Embed dim) the centers of the hyperspheres.
| If None, the centers are initialized using Kmeans.
|---- R (1D array) the radii associated with the centers.
|---- eta (float) the weight of semi-supervised labels in the loss.
|---- gamma (float) the fraction of allowed outlier when setting the
| radius of each sphere in the end.
|---- n_sphere_init (int) the number of initial hypersphere.
|---- n_epoch (int) the number of epoch.
|---- lr (float) the learning rate.
|---- lr_milestone (tuple) the lr update steps.
|---- batch_size (int) the batch_size to use.
|---- weight_decay (float) the weight_decay for the Adam optimizer.
|---- device (str) the device to work on ('cpu' or 'cuda').
|---- n_job_dataloader (int) number of workers for the dataloader.
|---- print_batch_progress (bool) whether to dispay the batch
| progress bar.
OUTPUT
|---- None
"""
# learning parameters
self.n_epoch = n_epoch
self.lr = lr
self.lr_milestone = lr_milestone
self.batch_size = batch_size
self.weight_decay = weight_decay
self.device = device
self.n_job_dataloader = n_job_dataloader
self.print_batch_progress = print_batch_progress
# DMSAD parameters
self.c = torch.tensor(c, device=self.device) if c is not None else None
self.R = torch.tensor(R, device=self.device) if R is not None else None
self.eta = eta
self.gamma = gamma
self.n_sphere_init = n_sphere_init
# Optimization parameters
self.eps = 1e-6
# Results
self.train_time = None
self.train_loss = None
self.eval_auc = None
self.eval_time = None
self.eval_scores = None
def train(self, dataset, net, valid_dataset=None):
"""
Train the DMSAD network on the provided dataset.
----------
INPUT
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is trained. It must return an image, label, mask
| semi-supervized labels and the index.
|---- net (nn.Module) The DeepSAD to train.
|---- valid_dataset (torch.utils.data.Dataset) the dataset on which
| to validate the network at each epoch. Not validated if
| not provided.
OUTPUT
|---- net (nn.Module) The trained DeepSAD.
"""
logger = logging.getLogger()
# make the train dataloader
train_loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, \
shuffle=True, num_workers=self.n_job_dataloader)
# put net to device
net = net.to(self.device)
# initialize hypersphere center
if self.c is None:
logger.info(' Initializing the hypersphere centers.')
self.initialize_centers(train_loader, net)
logger.info(f' {self.c.shape[0]} centers successfully initialized.')
# define loss criterion
loss_fn = DMSADLoss(self.eta, eps=self.eps)
# define optimizer
optimizer = optim.Adam(net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
# define scheduler
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=self.lr_milestone, gamma=0.1)
# Start training
logger.info('Start Training the DMSAD.')
start_time = time.time()
epoch_loss_list = []
n_batch = len(train_loader)
for epoch in range(self.n_epoch):
net.train()
epoch_loss = 0.0
epoch_start_time = time.time()
n_k = torch.zeros(self.c.shape[0], device=self.device)
for b, data in enumerate(train_loader):
# get input and semi-supervized labels
input, _, _, semi_label, _ = data
# put them to device
input = input.to(self.device).float().requires_grad_(True)
semi_label = semi_label.to(self.device)
# zero the network's gradients
optimizer.zero_grad()
# optimize by backpropagation
_, embed = net(input)
loss = loss_fn(embed, self.c, semi_label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
# get the closest sphere and count the number of normal samples per sphere
idx = torch.argmin(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
for i in idx[semi_label != -1]:
n_k[i] += 1
if self.print_batch_progress:
print_progessbar(b, len(train_loader), Name='\t\tTrain Batch', Size=40, erase=True)
# remove centers with less than gamma fraction of largest hypersphere number of sample
self.c = self.c[n_k >= self.gamma * torch.max(n_k)]
# validate if required
valid_auc = ''
if valid_dataset:
auc = self.evaluate(net, valid_dataset, return_auc=True, print_to_logger=False, save_tSNE=False)
valid_auc = f' Valid AUC {auc:.3%} |'
# log the epoch statistics
logger.info(f'----| Epoch: {epoch + 1:03}/{self.n_epoch:03} '
f'| Train Time: {time.time() - epoch_start_time:.3f} [s] '
f'| Train Loss: {epoch_loss / n_batch:.6f} '
f'| N sphere {self.c.shape[0]:03} |' + valid_auc)
epoch_loss_list.append([epoch+1, epoch_loss/n_batch])
# update scheduler
scheduler.step()
if epoch + 1 in self.lr_milestone:
logger.info(f'---- LR Scheduler : new learning rate {scheduler.get_lr()[0]:g}')
# Set the radius of each sphere as 1-gamma quantile of normal samples distances
logger.info(f'---- Setting the hyperspheres radii as the {1-self.gamma:.1%} quantiles of normal sample distances.')
self.set_radius(train_loader, net)
logger.info(f'---- {self.R.shape[0]} radii successufully defined.')
# End training
self.train_loss = epoch_loss_list
self.train_time = time.time() - start_time
logger.info(f'---- Finished Training DMSAD in {self.train_time:.3f} [s]')
return net
def evaluate(self, net, dataset, return_auc=False, print_to_logger=True, save_tSNE=True):
"""
Evaluate the DSAD network on the provided dataset.
----------
INPUT
|---- net (nn.Module) The DMSAD network to validate.
|---- dataset (torch.utils.data.Dataset) the dataset on which the
| network is evaluated.
|---- return_auc (bool) whether to return the computed auc or not.
|---- print_to_logger (bool) whether to print in the logger.
|---- save_tSNE (bool) whether to save a 2D t-SNE representation of
        |           the embedded data points
OUTPUT
|---- (auc) (float) the validation auc if required.
"""
if print_to_logger:
logger = logging.getLogger()
# make dataloader
loader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size,
shuffle=True, num_workers=self.n_job_dataloader)
# put net on device
net = net.to(self.device)
# Evaluating
if print_to_logger:
logger.info('Start Evaluating the DMSAD.')
start_time = time.time()
idx_label_score = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data on device
input, label, _, semi_label, idx = data
input = input.to(self.device).float()
label = label.to(self.device)
semi_label = semi_label.to(self.device)
idx = idx.to(self.device)
# Embed input and compute anomaly score
_, embed = net(input)
# find closest sphere
score, sphere_idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
                # append idx, scores, label and embedding
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
label.cpu().data.numpy().tolist(),
score.cpu().data.numpy().tolist(),
sphere_idx.cpu().data.numpy().tolist(),
embed.cpu().data.numpy().tolist()))
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\t Evaluation Batch', Size=40, erase=True)
# compute AUCs
index, label, score, sphere_index, embed = zip(*idx_label_score)
label, score = np.array(label), np.array(score)
auc = roc_auc_score(label, score)
if save_tSNE:
embed = np.array(embed)
embed = TSNE(n_components=2).fit_transform(embed)
idx_label_score = list(zip(index, label.tolist(), score.tolist(), sphere_index, embed.tolist()))
self.eval_time = time.time() - start_time
self.eval_scores = idx_label_score
self.eval_auc = auc
if print_to_logger:
logger.info(f'Evaluation Time : {self.eval_time}')
logger.info(f'Evaluation AUC : {self.eval_auc:.3%}')
logger.info('Finished Evaluating the DMSAD.')
if return_auc:
return auc
def initialize_centers(self, loader, net, eps=0.1):
"""
Initialize the multiple centers using the K-Means algorithm on the
embedding of all the normal samples.
----------
INPUT
|---- loader (torch.utils.data.Dataloader) the loader of the data.
|---- net (nn.Module) the DMSAD network. The output must be a vector
| embedding of the input. The network should be an
| autoencoder for which the forward pass returns both the
| reconstruction and the embedding of the input.
|---- eps (float) minimal value for center coordinates, to avoid
| center too close to zero.
OUTPUT
|---- None
"""
# Get sample embedding
repr = []
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data
input, _, _, semi_label, _ = data
input = input.to(self.device).float()
semi_label = semi_label.to(self.device)
# keep only normal samples
input = input[semi_label != -1]
                # get embedding of batch
_, embed = net(input)
repr.append(embed)
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)
repr = torch.cat(repr, dim=0).cpu().numpy()
# Apply Kmeans algorithm on embedding
kmeans = KMeans(n_clusters=self.n_sphere_init).fit(repr)
self.c = torch.tensor(kmeans.cluster_centers_).to(self.device)
        # check if any c_i is within epsilon of zero, to avoid centers being trivially matched to zero
self.c[(torch.abs(self.c) < eps) & (self.c < 0)] = -eps
self.c[(torch.abs(self.c) < eps) & (self.c > 0)] = eps
def set_radius(self, loader, net):
"""
        compute the radius as the 1-gamma quantile of normal sample distances to the center.
Then anomaly score is ||net(x) - c_j||^2 - R_j^2 <--- negative if in, positive if out.
----------
INPUT
|---- loader (torch.utils.data.Dataloader) the loader of the data.
|---- net (nn.Module) the DMSAD network. The output must be a vector
| embedding of the input. The network should be an
| autoencoder for which the forward pass returns both the
| reconstruction and the embedding of the input.
OUTPUT
|---- None
"""
dist_list = [[] for _ in range(self.c.shape[0])] # initialize N_sphere lists
net.eval()
with torch.no_grad():
for b, data in enumerate(loader):
# get data
input, _, _, semi_label, _ = data
input = input.to(self.device).float()
semi_label = semi_label.to(self.device)
# keep only normal samples
input = input[semi_label != -1]
                # get embedding of batch
_, embed = net(input)
# get the closest sphere and count the number of normal samples per sphere
dist, idx = torch.min(torch.norm(self.c.unsqueeze(0) - embed.unsqueeze(1), p=2, dim=2), dim=1)
for i, d in zip(idx, dist):
dist_list[i].append(d)
if self.print_batch_progress:
print_progessbar(b, len(loader), Name='\t\tBatch', Size=40, erase=True)
# compute the radius as 1-gamma quantile of the normal distances of each spheres
self.R = torch.zeros(self.c.shape[0], device=self.device)
for i, dist in enumerate(dist_list):
dist = torch.stack(dist, dim=0)
self.R[i] = torch.kthvalue(dist, k=int((1 - self.gamma) * dist.shape[0]))[0]
| 42.708455 | 123 | 0.558263 | 14,327 | 0.978019 | 0 | 0 | 0 | 0 | 0 | 0 | 6,034 | 0.411905 |
f93b09d7873482279865a3e138f9e289b66d1ef0
| 7,600 |
py
|
Python
|
escher/tests/test_plots.py
|
phantomas1234/escher
|
47f3291beefd7cc90207755c717e83f385262956
|
[
"MIT"
] | null | null | null |
escher/tests/test_plots.py
|
phantomas1234/escher
|
47f3291beefd7cc90207755c717e83f385262956
|
[
"MIT"
] | null | null | null |
escher/tests/test_plots.py
|
phantomas1234/escher
|
47f3291beefd7cc90207755c717e83f385262956
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, unicode_literals
from escher import __schema_version__
import escher.server
from escher import Builder, get_cache_dir, clear_cache
from escher.plots import (_load_resource, local_index, server_index,
model_json_for_name, map_json_for_name)
from escher.urls import get_url
import os
import sys
from os.path import join
import json
from pytest import raises, mark
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
if sys.version < '3':
unicode_type = unicode
else:
unicode_type = str
# cache
def test_get_cache_dir():
d = get_cache_dir()
assert os.path.isdir(d)
d = get_cache_dir(name='maps')
assert os.path.isdir(d)
def test_clear_cache(tmpdir, request):
(tmpdir.mkdir('maps').mkdir('Escherichia coli')
.join('iJO1366.Central metabolism.json').write('temp'))
(tmpdir.mkdir('models').mkdir('Escherichia coli')
.join('iJO1366.json').write('temp'))
clear_cache(str(tmpdir))
assert os.listdir(str(tmpdir)) == []
def fin():
tmpdir.remove()
request.addfinalizer(fin)
def test_local_index(tmpdir, request):
maps = tmpdir.mkdir('maps')
maps.mkdir('Escherichia coli').join('iJO1366.Central metabolism.json').write('temp')
# ignore these
maps.join('ignore_md.json').write('ignore')
tmpdir.mkdir('models').mkdir('Escherichia coli').join('iJO1366.json').write('temp')
assert local_index(str(tmpdir)) == { 'maps': [ { 'organism': 'Escherichia coli',
'map_name': 'iJO1366.Central metabolism' } ],
'models': [ { 'organism': 'Escherichia coli',
'model_name': 'iJO1366' } ] }
def fin():
tmpdir.remove()
request.addfinalizer(fin)
# server
@mark.web
def test_server_index():
index = server_index()
map_0 = index['maps'][0]
assert 'organism' in map_0
assert 'map_name' in map_0
model_0 = index['models'][0]
assert 'organism' in model_0
assert 'model_name' in model_0
# model and maps
def test_model_json_for_name(tmpdir):
models = tmpdir.mkdir('models')
models.mkdir('Escherichia coli').join('iJO1366.json').write('"temp"')
json = model_json_for_name('iJO1366', cache_dir=str(tmpdir))
assert json == '"temp"'
@mark.web
def test_model_json_for_name_web(tmpdir):
data = model_json_for_name('iJO1366', cache_dir=str(tmpdir))
assert 'reactions' in data
assert 'metabolites' in data
def test_map_json_for_name(tmpdir):
maps = tmpdir.mkdir('maps')
maps.mkdir('Escherichia coli').join('iJO1366.Central metabolism.json').write('"temp"')
json = map_json_for_name('iJO1366.Central metabolism', cache_dir=str(tmpdir))
assert json == '"temp"'
@mark.web
def test_map_json_for_name_web(tmpdir):
data = map_json_for_name('iJO1366.Central metabolism', cache_dir=str(tmpdir))
root = get_url('escher_root', protocol='https').rstrip('/')
assert json.loads(data)[0]['schema'] == '/'.join([root, 'escher', 'jsonschema',
__schema_version__ + '#'])
# helper functions
def test__load_resource(tmpdir):
assert _load_resource('{"r": "val"}', 'name') == '{"r": "val"}'
directory = os.path.abspath(os.path.dirname(__file__))
assert _load_resource(join(directory, 'example.json'), 'name').strip() == '{"r": "val"}'
with raises(ValueError) as err:
p = join(str(tmpdir), 'dummy')
with open(p, 'w') as f:
f.write('dummy')
_load_resource(p, 'name')
    assert 'not a valid json file' in str(err.value)
@mark.web
def test__load_resource_web(tmpdir):
url = '/'.join([get_url('map_download', protocol='https'),
'Escherichia%20coli/iJO1366.Central%20metabolism.json'])
_ = json.loads(_load_resource(url, 'name'))
def test_Builder(tmpdir):
b = Builder(map_json='{"r": "val"}', model_json='{"r": "val"}')
# Cannot load dev/local version without an explicit css string property.
# TODO include a test where these do not raise.
with raises(Exception):
b.display_in_notebook(js_source='dev')
with raises(Exception):
b.display_in_notebook(js_source='local')
# ok with embedded_css arg
b = Builder(map_json='{"r": "val"}', model_json='{"r": "val"}', embedded_css='')
b.display_in_notebook(js_source='dev')
b.save_html(join(str(tmpdir), 'Builder.html'), js_source='dev')
# test options
with raises(Exception):
b._get_html(js_source='devv')
with raises(Exception):
b._get_html(menu='')
with raises(Exception):
b._get_html(scroll_behavior='asdf')
b._get_html(js_source='local')
b._get_html(menu='all')
b._get_html(scroll_behavior='zoom')
@mark.web
def test_Builder_download():
# download
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366')
assert b.loaded_map_json is not None
assert b.loaded_model_json is not None
b._get_html(js_source='web')
b.display_in_notebook(height=200)
# data
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366',
reaction_data=[{'GAPD': 123}, {'GAPD': 123}])
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366',
metabolite_data=[{'nadh_c': 123}, {'nadh_c': 123}])
b = Builder(map_name='iJO1366.Central metabolism',
model_name='iJO1366',
gene_data=[{'gapA': 123}, {'adhE': 123}])
assert type(b.the_id) is unicode_type
assert len(b.the_id) == 10
def test_Builder_options():
b = Builder(embedded_css='')
b.set_metabolite_no_data_color('white')
assert b.metabolite_no_data_color=='white'
html = b._get_html(js_source='local')
assert 'metabolite_no_data_color: "white"' in html
def test__draw_js():
b = Builder(map_json='"useless_map"', model_json='"useless_model"',
embedded_css='')
def look_for_string(st, substring):
"""Look for the string in the substring. This solves a bug in py.test
for these cases"""
try:
found = st.find(substring)
assert found > -1
except AssertionError:
raise AssertionError('Could not find\n\n%s\n\nin\n\n%s' % (substring, st))
# no static parse, dev
ijs = b._initialize_javascript('id', 'local')
js = b._draw_js('id', True, 'all', True, True, True, 'pan', True, None)
look_for_string(ijs, 'var map_data_id = "useless_map";')
look_for_string(ijs, 'var model_data_id = "useless_model";')
look_for_string(js, 'Builder(map_data_id, model_data_id, embedded_css_id, d3.select("#id"), options);')
# static parse, not dev
ijs = b._initialize_javascript('id', 'local')
static_index = '{"my": ["useless", "index"]}'
js = b._draw_js('id', True, 'all', True, False, True, 'pan', True, static_index)
look_for_string(ijs, 'var map_data_id = "useless_map";')
look_for_string(ijs, 'var model_data_id = "useless_model";')
look_for_string(js, 'escher.static.load_map_model_from_url("%s/maps/", "%s/models/",' % (__schema_version__, __schema_version__))
look_for_string(js, static_index)
look_for_string(js, 'options, function(map_data_id, model_data_id, options) {')
look_for_string(js, 'escher.Builder(map_data_id, model_data_id, embedded_css_id, d3.select("#id"), options);')
| 37.073171 | 133 | 0.644342 | 0 | 0 | 0 | 0 | 1,882 | 0.247632 | 0 | 0 | 2,261 | 0.2975 |
f93b74e758fc59e8cc9ffa0d3c99de08f971b204
| 656 |
py
|
Python
|
setup.py
|
HiteshSachdev/casualty
|
7d3878bea7bc503a3cc5eb6046aa658608164e0f
|
[
"MIT"
] | 14 |
2018-10-07T12:05:24.000Z
|
2022-03-01T01:58:21.000Z
|
setup.py
|
treebohotels/corelated-logs
|
13926c97a473bc63c7b18e22870d1760089f30d1
|
[
"MIT"
] | 6 |
2018-10-07T09:07:59.000Z
|
2019-06-08T09:23:45.000Z
|
setup.py
|
treebohotels/corelated-logs
|
13926c97a473bc63c7b18e22870d1760089f30d1
|
[
"MIT"
] | 2 |
2019-01-23T06:14:31.000Z
|
2021-06-21T04:02:26.000Z
|
from setuptools import find_packages, setup
setup(
name="casualty",
version="0.1.9",
packages=find_packages(exclude=["tests"]),
install_requires=[
"structlog==18.2.0",
"wrapt==1.10.11",
"pre-commit-hooks==1.4.0",
"mock==2.0.0",
"pytest==3.8.2",
"pytest-mock==1.10.0",
"pytest-cov"
],
url="",
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
license="MIT",
author="Sohit Kumar",
author_email="[email protected]",
test_suite="tests",
description="A python library to generate co-relation id and bind it to headers in outgoing request",
)
| 26.24 | 105 | 0.568598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 324 | 0.493902 |
f93db1c837037edf147a1adf0e6c511aadcb0960
| 5,321 |
py
|
Python
|
isaactest/tests/user_progress_access.py
|
jsharkey13/isaac-selenium-testing
|
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
|
[
"MIT"
] | null | null | null |
isaactest/tests/user_progress_access.py
|
jsharkey13/isaac-selenium-testing
|
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
|
[
"MIT"
] | 1 |
2016-01-15T11:28:06.000Z
|
2016-01-25T17:09:18.000Z
|
isaactest/tests/user_progress_access.py
|
jsharkey13/isaac-selenium-testing
|
fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8
|
[
"MIT"
] | 1 |
2019-05-14T16:53:49.000Z
|
2019-05-14T16:53:49.000Z
|
import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import submit_login_form, assert_logged_in
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element, wait_for_invisible_xpath
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException
__all__ = ["user_progress_access"]
#####
# Test : Access Users Progress Page
#####
@TestWithDependency("USER_PROGRESS_ACCESS", ["LOGIN", "LOGOUT"])
def user_progress_access(driver, Users, ISAAC_WEB, WAIT_DUR, **kwargs):
"""Test access to user progress page is suitably restricted.
- 'driver' should be a Selenium WebDriver.
- 'Users' must be a TestUsers object.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB)
driver.get(ISAAC_WEB + "/logout")
log(INFO, "Logging out any logged in user.")
time.sleep(WAIT_DUR)
progress_access_fail = False
try:
log(INFO, "Test if logged out user can access '/progress/1'.")
driver.get(ISAAC_WEB + "/progress/1")
time.sleep(WAIT_DUR)
assert (("/login?target=%2Fprogress%2F1" in driver.current_url) or ("/login?target=~2Fprogress~2F1" in driver.current_url))
log(INFO, "Logged out users can't access progress pages.")
time.sleep(WAIT_DUR)
driver.get(ISAAC_WEB + "/logout")
log(INFO, "Logging out to start from same initial page each time.")
time.sleep(WAIT_DUR)
except AssertionError:
progress_access_fail = True
image_div(driver, "ERROR_unexpected_admin_access")
log(ERROR, "Logged out user may have accessed '/progress/1'; see 'ERROR_unexpected_admin_access.png'!")
access_cases = [("Student", Users.Student), ("Teacher", Users.Teacher), ("Content Editor", Users.Editor), ("Event Manager", Users.Event)]
for i_type, user in access_cases:
log(INFO, "Test if '%s' users can access another users progress page." % i_type)
try:
driver.get(ISAAC_WEB + "/progress/1")
time.sleep(WAIT_DUR)
submit_login_form(driver, user=user, wait_dur=WAIT_DUR)
time.sleep(WAIT_DUR)
assert_logged_in(driver, user, wait_dur=WAIT_DUR)
log(INFO, "Try loading progress page; no errors will be shown but have to wait to see if data loads.")
wait_for_invisible_xpath(driver, "//div[@loading-overlay]", 60)
except AssertionError:
log(ERROR, "Couldn't log user in to test '/progress/1' access!")
return False
except TimeoutException:
log(ERROR, "'%s' users given endless loading screen; can't tell if can access page. Can't continue!" % i_type)
return False
try:
title = str(wait_for_xpath_element(driver, "(//h1)[1]").text)
title = title.strip()
assert title == "Progress for user:", "Title is '%s', expected 'Progress for user:' without a name!"
log(INFO, "'%s' users given blank info as expected; can't access page." % i_type)
except TimeoutException:
log(ERROR, "No title found on page after loading finished! Can't continue!")
return False
except AssertionError:
log(ERROR, "User of type '%s' accessed another users progress page!" % i_type)
progress_access_fail = True
driver.get(ISAAC_WEB + "/logout")
log(INFO, "Logged out '%s' user." % i_type)
time.sleep(WAIT_DUR)
access_cases = [("Admin", Users.Admin)]
for i_type, user in access_cases:
log(INFO, "Test if '%s' users can access another users progress page." % i_type)
try:
driver.get(ISAAC_WEB + "/progress/1")
time.sleep(WAIT_DUR)
submit_login_form(driver, user=user, wait_dur=WAIT_DUR)
time.sleep(WAIT_DUR)
assert_logged_in(driver, user, wait_dur=WAIT_DUR)
title = str(wait_for_xpath_element(driver, "(//h1)[1]").text)
title = title.strip()
assert len(title) > len("Progress for user:"), "Title is '%s', expected 'Progress for user: [name]'!"
wait_for_xpath_element(driver, "//div[@d3-plot]//ul[@class='d3-plot-key']")
time.sleep(WAIT_DUR)
log(INFO, "'%s' users can access '/progress/1' as expected." % i_type)
except TimeoutException:
progress_access_fail = True
image_div(driver, "ERROR_no_admin_access")
log(ERROR, "'%s' user can't access '/progress/1'; see 'ERROR_no_admin_access.png'!" % i_type)
        except AssertionError as e:
progress_access_fail = True
image_div(driver, "ERROR_no_admin_access")
log(ERROR, "Error accessing other user progress: %s See 'ERROR_no_admin_access.png'!" % e.message)
driver.get(ISAAC_WEB + "/logout")
log(INFO, "Logged out '%s' user." % i_type)
time.sleep(3)
if not progress_access_fail:
log(PASS, "Access to another users progress page restricted appropriately.")
return True
else:
log(ERROR, "Access not appropriately restricted! Fail!")
return False
| 47.088496 | 141 | 0.64236 | 0 | 0 | 0 | 0 | 4,885 | 0.918061 | 0 | 0 | 2,104 | 0.395414 |
f93dfe9bacfa4bd9cb38fd01bfa6466399547497
| 423 |
py
|
Python
|
insert_loc_code.py
|
dspshin/house-bot
|
1e2755abae114c3284d7d95d81c40fadb0ab9b43
|
[
"MIT"
] | null | null | null |
insert_loc_code.py
|
dspshin/house-bot
|
1e2755abae114c3284d7d95d81c40fadb0ab9b43
|
[
"MIT"
] | null | null | null |
insert_loc_code.py
|
dspshin/house-bot
|
1e2755abae114c3284d7d95d81c40fadb0ab9b43
|
[
"MIT"
] | null | null | null |
import re
import sqlite3
conn = sqlite3.connect('loc.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS location(loc text PRIMARY KEY, code text)')
conn.commit()
f = open('loc_code.txt')
for d in f.readlines():
data = re.sub(r'\s{2}', '|', d.strip()).split('|')
print data[1].strip(), data[0]
c.execute('INSERT INTO location VALUES ("%s", "%s")'%(data[1].strip(), data[0]))
conn.commit()
f.close()
| 24.882353 | 84 | 0.63357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.349882 |
f94108d55467d7dc2d4d0a83034f5df29403a946
| 33,479 |
py
|
Python
|
python/valkka/nv/valkka_nv.py
|
xiaoxoxin/valkka-nv
|
48b8fd5b1293c6e4f96f4798e6d327e209b83bce
|
[
"WTFPL"
] | 1 |
2021-03-03T13:25:22.000Z
|
2021-03-03T13:25:22.000Z
|
python/valkka/nv/valkka_nv.py
|
xiaoxoxin/valkka-nv
|
48b8fd5b1293c6e4f96f4798e6d327e209b83bce
|
[
"WTFPL"
] | null | null | null |
python/valkka/nv/valkka_nv.py
|
xiaoxoxin/valkka-nv
|
48b8fd5b1293c6e4f96f4798e6d327e209b83bce
|
[
"WTFPL"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_valkka_nv')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_valkka_nv')
_valkka_nv = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_valkka_nv', [dirname(__file__)])
except ImportError:
import _valkka_nv
return _valkka_nv
try:
_mod = imp.load_module('_valkka_nv', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_valkka_nv = swig_import_helper()
del swig_import_helper
else:
import _valkka_nv
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
from valkka import core
class FrameFilter(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, FrameFilter, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, FrameFilter, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _valkka_nv.delete_FrameFilter
__del__ = lambda self: None
FrameFilter_swigregister = _valkka_nv.FrameFilter_swigregister
FrameFilter_swigregister(FrameFilter)
class DummyFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DummyFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DummyFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, verbose=True, next=None):
this = _valkka_nv.new_DummyFrameFilter(name, verbose, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_DummyFrameFilter
__del__ = lambda self: None
DummyFrameFilter_swigregister = _valkka_nv.DummyFrameFilter_swigregister
DummyFrameFilter_swigregister(DummyFrameFilter)
class InfoFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, InfoFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, InfoFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_InfoFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_InfoFrameFilter
__del__ = lambda self: None
InfoFrameFilter_swigregister = _valkka_nv.InfoFrameFilter_swigregister
InfoFrameFilter_swigregister(InfoFrameFilter)
class BriefInfoFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, BriefInfoFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, BriefInfoFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_BriefInfoFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_BriefInfoFrameFilter
__del__ = lambda self: None
BriefInfoFrameFilter_swigregister = _valkka_nv.BriefInfoFrameFilter_swigregister
BriefInfoFrameFilter_swigregister(BriefInfoFrameFilter)
class ThreadSafeFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ThreadSafeFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ThreadSafeFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_ThreadSafeFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_ThreadSafeFrameFilter
__del__ = lambda self: None
ThreadSafeFrameFilter_swigregister = _valkka_nv.ThreadSafeFrameFilter_swigregister
ThreadSafeFrameFilter_swigregister(ThreadSafeFrameFilter)
class ForkFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ForkFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ForkFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None, next2=None):
this = _valkka_nv.new_ForkFrameFilter(name, next, next2)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_ForkFrameFilter
__del__ = lambda self: None
ForkFrameFilter_swigregister = _valkka_nv.ForkFrameFilter_swigregister
ForkFrameFilter_swigregister(ForkFrameFilter)
class ForkFrameFilter3(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ForkFrameFilter3, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ForkFrameFilter3, name)
__repr__ = _swig_repr
def __init__(self, name, next=None, next2=None, next3=None):
this = _valkka_nv.new_ForkFrameFilter3(name, next, next2, next3)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_ForkFrameFilter3
__del__ = lambda self: None
ForkFrameFilter3_swigregister = _valkka_nv.ForkFrameFilter3_swigregister
ForkFrameFilter3_swigregister(ForkFrameFilter3)
class ForkFrameFilterN(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, ForkFrameFilterN, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, ForkFrameFilterN, name)
__repr__ = _swig_repr
def __init__(self, name):
this = _valkka_nv.new_ForkFrameFilterN(name)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_ForkFrameFilterN
__del__ = lambda self: None
def connect(self, tag, filter):
return _valkka_nv.ForkFrameFilterN_connect(self, tag, filter)
def disconnect(self, tag):
return _valkka_nv.ForkFrameFilterN_disconnect(self, tag)
ForkFrameFilterN_swigregister = _valkka_nv.ForkFrameFilterN_swigregister
ForkFrameFilterN_swigregister(ForkFrameFilterN)
class SlotFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SlotFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SlotFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, n_slot, next=None):
this = _valkka_nv.new_SlotFrameFilter(name, n_slot, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_SlotFrameFilter
__del__ = lambda self: None
SlotFrameFilter_swigregister = _valkka_nv.SlotFrameFilter_swigregister
SlotFrameFilter_swigregister(SlotFrameFilter)
class PassSlotFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, PassSlotFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, PassSlotFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, n_slot, next=None):
this = _valkka_nv.new_PassSlotFrameFilter(name, n_slot, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_PassSlotFrameFilter
__del__ = lambda self: None
PassSlotFrameFilter_swigregister = _valkka_nv.PassSlotFrameFilter_swigregister
PassSlotFrameFilter_swigregister(PassSlotFrameFilter)
class DumpFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DumpFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DumpFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_DumpFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_DumpFrameFilter
__del__ = lambda self: None
DumpFrameFilter_swigregister = _valkka_nv.DumpFrameFilter_swigregister
DumpFrameFilter_swigregister(DumpFrameFilter)
class CountFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, CountFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, CountFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_CountFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_CountFrameFilter
__del__ = lambda self: None
CountFrameFilter_swigregister = _valkka_nv.CountFrameFilter_swigregister
CountFrameFilter_swigregister(CountFrameFilter)
class TimestampFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, TimestampFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, TimestampFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _valkka_nv.new_TimestampFrameFilter(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_TimestampFrameFilter
__del__ = lambda self: None
TimestampFrameFilter_swigregister = _valkka_nv.TimestampFrameFilter_swigregister
TimestampFrameFilter_swigregister(TimestampFrameFilter)
class TimestampFrameFilter2(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, TimestampFrameFilter2, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, TimestampFrameFilter2, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _valkka_nv.new_TimestampFrameFilter2(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_TimestampFrameFilter2
__del__ = lambda self: None
TimestampFrameFilter2_swigregister = _valkka_nv.TimestampFrameFilter2_swigregister
TimestampFrameFilter2_swigregister(TimestampFrameFilter2)
class DummyTimestampFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DummyTimestampFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DummyTimestampFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_DummyTimestampFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_DummyTimestampFrameFilter
__del__ = lambda self: None
DummyTimestampFrameFilter_swigregister = _valkka_nv.DummyTimestampFrameFilter_swigregister
DummyTimestampFrameFilter_swigregister(DummyTimestampFrameFilter)
class RepeatH264ParsFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, RepeatH264ParsFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, RepeatH264ParsFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_RepeatH264ParsFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_RepeatH264ParsFrameFilter
__del__ = lambda self: None
RepeatH264ParsFrameFilter_swigregister = _valkka_nv.RepeatH264ParsFrameFilter_swigregister
RepeatH264ParsFrameFilter_swigregister(RepeatH264ParsFrameFilter)
class GateFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GateFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GateFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_GateFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def set(self):
return _valkka_nv.GateFrameFilter_set(self)
def unSet(self):
return _valkka_nv.GateFrameFilter_unSet(self)
def passConfigFrames(self):
return _valkka_nv.GateFrameFilter_passConfigFrames(self)
def noConfigFrames(self):
return _valkka_nv.GateFrameFilter_noConfigFrames(self)
__swig_destroy__ = _valkka_nv.delete_GateFrameFilter
__del__ = lambda self: None
GateFrameFilter_swigregister = _valkka_nv.GateFrameFilter_swigregister
GateFrameFilter_swigregister(GateFrameFilter)
class SwitchFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SwitchFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SwitchFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next1=None, next2=None):
this = _valkka_nv.new_SwitchFrameFilter(name, next1, next2)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def set1(self):
return _valkka_nv.SwitchFrameFilter_set1(self)
def set2(self):
return _valkka_nv.SwitchFrameFilter_set2(self)
__swig_destroy__ = _valkka_nv.delete_SwitchFrameFilter
__del__ = lambda self: None
SwitchFrameFilter_swigregister = _valkka_nv.SwitchFrameFilter_swigregister
SwitchFrameFilter_swigregister(SwitchFrameFilter)
class CachingGateFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, CachingGateFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, CachingGateFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_CachingGateFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def set(self):
return _valkka_nv.CachingGateFrameFilter_set(self)
def unSet(self):
return _valkka_nv.CachingGateFrameFilter_unSet(self)
__swig_destroy__ = _valkka_nv.delete_CachingGateFrameFilter
__del__ = lambda self: None
CachingGateFrameFilter_swigregister = _valkka_nv.CachingGateFrameFilter_swigregister
CachingGateFrameFilter_swigregister(CachingGateFrameFilter)
class SetSlotFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SetSlotFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SetSlotFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, next=None):
this = _valkka_nv.new_SetSlotFrameFilter(name, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def setSlot(self, n=0):
return _valkka_nv.SetSlotFrameFilter_setSlot(self, n)
__swig_destroy__ = _valkka_nv.delete_SetSlotFrameFilter
__del__ = lambda self: None
SetSlotFrameFilter_swigregister = _valkka_nv.SetSlotFrameFilter_swigregister
SetSlotFrameFilter_swigregister(SetSlotFrameFilter)
class TimeIntervalFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, TimeIntervalFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, TimeIntervalFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, mstimedelta, next=None):
this = _valkka_nv.new_TimeIntervalFrameFilter(name, mstimedelta, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_TimeIntervalFrameFilter
__del__ = lambda self: None
TimeIntervalFrameFilter_swigregister = _valkka_nv.TimeIntervalFrameFilter_swigregister
TimeIntervalFrameFilter_swigregister(TimeIntervalFrameFilter)
class FifoFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, FifoFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, FifoFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, framefifo):
this = _valkka_nv.new_FifoFrameFilter(name, framefifo)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_FifoFrameFilter
__del__ = lambda self: None
FifoFrameFilter_swigregister = _valkka_nv.FifoFrameFilter_swigregister
FifoFrameFilter_swigregister(FifoFrameFilter)
class BlockingFifoFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, BlockingFifoFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, BlockingFifoFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, framefifo):
this = _valkka_nv.new_BlockingFifoFrameFilter(name, framefifo)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_BlockingFifoFrameFilter
__del__ = lambda self: None
BlockingFifoFrameFilter_swigregister = _valkka_nv.BlockingFifoFrameFilter_swigregister
BlockingFifoFrameFilter_swigregister(BlockingFifoFrameFilter)
class SwScaleFrameFilter(FrameFilter):
__swig_setmethods__ = {}
for _s in [FrameFilter]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SwScaleFrameFilter, name, value)
__swig_getmethods__ = {}
for _s in [FrameFilter]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SwScaleFrameFilter, name)
__repr__ = _swig_repr
def __init__(self, name, target_width, target_height, next=None):
this = _valkka_nv.new_SwScaleFrameFilter(name, target_width, target_height, next)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_SwScaleFrameFilter
__del__ = lambda self: None
SwScaleFrameFilter_swigregister = _valkka_nv.SwScaleFrameFilter_swigregister
SwScaleFrameFilter_swigregister(SwScaleFrameFilter)
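# --- Thread and fifo-context wrappers ----------------------------------------
# The classes below are SWIG proxies for libValkka's threading layer: every
# method simply delegates to the compiled _valkka_nv extension. Thread is the
# base proxy (it cannot be constructed directly) exposing the lifecycle calls
# startCall()/stopCall()/requestStopCall()/waitStopCall(), while
# FrameFifoContext appears to carry the fifo sizing parameters (n_basic,
# n_avpkt, ...) consumed when constructing the decoder threads further below.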
class Thread(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Thread, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Thread, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _valkka_nv.delete_Thread
__del__ = lambda self: None
def setAffinity(self, i):
return _valkka_nv.Thread_setAffinity(self, i)
def startCall(self):
return _valkka_nv.Thread_startCall(self)
def stopCall(self):
return _valkka_nv.Thread_stopCall(self)
def requestStopCall(self):
return _valkka_nv.Thread_requestStopCall(self)
def waitStopCall(self):
return _valkka_nv.Thread_waitStopCall(self)
def waitReady(self):
return _valkka_nv.Thread_waitReady(self)
Thread_swigregister = _valkka_nv.Thread_swigregister
Thread_swigregister(Thread)
class FrameFifoContext(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, FrameFifoContext, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, FrameFifoContext, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _valkka_nv.new_FrameFifoContext(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_setmethods__["n_basic"] = _valkka_nv.FrameFifoContext_n_basic_set
__swig_getmethods__["n_basic"] = _valkka_nv.FrameFifoContext_n_basic_get
if _newclass:
n_basic = _swig_property(_valkka_nv.FrameFifoContext_n_basic_get, _valkka_nv.FrameFifoContext_n_basic_set)
__swig_setmethods__["n_avpkt"] = _valkka_nv.FrameFifoContext_n_avpkt_set
__swig_getmethods__["n_avpkt"] = _valkka_nv.FrameFifoContext_n_avpkt_get
if _newclass:
n_avpkt = _swig_property(_valkka_nv.FrameFifoContext_n_avpkt_get, _valkka_nv.FrameFifoContext_n_avpkt_set)
__swig_setmethods__["n_avframe"] = _valkka_nv.FrameFifoContext_n_avframe_set
__swig_getmethods__["n_avframe"] = _valkka_nv.FrameFifoContext_n_avframe_get
if _newclass:
n_avframe = _swig_property(_valkka_nv.FrameFifoContext_n_avframe_get, _valkka_nv.FrameFifoContext_n_avframe_set)
__swig_setmethods__["n_yuvpbo"] = _valkka_nv.FrameFifoContext_n_yuvpbo_set
__swig_getmethods__["n_yuvpbo"] = _valkka_nv.FrameFifoContext_n_yuvpbo_get
if _newclass:
n_yuvpbo = _swig_property(_valkka_nv.FrameFifoContext_n_yuvpbo_get, _valkka_nv.FrameFifoContext_n_yuvpbo_set)
__swig_setmethods__["n_setup"] = _valkka_nv.FrameFifoContext_n_setup_set
__swig_getmethods__["n_setup"] = _valkka_nv.FrameFifoContext_n_setup_get
if _newclass:
n_setup = _swig_property(_valkka_nv.FrameFifoContext_n_setup_get, _valkka_nv.FrameFifoContext_n_setup_set)
__swig_setmethods__["n_signal"] = _valkka_nv.FrameFifoContext_n_signal_set
__swig_getmethods__["n_signal"] = _valkka_nv.FrameFifoContext_n_signal_get
if _newclass:
n_signal = _swig_property(_valkka_nv.FrameFifoContext_n_signal_get, _valkka_nv.FrameFifoContext_n_signal_set)
__swig_setmethods__["n_marker"] = _valkka_nv.FrameFifoContext_n_marker_set
__swig_getmethods__["n_marker"] = _valkka_nv.FrameFifoContext_n_marker_get
if _newclass:
n_marker = _swig_property(_valkka_nv.FrameFifoContext_n_marker_get, _valkka_nv.FrameFifoContext_n_marker_set)
__swig_setmethods__["flush_when_full"] = _valkka_nv.FrameFifoContext_flush_when_full_set
__swig_getmethods__["flush_when_full"] = _valkka_nv.FrameFifoContext_flush_when_full_get
if _newclass:
flush_when_full = _swig_property(_valkka_nv.FrameFifoContext_flush_when_full_get, _valkka_nv.FrameFifoContext_flush_when_full_set)
__swig_destroy__ = _valkka_nv.delete_FrameFifoContext
__del__ = lambda self: None
FrameFifoContext_swigregister = _valkka_nv.FrameFifoContext_swigregister
FrameFifoContext_swigregister(FrameFifoContext)
class DecoderThread(Thread):
__swig_setmethods__ = {}
for _s in [Thread]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DecoderThread, name, value)
__swig_getmethods__ = {}
for _s in [Thread]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, DecoderThread, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _valkka_nv.new_DecoderThread(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_DecoderThread
__del__ = lambda self: None
def setTimeCorrection(self, val):
return _valkka_nv.DecoderThread_setTimeCorrection(self, val)
def getFrameFilter(self):
return _valkka_nv.DecoderThread_getFrameFilter(self)
def getBlockingFrameFilter(self):
return _valkka_nv.DecoderThread_getBlockingFrameFilter(self)
def setTimeTolerance(self, mstol):
return _valkka_nv.DecoderThread_setTimeTolerance(self, mstol)
def setNumberOfThreads(self, n_threads):
return _valkka_nv.DecoderThread_setNumberOfThreads(self, n_threads)
def decodingOnCall(self):
return _valkka_nv.DecoderThread_decodingOnCall(self)
def decodingOffCall(self):
return _valkka_nv.DecoderThread_decodingOffCall(self)
def requestStopCall(self):
return _valkka_nv.DecoderThread_requestStopCall(self)
DecoderThread_swigregister = _valkka_nv.DecoderThread_swigregister
DecoderThread_swigregister(DecoderThread)
def NVcuInit():
return _valkka_nv.NVcuInit()
NVcuInit = _valkka_nv.NVcuInit
def NVgetDevices():
return _valkka_nv.NVgetDevices()
NVgetDevices = _valkka_nv.NVgetDevices
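# NVcuInit() and NVgetDevices() are thin module-level wrappers around the CUDA
# helpers exported by _valkka_nv. A hedged usage sketch follows; the return
# types are defined on the C++ side and are assumed, not guaranteed, here:
#
#     NVcuInit()                 # assumed to initialise the CUDA driver state
#     print(NVgetDevices())      # assumed to describe the NVDEC-capable GPUs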
class NVThread(DecoderThread):
__swig_setmethods__ = {}
for _s in [DecoderThread]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, NVThread, name, value)
__swig_getmethods__ = {}
for _s in [DecoderThread]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, NVThread, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _valkka_nv.new_NVThread(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _valkka_nv.delete_NVThread
__del__ = lambda self: None
NVThread_swigregister = _valkka_nv.NVThread_swigregister
NVThread_swigregister(NVThread)
# This file is compatible with both classic and new-style classes.
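# --- Illustrative usage sketch (not part of the SWIG-generated bindings) -----
# A minimal, hypothetical example of chaining the frame filters wrapped above.
# Only constructors and methods visible in this file are used; the chain itself
# is an assumption about typical use and nothing below runs at import time.

def _example_filter_chain():
    """Build a small gate -> counter -> dump chain and return its entry point."""
    dump = DumpFrameFilter("dump")            # terminal filter: dumps frames
    count = CountFrameFilter("count", dump)   # counts frames, then forwards downstream
    gate = GateFrameFilter("gate", count)     # runtime on/off switch (see set/unSet above)
    gate.set()                                # open the gate so frames pass through
    return gate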