Dataset schema (column, dtype, min, max):

column                                      dtype          min        max
hexsha                                      stringlengths  40         40
size                                        int64          5          2.06M
ext                                         stringclasses  10 values
lang                                        stringclasses  1 value
max_stars_repo_path                         stringlengths  3          248
max_stars_repo_name                         stringlengths  5          125
max_stars_repo_head_hexsha                  stringlengths  40         78
max_stars_repo_licenses                     listlengths    1          10
max_stars_count                             int64          1          191k
max_stars_repo_stars_event_min_datetime     stringlengths  24         24
max_stars_repo_stars_event_max_datetime     stringlengths  24         24
max_issues_repo_path                        stringlengths  3          248
max_issues_repo_name                        stringlengths  5          125
max_issues_repo_head_hexsha                 stringlengths  40         78
max_issues_repo_licenses                    listlengths    1          10
max_issues_count                            int64          1          67k
max_issues_repo_issues_event_min_datetime   stringlengths  24         24
max_issues_repo_issues_event_max_datetime   stringlengths  24         24
max_forks_repo_path                         stringlengths  3          248
max_forks_repo_name                         stringlengths  5          125
max_forks_repo_head_hexsha                  stringlengths  40         78
max_forks_repo_licenses                     listlengths    1          10
max_forks_count                             int64          1          105k
max_forks_repo_forks_event_min_datetime     stringlengths  24         24
max_forks_repo_forks_event_max_datetime     stringlengths  24         24
content                                     stringlengths  5          2.06M
avg_line_length                             float64        1          1.02M
max_line_length                             int64          3          1.03M
alphanum_fraction                           float64        0          1
count_classes                               int64          0          1.6M
score_classes                               float64        0          1
count_generators                            int64          0          651k
score_generators                            float64        0          1
count_decorators                            int64          0          990k
score_decorators                            float64        0          1
count_async_functions                       int64          0          235k
score_async_functions                       float64        0          1
count_documentation                         int64          0          1.04M
score_documentation                         float64        0          1
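Each record below lists these fields in the same order: repository metadata for the most-starred, most-issued and most-forked copies of the file, the raw file content, and the count/score quality signals. As a minimal, hedged sketch of how such a dump might be loaded and filtered on those columns (the file name "python_files.parquet" and the 0.3 score threshold are assumptions for illustration, not part of this dump):

import pandas as pd

# Hypothetical file name; this dump does not name its storage file.
df = pd.read_parquet("python_files.parquet")

# Keep Python files that carry some documentation and at least one star.
subset = df[(df["lang"] == "Python")
            & (df["score_documentation"] > 0.3)
            & (df["max_stars_count"].fillna(0) >= 1)]

# Show the most-starred matches with their repository, path and size.
print(subset.sort_values("max_stars_count", ascending=False)
            [["max_stars_repo_name", "max_stars_repo_path", "size"]]
            .head(10))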
22a8ec1abea9d6f95b972cc7b4d65ddb840ef8b2
2,962
py
Python
dexp/cli/dexp_commands/crop.py
JoOkuma/dexp
6d9003384605b72f387d38b5befa29e4e2246af8
[ "BSD-3-Clause" ]
null
null
null
dexp/cli/dexp_commands/crop.py
JoOkuma/dexp
6d9003384605b72f387d38b5befa29e4e2246af8
[ "BSD-3-Clause" ]
null
null
null
dexp/cli/dexp_commands/crop.py
JoOkuma/dexp
6d9003384605b72f387d38b5befa29e4e2246af8
[ "BSD-3-Clause" ]
null
null
null
import click
from arbol.arbol import aprint, asection

from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop


@click.command()
@click.argument("input_paths", nargs=-1)  # , help='input path'
@click.option("--output_path", "-o")  # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
    "--quantile",
    "-q",
    default=0.99,
    type=float,
    help="Quantile parameter for lower bound of brightness for thresholding.",
    show_default=True,
)
@click.option(
    "--reference-channel",
    "-rc",
    default=None,
    help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
    "--codec",
    "-z",
    default=DEFAULT_CODEC,
    help="Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ ",
    show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
    "--workers",
    "-wk",
    default=-4,
    help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
    show_default=True,
)  #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True)  #
def crop(
    input_paths,
    output_path,
    channels,
    quantile,
    reference_channel,
    store,
    chunks,
    codec,
    clevel,
    overwrite,
    workers,
    check,
):
    input_dataset, input_paths = glob_datasets(input_paths)
    output_path = _get_output_path(input_paths[0], output_path, "_crop")
    channels = _parse_channels(input_dataset, channels)
    if reference_channel is None:
        reference_channel = input_dataset.channels()[0]
    chunks = _parse_chunks(chunks)

    with asection(
        f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
        f"using channel {reference_channel} as a reference."
    ):
        dataset_crop(
            input_dataset,
            output_path,
            channels=channels,
            reference_channel=reference_channel,
            quantile=quantile,
            store=store,
            chunks=chunks,
            compression=codec,
            compression_level=clevel,
            overwrite=overwrite,
            workers=workers,
            check=check,
        )

        input_dataset.close()
        aprint("Done!")
32.549451
117
0.660365
0
0
0
0
2,677
0.893525
0
0
956
0.319092
22a950c4c4a0d6a5d8ae35400f9dc583d0a56a66
2,287
py
Python
morse_DMT/write_dipha_file_3d_revise.py
YinuoJin/DMT_loss
c6e66cb7997b7cd5616156faaf294e350e77c4c2
[ "MIT" ]
1
2021-12-06T13:06:55.000Z
2021-12-06T13:06:55.000Z
morse_DMT/write_dipha_file_3d_revise.py
YinuoJin/DMT_loss
c6e66cb7997b7cd5616156faaf294e350e77c4c2
[ "MIT" ]
null
null
null
morse_DMT/write_dipha_file_3d_revise.py
YinuoJin/DMT_loss
c6e66cb7997b7cd5616156faaf294e350e77c4c2
[ "MIT" ]
null
null
null
import sys
from matplotlib import image as mpimg
import numpy as np
import os

DIPHA_CONST = 8067171840
DIPHA_IMAGE_TYPE_CONST = 1
DIM = 3

input_dir = os.path.join(os.getcwd(), sys.argv[1])
dipha_output_filename = sys.argv[2]
vert_filename = sys.argv[3]

input_filenames = [name for name in os.listdir(input_dir)
                   if (os.path.isfile(input_dir + '/' + name)) and (name != ".DS_Store")]
input_filenames.sort()

image = mpimg.imread(os.path.join(input_dir, input_filenames[0]))
nx, ny = image.shape
del image
nz = len(input_filenames)

print(nx, ny, nz)
#sys.exit()

im_cube = np.zeros([nx, ny, nz])

i = 0
for name in input_filenames:
    sys.stdout.flush()
    print(i, name)
    fileName = input_dir + "/" + name
    im_cube[:, :, i] = mpimg.imread(fileName)
    i = i + 1

print('writing dipha output...')

with open(dipha_output_filename, 'wb') as output_file:
    # this is needed to verify you are giving dipha a dipha file
    np.int64(DIPHA_CONST).tofile(output_file)
    # this tells dipha that we are giving an image as input
    np.int64(DIPHA_IMAGE_TYPE_CONST).tofile(output_file)
    # number of points
    np.int64(nx * ny * nz).tofile(output_file)
    # dimension
    np.int64(DIM).tofile(output_file)
    # pixels in each dimension
    np.int64(nx).tofile(output_file)
    np.int64(ny).tofile(output_file)
    np.int64(nz).tofile(output_file)
    # pixel values
    for k in range(nz):
        sys.stdout.flush()
        print('dipha - working on image', k)
        for j in range(ny):
            for i in range(nx):
                val = int(-im_cube[i, j, k]*255)
                '''
                if val != 0 and val != -1:
                    print('val check:', val)
                '''
                np.float64(val).tofile(output_file)
    output_file.close()

print('writing vert file')

with open(vert_filename, 'w') as vert_file:
    for k in range(nz):
        sys.stdout.flush()
        print('verts - working on image', k)
        for j in range(ny):
            for i in range(nx):
                vert_file.write(str(i) + ' ' + str(j) + ' ' + str(k) + ' ' + str(int(-im_cube[i, j, k] * 255)) + '\n')
    vert_file.close()

print(nx, ny, nz)
29.701299
119
0.584609
0
0
0
0
0
0
0
0
449
0.196327
22a96894a0336c7d7df8e78f4c4c6ea30cbd0530
1,507
py
Python
microservices/validate/tools/validates.py
clodonil/pipeline_aws_custom
8ca517d0bad48fe528461260093f0035f606f9be
[ "Apache-2.0" ]
null
null
null
microservices/validate/tools/validates.py
clodonil/pipeline_aws_custom
8ca517d0bad48fe528461260093f0035f606f9be
[ "Apache-2.0" ]
null
null
null
microservices/validate/tools/validates.py
clodonil/pipeline_aws_custom
8ca517d0bad48fe528461260093f0035f606f9be
[ "Apache-2.0" ]
null
null
null
""" Tools para validar o arquivo template recebido do SQS """ class Validate: def __init__(self): pass def check_validate_yml(self, template): """ valida se o arquivo yml é valido """ if template: return True else: return False def check_yml_struct(self, template): """ Valida se a estrutura do yml é valido """ if template: return True else: return False def check_template_exist(self, template): """ Valida se o template informado no arquivo yml existe """ if template: return True else: return False def check_callback_protocol_endpoint(self, template): """ validar se o protocolo e endpoint são validos """ return True def check_template(self, template): if self.check_validate_yml(template) \ and self.check_yml_struct(template) \ and self.check_template_exist(template) \ and self.check_callback_protocol_endpoint(template): msg = {"status": True} return msg else: msg = {'status': False, 'message': 'problema no arquivo yml'} return msg def change_yml_to_json(content): try: template_json = yaml.safe_load(content) return template_json except yaml.YAMLError as error: return {"message": str(error)}
25.542373
73
0.568016
1,252
0.829139
0
0
0
0
0
0
385
0.254967
22aabcb0f1d4d4e04e99859300806fd807e56ef4
1,223
py
Python
MetropolisMCMC.py
unrealTOM/MC
5a4cdf1ee11ef3d438f24dd38e894731103448ac
[ "MIT" ]
4
2020-04-11T09:54:27.000Z
2021-08-18T07:06:52.000Z
MetropolisMCMC.py
unrealTOM/MC
5a4cdf1ee11ef3d438f24dd38e894731103448ac
[ "MIT" ]
null
null
null
MetropolisMCMC.py
unrealTOM/MC
5a4cdf1ee11ef3d438f24dd38e894731103448ac
[ "MIT" ]
5
2019-01-22T03:47:17.000Z
2022-02-14T18:09:07.000Z
import numpy as np
import matplotlib.pyplot as plt
import math

def normal(mu,sigma,x):  #normal distribution
    return 1/(math.pi*2)**0.5/sigma*np.exp(-(x-mu)**2/2/sigma**2)

def eval(x):
    return normal(-4,1,x) + normal(4,1,x)
    #return 0.3*np.exp(-0.2*x**2)+0.7*np.exp(-0.2*(x-10)**2)

def ref(x_star,x):  #normal distribution
    return normal(x,10,x_star)

N = [100,500,1000,5000]
fig = plt.figure()
for i in range(4):
    X = np.array([])
    x = 0.1  #initialize x0 to be 0.1
    for j in range(N[i]):
        u = np.random.rand()
        x_star = np.random.normal(x,10)
        A = min(1,eval(x_star)/eval(x))  #*q(x,x_star)/p(x)/q(x_star,x))
        if u < A:
            x = x_star
        X=np.hstack((X,x))
    ax = fig.add_subplot(2,2,i+1)
    ax.hist(X,bins=100,density=True)
    x = np.linspace(-10,20,5000)
    #ax.plot(x,eval(x)/2.7)  #2.7 approximates the normalizing constant
    ax.plot(x,eval(x)/2)  #2 approximates the normalizing constant
    ax.set_ylim(0,0.35)
    ax.text(-9,0.25,'I=%d'%N[i])

fig.suptitle('Metropolis_Hastings for MCMC(Normal)')
#fig.suptitle('Metropolis_Hastings for MCMC(Exp.)')
plt.savefig('MetropolisNormal.png',dpi=100)
#plt.savefig('MetropolisExp.png',dpi=100)
plt.show()
29.829268
71
0.623058
0
0
0
0
0
0
0
0
415
0.33933
22ab90482878ca5263216eabd709a4a4b0c55fab
338
py
Python
gfwlist/gen.py
lipeijian/shadowsocks-android
ef707e4383a0d430775c8ac9b660c334e87e40ec
[ "OpenSSL", "MIT" ]
137
2016-08-04T13:34:02.000Z
2021-05-31T12:47:10.000Z
gfwlist/gen.py
lipeijian/shadowsocks-android
ef707e4383a0d430775c8ac9b660c334e87e40ec
[ "OpenSSL", "MIT" ]
9
2016-10-16T14:43:30.000Z
2018-04-21T11:02:39.000Z
gfwlist/gen.py
lipeijian/shadowsocks-android
ef707e4383a0d430775c8ac9b660c334e87e40ec
[ "OpenSSL", "MIT" ]
86
2016-08-30T07:22:19.000Z
2020-10-19T05:08:22.000Z
#!/usr/bin/python
# -*- encoding: utf8 -*-

import itertools
import math
import sys

import IPy


def main():
    china_list_set = IPy.IPSet()
    for line in sys.stdin:
        china_list_set.add(IPy.IP(line))

    # 输出结果
    for ip in china_list_set:
        print '<item>' + str(ip) + '</item>'


if __name__ == "__main__":
    main()
14.695652
44
0.60355
0
0
0
0
0
0
0
0
82
0.236994
22ac34a9639b610355752302f9ba8f423e657538
436
py
Python
Specialization/Personal/SortHours.py
lastralab/Statistics
358679f2e749db2e23c655795b34382c84270704
[ "MIT" ]
3
2017-09-26T20:19:57.000Z
2020-02-03T16:59:59.000Z
Specialization/Personal/SortHours.py
lastralab/Statistics
358679f2e749db2e23c655795b34382c84270704
[ "MIT" ]
1
2017-09-22T13:57:04.000Z
2017-09-26T20:03:24.000Z
Specialization/Personal/SortHours.py
lastralab/Statistics
358679f2e749db2e23c655795b34382c84270704
[ "MIT" ]
3
2018-05-09T01:41:16.000Z
2019-01-16T15:32:59.000Z
name = "mail.txt" counts = dict() handle = open(name) for line in handle: line = line.rstrip() if line == '': continue words = line.split() if words[0] == 'From': counts[words[5][:2]] = counts.get(words[5][:2], 0) + 1 tlist = list() for key, value in counts.items(): newtup = (key, value) tlist.append(newtup) tlist.sort() for key, value in tlist: print key, value
18.956522
64
0.548165
0
0
0
0
0
0
0
0
18
0.041284
22ac5683811849c14d8a103b4887cbd79b2ac236
9,338
py
Python
core/simulators/carla_scenario_simulator.py
RangiLyu/DI-drive
f7db2e7b19d70c05184d6d6edae6b7e035a324d7
[ "Apache-2.0" ]
null
null
null
core/simulators/carla_scenario_simulator.py
RangiLyu/DI-drive
f7db2e7b19d70c05184d6d6edae6b7e035a324d7
[ "Apache-2.0" ]
null
null
null
core/simulators/carla_scenario_simulator.py
RangiLyu/DI-drive
f7db2e7b19d70c05184d6d6edae6b7e035a324d7
[ "Apache-2.0" ]
null
null
null
import os from typing import Any, Dict, List, Optional import carla from core.simulators.carla_simulator import CarlaSimulator from core.simulators.carla_data_provider import CarlaDataProvider from .srunner.scenarios.route_scenario import RouteScenario, SCENARIO_CLASS_DICT from .srunner.scenariomanager.scenario_manager import ScenarioManager class CarlaScenarioSimulator(CarlaSimulator): """ Carla simualtor used to run scenarios. The simulator loads configs of provided scenario, and create hero actor, npc vehicles, walkers, world map according to it. The sensors and running status are set as common Carla simulator. When created, it will set up Carla client due to arguments, set simulator basic configurations used all around its lifetime, and set some default running configurations. If no traffic manager port is provided, it will find random free port in system. :Arguments: - cfg (Dict): Config Dict. - client (carla.Client, optional): Already established Carla client. Defaults to None. - host (str, optional): TCP host Carla client link to. Defaults to 'localhost'. - port (int, optional): TCP port Carla client link to. Defaults to 9000. - tm_port (int, optional): Traffic manager port Carla client link to. Defaults to None. - timeout (float, optional): Carla client link timeout. Defaults to 10.0. :Interfaces: init, get_state, get_sensor_data, get_navigation, get_information, apply_control, run_step, clean_up :Properties: - town_name (str): Current town name. - hero_player (carla.Actor): hero actor in simulation. - collided (bool): Whether collided in current episode. - end_distance (float): Distance to target in current frame. - end_timeout (float): Timeout for entire route provided by planner. - total_diatance (float): Dictance for entire route provided by planner. - scenario_manager (Any): Scenario Manager instance used to get running state. """ config = dict( town='Town01', weather='random', sync_mode=True, delta_seconds=0.1, no_rendering=False, auto_pilot=False, n_vehicles=0, n_pedestrians=0, disable_two_wheels=False, col_threshold=400, resolution=1.0, waypoint_num=20, obs=list(), planner=dict(), aug=None, verbose=True, debug=False, ) def __init__( self, cfg: Dict, client: Optional[carla.Client] = None, host: str = 'localhost', port: int = 9000, tm_port: int = 9050, timeout: float = 10.0, **kwargs ) -> None: """ Init Carla scenario simulator. """ super().__init__(cfg, client, host, port, tm_port, timeout) self._resolution = self._cfg.resolution self._scenario = None self._start_scenario = False self._manager = ScenarioManager(self._debug, self._sync_mode, self._client_timeout) self._criteria_status = dict() def init(self, config: Any) -> None: """ Init simulator episode with provided args. This method takes an scneario configuration instance to set up scenarios in Carla server. the scenario could be a single scenario, or a route scenario together with several scenarios during navigating the route. A scneario manager is used to manager and check the running status and tick scenarios. A local planner is set to trace the route to generate target waypoint and road options in each tick. It will set world, map, vehicles, pedestrians dut to provided args and default configs, and reset running status. If no collision happens when creating actors, the init will end and return. :Arguments: - config (Any): Scenario configuration instance, containing information about the scenarios. 
""" self._scenario_config = config self.clean_up() self._set_town(config.town) self._set_weather(self._weather) self._blueprints = self._world.get_blueprint_library() while True: self.clean_up() CarlaDataProvider.set_client(self._client) CarlaDataProvider.set_world(self._world) CarlaDataProvider.set_traffic_manager_port(self._tm.get_port()) if CarlaDataProvider.get_map().name != config.town and CarlaDataProvider.get_map().name != "OpenDriveMap": print("WARNING: The CARLA server uses the wrong map: {}".format(CarlaDataProvider.get_map().name)) print("WARNING: This scenario requires to use map: {}".format(config.town)) print("[SIMULATOR] Preparing scenario: " + config.name) config.n_vehicles = self._n_vehicles config.disable_two_wheels = self._disable_two_wheels if "RouteScenario" in config.name: self._scenario = RouteScenario( world=self._world, config=config, debug_mode=self._debug, resolution=self._resolution ) self._hero_actor = self._scenario.ego_vehicles[0] self._prepare_observations() self._manager.load_scenario(self._scenario) self._planner.set_route(CarlaDataProvider.get_hero_vehicle_route(), clean=True) self._total_distance = self._planner.distance_to_goal self._end_timeout = self._scenario.route_timeout else: # select scenario if config.type in SCENARIO_CLASS_DICT: scenario_class = SCENARIO_CLASS_DICT[config.type] ego_vehicles = [] for vehicle in config.ego_vehicles: ego_vehicles.append( CarlaDataProvider.request_new_actor( vehicle.model, vehicle.transform, vehicle.rolename, True, color=vehicle.color, actor_category=vehicle.category ) ) self._scenario = scenario_class( world=self._world, ego_vehicles=ego_vehicles, config=config, debug_mode=self._debug ) else: raise RuntimeError("Scenario '{}' not support!".format(config.type)) self._hero_actor = self._scenario.ego_vehicles[0] self._prepare_observations() self._manager.load_scenario(self._scenario) self._planner.set_destination(config.route.data[0], config.route.data[1], clean=True) self._total_distance = self._planner.distance_to_goal self._spawn_pedestrians() if self._ready(): if self._debug: self._count_actors() break def run_step(self) -> None: """ Run one step simulation. This will tick Carla world and scenarios, update informations for all sensors and measurement. """ if not self._start_scenario: self._manager.start_scenario() self._start_scenario = True self._tick += 1 world_snapshot = self._world.get_snapshot() timestamp = world_snapshot.timestamp self._timestamp = timestamp.elapsed_seconds self._manager.tick_scenario(timestamp) if self._planner is not None: self._planner.run_step() self._collided = self._collision_sensor.collided self._traffic_light_helper.tick() if self._bev_wrapper is not None: if CarlaDataProvider._hero_vehicle_route is not None: self._bev_wrapper.tick() def get_criteria(self) -> List: """ Get criteria status list of scenario in current frame. Criteria related with hero actor is encounted. :Returns: List: Criteria list of scenario. """ criterion_list = self._manager.analyze_tick() for name, actor_id, result, actual_value, expected_value in criterion_list: if actor_id == self._hero_actor.id: self._criteria_status.update({name: [result, actual_value, expected_value]}) return self._criteria_status def end_scenario(self) -> None: """ End current scenario. Must be called before ending an episode. """ if self._start_scenario: self._manager.end_scenario() self._start_scenario = False def clean_up(self) -> None: """ Destroy all actors and sensors in current world. 
Clear all messages saved in simulator and data provider, and clean up running scenarios. This will NOT destroy theCarla client, so simulator can use same carla client to start next episode. """ if self._manager is not None: self._manager.clean_up() self._criteria_status.clear() super().clean_up() @property def scenario_manager(self) -> Any: return self._manager
41.502222
119
0.624331
8,988
0.962519
0
0
77
0.008246
0
0
3,463
0.37085
22acbc10643824eb1f53a753c9581e0e1f9b708d
86
py
Python
bin/run.py
Conengmo/python-empty-project
18d275422116577d48ae4fdbe1c93501a5e6ef78
[ "MIT" ]
null
null
null
bin/run.py
Conengmo/python-empty-project
18d275422116577d48ae4fdbe1c93501a5e6ef78
[ "MIT" ]
null
null
null
bin/run.py
Conengmo/python-empty-project
18d275422116577d48ae4fdbe1c93501a5e6ef78
[ "MIT" ]
null
null
null
import myproject

myproject.logs(show_level='debug')
myproject.mymod.do_something()
12.285714
34
0.802326
0
0
0
0
0
0
0
0
7
0.081395
22ad01968a4a3e4e8168ccbc68b9c73d312ea977
709
py
Python
development/simple_email.py
gerold-penz/python-simplemail
9cfae298743af2b771d6d779717b602de559689b
[ "MIT" ]
16
2015-04-21T19:12:26.000Z
2021-06-04T04:38:12.000Z
development/simple_email.py
gerold-penz/python-simplemail
9cfae298743af2b771d6d779717b602de559689b
[ "MIT" ]
3
2015-04-21T22:09:55.000Z
2021-04-27T07:04:05.000Z
development/simple_email.py
gerold-penz/python-simplemail
9cfae298743af2b771d6d779717b602de559689b
[ "MIT" ]
4
2015-07-22T11:33:28.000Z
2019-08-06T07:27:20.000Z
#!/usr/bin/env python
# coding: utf-8

# BEGIN --- required only for testing, remove in real world code --- BEGIN
import os
import sys
THISDIR = os.path.dirname(os.path.abspath(__file__))
APPDIR = os.path.abspath(os.path.join(THISDIR, os.path.pardir, os.path.pardir))
sys.path.insert(0, APPDIR)
# END --- required only for testing, remove in real world code --- END

import simplemail

simplemail.Email(
    smtp_server = "smtp.a1.net:25",
    smtp_user = "xxx",
    smtp_password = "xxx",
    use_tls = False,
    from_address = "xxx",
    to_address = "xxx",
    subject = u"Really simple test with umlauts (öäüß)",
    message = u"This is the message with umlauts (öäüß)",
).send()

print "Sent"
print
22.870968
79
0.679831
0
0
0
0
0
0
0
0
313
0.436541
22ad0b38c724e88cb9ecf306aa56fd0fb313ec45
3,325
py
Python
features/hdf_features.py
DerekYJC/bmi_python
7b9cf3f294a33688db24b0863c1035e9cc6999ea
[ "Apache-2.0" ]
null
null
null
features/hdf_features.py
DerekYJC/bmi_python
7b9cf3f294a33688db24b0863c1035e9cc6999ea
[ "Apache-2.0" ]
null
null
null
features/hdf_features.py
DerekYJC/bmi_python
7b9cf3f294a33688db24b0863c1035e9cc6999ea
[ "Apache-2.0" ]
null
null
null
''' HDF-saving features ''' import time import tempfile import random import traceback import numpy as np import fnmatch import os, sys import subprocess from riglib import calibrations, bmi from riglib.bmi import extractor from riglib.experiment import traits import hdfwriter class SaveHDF(object): ''' Saves data from registered sources into tables in an HDF file ''' def init(self): ''' Secondary init function. See riglib.experiment.Experiment.init() Prior to starting the task, this 'init' starts an HDFWriter sink. ''' from riglib import sink self.sinks = sink.sinks self.h5file = tempfile.NamedTemporaryFile(suffix=".h5", delete=False) self.h5file.flush() self.h5file.close() self.hdf = sink.sinks.start(self.sink_class, filename=self.h5file.name) super(SaveHDF, self).init() @property def sink_class(self): ''' Specify the sink class as a function in case future descendant classes want to use a different type of sink ''' return hdfwriter.HDFWriter def run(self): ''' Code to execute immediately prior to the beginning of the task FSM executing, or after the FSM has finished running. See riglib.experiment.Experiment.run(). This 'run' method stops the HDF sink after the FSM has finished running ''' try: super(SaveHDF, self).run() finally: self.hdf.stop() def join(self): ''' Re-join any spawned process for cleanup ''' self.hdf.join() super(SaveHDF, self).join() def set_state(self, condition, **kwargs): ''' Save task state transitions to HDF Parameters ---------- condition: string Name of new state to transition into. The state name must be a key in the 'status' dictionary attribute of the task Returns ------- None ''' self.hdf.sendMsg(condition) super(SaveHDF, self).set_state(condition, **kwargs) def record_annotation(self, msg): """ Record a user-input annotation """ self.hdf.sendMsg("annotation: " + msg) super(SaveHDF, self).record_annotation(msg) print("Saved annotation to HDF: " + msg) def get_h5_filename(self): return self.h5file.name def cleanup(self, database, saveid, **kwargs): ''' See LogExperiment.cleanup for documentation ''' super(SaveHDF, self).cleanup(database, saveid, **kwargs) print("Beginning HDF file cleanup") print("\tHDF data currently saved to temp file: %s" % self.h5file.name) try: print("\tRunning self.cleanup_hdf()") self.cleanup_hdf() except: print("\n\n\n\n\nError cleaning up HDF file!") import traceback traceback.print_exc() # this 'if' is needed because the remote procedure call to save_data doesn't like kwargs dbname = kwargs['dbname'] if 'dbname' in kwargs else 'default' if dbname == 'default': database.save_data(self.h5file.name, "hdf", saveid) else: database.save_data(self.h5file.name, "hdf", saveid, dbname=dbname)
31.367925
127
0.61203
3,044
0.915489
0
0
210
0.063158
0
0
1,451
0.436391
22ad976fe4002a0a8ca1f3ab36292229eb143691
2,040
py
Python
common/irma/common/exceptions.py
vaginessa/irma
02285080b67b25ef983a99a765044683bd43296c
[ "Apache-2.0" ]
null
null
null
common/irma/common/exceptions.py
vaginessa/irma
02285080b67b25ef983a99a765044683bd43296c
[ "Apache-2.0" ]
null
null
null
common/irma/common/exceptions.py
vaginessa/irma
02285080b67b25ef983a99a765044683bd43296c
[ "Apache-2.0" ]
null
null
null
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.


class IrmaDependencyError(Exception):
    """Error caused by a missing dependency."""
    pass


class IrmaMachineManagerError(Exception):
    """Error on a machine manager."""
    pass


class IrmaMachineError(Exception):
    """Error on a machine."""
    pass


class IrmaAdminError(Exception):
    """Error in admin part."""
    pass


class IrmaDatabaseError(Exception):
    """Error on a database manager."""
    pass


class IrmaCoreError(Exception):
    """Error in core parts (Db, Ftp, Celery..)"""
    pass


class IrmaDatabaseResultNotFound(IrmaDatabaseError):
    """A database result was required but none was found."""
    pass


class IrmaFileSystemError(IrmaDatabaseError):
    """Nothing corresponding to the request has been found in the database."""
    pass


class IrmaConfigurationError(IrmaCoreError):
    """Error wrong configuration."""
    pass


class IrmaFtpError(IrmaCoreError):
    """Error on ftp manager."""
    pass


class IrmaFTPSError(IrmaFtpError):
    """Error on ftp/tls manager."""
    pass


class IrmaSFTPError(IrmaFtpError):
    """Error on sftp manager."""
    pass


class IrmaTaskError(IrmaCoreError):
    """Error while processing celery tasks."""
    pass


class IrmaLockError(Exception):
    """Error for the locks on db content (already taken)"""
    pass


class IrmaLockModeError(Exception):
    """Error for the mode of the locks (doesn't exist)"""
    pass


class IrmaValueError(Exception):
    """Error for the parameters passed to the functions"""
    pass
21.473684
78
0.701471
1,468
0.719608
0
0
0
0
0
0
1,168
0.572549
22ad9d02328e75faf184ffbf1cc357191c9ff796
7,979
py
Python
tf_crnn/libs/infer.py
sunmengnan/city_brain
478f0b974f4491b4201956f37b83ce6860712bc8
[ "MIT" ]
null
null
null
tf_crnn/libs/infer.py
sunmengnan/city_brain
478f0b974f4491b4201956f37b83ce6860712bc8
[ "MIT" ]
null
null
null
tf_crnn/libs/infer.py
sunmengnan/city_brain
478f0b974f4491b4201956f37b83ce6860712bc8
[ "MIT" ]
null
null
null
import time import os import math import numpy as np from libs import utils from libs.img_dataset import ImgDataset from nets.crnn import CRNN from nets.cnn.paper_cnn import PaperCNN import shutil def calculate_accuracy(predicts, labels): """ :param predicts: encoded predict result :param labels: ground true label :return: accuracy """ assert len(predicts) == len(labels) correct_count = 0 for i, p_label in enumerate(predicts): if p_label == labels[i]: correct_count += 1 acc = correct_count / len(predicts) return acc, correct_count def calculate_edit_distance_mean(edit_distences): """ 排除了 edit_distance == 0 的值计算编辑距离的均值 :param edit_distences: :return: """ data = np.array(edit_distences) data = data[data != 0] if len(data) == 0: return 0 return np.mean(data) def validation(sess, feeds, fetches, dataset, converter, result_dir, name, step=None, print_batch_info=False, copy_failed=False): """ Save file name: {acc}_{step}.txt :param sess: tensorflow session :param model: crnn network :param result_dir: :param name: val, test, infer. used to create sub dir in result_dir :return: """ sess.run(dataset.init_op) img_paths = [] predicts = [] trimed_predicts = [] labels = [] trimed_labels = [] edit_distances = [] total_batch_time = 0 for batch in range(dataset.num_batches): img_batch, widths, label_batch, batch_labels, batch_img_paths = dataset.get_next_batch(sess) if len(batch_labels) == 0: continue batch_start_time = time.time() feed = {feeds['inputs']: img_batch, feeds['labels']: label_batch, feeds['sequence_length']: PaperCNN.get_sequence_lengths(widths), feeds['is_training']: False} try: batch_predicts, edit_distance, batch_edit_distances = sess.run(fetches, feed) except Exception: print(batch_labels) continue batch_predicts = [converter.decode(p, CRNN.CTC_INVALID_INDEX) for p in batch_predicts] trimed_batch_predicts = [utils.remove_all_symbols(txt) for txt in batch_predicts] trimed_batch_labels = [utils.remove_all_symbols(txt) for txt in batch_labels] img_paths.extend(batch_img_paths) predicts.extend(batch_predicts) labels.extend(batch_labels) trimed_predicts.extend(trimed_batch_predicts) trimed_labels.extend(trimed_batch_labels) edit_distances.extend(batch_edit_distances) acc, correct_count = calculate_accuracy(batch_predicts, batch_labels) trimed_acc, trimed_correct_count = calculate_accuracy(trimed_batch_predicts, trimed_batch_labels) batch_time = time.time() - batch_start_time total_batch_time += batch_time if print_batch_info: print("{:.03f}s [{}/{}] acc: {:.03f}({}/{}), edit_distance: {:.03f}, trim_acc {:.03f}({}/{})" .format(batch_time, batch, dataset.num_batches, acc, correct_count, dataset.batch_size, edit_distance, trimed_acc, trimed_correct_count, dataset.batch_size)) acc, correct_count = calculate_accuracy(predicts, labels) trimed_acc, trimed_correct_count = calculate_accuracy(trimed_predicts, trimed_labels) edit_distance_mean = calculate_edit_distance_mean(edit_distances) total_edit_distance = sum(edit_distances) acc_str = "Accuracy: {:.03f} ({}/{}), Trimed Accuracy: {:.03f} ({}/{})" \ "Total edit distance: {:.03f}, " \ "Average edit distance: {:.03f}, Average batch time: {:.03f}" \ .format(acc, correct_count, dataset.size, trimed_acc, trimed_correct_count, dataset.size, total_edit_distance, edit_distance_mean, total_batch_time / dataset.num_batches) print(acc_str) save_dir = os.path.join(result_dir, name) utils.check_dir_exist(save_dir) result_file_path = save_txt_result(save_dir, acc, step, labels, predicts, 'acc', edit_distances, acc_str) save_txt_result(save_dir, acc, step, 
labels, predicts, 'acc', edit_distances, acc_str, only_failed=True) save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc', edit_distances) save_txt_result(save_dir, trimed_acc, step, trimed_labels, trimed_predicts, 'tacc', edit_distances, only_failed=True) save_txt_4_analyze(save_dir, labels, predicts, 'acc', step) save_txt_4_analyze(save_dir, trimed_labels, trimed_predicts, 'tacc', step) # Copy image not all match to a dir # TODO: we will only save failed imgs for acc if copy_failed: failed_infer_img_dir = result_file_path[:-4] + "_failed" if os.path.exists(failed_infer_img_dir) and os.path.isdir(failed_infer_img_dir): shutil.rmtree(failed_infer_img_dir) utils.check_dir_exist(failed_infer_img_dir) failed_image_indices = [] for i, val in enumerate(edit_distances): if val != 0: failed_image_indices.append(i) for i in failed_image_indices: img_path = img_paths[i] img_name = img_path.split("/")[-1] dst_path = os.path.join(failed_infer_img_dir, img_name) shutil.copyfile(img_path, dst_path) failed_infer_result_file_path = os.path.join(failed_infer_img_dir, "result.txt") with open(failed_infer_result_file_path, 'w', encoding='utf-8') as f: for i in failed_image_indices: p_label = predicts[i] t_label = labels[i] f.write("{}\n".format(img_paths[i])) f.write("input: {:17s} length: {}\n".format(t_label, len(t_label))) f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label))) f.write("edit distance: {}\n".format(edit_distances[i])) f.write('-' * 30 + '\n') return acc, trimed_acc, edit_distance_mean, total_edit_distance, correct_count, trimed_correct_count def save_txt_4_analyze(save_dir, labels, predicts, acc_type, step): """ 把测试集的真值和预测结果放在保存在同一个 txt 文件中,方便统计 """ txt_path = os.path.join(save_dir, '%d_%s_gt_and_pred.txt' % (step, acc_type)) with open(txt_path, 'w', encoding='utf-8') as f: for i, p_label in enumerate(predicts): t_label = labels[i] f.write("{}__$__{}\n".format(t_label, p_label)) def save_txt_result(save_dir, acc, step, labels, predicts, acc_type, edit_distances=None, acc_str=None, only_failed=False): """ :param acc_type: 'acc' or 'tacc' :return: """ failed_suffix = '' if only_failed: failed_suffix = 'failed' if step is not None: txt_path = os.path.join(save_dir, '%d_%s_%.3f_%s.txt' % (step, acc_type, acc, failed_suffix)) else: txt_path = os.path.join(save_dir, '%s_%.3f_%s.txt' % (acc_type, acc, failed_suffix)) print("Write result to %s" % txt_path) with open(txt_path, 'w', encoding='utf-8') as f: for i, p_label in enumerate(predicts): t_label = labels[i] all_match = (t_label == p_label) if only_failed and all_match: continue # f.write("{}\n".format(img_paths[i])) f.write("input: {:17s} length: {}\n".format(t_label, len(t_label))) f.write("predict: {:17s} length: {}\n".format(p_label, len(p_label))) f.write("all match: {}\n".format(1 if all_match else 0)) if edit_distances: f.write("edit distance: {}\n".format(edit_distances[i])) f.write('-' * 30 + '\n') if acc_str: f.write(acc_str + "\n") return txt_path
36.104072
105
0.628525
0
0
0
0
0
0
0
0
1,421
0.176237
22ae53d11248d624a0ee5f564b8dd2e374ddaa54
606
py
Python
Day 2/Day_2_Python.py
giTan7/30-Days-Of-Code
f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8
[ "MIT" ]
1
2020-10-15T14:44:08.000Z
2020-10-15T14:44:08.000Z
Day 2/Day_2_Python.py
giTan7/30-Days-Of-Code
f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8
[ "MIT" ]
null
null
null
Day 2/Day_2_Python.py
giTan7/30-Days-Of-Code
f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8
[ "MIT" ]
null
null
null
#!/bin/python3

import math
import os
import random
import re
import sys


# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
    tip = (meal_cost * tip_percent)/100
    tax = (meal_cost * tax_percent)/100
    print(int(meal_cost + tip + tax + 0.5))  # We add 0.5 because the float should be rounded to the nearest integer


if __name__ == '__main__':
    meal_cost = float(input())
    tip_percent = int(input())
    tax_percent = int(input())
    solve(meal_cost, tip_percent, tax_percent)

# Time complexity: O(1)
# Space complexity: O(1)
22.444444
76
0.663366
0
0
0
0
0
0
0
0
183
0.30198
22ae7c79d1d1030557cb109b5f2d23a5d5fb88a4
5,706
py
Python
modules/templates/RLPPTM/tools/mis.py
nursix/rlpptm
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
[ "MIT" ]
1
2022-03-21T21:58:30.000Z
2022-03-21T21:58:30.000Z
modules/templates/RLPPTM/tools/mis.py
nursix/rlpptm
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
[ "MIT" ]
null
null
null
modules/templates/RLPPTM/tools/mis.py
nursix/rlpptm
e7b50b2fdf6277aed5f198ca10ad773c5ca0b947
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Helper Script for Mass-Invitation of Participant Organisations # # RLPPTM Template Version 1.0 # # Execute in web2py folder after code upgrade like: # python web2py.py -S eden -M -R applications/eden/modules/templates/RLPPTM/tools/mis.py # import os import sys from core import s3_format_datetime from templates.RLPPTM.config import SCHOOLS from templates.RLPPTM.helpers import InviteUserOrg # Batch limit (set to False to disable) BATCH_LIMIT = 250 # Override auth (disables all permission checks) auth.override = True # Failed-flag failed = False # Info log = None def info(msg): sys.stderr.write("%s" % msg) if log: log.write("%s" % msg) def infoln(msg): sys.stderr.write("%s\n" % msg) if log: log.write("%s\n" % msg) # Load models for tables otable = s3db.org_organisation gtable = s3db.org_group mtable = s3db.org_group_membership utable = s3db.auth_user oltable = s3db.org_organisation_user pltable = s3db.pr_person_user ctable = s3db.pr_contact timestmp = s3_format_datetime(dtfmt="%Y%m%d%H%M%S") LOGFILE = os.path.join(request.folder, "private", "mis_%s.log" % timestmp) # ----------------------------------------------------------------------------- # Invite organisations # if not failed: try: with open(LOGFILE, "w", encoding="utf-8") as logfile: log = logfile join = [mtable.on((mtable.organisation_id == otable.id) & \ (mtable.deleted == False)), gtable.on((gtable.id == mtable.group_id) & \ (gtable.name == SCHOOLS) & \ (gtable.deleted == False)), ] query = (otable.deleted == False) organisations = db(query).select(otable.id, otable.pe_id, otable.name, join = join, orderby = otable.id, ) total = len(organisations) infoln("Total: %s Organisations" % total) infoln("") skipped = sent = failures = 0 invite_org = InviteUserOrg.invite_account for organisation in organisations: info("%s..." % organisation.name) # Get all accounts that are linked to this org organisation_id = organisation.id join = oltable.on((oltable.user_id == utable.id) & \ (oltable.deleted == False)) left = pltable.on((pltable.user_id == utable.id) & \ (pltable.deleted == False)) query = (oltable.organisation_id == organisation_id) rows = db(query).select(utable.id, utable.email, utable.registration_key, pltable.pe_id, join = join, left = left, ) if rows: # There are already accounts linked to this organisation invited, registered = [], [] for row in rows: username = row.auth_user.email if row.pr_person_user.pe_id: registered.append(username) else: invited.append(username) if registered: infoln("already registered (%s)." % ", ".join(registered)) else: infoln("already invited (%s)." % ", ".join(invited)) skipped += 1 continue # Find email address query = (ctable.pe_id == organisation.pe_id) & \ (ctable.contact_method == "EMAIL") & \ (ctable.deleted == False) contact = db(query).select(ctable.value, orderby = ctable.priority, limitby = (0, 1), ).first() if contact: email = contact.value info("(%s)..." % email) else: infoln("no email address.") skipped += 1 continue error = invite_org(organisation, email, account=None) if not error: sent += 1 infoln("invited.") db.commit() else: failures += 1 infoln("invitation failed (%s)." 
% error) if BATCH_LIMIT and sent >= BATCH_LIMIT: infoln("Batch limit (%s) reached" % BATCH_LIMIT) skipped = total - (sent + failures) break infoln("") infoln("%s invitations sent" % sent) infoln("%s invitations failed" % failures) infoln("%s organisations skipped" % skipped) log = None except IOError: infoln("...failed (could not create logfile)") failed = True # ----------------------------------------------------------------------------- # Finishing up # if failed: db.rollback() infoln("PROCESS FAILED - Action rolled back.") else: db.commit() infoln("PROCESS SUCCESSFUL.")
35.222222
88
0.45496
0
0
0
0
0
0
0
0
1,128
0.197687
22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e
1,133
py
Python
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py
harshp8l/deep-learning-lang-detection
2a54293181c1c2b1a2b840ddee4d4d80177efb33
[ "MIT" ]
84
2017-10-25T15:49:21.000Z
2021-11-28T21:25:54.000Z
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
5
2018-03-29T11:50:46.000Z
2021-04-26T13:33:18.000Z
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
24
2017-11-22T08:31:00.000Z
2022-03-27T01:22:31.000Z
def save_form(form, actor=None):
    """Allows storing a form with a passed actor. Normally, Form.save() does not accept an actor, but if you require
    this to be passed (is not handled by middleware), you can use this to replace form.save().

    Requires you to use the audit.Model model as the actor is passed to the object's save method.
    """
    obj = form.save(commit=False)
    obj.save(actor=actor)
    form.save_m2m()
    return obj

#def intermediate_save(instance, actor=None):
#    """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform
#    intermediate saves:
#
#    obj.value1 = 1
#    intermediate_save(obj)
#    obj.value2 = 2
#    obj.save()
#    <value 1 and value 2 are both stored in the database>
#    """
#    if hasattr(instance, '_audit_changes'):
#        tmp = instance._audit_changes
#        if actor:
#            instance.save(actor=actor)
#        else:
#            instance.save()
#        instance._audit_changes = tmp
#    else:
#        if actor:
#            instance.save(actor=actor)
#        else:
#            instance.save()
32.371429
118
0.634598
0
0
0
0
0
0
0
0
976
0.86143
22aeec83fb0e871521d1f1a2e9afa8b18858d4b4
728
py
Python
engine/test_sysctl.py
kingsd041/os-tests
2ea57cb6f1da534633a4670ccb83d40300989886
[ "Apache-2.0" ]
null
null
null
engine/test_sysctl.py
kingsd041/os-tests
2ea57cb6f1da534633a4670ccb83d40300989886
[ "Apache-2.0" ]
null
null
null
engine/test_sysctl.py
kingsd041/os-tests
2ea57cb6f1da534633a4670ccb83d40300989886
[ "Apache-2.0" ]
null
null
null
# coding = utf-8
# Create date: 2018-11-05
# Author :Hailong


def test_sysctl(ros_kvm_with_paramiko, cloud_config_url):
    command = 'sudo cat /proc/sys/kernel/domainname'
    feed_back = 'test'
    client = ros_kvm_with_paramiko(cloud_config='{url}/test_sysctl.yml'.format(url=cloud_config_url))

    stdin, stdout, stderr = client.exec_command(command, timeout=10)
    output = stdout.read().decode('utf-8').replace('\n', '')
    assert (feed_back == output)

    command_b = 'sudo cat /proc/sys/dev/cdrom/debug'
    feed_back_b = '1'
    stdin, stdout, stderr = client.exec_command(command_b, timeout=10)
    output_b = stdout.read().decode('utf-8').replace('\n', '')
    client.close()
    assert (feed_back_b == output_b)
36.4
101
0.68956
0
0
0
0
0
0
0
0
190
0.260989
22aeecf51ba4f5585bf276df470496e100ee4eac
3,310
py
Python
paprika_sync/core/management/commands/import_recipes_from_file.py
grschafer/paprika-sync
8b6fcd6246557bb79009fa9355fd4d588fb8ed90
[ "MIT" ]
null
null
null
paprika_sync/core/management/commands/import_recipes_from_file.py
grschafer/paprika-sync
8b6fcd6246557bb79009fa9355fd4d588fb8ed90
[ "MIT" ]
null
null
null
paprika_sync/core/management/commands/import_recipes_from_file.py
grschafer/paprika-sync
8b6fcd6246557bb79009fa9355fd4d588fb8ed90
[ "MIT" ]
null
null
null
import json import logging from django.core.management.base import BaseCommand from django.db import transaction from paprika_sync.core.models import PaprikaAccount from paprika_sync.core.serializers import RecipeSerializer, CategorySerializer from paprika_sync.core.utils import log_start_end logger = logging.getLogger(__name__) class Command(BaseCommand): help = 'Import all recipes from file to specified PaprikaAccount' def add_arguments(self, parser): parser.add_argument( 'file', help='Path to json file containing list of all recipes', ) parser.add_argument( '--categories-file', help='Path to json file containing list of all categories', ) parser.add_argument( 'paprika_account_id', type=int, help='ID of PaprikaAccount to import recipes to', ) parser.add_argument( '-r', '--remove', action='store_true', help="Removes all of account's existing recipes before importing", ) @log_start_end(logger) def handle(self, *args, **options): recipes_file = options['file'] categories_file = options['categories_file'] pa_id = options['paprika_account_id'] wipe_account = options['remove'] logger.info('Starting import for PaprikaAccount id %s from %s, wipe_account=%s', pa_id, recipes_file, wipe_account) pa = PaprikaAccount.objects.get(id=pa_id) with open(recipes_file, 'rt') as fin: recipes = json.load(fin) logger.info('Found %s recipes to import to %s', len(recipes), pa) categories = [] if categories_file: with open(categories_file, 'rt') as fin: categories = json.load(fin) logger.info('Found %s categories to import to %s', len(categories), pa) with transaction.atomic(): if wipe_account: pa.recipes.all().delete() pa.categories.all().delete() for category in categories: category['paprika_account'] = pa.id cs = CategorySerializer(data=category) if cs.is_valid(): cs.save() else: logger.warning('Failed to import category %s (%s) due to errors: %s', category['uid'], category['name'], cs.errors) for recipe in recipes: # Remove categories if we're not bothering to import them if not categories: recipe['categories'] = [] recipe['paprika_account'] = pa.id rs = RecipeSerializer(data=recipe) if rs.is_valid(): rs.save() else: logger.warning('Failed to import recipe %s (%s) due to errors: %s', recipe['uid'], recipe['name'], rs.errors) # recipe_field_names = set([f.name for f in Recipe._meta.fields]) # Recipe.objects.create( # paprika_account=pa, # **{k: v for k, v in recipe.items() if k in recipe_field_names}, # ) logger.info('Finished recipe import successfully') # transaction.set_rollback(True)
35.978261
135
0.578248
2,972
0.897885
0
0
2,221
0.670997
0
0
1,016
0.306949
22b050a05912835a15d1f775a59389484ca92826
142
py
Python
scripts/update_asp_l1.py
sot/mica
136a9b0d9521efda5208067b51cf0c8700b4def3
[ "BSD-3-Clause" ]
null
null
null
scripts/update_asp_l1.py
sot/mica
136a9b0d9521efda5208067b51cf0c8700b4def3
[ "BSD-3-Clause" ]
150
2015-01-23T17:09:53.000Z
2022-01-10T00:50:54.000Z
scripts/update_asp_l1.py
sot/mica
136a9b0d9521efda5208067b51cf0c8700b4def3
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import mica.archive.asp_l1
mica.archive.asp_l1.main()
20.285714
63
0.760563
0
0
0
0
0
0
0
0
84
0.591549
22b2735d6e9bb2b53a0a0541af9ec0a4bc2db7e4
738
py
Python
pair.py
hhgarnes/python-validity
82b42e4fd152f10f75584de56502fd9ada299bb5
[ "MIT" ]
null
null
null
pair.py
hhgarnes/python-validity
82b42e4fd152f10f75584de56502fd9ada299bb5
[ "MIT" ]
null
null
null
pair.py
hhgarnes/python-validity
82b42e4fd152f10f75584de56502fd9ada299bb5
[ "MIT" ]
null
null
null
from time import sleep

from proto9x.usb import usb
from proto9x.tls import tls
from proto9x.flash import read_flash
from proto9x.init_flash import init_flash
from proto9x.upload_fwext import upload_fwext
from proto9x.calibrate import calibrate
from proto9x.init_db import init_db

#usb.trace_enabled=True
#tls.trace_enabled=True

def restart():
    print('Sleeping...')
    sleep(3)
    tls.reset()
    usb.open()
    usb.send_init()
    tls.parseTlsFlash(read_flash(1, 0, 0x1000))
    tls.open()

usb.open()
print('Initializing flash...')
init_flash()

restart()
print('Uploading firmware...')
upload_fwext()

restart()
print('Calibrating...')
calibrate()

print('Init database...')
init_db()

print('That\'s it, pairing\'s finished')
18.45
47
0.734417
0
0
0
0
0
0
0
0
172
0.233062
22b29bb3979813975d0a62cdf7e26438790eeb19
448
py
Python
output/models/ms_data/element/elem_q017_xsd/elem_q017.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
1
2021-08-14T17:59:21.000Z
2021-08-14T17:59:21.000Z
output/models/ms_data/element/elem_q017_xsd/elem_q017.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
4
2020-02-12T21:30:44.000Z
2020-04-15T20:06:46.000Z
output/models/ms_data/element/elem_q017_xsd/elem_q017.py
tefra/xsdata-w3c-tests
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field


@dataclass
class FooTest:
    class Meta:
        name = "fooTest"

    value: str = field(
        init=False,
        default="Hello"
    )


@dataclass
class Root:
    class Meta:
        name = "root"

    foo_test: str = field(
        init=False,
        default="Hello",
        metadata={
            "name": "fooTest",
            "type": "Element",
            "required": True,
        }
    )
15.448276
40
0.5
379
0.845982
0
0
401
0.895089
0
0
69
0.154018
22b2c7ab0a465a4d5e5a4f3cd082436d406520c8
43,545
py
Python
contrib_src/predict.py
modelhub-ai/mic-dkfz-brats
4522a26442f1e323f97aa45fbd5047bfe9029b2b
[ "MIT" ]
1
2020-01-09T11:45:26.000Z
2020-01-09T11:45:26.000Z
contrib_src/predict.py
modelhub-ai/mic-dkfz-brats
4522a26442f1e323f97aa45fbd5047bfe9029b2b
[ "MIT" ]
null
null
null
contrib_src/predict.py
modelhub-ai/mic-dkfz-brats
4522a26442f1e323f97aa45fbd5047bfe9029b2b
[ "MIT" ]
null
null
null
import json import os from collections import OrderedDict from copy import deepcopy import SimpleITK as sitk from batchgenerators.augmentations.utils import resize_segmentation # resize_softmax_output from skimage.transform import resize from torch.optim import lr_scheduler from torch import nn import numpy as np import torch from scipy.ndimage import binary_fill_holes ''' This code is not intended to be looked at by anyone. It is messy. It is undocumented. And the entire training pipeline is missing. ''' max_num_filters_3d = 320 max_num_filters_2d = 480 join = os.path.join def load_json(file): with open(file, 'r') as f: a = json.load(f) return a def resize_image(image, old_spacing, new_spacing, order=3, cval=0): new_shape = (int(np.round(old_spacing[0]/new_spacing[0]*float(image.shape[0]))), int(np.round(old_spacing[1]/new_spacing[1]*float(image.shape[1]))), int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2])))) if any([i != j for i, j in zip(image.shape, new_shape)]): res = resize(image, new_shape, order=order, mode='edge', cval=cval) else: res = image return res class ConvDropoutNormNonlin(nn.Module): def __init__(self, input_channels, output_channels, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None): super(ConvDropoutNormNonlin, self).__init__() if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs) if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[ 'p'] > 0: self.dropout = self.dropout_op(**self.dropout_op_kwargs) else: self.dropout = None self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs) self.lrelu = nn.LeakyReLU(**self.nonlin_kwargs) def forward(self, x): x = self.conv(x) if self.dropout is not None: x = self.dropout(x) return self.lrelu(self.instnorm(x)) def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None): if kwargs is None: kwargs = {} if new_shape is not None: old_shape = np.array(image.shape[-len(new_shape):]) else: assert shape_must_be_divisible_by is not None assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)) new_shape = image.shape[-len(shape_must_be_divisible_by):] old_shape = new_shape num_axes_nopad = len(image.shape) - len(new_shape) new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))] if not isinstance(new_shape, np.ndarray): new_shape = np.array(new_shape) if shape_must_be_divisible_by is not None: if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)): shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape) else: assert len(shape_must_be_divisible_by) == len(new_shape) for i in range(len(new_shape)): if new_shape[i] % shape_must_be_divisible_by[i] == 0: new_shape[i] -= shape_must_be_divisible_by[i] 
new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))]) difference = new_shape - old_shape pad_below = difference // 2 pad_above = difference // 2 + difference % 2 pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)]) res = np.pad(image, pad_list, mode, **kwargs) if not return_slicer: return res else: pad_list = np.array(pad_list) pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1] slicer = list(slice(*i) for i in pad_list) return res, slicer class NeuralNetwork(nn.Module): def __init__(self): super(NeuralNetwork, self).__init__() def get_device(self): if next(self.parameters()).device == "cpu": return "cpu" else: return next(self.parameters()).device.index def set_device(self, device): if device == "cpu": self.cpu() else: self.cuda(device) def forward(self, x): raise NotImplementedError class SegmentationNetwork(NeuralNetwork): def __init__(self): self.input_shape_must_be_divisible_by = None self.conv_op = None super(NeuralNetwork, self).__init__() self.inference_apply_nonlin = lambda x:x def predict_3D(self, x, do_mirroring, num_repeats=1, use_train_mode=False, batch_size=1, mirror_axes=(2, 3, 4), tiled=False, tile_in_z=True, step=2, patch_size=None, regions_class_order=None, use_gaussian=False, pad_border_mode="edge", pad_kwargs=None): """ :param x: (c, x, y , z) :param do_mirroring: :param num_repeats: :param use_train_mode: :param batch_size: :param mirror_axes: :param tiled: :param tile_in_z: :param step: :param patch_size: :param regions_class_order: :param use_gaussian: :return: """ current_mode = self.training if use_train_mode is not None and use_train_mode: self.train() elif use_train_mode is not None and not use_train_mode: self.eval() else: pass assert len(x.shape) == 4, "data must have shape (c,x,y,z)" if self.conv_op == nn.Conv3d: if tiled: res = self._internal_predict_3D_3Dconv_tiled(x, num_repeats, batch_size, tile_in_z, step, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs=pad_kwargs) else: res = self._internal_predict_3D_3Dconv(x, do_mirroring, num_repeats, patch_size, batch_size, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs) elif self.conv_op == nn.Conv2d: if tiled: res = self._internal_predict_3D_2Dconv_tiled(x, do_mirroring, num_repeats, batch_size, mirror_axes, step, patch_size, regions_class_order, use_gaussian, pad_border_mode, pad_kwargs=pad_kwargs) else: res = self._internal_predict_3D_2Dconv(x, do_mirroring, num_repeats, patch_size, batch_size, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs=pad_kwargs) else: raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is") if use_train_mode is not None: self.train(current_mode) return res def _internal_maybe_mirror_and_pred_3D(self, x, num_repeats, mirror_axes, do_mirroring=True): with torch.no_grad(): a = torch.zeros(x.shape).float() if self.get_device() == "cpu": a = a.cpu() else: a = a.cuda(self.get_device()) if do_mirroring: mirror_idx = 8 else: mirror_idx = 1 all_preds = [] for i in range(num_repeats): for m in range(mirror_idx): data_for_net = np.array(x) do_stuff = False if m == 0: do_stuff = True pass if m == 1 and (4 in mirror_axes): do_stuff = True data_for_net = data_for_net[:, :, :, :, ::-1] if m == 2 and (3 in mirror_axes): do_stuff = True data_for_net = data_for_net[:, :, :, ::-1, :] if m == 3 and (4 in mirror_axes) and (3 in mirror_axes): 
do_stuff = True data_for_net = data_for_net[:, :, :, ::-1, ::-1] if m == 4 and (2 in mirror_axes): do_stuff = True data_for_net = data_for_net[:, :, ::-1, :, :] if m == 5 and (2 in mirror_axes) and (4 in mirror_axes): do_stuff = True data_for_net = data_for_net[:, :, ::-1, :, ::-1] if m == 6 and (2 in mirror_axes) and (3 in mirror_axes): do_stuff = True data_for_net = data_for_net[:, :, ::-1, ::-1, :] if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes): do_stuff = True data_for_net = data_for_net[:, :, ::-1, ::-1, ::-1] if do_stuff: _ = a.data.copy_(torch.from_numpy(np.copy(data_for_net))) p = self.inference_apply_nonlin(self(a)) p = p.data.cpu().numpy() if m == 0: pass if m == 1 and (4 in mirror_axes): p = p[:, :, :, :, ::-1] if m == 2 and (3 in mirror_axes): p = p[:, :, :, ::-1, :] if m == 3 and (4 in mirror_axes) and (3 in mirror_axes): p = p[:, :, :, ::-1, ::-1] if m == 4 and (2 in mirror_axes): p = p[:, :, ::-1, :, :] if m == 5 and (2 in mirror_axes) and (4 in mirror_axes): p = p[:, :, ::-1, :, ::-1] if m == 6 and (2 in mirror_axes) and (3 in mirror_axes): p = p[:, :, ::-1, ::-1, :] if m == 7 and (2 in mirror_axes) and (3 in mirror_axes) and (4 in mirror_axes): p = p[:, :, ::-1, ::-1, ::-1] all_preds.append(p) return np.vstack(all_preds) def _internal_predict_3D_3Dconv(self, x, do_mirroring, num_repeats, min_size=None, BATCH_SIZE=None, mirror_axes=(2, 3, 4), regions_class_order=None, pad_border_mode="edge", pad_kwargs=None): with torch.no_grad(): x, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True, self.input_shape_must_be_divisible_by) #x, old_shape = pad_patient_3D_incl_c(x, self.input_shape_must_be_divisible_by, min_size) new_shp = x.shape data = np.zeros(tuple([1] + list(new_shp)), dtype=np.float32) data[0] = x if BATCH_SIZE is not None: data = np.vstack([data] * BATCH_SIZE) stacked = self._internal_maybe_mirror_and_pred_3D(data, num_repeats, mirror_axes, do_mirroring) slicer = [slice(0, stacked.shape[i]) for i in range(len(stacked.shape) - (len(slicer) - 1))] + slicer[1:] stacked = stacked[slicer] uncertainty = stacked.var(0) bayesian_predictions = stacked softmax_pred = stacked.mean(0) if regions_class_order is None: predicted_segmentation = softmax_pred.argmax(0) else: predicted_segmentation_shp = softmax_pred[0].shape predicted_segmentation = np.zeros(predicted_segmentation_shp) for i, c in enumerate(regions_class_order): predicted_segmentation[softmax_pred[i] > 0.5] = c return predicted_segmentation, bayesian_predictions, softmax_pred, uncertainty def softmax_helper(x): rpt = [1 for _ in range(len(x.size()))] rpt[1] = x.size(1) x_max = x.max(1, keepdim=True)[0].repeat(*rpt) e_x = torch.exp(x - x_max) return e_x / e_x.sum(1, keepdim=True).repeat(*rpt) class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=1e-2) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class StackedConvLayers(nn.Module): def __init__(self, input_feature_channels, output_feature_channels, num_convs, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None): self.input_channels = input_feature_channels self.output_channels = 
output_feature_channels if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p': 0.5, 'inplace': True} if norm_op_kwargs is None: norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True} self.nonlin_kwargs = nonlin_kwargs self.nonlin = nonlin self.dropout_op = dropout_op self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.conv_op = conv_op self.norm_op = norm_op if first_stride is not None: self.conv_kwargs_first_conv = deepcopy(conv_kwargs) self.conv_kwargs_first_conv['stride'] = first_stride else: self.conv_kwargs_first_conv = conv_kwargs super(StackedConvLayers, self).__init__() self.blocks = nn.Sequential( *([ConvDropoutNormNonlin(input_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs_first_conv, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs)] + [ConvDropoutNormNonlin(output_feature_channels, output_feature_channels, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)])) def forward(self, x): return self.blocks(x) def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.): axes = tuple(range(2, len(net_output.size()))) intersect = sum_tensor(net_output * gt, axes, keepdim=False) denom = sum_tensor(net_output + gt, axes, keepdim=False) result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean() return result def sum_tensor(input, axes, keepdim=False): axes = np.unique(axes) if keepdim: for ax in axes: input = input.sum(ax, keepdim=True) else: for ax in sorted(axes, reverse=True): input = input.sum(ax) return input class Generic_UNet_Cotraining(SegmentationNetwork): def __init__(self, input_channels, base_num_features, num_classes, num_conv_per_stage=2, num_downscale=4, feat_map_mul_on_downscale=2, conv_op=nn.Conv2d, conv_kwargs=None, norm_op=nn.BatchNorm2d, norm_op_kwargs=None, dropout_op=nn.Dropout2d, dropout_op_kwargs=None, nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False, final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None, upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False): """ Have fun lookint at that one. This is my go-to model. I crammed the cotraining code in there somehow, so yeah. What a mess. You know what's the best part? No documentation. What a great piece of code. 
:param input_channels: :param base_num_features: :param num_classes: :param num_conv_per_stage: :param num_downscale: :param feat_map_mul_on_downscale: :param conv_op: :param conv_kwargs: :param norm_op: :param norm_op_kwargs: :param dropout_op: :param dropout_op_kwargs: :param nonlin: :param nonlin_kwargs: :param deep_supervision: :param dropout_in_localization: :param final_nonlin: :param weightInitializer: :param pool_op_kernel_sizes: :param upscale_logits: :param convolutional_pooling: :param convolutional_upsampling: """ super(Generic_UNet_Cotraining, self).__init__() assert isinstance(num_classes, (list, tuple)), "for cotraining, num_classes must be list or tuple of int" self.num_classes = num_classes self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0) self.pool_op_kernel_sizes = pool_op_kernel_sizes self.convolutional_upsampling = convolutional_upsampling self.convolutional_pooling = convolutional_pooling self.upscale_logits = upscale_logits if nonlin_kwargs is None: nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True} if dropout_op_kwargs is None: dropout_op_kwargs = {'p':0.5, 'inplace':True} if norm_op_kwargs is None: norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.1} if conv_kwargs is None: conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True} self.nonlin = nonlin self.nonlin_kwargs = nonlin_kwargs self.dropout_op_kwargs = dropout_op_kwargs self.norm_op_kwargs = norm_op_kwargs self.conv_kwargs = conv_kwargs self.weightInitializer = weightInitializer self.conv_op = conv_op self.norm_op = norm_op self.dropout_op = dropout_op if pool_op_kernel_sizes is None: if conv_op == nn.Conv2d: pool_op_kernel_sizes = [(2, 2)] * num_downscale elif conv_op == nn.Conv3d: pool_op_kernel_sizes = [(2, 2, 2)] * num_downscale else: raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op)) self.pool_op_kernel_sizes = pool_op_kernel_sizes self.final_nonlin = final_nonlin assert num_conv_per_stage > 1, "this implementation does not support only one conv per stage" if conv_op == nn.Conv2d: upsample_mode = 'bilinear' pool_op = nn.MaxPool2d transpconv = nn.ConvTranspose2d elif conv_op == nn.Conv3d: upsample_mode = 'trilinear' pool_op = nn.MaxPool3d transpconv = nn.ConvTranspose3d else: raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op)) self.do_ds = deep_supervision self.conv_blocks_context = [] self.conv_blocks_localization = [] self.td = [] self.tu = [] self.seg_outputs = [] output_features = base_num_features input_features = input_channels for d in range(num_downscale): if d != 0 and self.convolutional_pooling: first_stride = pool_op_kernel_sizes[d-1] else: first_stride = None self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, first_stride)) if not self.convolutional_pooling: self.td.append(pool_op(pool_op_kernel_sizes[d])) input_features = output_features output_features = int(np.round(output_features * feat_map_mul_on_downscale)) if self.conv_op == nn.Conv3d: output_features = min(output_features, max_num_filters_3d) else: output_features = min(output_features, max_num_filters_2d) if self.convolutional_pooling: first_stride = pool_op_kernel_sizes[-1] else: first_stride = None if self.convolutional_upsampling: final_num_features = output_features else: final_num_features = 
self.conv_blocks_context[-1].output_channels self.conv_blocks_context.append(nn.Sequential( StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, first_stride), StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs))) if not dropout_in_localization: old_dropout_p = self.dropout_op_kwargs['p'] self.dropout_op_kwargs['p'] = 0.0 for u in range(num_downscale): nfeatures_from_down = final_num_features nfeatures_from_skip = self.conv_blocks_context[-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2 n_features_after_tu_and_concat = nfeatures_from_skip * 2 # the first conv reduces the number of features to match those of skip # the following convs work on that number of features # if not convolutional upsampling then the final conv reduces the num of features again if u != num_downscale-1 and not self.convolutional_upsampling: final_num_features = self.conv_blocks_context[-(3 + u)].output_channels else: final_num_features = nfeatures_from_skip if not self.convolutional_upsampling: self.tu.append(nn.Upsample(scale_factor=pool_op_kernel_sizes[-(u+1)], mode=upsample_mode)) else: self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u+1)], pool_op_kernel_sizes[-(u+1)], bias=False)) self.conv_blocks_localization.append(nn.Sequential( StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs), StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs) )) for ds in range(len(self.conv_blocks_localization)): self.seg_outputs.append(nn.ModuleList([conv_op(self.conv_blocks_localization[ds][-1].output_channels, i, 1, 1, 0, 1, 1, False) for i in num_classes])) self.upscale_logits_ops = [] cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1] for usl in range(num_downscale - 1): if self.upscale_logits: self.upscale_logits_ops.append(nn.Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl+1]]), mode=upsample_mode)) else: self.upscale_logits_ops.append(lambda x: x) if not dropout_in_localization: self.dropout_op_kwargs['p'] = old_dropout_p # register all modules properly self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization) self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context) self.td = nn.ModuleList(self.td) self.tu = nn.ModuleList(self.tu) self.seg_outputs = nn.ModuleList(self.seg_outputs) if self.upscale_logits: self.upscale_logits_ops = nn.ModuleList(self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here self.apply(self.weightInitializer) self.test_return_output = 0 self.inference = False def train(self, mode=True): super(Generic_UNet_Cotraining, self).train(mode) def eval(self): super(Generic_UNet_Cotraining, self).eval() def infer(self, infer): self.train(False) self.inference = infer def forward(self, x): #input_var = x skips = [] seg_outputs = [] for d in range(len(self.conv_blocks_context) - 1): x = 
self.conv_blocks_context[d](x) skips.append(x) if not self.convolutional_pooling: x = self.td[d](x) x = self.conv_blocks_context[-1](x) for u in range(len(self.tu)): x = self.tu[u](x) x = torch.cat((x, skips[-(u + 1)]), dim=1) x = self.conv_blocks_localization[u](x) if not self.inference: seg_outputs.append([self.final_nonlin(self.seg_outputs[u][i](x[(x.shape[0]//len(self.num_classes) * i): (x.shape[0]//len(self.num_classes) * (i+1))])) for i in range(len(self.num_classes))]) else: seg_outputs.append(self.final_nonlin(self.seg_outputs[u][self.test_return_output](x))) if self.do_ds: return tuple([seg_outputs[-1]] + [i(j) for i, j in zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])]) else: return seg_outputs[-1] class NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE(object): def __init__(self): self.preprocessed_data_directory = None # set through arguments from init self.experiment_name = "baseline_inspired_by_decathlon 2_regions_cotraining brats dec sd ce" self.experiment_description = "NetworkTrainerBraTS2018Baseline 2_regions_cotraining brats dec sd ce" self.output_folder = 'model/params' self.dataset_directory = None self.device = 0 self.fold = 0 self.preprocessed_data_directory = None self.gt_niftis_folder = None # set in self.initialize() self.network = None self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \ self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = None # loaded automatically from plans_file self.basic_generator_patch_size = self.data_aug_params = self.plans = None self.was_initialized = False self.also_val_in_tr_mode = False self.dataset = None self.inference_apply_nonlin = nn.Sigmoid() def initialize(self, training=True): if not os.path.isdir(self.output_folder): os.mkdir(self.output_folder) self.output_folder = os.path.join(self.output_folder, "fold%d" % self.fold) if not os.path.isdir(self.output_folder): os.mkdir(self.output_folder) self.process_plans_file() if training: raise NotImplementedError self.initialize_network_optimizer_and_scheduler() self.network.inference_apply_nonlin = self.inference_apply_nonlin self.was_initialized = True def initialize_network_optimizer_and_scheduler(self): net_numpool = max(self.net_pool_per_axis) net_pool_kernel_sizes = [] for s in range(1, net_numpool+1): this_pool_kernel_sizes = [1, 1, 1] if self.net_pool_per_axis[0] >= s: this_pool_kernel_sizes[0] = 2 if self.net_pool_per_axis[1] >= s: this_pool_kernel_sizes[1] = 2 if len(self.patch_size)>2: if self.net_pool_per_axis[2] >= s: this_pool_kernel_sizes[2] = 2 else: this_pool_kernel_sizes = this_pool_kernel_sizes[:-1] net_pool_kernel_sizes.append(tuple(this_pool_kernel_sizes)) if self.threeD: conv_op = nn.Conv3d dropout_op = nn.Dropout3d norm_op = nn.InstanceNorm3d else: conv_op = nn.Conv2d dropout_op = nn.Dropout2d norm_op = nn.InstanceNorm2d conv_kwargs = {'kernel_size':3, 'stride':1, 'padding':1, 'dilation':1, 'bias':True} norm_op_kwargs = {'eps':1e-5, 'affine':True, 'momentum':0.02, 'track_running_stats':False} dropout_op_kwargs = {'p':0, 'inplace':True} net_nonlin = nn.LeakyReLU net_nonlin_kwargs = {'negative_slope':1e-2, 'inplace':True} self.network = Generic_UNet_Cotraining(self.num_input_channels, self.base_num_features, self.num_classes, 2, net_numpool, 2, conv_op, conv_kwargs, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x:x, InitWeights_He(1e-2), net_pool_kernel_sizes, True, False, 
False) self.optimizer = None self.lr_scheduler = None self.network.set_device(self.device) def process_plans_file(self): self.batch_size = 2 self.net_pool_per_axis = [4, 4, 4] self.patch_size = (128, 128, 128) self.intensity_properties = None self.normalization_schemes = ["nonCT"] * 4 self.base_num_features = 30 self.num_input_channels = 4 self.do_dummy_2D_aug = False self.use_mask_for_norm = True self.only_keep_largest_connected_component = {(0, ): False} if len(self.patch_size) == 2: self.threeD = False elif len(self.patch_size) == 3: self.threeD = True else: raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size)) self.regions = ((1, 2, 3, 4), (2, 3, 4), (2,)) self.regions_class_order = (1, 3, 2) self.batch_size = 2 self.base_num_features = 30 self.num_classes = (3, 3) def predict_preprocessed_data_return_softmax(self, data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian): return self.network.predict_3D(data, do_mirroring, num_repeats, use_train_mode, batch_size, mirror_axes, tiled, tile_in_z, step, min_size, use_gaussian=use_gaussian)[2] def load_best_checkpoint(self, train=True): self.load_checkpoint(os.path.join(self.output_folder, "model_best.model"), train=train) def load_checkpoint(self, fname, train=True): print("loading checkpoint", fname, "train=", train) if not self.was_initialized: self.initialize() saved_model = torch.load(fname) new_state_dict = OrderedDict() for k, value in saved_model['state_dict'].items(): key = k new_state_dict[key] = value self.network.load_state_dict(new_state_dict) self.epoch = saved_model['epoch'] if train: optimizer_state_dict = saved_model['optimizer_state_dict'] if optimizer_state_dict is not None: self.optimizer.load_state_dict(optimizer_state_dict) if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau): self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict']) if len(saved_model['plot_stuff']) < 9: self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \ self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \ self.all_val_eval_metrics_dc_glob = saved_model['plot_stuff'] self.all_val_eval_metrics_dc_per_sample_std = [] else: self.all_tr_losses_x, self.all_tr_losses, self.all_tr_eval_metrics, self.all_val_losses_x, \ self.all_val_losses, self.all_val_eval_metrics_dc_per_sample, self.all_val_losses_tr_mode, \ self.all_val_eval_metrics_dc_glob, self.all_val_eval_metrics_dc_per_sample_std = saved_model['plot_stuff'] self.network.set_device(self.device) def resize_softmax_output(softmax_output, new_shape, order=3): ''' Resizes softmax output. Resizes each channel in c separately and fuses results back together :param softmax_output: c x x x y x z :param new_shape: x x y x z :param order: :return: ''' tpe = softmax_output.dtype new_shp = [softmax_output.shape[0]] + list(new_shape) result = np.zeros(new_shp, dtype=softmax_output.dtype) for i in range(softmax_output.shape[0]): result[i] = resize(softmax_output[i].astype(float), new_shape, order, "constant", 0, True) return result.astype(tpe) def save_segmentation_nifti_softmax(softmax_output, dct, out_fname, order=3, region_class_order=None): ''' segmentation must have the same spacing as the original nifti (for now). 
segmentation may have been cropped out of the original image :param segmentation: :param dct: :param out_fname: :return: ''' old_size = dct.get('size_before_cropping') bbox = dct.get('brain_bbox') if bbox is not None: seg_old_size = np.zeros([softmax_output.shape[0]] + list(old_size)) for c in range(3): bbox[c][1] = np.min((bbox[c][0] + softmax_output.shape[c+1], old_size[c])) seg_old_size[:, bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1], bbox[2][0]:bbox[2][1]] = softmax_output else: seg_old_size = softmax_output segmentation = resize_softmax_output(seg_old_size, np.array(dct['size'])[[2, 1, 0]], order=order) if region_class_order is None: segmentation = segmentation.argmax(0) else: seg_old_spacing_final = np.zeros(segmentation.shape[1:]) for i, c in enumerate(region_class_order): seg_old_spacing_final[segmentation[i] > 0.5] = c segmentation = seg_old_spacing_final return segmentation.astype(np.uint8) def subfiles(folder, join=True, prefix=None, suffix=None, sort=True): if join: l = os.path.join else: l = lambda x, y: y res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i)) and (prefix is None or i.startswith(prefix)) and (suffix is None or i.endswith(suffix))] if sort: res.sort() return res def maybe_mkdir_p(directory): splits = directory.split("/")[1:] for i in range(0, len(splits)): if not os.path.isdir(os.path.join("/", *splits[:i+1])): os.mkdir(os.path.join("/", *splits[:i+1])) def convert_labels_back(seg): new_seg = np.zeros(seg.shape, dtype=seg.dtype) new_seg[seg == 1] = 2 new_seg[seg == 2] = 4 new_seg[seg == 3] = 1 return new_seg def preprocess_image(itk_image, is_seg=False, spacing_target=(1, 0.5, 0.5), brain_mask=None, cval=0): """ brain mask must be a numpy array that has the same shape as itk_image's pixel array. This function is not ideal but gets the job done :param itk_image: :param is_seg: :param spacing_target: :param brain_mask: :return: """ spacing = np.array(itk_image.GetSpacing())[[2, 1, 0]] image = sitk.GetArrayFromImage(itk_image).astype(float) if not is_seg: if brain_mask is None: brain_mask = (image!=image[0,0,0]).astype(float) if np.any([[i!=j] for i, j in zip(spacing, spacing_target)]): image = resize_image(image, spacing, spacing_target, 3, cval).astype(np.float32) brain_mask = resize_image(brain_mask.astype(float), spacing, spacing_target, order=0).astype(int) image[brain_mask==0] = 0 #subtract mean, divide by std. 
use heuristic masking image[brain_mask!=0] -= image[brain_mask!=0].mean() image[brain_mask!=0] /= image[brain_mask!=0].std() else: new_shape = (int(np.round(spacing[0] / spacing_target[0] * float(image.shape[0]))), int(np.round(spacing[1] / spacing_target[1] * float(image.shape[1]))), int(np.round(spacing[2] / spacing_target[2] * float(image.shape[2])))) image = resize_segmentation(image, new_shape, 1, cval) return image def create_brain_masks(data): """ data must be (b, c, x, y, z), brain mask is hole filled binary mask where all sequences are 0 (this is a heuristic to recover a brain mask form brain extracted mri sequences, not an actual brain ectraction) :param data: :return: """ shp = list(data.shape) brain_mask = np.zeros(shp, dtype=np.float32) for b in range(data.shape[0]): for c in range(data.shape[1]): this_mask = data[b, c] != 0 this_mask = binary_fill_holes(this_mask) brain_mask[b, c] = this_mask return brain_mask def extract_brain_region(image, segmentation, outside_value=0): brain_voxels = np.where(segmentation != outside_value) minZidx = int(np.min(brain_voxels[0])) maxZidx = int(np.max(brain_voxels[0])) minXidx = int(np.min(brain_voxels[1])) maxXidx = int(np.max(brain_voxels[1])) minYidx = int(np.min(brain_voxels[2])) maxYidx = int(np.max(brain_voxels[2])) # resize images resizer = (slice(minZidx, maxZidx), slice(minXidx, maxXidx), slice(minYidx, maxYidx)) return image[resizer], [[minZidx, maxZidx], [minXidx, maxXidx], [minYidx, maxYidx]] def load_and_preprocess(t1_file, t1km_file, t2_file, flair_file, seg_file=None, bet_file=None, encode_bet_mask_in_seg=False, label_conversion_fn=None): images = {} # t1 images["T1"] = sitk.ReadImage(t1_file) # t1km images["T1KM"] = sitk.ReadImage(t1km_file) properties_dict = { "spacing": images["T1"].GetSpacing(), "direction": images["T1"].GetDirection(), "size": images["T1"].GetSize(), "origin": images["T1"].GetOrigin() } # t2 images["T2"] = sitk.ReadImage(t2_file) # flair images["FLAIR"] = sitk.ReadImage(flair_file) if seg_file is not None: images['seg'] = sitk.ReadImage(seg_file) if bet_file is not None: images['bet_mask'] = sitk.ReadImage(bet_file) else: t1_npy = sitk.GetArrayFromImage(images["T1"]) mask = create_brain_masks(t1_npy[None])[0].astype(int) mask = sitk.GetImageFromArray(mask) mask.CopyInformation(images["T1"]) images['bet_mask'] = mask try: images["t1km_sub"] = images["T1KM"] - images["T1"] except RuntimeError: tmp1 = sitk.GetArrayFromImage(images["T1KM"]) tmp2 = sitk.GetArrayFromImage(images["T1"]) res = tmp1 - tmp2 res_itk = sitk.GetImageFromArray(res) res_itk.CopyInformation(images["T1"]) images["t1km_sub"] = res_itk for k in ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub"]: images[k] = sitk.Mask(images[k], images['bet_mask'], 0) bet_numpy = sitk.GetArrayFromImage(images['bet_mask']) for k in images.keys(): is_seg = (k == "seg") | (k == "bet_mask") if is_seg: cval = -1 else: cval = 0 images[k] = preprocess_image(images[k], is_seg=is_seg, spacing_target=(1., 1., 1.), brain_mask=np.copy(bet_numpy), cval=cval) properties_dict['size_before_cropping'] = images["T1"].shape mask = np.copy(images['bet_mask']) for k in images.keys(): images[k], bbox = extract_brain_region(images[k], mask, False) properties_dict['brain_bbox'] = bbox if (label_conversion_fn is not None) and ("seg" in images.keys()): images["seg"] = label_conversion_fn(images["seg"]) use_these = ['T1', 'T1KM', 'T2', 'FLAIR', "t1km_sub", 'seg'] if (not encode_bet_mask_in_seg) or ("seg" not in images.keys()): use_these.append("bet_mask") else: images["seg"][images["bet_mask"] 
<= 0] = -1 imgs = [] for seq in use_these: if seq not in images.keys(): imgs.append(np.zeros(images["T1"].shape)[None]) else: imgs.append(images[seq][None]) all_data = np.vstack(imgs) return all_data, properties_dict def segment(t1_file, t1ce_file, t2_file, flair_file, netLoc): """ Segments the passed files """ trainer = NetworkTrainerBraTS2018Baseline2RegionsCotrainingBraTSDecSDCE() trainer.initialize(False) all_data, dct = load_and_preprocess(t1_file, t1ce_file, t2_file, flair_file, None, None, True, None) all_softmax = [] for fold in range(5): trainer.output_folder = join(netLoc, "%d" % fold) trainer.load_best_checkpoint(False) trainer.network.infer(True) trainer.network.test_return_output = 0 softmax = trainer.predict_preprocessed_data_return_softmax(all_data[:4], True, 1, False, 1, (2, 3, 4), False, None, None, trainer.patch_size, True) all_softmax.append(softmax[None]) softmax_consolidated = np.vstack(all_softmax).mean(0) output = save_segmentation_nifti_softmax(softmax_consolidated, dct, "tumor_isen2018_class.nii.gz", 1, trainer.regions_class_order) return output
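The mirror loop in _internal_maybe_mirror_and_pred_3D above enumerates all eight flip combinations over the three spatial axes, undoes each flip on the prediction, and averages the results (test-time augmentation). The following is a minimal sketch of that idea in plain NumPy, not code from the repository; fake_model is a hypothetical stand-in for the network's forward pass.

import itertools
import numpy as np

def fake_model(volume):
    # placeholder "network": identity map, shape (c, x, y, z) -> (c, x, y, z)
    return volume

def mirrored_prediction(volume, mirror_axes=(1, 2, 3)):
    preds = []
    for k in range(len(mirror_axes) + 1):
        for axes in itertools.combinations(mirror_axes, k):
            flipped = np.flip(volume, axes) if axes else volume
            pred = fake_model(flipped)
            preds.append(np.flip(pred, axes) if axes else pred)
    # 2**len(mirror_axes) predictions, flipped back and averaged
    return np.mean(preds, axis=0)

print(mirrored_prediction(np.random.rand(4, 8, 8, 8)).shape)  # (4, 8, 8, 8)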
43.807847
206
0.610288
30,431
0.69884
0
0
0
0
0
0
4,777
0.109703
22b364d4334f94cc1d058ea248dee07fc3c34b86
982
py
Python
plot/finderror.py
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
94b8f205b12f0cc59ae8e19b2e6099f34be929d6
[ "MIT" ]
null
null
null
plot/finderror.py
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
94b8f205b12f0cc59ae8e19b2e6099f34be929d6
[ "MIT" ]
null
null
null
plot/finderror.py
architsakhadeo/Offline-Hyperparameter-Tuning-for-RL
94b8f205b12f0cc59ae8e19b2e6099f34be929d6
[ "MIT" ]
null
null
null
import os

basepath = '/home/archit/scratch/cartpoles/data/hyperparam/cartpole/offline_learning/esarsa-adam/'
dirs = os.listdir(basepath)
string = ''''''
for dir in dirs:
    print(dir)
    subbasepath = basepath + dir + '/'
    subdirs = os.listdir(subbasepath)
    for subdir in subdirs:
        print(subdir)
        subsubbasepath = subbasepath + subdir + '/'
        subsubdirs = os.listdir(subsubbasepath)
        string += subsubbasepath + '\n'
        content = []
        for i in range(0, len(subsubdirs)-1):
            for j in range(i+1, len(subsubdirs)):
                a = os.system('diff ' + subsubbasepath + subsubdirs[i] + '/log_json.txt ' + subsubbasepath + subsubdirs[j] + '/log_json.txt')
                content.append([a, subsubdirs[i], subsubdirs[j]])
        filteredcontent = [i for i in content if i[0] == 0]
        for i in range(len(filteredcontent)):
            string += ' and '.join(filteredcontent[i][1:])
            if i != len(filteredcontent) - 1:
                string += ', '
        string += '\n\n'
f = open('offlinelearningerrors.txt', 'w')
f.write(string)
f.close()
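The script above detects duplicated runs by shelling out to diff for every pair of log files and keeping the pairs whose exit code is 0. A hedged, subprocess-free alternative is to hash each log once and group paths by digest; the helper below is only a sketch and the paths in the comment are hypothetical.

import hashlib
from collections import defaultdict

def group_identical(paths):
    # hash each file once, then group paths that share a digest
    by_digest = defaultdict(list)
    for path in paths:
        with open(path, 'rb') as f:
            by_digest[hashlib.sha256(f.read()).hexdigest()].append(path)
    return [group for group in by_digest.values() if len(group) > 1]

# Example (paths are hypothetical):
# duplicates = group_identical(['run0/log_json.txt', 'run1/log_json.txt'])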
33.862069
129
0.669043
0
0
0
0
0
0
0
0
188
0.191446
22b4a1c6f88314760073b0d207d79b3e4653b1cf
4,848
py
Python
src/pybacked/zip_handler.py
bluePlatinum/pyback
1c12a52974232b0482981c12a9af27e52dd2190e
[ "MIT" ]
null
null
null
src/pybacked/zip_handler.py
bluePlatinum/pyback
1c12a52974232b0482981c12a9af27e52dd2190e
[ "MIT" ]
null
null
null
src/pybacked/zip_handler.py
bluePlatinum/pyback
1c12a52974232b0482981c12a9af27e52dd2190e
[ "MIT" ]
null
null
null
import os import shutil import tempfile import zipfile def archive_write(archivepath, data, filename, compression, compressionlevel): """ Create a file named filename in the archive and write data to it :param archivepath: The path to the zip-archive :type archivepath: str :param data: The data to be written to the file :type data: str :param filename: The filename for the newly created file :type filename: str :param compression: The desired compression for the zip-archive :type compression: int :param compressionlevel: The desired compression level for the zip-archive :type compressionlevel: int :return: void """ archive = zipfile.ZipFile(archivepath, mode='a', compression=compression, compresslevel=compressionlevel) archive.writestr(filename, data) archive.close() def create_archive(archivepath, filedict, compression, compressionlevel): """ Write filedict to zip-archive data subdirectory. Will check wether archive at archivepath exists before writing. If file exists will raise a FileExistsError. :param archivepath: the path to the file :param filedict: dictionary containing the filepath, filename key-value pairs :param compression: desired compression methods (see zipfile documentation) :param compressionlevel: compression level (see zipfile documentation) :return: void """ if os.path.isfile(archivepath): raise FileExistsError("Specified file already exists") else: archive = zipfile.ZipFile(archivepath, mode='x', compression=compression, compresslevel=compressionlevel) for filepath, filename in filedict.items(): archive.write(filepath, arcname="data/" + filename) archive.close() def extract_archdata(archivepath, filename, destination): """ Extract a file from a archive and write it to the destination. If the destination path already exists extract_archdata will not overwrite but will throw a "FileExists" error. :param archivepath: The path to the archive containing the file :type archivepath: str :param filename: The archive name of the desired file. :type filename: str :param destination: The path at which the extracted file is to be placed. :type destination: str :return: void :rtype: None """ # check if destination path already exists if os.path.exists(destination): raise FileExistsError("The specified destination is already in use") archive = zipfile.ZipFile(archivepath, mode='r') with tempfile.TemporaryDirectory() as tmpdir: archive.extract(filename, path=tmpdir) # create directories for the destination os.makedirs(os.path.dirname(destination), exist_ok=True) shutil.copy(os.path.abspath(tmpdir + "/" + filename), destination) def read_bin(archivepath, filelist): """ Read a list of files from an archive and return the file data as a dictionary of filename, data key-value pairs. :param archivepath: the path to the archive :param filelist: list of filenames to read :return: dictionary with filename, data key-value pairs :rtype: dict """ datadict = dict() if os.path.isfile(archivepath): archive = zipfile.ZipFile(archivepath, mode='r') else: raise FileNotFoundError("Specified file does not exist") for filename in filelist: try: file = archive.open(filename) datadict[filename] = file.read().decode() file.close() except KeyError: datadict[filename] = None archive.close() return datadict def read_diff_log(archivepath): """ Read the diff-log.csv from a given archive file. :param archivepath: The path to the zip-archive :type archivepath: str :return: The diff-log.csv contents in ascii string form. 
:rtype: str """ arch = zipfile.ZipFile(archivepath, mode='r') diff_log_file = arch.open("diff-log.csv") diff_log_bin = diff_log_file.read() diff_log = diff_log_bin.decode() diff_log_file.close() arch.close() return diff_log def zip_extract(archivepath, filelist, extractpath): """ Extract a list of files to a specific location :param archivepath: the path to the zip-archive :param filelist: list of member filenames to extract :param extractpath: path for the extracted files :return: void """ if os.path.isfile(archivepath): archive = zipfile.ZipFile(archivepath, mode='r') else: raise FileNotFoundError("Specified file does not exist") archive.extractall(path=extractpath, members=filelist) archive.close()
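A hedged usage sketch for the helpers above. It assumes the package is importable as pybacked.zip_handler (matching the repository layout), uses a throwaway temporary directory, and picks zipfile.ZIP_DEFLATED with level 6 purely for illustration.

import os
import tempfile
import zipfile

from pybacked import zip_handler

with tempfile.TemporaryDirectory() as tmp:
    archive = os.path.join(tmp, "backup.zip")
    # start an empty archive, append one member, then read it back
    zip_handler.create_archive(archive, {}, zipfile.ZIP_DEFLATED, 6)
    zip_handler.archive_write(archive, "hello", "notes.txt",
                              zipfile.ZIP_DEFLATED, 6)
    print(zip_handler.read_bin(archive, ["notes.txt"]))  # {'notes.txt': 'hello'}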
33.902098
79
0.679868
0
0
0
0
0
0
0
0
2,583
0.532797
22b5613b1a36e6519fc3f676eadefe6b4b566ae1
968
py
Python
src/query_planner/abstract_scan_plan.py
imvinod/Eva
0ed9814ae89db7dce1fb734dc99d5dac69cb3c82
[ "Apache-2.0" ]
1
2019-11-06T03:30:08.000Z
2019-11-06T03:30:08.000Z
src/query_planner/abstract_scan_plan.py
imvinod/Eva
0ed9814ae89db7dce1fb734dc99d5dac69cb3c82
[ "Apache-2.0" ]
1
2019-11-18T03:09:56.000Z
2019-11-18T03:09:56.000Z
src/query_planner/abstract_scan_plan.py
asrayousuf/Eva
f652e5d398556055490c146f37e7a2d7a9d091f3
[ "Apache-2.0" ]
null
null
null
"""Abstract class for all the scan planners https://www.postgresql.org/docs/9.1/using-explain.html https://www.postgresql.org/docs/9.5/runtime-config-query.html """ from src.query_planner.abstract_plan import AbstractPlan from typing import List class AbstractScan(AbstractPlan): """Abstract class for all the scan based planners Arguments: predicate : Expression video : video on which the scan will be executed columns_id : """ def __init__(self, predicate: Expression, video: Storage, column_ids: List[int]): super(AbstractScan, self).__init__() self._predicate = predicate self._column_ids = column_ids self._video = video @property def video(self) -> Storage: return self._video @property def predicate(self) -> Expression: return self._predicate @property def column_ids(self) -> List: return self._column_ids
26.162162
61
0.664256
719
0.742769
0
0
222
0.229339
0
0
354
0.365702
22b61a63d3fab6ac5a4af3febf6a8b869aa2fb13
926
py
Python
tests/tools/test-tcp4-client.py
jimmy-huang/zephyr.js
cef5c0dffaacf7d5aa3f8265626f68a1e2b32eb5
[ "Apache-2.0" ]
null
null
null
tests/tools/test-tcp4-client.py
jimmy-huang/zephyr.js
cef5c0dffaacf7d5aa3f8265626f68a1e2b32eb5
[ "Apache-2.0" ]
null
null
null
tests/tools/test-tcp4-client.py
jimmy-huang/zephyr.js
cef5c0dffaacf7d5aa3f8265626f68a1e2b32eb5
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
# coding:utf-8

import time
import socket


def main():
    print "Socket client created successfully"
    host = "192.0.2.1"
    port = 9876
    bufSize = 1024
    addr = (host, port)
    Timeout = 300
    mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    mySocket.settimeout(Timeout)
    mySocket.connect(addr)
    while 1:
        try:
            Data = mySocket.recv(bufSize)
            Data = Data.strip()
            print "Got data: ", Data
            time.sleep(2)
            if Data == "close":
                mySocket.close()
                print "close socket"
                break
            else:
                mySocket.sendall(Data)
                print "Send data: ", Data
        except KeyboardInterrupt:
            print "exit client"
            break
        except:
            print "time out"
            continue


if __name__ == "__main__":
    main()
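The client above expects a peer that sends data first and then receives it back until it says "close". The sketch below is a hypothetical Python 3 companion echo server for local testing; it is not part of the repository.

import socket

def serve(host="0.0.0.0", port=9876):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srv.bind((host, port))
        srv.listen(1)
        conn, addr = srv.accept()
        with conn:
            conn.sendall(b"hello")      # seed the echo loop
            while True:
                data = conn.recv(1024)
                if not data or data.strip() == b"close":
                    break
                conn.sendall(data)      # echo back what the client sent

# serve()  # uncomment to run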
20.577778
64
0.512959
0
0
0
0
0
0
0
0
153
0.165227
22b65cc97460c0c9287ab847203def7abf74c5bd
1,642
py
Python
kinto/__main__.py
s-utsch/kinto
5e368849a8ab652a6e1923f44febcf89afd2c78b
[ "Apache-2.0" ]
null
null
null
kinto/__main__.py
s-utsch/kinto
5e368849a8ab652a6e1923f44febcf89afd2c78b
[ "Apache-2.0" ]
null
null
null
kinto/__main__.py
s-utsch/kinto
5e368849a8ab652a6e1923f44febcf89afd2c78b
[ "Apache-2.0" ]
null
null
null
import argparse
import sys

from cliquet.scripts import cliquet
from pyramid.scripts import pserve
from pyramid.paster import bootstrap


def main(args=None):
    """The main routine."""
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Kinto commands")
    subparsers = parser.add_subparsers(title='subcommands',
                                       description='valid subcommands',
                                       help='init/start/migrate')

    parser_init = subparsers.add_parser('init')
    parser_init.set_defaults(which='init')
    parser_init.add_argument('--config_file', required=False,
                             help='Config file may be passed as argument')

    parser_migrate = subparsers.add_parser('migrate')
    parser_migrate.set_defaults(which='migrate')

    parser_start = subparsers.add_parser('start')
    parser_start.set_defaults(which='start')

    args = vars(parser.parse_args())

    if args['which'] == 'init':
        if args['config_file'] is None:
            env = bootstrap('config/kinto.ini')
        else:
            config_file = format(args['config_file'])
            env = bootstrap(config_file)
    elif args['which'] == 'migrate':
        env = bootstrap('config/kinto.ini')
        cliquet.init_schema(env)
    elif args['which'] == 'start':
        pserve_argv = ['pserve', 'config/kinto.ini', '--reload']
        pserve.main(pserve_argv)


if __name__ == "__main__":
    main()
34.93617
78
0.563337
0
0
0
0
0
0
0
0
340
0.207065
22b79c3feb3f8475ac595810daa8294a07d6c2b9
625
py
Python
apis/admin.py
JumboCode/GroundWorkSomerville
280f9cd8ea38f065c9fb113e563a4be362a7e265
[ "MIT" ]
null
null
null
apis/admin.py
JumboCode/GroundWorkSomerville
280f9cd8ea38f065c9fb113e563a4be362a7e265
[ "MIT" ]
null
null
null
apis/admin.py
JumboCode/GroundWorkSomerville
280f9cd8ea38f065c9fb113e563a4be362a7e265
[ "MIT" ]
1
2021-06-28T22:56:22.000Z
2021-06-28T22:56:22.000Z
from django.contrib import admin
from django.contrib.auth.models import User
from .models import Vegetable, Harvest, Transaction, Merchandise, MerchandisePrice
from .models import PurchasedItem, UserProfile, VegetablePrice, StockedVegetable
from .models import MerchandisePhotos

admin.site.register(Vegetable)
admin.site.register(StockedVegetable)
admin.site.register(Harvest)
admin.site.register(VegetablePrice)
admin.site.register(PurchasedItem)
admin.site.register(Transaction)
admin.site.register(UserProfile)
admin.site.register(Merchandise)
admin.site.register(MerchandisePrice)
admin.site.register(MerchandisePhotos)
36.764706
82
0.8528
0
0
0
0
0
0
0
0
0
0
22b7e88858264b834f72f09e2cb52dba1a8d0aee
3,423
py
Python
tests/unit/media/test_synthesis.py
AnantTiwari-Naman/pyglet
4774f2889057da95a78785a69372112931e6a620
[ "BSD-3-Clause" ]
null
null
null
tests/unit/media/test_synthesis.py
AnantTiwari-Naman/pyglet
4774f2889057da95a78785a69372112931e6a620
[ "BSD-3-Clause" ]
null
null
null
tests/unit/media/test_synthesis.py
AnantTiwari-Naman/pyglet
4774f2889057da95a78785a69372112931e6a620
[ "BSD-3-Clause" ]
1
2021-09-16T20:47:07.000Z
2021-09-16T20:47:07.000Z
from ctypes import sizeof from io import BytesIO import unittest from pyglet.media.synthesis import * local_dir = os.path.dirname(__file__) test_data_path = os.path.abspath(os.path.join(local_dir, '..', '..', 'data')) del local_dir def get_test_data_file(*file_parts): """Get a file from the test data directory in an OS independent way. Supply relative file name as you would in os.path.join(). """ return os.path.join(test_data_path, *file_parts) class SynthesisSourceTest: """Simple test to check if synthesized sources provide data.""" source_class = None def test_default(self): source = self.source_class(1.) self._test_total_duration(source) if self.source_class is not WhiteNoise: self._test_generated_bytes(source) def test_sample_rate_11025(self): source = self.source_class(1., sample_rate=11025) self._test_total_duration(source) if self.source_class is not WhiteNoise: self._test_generated_bytes(source, sample_rate=11025) def _test_total_duration(self, source): total_bytes = source.audio_format.bytes_per_second self._check_audio_data(source, total_bytes, 1.) def _check_audio_data(self, source, expected_bytes, expected_duration): data = source.get_audio_data(expected_bytes + 100) self.assertIsNotNone(data) self.assertAlmostEqual(expected_bytes, data.length, delta=20) self.assertAlmostEqual(expected_duration, data.duration) self.assertIsNotNone(data.data) self.assertAlmostEqual(expected_bytes, len(data.data), delta=20) # Should now be out of data last_data = source.get_audio_data(100) self.assertIsNone(last_data) def test_seek_default(self): source = self.source_class(1.) self._test_seek(source) def _test_seek(self, source): seek_time = .5 bytes_left = source.audio_format.bytes_per_second * .5 source.seek(seek_time) self._check_audio_data(source, bytes_left, .5) def _test_generated_bytes(self, source, sample_rate=44800, sample_size=16): source_name = self.source_class.__name__.lower() filename = "synthesis_{0}_{1}_{2}_1ch.wav".format(source_name, sample_size, sample_rate) with open(get_test_data_file('media', filename), 'rb') as f: # discard the wave header: loaded_bytes = f.read()[44:] source.seek(0) generated_data = source.get_audio_data(source._max_offset) bytes_buffer = BytesIO(generated_data.data).getvalue() # Compare a small chunk, to avoid hanging on mismatch: assert bytes_buffer[:1000] == loaded_bytes[:1000],\ "Generated bytes do not match sample wave file." class SilenceTest(SynthesisSourceTest, unittest.TestCase): source_class = Silence class WhiteNoiseTest(SynthesisSourceTest, unittest.TestCase): source_class = WhiteNoise class SineTest(SynthesisSourceTest, unittest.TestCase): source_class = Sine class TriangleTest(SynthesisSourceTest, unittest.TestCase): source_class = Triangle class SawtoothTest(SynthesisSourceTest, unittest.TestCase): source_class = Sawtooth class SquareTest(SynthesisSourceTest, unittest.TestCase): source_class = Square class FMTest(SynthesisSourceTest, unittest.TestCase): source_class = SimpleFM
32.6
96
0.706106
2,928
0.85539
0
0
0
0
0
0
413
0.120654
22b80f5d2e66e370817465d9b5b278c1f1dcbe4e
282
py
Python
Ejercicio/Ejercicio7.py
tavo1599/F.P2021
a592804fb5ae30da55551d9e29819887919db041
[ "Apache-2.0" ]
1
2021-05-05T19:39:37.000Z
2021-05-05T19:39:37.000Z
Ejercicio/Ejercicio7.py
tavo1599/F.P2021
a592804fb5ae30da55551d9e29819887919db041
[ "Apache-2.0" ]
null
null
null
Ejercicio/Ejercicio7.py
tavo1599/F.P2021
a592804fb5ae30da55551d9e29819887919db041
[ "Apache-2.0" ]
null
null
null
# Input data
num = int(input("Ingrese un numero: "))

# Processing
if num == 10:
    print("Calificacion: A")
elif num == 9:
    print("Calificacion: B")
elif num == 8:
    print("Calificacion: C")
elif num == 7 or num == 6:   # original used "and", which can never be true
    print("Calificacion: D")
elif num <= 5 and num >= 0:
    print("Calificacion: F")
20.142857
37
0.673759
0
0
0
0
0
0
0
0
132
0.468085
22b828cde8bc59acbcf210743592fd1c629c4095
417
py
Python
2015/day-2/part2.py
nairraghav/advent-of-code-2019
274a2a4a59a8be39afb323356c592af5e1921e54
[ "MIT" ]
null
null
null
2015/day-2/part2.py
nairraghav/advent-of-code-2019
274a2a4a59a8be39afb323356c592af5e1921e54
[ "MIT" ]
null
null
null
2015/day-2/part2.py
nairraghav/advent-of-code-2019
274a2a4a59a8be39afb323356c592af5e1921e54
[ "MIT" ]
null
null
null
ribbon_needed = 0

with open("input.txt", "r") as puzzle_input:
    for line in puzzle_input:
        length, width, height = [int(item) for item in line.split("x")]
        dimensions = [length, width, height]

        smallest_side = min(dimensions)
        dimensions.remove(smallest_side)
        second_smallest_side = min(dimensions)

        ribbon_needed += 2*smallest_side + 2*second_smallest_side + length*width*height

print(ribbon_needed)
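A small self-check of the per-present formula the loop applies: twice the two smallest sides plus the volume. The boxes below are made-up examples; the expected values follow directly from that arithmetic.

def ribbon_for(length, width, height):
    sides = sorted([length, width, height])
    return 2 * sides[0] + 2 * sides[1] + length * width * height

assert ribbon_for(2, 3, 4) == 34   # 2*2 + 2*3 + 24
assert ribbon_for(1, 1, 10) == 14  # 2*1 + 2*1 + 10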
26.0625
81
0.736211
0
0
0
0
0
0
0
0
17
0.040767
22b916a799056741ecb2a3c045e0fdb664033699
11,424
py
Python
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py
aaronwJordan/Lean
3486a6de56a739e44af274f421ac302cbbc98f8d
[ "Apache-2.0" ]
null
null
null
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py
aaronwJordan/Lean
3486a6de56a739e44af274f421ac302cbbc98f8d
[ "Apache-2.0" ]
null
null
null
Algorithm.Python/Alphas/GreenblattMagicFormulaAlgorithm.py
aaronwJordan/Lean
3486a6de56a739e44af274f421ac302cbbc98f8d
[ "Apache-2.0" ]
null
null
null
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from clr import AddReference AddReference("System") AddReference("QuantConnect.Common") AddReference("QuantConnect.Algorithm") AddReference("QuantConnect.Indicators") AddReference("QuantConnect.Algorithm.Framework") from System import * from QuantConnect import * from QuantConnect.Orders.Fees import ConstantFeeModel from QuantConnect.Data.UniverseSelection import * from QuantConnect.Indicators import * from Selection.FundamentalUniverseSelectionModel import FundamentalUniverseSelectionModel from datetime import timedelta, datetime from math import ceil from itertools import chain # # This alpha picks stocks according to Joel Greenblatt's Magic Formula. # First, each stock is ranked depending on the relative value of the ratio EV/EBITDA. For example, a stock # that has the lowest EV/EBITDA ratio in the security universe receives a score of one while a stock that has # the tenth lowest EV/EBITDA score would be assigned 10 points. # # Then, each stock is ranked and given a score for the second valuation ratio, Return on Capital (ROC). # Similarly, a stock that has the highest ROC value in the universe gets one score point. # The stocks that receive the lowest combined score are chosen for insights. # # Source: Greenblatt, J. (2010) The Little Book That Beats the Market # # This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open # sourced so the community and client funds can see an example of an alpha. 
# class GreenblattMagicFormulaAlgorithm(QCAlgorithmFramework): ''' Alpha Streams: Benchmark Alpha: Pick stocks according to Joel Greenblatt's Magic Formula''' def Initialize(self): self.SetStartDate(2018, 1, 1) self.SetCash(100000) #Set zero transaction fees self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0))) # select stocks using MagicFormulaUniverseSelectionModel self.SetUniverseSelection(GreenBlattMagicFormulaUniverseSelectionModel()) # Use MagicFormulaAlphaModel to establish insights self.SetAlpha(RateOfChangeAlphaModel()) # Equally weigh securities in portfolio, based on insights self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel()) ## Set Immediate Execution Model self.SetExecution(ImmediateExecutionModel()) ## Set Null Risk Management Model self.SetRiskManagement(NullRiskManagementModel()) class RateOfChangeAlphaModel(AlphaModel): '''Uses Rate of Change (ROC) to create magnitude prediction for insights.''' def __init__(self, *args, **kwargs): self.lookback = kwargs['lookback'] if 'lookback' in kwargs else 1 self.resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Daily self.predictionInterval = Time.Multiply(Extensions.ToTimeSpan(self.resolution), self.lookback) self.symbolDataBySymbol = {} def Update(self, algorithm, data): insights = [] for symbol, symbolData in self.symbolDataBySymbol.items(): if symbolData.CanEmit: insights.append(Insight.Price(symbol, self.predictionInterval, InsightDirection.Up, symbolData.Return, None)) return insights def OnSecuritiesChanged(self, algorithm, changes): # clean up data for removed securities for removed in changes.RemovedSecurities: symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None) if symbolData is not None: symbolData.RemoveConsolidators(algorithm) # initialize data for added securities symbols = [ x.Symbol for x in changes.AddedSecurities ] history = algorithm.History(symbols, self.lookback, self.resolution) if history.empty: return tickers = history.index.levels[0] for ticker in tickers: symbol = SymbolCache.GetSymbol(ticker) if symbol not in self.symbolDataBySymbol: symbolData = SymbolData(symbol, self.lookback) self.symbolDataBySymbol[symbol] = symbolData symbolData.RegisterIndicators(algorithm, self.resolution) symbolData.WarmUpIndicators(history.loc[ticker]) class SymbolData: '''Contains data specific to a symbol required by this model''' def __init__(self, symbol, lookback): self.Symbol = symbol self.ROC = RateOfChange('{}.ROC({})'.format(symbol, lookback), lookback) self.Consolidator = None self.previous = 0 def RegisterIndicators(self, algorithm, resolution): self.Consolidator = algorithm.ResolveConsolidator(self.Symbol, resolution) algorithm.RegisterIndicator(self.Symbol, self.ROC, self.Consolidator) def RemoveConsolidators(self, algorithm): if self.Consolidator is not None: algorithm.SubscriptionManager.RemoveConsolidator(self.Symbol, self.Consolidator) def WarmUpIndicators(self, history): for tuple in history.itertuples(): self.ROC.Update(tuple.Index, tuple.close) @property def Return(self): return float(self.ROC.Current.Value) @property def CanEmit(self): if self.previous == self.ROC.Samples: return False self.previous = self.ROC.Samples return self.ROC.IsReady def __str__(self, **kwargs): return '{}: {:.2%}'.format(self.ROC.Name, (1 + self.Return)**252 - 1) class GreenBlattMagicFormulaUniverseSelectionModel(FundamentalUniverseSelectionModel): '''Defines a universe according to Joel Greenblatt's Magic Formula, as a universe selection 
model for the framework algorithm. From the universe QC500, stocks are ranked using the valuation ratios, Enterprise Value to EBITDA (EV/EBITDA) and Return on Assets (ROA). ''' def __init__(self, filterFineData = True, universeSettings = None, securityInitializer = None): '''Initializes a new default instance of the MagicFormulaUniverseSelectionModel''' super().__init__(filterFineData, universeSettings, securityInitializer) # Number of stocks in Coarse Universe self.NumberOfSymbolsCoarse = 500 # Number of sorted stocks in the fine selection subset using the valuation ratio, EV to EBITDA (EV/EBITDA) self.NumberOfSymbolsFine = 20 # Final number of stocks in security list, after sorted by the valuation ratio, Return on Assets (ROA) self.NumberOfSymbolsInPortfolio = 10 self.lastMonth = -1 self.dollarVolumeBySymbol = {} self.symbols = [] def SelectCoarse(self, algorithm, coarse): '''Performs coarse selection for constituents. The stocks must have fundamental data The stock must have positive previous-day close price The stock must have positive volume on the previous trading day''' month = algorithm.Time.month if month == self.lastMonth: return self.symbols self.lastMonth = month # The stocks must have fundamental data # The stock must have positive previous-day close price # The stock must have positive volume on the previous trading day filtered = [x for x in coarse if x.HasFundamentalData and x.Volume > 0 and x.Price > 0] # sort the stocks by dollar volume and take the top 1000 top = sorted(filtered, key=lambda x: x.DollarVolume, reverse=True)[:self.NumberOfSymbolsCoarse] self.dollarVolumeBySymbol = { i.Symbol: i.DollarVolume for i in top } self.symbols = list(self.dollarVolumeBySymbol.keys()) return self.symbols def SelectFine(self, algorithm, fine): '''QC500: Performs fine selection for the coarse selection constituents The company's headquarter must in the U.S. The stock must be traded on either the NYSE or NASDAQ At least half a year since its initial public offering The stock's market cap must be greater than 500 million Magic Formula: Rank stocks by Enterprise Value to EBITDA (EV/EBITDA) Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA)''' # QC500: ## The company's headquarter must in the U.S. 
## The stock must be traded on either the NYSE or NASDAQ ## At least half a year since its initial public offering ## The stock's market cap must be greater than 500 million filteredFine = [x for x in fine if x.CompanyReference.CountryId == "USA" and (x.CompanyReference.PrimaryExchangeID == "NYS" or x.CompanyReference.PrimaryExchangeID == "NAS") and (algorithm.Time - x.SecurityReference.IPODate).days > 180 and x.EarningReports.BasicAverageShares.ThreeMonths * x.EarningReports.BasicEPS.TwelveMonths * x.ValuationRatios.PERatio > 5e8] count = len(filteredFine) if count == 0: return [] myDict = dict() percent = float(self.NumberOfSymbolsFine / count) # select stocks with top dollar volume in every single sector for key in ["N", "M", "U", "T", "B", "I"]: value = [x for x in filteredFine if x.CompanyReference.IndustryTemplateCode == key] value = sorted(value, key=lambda x: self.dollarVolumeBySymbol[x.Symbol], reverse = True) myDict[key] = value[:ceil(len(value) * percent)] # stocks in QC500 universe topFine = list(chain.from_iterable(myDict.values()))[:self.NumberOfSymbolsCoarse] # Magic Formula: ## Rank stocks by Enterprise Value to EBITDA (EV/EBITDA) ## Rank subset of previously ranked stocks (EV/EBITDA), using the valuation ratio Return on Assets (ROA) # sort stocks in the security universe of QC500 based on Enterprise Value to EBITDA valuation ratio sortedByEVToEBITDA = sorted(topFine, key=lambda x: x.ValuationRatios.EVToEBITDA , reverse=True) # sort subset of stocks that have been sorted by Enterprise Value to EBITDA, based on the valuation ratio Return on Assets (ROA) sortedByROA = sorted(sortedByEVToEBITDA[:self.NumberOfSymbolsFine], key=lambda x: x.ValuationRatios.ForwardROA, reverse=False) # retrieve list of securites in portfolio top = sortedByROA[:self.NumberOfSymbolsInPortfolio] self.symbols = [f.Symbol for f in top] return self.symbols
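The selection model above describes Greenblatt's ranking idea in its docstrings: rank once by EV/EBITDA, rank again by return on assets, and keep the stocks with the best combined rank. Below is a minimal sketch of that combined-rank idea with made-up numbers; it is not the class's actual selection code.

stocks = {
    "AAA": {"ev_to_ebitda": 6.0, "roa": 0.18},
    "BBB": {"ev_to_ebitda": 9.5, "roa": 0.22},
    "CCC": {"ev_to_ebitda": 4.2, "roa": 0.20},
    "DDD": {"ev_to_ebitda": 12.0, "roa": 0.05},
}

by_value = sorted(stocks, key=lambda s: stocks[s]["ev_to_ebitda"])          # low is good
by_quality = sorted(stocks, key=lambda s: stocks[s]["roa"], reverse=True)   # high is good
score = {s: by_value.index(s) + by_quality.index(s) for s in stocks}

magic_picks = sorted(stocks, key=lambda s: score[s])[:2]
print(magic_picks)  # ['CCC', 'BBB']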
44.8
167
0.68785
9,245
0.809261
0
0
253
0.022146
0
0
4,639
0.406075
22b976d4af390f9c20bc3dedbfcb6376fdbf0308
5,277
py
Python
hw2/deeplearning/style_transfer.py
axelbr/berkeley-cs182-deep-neural-networks
2bde27d9d5361d48dce7539d00b136209c1cfaa1
[ "MIT" ]
null
null
null
hw2/deeplearning/style_transfer.py
axelbr/berkeley-cs182-deep-neural-networks
2bde27d9d5361d48dce7539d00b136209c1cfaa1
[ "MIT" ]
null
null
null
hw2/deeplearning/style_transfer.py
axelbr/berkeley-cs182-deep-neural-networks
2bde27d9d5361d48dce7539d00b136209c1cfaa1
[ "MIT" ]
null
null
null
import numpy as np import torch import torch.nn.functional as F def content_loss(content_weight, content_current, content_target): """ Compute the content loss for style transfer. Inputs: - content_weight: Scalar giving the weighting for the content loss. - content_current: features of the current image; this is a PyTorch Tensor of shape (1, C_l, H_l, W_l). - content_target: features of the content image, Tensor with shape (1, C_l, H_l, W_l). Returns: - scalar content loss """ ############################################################################## # YOUR CODE HERE # ############################################################################## _, C, H, W = content_current.shape current_features = content_current.view(C, H*W) target_features = content_target.view(C, H*W) loss = content_weight * torch.sum(torch.square(current_features - target_features)) return loss ############################################################################## # END OF YOUR CODE # ############################################################################## def gram_matrix(features, normalize=True): """ Compute the Gram matrix from features. Inputs: - features: PyTorch Variable of shape (N, C, H, W) giving features for a batch of N images. - normalize: optional, whether to normalize the Gram matrix If True, divide the Gram matrix by the number of neurons (H * W * C) Returns: - gram: PyTorch Variable of shape (N, C, C) giving the (optionally normalized) Gram matrices for the N input images. """ ############################################################################## # YOUR CODE HERE # ############################################################################## C, H, W = features.shape[-3], features.shape[-2], features.shape[-1] reshaped = features.view(-1, C, H*W) G = reshaped @ reshaped.transpose(dim0=1, dim1=2) if normalize: G = G / (H*W*C) return G ############################################################################## # END OF YOUR CODE # ############################################################################## def style_loss(feats, style_layers, style_targets, style_weights): """ Computes the style loss at a set of layers. Inputs: - feats: list of the features at every layer of the current image, as produced by the extract_features function. - style_layers: List of layer indices into feats giving the layers to include in the style loss. - style_targets: List of the same length as style_layers, where style_targets[i] is a PyTorch Variable giving the Gram matrix the source style image computed at layer style_layers[i]. - style_weights: List of the same length as style_layers, where style_weights[i] is a scalar giving the weight for the style loss at layer style_layers[i]. Returns: - style_loss: A PyTorch Variable holding a scalar giving the style loss. """ # Hint: you can do this with one for loop over the style layers, and should # not be very much code (~5 lines). You will need to use your gram_matrix function. 
############################################################################## # YOUR CODE HERE # ############################################################################## loss = 0 for i, l in enumerate(style_layers): A, G = style_targets[i], gram_matrix(feats[l]) loss += style_weights[i] * torch.sum(torch.square(G - A)) return loss ############################################################################## # END OF YOUR CODE # ############################################################################## def tv_loss(img, tv_weight): """ Compute total variation loss. Inputs: - img: PyTorch Variable of shape (1, 3, H, W) holding an input image. - tv_weight: Scalar giving the weight w_t to use for the TV loss. Returns: - loss: PyTorch Variable holding a scalar giving the total variation loss for img weighted by tv_weight. """ # Your implementation should be vectorized and not require any loops! ############################################################################## # YOUR CODE HERE # ############################################################################## tv = torch.square(img[..., 1:, :-1] - img[..., :-1, :-1]) + torch.square(img[..., :-1, 1:] - img[..., :-1, :-1]) return tv_weight * torch.sum(tv) ############################################################################## # END OF YOUR CODE # ##############################################################################
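A hedged sanity check for the helpers above, meant to be run alongside their definitions (it assumes gram_matrix and tv_loss are in scope): the Gram matrix of a (1, C, H, W) feature map is (1, C, C), and the total-variation penalty of a constant image is zero.

import torch

feats = torch.randn(1, 8, 5, 7)
G = gram_matrix(feats)
print(G.shape)                           # torch.Size([1, 8, 8])

flat_img = torch.ones(1, 3, 32, 32)
print(tv_loss(flat_img, tv_weight=1.0))  # tensor(0.)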
45.886957
116
0.437938
0
0
0
0
0
0
0
0
4,030
0.763691
22b997fa793710eae107fad9390bbdd1f1c77572
495
py
Python
submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Transformers/Encode/HTMLDecode.py
brittjay0104/rose6icse
7b24743b7a805b9ed094b67e4a08bad7894f0e84
[ "Unlicense" ]
null
null
null
submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Transformers/Encode/HTMLDecode.py
brittjay0104/rose6icse
7b24743b7a805b9ed094b67e4a08bad7894f0e84
[ "Unlicense" ]
null
null
null
submissions/available/Johnson-CausalTesting/Holmes/fuzzers/Peach/Transformers/Encode/HTMLDecode.py
brittjay0104/rose6icse
7b24743b7a805b9ed094b67e4a08bad7894f0e84
[ "Unlicense" ]
null
null
null
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import xml.sax.saxutils

from Peach.transformer import Transformer


class HtmlDecode(Transformer):
    """Decode HTML encoded string."""

    def realEncode(self, data):
        return xml.sax.saxutils.unescape(data)

    # The original defined realEncode twice, so the escape variant shadowed the
    # unescape variant; renaming the second method to realDecode keeps both reachable.
    def realDecode(self, data):
        return xml.sax.saxutils.escape(data)
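A quick stdlib-only illustration of what the transformer wraps: xml.sax.saxutils escape/unescape round-trip the basic HTML entities.

import xml.sax.saxutils as saxutils

raw = '<a href="x">1 & 2</a>'
encoded = saxutils.escape(raw)       # '&lt;a href="x"&gt;1 &amp; 2&lt;/a&gt;'
assert saxutils.unescape(encoded) == raw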
29.117647
69
0.717172
226
0.456566
0
0
0
0
0
0
229
0.462626
22be5667afd253d36e99d23282612d6ddbb78c15
2,132
py
Python
src/archive/greatcircle.py
AuraUAS/aura-core
4711521074db72ba9089213e14455d89dc5306c0
[ "MIT", "BSD-2-Clause-FreeBSD" ]
8
2016-08-03T19:35:03.000Z
2019-12-15T06:25:05.000Z
src/archive/greatcircle.py
jarilq/aura-core
7880ed265396bf8c89b783835853328e6d7d1589
[ "MIT", "BSD-2-Clause-FreeBSD" ]
4
2018-09-27T15:48:56.000Z
2018-11-05T12:38:10.000Z
src/archive/greatcircle.py
jarilq/aura-core
7880ed265396bf8c89b783835853328e6d7d1589
[ "MIT", "BSD-2-Clause-FreeBSD" ]
5
2017-06-28T19:15:36.000Z
2020-02-19T19:31:24.000Z
# From: http://williams.best.vwh.net/avform.htm#GCF import math EPS = 0.0001 d2r = math.pi / 180.0 r2d = 180.0 / math.pi rad2nm = (180.0 * 60.0) / math.pi nm2rad = 1.0 / rad2nm nm2meter = 1852 meter2nm = 1.0 / nm2meter # p1 = (lat1(deg), lon1(deg)) # p2 = (lat2(deg), lon2(deg)) def course_and_dist(p1, p2): # this formulations uses postive lon = W (opposite of usual, so we # invert the longitude.) lat1 = p1[0] * d2r lon1 = -p1[1] * d2r lat2 = p2[0] * d2r lon2 = -p2[1] * d2r dist_rad = 2.0 * math.asin(math.sqrt((math.sin((lat1-lat2)/2.0))**2.0 + math.cos(lat1)*math.cos(lat2)*(math.sin((lon1-lon2)/2.0))**2)) # if starting point is on a pole if math.cos(lat1) < EPS: # EPS a small number ~ machine precision if (lat1 > 0.0): # starting from N pole tc1_rad = math.pi else: # starting from S pole tc1_rad = 2.0 * math.pi # For starting points other than the poles: if dist_rad < 0.000000001: # about a cm tc1_rad = 0.0 else: num1 = math.sin(lat2) - math.sin(lat1)*math.cos(dist_rad) den1 = math.sin(dist_rad) * math.cos(lat1) tmp1 = num1 / den1 if tmp1 < -1.0: #print "CLIPPING TMP1 to -1.0!" tmp1 = -1.0 if tmp1 > 1.0: #print "CLIPPING TMP1 to 1.0!" tmp1 = 1.0 if math.sin(lon2-lon1) < 0.0: tc1_rad = math.acos(tmp1) else: tc1_rad = 2.0 * math.pi - math.acos(tmp1) dist_nm = dist_rad * rad2nm dist_m = dist_nm * nm2meter tc1_deg = tc1_rad * r2d return (tc1_deg, dist_m) def project_course_distance(p1, course_deg, dist_m): lat1 = p1[0] * d2r lon1 = -p1[1] * d2r tc = course_deg * d2r d = dist_m * meter2nm * nm2rad lat = math.asin(math.sin(lat1)*math.cos(d)+math.cos(lat1)*math.sin(d)*math.cos(tc)) if math.cos(lat) < EPS: lon = lon1 # endpoint a pole else: lon = math.fmod(lon1-math.asin(math.sin(tc)*math.sin(d)/math.cos(lat))+math.pi, 2*math.pi) - math.pi return (lat*r2d, -lon*r2d)
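A small usage sketch of the two functions above; the coordinates are made-up (lat, lon) pairs in degrees, chosen only for illustration:

p1 = (44.88, -93.22)   # roughly Minneapolis
p2 = (41.98, -87.90)   # roughly Chicago

course_deg, dist_m = course_and_dist(p1, p2)
print("initial course (deg):", course_deg)
print("distance (m):", dist_m)

# Projecting from p1 along that course for that distance should land near p2.
lat, lon = project_course_distance(p1, course_deg, dist_m)
print("projected endpoint:", lat, lon)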
29.611111
138
0.560976
0
0
0
0
0
0
0
0
448
0.210131
22be826c96db32727162b13681b36634865339c6
1,195
py
Python
app/__init__.py
JoeCare/flask_geolocation_api
ad9ea0d22b738a7af8421cc57c972bd0e0fa80da
[ "Apache-2.0" ]
null
null
null
app/__init__.py
JoeCare/flask_geolocation_api
ad9ea0d22b738a7af8421cc57c972bd0e0fa80da
[ "Apache-2.0" ]
2
2021-03-14T03:55:49.000Z
2021-03-14T04:01:32.000Z
app/__init__.py
JoeCare/flask_geolocation_api
ad9ea0d22b738a7af8421cc57c972bd0e0fa80da
[ "Apache-2.0" ]
null
null
null
import connexion, os from connexion.resolver import RestyResolver from flask import json from flask_sqlalchemy import SQLAlchemy from flask_marshmallow import Marshmallow # Globally accessible libraries db = SQLAlchemy() mm = Marshmallow() def init_app(): """Initialize the Connexion application.""" BASE_DIR = os.path.abspath(os.path.dirname(__file__)) openapi_path = os.path.join(BASE_DIR, "../") conn_app = connexion.FlaskApp( __name__, specification_dir=openapi_path, options={ "swagger_ui": True, "serve_spec": True } ) conn_app.add_api("openapi.yaml", resolver=RestyResolver('run'), strict_validation=True) # Flask app and getting into app_context app = conn_app.app # Load application config app.config.from_object('config.ProdConfig') app.json_encoder = json.JSONEncoder # Initialize Plugins db.init_app(app) mm.init_app(app) with app.app_context(): # Include our Routes/views import run # Register Blueprints # app.register_blueprint(auth.auth_bp) # app.register_blueprint(admin.admin_bp) return app
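A hedged sketch of how this factory might be driven from a local entry point. It assumes the openapi.yaml spec, the config.ProdConfig class, and the run module with the route handlers referenced above all exist; none of them are shown here.

# run_local.py -- illustrative only
from app import init_app

app = init_app()

if __name__ == "__main__":
    # init_app() returns the underlying Flask app, so the standard
    # Flask development server can serve it directly.
    app.run(host="127.0.0.1", port=5000)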
26.555556
67
0.672803
0
0
0
0
0
0
0
0
351
0.293724
22c02d3ee15e860f429769f7b7700c393718fcdc
29,893
py
Python
RIPv2-Simulation/Router.py
vkmanojk/Networks-VirtualLAN
52c6546da611a7a7b9fdea65c567b284664a99b4
[ "MIT" ]
null
null
null
RIPv2-Simulation/Router.py
vkmanojk/Networks-VirtualLAN
52c6546da611a7a7b9fdea65c567b284664a99b4
[ "MIT" ]
null
null
null
RIPv2-Simulation/Router.py
vkmanojk/Networks-VirtualLAN
52c6546da611a7a7b9fdea65c567b284664a99b4
[ "MIT" ]
null
null
null
''' Summary: Program that implements a routing deamon based on the RIP version 2 protocol from RFC2453. Usage: python3 Router.py <router_config_file> Configuration File: The user supplies a router configuration file of the format: [Settings] router-id = <router_number> input-ports = <input> [, <input>, ...] outputs = <output>-<metric>-<destination_router> [, <output>-<metric>-<destination_router>, ...] where, router_number: ID of router between 1 - 64000. input: port number between 1024 - 64000. output: port number between 1024 - 6400, not equal to any inputs. metric: metric of output between 1 - 16. destination_router: ID of destination router. Description: This program implements a basic RIPv2 routing protocol from RFC2453 for routing computations in computer networks. It takes a configuration file as shown above and sets up a router with a new socket for each input-port. The RIPv2 protocol uses a routing table to keep track of all reachable routers on the network along with their metric/cost and the direct next hop router ID along the route to that destination router. However, it can only send messages to the direct neighbours specified in outputs. The protocol uses the Bellman-Ford distance vector algorithm to compute the lowest cost route to each router in the network. If the metric is 16 or greater, the router is considered unreachable. The routing table initially starts with a single route entry (RTE) for itself with a metric of zero. The routing table is periodically transmitted too each of its direct output ports via an unsolicited response message as defined in RFC2453 section 3.9.2 and 4. This is performed on a separate thread so it does not interfere with other operations The receives messages from other routers by using the python select() function which blocks until a message is ready to be read. Once a message is received the header and contents are validated. If the message is valid each RTE is processed according to RFC2453 section 3.9.2. If a new router is found the RTE is added to the routing table, adding the cost to the metric for the output the message was received on. If the RTE already exists, but the metric is smaller, the metric is updated to the lower metric. If the lower metric is from a different next hop router, change the next hop. If nothing has changed, restart the timeout timer. If RTE metric >= max metric of 16, mark the entry for garbage collection and update the metric in the table. If any change has occurred in the routing table as a result of a received message, a triggered update (RFC2453 section 3.10.1) is sent to all outputs with the updated entries. Triggered updates are sent with a random delay between 1 - 5 seconds to prevent synchronized updates. Request messages are not implemented in this program. Timers (all timers are on separate threads) (RFC2453 section 3.8): Update timer - Periodic unsolicited response message sent to all outputs. The period is adjusted each time to a random value between 0.8 * BASE_TIMER and 1.2 * BASE_TIMER to prevent synchronized updates. Timeout - used to check the routing table for RTEs which have have not been updated within the ROUTE_TIMEOUT interval. If a router has not been heard from within this time, then set the metric to the max metric of 16 and start the garbage collection timer. Garbage timer - used to check the routing table for RTEs set for garbage collection. If the timeout >= DELETE_TIMEOUT, mark the RTE for deletion. 
Garbage Collection - used to check the routing table for RTEs marked for deletion, and removes those entries from the table. ''' import configparser import select import socket import sys import time import threading import struct import datetime from random import randint, randrange DEBUG = False HOST = '127.0.0.1' # localhost BASE_TIMER = 5 MAX_METRIC = 16 ROUTE_TIMEOUT = BASE_TIMER * 6 DELETE_TIMEOUT = BASE_TIMER * 4 AF_INET = 2 # =========================================================================== # TRANSITIONS class Transistion(): '''Class Representing a transition between states.''' def __init__(self, to_state): self.to_state = to_state def execute(self): '''Run the transition functions''' pass # =========================================================================== # STATES class State(): '''Class Representing a generic state''' def __init__(self, fsm): self.fsm = fsm def enter(self): '''Execute functions for entering a state''' pass def execute(self): '''Execute functions while in state''' pass def exit(self): '''Execute functions for leaving a state''' pass class StartUp(State): '''Class Representing the Start up state which reads the configuration file ''' def __init__(self, fsm): super(StartUp, self).__init__(fsm) def execute(self): '''Execute the configuration functions''' print_message("Loading Configuration File: '" + self.fsm.router.config_file + "'") config = configparser.ConfigParser() config.read(self.fsm.router.config_file) self.get_router_id(config) self.setup_inputs(config) self.get_outputs(config) self.setup_routing_table() self.fsm.router.print_routing_table() self.fsm.to_transition("toWaiting") def exit(self): '''Print complete message''' print_message("Router Setup Complete.") def get_router_id(self, config): '''Read the router id number from the configuration file''' if 1 <= int(config['Settings']['router-id']) <= 64000: self.fsm.router.router_settings['id'] = \ int(config['Settings']['router-id']) else: raise Exception('Invalid Router ID Number') def get_outputs(self, config): '''Return a dictionary of outputs containing port, cost and destination router id from the Configuration file''' outputs = config['Settings']['outputs'].split(', ') outputs = [i.split('-') for i in outputs] self.fsm.router.router_settings['outputs'] = {} existing_ports = [] for output in outputs: is_valid_port = 1024 <= int(output[0]) <= 64000 and not \ int(output[0]) in existing_ports is_valid_cost = 1 <= int(output[1]) < 16 is_valid_id = 1 <= int(output[2]) <= 64000 if is_valid_port and is_valid_cost and is_valid_id: existing_ports.append(int(output[0])) self.fsm.router.router_settings['outputs'][int(output[2])] = \ {'metric': int(output[1]), 'port': int(output[0])} else: raise Exception('Invalid Outputs') def setup_inputs(self, config): '''Create input sockets from the inputs specified in the config file''' # get inputs from configuration file ports = config['Settings']['input-ports'].split(', ') inputs = [] for port in ports: if 1024 <= int(port) <= 64000 and not int(port) in inputs: inputs.append(int(port)) else: raise Exception('Invalid Port Number') self.fsm.router.router_settings['inputs'] = {} # create socket for each input port for port in inputs: try: self.fsm.router.router_settings['inputs'][port] = \ socket.socket(socket.AF_INET, socket.SOCK_DGRAM) print_message('Socket ' + str(port) + ' Created.') except socket.error as msg: print('Failed to create socket. 
Message: ' + str(msg)) sys.exit() # bind port to socket try: self.fsm.router.router_settings['inputs'][port].bind( (HOST, port)) print_message('Socket ' + str(port) + ' Bind Complete.') except socket.error as msg: print('Failed to create socket. Message ' + str(msg)) sys.exit() def setup_routing_table(self): '''Setup routing table with the outputs specified in the config file''' self.fsm.router.routing_table[self.fsm.router.router_settings['id']] = \ RIPRouteEntry(address=self.fsm.router.router_settings['id'], nexthop=0, metric=0, imported=True) class Waiting(State): ''' Class representing the waiting state of the FSM where the router waits for messages to be received on its input sockets. When a message is received the state changes to the ReadMeassage state. ''' def __init__(self, fsm): super(Waiting, self).__init__(fsm) def enter(self): '''Display State entry message''' print_message("Entering idle state...") def execute(self): '''Waits for input sockets to be readable and then changes the state to process the received message.''' readable = select.select( self.fsm.router.router_settings['inputs'].values(), [], []) if readable[0]: self.fsm.router.readable_ports = readable[0] self.fsm.to_transition("toReadMessage") def exit(self): '''Display State exit message''' print_message("Message Received") class ReadMessage(State): '''Class representing the state for reading messages received on the input sockets''' def __init__(self, fsm): super(ReadMessage, self).__init__(fsm) def enter(self): print_message("Reading Messages...") def execute(self): for port in self.fsm.router.readable_ports: packet = RIPPacket(port.recvfrom(1024)[0]) self.fsm.router.update_routing_table(packet) if self.fsm.router.route_change: self.fsm.router.trigger_update() self.fsm.router.print_routing_table() self.fsm.to_transition("toWaiting") def exit(self): print_message("Messages Read.") # =========================================================================== # FINITE STATE MACHINE class RouterFSM(): '''Class representing the Router finite state machine''' def __init__(self, rip_router): self.router = rip_router self.states = {} self.transitions = {} self.cur_state = None self.trans = None def add_transistion(self, trans_name, transition): '''Add a new transition to the FSM''' self.transitions[trans_name] = transition def add_state(self, state_name, state): '''Add a new state to the FSM''' self.states[state_name] = state def set_state(self, state_name): '''Set the current state of the FSM''' self.cur_state = self.states[state_name] def to_transition(self, to_trans): '''Set the current transition of the FSM''' self.trans = self.transitions[to_trans] def execute(self): '''Run the FSM''' if self.trans: self.cur_state.exit() self.trans.execute() self.set_state(self.trans.to_state) self.cur_state.enter() self.trans = None self.cur_state.execute() # =========================================================================== # IMPLEMENTATION class RIPPacket: '''Class representing a RIP packet containing a header and body as defined in RFC2453 RIPv2 section 4.''' def __init__(self, data=None, header=None, rtes=None): if data: self._init_from_network(data) elif header and rtes: self._init_from_host(header, rtes) else: raise ValueError def __repr__(self): return "RIPPacket: Command {}, Ver. {}, number of RTEs {}.". 
\ format(self.header.cmd, self.header.ver, len(self.rtes)) def _init_from_network(self, data): '''Init for RIPPacket if data is from the network''' # Packet Validation datalen = len(data) if datalen < RIPHeader.SIZE: raise FormatException malformed_rtes = (datalen - RIPHeader.SIZE) % RIPRouteEntry.SIZE if malformed_rtes: raise FormatException # Convert bytes in packet to header and RTE data num_rtes = int((datalen - RIPHeader.SIZE) / RIPRouteEntry.SIZE) self.header = RIPHeader(data[0:RIPHeader.SIZE]) self.rtes = [] rte_start = RIPHeader.SIZE rte_end = RIPHeader.SIZE + RIPRouteEntry.SIZE # Loop over data packet to obtain each RTE for i in range(num_rtes): self.rtes.append(RIPRouteEntry(rawdata=data[rte_start:rte_end], src_id=self.header.src)) rte_start += RIPRouteEntry.SIZE rte_end += RIPRouteEntry.SIZE def _init_from_host(self, header, rtes): '''Init for imported data''' if header.ver != 2: raise ValueError("Only Version 2 is supported.") self.header = header self.rtes = rtes def serialize(self): '''Return the byte sting representing this packet for network transmission''' packed = self.header.serialize() for rte in self.rtes: packed += rte.serialize() return packed class RIPHeader: '''Class representing the header of a RIP packet''' FORMAT = "!BBH" SIZE = struct.calcsize(FORMAT) TYPE_RESPONSE = 2 VERSION = 2 def __init__(self, rawdata=None, router_id=None): self.packed = None if rawdata: self._init_from_network(rawdata) elif router_id: self._init_from_host(router_id) else: raise ValueError def __repr__(self): return "RIP Header (cmd = {}, ver = {}, src = {})".format(self.cmd, self.ver, self.src) def _init_from_network(self, rawdata): '''init for data from network''' header = struct.unpack(self.FORMAT, rawdata) self.cmd = header[0] self.ver = header[1] self.src = header[2] def _init_from_host(self, router_id): '''Init for data from host''' self.cmd = self.TYPE_RESPONSE self.ver = self.VERSION self.src = router_id def serialize(self): '''Return the byte sting representing this header for network transmission''' return struct.pack(self.FORMAT, self.cmd, self.ver, self.src) class RIPRouteEntry: '''Class representing a single RIP route entry (RTE)''' FORMAT = "!HHIII" SIZE = struct.calcsize(FORMAT) MIN_METRIC = 0 MAX_METRIC = 16 def __init__(self, rawdata=None, src_id=None, address=None, nexthop=None, metric=None, imported=False): self.changed = False self.imported = imported self.init_timeout() if rawdata and src_id != None: self._init_from_network(rawdata, src_id) elif address and nexthop != None and metric != None: self._init_from_host(address, nexthop, metric) else: raise ValueError def __repr__(self): template = "|{:^11}|{:^10}|{:^11}|{:^15}|{:^10}|{:^13}|" # Check that timeout is set if self.timeout == None: return template.format(self.addr, self.metric, self.nexthop, self.changed, self.garbage, str(self.timeout)) else: timeout = (datetime.datetime.now() - self.timeout).total_seconds() return template.format(self.addr, self.metric, self.nexthop, self.changed, self.garbage, round(timeout, 1)) def _init_from_host(self, address, nexthop, metric): '''Init for data from host''' self.afi = AF_INET self.tag = 0 # not used self.addr = address self.nexthop = nexthop self.metric = metric def _init_from_network(self, rawdata, src_id): '''Init for data received from network''' rte = struct.unpack(self.FORMAT, rawdata) self.afi = rte[0] self.tag = rte[1] self.addr = rte[2] self.set_nexthop(rte[3]) self.metric = rte[4] if self.nexthop == 0: self.nexthop = src_id # Validation if not self.MIN_METRIC <= 
self.metric <= self.MAX_METRIC: raise FormatException def init_timeout(self): '''Initialize the timeout property''' if self.imported: self.timeout = None else: self.timeout = datetime.datetime.now() self.garbage = False self.marked_for_delection = False def __eq__(self, other): if self.afi == other.afi and \ self.addr == other.addr and \ self.tag == other.tag and \ self.nexthop == other.nexthop and \ self.metric == other.metric: return True else: return False def set_nexthop(self, nexthop): '''Set the nexthop property''' self.nexthop = nexthop def serialize(self): '''Pack entries into typical RIPv2 packet format for sending over the network. ''' return struct.pack(self.FORMAT, self.afi, self.tag, self.addr, self.nexthop, self.metric) class FormatException(Exception): '''Class representing the Format Exception''' def __init__(self, message=""): self.message = message class Router: '''Class representing a single router''' def __init__(self, config_file): self.fsm = RouterFSM(self) self.config_file = config_file # Dictionary of router settings, including router-id, inputs and # outputs self.router_settings = {} self.readable_ports = [] # Dictionary of routing table self.routing_table = {} self.route_change = False # STATES self.fsm.add_state("StartUp", StartUp(self.fsm)) self.fsm.add_state("Waiting", Waiting(self.fsm)) self.fsm.add_state("ReadMessage", ReadMessage(self.fsm)) # TRANSITIONS self.fsm.add_transistion("toWaiting", Transistion("Waiting")) self.fsm.add_transistion("toReadMessage", Transistion("ReadMessage")) self.fsm.set_state("StartUp") def execute(self): '''Run the router's finite state machine''' self.fsm.execute() def update_routing_table(self, packet): '''Update Routing table if new route info exist''' for rte in packet.rtes: # ignore RTEs of self if rte.addr != self.fsm.router.router_settings['id']: bestroute = self.routing_table.get(rte.addr) # set nexthop to source router and calculate metric rte.set_nexthop(packet.header.src) rte.metric = min(rte.metric + self.router_settings['outputs'][ packet.header.src]['metric'], RIPRouteEntry.MAX_METRIC) # Route dosn't yet exist if not bestroute: # ignore RTEs with a metric of MAX_METRIC if rte.metric == RIPRouteEntry.MAX_METRIC: return # Add new RTE to routing table rte.changed = True self.route_change = True self.routing_table[rte.addr] = rte print_message("RTE added for Router: " + str(rte.addr)) return else: # Route already exists if rte.nexthop == bestroute.nexthop: if bestroute.metric != rte.metric: if bestroute.metric != RIPRouteEntry.MAX_METRIC \ and rte.metric >= RIPRouteEntry.MAX_METRIC: # mark for garbage collection bestroute.metric = RIPRouteEntry.MAX_METRIC bestroute.garbage = True bestroute.changed = True self.route_change = True else: self.update_route(bestroute, rte) # Route still exists with same values elif not bestroute.garbage: bestroute.init_timeout() # Lower metric on existing route elif rte.metric < bestroute.metric: self.update_route(bestroute, rte) def update_route(self, bestroute, rte): '''Update an existing route entry with new route info''' bestroute.init_timeout() bestroute.garbage = False bestroute.changed = True bestroute.metric = rte.metric bestroute.nexthop = rte.nexthop self.route_change = True print_message("RTE for Router: " + str(rte.addr) + " updated with metric=" + str(rte.metric) + ", nexthop=" + str(rte.nexthop) + ".") def print_routing_table(self): '''Print the routing table to the terminal''' line = "+-----------+----------+-----------+---------------+----------+-------------+" print(line) 
print( "| Routing Table (Router " + str(self.router_settings['id']) + ") |") print(line) print( "|Router ID | Metric | NextHop | ChangedFlag | Garbage | Timeout(s) |") print(line) print(self.routing_table[self.router_settings['id']]) print( "+===========+==========+===========+===============+==========+=============+") for entry in self.routing_table: if entry != self.router_settings['id']: print(self.routing_table[entry]) print(line) print('\n') def trigger_update(self): '''Send Routing update for only the routes which have changed''' changed_rtes = [] print_message("Sending Trigger update.") for rte in self.routing_table.values(): if rte.changed: changed_rtes.append(rte) rte.changed = False self.route_change = False # send update with random delay between 1 and 5 seconds delay = randint(1, 5) threading.Timer(delay, self.update, [changed_rtes]) def update(self, entries): '''Send a message to all output ports''' if self.router_settings != {}: sock = list(self.router_settings['inputs'].values())[1] local_header = RIPHeader(router_id=self.router_settings['id']) for output in self.router_settings['outputs']: # Split horizon # Remove RTES for which nexthop == output split_horizon_entries = [] for entry in entries: if entry.nexthop != output: split_horizon_entries.append(entry) else: # Poison reverse # Create new entry to get around some funky referencing # When doing poisoned_entry = entry poisoned_entry = RIPRouteEntry(rawdata=None, src_id=None, address=entry.addr, nexthop=entry.nexthop, metric= RIPRouteEntry.MAX_METRIC, imported=entry.imported) split_horizon_entries.append(poisoned_entry) # comment out to disable split horizon packet = RIPPacket( header=local_header, rtes=split_horizon_entries) # Uncomment to disable split horizon # packet = RIPPacket(header=local_header, rtes=entries) sock.sendto(packet.serialize(), (HOST, self.router_settings['outputs'][output]["port"])) print_message("Message Sent To Router: " + str(output)) def check_timeout(self): '''Check the current timeout value for each RTE in the routing table. If the time difference with now is greater than ROUTE_TIMEOUT, then set the metric to 16 and start the garbage collection timer.''' print_message("Checking timeout...") if self.routing_table != {}: for rte in self.routing_table.values(): if rte.timeout != None and \ (datetime.datetime.now() - rte.timeout).total_seconds() \ >= ROUTE_TIMEOUT: rte.garbage = True rte.changed = True self.route_change = True rte.metric = RIPRouteEntry.MAX_METRIC rte.timeout = datetime.datetime.now() self.print_routing_table() print_message("Router: " + str(rte.addr) + " timed out.") def garbage_timer(self): '''Check the status of the garbage property of each RTE. 
If true, and the timeout value difference with now is greater than DELETE_TIMEOUT, mark it for deletion''' print_message("Checking garbage timeout...") if self.routing_table != {}: for rte in self.routing_table.values(): if rte.garbage: if (datetime.datetime.now() - rte.timeout).total_seconds() \ >= DELETE_TIMEOUT: rte.marked_for_delection = True def garbage_collection(self): '''Check the routing table for RTE's that are marked for deletion and remove them.''' print_message("Collecting Garbage...") if self.routing_table != {}: delete_routes = [] for rte in self.routing_table.values(): if rte.marked_for_delection: delete_routes.append(rte.addr) print_message("Router: " + str(rte.addr) + " has been " + "removed from the routing table.") for entry in delete_routes: del self.routing_table[entry] self.print_routing_table() def timer(self, function, param=None): '''Start a periodic timer which calls a specified function''' if param != None: function(list(param.values())) period = BASE_TIMER * randrange(8, 12, 1) / 10 else: period = BASE_TIMER function() threading.Timer(period, self.timer, [function, param]).start() def start_timers(self): '''Start the timers on separate threads''' self.timer(self.update, param=self.routing_table) self.timer(self.check_timeout) self.timer(self.garbage_timer) self.timer(self.garbage_collection) def main_loop(self): '''Start the main loop for the program.''' while True: self.execute() # RUN THE PROGRAM def print_message(message): '''Print the given message with the current time before it''' if DEBUG: print("[" + time.strftime("%H:%M:%S") + "]: " + message) def main(): '''Main function to run the program.''' if __name__ == "__main__": router = Router(str(sys.argv[-1])) router.start_timers() router.main_loop() main()
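A hedged example of a configuration file in the format the module docstring above describes; the router id, port numbers, metrics, and destination ids are made-up values:

# router1.cfg -- run with: python3 Router.py router1.cfg
[Settings]
router-id = 1
input-ports = 6110, 6201, 6301
outputs = 6010-1-2, 6030-4-3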
35.12691
95
0.544174
23,920
0.800187
0
0
0
0
0
0
11,055
0.369819
22c090ce75cc118c533814274bbfc243abbfc79a
5,669
py
Python
atlaselectrophysiology/extract_files.py
alowet/iblapps
9be936cd6806153dde0cbff1b6f2180191de3aeb
[ "MIT" ]
null
null
null
atlaselectrophysiology/extract_files.py
alowet/iblapps
9be936cd6806153dde0cbff1b6f2180191de3aeb
[ "MIT" ]
null
null
null
atlaselectrophysiology/extract_files.py
alowet/iblapps
9be936cd6806153dde0cbff1b6f2180191de3aeb
[ "MIT" ]
null
null
null
from ibllib.io import spikeglx import numpy as np import ibllib.dsp as dsp from scipy import signal from ibllib.misc import print_progress from pathlib import Path import alf.io as aio import logging import ibllib.ephys.ephysqc as ephysqc from phylib.io import alf _logger = logging.getLogger('ibllib') RMS_WIN_LENGTH_SECS = 3 WELCH_WIN_LENGTH_SAMPLES = 1024 def rmsmap(fbin, spectra=True): """ Computes RMS map in time domain and spectra for each channel of Neuropixel probe :param fbin: binary file in spike glx format (will look for attached metatdata) :type fbin: str or pathlib.Path :param spectra: whether to compute the power spectrum (only need for lfp data) :type: bool :return: a dictionary with amplitudes in channeltime space, channelfrequency space, time and frequency scales """ if not isinstance(fbin, spikeglx.Reader): sglx = spikeglx.Reader(fbin) rms_win_length_samples = 2 ** np.ceil(np.log2(sglx.fs * RMS_WIN_LENGTH_SECS)) # the window generator will generates window indices wingen = dsp.WindowGenerator(ns=sglx.ns, nswin=rms_win_length_samples, overlap=0) # pre-allocate output dictionary of numpy arrays win = {'TRMS': np.zeros((wingen.nwin, sglx.nc)), 'nsamples': np.zeros((wingen.nwin,)), 'fscale': dsp.fscale(WELCH_WIN_LENGTH_SAMPLES, 1 / sglx.fs, one_sided=True), 'tscale': wingen.tscale(fs=sglx.fs)} win['spectral_density'] = np.zeros((len(win['fscale']), sglx.nc)) # loop through the whole session for first, last in wingen.firstlast: D = sglx.read_samples(first_sample=first, last_sample=last)[0].transpose() # remove low frequency noise below 1 Hz D = dsp.hp(D, 1 / sglx.fs, [0, 1]) iw = wingen.iw win['TRMS'][iw, :] = dsp.rms(D) win['nsamples'][iw] = D.shape[1] if spectra: # the last window may be smaller than what is needed for welch if last - first < WELCH_WIN_LENGTH_SAMPLES: continue # compute a smoothed spectrum using welch method _, w = signal.welch(D, fs=sglx.fs, window='hanning', nperseg=WELCH_WIN_LENGTH_SAMPLES, detrend='constant', return_onesided=True, scaling='density', axis=-1) win['spectral_density'] += w.T # print at least every 20 windows if (iw % min(20, max(int(np.floor(wingen.nwin / 75)), 1))) == 0: print_progress(iw, wingen.nwin) return win def extract_rmsmap(fbin, out_folder=None, spectra=True): """ Wrapper for rmsmap that outputs _ibl_ephysRmsMap and _ibl_ephysSpectra ALF files :param fbin: binary file in spike glx format (will look for attached metatdata) :param out_folder: folder in which to store output ALF files. Default uses the folder in which the `fbin` file lives. 
:param spectra: whether to compute the power spectrum (only need for lfp data) :type: bool :return: None """ _logger.info(f"Computing QC for {fbin}") sglx = spikeglx.Reader(fbin) # check if output ALF files exist already: if out_folder is None: out_folder = Path(fbin).parent else: out_folder = Path(out_folder) alf_object_time = f'_iblqc_ephysTimeRms{sglx.type.upper()}' alf_object_freq = f'_iblqc_ephysSpectralDensity{sglx.type.upper()}' # crunch numbers rms = rmsmap(fbin, spectra=spectra) # output ALF files, single precision with the optional label as suffix before extension if not out_folder.exists(): out_folder.mkdir() tdict = {'rms': rms['TRMS'].astype(np.single), 'timestamps': rms['tscale'].astype(np.single)} aio.save_object_npy(out_folder, object=alf_object_time, dico=tdict) if spectra: fdict = {'power': rms['spectral_density'].astype(np.single), 'freqs': rms['fscale'].astype(np.single)} aio.save_object_npy(out_folder, object=alf_object_freq, dico=fdict) def _sample2v(ap_file): """ Convert raw ephys data to Volts """ md = spikeglx.read_meta_data(ap_file.with_suffix('.meta')) s2v = spikeglx._conversion_sample2v_from_meta(md) return s2v['ap'][0] def ks2_to_alf(ks_path, bin_path, out_path, bin_file=None, ampfactor=1, label=None, force=True): """ Convert Kilosort 2 output to ALF dataset for single probe data :param ks_path: :param bin_path: path of raw data :param out_path: :return: """ m = ephysqc.phy_model_from_ks2_path(ks2_path=ks_path, bin_path=bin_path, bin_file=bin_file) ephysqc.spike_sorting_metrics_ks2(ks_path, m, save=True) ac = alf.EphysAlfCreator(m) ac.convert(out_path, label=label, force=force, ampfactor=ampfactor) def extract_data(ks_path, ephys_path, out_path): efiles = spikeglx.glob_ephys_files(ephys_path) for efile in efiles: if efile.get('ap') and efile.ap.exists(): ks2_to_alf(ks_path, ephys_path, out_path, bin_file=efile.ap, ampfactor=_sample2v(efile.ap), label=None, force=True) extract_rmsmap(efile.ap, out_folder=out_path, spectra=False) if efile.get('lf') and efile.lf.exists(): extract_rmsmap(efile.lf, out_folder=out_path) # if __name__ == '__main__': # # ephys_path = Path('C:/Users/Mayo/Downloads/raw_ephys_data') # ks_path = Path('C:/Users/Mayo/Downloads/KS2') # out_path = Path('C:/Users/Mayo/Downloads/alf') # extract_data(ks_path, ephys_path, out_path)
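A hedged usage sketch of extract_rmsmap on its own; the binary file path is hypothetical and assumes a SpikeGLX .bin file with its .meta file alongside:

from pathlib import Path

bin_file = Path('/data/raw_ephys_data/probe00/session.ap.bin')  # hypothetical path
extract_rmsmap(bin_file, out_folder=bin_file.parent / 'alf', spectra=False)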
40.492857
99
0.657259
0
0
0
0
0
0
0
0
2,154
0.379961
22c0a198d3ffbdb90c8a504d310e057f35103de5
2,740
py
Python
site_settings/models.py
shervinbdndev/Django-Shop
baa4e7b91fbdd01ee591049c12cd9fbfaa434379
[ "MIT" ]
13
2022-02-25T05:04:58.000Z
2022-03-15T10:55:24.000Z
site_settings/models.py
iTsSobhan/Django-Shop
9eb6a08c6e93e5401d6bc2eeb30f2ef35adec730
[ "MIT" ]
null
null
null
site_settings/models.py
iTsSobhan/Django-Shop
9eb6a08c6e93e5401d6bc2eeb30f2ef35adec730
[ "MIT" ]
1
2022-03-03T09:21:49.000Z
2022-03-03T09:21:49.000Z
from django.db import models class SiteSettings(models.Model): site_name = models.CharField(max_length=200 , verbose_name='Site Name') site_url = models.CharField(max_length=200 , verbose_name='Site URL') site_address = models.CharField(max_length=300 , verbose_name='Site Address') site_phone = models.CharField(max_length=100 , null=True , blank=True , verbose_name='Site Phone') site_fax = models.CharField(max_length=200 , null=True , blank=True , verbose_name='Site Fax') site_email = models.EmailField(max_length=200 , null=True , blank=True , verbose_name='Site Email') about_us_text = models.TextField(verbose_name='About Us Text') site_copy_right = models.TextField(verbose_name='Copyright Text') site_logo = models.ImageField(upload_to='images/site-setting/' , verbose_name='Site Logo') is_main_setting = models.BooleanField(verbose_name='Site Main Settings') def __str__(self) -> str: super(SiteSettings , self).__str__() return self.site_name class Meta: verbose_name = 'Site Setting' verbose_name_plural = 'Site Settings' class FooterLinkBox(models.Model): title = models.CharField(max_length=200 , verbose_name='Title') def __str__(self) -> str: super(FooterLinkBox , self).__str__() return self.title class Meta: verbose_name = 'Footer Link Setting' verbose_name_plural = 'Footer Link Settings' class FooterLink(models.Model): title = models.CharField(max_length=200 , verbose_name='Title') url = models.URLField(max_length=500 , verbose_name='Links') footer_link_box = models.ForeignKey(to=FooterLinkBox , verbose_name='Category' , on_delete=models.CASCADE) def __str__(self) -> str: super(FooterLink , self).__str__() return self.title class Meta: verbose_name = 'Footer Link' verbose_name_plural = 'Footer Links' class Slider(models.Model): title = models.CharField(max_length=200 , verbose_name='Title') description = models.TextField(verbose_name='Slider Description') url_title = models.CharField(max_length=200 , verbose_name='URL Title') url = models.URLField(max_length=200 , verbose_name='URL Address') image = models.ImageField(upload_to='images/sliders' , verbose_name='Slider Image') is_active = models.BooleanField(default=False , verbose_name='Active / Inactive') def __str__(self) -> str: super(Slider , self).__str__() return self.title class Meta: verbose_name = 'Slider' verbose_name_plural = 'Sliders'
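A short, hypothetical sketch of how these models might be registered in the Django admin; the admin module is not part of the file above:

# site_settings/admin.py -- illustrative sketch
from django.contrib import admin

from .models import SiteSettings, FooterLinkBox, FooterLink, Slider

admin.site.register(SiteSettings)
admin.site.register(FooterLinkBox)
admin.site.register(FooterLink)
admin.site.register(Slider)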
36.052632
110
0.670073
2,576
0.940146
0
0
0
0
0
0
400
0.145985
22c0aad467733eae25b9c32e9a7eb9d1b86f8921
9,955
py
Python
examples/basics/visuals/line_prototype.py
3DAlgoLab/vispy
91972307cf336674aad58198fb26b9e46f8f9ca1
[ "BSD-3-Clause" ]
2,617
2015-01-02T07:52:18.000Z
2022-03-29T19:31:15.000Z
examples/basics/visuals/line_prototype.py
3DAlgoLab/vispy
91972307cf336674aad58198fb26b9e46f8f9ca1
[ "BSD-3-Clause" ]
1,674
2015-01-01T00:36:08.000Z
2022-03-31T19:35:56.000Z
examples/basics/visuals/line_prototype.py
3DAlgoLab/vispy
91972307cf336674aad58198fb26b9e46f8f9ca1
[ "BSD-3-Clause" ]
719
2015-01-10T14:25:00.000Z
2022-03-02T13:24:56.000Z
# -*- coding: utf-8 -*- # vispy: gallery 10 # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. import sys import numpy as np from vispy import app, gloo, visuals from vispy.visuals.filters import Clipper, ColorFilter from vispy.visuals.shaders import MultiProgram from vispy.visuals.collections import PointCollection from vispy.visuals.transforms import STTransform from vispy.scene import SceneCanvas from vispy.scene.visuals import create_visual_node class LineVisual(visuals.Visual): """Example of a very simple GL-line visual. This shows the minimal set of methods that need to be reimplemented to make a new visual class. """ def __init__(self, pos=None, color=(1, 1, 1, 1)): vcode = """ attribute vec2 a_pos; void main() { gl_Position = $transform(vec4(a_pos, 0., 1.)); gl_PointSize = 10.; } """ fcode = """ void main() { gl_FragColor = $color; } """ visuals.Visual.__init__(self, vcode=vcode, fcode=fcode) self.pos_buf = gloo.VertexBuffer() # The Visual superclass contains a MultiProgram, which is an object # that behaves like a normal shader program (you can assign shader # code, upload values, set template variables, etc.) but internally # manages multiple ModularProgram instances, one per view. # The MultiProgram is accessed via the `shared_program` property, so # the following modifications to the program will be applied to all # views: self.shared_program['a_pos'] = self.pos_buf self.shared_program.frag['color'] = color self._need_upload = False # Visual keeps track of draw mode, index buffer, and GL state. These # are shared between all views. self._draw_mode = 'line_strip' self.set_gl_state('translucent', depth_test=False) if pos is not None: self.set_data(pos) def set_data(self, pos): self._pos = pos self._need_upload = True def _prepare_transforms(self, view=None): view.view_program.vert['transform'] = view.transforms.get_transform() def _prepare_draw(self, view=None): """This method is called immediately before each draw. The *view* argument indicates which view is about to be drawn. """ if self._need_upload: # Note that pos_buf is shared between all views, so we have no need # to use the *view* argument in this example. This will be true # for most visuals. self.pos_buf.set_data(self._pos) self._need_upload = False class PointVisual(LineVisual): """Another simple visual class. Due to the simplicity of these example classes, it was only necessary to subclass from LineVisual and set the draw mode to 'points'. A more fully-featured PointVisual class might not follow this approach. """ def __init__(self, pos=None, color=(1, 1, 1, 1)): LineVisual.__init__(self, pos, color) self._draw_mode = 'points' class PlotLineVisual(visuals.CompoundVisual): """An example compound visual that draws lines and points. To the user, the compound visual behaves exactly like a normal visual--it has a transform system, draw() and bounds() methods, etc. Internally, the compound visual automatically manages proxying these transforms and methods to its sub-visuals. """ def __init__(self, pos=None, line_color=(1, 1, 1, 1), point_color=(1, 1, 1, 1)): self._line = LineVisual(pos, color=line_color) self._point = PointVisual(pos, color=point_color) visuals.CompoundVisual.__init__(self, [self._line, self._point]) class PointCollectionVisual(visuals.Visual): """Thin wrapper around a point collection. Note: This is currently broken! 
""" def __init__(self): prog = MultiProgram(vcode='', fcode='') self.points = PointCollection("agg", color="shared", program=prog) visuals.Visual.__init__(self, program=prog) def _prepare_draw(self, view): if self.points._need_update: self.points._update() self._draw_mode = self.points._mode self._index_buffer = self.points._indices_buffer def append(self, *args, **kwargs): self.points.append(*args, **kwargs) def _prepare_transforms(self, view=None): pass @property def color(self): return self.points['color'] @color.setter def color(self, c): self.points['color'] = c class PanZoomTransform(STTransform): def __init__(self, canvas=None, aspect=None, **kwargs): self._aspect = aspect self.attach(canvas) STTransform.__init__(self, **kwargs) def attach(self, canvas): """ Attach this tranform to a canvas """ self._canvas = canvas canvas.events.mouse_wheel.connect(self.on_mouse_wheel) canvas.events.mouse_move.connect(self.on_mouse_move) def on_mouse_move(self, event): if event.is_dragging: dxy = event.pos - event.last_event.pos button = event.press_event.button if button == 1: self.move(dxy) elif button == 2: center = event.press_event.pos if self._aspect is None: self.zoom(np.exp(dxy * (0.01, -0.01)), center) else: s = dxy[1] * -0.01 self.zoom(np.exp(np.array([s, s])), center) def on_mouse_wheel(self, event): self.zoom(np.exp(event.delta * (0.01, -0.01)), event.pos) canvas = app.Canvas(keys='interactive', size=(900, 600), show=True, title="Visual Canvas") pos = np.random.normal(size=(1000, 2), loc=0, scale=50).astype('float32') pos[0] = [0, 0] # Make a line visual line = LineVisual(pos=pos) line.transforms.canvas = canvas line.transform = STTransform(scale=(2, 1), translate=(20, 20)) panzoom = PanZoomTransform(canvas) line.transforms.scene_transform = panzoom panzoom.changed.connect(lambda ev: canvas.update()) # Attach color filter to all views (current and future) of the visual line.attach(ColorFilter((1, 1, 0.5, 0.7))) # Attach a clipper just to this view. The Clipper filter requires a # transform that maps from the framebuffer coordinate system to the # clipping coordinates. 
tr = line.transforms.get_transform('framebuffer', 'canvas') line.attach(Clipper((20, 20, 260, 260), transform=tr), view=line) # Make a view of the line that will draw its shadow shadow = line.view() shadow.transforms.canvas = canvas shadow.transform = STTransform(scale=(2, 1), translate=(25, 25)) shadow.transforms.scene_transform = panzoom shadow.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow) tr = shadow.transforms.get_transform('framebuffer', 'canvas') shadow.attach(Clipper((20, 20, 260, 260), transform=tr), view=shadow) # And make a second view of the line with different clipping bounds view = line.view() view.transforms.canvas = canvas view.transform = STTransform(scale=(2, 0.5), translate=(450, 150)) tr = view.transforms.get_transform('framebuffer', 'canvas') view.attach(Clipper((320, 20, 260, 260), transform=tr), view=view) # Make a compound visual plot = PlotLineVisual(pos, (0.5, 1, 0.5, 0.2), (0.5, 1, 1, 0.3)) plot.transforms.canvas = canvas plot.transform = STTransform(translate=(80, 450), scale=(1.5, 1)) tr = plot.transforms.get_transform('framebuffer', 'canvas') plot.attach(Clipper((20, 320, 260, 260), transform=tr), view=plot) # And make a view on the compound view2 = plot.view() view2.transforms.canvas = canvas view2.transform = STTransform(scale=(1.5, 1), translate=(450, 400)) tr = view2.transforms.get_transform('framebuffer', 'canvas') view2.attach(Clipper((320, 320, 260, 260), transform=tr), view=view2) # And a shadow for the view shadow2 = plot.view() shadow2.transforms.canvas = canvas shadow2.transform = STTransform(scale=(1.5, 1), translate=(455, 405)) shadow2.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow2) tr = shadow2.transforms.get_transform('framebuffer', 'canvas') shadow2.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2) # Example of a collection visual collection = PointCollectionVisual() collection.transforms.canvas = canvas collection.transform = STTransform(translate=(750, 150)) collection.append(np.random.normal(loc=0, scale=20, size=(10000, 3)), itemsize=5000) collection.color = (1, 0.5, 0.5, 1), (0.5, 0.5, 1, 1) shadow3 = collection.view() shadow3.transforms.canvas = canvas shadow3.transform = STTransform(scale=(1, 1), translate=(752, 152)) shadow3.attach(ColorFilter((0, 0, 0, 0.6)), view=shadow3) # tr = shadow3.transforms.get_transform('framebuffer', 'canvas') # shadow3.attach(Clipper((320, 320, 260, 260), transform=tr), view=shadow2) order = [shadow, line, view, plot, shadow2, view2, shadow3, collection] @canvas.connect def on_draw(event): canvas.context.clear((0.3, 0.3, 0.3, 1.0)) for v in order: v.draw() def on_resize(event): # Set canvas viewport and reconfigure visual transforms to match. vp = (0, 0, canvas.physical_size[0], canvas.physical_size[1]) canvas.context.set_viewport(*vp) for v in order: v.transforms.configure(canvas=canvas, viewport=vp) canvas.events.resize.connect(on_resize) on_resize(None) Line = create_visual_node(LineVisual) canvas2 = SceneCanvas(keys='interactive', title='Scene Canvas', show=True) v = canvas2.central_widget.add_view(margin=10) v.border_color = (1, 1, 1, 1) v.bgcolor = (0.3, 0.3, 0.3, 1) v.camera = 'panzoom' line2 = Line(pos, parent=v.scene) def mouse(ev): print(ev) v.events.mouse_press.connect(mouse) if __name__ == '__main__': if sys.flags.interactive != 1: app.run()
34.209622
79
0.668609
5,238
0.526168
0
0
255
0.025615
0
0
3,077
0.309091
22c0b1d42f5e6f6bbd43886632ceb253dedae7b6
4,243
py
Python
h1st/tests/core/test_schemas_inferrer.py
Mou-Ikkai/h1st
da47a8f1ad6af532c549e075fba19e3b3692de89
[ "Apache-2.0" ]
2
2020-08-21T07:49:08.000Z
2020-08-21T07:49:13.000Z
h1st/tests/core/test_schemas_inferrer.py
Mou-Ikkai/h1st
da47a8f1ad6af532c549e075fba19e3b3692de89
[ "Apache-2.0" ]
3
2020-11-13T19:06:07.000Z
2022-02-10T02:06:03.000Z
h1st/tests/core/test_schemas_inferrer.py
Mou-Ikkai/h1st
da47a8f1ad6af532c549e075fba19e3b3692de89
[ "Apache-2.0" ]
null
null
null
from unittest import TestCase from datetime import datetime import pyarrow as pa import numpy as np import pandas as pd from h1st.schema import SchemaInferrer class SchemaInferrerTestCase(TestCase): def test_infer_python(self): inferrer = SchemaInferrer() self.assertEqual(inferrer.infer_schema(1), pa.int64()) self.assertEqual(inferrer.infer_schema(1.1), pa.float64()) self.assertEqual(inferrer.infer_schema({ 'test1': 1, 'test2': "hello", 'test3': b"hello", 'today': datetime.now(), }), { 'type': dict, 'fields': { 'test1': pa.int64(), 'test2': pa.string(), 'test3': pa.binary(), 'today': pa.date64(), } }) self.assertEqual(inferrer.infer_schema(( 1, 2, 3 )), pa.list_(pa.int64())) self.assertEqual(inferrer.infer_schema(( 1.2, 1.3, 1.4 )), pa.list_(pa.float64())) table = pa.Table.from_arrays( [pa.array([1, 2, 3]), pa.array(["a", "b", "c"])], ['c1', 'c2'] ) self.assertEqual(inferrer.infer_schema(table), table.schema) def test_infer_numpy(self): inferrer = SchemaInferrer() self.assertEqual(inferrer.infer_schema(np.random.random((100, 28, 28))), { 'type': np.ndarray, 'item': pa.float64(), 'shape': (None, 28, 28) }) self.assertEqual(inferrer.infer_schema(np.array(["1", "2", "3"])), { 'type': np.ndarray, 'item': pa.string() }) def test_infer_dataframe(self): inferrer = SchemaInferrer() df = pd.DataFrame({ 'f1': [1, 2, 3], 'f2': ['a', 'b', 'c'], 'f3': [0.1, 0.2, 0.9] }) self.assertEqual(inferrer.infer_schema(df), { 'type': pd.DataFrame, 'fields': { 'f1': pa.int64(), 'f2': pa.string(), 'f3': pa.float64() } }) df = pd.DataFrame({ 'Timestamp': [1.1, 2.2, 3.1], 'CarSpeed': [0.1, 0.2, 0.9], 'Gx': [0.1, 0.2, 0.9], 'Gy': [0.1, 0.2, 0.9], 'Label': ['1', '0', '1'] }) self.assertEqual(inferrer.infer_schema(df), { 'type': pd.DataFrame, 'fields': { 'Timestamp': pa.float64(), 'CarSpeed': pa.float64(), 'Gx': pa.float64(), 'Gy': pa.float64(), 'Label': pa.string(), } }) self.assertEqual(inferrer.infer_schema(pd.Series([1, 2, 3])), { 'type': pd.Series, 'item': pa.int64() }) def test_infer_dict(self): inferrer = SchemaInferrer() self.assertEqual(inferrer.infer_schema({ 'test': 123, }), { 'type': dict, 'fields': { 'test': pa.int64(), } }) self.assertEqual(inferrer.infer_schema({ 'test': 123, 'indices': [1, 2, 3] }), { 'type': dict, 'fields': { 'test': pa.int64(), 'indices': pa.list_(pa.int64()) } }) self.assertEqual(inferrer.infer_schema({ 'results': pd.DataFrame({ 'CarSpeed': [0, 1, 2], 'Label': ['a', 'b', 'c'] }) }), { 'type': dict, 'fields': { 'results': { 'type': pd.DataFrame, 'fields': { 'CarSpeed': pa.int64(), 'Label': pa.string(), } } } }) def test_infer_list(self): inferrer = SchemaInferrer() self.assertEqual(inferrer.infer_schema([ {'test': 123}, {'test': 345}, ]), { 'type': list, 'item': { 'type': dict, 'fields': { 'test': pa.int64() } } })
27.732026
82
0.418572
4,081
0.961819
0
0
0
0
0
0
499
0.117605
22c0ccfce68cfbaf9d19c13daf2d7c341cf47746
373
py
Python
c_core_librairies/exercise_a.py
nicolasessisbreton/pyzehe
7497a0095d974ac912ce9826a27e21fd9d513942
[ "Apache-2.0" ]
1
2018-05-31T19:36:36.000Z
2018-05-31T19:36:36.000Z
c_core_librairies/exercise_a.py
nicolasessisbreton/pyzehe
7497a0095d974ac912ce9826a27e21fd9d513942
[ "Apache-2.0" ]
1
2018-05-31T01:10:51.000Z
2018-05-31T01:10:51.000Z
c_core_librairies/exercise_a.py
nicolasessisbreton/pyzehe
7497a0095d974ac912ce9826a27e21fd9d513942
[ "Apache-2.0" ]
null
null
null
""" # refactoring Refactoring is the key to successfull projects. Refactor: 1) annuity_factor such that: conversion to integer is handled, no extra printing 2) policy_book into a class such that: a function generates the book and the premium stats and visualizations functions are avalaible 3) book_report such that: it uses all the previous improvements """
21.941176
50
0.772118
0
0
0
0
0
0
0
0
373
1
22c0e10976672b4523dad7b6dd7cde8c3d5b7c7b
6,272
py
Python
util/util.py
harshitAgr/vess2ret
5702175bcd9ecde34d4fedab45a7cd2878a0184c
[ "MIT" ]
111
2017-01-30T17:49:15.000Z
2022-03-28T05:53:51.000Z
util/util.py
engineerlion/vess2ret
5702175bcd9ecde34d4fedab45a7cd2878a0184c
[ "MIT" ]
19
2017-03-06T10:28:16.000Z
2020-12-09T12:25:22.000Z
util/util.py
engineerlion/vess2ret
5702175bcd9ecde34d4fedab45a7cd2878a0184c
[ "MIT" ]
46
2017-02-10T18:39:25.000Z
2022-03-05T21:39:46.000Z
"""Auxiliary methods.""" import os import json from errno import EEXIST import numpy as np import seaborn as sns import cPickle as pickle import matplotlib.pyplot as plt sns.set() DEFAULT_LOG_DIR = 'log' ATOB_WEIGHTS_FILE = 'atob_weights.h5' D_WEIGHTS_FILE = 'd_weights.h5' class MyDict(dict): """ Dictionary that allows to access elements with dot notation. ex: >> d = MyDict({'key': 'val'}) >> d.key 'val' >> d.key2 = 'val2' >> d {'key2': 'val2', 'key': 'val'} """ __getattr__ = dict.get __setattr__ = dict.__setitem__ def convert_to_rgb(img, is_binary=False): """Given an image, make sure it has 3 channels and that it is between 0 and 1.""" if len(img.shape) != 3: raise Exception("""Image must have 3 dimensions (channels x height x width). """ """Given {0}""".format(len(img.shape))) img_ch, _, _ = img.shape if img_ch != 3 and img_ch != 1: raise Exception("""Unsupported number of channels. """ """Must be 1 or 3, given {0}.""".format(img_ch)) imgp = img if img_ch == 1: imgp = np.repeat(img, 3, axis=0) if not is_binary: imgp = imgp * 127.5 + 127.5 imgp /= 255. return np.clip(imgp.transpose((1, 2, 0)), 0, 1) def compose_imgs(a, b, is_a_binary=True, is_b_binary=False): """Place a and b side by side to be plotted.""" ap = convert_to_rgb(a, is_binary=is_a_binary) bp = convert_to_rgb(b, is_binary=is_b_binary) if ap.shape != bp.shape: raise Exception("""A and B must have the same size. """ """{0} != {1}""".format(ap.shape, bp.shape)) # ap.shape and bp.shape must have the same size here h, w, ch = ap.shape composed = np.zeros((h, 2*w, ch)) composed[:, :w, :] = ap composed[:, w:, :] = bp return composed def get_log_dir(log_dir, expt_name): """Compose the log_dir with the experiment name.""" if log_dir is None: raise Exception('log_dir can not be None.') if expt_name is not None: return os.path.join(log_dir, expt_name) return log_dir def mkdir(mypath): """Create a directory if it does not exist.""" try: os.makedirs(mypath) except OSError as exc: if exc.errno == EEXIST and os.path.isdir(mypath): pass else: raise def create_expt_dir(params): """Create the experiment directory and return it.""" expt_dir = get_log_dir(params.log_dir, params.expt_name) # Create directories if they do not exist mkdir(params.log_dir) mkdir(expt_dir) # Save the parameters json.dump(params, open(os.path.join(expt_dir, 'params.json'), 'wb'), indent=4, sort_keys=True) return expt_dir def plot_loss(loss, label, filename, log_dir): """Plot a loss function and save it in a file.""" plt.figure(figsize=(5, 4)) plt.plot(loss, label=label) plt.legend() plt.savefig(os.path.join(log_dir, filename)) plt.clf() def log(losses, atob, it_val, N=4, log_dir=DEFAULT_LOG_DIR, expt_name=None, is_a_binary=True, is_b_binary=False): """Log losses and atob results.""" log_dir = get_log_dir(log_dir, expt_name) # Save the losses for further inspection pickle.dump(losses, open(os.path.join(log_dir, 'losses.pkl'), 'wb')) ########################################################################### # PLOT THE LOSSES # ########################################################################### plot_loss(losses['d'], 'discriminator', 'd_loss.png', log_dir) plot_loss(losses['d_val'], 'discriminator validation', 'd_val_loss.png', log_dir) plot_loss(losses['p2p'], 'Pix2Pix', 'p2p_loss.png', log_dir) plot_loss(losses['p2p_val'], 'Pix2Pix validation', 'p2p_val_loss.png', log_dir) ########################################################################### # PLOT THE A->B RESULTS # ########################################################################### 
plt.figure(figsize=(10, 6)) for i in range(N*N): a, _ = next(it_val) bp = atob.predict(a) img = compose_imgs(a[0], bp[0], is_a_binary=is_a_binary, is_b_binary=is_b_binary) plt.subplot(N, N, i+1) plt.imshow(img) plt.axis('off') plt.savefig(os.path.join(log_dir, 'atob.png')) plt.clf() # Make sure all the figures are closed. plt.close('all') def save_weights(models, log_dir=DEFAULT_LOG_DIR, expt_name=None): """Save the weights of the models into a file.""" log_dir = get_log_dir(log_dir, expt_name) models.atob.save_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE), overwrite=True) models.d.save_weights(os.path.join(log_dir, D_WEIGHTS_FILE), overwrite=True) def load_weights(atob, d, log_dir=DEFAULT_LOG_DIR, expt_name=None): """Load the weights into the corresponding models.""" log_dir = get_log_dir(log_dir, expt_name) atob.load_weights(os.path.join(log_dir, ATOB_WEIGHTS_FILE)) d.load_weights(os.path.join(log_dir, D_WEIGHTS_FILE)) def load_weights_of(m, weights_file, log_dir=DEFAULT_LOG_DIR, expt_name=None): """Load the weights of the model m.""" log_dir = get_log_dir(log_dir, expt_name) m.load_weights(os.path.join(log_dir, weights_file)) def load_losses(log_dir=DEFAULT_LOG_DIR, expt_name=None): """Load the losses of the given experiment.""" log_dir = get_log_dir(log_dir, expt_name) losses = pickle.load(open(os.path.join(log_dir, 'losses.pkl'), 'rb')) return losses def load_params(params): """ Load the parameters of an experiment and return them. The params passed as argument will be merged with the new params dict. If there is a conflict with a key, the params passed as argument prevails. """ expt_dir = get_log_dir(params.log_dir, params.expt_name) expt_params = json.load(open(os.path.join(expt_dir, 'params.json'), 'rb')) # Update the loaded parameters with the current parameters. This will # override conflicting keys as expected. expt_params.update(params) return expt_params
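A small usage sketch of compose_imgs with synthetic channels-first arrays; the shapes and values are made up:

import numpy as np

a = np.random.randint(0, 2, size=(1, 64, 64)).astype('float32')    # binary mask, 1 x H x W
b = np.random.uniform(-1, 1, size=(3, 64, 64)).astype('float32')   # tanh-scaled image, 3 x H x W

side_by_side = compose_imgs(a, b, is_a_binary=True, is_b_binary=False)
print(side_by_side.shape)  # (64, 128, 3)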
30.745098
89
0.603795
320
0.05102
0
0
0
0
0
0
2,285
0.364318
22c1ccef20d9d7a1d41049e783b9575459b18d70
834
py
Python
services/apiRequests.py
CakeCrusher/voon-video_processing
6ecaacf4e36baa72d713a92101b445885b3d95ef
[ "MIT" ]
null
null
null
services/apiRequests.py
CakeCrusher/voon-video_processing
6ecaacf4e36baa72d713a92101b445885b3d95ef
[ "MIT" ]
null
null
null
services/apiRequests.py
CakeCrusher/voon-video_processing
6ecaacf4e36baa72d713a92101b445885b3d95ef
[ "MIT" ]
null
null
null
from github import Github def parseGithubURL(url): splitURL = url.split('/') owner = splitURL[3] repo = splitURL[4] return { "owner": owner, "repo": repo } def fetchRepoFiles(owner, repo): files = [] g = Github('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') repo = g.get_repo(f'{owner}/{repo}') contents = repo.get_contents('') while contents: file_content = contents.pop(0) if file_content.type == 'dir': contents.extend(repo.get_contents(file_content.path)) else: files.append(file_content.path) return files # parsedUrl = parseGithubURL('https://github.com/CakeCrusher/restock_emailer') # filePaths = fetchRepoFiles(parsedUrl['owner'], parsedUrl['repo']) # files = [path.split('/')[-1] for path in filePaths] # print(files)
29.785714
78
0.642686
0
0
0
0
0
0
0
0
294
0.352518
22c3df00575427d7293f54af4b1eb86f32f1ea11
995
py
Python
utils/tricks.py
HouchangX-AI/Dialog-Solution
1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a
[ "MIT" ]
3
2020-03-12T06:28:01.000Z
2020-03-27T20:15:53.000Z
utils/tricks.py
HouchangX-AI/Dialog-Solution
1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a
[ "MIT" ]
null
null
null
utils/tricks.py
HouchangX-AI/Dialog-Solution
1f68f847d9c9c4a46ef0b5fc6a78014402a4dd7a
[ "MIT" ]
2
2020-03-19T02:47:37.000Z
2021-12-14T02:26:40.000Z
#-*- coding: utf-8 -*- import codecs import random from utils.global_names import GlobalNames, get_file_path def modify_tokens(tokens): new_tokens = [] pos = 0 len_ = len(tokens) while pos < len_: if tokens[pos] == "[": if pos+2 < len_ and tokens[pos+2] == "]": token = "".join(tokens[pos:pos+3]) new_tokens.append(token) pos += 3 elif pos+3 < len_ and tokens[pos+3] == "]": if tokens[pos+2].isdigit(): tokens[pos+2] = "_digit_" token = "".join(tokens[pos:pos+4]) new_tokens.append(token) pos += 4 else: pos += 1 else: new_tokens.append(tokens[pos]) pos += 1 return new_tokens def length_weight(corpus, orders, length_limit=6): for idx, _ in enumerate(orders): if len(corpus[idx]) > length_limit: return idx return 0
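A worked example of the placeholder patterns modify_tokens looks for; the token lists are made-up inputs:

print(modify_tokens(["[", "name", "]", "hello"]))
# ['[name]', 'hello']

print(modify_tokens(["[", "name", "7", "]", "hi"]))
# ['[name_digit_]', 'hi']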
26.184211
57
0.501508
0
0
0
0
0
0
0
0
44
0.044221
22c52f6029df65fcd8fa5837d73e5ae4e6fb61e1
1,087
py
Python
test/functional/test_device.py
Jagadambass/Graph-Neural-Networks
c8f1d87f8cd67d645c2f05f370be039acf05ca52
[ "MIT" ]
null
null
null
test/functional/test_device.py
Jagadambass/Graph-Neural-Networks
c8f1d87f8cd67d645c2f05f370be039acf05ca52
[ "MIT" ]
null
null
null
test/functional/test_device.py
Jagadambass/Graph-Neural-Networks
c8f1d87f8cd67d645c2f05f370be039acf05ca52
[ "MIT" ]
null
null
null
from graphgallery.functional import device
import tensorflow as tf
import torch


def test_device():
    # how about other backend?

    # tf
    assert isinstance(device("cpu", "tf"), str)
    assert device() == 'cpu'
    assert device("cpu", "tf") == 'CPU'
    assert device("cpu", "tf") == 'cpu'
    assert device("device/cpu", "tf") == 'cpu'
    try:
        assert device("gpu", "tf") == 'GPU'
        assert device("cuda", "tf") == 'GPU'
    except RuntimeError:
        pass

    device = tf.device("cpu")
    assert device(device, "tf") == device._device_name  # ??

    # torch
    device = device("cpu", "torch")
    assert isinstance(device, torch.device) and 'cpu' in str(device)
    device = device(backend="torch")
    assert isinstance(device, torch.device) and 'cpu' in str(device)
    try:
        assert 'cuda' in str(device("gpu", "torch"))
        assert 'cuda' in str(device("cuda", "torch"))
    except RuntimeError:
        pass

    device = torch.device("cpu")
    assert device(device, "torch") == device


if __name__ == "__main__":
    test_device()
26.512195
68
0.596136
0
0
0
0
0
0
0
0
229
0.210672
22c745e9fe90945bd78c2b0b4951b89a65ce5057
3,482
py
Python
py_hanabi/card.py
krinj/hanabi-simulator
b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6
[ "MIT" ]
1
2018-09-28T00:47:52.000Z
2018-09-28T00:47:52.000Z
py_hanabi/card.py
krinj/hanabi-simulator
b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6
[ "MIT" ]
null
null
null
py_hanabi/card.py
krinj/hanabi-simulator
b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ A card (duh). """ import random import uuid from enum import Enum from typing import List from py_hanabi.settings import CARD_DECK_DISTRIBUTION __author__ = "Jakrin Juangbhanich" __email__ = "[email protected]" class Color(Enum): RED = 1 BLUE = 2 GREEN = 3 YELLOW = 4 WHITE = 5 class Card: def __init__(self, number: int, color: Color): self._number: int = number self._color: Color = color self._id: str = uuid.uuid4().hex self._hint_number_counter: int = 0 self._hint_color_counter: int = 0 # self._index_hinted: List[int] = [] # self._lone_hinted: List[bool] = [] # According to hints, these are the ones we know it is NOT. self.not_color: List[Color] = [] self.not_number: List[int] = [] def __repr__(self): hint_str = "" if self.hint_received_color: hint_str += "C" if self.hint_received_number: hint_str += "N" return f"[{self.color} {self.number} {hint_str}]" def __eq__(self, other: 'Card'): return self.color == other.color and self.number == other.number def receive_hint_number(self, number: int): if number == self.number: self._hint_number_counter += 1 else: self.not_number.append(number) def receive_hint_color(self, color: Color): if color == self.color: self._hint_color_counter += 1 else: self.not_color.append(color) def remove_hint_number(self, number: int): if number == self.number: self._hint_number_counter -= 1 else: self.not_number.pop() def remove_hint_color(self, color: Color): if color == self.color: self._hint_color_counter -= 1 else: self.not_color.pop() @property def label(self): return f"{self.number} of {self.get_color_label(self.color)}" @property def id(self) -> str: return self._id @property def key(self) -> tuple: return self.get_key(self.color, self.number) @staticmethod def get_key(c: Color, n: int) -> tuple: return c, n @property def number(self) -> int: return self._number @property def color(self) -> Color: return self._color @property def observed_color(self) -> Color: return None if not self.hint_received_color else self._color @property def observed_number(self) -> int: return None if not self.hint_received_number else self._number @property def hint_received_number(self) -> bool: return self._hint_number_counter > 0 @property def hint_received_color(self) -> bool: return self._hint_color_counter > 0 @staticmethod def generate_deck() -> List['Card']: """ Generate the starting deck for the game. """ deck: List[Card] = [] for color in Color: for i in CARD_DECK_DISTRIBUTION: card = Card(i, color) deck.append(card) random.shuffle(deck) return deck @staticmethod def get_color_label(color: Color) -> str: color_labels = { Color.BLUE: "Blue", Color.RED: "Red", Color.YELLOW: "Yellow", Color.GREEN: "Green", Color.WHITE: "White", } return color_labels[color]
24.871429
72
0.587881
3,226
0.926479
0
0
1,517
0.435669
0
0
419
0.120333
22c76b57ffb3eeb2695ac101001d7de50b9a816d
4,344
py
Python
facetools/test/testcases.py
bigsassy/django-facetools
aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c
[ "MIT" ]
2
2018-01-24T20:41:27.000Z
2019-06-27T13:24:18.000Z
facetools/test/testcases.py
bigsassy/django-facetools
aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c
[ "MIT" ]
null
null
null
facetools/test/testcases.py
bigsassy/django-facetools
aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c
[ "MIT" ]
null
null
null
import types import django.test.testcases from django.conf import settings from facetools.models import TestUser from facetools.common import _create_signed_request from facetools.test import TestUserNotLoaded from facetools.signals import sync_facebook_test_user, setup_facebook_test_client from facetools.common import _get_facetools_test_fixture_name class FacebookTestCaseMixin(object): """ TestCase which makes it possible to test views when the FacebookMiddleware and SyncFacebookUser middlewares are activated. Must use the Client attached to this object (i.e. self.client). """ facebook_test_user = None def set_client_signed_request(self, facebook_id, access_token): """ Allow code to configure the test client so it has a signed request of the specified test user for each request """ setup_facebook_test_client.send(sender=None, client=self.client, signed_request=_create_signed_request( settings.FACEBOOK_APPLICATION_SECRET_KEY, facebook_id, oauth_token=access_token)) def _pre_setup(self): if self.facebook_test_user: if type(self.facebook_test_user) not in [str, unicode]: raise Exception("facebook_test_user variable must be a string (found a %s)" % type(self.facebook_test_user)) app_name = get_app_name_from_test_case(type(self).__module__) facetools_fixture_name = _get_facetools_test_fixture_name(app_name) if not hasattr(self, 'fixtures'): self.fixtures = [] if facetools_fixture_name not in self.fixtures: self.fixtures.append(facetools_fixture_name) super(FacebookTestCaseMixin, self)._pre_setup() # Make sure anybody that needs to sync their models loaded from fixtures # has a chance to do so now that the refreshed user test data is available. try: for test_user in TestUser.objects.all(): sync_facebook_test_user.send(sender=None, test_user=test_user) self.test_user = TestUser.objects.get(name=self.facebook_test_user) self.set_client_signed_request(self.test_user.facebook_id, self.test_user.access_token) except TestUser.DoesNotExist: raise TestUserNotLoaded("Test user %s hasn't been loaded via the %s fixture (did you run sync_facebook_test_users?)" % (self.facebook_test_user, facetools_fixture_name)) else: super(FacebookTestCaseMixin, self)._pre_setup() def get_app_name_from_test_case(module_path_string): """ Gets thet Django app from the __class__ attribute of a TestCase in a Django app. class_string should look something like this: 'facetools_tests.tests.test_test_module' """ packages = module_path_string.split(".") try: tests_location = packages.index("tests") except ValueError: raise ValueError("Couldn't find tests module in %s (are you running this test from tests.py or a tests package in your Django app?)" % module_path_string) if tests_location == 0: raise ValueError("Facetools doesn't support Django app's with a name of 'tests', or it failed to find the Django app name out of %s" % module_path_string) app_name = packages[tests_location - 1] if app_name not in settings.INSTALLED_APPS: raise ValueError("Facetools didn't find %s among INSTALLED_APPS. 
(app name pulled from %s)" % (app_name, module_path_string)) return app_name # ----------------------------------------------------------------------------- # Test Cases # ----------------------------------------------------------------------------- class FacebookTransactionTestCase(FacebookTestCaseMixin, django.test.testcases.TransactionTestCase): def _pre_setup(self): super(FacebookTransactionTestCase, self)._pre_setup() class FacebookTestCase(FacebookTestCaseMixin, django.test.testcases.TestCase): def _pre_setup(self): super(FacebookTestCase, self)._pre_setup() if 'LiveServerTestCase' in dir(django.test.testcases): class FacebookLiveServerTestCase(FacebookTestCaseMixin, django.test.testcases.LiveServerTestCase): def _pre_setup(self): super(FacebookLiveServerTestCase, self)._pre_setup()
51.714286
162
0.69268
2,779
0.639733
0
0
0
0
0
0
1,352
0.311234
22c78686c8b8a763f3206d86fcbc87e20d6ea1aa
1,186
py
Python
setup.py
d2gex/distpickymodel
7acd4ffafbe592d6336d91d6e7411cd45357e41c
[ "MIT" ]
null
null
null
setup.py
d2gex/distpickymodel
7acd4ffafbe592d6336d91d6e7411cd45357e41c
[ "MIT" ]
null
null
null
setup.py
d2gex/distpickymodel
7acd4ffafbe592d6336d91d6e7411cd45357e41c
[ "MIT" ]
null
null
null
import setuptools
import distpickymodel


def get_long_desc():
    with open("README.rst", "r") as fh:
        return fh.read()


setuptools.setup(
    name="distpickymodel",
    version=distpickymodel.__version__,
    author="Dan G",
    author_email="[email protected]",
    description="A shared Mongoengine-based model library",
    long_description=get_long_desc(),
    url="https://github.com/d2gex/distpickymodel",
    # Exclude 'tests' and 'docs'
    packages=['distpickymodel'],
    python_requires='>=3.6',
    install_requires=['pymongo>=3.7.2', 'mongoengine>=0.17.0', 'six'],
    tests_require=['pytest>=4.4.0', 'PyYAML>=5.1'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
32.944444
71
0.636594
0
0
0
0
0
0
0
0
681
0.574199
22c7a70f2a69982c24184228f6ed64f2bdc7679e
1,948
py
Python
credentials_test.py
tinatasha/passwordgenerator
ad161e14779e975e98ad989c5df976ac3662f8d8
[ "MIT" ]
null
null
null
credentials_test.py
tinatasha/passwordgenerator
ad161e14779e975e98ad989c5df976ac3662f8d8
[ "MIT" ]
null
null
null
credentials_test.py
tinatasha/passwordgenerator
ad161e14779e975e98ad989c5df976ac3662f8d8
[ "MIT" ]
null
null
null
import unittest from password import Credentials class TestCredentials(unittest.TestCase): """ Class to test behaviour of the credentials class """ def setUp(self): """ Setup method that defines instructions """ self.new_credentials = Credentials("Github","Tina","blackfaffp1") def tearDown(self): """ Method that cleans up after each test """ Credentials.credentials_list = [] def test_init(self): """ Test for correct initialization """ self.assertEqual(self.new_credentials.account_name,"Github") self.assertEqual(self.new_credentials.username,"tinatasga") self.assertEqual(self.new_credentials.password,"@#tinatasha") def test_save_credentials(self): """ Test to check whether app saves account credentials """ self.new_credentials.save_credentials() self.assertEqual(len(Credentials.credentials_list),1) def test_save_multiple_credentials(self): """ Test for saving multiple credentials """ self.new_credentials.save_credentials() test_credentials = Credentials("AllFootball","Kibet","messithegoat") test_credentials.save_credentials() self.assertEqual(len(Credentials.credentials_list),2) def test_view_credentials(self): """ Test to view an account credential """ self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list) def test_delete_credentials(self): """ Test to delete account credentials """ self.new_credentials.save_credentials() test_credentials = Credentials("i","love","cats") test_credentials.save_credentials() self.new_credentials.delete_credentials() self.assertEqual(len(Credentials.credentials_list),1) if __name__ == '__main__': unittest.main()
31.419355
88
0.658624
413
0.212012
0
0
0
0
0
0
611
0.313655
22c82577ce9bb70304bc0ff3dee27fa81b62e25c
564
py
Python
homework_08/calc_fitness.py
ufpa-organization-repositories/evolutionary-computing
e16786f9619e2b357b94ab91ff3a7b352e6a0d92
[ "MIT" ]
null
null
null
homework_08/calc_fitness.py
ufpa-organization-repositories/evolutionary-computing
e16786f9619e2b357b94ab91ff3a7b352e6a0d92
[ "MIT" ]
null
null
null
homework_08/calc_fitness.py
ufpa-organization-repositories/evolutionary-computing
e16786f9619e2b357b94ab91ff3a7b352e6a0d92
[ "MIT" ]
null
null
null
def calc_fitness(pop):
    from to_decimal import to_decimal
    from math import sin, sqrt

    for index, elem in enumerate(pop):
        # only assign a fitness to chromosomes that do not have one yet
        # print(elem[0], elem[1])
        x = to_decimal(elem[0])
        y = to_decimal(elem[1])
        # x = elem[0]
        # y = elem[1]

        f6 = 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2
        pop[index] = [f6, elem]
    return 0


# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
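The expression above is the Schaffer F6 benchmark function. As a standalone sanity check (using plain floats, so the record's to_decimal conversion is skipped), the global optimum at (0, 0) should evaluate to 1.0:

from math import sin, sqrt

def f6(x, y):
    # Same expression as in the record, evaluated on ordinary floats.
    return 0.5 - ((sin(sqrt(x**2 + y**2)))**2 - 0.5) / (1 + 0.001 * (x**2 + y**2))**2

print(f6(0, 0))    # 1.0, the maximum
print(f6(-3, 1))   # a smaller value, as expected away from the optimum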
25.636364
87
0.546099
0
0
0
0
0
0
0
0
194
0.342756
22c8357e530d1406b6c30aa5078c53db167737b2
128
py
Python
pichetprofile/__init__.py
jamenor/pichetprofile
6633ea6eaa7473af9e10f34f6a19428c2db92465
[ "MIT" ]
2
2021-04-20T01:54:40.000Z
2022-01-31T10:00:04.000Z
pichetprofile/__init__.py
jamenor/pichetprofile
6633ea6eaa7473af9e10f34f6a19428c2db92465
[ "MIT" ]
null
null
null
pichetprofile/__init__.py
jamenor/pichetprofile
6633ea6eaa7473af9e10f34f6a19428c2db92465
[ "MIT" ]
2
2021-12-12T08:17:42.000Z
2022-02-13T21:04:44.000Z
# -*- coding: utf-8 -*-
from oopschool.school import Student,Tesla,SpecialStudent,Teacher
from oopschool.newschool import Test
42.666667
66
0.78125
0
0
0
0
0
0
0
0
24
0.1875
22cafbcbf0f7a34d5ff23e6e75d1650c542e7866
450
py
Python
leetcode/group2/461.py
HPluseven/playground
78e363b5b376af3945bcb55a13d6a96b7c151a1b
[ "MIT" ]
1
2021-03-26T05:51:08.000Z
2021-03-26T05:51:08.000Z
leetcode/group2/461.py
HPluseven/playground
78e363b5b376af3945bcb55a13d6a96b7c151a1b
[ "MIT" ]
null
null
null
leetcode/group2/461.py
HPluseven/playground
78e363b5b376af3945bcb55a13d6a96b7c151a1b
[ "MIT" ]
null
null
null
class Solution:
    def hammingDistance(self, x: int, y: int) -> int:
        xor = x ^ y
        distance = 0
        while xor:
            if xor & 1:
                distance += 1
            xor = xor >> 1
        return distance


class Solution:
    def hammingDistance(self, x: int, y: int) -> int:
        xor = x ^ y
        distance = 0
        while xor:
            distance += 1
            xor = xor & (xor-1)
        return distance
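A quick check of the two variants above (either class definition gives the same result; this snippet assumes one of them is in scope):

sol = Solution()
assert sol.hammingDistance(1, 4) == 2   # 0b001 ^ 0b100 = 0b101 -> two set bits
assert sol.hammingDistance(3, 1) == 1   # 0b011 ^ 0b001 = 0b010 -> one set bit
print("hammingDistance checks passed")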
21.428571
53
0.455556
446
0.991111
0
0
0
0
0
0
0
0
22cc300c5aa21f713c2ef3f3b60722cc7d238f97
1,163
py
Python
rdl/data_sources/DataSourceFactory.py
pageuppeople-opensource/relational-data-loader
0bac7036d65636d06eacca4e68e09d6e1c506ea4
[ "MIT" ]
2
2019-03-11T12:45:23.000Z
2019-04-05T05:22:43.000Z
rdl/data_sources/DataSourceFactory.py
pageuppeople-opensource/relational-data-loader
0bac7036d65636d06eacca4e68e09d6e1c506ea4
[ "MIT" ]
5
2019-02-08T03:23:25.000Z
2019-04-11T01:29:45.000Z
rdl/data_sources/DataSourceFactory.py
PageUpPeopleOrg/relational-data-loader
0bac7036d65636d06eacca4e68e09d6e1c506ea4
[ "MIT" ]
1
2019-03-04T04:08:49.000Z
2019-03-04T04:08:49.000Z
import logging

from rdl.data_sources.MsSqlDataSource import MsSqlDataSource
from rdl.data_sources.AWSLambdaDataSource import AWSLambdaDataSource


class DataSourceFactory(object):
    def __init__(self, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.sources = [MsSqlDataSource, AWSLambdaDataSource]

    def create_source(self, connection_string):
        for source in self.sources:
            if source.can_handle_connection_string(connection_string):
                self.logger.info(
                    f"Found handler '{source}' for given connection string."
                )
                return source(connection_string)
        raise RuntimeError(
            "There are no data sources that can handle this connection string"
        )

    def is_prefix_supported(self, connection_string):
        for source in self.sources:
            if source.can_handle_connection_string(connection_string):
                return True
        return False

    def get_supported_source_prefixes(self):
        return list(
            map(lambda source: source.get_connection_string_prefix(), self.sources)
        )
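A minimal usage sketch for the factory above, assuming the record's rdl package is importable; the connection string shown is a made-up placeholder, not a prefix confirmed by the record:

from rdl.data_sources.DataSourceFactory import DataSourceFactory

factory = DataSourceFactory()
conn_str = "mssql+pyodbc://user:pass@server/db"   # hypothetical connection string

# Ask the factory whether any registered source claims this prefix before creating one.
if factory.is_prefix_supported(conn_str):
    source = factory.create_source(conn_str)
else:
    print("Supported prefixes:", factory.get_supported_source_prefixes())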
35.242424
83
0.674979
1,015
0.872743
0
0
0
0
0
0
122
0.104901
22cc9cf5c82866cdbb6751a30f5964a624debd38
2,753
py
Python
ch05/ch05-02-timeseries.py
alexmalins/kagglebook
260f6634b6bbaa94c2e989770e75dc7101f5c614
[ "BSD-3-Clause" ]
13
2021-02-20T08:57:28.000Z
2022-03-31T12:47:08.000Z
ch05/ch05-02-timeseries.py
Tharunkumar01/kagglebook
260f6634b6bbaa94c2e989770e75dc7101f5c614
[ "BSD-3-Clause" ]
null
null
null
ch05/ch05-02-timeseries.py
Tharunkumar01/kagglebook
260f6634b6bbaa94c2e989770e75dc7101f5c614
[ "BSD-3-Clause" ]
2
2021-07-15T03:56:39.000Z
2021-07-29T00:53:54.000Z
# --------------------------------- # Prepare the data etc. # ---------------------------------- import numpy as np import pandas as pd # train_x is the training data, train_y is the target values, and test_x is the test data # stored in pandas DataFrames and Series (numpy arrays also used) train = pd.read_csv('../input/sample-data/train_preprocessed.csv') train_x = train.drop(['target'], axis=1) train_y = train['target'] test_x = pd.read_csv('../input/sample-data/test_preprocessed.csv') # As time-series data assume a period variable is set that changes with time train_x['period'] = np.arange(0, len(train_x)) // (len(train_x) // 4) train_x['period'] = np.clip(train_x['period'], 0, 3) test_x['period'] = 4 # ----------------------------------- # Hold-out method for time-series data # ----------------------------------- # Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data) # Here for within the training data period 3 is used for validation and periods 0 to 2 are used for training is_tr = train_x['period'] < 3 is_va = train_x['period'] == 3 tr_x, va_x = train_x[is_tr], train_x[is_va] tr_y, va_y = train_y[is_tr], train_y[is_va] # ----------------------------------- # Cross validation for time-series data (use method that follows time) # ----------------------------------- # Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data) # Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training va_period_list = [1, 2, 3] for va_period in va_period_list: is_tr = train_x['period'] < va_period is_va = train_x['period'] == va_period tr_x, va_x = train_x[is_tr], train_x[is_va] tr_y, va_y = train_y[is_tr], train_y[is_va] # (For reference) Using TimeSeriesSplit() function is difficult as only the order of the data can be used from sklearn.model_selection import TimeSeriesSplit tss = TimeSeriesSplit(n_splits=4) for tr_idx, va_idx in tss.split(train_x): tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx] tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx] # ----------------------------------- # Cross validation for time-series data (method to simply partition by time) # ----------------------------------- # Partition using the period variable as the basis (0 to 3 are the training data, 4 is the test data) # Periods 1, 2 and 3 are each used for cross-validation, and the preceding periods are used for training va_period_list = [0, 1, 2, 3] for va_period in va_period_list: is_tr = train_x['period'] != va_period is_va = train_x['period'] == va_period tr_x, va_x = train_x[is_tr], train_x[is_va] tr_y, va_y = train_y[is_tr], train_y[is_va]
43.698413
108
0.653106
0
0
0
0
0
0
0
0
1,639
0.595351
22cd87f92115b6affd305877641e7e519dbd0eb4
476
py
Python
server/WitClient.py
owo/jitalk
2db2782282a2302b4cf6049030822734a6856982
[ "MIT" ]
1
2020-06-22T14:28:41.000Z
2020-06-22T14:28:41.000Z
server/WitClient.py
owo/jitalk
2db2782282a2302b4cf6049030822734a6856982
[ "MIT" ]
null
null
null
server/WitClient.py
owo/jitalk
2db2782282a2302b4cf6049030822734a6856982
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import wit
import json


class WitClient(object):
    """docstring for WitClient"""

    _access_token = 'NBPOVLY7T6W3KOUEML2GXOWODH3LPWPD'

    def __init__(self):
        wit.init()

    def text_query(self, text):
        res = json.loads(wit.text_query(text, WitClient._access_token))
        return res["outcomes"]

    def close_connection(self):
        wit.close()


if __name__ == "__main__":
    print "You ran the Wit client, nothing will happen. Exiting..."
20.695652
65
0.707983
309
0.64916
0
0
0
0
0
0
184
0.386555
22ce1c9c1c8f77ccd4520e195e541c2a19150619
268
py
Python
HackerRank/Python/Easy/E0036.py
Mohammed-Shoaib/HackerRank-Problems
ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b
[ "MIT" ]
54
2019-05-13T12:13:09.000Z
2022-02-27T02:59:00.000Z
HackerRank/Python/Easy/E0036.py
Mohammed-Shoaib/HackerRank-Problems
ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b
[ "MIT" ]
2
2020-10-02T07:16:43.000Z
2020-10-19T04:36:19.000Z
HackerRank/Python/Easy/E0036.py
Mohammed-Shoaib/HackerRank-Problems
ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b
[ "MIT" ]
20
2020-05-26T09:48:13.000Z
2022-03-18T15:18:27.000Z
# Problem Statement: https://www.hackerrank.com/challenges/itertools-combinations-with-replacement/problem

from itertools import combinations_with_replacement

S, k = input().split()

for comb in combinations_with_replacement(sorted(S), int(k)):
    print(''.join(comb))
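For reference, the same idea with a hard-coded input instead of stdin; the sample string is illustrative, not taken from the record:

from itertools import combinations_with_replacement

# Equivalent to entering "HACK 2" on stdin in the snippet above.
S, k = "HACK", 2
for comb in combinations_with_replacement(sorted(S), int(k)):
    print(''.join(comb))
# Prints: AA AC AH AK CC CH CK HH HK KK (one combination per line)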
33.5
106
0.791045
0
0
0
0
0
0
0
0
108
0.402985
22cecf207eb3150281c5d9ddc72a0ab1531e7bdb
5,341
py
Python
visual_genome/models.py
hayyubi/visual-genome-driver
412223bf1552b1927fb1219cfcf90dcd2599bf34
[ "MIT" ]
null
null
null
visual_genome/models.py
hayyubi/visual-genome-driver
412223bf1552b1927fb1219cfcf90dcd2599bf34
[ "MIT" ]
null
null
null
visual_genome/models.py
hayyubi/visual-genome-driver
412223bf1552b1927fb1219cfcf90dcd2599bf34
[ "MIT" ]
null
null
null
""" Visual Genome Python API wrapper, models """ class Image: """ Image. ID int url hyperlink string width int height int """ def __init__(self, id, url, width, height, coco_id, flickr_id): self.id = id self.url = url self.width = width self.height = height self.coco_id = coco_id self.flickr_id = flickr_id def __str__(self): return 'id: %d, coco_id: %d, flickr_id: %d, width: %d, url: %s' \ % (self.id, -1 if self.coco_id is None else self.coco_id, -1 if self.flickr_id is None else self.flickr_id, self.width, self.url) def __repr__(self): return str(self) class Region: """ Region. image int phrase string x int y int width int height int """ def __init__(self, id, image, phrase, x, y, width, height): self.id = id self.image = image self.phrase = phrase self.x = x self.y = y self.width = width self.height = height def __str__(self): stat_str = 'id: {0}, x: {1}, y: {2}, width: {3},' \ 'height: {4}, phrase: {5}, image: {6}' return stat_str.format(self.id, self.x, self.y, self.width, self.height, self.phrase, self.image.id) def __repr__(self): return str(self) class Graph: """ Graphs contain objects, relationships and attributes image Image bboxes Object array relationships Relationship array attributes Attribute array """ def __init__(self, image, objects, relationships, attributes): self.image = image self.objects = objects self.relationships = relationships self.attributes = attributes class Object: """ Objects. id int x int y int width int height int names string array synsets Synset array """ def __init__(self, id, x, y, width, height, names, synsets): self.id = id self.x = x self.y = y self.width = width self.height = height self.names = names[0] self.synsets = synsets self.bbox = [x, y, width, height] def __str__(self): name = self.names[0] if len(self.names) != 0 else 'None' return '%s' % (name) def __repr__(self): return str(self) class Relationship: """ Relationships. Ex, 'man - jumping over - fire hydrant'. subject int predicate string object int rel_canon Synset """ def __init__(self, id, subject, predicate, object, synset): self.id = id self.subject = subject self.predicate = predicate self.object = object self.synset = synset def __str__(self): return "{0}: {1} {2} {3}".format(self.id, self.subject, self.predicate, self.object) def __repr__(self): return str(self) class Attribute: """ Attributes. Ex, 'man - old'. subject Object attribute string synset Synset """ def __init__(self, id, subject, attribute, synset): self.id = id self.subject = subject self.attribute = attribute self.synset = synset def __str__(self): return "%d: %s is %s" % (self.id, self.subject, self.attribute) def __repr__(self): return str(self) class QA: """ Question Answer Pairs. ID int image int question string answer string q_objects QAObject array a_objects QAObject array """ def __init__(self, id, image, question, answer, question_objects, answer_objects): self.id = id self.image = image self.question = question self.answer = answer self.q_objects = question_objects self.a_objects = answer_objects def __str__(self): return 'id: %d, image: %d, question: %s, answer: %s' \ % (self.id, self.image.id, self.question, self.answer) def __repr__(self): return str(self) class QAObject: """ Question Answer Objects are localized in the image and refer to a part of the question text or the answer text. 
start_idx int end_idx int name string synset_name string synset_definition string """ def __init__(self, start_idx, end_idx, name, synset): self.start_idx = start_idx self.end_idx = end_idx self.name = name self.synset = synset def __repr__(self): return str(self) class Synset: """ Wordnet Synsets. name string definition string """ def __init__(self, name, definition): self.name = name self.definition = definition def __str__(self): return '{} - {}'.format(self.name, self.definition) def __repr__(self): return str(self)
24.058559
74
0.52874
5,265
0.98577
0
0
0
0
0
0
1,822
0.341135
22cf451d04e0bf782f9148035e8ed296f046dac4
2,152
py
Python
python-scripts/plot_delay.py
GayashanNA/my-scripts
d865e828c833d6b54c787ce9475da512f8488278
[ "Apache-2.0" ]
null
null
null
python-scripts/plot_delay.py
GayashanNA/my-scripts
d865e828c833d6b54c787ce9475da512f8488278
[ "Apache-2.0" ]
null
null
null
python-scripts/plot_delay.py
GayashanNA/my-scripts
d865e828c833d6b54c787ce9475da512f8488278
[ "Apache-2.0" ]
null
null
null
import csv import matplotlib.pyplot as plt import time PLOT_PER_WINDOW = False WINDOW_LENGTH = 60000 BINS = 1000 delay_store = {} perwindow_delay_store = {} plotting_delay_store = {} filename = "output-large.csv" # filename = "output.csv" # filename = "output-medium.csv" # filename = "output-small.csv" # filename = "output-tiny.csv" with open(filename, "rU") as dataFile: csvreader = csv.reader(dataFile) for row in csvreader: if len(row) > 2 and str(row[0]).isdigit(): delay_store[long(row[1])] = long(row[2]) window_begin = min(delay_store.keys()) window_end = max(delay_store.keys()) if PLOT_PER_WINDOW: window_end = window_begin + WINDOW_LENGTH # find the time delays that are within the window of choice for (tapp, delay) in delay_store.iteritems(): if window_begin <= tapp <= window_end: perwindow_delay_store[tapp] = delay plotting_delay_store = perwindow_delay_store else: plotting_delay_store = delay_store # the histogram of the data n, bins, patches = plt.hist(plotting_delay_store.values(), BINS, histtype='stepfilled', normed=True, cumulative=False, facecolor='blue', alpha=0.9) # plt.axhline(y=0.95, color='red', label='0.95') max_delay = max(plotting_delay_store.values()) min_delay = min(plotting_delay_store.values()) count = len(plotting_delay_store.values()) # format epoch time to date time to be shown in the plot figure window_begin_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_begin / 1000)) window_end_in_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(window_end / 1000)) title = "Window begin: %s\n" % window_begin_in_datetime title += "Window end: %s\n" % window_end_in_datetime # title += "Window length: %dms\n" % WINDOW_LENGTH title += "Window length: ~%dmins\n" % ((window_end - window_begin)/60000) title += "Maximum delay: %dms\n" % max_delay title += "Minimum delay: %dms\n" % min_delay title += "Count: %d" % count # start plotting plt.xlabel('Delay (ms)') plt.ylabel('Probability') plt.grid(True) plt.legend() plt.suptitle(title) plt.subplots_adjust(top=0.8) plt.show()
33.107692
98
0.703067
0
0
0
0
0
0
0
0
605
0.281134
22cf943746c3603f630cb274d2c1d26e36acc1fd
3,472
py
Python
python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py
peterthorpe5/Methods_M.cerasi_R.padi_genome_assembly
c6cb771afaf40f5def47e33ff11cd8867ec528e0
[ "MIT" ]
4
2019-04-01T02:08:21.000Z
2022-02-04T08:37:47.000Z
python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py
peterthorpe5/Methods_M.cerasi_R.padi_genome_assembly
c6cb771afaf40f5def47e33ff11cd8867ec528e0
[ "MIT" ]
1
2018-09-30T00:29:43.000Z
2018-10-01T07:51:16.000Z
python_scripts/BUSCO_phylogenetics/rename_all_fa_seq.py
peterthorpe5/Methods_M.cerasi_R.padi_genome_assembly
c6cb771afaf40f5def47e33ff11cd8867ec528e0
[ "MIT" ]
1
2019-12-05T09:04:38.000Z
2019-12-05T09:04:38.000Z
#!/usr/bin/env python # author: Peter Thorpe September 2015. The James Hutton Insitute, Dundee, UK. # title rename single copy busco genes from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from Bio import SeqIO import os from sys import stdin,argv import sys from optparse import OptionParser ######################################################################## # functions def parse_busco_file(busco): """this is a function to open busco full ouput and get a list of duplicated genes. This list is required so we can ignore these genes later. Takes file, return list""" duplicated_list = [] with open(busco) as handle: for line in handle: if not line.strip(): continue # if the last line is blank if line.startswith("#"): continue if not line: print ("your file is empty") return False line_info = line.rstrip().split("\t") # first element Busco_name = line_info[0] # second element status = line_info[1] if status == "Duplicated" or status == "Fragmented": duplicated_list.append(Busco_name) return duplicated_list def reformat_as_fasta(filename,prefix,outfile): "this function re-write a file as a fasta file" f= open(outfile, 'w') fas = open(filename, "r") for line in fas: if not line.strip(): continue # if the last line is blank if line.startswith("#"): continue if not line: return False if not line.startswith(">"): seq = line title = ">" + prefix + "_" + filename.replace("BUSCOa", "").split(".fas")[0] data = "%s\n%s\n" %(title, seq) f.write(data) f.close() if "-v" in sys.argv or "--version" in sys.argv: print "v0.0.1" sys.exit(0) usage = """Use as follows: converts $ python renaem....py -p Mce -b full_table_BUSCO_output script to walk through all files in a folder and rename the seq id to start with Prefix. Used for Busco output. give it the busco full ouput table. The script will only return complete single copy gene. Duplicate gene will be ignored. """ parser = OptionParser(usage=usage) parser.add_option("-p", "--prefix", dest="prefix", default=None, help="Output filename", metavar="FILE") parser.add_option("-b", "--busco", dest="busco", default=None, help="full_table_*_BUSCO output from BUSCO", metavar="FILE") (options, args) = parser.parse_args() prefix = options.prefix busco = options.busco # Run as script if __name__ == '__main__': #call function to get a list of dupicated gene. #these genes will be ignored duplicated_list = parse_busco_file(busco) #iterate through the dir for filename in os.listdir("."): count = 1 if not filename.endswith(".fas"): continue #filter out the ones we dont want if filename.split(".fa")[0] in duplicated_list: continue out_file = "../"+prefix+filename out_file = out_file.replace("BUSCOa", "") #out_file = "../"+filename try: #print filename reformat_as_fasta(filename, prefix, out_file) except: ValueError continue
26.707692
84
0.579781
0
0
0
0
0
0
0
0
1,313
0.378168
22cfe37b118c380f98097dbe5e6dfaa75be99d71
427
py
Python
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py
afeld/api-snippets
d77456c387c9471d36aa949e2cf785d8a534a370
[ "MIT" ]
3
2020-05-05T10:01:02.000Z
2021-02-06T14:23:13.000Z
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py
afeld/api-snippets
d77456c387c9471d36aa949e2cf785d8a534a370
[ "MIT" ]
null
null
null
video/rest/compositionhooks/delete-hook/delete-hook.6.x.py
afeld/api-snippets
d77456c387c9471d36aa949e2cf785d8a534a370
[ "MIT" ]
null
null
null
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = 'SKXXXX'
api_key_secret = 'your_api_key_secret'

client = Client(api_key_sid, api_key_secret)

did_delete = client.video\
    .compositionHooks('HKXXXX')\
    .delete()

if(did_delete):
    print('Composition removed')
28.466667
72
0.709602
0
0
0
0
0
0
0
0
187
0.437939
22d061bf4dd94ca94a7f507ce7fe9f9a517f47a3
274
py
Python
global_info.py
AkagiYui/AzurLaneTool
f00fa6e5c6371db72ee399d7bd178a81f39afd8b
[ "Apache-2.0" ]
null
null
null
global_info.py
AkagiYui/AzurLaneTool
f00fa6e5c6371db72ee399d7bd178a81f39afd8b
[ "Apache-2.0" ]
null
null
null
global_info.py
AkagiYui/AzurLaneTool
f00fa6e5c6371db72ee399d7bd178a81f39afd8b
[ "Apache-2.0" ]
null
null
null
from time import sleep

debug_mode = False
time_to_exit = False
exiting = False
exit_code = 0


def get_debug_mode():
    return debug_mode


def trigger_exit(_exit_code):
    global time_to_exit, exit_code
    exit_code = _exit_code
    time_to_exit = True
    sleep(0.1)
14.421053
34
0.729927
0
0
0
0
0
0
0
0
0
0
22d06d326dbc942db8f36ca27ac8dc094685d70b
6,924
py
Python
advesarial_text/data/data_utils_test.py
slowy07/tensorflow-model-research
48ba4ba6240452eb3e3350fe7099f2b045acc530
[ "MIT" ]
null
null
null
advesarial_text/data/data_utils_test.py
slowy07/tensorflow-model-research
48ba4ba6240452eb3e3350fe7099f2b045acc530
[ "MIT" ]
null
null
null
advesarial_text/data/data_utils_test.py
slowy07/tensorflow-model-research
48ba4ba6240452eb3e3350fe7099f2b045acc530
[ "MIT" ]
null
null
null
from __future__ import absoulte_import from __future__ import division from __future__ import print_function import tensorflow as tf from data import data_utils data = data_utils class SequenceWrapperTest(tf.test.TestCase): def testDefaultTimesteps(self): seq = data.SequenceWrapper() t1 = seq.add_timestep() _ = seq.add_timestep() self.assertEqual(len(seq), 2) self.assertEqual(t1.weight, 0.0) self.assertEqual(t1.label, 0) self.assertEqual(t1.token, 0) def testSettersAndGetters(self): ts = data.SequenceWrapper().add_timestep() ts.set_token(3) ts.set_label(4) ts.set_weight(2.0) self.assertEqual(ts.token, 3) self.assertEqual(ts.label, 4) self.assertEqual(ts.weight, 2.0) def testTimestepIteration(self): seq = data.SequenceWrapper() seq.add_timestep().set_token(0) seq.add_timestep().set_token(1) seq.add_timestep().set_token(2) for i, ts in enumerate(seq): self.assertEqual(ts.token, i) def testFillsSequenceExampleCorrectly(self): seq = data.SequenceWrapper() seq.add_timestep().set_token(1).set_label(2).set_weight(3.0) seq.add_timestep().set_token(10).set_label(20).set_weight(30.0) seq_ex = seq.seq fl = seq_ex.feature_lists.feature_list fl_token = fl[data.SequenceWrapper.F_TOKEN_ID].feature fl_label = fl[data.SequenceWrapper.F_LABEL].feature fl_weight = fl[data.SequenceWrapper.F_WEIGHT].feature _ = [self.assertEqual(len(f), 2) for f in [fl_token, fl_label, fl_weight]] self.assertAllEqual([f.int64_list.value[0] for f in fl_token], [1, 10]) self.assertAllEqual([f.int64_list.value[0] for f in fl_label], [2, 20]) self.assertAllEqual([f.float_list.value[0] for f in fl_weight], [3.0, 30.0]) class DataUtilsTest(tf.test.TestCase): def testSplitByPunct(self): output = data.split_by_punct( "hello! world, i've been\nwaiting\tfor\ryou for.a long time" ) expected = [ "hello", "world", "i", "ve", "been", "waiting", "for", "you", "for", "a", "long", "time", ] self.assertListEqual(output, expected) def _buildDummySequence(self): seq = data.SequenceWrapper() for i in range(10): seq.add_timestep().set_token(i) return seq def testBuildLMSeq(self): seq = self._buildDummySequence() lm_seq = data.build_lm_sequence(seq) for i, ts in enumerate(lm_seq): # For end of sequence, the token and label should be same, and weight # should be 0.0. if i == len(lm_seq) - 1: self.assertEqual(ts.token, i) self.assertEqual(ts.label, i) self.assertEqual(ts.weight, 0.0) else: self.assertEqual(ts.token, i) self.assertEqual(ts.label, i + 1) self.assertEqual(ts.weight, 1.0) def testBuildSAESeq(self): seq = self._buildDummySequence() sa_seq = data.build_seq_ae_sequence(seq) self.assertEqual(len(sa_seq), len(seq) * 2 - 1) # Tokens should be sequence twice, minus the EOS token at the end for i, ts in enumerate(sa_seq): self.assertEqual(ts.token, seq[i % 10].token) # Weights should be len-1 0.0's and len 1.0's. 
for i in range(len(seq) - 1): self.assertEqual(sa_seq[i].weight, 0.0) for i in range(len(seq) - 1, len(sa_seq)): self.assertEqual(sa_seq[i].weight, 1.0) # Labels should be len-1 0's, and then the sequence for i in range(len(seq) - 1): self.assertEqual(sa_seq[i].label, 0) for i in range(len(seq) - 1, len(sa_seq)): self.assertEqual(sa_seq[i].label, seq[i - (len(seq) - 1)].token) def testBuildLabelSeq(self): seq = self._buildDummySequence() eos_id = len(seq) - 1 label_seq = data.build_labeled_sequence(seq, True) for i, ts in enumerate(label_seq[:-1]): self.assertEqual(ts.token, i) self.assertEqual(ts.label, 0) self.assertEqual(ts.weight, 0.0) final_timestep = label_seq[-1] self.assertEqual(final_timestep.token, eos_id) self.assertEqual(final_timestep.label, 1) self.assertEqual(final_timestep.weight, 1.0) def testBuildBidirLabelSeq(self): seq = self._buildDummySequence() reverse_seq = data.build_reverse_sequence(seq) bidir_seq = data.build_bidirectional_seq(seq, reverse_seq) label_seq = data.build_labeled_sequence(bidir_seq, True) for (i, ts), j in zip(enumerate(label_seq[:-1]), reversed(range(len(seq) - 1))): self.assertAllEqual(ts.tokens, [i, j]) self.assertEqual(ts.label, 0) self.assertEqual(ts.weight, 0.0) final_timestep = label_seq[-1] eos_id = len(seq) - 1 self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id]) self.assertEqual(final_timestep.label, 1) self.assertEqual(final_timestep.weight, 1.0) def testReverseSeq(self): seq = self._buildDummySequence() reverse_seq = data.build_reverse_sequence(seq) for i, ts in enumerate(reversed(reverse_seq[:-1])): self.assertEqual(ts.token, i) self.assertEqual(ts.label, 0) self.assertEqual(ts.weight, 0.0) final_timestep = reverse_seq[-1] eos_id = len(seq) - 1 self.assertEqual(final_timestep.token, eos_id) self.assertEqual(final_timestep.label, 0) self.assertEqual(final_timestep.weight, 0.0) def testBidirSeq(self): seq = self._buildDummySequence() reverse_seq = data.build_reverse_sequence(seq) bidir_seq = data.build_bidirectional_seq(seq, reverse_seq) for (i, ts), j in zip(enumerate(bidir_seq[:-1]), reversed(range(len(seq) - 1))): self.assertAllEqual(ts.tokens, [i, j]) self.assertEqual(ts.label, 0) self.assertEqual(ts.weight, 0.0) final_timestep = bidir_seq[-1] eos_id = len(seq) - 1 self.assertAllEqual(final_timestep.tokens, [eos_id, eos_id]) self.assertEqual(final_timestep.label, 0) self.assertEqual(final_timestep.weight, 0.0) def testLabelGain(self): seq = self._buildDummySequence() label_seq = data.build_labeled_sequence(seq, True, label_gain=True) for i, ts in enumerate(label_seq): self.assertEqual(ts.token, i) self.assertEqual(ts.label, 1) self.assertNear(ts.weight, float(i) / (len(seq) - 1), 1e-3) if __name__ == "__main__": tf.test.main()
36.0625
88
0.608897
6,687
0.965771
0
0
0
0
0
0
383
0.055315
22d0777e1db496026b52bc09fc63aa81f467b3e2
105
py
Python
headlesspreview/apps.py
arush15june/wagtail-torchbox
c4d06e096c72bd8007975dc016133024f9d27fab
[ "MIT" ]
null
null
null
headlesspreview/apps.py
arush15june/wagtail-torchbox
c4d06e096c72bd8007975dc016133024f9d27fab
[ "MIT" ]
null
null
null
headlesspreview/apps.py
arush15june/wagtail-torchbox
c4d06e096c72bd8007975dc016133024f9d27fab
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class HeadlesspreviewConfig(AppConfig):
    name = 'headlesspreview'
17.5
39
0.790476
68
0.647619
0
0
0
0
0
0
17
0.161905
22d0f53b1d93eab616a976b47567e50595d96288
3,546
py
Python
LipSDP/solve_sdp.py
revbucket/LipSDP
39f2ffe65cb656440e055e4e86a750bc7e77e357
[ "MIT" ]
1
2021-07-21T12:19:01.000Z
2021-07-21T12:19:01.000Z
LipSDP/solve_sdp.py
revbucket/LipSDP
39f2ffe65cb656440e055e4e86a750bc7e77e357
[ "MIT" ]
null
null
null
LipSDP/solve_sdp.py
revbucket/LipSDP
39f2ffe65cb656440e055e4e86a750bc7e77e357
[ "MIT" ]
null
null
null
import argparse import numpy as np import matlab.engine from scipy.io import savemat import os from time import time def main(args): start_time = time() eng = matlab.engine.start_matlab() eng.addpath(os.path.join(file_dir, 'matlab_engine')) eng.addpath(os.path.join(file_dir, r'matlab_engine/weight_utils')) eng.addpath(os.path.join(file_dir, r'matlab_engine/error_messages')) eng.addpath(os.path.join(file_dir, r'examples/saved_weights')) network = { 'alpha': matlab.double([args.alpha]), 'beta': matlab.double([args.beta]), 'weight_path': args.weight_path, } lip_params = { 'formulation': args.form, 'split': matlab.logical([args.split]), 'parallel': matlab.logical([args.parallel]), 'verbose': matlab.logical([args.verbose]), 'split_size': matlab.double([args.split_size]), 'num_neurons': matlab.double([args.num_neurons]), 'num_workers': matlab.double([args.num_workers]), 'num_dec_vars': matlab.double([args.num_decision_vars]) } L = eng.solve_LipSDP(network, lip_params, nargout=1) if lip_params['verbose']: print(f'LipSDP-{args.form.capitalize()} gives a Lipschitz constant of %.03f' % L) print('Total time %.03f' % (time() - start_time)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--form', default='neuron', const='neuron', nargs='?', choices=('neuron', 'network', 'layer', 'network-rand', 'network-dec-vars'), help='LipSDP formulation to use') parser.add_argument('-v', '--verbose', action='store_true', help='prints CVX output from solve if supplied') parser.add_argument('--alpha', type=float, default=0, nargs=1, help='lower bound for slope restriction bound') parser.add_argument('--beta', type=float, default=1, nargs=1, help='lower bound for slope restriction bound') parser.add_argument('--num-neurons', type=int, default=100, nargs=1, help='number of neurons to couple for LipSDP-Network-rand formulation') parser.add_argument('--split', action='store_true', help='splits network into subnetworks for more efficient solving if supplied') parser.add_argument('--parallel', action='store_true', help='parallelizes solving for split formulations if supplied') parser.add_argument('--split-size', type=int, default=2, nargs=1, help='number of layers in each subnetwork for splitting formulations') parser.add_argument('--num-workers', type=int, default=0, nargs=1, help='number of workers for parallelization of splitting formulations') parser.add_argument('--num-decision-vars', type=int, default=10, nargs=1, help='specify number of decision variables to be used for LipSDP') parser.add_argument('--weight-path', type=str, required=True, nargs=1, help='path of weights corresponding to trained neural network model') args = parser.parse_args() if args.parallel is True and args.num_workers[0] < 1: raise ValueError('When you use --parallel, --num-workers must be an integer >= 1.') if args.split is True and args.split_size[0] < 1: raise ValueError('When you use --split, --split-size must be an integer >= 1.') main(args)
30.834783
91
0.631416
0
0
0
0
0
0
0
0
1,299
0.366328
22d1e9715d6acd537e633072609ca037ec95ec12
805
py
Python
stockprophet/__init__.py
chihyi-liao/stockprophet
891c91b2a446e3bd30bb56b88be3874d7dda1b8d
[ "BSD-3-Clause" ]
1
2021-11-15T13:07:19.000Z
2021-11-15T13:07:19.000Z
stockprophet/__init__.py
chihyi-liao/stockprophet
891c91b2a446e3bd30bb56b88be3874d7dda1b8d
[ "BSD-3-Clause" ]
null
null
null
stockprophet/__init__.py
chihyi-liao/stockprophet
891c91b2a446e3bd30bb56b88be3874d7dda1b8d
[ "BSD-3-Clause" ]
1
2021-09-15T09:25:39.000Z
2021-09-15T09:25:39.000Z
from stockprophet.cli import entry_point
from stockprophet.crawler import (
    init_stock_type, init_stock_category
)
from stockprophet.db import init_db
from .utils import read_db_settings


def preprocessing() -> bool:
    result = False
    # noinspection PyBroadException
    try:
        db_config = read_db_settings()
        if not db_config:
            print("config.ini 找不到 'database' 區段")
            return result
    except Exception:
        print("無法讀取或解析config.ini")
        return result

    # noinspection PyBroadException
    try:
        init_db(db_config)
        init_stock_category()
        init_stock_type()
        result = True
    except Exception as e:
        print("無法連線資料庫: %s" % (str(e), ))
    return result


def main():
    if preprocessing():
        entry_point()
22.361111
49
0.645963
0
0
0
0
0
0
0
0
162
0.192171
22d210de31752ef0c139ecd0a2fcb3a182d83a41
216
py
Python
2021/day_25.py
mpcjanssen/Advent-of-Code
06c5257d038bfcd3d4790f3213afecb5c36d5c61
[ "Unlicense" ]
1
2022-02-06T08:33:08.000Z
2022-02-06T08:33:08.000Z
2021/day_25.py
mpcjanssen/Advent-of-Code
06c5257d038bfcd3d4790f3213afecb5c36d5c61
[ "Unlicense" ]
null
null
null
2021/day_25.py
mpcjanssen/Advent-of-Code
06c5257d038bfcd3d4790f3213afecb5c36d5c61
[ "Unlicense" ]
null
null
null
import aoc_helper

RAW = aoc_helper.day(25)
print(RAW)


def parse_raw():
    ...


DATA = parse_raw()


def part_one():
    ...


def part_two():
    ...


aoc_helper.submit(25, part_one)
aoc_helper.submit(25, part_two)
11.368421
31
0.652778
0
0
0
0
0
0
0
0
0
0
22d23a29cb139320e7b38591cd284a89f2406142
475
py
Python
6/6.2.py
Hunter1753/adventofcode
962df52af01f6ab575e8f00eb2d1c1335dba5430
[ "CC0-1.0" ]
1
2020-12-08T21:53:19.000Z
2020-12-08T21:53:19.000Z
6/6.2.py
Hunter1753/adventofcode
962df52af01f6ab575e8f00eb2d1c1335dba5430
[ "CC0-1.0" ]
null
null
null
6/6.2.py
Hunter1753/adventofcode
962df52af01f6ab575e8f00eb2d1c1335dba5430
[ "CC0-1.0" ]
null
null
null
def setIntersectionCount(group):
    return len(set.intersection(*group))


groupList = []
tempGroup = []

with open("./6/input.txt") as inputFile:
    for line in inputFile:
        line = line.replace("\n","")
        if len(line) > 0:
            tempGroup.append(set(line))
        else:
            groupList.append(tempGroup)
            tempGroup = []

if len(tempGroup) > 0:
    groupList.append(tempGroup)

groupList = list(map(setIntersectionCount,groupList))
print("{} common options in groups".format(sum(groupList)))
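A tiny in-memory example of the same counting logic, with made-up groups instead of the puzzle input file:

# Each inner list is one group of people's answer sets.
groups = [
    [set("abc"), set("abd")],   # answers common to everyone: {'a', 'b'} -> 2
    [set("xy")],                # a single person: {'x', 'y'} -> 2
]
counts = [len(set.intersection(*group)) for group in groups]
print("{} common options in groups".format(sum(counts)))  # -> 4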
25
59
0.703158
0
0
0
0
0
0
0
0
50
0.105263
22d2adc9a61d389ca50d1c98a9058e597ec58a82
2,964
py
Python
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
ZichaoGuo/PaddleSlim
2550fb4ec86aee6155c1c8a2c9ab174e239918a3
[ "Apache-2.0" ]
926
2019-12-16T05:06:56.000Z
2022-03-31T07:22:10.000Z
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
ZichaoGuo/PaddleSlim
2550fb4ec86aee6155c1c8a2c9ab174e239918a3
[ "Apache-2.0" ]
327
2019-12-16T06:04:31.000Z
2022-03-30T11:08:18.000Z
demo/gpnas/CVPR2021_NAS_competition_gpnas_demo.py
ZichaoGuo/PaddleSlim
2550fb4ec86aee6155c1c8a2c9ab174e239918a3
[ "Apache-2.0" ]
234
2019-12-16T03:12:08.000Z
2022-03-27T12:59:39.000Z
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import copy import numpy as np from paddleslim.nas import GPNAS # 使用GP-NAS参加[CVPR 2021 NAS国际比赛](https://www.cvpr21-nas.com/competition) Track2 demo # [CVPR 2021 NAS国际比赛Track2 studio地址](https://aistudio.baidu.com/aistudio/competition/detail/71?lang=en) # [AI studio GP-NAS demo](https://aistudio.baidu.com/aistudio/projectdetail/1824958) # demo 基于paddleslim自研NAS算法GP-NAS:Gaussian Process based Neural Architecture Search # 基于本demo的改进版可以获得双倍奖金 def preprare_trainning_data(file_name, t_flag): ## t_flag ==1 using all trainning data ## t_flag ==2 using half trainning data with open(file_name, 'r') as f: arch_dict = json.load(f) Y_all = [] X_all = [] for sub_dict in arch_dict.items(): Y_all.append(sub_dict[1]['acc'] * 100) X_all.append(np.array(sub_dict[1]['arch']).T.reshape(4, 16)[2]) X_all, Y_all = np.array(X_all), np.array(Y_all) X_train, Y_train, X_test, Y_test = X_all[0::t_flag], Y_all[ 0::t_flag], X_all[1::t_flag], Y_all[1::t_flag] return X_train, Y_train, X_test, Y_test if __name__ == '__main__': stage1_file = './datasets/Track2_stage1_trainning.json' stage2_file = './datasets/Track2_stage2_few_show_trainning.json' X_train_stage1, Y_train_stage1, X_test_stage1, Y_test_stage1 = preprare_trainning_data( stage1_file, 1) X_train_stage2, Y_train_stage2, X_test_stage2, Y_test_stage2 = preprare_trainning_data( stage2_file, 2) gpnas = GPNAS() w = gpnas.get_initial_mean(X_test_stage1, Y_test_stage1) init_cov = gpnas.get_initial_cov(X_train_stage1) error_list = np.array( Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict( X_test_stage2)) print('RMSE trainning on stage1 testing on stage2:', np.sqrt(np.dot(error_list.T, error_list) / len(error_list))) gpnas.get_posterior_mean(X_train_stage2[0::3], Y_train_stage2[0::3]) gpnas.get_posterior_mean(X_train_stage2[1::3], Y_train_stage2[1::3]) gpnas.get_posterior_cov(X_train_stage2[1::3], Y_train_stage2[1::3]) error_list = np.array( Y_test_stage2.reshape(len(Y_test_stage2), 1) - gpnas.get_predict_jiont( X_test_stage2, X_train_stage2[::1], Y_train_stage2[::1])) print('RMSE using stage1 as prior:', np.sqrt(np.dot(error_list.T, error_list) / len(error_list)))
44.909091
103
0.721323
0
0
0
0
0
0
0
0
1,307
0.430784
22d2e5e3c594615ca7c099c59610e2d90de239db
403
py
Python
pages/migrations/0004_auto_20181102_0944.py
yogeshprasad/spa-development
1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b
[ "Apache-2.0" ]
null
null
null
pages/migrations/0004_auto_20181102_0944.py
yogeshprasad/spa-development
1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b
[ "Apache-2.0" ]
7
2020-06-05T19:11:22.000Z
2022-03-11T23:30:57.000Z
pages/migrations/0004_auto_20181102_0944.py
yogeshprasad/spa-development
1bee9ca64da5815e1c9a2f7af43b44b59ee2ca7b
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.0.6 on 2018-11-02 09:44

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('pages', '0003_coachingcourse'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coachingcourse',
            name='username',
            field=models.CharField(default='', max_length=100),
        ),
    ]
21.210526
63
0.602978
310
0.769231
0
0
0
0
0
0
103
0.255583
22d31dc3511cd477901e03ecc8f042e8c0f688bf
1,119
py
Python
imageclassification/src/sample/splitters/_StratifiedSplitter.py
waikato-datamining/keras-imaging
f044f883242895c18cfdb31a827bc32bdb0405ed
[ "MIT" ]
null
null
null
imageclassification/src/sample/splitters/_StratifiedSplitter.py
waikato-datamining/keras-imaging
f044f883242895c18cfdb31a827bc32bdb0405ed
[ "MIT" ]
null
null
null
imageclassification/src/sample/splitters/_StratifiedSplitter.py
waikato-datamining/keras-imaging
f044f883242895c18cfdb31a827bc32bdb0405ed
[ "MIT" ]
1
2020-04-16T15:29:28.000Z
2020-04-16T15:29:28.000Z
from collections import OrderedDict
from random import Random
from typing import Set

from .._types import Dataset, Split, LabelIndices
from .._util import per_label
from ._RandomSplitter import RandomSplitter
from ._Splitter import Splitter


class StratifiedSplitter(Splitter):
    """
    TODO
    """
    def __init__(self, percentage: float, labels: LabelIndices, random: Random = Random()):
        self._percentage = percentage
        self._labels = labels
        self._random = random

    def __str__(self) -> str:
        return f"strat-{self._percentage}"

    def __call__(self, dataset: Dataset) -> Split:
        subsets_per_label = per_label(dataset)

        sub_splits = {
            label: RandomSplitter(int(len(subsets_per_label[label]) * self._percentage), self._random)(subsets_per_label[label])
            for label in self._labels.keys()
        }

        result = OrderedDict(), OrderedDict()

        for filename, label in dataset.items():
            result_index = 0 if filename in sub_splits[label][0] else 1
            result[result_index][filename] = label

        return result
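A usage sketch for the splitter above. The record does not show the concrete Dataset and LabelIndices types, so plain dicts stand in for them here, and the class is assumed to be importable from its package; treat the whole snippet as illustrative only:

from random import Random

# filename -> label mapping, standing in for the package's Dataset type
dataset = {
    "img_0.png": "cat", "img_1.png": "cat", "img_2.png": "cat", "img_3.png": "cat",
    "img_4.png": "dog", "img_5.png": "dog", "img_6.png": "dog", "img_7.png": "dog",
}
labels = {"cat": 0, "dog": 1}   # label -> index, standing in for LabelIndices

splitter = StratifiedSplitter(0.5, labels, Random(42))
first, second = splitter(dataset)   # roughly half of each label lands in `first`
print(len(first), len(second))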
28.692308
128
0.671135
874
0.781055
0
0
0
0
0
0
47
0.042002
22d37205c7c002b3538af8a7bcaeddcf556d57d9
315
py
Python
revenuecat_python/enums.py
YuraHavrylko/revenuecat_python
a25b234933b6e80e1ff09b6a82d73a0e3df91caa
[ "MIT" ]
1
2020-12-11T09:31:02.000Z
2020-12-11T09:31:02.000Z
revenuecat_python/enums.py
YuraHavrylko/revenuecat_python
a25b234933b6e80e1ff09b6a82d73a0e3df91caa
[ "MIT" ]
null
null
null
revenuecat_python/enums.py
YuraHavrylko/revenuecat_python
a25b234933b6e80e1ff09b6a82d73a0e3df91caa
[ "MIT" ]
null
null
null
from enum import Enum


class SubscriptionPlatform(Enum):
    ios = 'ios'
    android = 'android'
    macos = 'macos'
    uikitformac = 'uikitformac'
    stripe = 'stripe'


class AttributionNetworkCode(Enum):
    apple_search_ads = 0
    adjust = 1
    apps_flyer = 2
    branch = 3
    tenjin = 4
    facebook = 5
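Minimal lookup examples for the enums above, assuming the record's module is importable under its package name:

from revenuecat_python.enums import SubscriptionPlatform, AttributionNetworkCode

print(SubscriptionPlatform.ios.value)    # 'ios'
print(SubscriptionPlatform("stripe"))    # SubscriptionPlatform.stripe (lookup by value)
print(AttributionNetworkCode(5).name)    # 'facebook' (lookup by numeric code)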
17.5
35
0.634921
288
0.914286
0
0
0
0
0
0
42
0.133333
22d53110de1903196c37bd847b098f2456b54f16
1,441
py
Python
windows_packages_gpu/torch/nn/intrinsic/qat/modules/linear_relu.py
codeproject/DeepStack
d96368a3db1bc0266cb500ba3701d130834da0e6
[ "Apache-2.0" ]
353
2020-12-10T10:47:17.000Z
2022-03-31T23:08:29.000Z
windows_packages_gpu/torch/nn/intrinsic/qat/modules/linear_relu.py
codeproject/DeepStack
d96368a3db1bc0266cb500ba3701d130834da0e6
[ "Apache-2.0" ]
80
2020-12-10T09:54:22.000Z
2022-03-30T22:08:45.000Z
windows_packages_gpu/torch/nn/intrinsic/qat/modules/linear_relu.py
codeproject/DeepStack
d96368a3db1bc0266cb500ba3701d130834da0e6
[ "Apache-2.0" ]
63
2020-12-10T17:10:34.000Z
2022-03-28T16:27:07.000Z
from __future__ import absolute_import, division, print_function, unicode_literals import torch.nn.qat as nnqat import torch.nn.intrinsic import torch.nn.functional as F class LinearReLU(nnqat.Linear): r""" A LinearReLU module fused from Linear and ReLU modules, attached with FakeQuantize modules for output activation and weight, used in quantization aware training. We adopt the same interface as :class:`torch.nn.Linear`. Similar to `torch.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to default. Attributes: activation_post_process: fake quant module for output activation weight: fake quant module for weight Examples:: >>> m = nn.qat.LinearReLU(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ _FLOAT_MODULE = torch.nn.intrinsic.LinearReLU def __init__(self, in_features, out_features, bias=True, qconfig=None): super(LinearReLU, self).__init__(in_features, out_features, bias, qconfig) def forward(self, input): return self.activation_post_process(F.relu( F.linear(input, self.weight_fake_quant(self.weight), self.bias))) @classmethod def from_float(cls, mod, qconfig=None): return super(LinearReLU, cls).from_float(mod, qconfig)
34.309524
89
0.66898
1,263
0.876475
0
0
121
0.083969
0
0
700
0.485774
22d6fda860ed01e0cc3ade5c2a2e95bc621ae7ac
992
py
Python
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/draw_buffers2.py
temelkirci/Motion_Editor
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
[ "MIT" ]
1
2022-03-02T17:07:20.000Z
2022-03-02T17:07:20.000Z
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/draw_buffers2.py
temelkirci/RealTime_6DOF_Motion_Editor
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
[ "MIT" ]
null
null
null
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/draw_buffers2.py
temelkirci/RealTime_6DOF_Motion_Editor
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
[ "MIT" ]
null
null
null
'''OpenGL extension EXT.draw_buffers2

This module customises the behaviour of the
OpenGL.raw.GL.EXT.draw_buffers2 to provide a more
Python-friendly API

Overview (from the spec)

This extension builds upon the ARB_draw_buffers extension and provides
separate blend enables and color write masks for each color output.
In ARB_draw_buffers (part of OpenGL 2.0), separate values can be written
to each color buffer, but the blend enable and color write mask are
global and apply to all color outputs.

While this extension does provide separate blend enables, it does not
provide separate blend functions or blend equations per color output.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/draw_buffers2.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.draw_buffers2 import *
### END AUTOGENERATED SECTION
36.740741
73
0.803427
0
0
0
0
0
0
0
0
807
0.813508
22d789885783516e44018b1a27dcbc9e0ec012e0
6,443
py
Python
pymemcache/client/retrying.py
liquidpele/pymemcache
0001f94a06b91078ed7b7708729ef0d1aaa73a68
[ "Apache-2.0" ]
null
null
null
pymemcache/client/retrying.py
liquidpele/pymemcache
0001f94a06b91078ed7b7708729ef0d1aaa73a68
[ "Apache-2.0" ]
null
null
null
pymemcache/client/retrying.py
liquidpele/pymemcache
0001f94a06b91078ed7b7708729ef0d1aaa73a68
[ "Apache-2.0" ]
null
null
null
""" Module containing the RetryingClient wrapper class. """ from time import sleep def _ensure_tuple_argument(argument_name, argument_value): """ Helper function to ensure the given arguments are tuples of Exceptions (or subclasses), or can at least be converted to such. Args: argument_name: str, name of the argument we're checking, only used for raising meaningful exceptions. argument: any, the argument itself. Returns: tuple[Exception]: A tuple with the elements from the argument if they are valid. Exceptions: ValueError: If the argument was not None, tuple or Iterable. ValueError: If any of the elements of the argument is not a subclass of Exception. """ # Ensure the argument is a tuple, set or list. if argument_value is None: return tuple() elif not isinstance(argument_value, (tuple, set, list)): raise ValueError("%s must be either a tuple, a set or a list." % argument_name) # Convert the argument before checking contents. argument_tuple = tuple(argument_value) # Check that all the elements are actually inherited from Exception. # (Catchable) if not all([issubclass(arg, Exception) for arg in argument_tuple]): raise ValueError( "%s is only allowed to contain elements that are subclasses of " "Exception." % argument_name ) return argument_tuple class RetryingClient(object): """ Client that allows retrying calls for the other clients. """ def __init__( self, client, attempts=2, retry_delay=0, retry_for=None, do_not_retry_for=None ): """ Constructor for RetryingClient. Args: client: Client|PooledClient|HashClient, inner client to use for performing actual work. attempts: optional int, how many times to attempt an action before failing. Must be 1 or above. Defaults to 2. retry_delay: optional int|float, how many seconds to sleep between each attempt. Defaults to 0. retry_for: optional None|tuple|set|list, what exceptions to allow retries for. Will allow retries for all exceptions if None. Example: `(MemcacheClientError, MemcacheUnexpectedCloseError)` Accepts any class that is a subclass of Exception. Defaults to None. do_not_retry_for: optional None|tuple|set|list, what exceptions should be retried. Will not block retries for any Exception if None. Example: `(IOError, MemcacheIllegalInputError)` Accepts any class that is a subclass of Exception. Defaults to None. Exceptions: ValueError: If `attempts` is not 1 or above. ValueError: If `retry_for` or `do_not_retry_for` is not None, tuple or Iterable. ValueError: If any of the elements of `retry_for` or `do_not_retry_for` is not a subclass of Exception. ValueError: If there is any overlap between `retry_for` and `do_not_retry_for`. """ if attempts < 1: raise ValueError( "`attempts` argument must be at least 1. " "Otherwise no attempts are made." ) self._client = client self._attempts = attempts self._retry_delay = retry_delay self._retry_for = _ensure_tuple_argument("retry_for", retry_for) self._do_not_retry_for = _ensure_tuple_argument( "do_not_retry_for", do_not_retry_for ) # Verify no overlap in the go/no-go exception collections. for exc_class in self._retry_for: if exc_class in self._do_not_retry_for: raise ValueError( 'Exception class "%s" was present in both `retry_for` ' "and `do_not_retry_for`. Any exception class is only " "allowed in a single argument." % repr(exc_class) ) # Take dir from the client to speed up future checks. self._client_dir = dir(self._client) def _retry(self, name, func, *args, **kwargs): """ Workhorse function, handles retry logic. Args: name: str, Name of the function called. 
func: callable, the function to retry. *args: args, array arguments to pass to the function. **kwargs: kwargs, keyword arguments to pass to the function. """ for attempt in range(self._attempts): try: result = func(*args, **kwargs) return result except Exception as exc: # Raise the exception to caller if either is met: # - We've used the last attempt. # - self._retry_for is set, and we do not match. # - self._do_not_retry_for is set, and we do match. # - name is not actually a member of the client class. if ( attempt >= self._attempts - 1 or (self._retry_for and not isinstance(exc, self._retry_for)) or ( self._do_not_retry_for and isinstance(exc, self._do_not_retry_for) ) or name not in self._client_dir ): raise exc # Sleep and try again. sleep(self._retry_delay) # This is the real magic soup of the class, we catch anything that isn't # strictly defined for ourselves and pass it on to whatever client we've # been given. def __getattr__(self, name): return lambda *args, **kwargs: self._retry( name, self._client.__getattribute__(name), *args, **kwargs ) # We implement these explicitly because they're "magic" functions and won't # get passed on by __getattr__. def __dir__(self): return self._client_dir # These magics are copied from the base client. def __setitem__(self, key, value): self.set(key, value, noreply=True) def __getitem__(self, key): value = self.get(key) if value is None: raise KeyError return value def __delitem__(self, key): self.delete(key, noreply=True)
35.994413
87
0.60298
4,989
0.774329
0
0
0
0
0
0
3,785
0.587459
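A minimal usage sketch for the RetryingClient wrapper in the record above. A memcached instance reachable at localhost:11211 is assumed, and the choice of retried exception is only an example.

# Sketch: wrap a base client so transient connection drops are retried a few times.
from pymemcache.client.base import Client
from pymemcache.client.retrying import RetryingClient
from pymemcache.exceptions import MemcacheUnexpectedCloseError

base = Client(("localhost", 11211))
client = RetryingClient(
    base,
    attempts=3,
    retry_delay=0.01,
    retry_for=(MemcacheUnexpectedCloseError,),
)
client.set("greeting", "hello", expire=60)
print(client.get("greeting"))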
22d92edfa8963f3c42a5dc829d7d8e2eae0773ab
461
py
Python
8.1.py
HuaichenOvO/EIE3280HW
e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6
[ "MIT" ]
null
null
null
8.1.py
HuaichenOvO/EIE3280HW
e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6
[ "MIT" ]
null
null
null
8.1.py
HuaichenOvO/EIE3280HW
e1424abb8baf715a4e9372e2ca6b0bed1e62f3d6
[ "MIT" ]
null
null
null
import numpy as np
import numpy.linalg as lg

A_mat = np.matrix([
    [0, 1, 1, 1, 0],
    [1, 0, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [1, 0, 1, 0, 1],
    [0, 1, 1, 1, 0]
])

eigen = lg.eig(A_mat)  # return Arr[5] with 5 different linear independent eigen values

vec = eigen[1][:, 0]  # the column (eigen vector) with the largest eigen value
value = eigen[0][0]  # the largest eigen value

print(vec)
print(A_mat * vec)
print(value * vec)
20.043478
87
0.566161
0
0
0
0
0
0
0
0
148
0.321041
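A short numerical check of what the script above demonstrates: the dominant eigenpair of the adjacency matrix satisfies A·v = λ·v up to floating-point error. Written against plain numpy arrays rather than np.matrix; picking the eigenvalue by argmax is an illustrative choice.

# Sketch: verify the eigenpair relation A @ v == lambda * v numerically.
import numpy as np

A = np.array([[0, 1, 1, 1, 0],
              [1, 0, 0, 0, 1],
              [1, 0, 0, 1, 1],
              [1, 0, 1, 0, 1],
              [0, 1, 1, 1, 0]], dtype=float)
w, V = np.linalg.eig(A)
k = np.argmax(w.real)                      # index of the largest eigenvalue
v = V[:, k].real
print(np.allclose(A @ v, w[k].real * v))   # expected: True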
22d9ab4bf21ea11b6c751dd2350676d79a0f46d5
397
py
Python
classroom/migrations/0025_myfile_file.py
Abulhusain/E-learing
65cfe3125f1b6794572ef2daf89917976f0eac09
[ "MIT" ]
5
2019-06-19T03:47:17.000Z
2020-06-11T17:46:50.000Z
classroom/migrations/0025_myfile_file.py
Abulhusain/E-learing
65cfe3125f1b6794572ef2daf89917976f0eac09
[ "MIT" ]
3
2021-03-19T01:23:12.000Z
2021-09-08T01:05:25.000Z
classroom/migrations/0025_myfile_file.py
seeej/digiwiz
96ddfc22fe4c815feec3d75c30576fec5f344154
[ "MIT" ]
1
2021-06-04T05:58:15.000Z
2021-06-04T05:58:15.000Z
# Generated by Django 2.2.2 on 2019-08-25 09:29

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('classroom', '0024_auto_20190825_1723'),
    ]

    operations = [
        migrations.AddField(
            model_name='myfile',
            name='file',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
20.894737
63
0.602015
304
0.765743
0
0
0
0
0
0
97
0.244332
22da304d7553bb5adf64e1d52f39170a3b5aca59
249
py
Python
jumbo_api/objects/profile.py
rolfberkenbosch/python-jumbo-api
9ca35cbea6225dcc6108093539e76f110b1840b0
[ "MIT" ]
3
2020-07-24T08:44:13.000Z
2021-09-05T06:24:01.000Z
jumbo_api/objects/profile.py
rolfberkenbosch/python-jumbo-api
9ca35cbea6225dcc6108093539e76f110b1840b0
[ "MIT" ]
6
2020-04-30T19:12:24.000Z
2021-03-23T19:21:19.000Z
jumbo_api/objects/profile.py
rolfberkenbosch/python-jumbo-api
9ca35cbea6225dcc6108093539e76f110b1840b0
[ "MIT" ]
2
2020-04-30T14:59:12.000Z
2020-08-30T19:15:57.000Z
from jumbo_api.objects.store import Store


class Profile(object):
    def __init__(self, data):
        self.id = data.get("identifier")
        self.store = Store(data.get("store"))

    def __str__(self):
        return f"{self.id} {self.store}"
22.636364
45
0.634538
204
0.819277
0
0
0
0
0
0
44
0.176707
22da72ceaa2598d408165e577728716dec2eb71a
11,928
py
Python
tmp/real_time_log_analy/logWatcher.py
hankai17/test
8f38d999a7c6a92eac94b4d9dc8e444619d2144f
[ "MIT" ]
7
2017-07-16T15:09:26.000Z
2021-09-01T02:13:15.000Z
tmp/real_time_log_analy/logWatcher.py
hankai17/test
8f38d999a7c6a92eac94b4d9dc8e444619d2144f
[ "MIT" ]
null
null
null
tmp/real_time_log_analy/logWatcher.py
hankai17/test
8f38d999a7c6a92eac94b4d9dc8e444619d2144f
[ "MIT" ]
3
2017-09-13T09:54:49.000Z
2019-03-18T01:29:15.000Z
#!/usr/bin/env python import os import sys import time import errno import stat import datetime import socket import struct import atexit import logging #from lru import LRUCacheDict from logging import handlers from task_manager import Job, taskManage from ctypes import * from urlparse import * from multiprocessing import Process,Lock from log_obj import CLog from parse_conf import cConfParser log_file = "timelog.log" log_fmt = '%(asctime)s: %(message)s' config_file = 'test.config' domain_white_dict = {} pps_ip_list = [] pps_port = 0 domain_sfx_err_count = 0 domain_sfx_err_rate = 0 ats_ip = '' def daemonize(pid_file=None): pid = os.fork() if pid: sys.exit(0) os.chdir('/') os.umask(0) os.setsid() _pid = os.fork() if _pid: sys.exit(0) sys.stdout.flush() sys.stderr.flush() with open('/dev/null') as read_null, open('/dev/null', 'w') as write_null: os.dup2(read_null.fileno(), sys.stdin.fileno()) os.dup2(write_null.fileno(), sys.stdout.fileno()) os.dup2(write_null.fileno(), sys.stderr.fileno()) if pid_file: with open(pid_file, 'w+') as f: f.write(str(os.getpid())) atexit.register(os.remove, pid_file) def get_suffix(p): if len(p) == 1: #return "pure domain" return "nil" fields = p.split("/") if len(fields) == 0 or len(fields) == 1: return "null" fields1 = fields[len(fields) - 1].split(".") if len(fields1) == 0 or len(fields1) == 1: return "null" else: return fields1[len(fields1) - 1] class LogWatcher(object): def __init__(self, folder, callback, extensions=["log"], logfile_keyword="squid", tail_lines=0): self.files_map = {} self.callback = callback self.folder = os.path.realpath(folder) self.extensions = extensions self.logfile_kw = logfile_keyword assert os.path.exists(self.folder), "%s does not exists" % self.folder assert callable(callback) self.update_files() for id, file in self.files_map.iteritems(): file.seek(os.path.getsize(file.name)) # EOF if tail_lines: lines = self.tail(file.name, tail_lines) if lines: self.callback(file.name, lines) def __del__(self): self.close() def loop(self, interval=0.1, async=False): while 1: try: self.update_files() for fid, file in list(self.files_map.iteritems()): self.readfile(file) if async: return time.sleep(interval) except KeyboardInterrupt: break def log(self, line): print line def listdir(self): ls = os.listdir(self.folder) if self.extensions: return [x for x in ls if os.path.splitext(x)[1][1:] in self.extensions and self.logfile_kw in os.path.split(x)[1] ] else: return ls @staticmethod def tail(fname, window): try: f = open(fname, 'r') except IOError, err: if err.errno == errno.ENOENT: return [] else: raise else: BUFSIZ = 1024 f.seek(0, os.SEEK_END) fsize = f.tell() block = -1 data = "" exit = False while not exit: step = (block * BUFSIZ) if abs(step) >= fsize: f.seek(0) exit = True else: f.seek(step, os.SEEK_END) data = f.read().strip() if data.count('\n') >= window: break else: block -= 1 return data.splitlines()[-window:] def update_files(self): ls = [] if os.path.isdir(self.folder): for name in self.listdir(): absname = os.path.realpath(os.path.join(self.folder, name)) try: st = os.stat(absname) except EnvironmentError, err: if err.errno != errno.ENOENT: raise else: if not stat.S_ISREG(st.st_mode): continue fid = self.get_file_id(st) ls.append((fid, absname)) elif os.path.isfile(self.folder): absname = os.path.realpath(self.folder) try: st = os.stat(absname) except EnvironmentError, err: if err.errno != errno.ENOENT: raise else: fid = self.get_file_id(st) ls.append((fid, absname)) else: print 'You submitted an object that was neither a file or 
folder...exiting now.' sys.exit() for fid, file in list(self.files_map.iteritems()): try: st = os.stat(file.name) except EnvironmentError, err: if err.errno == errno.ENOENT: self.unwatch(file, fid) else: raise else: if fid != self.get_file_id(st): self.unwatch(file, fid) self.watch(file.name) for fid, fname in ls: if fid not in self.files_map: self.watch(fname) def readfile(self, file): lines = file.readlines() if lines: self.callback(file.name, lines) def watch(self, fname): try: file = open(fname, "r") fid = self.get_file_id(os.stat(fname)) except EnvironmentError, err: if err.errno != errno.ENOENT: raise else: self.log("watching logfile %s" % fname) self.files_map[fid] = file def unwatch(self, file, fid): lines = self.readfile(file) self.log("un-watching logfile %s" % file.name) del self.files_map[fid] if lines: self.callback(file.name, lines) @staticmethod def get_file_id(st): return "%xg%x" % (st.st_dev, st.st_ino) def close(self): for id, file in self.files_map.iteritems(): file.close() self.files_map.clear() def udp_send_message(ip_list, port, arr): for ip in ip_list: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.sendto(arr, (ip, port)) s.close() def pull_data(job): if not (job.sfx == "nil" or job.sfx == "null"): fmt = "=HHHH%dsH%dsH" %(len(job.url),len(job.sfx)) data = struct.pack( fmt, 80, #id 1, #type 8 + len(job.url) + 2 + len(job.sfx) + 1, #length len(job.url), #domain_len job.url, #domain len(job.sfx), #sfx_len job.sfx, #sfx 0 ) else: fmt = "=HHHH%dsH" %(len(job.url)) data = struct.pack( fmt, 80, #id 1, #type 8 + len(job.url) + 1, #length len(job.url), #domain_len job.url, 0 ) global pps_ip_list global pps_port udp_send_message(pps_ip_list, pps_port, data) tmg.done_task_add(job) log_message = job.url + ' ' + job.sfx loger.write(20, log_message) def callback_routine(idx): print 'callback_routinue' def get_domain_white(f): if len(f) == 0: print 'No domain_white_list' return filename = f fd = open(filename, 'r') for line in fd.readlines(): line = line.strip() if not domain_white_dict.has_key(line): domain_white_dict[line] = 1 print 'parse domain_white_list done' def period_check_task(job): global txn_idx global once_flag if txn_idx == 0 and once_flag == 0: once_flag = 1 tmg.done_task_add(job) job.addtime = time.time() tmg.task_add(job) return loger.write(10, '------>') mutex.acquire() for k in d1.keys(): if domain_white_dict.has_key(k): continue for k1 in d1[k].keys(): err_rate = d1[k][k1]['not_ok'] * 100 / (d1[k][k1]['not_ok'] + d1[k][k1]['20x']) log_message = k + ' ' + str(err_rate) loger.write(10, log_message) global domain_sfx_err_count global domain_sfx_err_rate if err_rate >= domain_sfx_err_rate and (d1[k][k1]['not_ok'] + d1[k][k1]['20x']) >= domain_sfx_err_count : #print "will add to task", k, k1, "ok:", d1[k][k1]['20x'], "not_ok:", d1[k][k1]['not_ok'], "err rate:", err_rate txn_idx += 1 job = Job(txn_idx, pull_data, time.time(), 0, k, '', callback_routine, k1, '') tmg.task_add(job) loger.write(10, '<------') d1.clear() mutex.release() tmg.done_task_add(job) if job.period > 0: job.addtime = time.time() tmg.task_add(job) def config_parse(): global domain_sfx_err_count global domain_sfx_err_rate global pps_ip_list global pps_port global ats_ip cp = cConfParser(config_file) pps_ip = cp.get('common', 'pps_ip') fields = pps_ip.strip().split('|') if len(fields) > 0: for i in fields: pps_ip_list.append(i) else: pps_ip_list.append(pps_ip) pps_port = int(cp.get('common', 'pps_port')) domain_sfx_err_count = int(cp.get('common', 'domain_sfx_err_count' )) domain_sfx_err_rate 
= int(cp.get('common', 'domain_sfx_err_rate' )) ats_ip = cp.get('common', 'ats_ip') print 'ats_ip: ', ats_ip print 'pps_ip: ', pps_ip print 'pps_port: ', pps_port print 'domain_sfx_err_count: ', domain_sfx_err_count print 'domain_sfx_err_rate: ', domain_sfx_err_rate return cp once_flag = 0 txn_idx = 0 d1 = {} mutex = Lock() version_message = '1.0.1' #1.0.1: Add conf obj; Add log obj #1.0.2: More pps. add tool config if __name__ == '__main__': help_message = 'Usage: python %s' % sys.argv[0] if len(sys.argv) == 2 and (sys.argv[1] in '--version'): print version_message exit(1) if len(sys.argv) == 2 and (sys.argv[1] in '--help'): print help_message exit(1) if len(sys.argv) != 1: print help_message exit(1) cp = config_parse() get_domain_white(cp.get('common', 'domain_white_list')) loger = CLog(log_file, log_fmt, 12, 5, cp.get('common', 'debug')) print 'Start ok' daemonize() tmg = taskManage() tmg.run() pull_pps_job = Job(txn_idx, period_check_task, time.time(), int(cp.get('common', 'interval')), '', '', callback_routine, '', '') tmg.task_add(pull_pps_job) def callback(filename, lines): for line in lines: fields = line.strip().split("'") http_code = fields[23] domain = fields[13] log_message = 'new line ' + domain #loger.write(10, log_message) if len(domain.split(":")) > 0: domain = domain.split(":")[0] user_ip = fields[5] result = urlparse(fields[15]) sfx = get_suffix(result.path) if sfx == 'nil' or sfx == 'null': continue if len(domain) <= 3: continue #is watch req global ats_ip if user_ip == ats_ip: continue mutex.acquire() sfx_dict = None if not d1.has_key(domain): d1[domain] = {} sfx_dict = d1[domain] else: sfx_dict = d1[domain] if not sfx_dict.has_key(sfx): sfx_dict[sfx] = {'20x':0, 'not_ok':0} if not(http_code in "200" or http_code in "206" or http_code in "304" or http_code in "204"): sfx_dict[sfx]['not_ok'] += 1 else: sfx_dict[sfx]['20x'] += 1 mutex.release() l = LogWatcher("/opt/ats/var/log/trafficserver", callback) l.loop() #https://docs.python.org/2/library/ctypes.html #https://blog.csdn.net/u012611644/article/details/80529746
29.021898
132
0.550889
4,845
0.406187
0
0
935
0.078387
0
0
1,355
0.113598
22db31f9f12a464c13a70cead5b1a18013bd0add
365
py
Python
lazyblacksmith/views/ajax/__init__.py
jonathonfletcher/LazyBlacksmith
f244f0a15c795707b64e7cc53f82c6d6270691b5
[ "BSD-3-Clause" ]
49
2016-10-24T13:51:56.000Z
2022-02-18T06:07:47.000Z
lazyblacksmith/views/ajax/__init__.py
jonathonfletcher/LazyBlacksmith
f244f0a15c795707b64e7cc53f82c6d6270691b5
[ "BSD-3-Clause" ]
84
2015-04-29T10:24:51.000Z
2022-02-17T19:18:01.000Z
lazyblacksmith/views/ajax/__init__.py
jonathonfletcher/LazyBlacksmith
f244f0a15c795707b64e7cc53f82c6d6270691b5
[ "BSD-3-Clause" ]
34
2017-01-23T13:19:17.000Z
2022-02-02T17:32:08.000Z
# -*- encoding: utf-8 -*-
from flask import request

from lazyblacksmith.utils.request import is_xhr

import logging

logger = logging.getLogger('lb.ajax')


def is_not_ajax():
    """
    Return True if request is not ajax
    This function is used in @cache annotation
    to not cache direct call (http 403)
    """
    return not is_xhr(request)
21.470588
48
0.665753
0
0
0
0
0
0
0
0
176
0.482192
22dbcb72dc9b6914e75bad92c8d92d61083088a7
6,145
py
Python
src/automata_learning_with_policybank/Traces.py
logic-and-learning/AdvisoRL
3bbd741e681e6ea72562fec142d54e9d781d097d
[ "MIT" ]
4
2021-02-04T17:33:07.000Z
2022-01-24T10:29:39.000Z
src/automata_learning_with_policybank/Traces.py
logic-and-learning/AdvisoRL
3bbd741e681e6ea72562fec142d54e9d781d097d
[ "MIT" ]
null
null
null
src/automata_learning_with_policybank/Traces.py
logic-and-learning/AdvisoRL
3bbd741e681e6ea72562fec142d54e9d781d097d
[ "MIT" ]
null
null
null
import os class Traces: def __init__(self, positive = set(), negative = set()): self.positive = positive self.negative = negative """ IG: at the moment we are adding a trace only if it ends up in an event. should we be more restrictive, e.g. consider xxx, the same as xxxxxxxxxx (where x is an empty event '') recent suggestion (from the meeting): ignore empty events altogether and don't consider them as events at all (neither for execution, nor for learning) """ def _should_add(self, trace, i): prefixTrace = trace[:i] if not prefixTrace[-1] == '': return True else: return False def _get_prefixes(self, trace, up_to_limit = None): if up_to_limit is None: up_to_limit = len(trace) all_prefixes = set() for i in range(1, up_to_limit+1): if self._should_add(trace, i): all_prefixes.add(trace[:i]) return all_prefixes def symbol_to_trace(self,symbols): letters = ['a','b','c','d','e','f','g', 'h', 'n'] numbers = [int(i) for i in range(0,9)] dictionary = dict(zip(letters, numbers)) traces = list() for symbol in symbols: traces.append(dictionary.get(symbol)) return tuple(traces) def trace_to_symbol(self,traces): letters = ['a','b','c','d','e','f','g', 'h', 'n'] numbers = [int(i) for i in range(0,9)] dictionary = dict(zip(numbers, letters)) symbols = list() for trace in traces: symbols.append(dictionary.get(trace)) return tuple(traces) def rm_trace_to_symbol(self,rm_file): file = rm_file letters = ['a','b','c','d','e','f','g', 'h', 'n'] numbers = [int(i) for i in range(0,9)] dictionary = dict(zip(numbers, letters)) with open(file) as f: content = f.readlines() lines = [] for line in content: end = 0 begin = 1 #initialize values based on what won't enter the loops; initial values irrelevant number = 0 #random, had to initialize if line != content[0]: number = str() check = 0 count=0 for character in line: if ((check==1) & (character=="'")): #looks for second quotation check = 10 #end search end = count-1 elif (character == "'"): #looks for first quotation check = 1 begin = count+1 elif (check==1): number += character count = count+1 symbol = dictionary.get(int(number)) #symbol = symbol + '&!n' line = list(line) #necessary for use of pop,insert if end==begin+1: line.pop(end) line.pop(begin) line.insert(begin,symbol) elif end==begin: line.pop(begin) line.insert(begin,symbol) lines.append(line) with open(rm_file, 'w') as f: for line in lines: for item in line: f.write(str(item)) def fix_rmfiles(self,rmfile): file = rmfile with open(file) as f: content = f.readlines() final_state = str() for line in content: if line != content[0]: brackets = 0 commas = 0 state = str() next_state = str() for character in line: if (character == "(") & (brackets == 0): brackets = 1 elif brackets == 1: if character == "(": brackets = 2 elif brackets == 2: if character == "1": final_state = next_state print(final_state) if ((commas == 0) & (brackets == 1)): if character == ",": commas = 1 else: state += character elif ((commas == 1) & (brackets == 1)): if character == ",": commas = 2 else: next_state += character # with open(rmfile, 'w') as f: # for line in content: # for item in line: # f.write(str(item)) # f.write("\n") # writethis = "(" + str(final_state) + "," + str(final_state) + ",'True',ConstantRewardFunction(0))" # f.write(writethis) """ when adding a trace, it additionally adds all prefixes as negative traces """ def add_trace(self, trace, reward, learned): trace = tuple(trace) if reward > 0: self.positive.add(trace) # | is a set union operator #if learned==0: self.negative |= self._get_prefixes(trace, len(trace)-1) else: 
#if learned == 0: self.negative |= self._get_prefixes(trace) # else: # self.negative.add(trace) def export_traces(self, filename): parent_path = os.path.dirname(filename) os.makedirs(parent_path,exist_ok=True) with open(filename, "w") as output_file: output_file.write("POSITIVE:") for trace in self.positive: output_file.write("\n") string_repr = [str(el) for el in trace] output_file.write(','.join(string_repr)) output_file.write("\nNEGATIVE:") for trace in self.negative: output_file.write("\n") string_repr = [str(el) for el in trace] output_file.write(','.join(string_repr)) def __repr__(self): return repr(self.positive) + "\n\n" + repr(self.negative)
36.577381
127
0.479414
6,133
0.998047
0
0
0
0
0
0
1,186
0.193002
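A small usage sketch for the Traces class in the record above. The import path is an assumption based on the repo path; the trace symbols and output file are illustrative. Passing fresh sets explicitly also sidesteps the shared mutable default arguments in __init__.

# Sketch: record one rewarded trace and one failed trace, then dump them to disk.
from automata_learning_with_policybank.Traces import Traces  # import path is an assumption

traces = Traces(positive=set(), negative=set())
traces.add_trace(['a', 'b', 'c'], reward=1, learned=0)  # goes to positive; proper prefixes to negative
traces.add_trace(['a', 'd'], reward=0, learned=0)        # all prefixes go to negative
print(traces)                                            # positive set, then negative set
traces.export_traces("logs/traces.txt")                  # creates the parent folder if needed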
22dbf84787aba6cdbf21c855e5dcbb4cff617bd6
1,758
py
Python
example/comp/urls.py
edwilding/django-comments-xtd
c3a335b6345b52c75cce69c66b7cf0ef72439d35
[ "BSD-2-Clause" ]
null
null
null
example/comp/urls.py
edwilding/django-comments-xtd
c3a335b6345b52c75cce69c66b7cf0ef72439d35
[ "BSD-2-Clause" ]
null
null
null
example/comp/urls.py
edwilding/django-comments-xtd
c3a335b6345b52c75cce69c66b7cf0ef72439d35
[ "BSD-2-Clause" ]
1
2021-06-01T20:35:25.000Z
2021-06-01T20:35:25.000Z
import django
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns

if django.VERSION[:2] > (1, 9):
    from django.views.i18n import JavaScriptCatalog
else:
    from django.views.i18n import javascript_catalog

from django_comments_xtd import LatestCommentFeed
from django_comments_xtd.views import XtdCommentListView

from comp import views

admin.autodiscover()

urlpatterns = [
    url(r'^$', views.HomepageView.as_view(), name='homepage'),
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^articles/', include('comp.articles.urls')),
    url(r'^quotes/', include('comp.quotes.urls')),
    url(r'^comments/', include('django_comments_xtd.urls')),
    url(r'^comments/$', XtdCommentListView.as_view(
        content_types=["articles.article", "quotes.quote"],
        paginate_by=10, page_range=5),
        name='comments-xtd-list'),
    url(r'^feeds/comments/$', LatestCommentFeed(), name='comments-feed'),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
]

if django.VERSION[:2] > (1, 9):
    urlpatterns.append(
        url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript-catalog')
    )
else:
    js_info_dict = {
        'packages': ('django_comments_xtd',)
    }
    urlpatterns.append(
        url(r'^jsi18n/$', javascript_catalog, js_info_dict, name='javascript-catalog')
    )

if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()

if 'rosetta' in settings.INSTALLED_APPS:
    urlpatterns += [url(r'^rosetta/', include('rosetta.urls'))]
31.392857
77
0.67463
0
0
0
0
0
0
0
0
438
0.249147
22de394896bd7be748b49ef5d7072349cfcc8ff2
1,770
py
Python
09_multiprocessing/prime_validation/primes_factor_test.py
jumploop/high_performance_python
da5b11735601b51f141975f9d59f14293cab16bb
[ "MIT" ]
null
null
null
09_multiprocessing/prime_validation/primes_factor_test.py
jumploop/high_performance_python
da5b11735601b51f141975f9d59f14293cab16bb
[ "MIT" ]
null
null
null
09_multiprocessing/prime_validation/primes_factor_test.py
jumploop/high_performance_python
da5b11735601b51f141975f9d59f14293cab16bb
[ "MIT" ]
null
null
null
import math
import time


def check_prime(n):
    if n % 2 == 0:
        return False, 2
    for i in range(3, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False, i
    return True, None


if __name__ == "__main__":
    primes = []
    t1 = time.time()
    # 100109100129100151 big prime
    # http://primes.utm.edu/curios/page.php/100109100129100151.html
    # number_range = xrange(100109100129100153, 100109100129101238, 2)
    number_range = range(100109100129101237, 100109100129201238, 2)

    # new expensive near-primes
    # [(95362951, (100109100129100369, 7.254560947418213))
    # (171656941, (100109100129101027, 13.052711009979248))
    # (121344023, (100109100129101291, 8.994053840637207)

    # note these two lines of timings look really wrong, they're about 4sec
    # each really
    # [(265687139, (100109100129102047, 19.642582178115845)), (219609683, (100109100129102277, 16.178056001663208)), (121344023, (100109100129101291, 8.994053840637207))]
    # [(316096873, (100109100129126653, 23.480671882629395)), (313994287, (100109100129111617, 23.262380123138428)), (307151363, (100109100129140177, 22.80288815498352))]

    # primes
    # 100109100129162907
    # 100109100129162947
    highest_factors = {}
    for possible_prime in number_range:
        t2 = time.time()
        is_prime, factor = check_prime(possible_prime)
        if is_prime:
            primes.append(possible_prime)
            print("GOT NEW PRIME", possible_prime)
        else:
            highest_factors[factor] = (possible_prime, time.time() - t2)
    hf = highest_factors.items()
    hf = sorted(hf, reverse=True)
    print(hf[:3])

    print("Took:", time.time() - t1)
    print(len(primes), primes[:10], primes[-10:])
36.122449
170
0.654802
0
0
0
0
0
0
0
0
845
0.477401
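A tiny worked example of the check_prime helper above, assuming it is in scope from the listing: it returns a (flag, witness_factor) pair, so the caller can see why a candidate failed.

# Sketch: the helper reports the first factor it finds (2 for even inputs).
print(check_prime(15))   # (False, 3)   -> 15 is divisible by 3
print(check_prime(22))   # (False, 2)   -> even numbers short-circuit
print(check_prime(97))   # (True, None) -> no divisor up to sqrt(97)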
22df608412513a1bf5e311a4eae60aa3f6a7a737
1,609
py
Python
python/test/test_dynamic_bitset.py
hagabb/katana
a52a688b90315a79aa95cf8d279fd7f949a3b94b
[ "BSD-3-Clause" ]
null
null
null
python/test/test_dynamic_bitset.py
hagabb/katana
a52a688b90315a79aa95cf8d279fd7f949a3b94b
[ "BSD-3-Clause" ]
null
null
null
python/test/test_dynamic_bitset.py
hagabb/katana
a52a688b90315a79aa95cf8d279fd7f949a3b94b
[ "BSD-3-Clause" ]
null
null
null
import pytest

from katana.dynamic_bitset import DynamicBitset

__all__ = []

SIZE = 50


@pytest.fixture
def dbs():
    return DynamicBitset(SIZE)


def test_set(dbs):
    dbs[10] = 1
    assert dbs[10]


def test_set_invalid_type(dbs):
    try:
        dbs[2.3] = 0
        assert False
    except TypeError:
        pass


def test_set_invalid_index_low(dbs):
    try:
        dbs[-1] = 1
        assert False
    except IndexError:
        pass


def test_set_invalid_index_high(dbs):
    try:
        dbs[SIZE] = 1
        assert False
    except IndexError:
        pass


def test_reset(dbs):
    dbs[10] = 1
    dbs.reset()
    assert not dbs[10]
    assert len(dbs) == SIZE


def test_reset_index(dbs):
    dbs[10] = 1
    dbs[10] = 0
    assert not dbs[10]


def test_reset_begin_end(dbs):
    dbs[10] = 1
    dbs[15] = 1
    dbs[12:17] = 0
    assert dbs[10]
    assert not dbs[15]


def test_reset_begin_end_invalid_step(dbs):
    try:
        dbs[12:17:22] = 0
        assert False
    except ValueError:
        pass


def test_reset_none_end(dbs):
    dbs[10] = 1
    dbs[15] = 1
    dbs[:12] = 0
    assert not dbs[10]
    assert dbs[15]


def test_resize(dbs):
    dbs.resize(20)
    assert len(dbs) == 20

    dbs[8] = 1
    dbs.resize(20)
    assert len(dbs) == 20
    assert dbs[8]

    dbs.resize(70)
    assert len(dbs) == 70
    assert dbs[8]
    assert dbs.count() == 1


def test_clear(dbs):
    dbs[10] = 1
    dbs.clear()
    assert len(dbs) == 0

    dbs.resize(20)
    assert len(dbs) == 20
    assert not dbs[10]


def test_count(dbs):
    dbs[10] = 1
    assert dbs.count() == 1
14.898148
47
0.580485
0
0
0
0
57
0.035426
0
0
0
0
22df9e5579ccb8577b1f37196d5e862a47aa496e
1,026
py
Python
tests/basic/test_basic.py
kopp/python-astar
642dd4bcef9829776614dc0f12681ac94634a3bc
[ "BSD-3-Clause" ]
133
2017-05-05T03:40:13.000Z
2022-03-30T06:37:23.000Z
src/test/basic/basic.py
ReznicencuBogdan/python-astar
48d1caedd6e839c51315555f85ced567f7f166a7
[ "BSD-3-Clause" ]
6
2019-01-17T20:46:34.000Z
2021-12-23T22:59:57.000Z
src/test/basic/basic.py
ReznicencuBogdan/python-astar
48d1caedd6e839c51315555f85ced567f7f166a7
[ "BSD-3-Clause" ]
61
2017-03-17T14:05:34.000Z
2022-02-18T21:27:40.000Z
import unittest

import astar


class BasicTests(unittest.TestCase):

    def test_bestpath(self):
        """ensure that we take the shortest path, and not the path with less elements.
        the path with less elements is A -> B with a distance of 100
        the shortest path is A -> C -> D -> B with a distance of 60
        """
        nodes = {'A': [('B', 100), ('C', 20)],
                 'C': [('D', 20)],
                 'D': [('B', 20)]}

        def neighbors(n):
            for n1, d in nodes[n]:
                yield n1

        def distance(n1, n2):
            for n, d in nodes[n1]:
                if n == n2:
                    return d

        def cost(n, goal):
            return 1

        path = list(astar.find_path('A', 'B', neighbors_fnct=neighbors,
                                    heuristic_cost_estimate_fnct=cost,
                                    distance_between_fnct=distance))
        self.assertEqual(4, len(path))
        for i, n in enumerate('ACDB'):
            self.assertEqual(n, path[i])


if __name__ == '__main__':
    unittest.main()
30.176471
87
0.522417
946
0.922027
904
0.881092
0
0
0
0
276
0.269006
22e090fdaf1d3e3871f2d87d1370e0c27a711e78
2,623
py
Python
potions.py
abdza/skyrim_formulas
bf6be3c82715cfde89810d6e6183c95a55a4414c
[ "MIT" ]
null
null
null
potions.py
abdza/skyrim_formulas
bf6be3c82715cfde89810d6e6183c95a55a4414c
[ "MIT" ]
null
null
null
potions.py
abdza/skyrim_formulas
bf6be3c82715cfde89810d6e6183c95a55a4414c
[ "MIT" ]
null
null
null
#!/bin/env python3

import csv

def intersect(list1,list2):
    list3 = [ value for value in list1 if value in list2]
    return list3

def category(list1,effects):
    cat = 'Good'
    good = 0
    bad = 0
    for ing in list1:
        if effects[ing]=='Good':
            good += 1
        else:
            bad += 1
    if bad==0:
        return 'Potion'
    elif good==0:
        return 'Poison'
    else:
        return 'Downside'

effects = {}
ingredients = {}

print("Formulating formulas")

with open('ingredients.csv') as csvfile:
    aff = csv.reader(csvfile, delimiter=',')
    for row in aff:
        if row[0] not in effects.keys():
            effects[row[0]] = row[1]

with open('skyrim-ingredients.csv', newline='') as csvfile:
    ingre = csv.reader(csvfile, delimiter=',')
    for row in ingre:
        if row[0] not in ingredients.keys():
            ingredients[row[0]] = [row[1],row[2],row[3],row[4]]

multieffects = {}

for ce in effects:
    curing = []
    for ing in ingredients:
        if ce in ingredients[ing]:
            curing.append(ing)
    for k,curi in enumerate(curing):
        for i in range(k+1,len(curing)):
            cureff = intersect(ingredients[curi],ingredients[curing[i]])
            cureff.sort()
            if len(cureff)>1:
                if curi>curing[i]:
                    curname = curing[i] + ':' + curi
                else:
                    curname = curi + ':' + curing[i]
                multieffects[curname] = cureff

finallist = {}

for me in multieffects:
    curing = me.split(":")
    for ing in ingredients:
        if ing!=curing[0] and ing!=curing[1]:
            eff1 = intersect(ingredients[curing[0]],ingredients[ing])
            eff2 = intersect(ingredients[curing[1]],ingredients[ing])
            if len(eff1)>0 or len(eff2)>0:
                tmpname = [ val for val in curing ]
                tmpname.append(ing)
                tmpname.sort()
                finalname = ":".join(tmpname)
                finallist[finalname] = list(set(multieffects[me] + eff1 + eff2))
                finallist[finalname].sort()

with open('formulas.csv',mode='w') as formula_file:
    formula_writer = csv.writer(formula_file, delimiter=',')
    formula_writer.writerow(['Category','Ingredient 1','Ingredient 2','Ingredient 3','Effect 1','Effect 2','Effect 3','Effect 4','Effect 5'])
    for fl in finallist:
        formula_writer.writerow([category(finallist[fl],effects)] + fl.split(":") + finallist[fl])
    for fl in multieffects:
        formula_writer.writerow([category(multieffects[fl],effects)] + fl.split(":") + [''] + multieffects[fl])
31.60241
141
0.569577
0
0
0
0
0
0
0
0
269
0.102554
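A quick worked example of the two helpers in the record above, assuming intersect() and category() are in scope from the listing; the effect names are made up for illustration.

# Sketch: intersect() keeps effects common to two ingredients,
# category() then labels the combination from the Good/Bad lookup.
lookup = {'Restore Health': 'Good', 'Damage Magicka': 'Bad'}

shared = intersect(['Restore Health', 'Damage Magicka'],
                   ['Damage Magicka', 'Restore Health'])
print(shared)                                   # ['Restore Health', 'Damage Magicka']
print(category(shared, lookup))                 # 'Downside' (mix of good and bad effects)
print(category(['Restore Health'], lookup))     # 'Potion'
print(category(['Damage Magicka'], lookup))     # 'Poison'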
22e1bc52c4e28d18d68ad09f117367db41946c7e
5,679
py
Python
src/clients/ctm_api_client/models/user_additional_properties.py
IceT-M/ctm-python-client
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
[ "BSD-3-Clause" ]
5
2021-12-01T18:40:00.000Z
2022-03-04T10:51:44.000Z
src/clients/ctm_api_client/models/user_additional_properties.py
IceT-M/ctm-python-client
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
[ "BSD-3-Clause" ]
3
2022-02-21T20:08:32.000Z
2022-03-16T17:41:03.000Z
src/clients/ctm_api_client/models/user_additional_properties.py
IceT-M/ctm-python-client
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
[ "BSD-3-Clause" ]
7
2021-12-01T11:59:16.000Z
2022-03-01T18:16:40.000Z
# coding: utf-8 """ Control-M Services Provides access to BMC Control-M Services # noqa: E501 OpenAPI spec version: 9.20.215 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from clients.ctm_api_client.configuration import Configuration class UserAdditionalProperties(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { "member_of_groups": "list[str]", "authentication": "AuthenticationData", "is_external_user": "bool", } attribute_map = { "member_of_groups": "memberOfGroups", "authentication": "authentication", "is_external_user": "isExternalUser", } def __init__( self, member_of_groups=None, authentication=None, is_external_user=None, _configuration=None, ): # noqa: E501 """UserAdditionalProperties - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._member_of_groups = None self._authentication = None self._is_external_user = None self.discriminator = None if member_of_groups is not None: self.member_of_groups = member_of_groups if authentication is not None: self.authentication = authentication if is_external_user is not None: self.is_external_user = is_external_user @property def member_of_groups(self): """Gets the member_of_groups of this UserAdditionalProperties. # noqa: E501 List of role names # noqa: E501 :return: The member_of_groups of this UserAdditionalProperties. # noqa: E501 :rtype: list[str] """ return self._member_of_groups @member_of_groups.setter def member_of_groups(self, member_of_groups): """Sets the member_of_groups of this UserAdditionalProperties. List of role names # noqa: E501 :param member_of_groups: The member_of_groups of this UserAdditionalProperties. # noqa: E501 :type: list[str] """ self._member_of_groups = member_of_groups @property def authentication(self): """Gets the authentication of this UserAdditionalProperties. # noqa: E501 user authentication # noqa: E501 :return: The authentication of this UserAdditionalProperties. # noqa: E501 :rtype: AuthenticationData """ return self._authentication @authentication.setter def authentication(self, authentication): """Sets the authentication of this UserAdditionalProperties. user authentication # noqa: E501 :param authentication: The authentication of this UserAdditionalProperties. # noqa: E501 :type: AuthenticationData """ self._authentication = authentication @property def is_external_user(self): """Gets the is_external_user of this UserAdditionalProperties. # noqa: E501 :return: The is_external_user of this UserAdditionalProperties. # noqa: E501 :rtype: bool """ return self._is_external_user @is_external_user.setter def is_external_user(self, is_external_user): """Sets the is_external_user of this UserAdditionalProperties. :param is_external_user: The is_external_user of this UserAdditionalProperties. 
# noqa: E501 :type: bool """ self._is_external_user = is_external_user def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list( map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value) ) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict( map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items(), ) ) else: result[attr] = value if issubclass(UserAdditionalProperties, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, UserAdditionalProperties): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, UserAdditionalProperties): return True return self.to_dict() != other.to_dict()
30.207447
101
0.611727
5,308
0.934672
0
0
2,031
0.357633
0
0
2,543
0.44779
22e2114d0da96fc447264d248b0ab2d8a5d86656
3,469
py
Python
Tests/Methods/Mesh/Interpolation/test_interpolation.py
harshasunder-1/pyleecan
32ae60f98b314848eb9b385e3652d7fc50a77420
[ "Apache-2.0" ]
2
2020-08-28T14:54:55.000Z
2021-03-13T19:34:45.000Z
Tests/Methods/Mesh/Interpolation/test_interpolation.py
harshasunder-1/pyleecan
32ae60f98b314848eb9b385e3652d7fc50a77420
[ "Apache-2.0" ]
null
null
null
Tests/Methods/Mesh/Interpolation/test_interpolation.py
harshasunder-1/pyleecan
32ae60f98b314848eb9b385e3652d7fc50a77420
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import pytest import numpy as np from unittest import TestCase from pyleecan.Classes.CellMat import CellMat from pyleecan.Classes.MeshSolution import MeshSolution from pyleecan.Classes.PointMat import PointMat from pyleecan.Classes.MeshMat import MeshMat from pyleecan.Classes.ScalarProductL2 import ScalarProductL2 from pyleecan.Classes.Interpolation import Interpolation from pyleecan.Classes.RefSegmentP1 import RefSegmentP1 from pyleecan.Classes.FPGNSeg import FPGNSeg @pytest.mark.MeshSol class unittest_real_points(TestCase): """ Tests for interpolation method""" def test_line(self): DELTA = 1e-10 mesh = MeshMat() mesh.cell["line"] = CellMat(nb_pt_per_cell=2) mesh.point = PointMat() mesh.point.add_point(np.array([0, 0])) mesh.point.add_point(np.array([1, 0])) mesh.point.add_point(np.array([0, 1])) mesh.point.add_point(np.array([2, 3])) mesh.point.add_point(np.array([3, 3])) mesh.add_cell(np.array([0, 1]), "line") mesh.add_cell(np.array([0, 2]), "line") mesh.add_cell(np.array([1, 2]), "line") c_line = mesh.cell["line"] c_line.interpolation = Interpolation() c_line.interpolation.ref_cell = RefSegmentP1() c_line.interpolation.scalar_product = ScalarProductL2() c_line.interpolation.gauss_point = FPGNSeg() meshsol = MeshSolution() meshsol.mesh = [mesh] vert = mesh.get_vertice(0)["line"] test_pt = np.array([0.7, 0]) test_field = np.array([1, 1]) sol = [1] func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field) testA = np.sum(abs(func - sol)) msg = "Wrong result: returned " + str(func) + ", expected: " + str(test_field) self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA) vert = mesh.get_vertice(0)["line"] test_pt = np.array([0.7, 0]) test_field = np.ones( (2, 120, 3) ) # Simulate a 3D vector field for 120 time step func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field) sol = np.ones((120, 3)) testA = np.sum(abs(func - sol)) msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol) self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA) vert = mesh.get_vertice(2)["line"] test_pt = np.array([0.6, 0.4]) test_field = np.zeros((2, 120, 3)) test_field[0, :] = np.ones( (1, 120, 3) ) # Simulate a 3D vector field for 120 time step func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field) sol = 0.6 * np.ones((120, 3)) testA = np.sum(abs(sol - func)) msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol) self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA) vert = mesh.get_vertice(1)["line"] test_pt = np.array([0, 0.4]) test_field = np.zeros((2, 120, 3)) test_field[1, :] = np.ones( (1, 120, 3) ) # Simulate a 3D vector field for 120 time step func = c_line.interpolation.ref_cell.interpolation(test_pt, vert, test_field) sol = 0.4 * np.ones((120, 3)) testA = np.sum(abs(sol - func)) msg = "Wrong result: returned " + str(func) + ", expected: " + str(sol) self.assertAlmostEqual(testA, 0, msg=msg, delta=DELTA)
38.544444
86
0.618622
2,946
0.849236
0
0
2,967
0.85529
0
0
408
0.117613
22e2925cc3811ca52e0058f9e3c1868295f2875f
13,863
py
Python
lib/models.py
ecarg/grace
8c1540116c07648f7d8852ee5e9edff33b6ae2f6
[ "BSD-2-Clause" ]
7
2017-11-20T03:30:46.000Z
2021-06-10T15:33:07.000Z
lib/models.py
ecarg/grace
8c1540116c07648f7d8852ee5e9edff33b6ae2f6
[ "BSD-2-Clause" ]
47
2017-09-08T07:02:42.000Z
2017-11-04T13:50:50.000Z
lib/models.py
ecarg/grace
8c1540116c07648f7d8852ee5e9edff33b6ae2f6
[ "BSD-2-Clause" ]
2
2018-10-19T05:05:23.000Z
2019-10-31T06:27:24.000Z
# -*- coding: utf-8 -*- """ Pytorch models __author__ = 'Jamie ([email protected])' __copyright__ = 'No copyright. Just copyleft!' """ # pylint: disable=no-member # pylint: disable=invalid-name ########### # imports # ########### import torch import torch.nn as nn from embedder import Embedder from pos_models import PosTagger, FnnTagger, CnnTagger # pylint: disable=unused-import ############# # Ner Class # ############# class Ner(nn.Module): """ named entity recognizer pytorch model """ def __init__(self, embedder, encoder, decoder): """ * embedder (Embedder) [sentence_len, context_len] => [sentence_len, context_len, embed_dim] * encoder (nn.Module) [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim] * decoder (nn.Module) [sentence_len, hidden_dim] => [sentence_len, n_tags], """ super().__init__() self.embedder = embedder self.encoder = encoder self.decoder = decoder assert isinstance(embedder, Embedder) assert isinstance(encoder, nn.Module) assert isinstance(decoder, nn.Module) def forward(self, sentence, gazet, pos, words): #pylint: disable=arguments-differ # [sentence_len, context_len] => [sentence_len, context_len, embed_dim] sentence_embed = self.embedder(sentence, gazet, pos, words) # [sentence_len, context_len, embed_dim] => [sentence_len, hidden_dim] hidden = self.encoder(sentence_embed) # [sentence_len, hidden_dim] => [sentence_len, n_tags] predicted_tags = self.decoder(hidden) return predicted_tags def save(self, path): """ 모델을 저장하는 메소드 :param path: 경로 """ if torch.cuda.is_available(): self.cpu() torch.save(self, str(path)) if torch.cuda.is_available(): self.cuda() @classmethod def load(cls, path): """ 저장된 모델을 로드하는 메소드 :param path: 경로 :return: 모델 클래스 객체 """ model = torch.load(str(path)) if torch.cuda.is_available(): model.cuda() return model ################# # Encoder Class # ################# class Fnn5(nn.Module): """ 2-Layer Full-Connected Neural Networks """ def __init__(self, context_len=21, in_dim=50, hidden_dim=500): super(Fnn5, self).__init__() self.context_len = context_len self.hidden_dim = hidden_dim self.out_dim = hidden_dim self.net = nn.Sequential( nn.Linear(context_len*in_dim, hidden_dim), ) def forward(self, x):#pylint: disable=arguments-differ """ Args: x: [sentence_len, context_len, in_dim] Return: x: [sentence_len, out_dim] """ sentence_len = x.size(0) x = x.view(sentence_len, -1) # [sentence_len, context_len x in_dim] x = self.net(x) # [setence_len, out_dim] return x class Cnn7(nn.Module): """ ConvNet kernels=[2,3,4,5] + Fully-Connected """ def __init__(self, in_dim=50, hidden_dim=500): """ """ super(Cnn7, self).__init__() self.in_dim = in_dim self.hidden_dim = hidden_dim self.out_dim = in_dim * 4 self.conv2 = nn.Sequential( nn.Conv1d(in_dim, in_dim, kernel_size=2), # 20 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10 nn.Conv1d(in_dim, in_dim, kernel_size=2), # 9 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5 nn.Conv1d(in_dim, in_dim, kernel_size=2), # 4 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 2 nn.Conv1d(in_dim, in_dim, kernel_size=2), # 1 ) self.conv3 = nn.Sequential( nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 21 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11 nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 11 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6 nn.Conv1d(in_dim, in_dim, kernel_size=3, padding=1), # 6 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3 nn.Conv1d(in_dim, in_dim, kernel_size=3), # 1 ) self.conv4 = nn.Sequential( 
nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 20 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 10 nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 9 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 5 nn.Conv1d(in_dim, in_dim, kernel_size=4, padding=1), # 4 nn.ReLU(), nn.Conv1d(in_dim, in_dim, kernel_size=4), # 1 ) self.conv5 = nn.Sequential( nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 21 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 11 nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 11 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 6 nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=2), # 6 nn.ReLU(), nn.MaxPool1d(kernel_size=2, ceil_mode=True), # 3 nn.Conv1d(in_dim, in_dim, kernel_size=5, padding=1), # 1 ) def forward(self, x): #pylint: disable=arguments-differ """ Args: x: [sentence_length, context_len, in_dim] Return: x: [sentence_length, in_dim * 4] """ # [sentence_length, in_dim, context_len] x = x.transpose(1, 2) conv2 = self.conv2(x).squeeze(-1) # [sentence_len, in_dim] conv3 = self.conv3(x).squeeze(-1) # [sentence_len, in_dim] conv4 = self.conv4(x).squeeze(-1) # [sentence_len, in_dim] conv5 = self.conv5(x).squeeze(-1) # [sentence_len, in_dim] # [sentence_len, in_dim * 4] out = torch.cat([conv2, conv3, conv4, conv5], dim=1) return out class Cnn8(nn.Module): """ 9-layer Conv NN + Batch Norm + Residual """ def __init__(self, context_len=21, in_dim=64, hidden_dim=None): super(Cnn8, self).__init__() self.context_len = context_len # conv block 64 self.conv_block1_1 = self.conv_block(in_dim, 2, False) self.conv_block1_2_1 = self.conv_block(in_dim, 1, False) self.conv_block1_2_2 = self.conv_block(in_dim, 1, True) self.pool1 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True) # conv block 128 self.conv_block2_1 = self.conv_block(in_dim*2, 2, False) self.conv_block2_2_1 = self.conv_block(in_dim*2, 1, False) self.conv_block2_2_2 = self.conv_block(in_dim*2, 1, True) self.pool2 = nn.MaxPool1d(kernel_size=2, padding=1, ceil_mode=True) # conv block 256 self.conv_block3_1 = self.conv_block(in_dim*4, 2, False) self.conv_block3_2_1 = self.conv_block(in_dim*4, 1, False) self.conv_block3_2_2 = self.conv_block(in_dim*4, 1, True) self.pool3 = nn.MaxPool1d(kernel_size=2) # conv block 512 self.conv_block4_1 = self.conv_block(in_dim*8, 2, False) self.conv_block4_2_1 = self.conv_block(in_dim*8, 1, False) self.conv_block4_2_2 = self.conv_block(in_dim*8, 1, True) self.pool4 = nn.MaxPool1d(kernel_size=3) self.out_dim = in_dim*16 @classmethod def conv_block(cls, in_dim=64, depth=2, double=True): """ Args: [batch_size, dim, length] Return: [batch_size, dim*2, length] if double=True [batch_size, dim, length] if double=False """ out_dim = in_dim layers = [] for i in range(depth): if double: if i == depth - 1: out_dim = in_dim * 2 layers.append(nn.Conv1d(in_dim, out_dim, kernel_size=3, padding=1)) layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.ReLU()) return nn.Sequential(*layers) def forward(self, sentence):#pylint: disable=arguments-differ """ Args: sentence: [sentence_len, context_len, embed_dim] Return: logit: [batch_size, out_dim] """ # [sentence_len, embed_dim, context_len] x = sentence.transpose(1, 2) # conv block 64 x = self.conv_block1_1(x) + x # [batch, in_dim, 21] x = self.conv_block1_2_1(x) + x # [batch, in_dim, 21] x = self.conv_block1_2_2(x) # [batch, in_dim*2, 21] x = self.pool1(x) # [batch, in_dim*2, 11] # conv block 128 x = self.conv_block2_1(x) + x # [batch, in_dim*2, 11] x = self.conv_block2_2_1(x) + x # 
[batch, in_dim*2, 11] x = self.conv_block2_2_2(x) # [batch, in_dim*4, 11] x = self.pool2(x) # [batch, in_dim*4, 6] # conv block 256 x = self.conv_block3_1(x) + x # [batch, in_dim*4, 6] x = self.conv_block3_2_1(x) + x # [batch, in_dim*4, 6] x = self.conv_block3_2_2(x) # [batch, in_dim*8, 6] x = self.pool3(x) # [batch, in_dim*8, 3] # conv block 512 x = self.conv_block4_1(x) + x # [batch, in_dim*8, 3] x = self.conv_block4_2_1(x) + x # [batch, in_dim*8, 3] x = self.conv_block4_2_2(x) # [batch, in_dim*16, 3] x = self.pool4(x) # [batch_size, in_dim*16, 1] x = x.squeeze(-1) # [batch, in_dim*16] return x class RnnEncoder(nn.Module): """ RNN Encoder Module """ def __init__(self, context_len=21, in_dim=1024, out_dim=1024, num_layers=2, cell='gru'): super(RnnEncoder, self).__init__() self.hidden_dim = out_dim // 2 if cell == 'gru': self.rnn = nn.GRU( input_size=in_dim, hidden_size=self.hidden_dim, num_layers=num_layers, dropout=0.5, bidirectional=True) if cell == 'lstm': self.rnn = nn.LSTM( input_size=in_dim, hidden_size=self.hidden_dim, num_layers=num_layers, dropout=0.5, bidirectional=True) elif cell == 'sru': from sru import SRU self.rnn = SRU( input_size=in_dim, hidden_size=self.hidden_dim, num_layers=num_layers, dropout=0.5, bidirectional=True) def forward(self, x):#pylint: disable=arguments-differ """ Args: x: [sentence_len, context_len, input_size] Return: x: [sentence_len, hidden_size] """ # input (seq_len, batch, input_size) # h_0 (num_layers * num_directions, batch, hidden_size) # output (seq_len, batch, hidden_size * num_directions) # h_n (num_layers * num_directions, batch, hidden_size) # [sequence_len, context_len, input_size] # =>[sentence_len, context_len, hidden_size x 2] x, _ = self.rnn(x) # [sequence_len, hidden_size x 2] x = x[:, 10, :] return x ################# # Decoder Class # ################# class FCDecoder(nn.Module): """ Fully-Connected Decoder """ def __init__(self, in_dim, hidden_dim, n_tags): super(FCDecoder, self).__init__() self.net = nn.Sequential( nn.ReLU(), nn.Dropout(), nn.Linear(in_dim, n_tags) ) def forward(self, x):#pylint: disable=arguments-differ """ [sentence_len, in_dim] => [sentence_len, n_tags] """ return self.net(x) class RnnDecoder(nn.Module): """ RNN-based Decoder """ def __init__(self, in_dim=1024, hidden_dim=512, n_tags=11, num_layers=2, cell='gru'): super(RnnDecoder, self).__init__() if cell == 'gru': self.rnn = nn.GRU( input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers, dropout=0.5, bidirectional=True) if cell == 'lstm': self.rnn = nn.LSTM( input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers, dropout=0.5, bidirectional=True) elif cell == 'sru': from sru import SRU self.rnn = SRU( input_size=in_dim, hidden_size=hidden_dim, num_layers=num_layers, dropout=0.5, bidirectional=True) self.out = nn.Sequential( nn.ReLU(), nn.Dropout(), nn.Linear(hidden_dim * 2, n_tags) ) def forward(self, x):#pylint: disable=arguments-differ """ [sentence_len, in_dim] => [sentence_len, n_tags] """ # input (seq_len, batch, input_size) # h_0 (num_layers * num_directions, batch, hidden_size) # output (seq_len, batch, hidden_size * num_directions) # h_n (num_layers * num_directions, batch, hidden_size) # [sentence_len, batch=1, input_size] x = x.unsqueeze(1) # x: [sentence_len, batch=1, hidden_size x 2] # h_n: [num_layers * 2, batch=1, hidden_size] # c_n: [num_layers * 2, batch=1, hidden_size] x, _ = self.rnn(x) # [sequence_len, hidden_size x 2] x = x.squeeze(1) # [sequence_len, n_tags] x = self.out(x) return x
31.506818
89
0.549592
13,370
0.95973
0
0
958
0.068767
0
0
4,289
0.307875
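A small shape-check sketch for the encoder/decoder modules in the models.py listing above: Cnn7 consumes a [sentence_len, context_len=21, embed_dim] tensor and emits embed_dim*4 features per position, which FCDecoder maps to tag logits. It assumes torch is installed and that Cnn7 and FCDecoder from the listing are importable; the dimensions are illustrative.

# Sketch: wire Cnn7 -> FCDecoder and check the tensor shapes end to end.
import torch

embed_dim, n_tags = 50, 11
encoder = Cnn7(in_dim=embed_dim)                   # out_dim == embed_dim * 4
decoder = FCDecoder(encoder.out_dim, hidden_dim=None, n_tags=n_tags)

sentence = torch.randn(30, 21, embed_dim)          # 30 positions, context window of 21
hidden = encoder(sentence)                         # -> [30, 200]
logits = decoder(hidden)                           # -> [30, 11]
print(hidden.shape, logits.shape)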
22e519a060ec94aa77f57c2992012dba58e6efff
221
py
Python
pyseqlogo/__init__.py
BioGeek/pyseqlogo
e41d9645c7a9fa5baf3deab281acf40ea5357f64
[ "MIT" ]
24
2017-10-23T16:06:18.000Z
2022-03-04T14:09:25.000Z
pyseqlogo/__init__.py
BioGeek/pyseqlogo
e41d9645c7a9fa5baf3deab281acf40ea5357f64
[ "MIT" ]
7
2020-11-19T13:55:54.000Z
2021-11-30T03:16:33.000Z
pyseqlogo/__init__.py
BioGeek/pyseqlogo
e41d9645c7a9fa5baf3deab281acf40ea5357f64
[ "MIT" ]
16
2018-02-01T16:12:07.000Z
2021-09-28T03:53:11.000Z
# -*- coding: utf-8 -*-

"""Top-level package for pyseqlogo."""

__author__ = """Saket Choudhary"""
__email__ = '[email protected]'
__version__ = '0.1.0'

from .pyseqlogo import draw_logo
from .pyseqlogo import setup_axis
22.1
38
0.705882
0
0
0
0
0
0
0
0
108
0.488688
22e5c3b42de15feed5e29aa272f135d23d064ab1
1,274
py
Python
setup.py
edulix/apscheduler
8030e0fc7e1845a15861e649988cc73a1aa624ec
[ "MIT" ]
null
null
null
setup.py
edulix/apscheduler
8030e0fc7e1845a15861e649988cc73a1aa624ec
[ "MIT" ]
null
null
null
setup.py
edulix/apscheduler
8030e0fc7e1845a15861e649988cc73a1aa624ec
[ "MIT" ]
null
null
null
# coding: utf-8
import os.path

try:
    from setuptools import setup
    extras = dict(zip_safe=False, test_suite='nose.collector', tests_require=['nose'])
except ImportError:
    from distutils.core import setup
    extras = {}

import apscheduler

here = os.path.dirname(__file__)
readme_path = os.path.join(here, 'README.rst')
readme = open(readme_path).read()

setup(
    name='APScheduler',
    version=apscheduler.release,
    description='In-process task scheduler with Cron-like capabilities',
    long_description=readme,
    author='Alex Gronholm',
    author_email='[email protected]',
    url='http://pypi.python.org/pypi/APScheduler/',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3'
    ],
    keywords='scheduling cron',
    license='MIT',
    packages=('apscheduler', 'apscheduler.jobstores', 'apscheduler.triggers', 'apscheduler.triggers.cron'),
)
31.073171
107
0.663265
0
0
0
0
0
0
0
0
648
0.508634
22e6c10685bc8e3a610b18ebd720a7487a124de6
9,576
py
Python
object_detection/exporter_test.py
travisyates81/object-detection
931bebfa54798c08d2c401e9c1bad39015d8c832
[ "MIT" ]
1
2019-09-19T18:24:55.000Z
2019-09-19T18:24:55.000Z
object_detection/exporter_test.py
travisyates81/object-detection
931bebfa54798c08d2c401e9c1bad39015d8c832
[ "MIT" ]
null
null
null
object_detection/exporter_test.py
travisyates81/object-detection
931bebfa54798c08d2c401e9c1bad39015d8c832
[ "MIT" ]
null
null
null
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Travis Yates
"""Tests for object_detection.export_inference_graph."""
import os
import mock
import numpy as np
import tensorflow as tf
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2


class FakeModel(model.DetectionModel):

  def preprocess(self, inputs):
    return (tf.identity(inputs) *
            tf.get_variable('dummy', shape=(),
                            initializer=tf.constant_initializer(2),
                            dtype=tf.float32))

  def predict(self, preprocessed_inputs):
    return {'image': tf.identity(preprocessed_inputs)}

  def postprocess(self, prediction_dict):
    with tf.control_dependencies(prediction_dict.values()):
      return {
          'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
                                          [0.5, 0.5, 0.8, 0.8]], tf.float32),
          'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
          'detection_classes': tf.constant([[0, 1]], tf.float32),
          'num_detections': tf.constant([2], tf.float32)
      }

  def restore_fn(self, checkpoint_path, from_detection_checkpoint):
    pass

  def loss(self, prediction_dict):
    pass


class ExportInferenceGraphTest(tf.test.TestCase):

  def _save_checkpoint_from_mock_model(self, checkpoint_path,
                                       use_moving_averages):
    g = tf.Graph()
    with g.as_default():
      mock_model = FakeModel(num_classes=1)
      mock_model.preprocess(tf.constant([1, 3, 4, 3], tf.float32))
      if use_moving_averages:
        tf.train.ExponentialMovingAverage(0.0).apply()
      saver = tf.train.Saver()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        saver.save(sess, checkpoint_path)

  def _load_inference_graph(self, inference_graph_path):
    od_graph = tf.Graph()
    with od_graph.as_default():
      od_graph_def = tf.GraphDef()
      with tf.gfile.GFile(inference_graph_path) as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    return od_graph

  def _create_tf_example(self, image_array):
    with self.test_session():
      encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()

    def _bytes_feature(value):
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': _bytes_feature(encoded_image),
        'image/format': _bytes_feature('jpg'),
        'image/source_id': _bytes_feature('image_id')
    })).SerializeToString()
    return example

  def test_export_graph_with_image_tensor_input(self):
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      inference_graph_path = os.path.join(self.get_temp_dir(),
                                          'exported_graph.pbtxt')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=None,
          inference_graph_path=inference_graph_path)

  def test_export_graph_with_tf_example_input(self):
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      inference_graph_path = os.path.join(self.get_temp_dir(),
                                          'exported_graph.pbtxt')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          checkpoint_path=None,
          inference_graph_path=inference_graph_path)

  def test_export_frozen_graph(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=False)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

  def test_export_frozen_graph_with_moving_averages(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=True)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = True
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

  def test_export_and_run_inference_with_image_tensor(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=False)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph) as sess:
      image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={image_tensor: np.ones((1, 4, 4, 3)).astype(np.uint8)})
      self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
                                  [0.5, 0.5, 0.8, 0.8]])
      self.assertAllClose(scores, [[0.7, 0.6]])
      self.assertAllClose(classes, [[1, 2]])
      self.assertAllClose(num_detections, [2])

  def test_export_and_run_inference_with_tf_example(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=False)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph) as sess:
      tf_example = inference_graph.get_tensor_by_name('tf_example:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={tf_example: self._create_tf_example(
              np.ones((4, 4, 3)).astype(np.uint8))})
      self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
                                  [0.5, 0.5, 0.8, 0.8]])
      self.assertAllClose(scores, [[0.7, 0.6]])
      self.assertAllClose(classes, [[1, 2]])
      self.assertAllClose(num_detections, [2])


if __name__ == '__main__':
  tf.test.main()
44.539535
77
0.680138
9,143
0.954783
0
0
0
0
0
0
765
0.079887
22e9a24e177b5cc9ead771b6359f5209ebe42377
543
py
Python
run.py
matthewyoung28/macmentum
af1a26903e25b4a4f278388d7be1e638e071c0a8
[ "MIT" ]
null
null
null
run.py
matthewyoung28/macmentum
af1a26903e25b4a4f278388d7be1e638e071c0a8
[ "MIT" ]
null
null
null
run.py
matthewyoung28/macmentum
af1a26903e25b4a4f278388d7be1e638e071c0a8
[ "MIT" ]
null
null
null
import os
import sys
import random


def get_next_wallpaper(curr_path):
    # Pick a random file from the wallpaper directory.
    # (The original listed the current working directory instead of curr_path.)
    lst_dir = os.listdir(curr_path)
    rand_index = random.randint(0, len(lst_dir) - 1)
    return lst_dir[rand_index]


def get_wall_dir():
    return "/Users/MYOUNG/Pictures/mmt"


def main():
    script = "osascript -e 'tell application \"Finder\" to set desktop picture to POSIX file '"
    path = get_wall_dir()
    file = get_next_wallpaper(path)
    # print("FILE = ", file)
    script = script + path + "/" + file
    # print("SCRIPT = ", script)
    os.system(script)


main()
18.724138
93
0.662983
0
0
0
0
0
0
0
0
167
0.307551
22eae5e579a412e845c5851038ebc3ce5e3c9735
2,099
py
Python
noxfile.py
dolfno/mlops_demo
52a04525f1655a32d45002384a972a1920fd517a
[ "MIT" ]
null
null
null
noxfile.py
dolfno/mlops_demo
52a04525f1655a32d45002384a972a1920fd517a
[ "MIT" ]
null
null
null
noxfile.py
dolfno/mlops_demo
52a04525f1655a32d45002384a972a1920fd517a
[ "MIT" ]
null
null
null
"""Automated CI tools to run with Nox""" import nox from nox import Session locations = "src", "noxfile.py", "docs/conf.py" nox.options.sessions = "lint", "tests" @nox.session(python="3.9") def tests(session: Session) -> None: """Run tests with nox""" session.run("poetry", "install", external=True) session.run("pytest", "--cov") @nox.session(python="3.9") def lint(session: Session) -> None: """Run linting with nox""" session.install( "flake8", "flake8-annotations", "flake8-bandit", "flake8-black", "flake8-bugbear", "flake8-docstrings", "flake8-import-order", ) args = session.posargs or locations session.run("flake8", *args) @nox.session(python="3.9") def black(session: Session) -> None: """Run black with nox""" session.install("black") args = session.posargs or locations session.run("black", *args, "--line-length=120") @nox.session(python="3.9") def pytype(session: Session) -> None: """Run the static type checker.""" args = session.posargs or ["--disable=import-error", *locations] session.install("pytype") session.run("pytype", *args) package = "hypermodern_python" @nox.session(python=["3.9"]) def typeguard(session: Session) -> None: """Run typeguard for type checking with nox""" args = session.posargs or ["-m", "not e2e"] session.run("poetry", "install", "--no-dev", external=True) session.install("pytest", "pytest-mock", "typeguard") session.run("pytest", f"--typeguard-packages={package}", *args) @nox.session(python="3.9") def docs(session: Session) -> None: """Build the documentation.""" session.run("poetry", "install", "--no-dev", external=True) session.install("sphinx", "sphinx-autodoc-typehints") session.run("sphinx-build", "docs", "docs/_build") @nox.session(python="3.9") def coverage(session: Session) -> None: """Upload coverage data.""" session.install("coverage[toml]", "codecov") session.run("coverage", "xml", "--fail-under=0") session.run("codecov", *session.posargs)
28.364865
68
0.636494
0
0
0
0
1,881
0.896141
0
0
848
0.404002
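The noxfile record above wires each task to a decorated session function; sessions are selected by name on the command line (for example `nox -s tests` or `nox -s lint -- src`). As an illustration of the same pattern, the sketch below adds a hypothetical `isort` session that is not part of the original file; it reuses the `locations` tuple and the `session.posargs` fallback exactly as `black` and `lint` do.

import nox
from nox import Session

locations = "src", "noxfile.py", "docs/conf.py"


@nox.session(python="3.9")
def isort(session: Session) -> None:
    """Sort imports (illustrative only; not part of the original noxfile)."""
    args = session.posargs or locations  # fall back to the default locations
    session.install("isort")
    session.run("isort", *args)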
22ecf4bdf03fca4f671513bb4a4ebe6ea6f1152b
225
py
Python
cocotb_test/run.py
canerbulduk/cocotb-test
ece092446a1e5de932db12dfb60441d6f322d5f1
[ "BSD-2-Clause" ]
null
null
null
cocotb_test/run.py
canerbulduk/cocotb-test
ece092446a1e5de932db12dfb60441d6f322d5f1
[ "BSD-2-Clause" ]
null
null
null
cocotb_test/run.py
canerbulduk/cocotb-test
ece092446a1e5de932db12dfb60441d6f322d5f1
[ "BSD-2-Clause" ]
null
null
null
import cocotb_test.simulator


# For partial back compatibility
def run(simulator=None, **kwargs):
    if simulator:
        sim = simulator(**kwargs)
        sim.run()
    else:
        cocotb_test.simulator.run(**kwargs)
17.307692
43
0.648889
0
0
0
0
0
0
0
0
32
0.142222
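The wrapper above only forwards keyword arguments, so legacy callers importing `run` from `cocotb_test.run` keep working while newer code can call `cocotb_test.simulator.run` directly. A minimal usage sketch follows; the HDL source, toplevel, and test-module names are placeholders, not taken from the original repository.

from cocotb_test.run import run

run(
    verilog_sources=["dff.sv"],  # hypothetical HDL source file
    toplevel="dff",              # hypothetical DUT toplevel
    module="test_dff",           # hypothetical cocotb test module
)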
22ed999c1f1e8e891adad2dd4f4e9520b2e7dd4f
267
py
Python
kanban_backend/project_management/apps.py
hamzabouissi/kanban_backend
549d8c2711313011f3186b5b3a3ac969481df3f7
[ "MIT" ]
null
null
null
kanban_backend/project_management/apps.py
hamzabouissi/kanban_backend
549d8c2711313011f3186b5b3a3ac969481df3f7
[ "MIT" ]
null
null
null
kanban_backend/project_management/apps.py
hamzabouissi/kanban_backend
549d8c2711313011f3186b5b3a3ac969481df3f7
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class ProjectManagementConfig(AppConfig):
    name = 'kanban_backend.project_management'

    def ready(self):
        try:
            import kanban_backend.users.signals  # noqa F401
        except ImportError:
            pass
20.538462
60
0.666667
229
0.857678
0
0
0
0
0
0
46
0.172285
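The `ready()` hook above works because importing a signals module registers its receivers as a side effect. A minimal sketch of what such a `kanban_backend/users/signals.py` could look like; the handler and model choice are assumptions, not taken from the repository.

from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver


@receiver(post_save, sender=get_user_model())
def user_created(sender, instance, created, **kwargs):
    # The @receiver decorator registers this handler when the module is
    # imported, which is exactly what AppConfig.ready() triggers.
    if created:
        print(f"new user registered: {instance}")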
22eecf1d05ffdd487202a1266800927ab92af76d
1,098
py
Python
src/framework/tracing.py
davidhozic/Discord-Shiller
ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9
[ "MIT" ]
12
2022-02-20T20:50:24.000Z
2022-03-24T17:15:15.000Z
src/framework/tracing.py
davidhozic/Discord-Shiller
ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9
[ "MIT" ]
3
2022-02-21T15:17:43.000Z
2022-03-17T22:36:23.000Z
src/framework/tracing.py
davidhozic/discord-advertisement-framework
ff22bb1ceb7b4128ee0d27f3c9c9dd0a5279feb9
[ "MIT" ]
1
2022-03-31T01:04:01.000Z
2022-03-31T01:04:01.000Z
""" ~ Tracing ~ This modules containes functions and classes related to the console debug long or trace. """ from enum import Enum, auto import time __all__ = ( "TraceLEVELS", "trace" ) m_use_debug = None class TraceLEVELS(Enum): """ Info: Level of trace for debug """ NORMAL = 0 WARNING = auto() ERROR = auto() def trace(message: str, level: TraceLEVELS = TraceLEVELS.NORMAL): """" Name : trace Param: - message : str = Trace message - level : TraceLEVELS = Level of the trace """ if m_use_debug: timestruct = time.localtime() timestamp = "Date: {:02d}.{:02d}.{:04d} Time:{:02d}:{:02d}" timestamp = timestamp.format(timestruct.tm_mday, timestruct.tm_mon, timestruct.tm_year, timestruct.tm_hour, timestruct.tm_min) l_trace = f"{timestamp}\nTrace level: {level.name}\nMessage: {message}\n" print(l_trace)
25.534884
81
0.528233
131
0.119308
0
0
0
0
0
0
435
0.396175
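A minimal usage sketch for the tracing module above. The module never sets `m_use_debug` itself, so this sketch assumes the caller (or framework code not shown in this record) enables it first; the import path mirrors the recorded file location `src/framework/tracing.py`.

from framework import tracing
from framework.tracing import trace, TraceLEVELS

tracing.m_use_debug = True                     # assumed to be enabled elsewhere in the framework
trace("Advertisement sent")                    # NORMAL level by default
trace("Guild not found", TraceLEVELS.WARNING)  # explicit warning level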
22f2bda6c50ac4fe1d32522345090972ebb7ad66
728
py
Python
sunkit_image/__init__.py
jeffreypaul15/sunkit-image
0987db8fcd38c79a83d7d890e407204e63a05c4f
[ "BSD-2-Clause-NetBSD", "BSD-2-Clause" ]
null
null
null
sunkit_image/__init__.py
jeffreypaul15/sunkit-image
0987db8fcd38c79a83d7d890e407204e63a05c4f
[ "BSD-2-Clause-NetBSD", "BSD-2-Clause" ]
null
null
null
sunkit_image/__init__.py
jeffreypaul15/sunkit-image
0987db8fcd38c79a83d7d890e407204e63a05c4f
[ "BSD-2-Clause-NetBSD", "BSD-2-Clause" ]
null
null
null
""" sunkit-image ============ A image processing toolbox for Solar Physics. * Homepage: https://sunpy.org * Documentation: https://sunkit-image.readthedocs.io/en/latest/ """ import sys from .version import version as __version__ # NOQA # Enforce Python version check during package import. __minimum_python_version__ = "3.7" class UnsupportedPythonError(Exception): """ Running on an unsupported version of Python. """ if sys.version_info < tuple(int(val) for val in __minimum_python_version__.split(".")): # This has to be .format to keep backwards compatibly. raise UnsupportedPythonError( "sunkit_image does not support Python < {}".format(__minimum_python_version__) ) __all__ = []
23.483871
87
0.717033
105
0.144231
0
0
0
0
0
0
399
0.548077
22f32d963c063df45b4e85b0c4f01e4ea1ea6369
26,004
py
Python
app/view.py
lucasblazzi/stocker
52cdec481ed84a09d97369ee4da229e169f99f51
[ "MIT" ]
null
null
null
app/view.py
lucasblazzi/stocker
52cdec481ed84a09d97369ee4da229e169f99f51
[ "MIT" ]
null
null
null
app/view.py
lucasblazzi/stocker
52cdec481ed84a09d97369ee4da229e169f99f51
[ "MIT" ]
null
null
null
import plotly.graph_objects as go import plotly.express as px import pandas as pd class View: def __init__(self, st): self.st = st self.st.set_page_config(layout='wide') self.side_bar = st.sidebar def show_message(self, location, _type, message): if location == "sb": component = self.side_bar else: component = self.st if _type == "success": component.success(message) elif _type == "error": component.error(message) elif _type == "warning": component.warning(message) elif _type == "info": component.info(message) def login(self): _user = self.side_bar.text_input("Username:") _pass = self.side_bar.text_input("Password", type="password") return _user, _pass def advisor_setup(self): option = self.side_bar.selectbox("Options:", ("Research", )) if option == "Research": self.st.header("Advisor Research Area") self.st.markdown("___") return option def research_area(self): execute = False args = {"price": {"enabled": False}, "sector": {"enabled": False}, "news": {"enabled": False}, "company_info": {"enabled": False}, "volatility": {"enabled": False}, "return": {"enabled": False}, "raw_price": {"enabled": False}, "volume": {"enabled": False}} self.st.markdown("___") check_cols = self.st.beta_columns(4) args["price"]["enabled"] = check_cols[0].checkbox("Price") args["company_info"]["enabled"] = check_cols[1].checkbox("Company Information") args["sector"]["enabled"] = check_cols[2].checkbox("Sector Distribution") args["news"]["enabled"] = check_cols[3].checkbox("News") if args["price"]["enabled"]: self.st.markdown("___") self.st.subheader("Price Insights") price_cols = self.st.beta_columns(7) args["price"]["_type"] = price_cols[0].selectbox("Price type:", ("close", "open", "high", "low")) args["price"]["period"] = price_cols[1].selectbox("Period:", ("ytd", "1m", "6m", "1y", "2y", "5y", "max")) args["raw_price"]["enabled"] = price_cols[3].checkbox("Raw Price") args["volume"]["enabled"] = price_cols[4].checkbox("Volume") args["return"]["enabled"] = price_cols[5].checkbox("Return") args["volatility"]["enabled"] = price_cols[6].checkbox("Volatility") return execute, args def show_cryptos(self, cryptos): for crypto in cryptos: cols = self.st.beta_columns(3) cols[0].markdown(f"**Symbol: ** {crypto.get('symbol', '-')}") cols[1].markdown(f"**Name: ** {crypto.get('name', '-')}") cols[2].markdown(f"**Price: ** {crypto.get('price', '-')}") def crypto_form(self): self.st.markdown("<br><br>", unsafe_allow_html=True) self.st.markdown("___") _input = self.st.text_input("Cryptocurrency") return _input def sector_distribution(self, sectors): self.st.subheader("Sector Distribution") r = sectors['sector'].value_counts() fig = go.Figure(data=[go.Pie(labels=r.index, values=r)]) fig.update_layout( width=400, height=400, ) self.st.plotly_chart(fig) def plot_price(self, prices, _type): self.st.subheader(_type.capitalize()) fig = go.Figure() for price in prices: name = price["symbol"][0] fig.add_trace(go.Scatter(x=price.index, y=price[_type], mode='lines', name=name)) fig.update_layout( template="plotly_white", width=1400, height=500, hovermode="x unified", plot_bgcolor='rgba(0,0,0,0)' ) self.st.plotly_chart(fig) def show_companies(self, companies): self.st.markdown("___") self.st.subheader("Company Information") self.st.markdown("<br>", unsafe_allow_html=True) for company in companies: basic = self.st.beta_columns(4) basic[0].markdown(f"## **{company.get('name', ' ')} ({company.get('symbol', ' ')})**") if company.get("logo"): basic[3].image(company.get("logo"), width=50) basic[3].markdown("<br>", unsafe_allow_html=True) desc = 
self.st.beta_columns(2) if company.get('sector'): desc[0].markdown(f"**Sector: ** {company.get('sector', '-')}") if company.get('industry'): desc[1].markdown(f"**Industry: ** {company.get('industry', '-')}") if company.get('description'): desc[0].markdown(f"**Description: ** {company.get('description', '-')}") info = self.st.beta_columns(2) if company.get('CEO'): info[0].markdown(f"**CEO: ** {company.get('CEO', '-')}") if company.get('employees'): info[1].markdown(f"**Employees: ** {company.get('employees', '-')}") if company.get('website'): info[0].markdown(f"**Website: ** {company.get('website', '-')}") if company.get('city') or company.get('state') or company.get('country'): info[1].markdown(f"**Location: ** {company.get('city', ' ')} - {company.get('state', ' ')} - {company.get('country', ' ')}") self.st.markdown("___") def show_news(self, news, title="Company News"): self.st.markdown("___") self.st.subheader(title) self.st.markdown("<br>", unsafe_allow_html=True) for n in news: if n.get('symbol') or n.get('title') or n.get('date'): self.st.markdown(f"**{n.get('symbol', ' ')} - {n.get('title', ' ')} [{n.get('date', ' ')}]**") if n.get('source'): self.st.markdown(f"**Source: ** {n.get('source', '-')}") if n.get("image"): self.st.image(n.get("image"), width=300) if n.get("description"): self.st.markdown(f"**Description: ** {n.get('description', '-')}") if n.get("url"): self.st.markdown(f"**Access on: ** {n.get('url', '-')}") self.st.markdown("<br>", unsafe_allow_html=True) def list_advisors(self, advisors): for advisor in advisors: cols = self.st.beta_columns(3) cols[0].markdown(f"**Name: ** {advisor[0]}") cols[1].markdown(f"**CPF: ** {advisor[1]}") cols[2].markdown(f"**CVM: ** {advisor[2]}") def symbol_input(self, symbols): selected_symbols = self.st.multiselect("Stocks list:", symbols) return selected_symbols def admin_setup(self): option = self.side_bar.selectbox("Option:", ("Data Loader", "Advisors", "Ad-Hoc")) execute = False arg = None self.st.title("Stocker Administration Area") self.st.markdown("___") if option == "Data Loader": arg = dict() self.st.header("Stocker Data Loader") arg["symbols"] = self.st.selectbox("Stocks Option:", ("Sample", "S&P 100")) self.st.markdown("<br><br>", unsafe_allow_html=True) self.st.markdown("___") self.st.subheader("Stocker Company Loader") self.show_message("st", "info", "Stock Loading: Load on our database information about the companies listed" "on the Stocks Option selected") if self.st.button("Load Stocks"): execute = True arg["loader"] = "company" self.st.markdown("<br><br><br>", unsafe_allow_html=True) self.st.markdown("___") self.st.subheader("Stocker Price Loader") self.show_message("st", "info", "Price Loading: Load on our database information about companies daily" " prices, you can select a specific period") arg["period"] = self.st.selectbox("Prices Period:", ("5y", "2y", "1y", "ytd", "6m", "3m", "1m", "5d")) if self.st.button("Load Prices"): execute = True arg["loader"] = "price" self.st.markdown("<br><br><br>", unsafe_allow_html=True) self.st.markdown("___") self.st.subheader("Stocker News Loader") self.show_message("st", "info", "News Loading: Load on our database information about the latest news of" " companies which can impact the market") if self.st.button("Load News"): execute = True arg["loader"] = "news" self.st.markdown("<br><br><br>", unsafe_allow_html=True) self.st.markdown("___") self.st.subheader("Stocker Crypto Loader") self.show_message("st", "info", "Crypto Loading: Load on our database information about all " 
"cryptocurrencies available on the market") if self.st.button("Load Crypto"): execute = True arg["loader"] = "crypto" self.st.markdown("<br><br><br>", unsafe_allow_html=True) self.st.markdown("___") self.st.subheader("Stocker Full Loader") self.show_message("st", "info", "Full Loading: Load on our database all information listed above: companies" " prices, news and cryptocurrencies") if self.st.button("Full Load"): execute = True arg["loader"] = "full" elif option == "Ad-Hoc": self.st.header("Ad-Hoc") elif option == "Advisors": sub_option = self.st.selectbox("Opções:", ("List Advisors", "Register Advisor", "Edit Advisor")) self.st.markdown("___") if sub_option == "List Advisors": option = sub_option execute = True elif sub_option == "Register Advisor": arg = self.advisor_form(None) option = sub_option if arg: execute = True elif sub_option == "Edit Advisor": arg = self.st.text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12") execute = True option = sub_option self.st.markdown("___") return option, execute, arg def advisor_form(self, advisor): cols = self.st.beta_columns([0.5, 0.25, 0.25]) button = "Update Advisor" if advisor else "Register Advisor" advisor = { "name": cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo", value=advisor["name"]) if advisor else cols[0].text_input("Nome", max_chars=30, type='default', help="Nome Completo"), "username": cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login", value=advisor["username"]) if advisor else cols[1].text_input("Usuário", max_chars=15, type='default', help="Usuário para login"), "password": cols[2].text_input("Senha", max_chars=15, type='password', help="Senha para login"), "cpf": advisor["cpf"] if advisor else cols[2].text_input("CPF", max_chars=15, type='default', help="CPF: 123.123.123-12"), "cvm_license": cols[1].text_input("Lincença CVM", max_chars=10, type='default', value=advisor["cvm_license"]) if advisor else cols[1].text_input("Lincença CVM", max_chars=10, type='default'), "email": cols[0].text_input("Email", max_chars=30, type='default', value=advisor["email"]) if advisor else cols[0].text_input("Email", max_chars=30, type='default'), "profile": "advisor" } register = self.st.button(button) self.st.markdown("___") filled = True for b in advisor.values(): if not b: filled = False if register: if not filled: self.show_message("st", "warning", "Preencha todos os campos") else: return advisor @staticmethod def plot_bar(companies, x, y, title, color): df = pd.DataFrame(companies) fig = px.bar(df, x=x, y=y, color=color, title=title, color_discrete_sequence=px.colors.qualitative.Pastel, height=400) return fig @staticmethod def plot_bar2(companies, y, title): df = pd.DataFrame(companies)[["symbol", y]] r = df[y].value_counts() fig = go.Figure(data=[go.Bar(x=df[y], y=r)]) fig.update_layout( height=400, title=title ) return fig @staticmethod def plot_pie(companies, y, title): df = pd.DataFrame(companies)[["symbol", y]] r = df[y].value_counts() fig = go.Figure(data=[go.Pie(labels=df[y], values=r)]) fig.update_layout( height=400, title=title ) return fig @staticmethod def plot_highest_emp(highest_emp): fig = go.Figure(data=[go.Indicator( mode="number+delta", value=highest_emp[0][1], title={ "text": f"{highest_emp[0][0]}<br><span style='font-size:0.8em;color:gray'>Highest number</span><br>" f"<span style='font-size:0.8em;color:gray'>of employees</span>"}, )]) return fig @staticmethod def plot_information_companies(cols, companies): logos = [company[1] for 
company in companies] names = [company[0] for company in companies] for idx, logo in enumerate(logos): col = 2 if idx % 2 == 0 else 3 cols[col].image(logo, width=50) for idx, name in enumerate(names): col = 0 if idx % 2 == 0 else 1 cols[col].markdown(f"**Name: ** {name}") @staticmethod def plot_notusa_companies(cols, companies): for company in companies: cols[0].markdown(f"**Name: ** {company[0]}") cols[1].markdown(f"**Country: ** {company[2]}") cols[2].image(company[1], width=50) @staticmethod def plot_insight_prices(k, v): fig = go.Figure(data=[go.Indicator( mode="number+delta", value=v[0][1], title={ "text": f"{v[0][0]}<br><span style='font-size:0.8em;color:gray'>{k.split('_')[0].capitalize()} {k.split('_')[1].capitalize()}</span><br>" f"<span style='font-size:0.8em;color:gray'>{v[0][2]}</span>"}, )]) return fig def plot_company_ad_hoc(self, results): companies = results["company"]["specific"] highest_emp = results["company"]["insights"]["highest_emp"] information = results["company"]["insights"]["tech"] not_usa = results["company"]["insights"]["not_us"] fields = results["company"]["fields"] if companies: if not "symbol" in fields: self.st.warning("Be sure to select the symbol option") else: self.show_companies(companies) col = self.st.beta_columns(2) if "employees" in fields: fig1 = self.plot_bar(companies, "symbol", "employees", "Number of employees by company", "employees") col[0].plotly_chart(fig1, use_container_width=True) if "state" in fields: fig2 = self.plot_bar2(companies, "state", "State distribution") col[1].plotly_chart(fig2, use_container_width=True) col2 = self.st.beta_columns(2) if "sector" in fields: fig3 = self.plot_pie(companies, "sector", "Companies by sector") col2[0].plotly_chart(fig3, use_container_width=True) if "industry" in fields: fig4 = self.plot_pie(companies, "industry", "Companies by industry") col2[1].plotly_chart(fig4, use_container_width=True) if highest_emp: fig5 = self.plot_highest_emp(highest_emp) self.st.plotly_chart(fig5, use_container_width=True) if information: self.st.markdown("___") title_col = self.st.beta_columns(1) cols4 = self.st.beta_columns([1, 1, 0.2, 0.2]) title_col[0].subheader("Information sector companies") self.plot_information_companies(cols4, information) if not_usa: self.st.markdown("___") title_col2 = self.st.beta_columns(1) title_col2[0].subheader("Nasdaq listed companies outside USA") cols5 = self.st.beta_columns(4) self.plot_notusa_companies(cols5, not_usa) def plot_price_ad_hoc(self, results): if not results["price"]["specific"].empty: self.st.markdown("___") dfs = list() for company in results["price"]["company_list"]: mask = (results["price"]["specific"]["symbol"] == company) dfs.append(results["price"]["specific"][mask]) self.plot_price(dfs, results["price"]["type"][0]) self.st.markdown("___") c = 0 cols = self.st.beta_columns(len(results["price"]["insights"].keys())) for k, val in results["price"]["insights"].items(): if val: cols[c].plotly_chart(self.plot_insight_prices(k, val), use_container_width=True) c += 1 def plot_news_ad_hoc(self, results): if results["news"]["filter"]: self.show_news(results["news"]["filter"], "Filtered News") if results["news"]["insights"]: news_fields = ("id", "symbol", "date", "title", "source", "url", "description", "image") latest = results["news"]["insights"][0] latest_news = dict() for idx, v in enumerate(latest): latest_news[news_fields[idx]] = v self.show_news([latest], f"Latest news - {latest['symbol']} - {latest['date']}") def plot_crypto_ad_hoc(self, results): if results["crypto"]: 
self.st.markdown("___") self.show_cryptos(results["crypto"]) def ad_hoc_plot(self, results): self.plot_company_ad_hoc(results) self.plot_price_ad_hoc(results) self.plot_news_ad_hoc(results) self.plot_crypto_ad_hoc(results) def ad_hoc_form(self, symbols): company_fields = ("symbol", "name", "exchange", "industry", "website", "description", "CEO", "sector", "employees", "state", "city", "country", "logo") news_fields = ("symbol", "date", "title", "source", "url", "description", "image") ad_hoc = self.default_ad_hoc() self.st.markdown("___") self.st.markdown(f"**Company Options:**") cols = self.st.beta_columns([2, 1, 1]) cols[0].markdown(f"**Specific company views:**") ad_hoc["company"]["specific"]["company_list"] = cols[0].multiselect("Stocks list:", sum(symbols, [])) ad_hoc["company"]["specific"]["fields"] = cols[0].multiselect("Information:", company_fields) filter_cols = self.st.beta_columns(6) ad_hoc["company"]["specific"]["order_by"] = filter_cols[0].selectbox("Order By:", ad_hoc["company"]["specific"]["fields"]), ad_hoc["company"]["specific"]["order_method"] = filter_cols[1].selectbox("Order Method:", ("Ascending", "Descending")), ad_hoc["company"]["specific"]["limit"] = filter_cols[2].number_input("Number of results:", value=1, min_value=1, max_value=100), ad_hoc["company"]["specific"]["rule_filter"] = {} cols[1].markdown(f"**Insights views:**") cols[2].markdown(f"**-**") cols[1].markdown("<br>", unsafe_allow_html=True) ad_hoc["company"]["insights"]["highest_emp"] = cols[1].checkbox("Highest employees number") cols[1].markdown("<br>", unsafe_allow_html=True) ad_hoc["company"]["insights"]["tech"] = cols[1].checkbox("Information Companies") cols[2].markdown("<br>", unsafe_allow_html=True) ad_hoc["company"]["insights"]["not_us"] = cols[2].checkbox("Outside USA") cols[2].markdown("<br>", unsafe_allow_html=True) ad_hoc["company"]["specific"]["rule_filter"]["apply"] = cols[2].checkbox("Rule filter") if ad_hoc["company"]["specific"]["rule_filter"]["apply"]: ad_hoc["company"]["specific"]["rule_filter"]["field"] = filter_cols[0].selectbox( "Filter Field:", ("symbol", "name", "employees")) ad_hoc["company"]["specific"]["rule_filter"]["operation"] = filter_cols[1].selectbox( "Operation", ("Greater than", "Less than", "Equals to") if ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees" else ("Equals to", )) ad_hoc["company"]["specific"]["rule_filter"]["value"] = filter_cols[2].number_input("Value: ") \ if ad_hoc["company"]["specific"]["rule_filter"]["field"] == "employees"\ else filter_cols[2].text_input("Value: ") self.st.markdown("___") self.st.markdown(f"**Prices Options:**") price_cols = self.st.beta_columns([2, 1, 1]) price_cols[0].markdown(f"**Specific price views:**") ad_hoc["price"]["specific"]["company_list"] = price_cols[0].multiselect("Price Stocks:", sum(symbols, [])) filter_price_cols = self.st.beta_columns(6) ad_hoc["price"]["specific"]["start_date"] = filter_price_cols[0].date_input("Start Date:") ad_hoc["price"]["specific"]["end_date"] = filter_price_cols[1].date_input("End Date:") ad_hoc["price"]["specific"]["type"] = filter_price_cols[2].selectbox("Price Type:", ("close", "open", "high", "low")), price_cols[1].markdown(f"**Insights views:**") price_cols[2].markdown(f"**-**") price_cols[1].markdown("<br>", unsafe_allow_html=True) price_cols[2].markdown("<br>", unsafe_allow_html=True) ad_hoc["price"]["insights"]["highest_close"] = price_cols[1].checkbox("Highest close price") price_cols[1].markdown("<br>", unsafe_allow_html=True) 
ad_hoc["price"]["insights"]["lowest_close"] = price_cols[2].checkbox("Lowest close price") ad_hoc["price"]["insights"]["highest_volume"] = price_cols[1].checkbox("Highest volume") price_cols[2].markdown("<br>", unsafe_allow_html=True) ad_hoc["price"]["insights"]["lowest_volume"] = price_cols[2].checkbox("Lowest volume") self.st.markdown("___") self.st.markdown(f"**News Options:**") news_cols = self.st.beta_columns([2, 1, 1, 1]) news_cols[0].markdown(f"**Specific news views:**") news_cols[1].markdown("-<br>", unsafe_allow_html=True) news_cols[2].markdown("-<br>", unsafe_allow_html=True) news_cols[3].markdown("-<br>", unsafe_allow_html=True) ad_hoc["news"]["company_list"] = news_cols[0].multiselect("News Stocks:", sum(symbols, [])) ad_hoc["news"]["fields"] = news_cols[0].multiselect("News Info:", news_fields) ad_hoc["news"]["date"] = news_cols[1].date_input("Date:") ad_hoc["news"]["filter_date"] = news_cols[2].selectbox("Filter Date as:", ("On", "Starting from", "Until")) ad_hoc["news"]["order_by"] = news_cols[1].selectbox("Order by field:", ad_hoc["news"]["fields"]) ad_hoc["news"]["order_method"] = news_cols[2].selectbox("Order results:", ("Ascending", "Descending")) ad_hoc["news"]["limit"] = news_cols[3].number_input("Limit of results:", value=1, min_value=1, max_value=100) ad_hoc["news"]["latest"] = news_cols[3].checkbox("Latest News") self.st.markdown("___") self.st.markdown(f"**Crypto Options:**") crypto_col = self.st.beta_columns([2, 0.5, 1]) ad_hoc["crypto"]["name"] = crypto_col[0].text_input("Cryptocurrency") ad_hoc["crypto"]["limit"] = crypto_col[1].number_input("Limit of crypto:", value=1, min_value=1, max_value=100) generate = self.st.button("Generate Report") if generate: return ad_hoc @staticmethod def default_ad_hoc(): return { "company": { "specific": { "company_list": [], "fields": [], "order_by": None, "order_method": None, "limit": None, "rule_filter": { "apply": False, "field": None, "operation": None, "value": None } }, "insights": { "highest_emp": False, "tech": False, "not_us": False } }, "news": { "company_list": [], "date": None, "filter_date": None, }, "price": { "specific": { "company_list": [], "type": None, "start_date": None, "end_date": None }, "insights": { "highest_close": False, "lowest_close": False, "highest_volume": False, "lowest_volume": False, } }, "crypto": { "name": None, "limit": None } }
46.352941
153
0.543916
25,929
0.996809
0
0
3,910
0.150315
0
0
7,393
0.284215
22f3312bdb283b4ef7d6f8aa9f88ddb5c8c89e30
662
py
Python
ch_4/stopping_length.py
ProhardONE/python_primer
211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0
[ "MIT" ]
51
2016-04-05T16:56:11.000Z
2022-02-08T00:08:47.000Z
ch_4/stopping_length.py
zhangxiao921207/python_primer
211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0
[ "MIT" ]
null
null
null
ch_4/stopping_length.py
zhangxiao921207/python_primer
211e37c1f2fd169269fc4f3c08e8b7e5225f2ad0
[ "MIT" ]
47
2016-05-02T07:51:37.000Z
2022-02-08T01:28:15.000Z
# Exercise 4.11
# Author: Noah Waterfield Price

import sys

g = 9.81  # acceleration due to gravity

try:
    # initial velocity (convert from km/h to m/s)
    v0 = (1000. / 3600) * float(sys.argv[1])
    mu = float(sys.argv[2])  # coefficient of friction
except IndexError:
    print 'Both v0 (in km/h) and mu must be supplied on the command line'
    v0 = (1000. / 3600) * float(raw_input('v0 = ?\n'))
    mu = float(raw_input('mu = ?\n'))
except ValueError:
    print 'v0 and mu must be pure numbers'
    sys.exit(1)

d = 0.5 * v0 ** 2 / mu / g
print d

"""
Sample run:
python stopping_length.py 120 0.3
188.771850342
python stopping_length.py 50 0.3
32.7728906843
"""
22.827586
73
0.649547
0
0
0
0
0
0
0
0
364
0.549849
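The script above computes the stopping distance d = v0**2 / (2 * mu * g), with v0 converted from km/h to m/s. A quick sanity check (written in Python 3 syntax, unlike the Python 2 record above) reproduces the first sample run:

g = 9.81                      # acceleration due to gravity (m/s^2)
v0 = 120 * 1000 / 3600        # 120 km/h -> 33.33 m/s
mu = 0.3                      # coefficient of friction
d = 0.5 * v0 ** 2 / (mu * g)
print(d)                      # ~188.77 m, matching the recorded sample output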
22f35b16a60f939a7ee519533639ecb4ccd48d47
866
py
Python
TestFiles/volumioTest.py
GeorgeIoak/Oden
9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c
[ "MIT" ]
null
null
null
TestFiles/volumioTest.py
GeorgeIoak/Oden
9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c
[ "MIT" ]
null
null
null
TestFiles/volumioTest.py
GeorgeIoak/Oden
9bb6a5811e2ea40ceef67e46bc56eab1be9ce06c
[ "MIT" ]
null
null
null
# Testing code to check update status on demand
from socketIO_client import SocketIO, LoggingNamespace
from threading import Thread

socketIO = SocketIO('localhost', 3000)
status = 'pause'


def on_push_state(*args):
    print('state', args)
    global status, position, duration, seek
    status = args[0]['status'].encode('ascii', 'ignore')
    seek = args[0]['seek']
    duration = args[0]['duration']
    if duration:
        position = int(seek / 1000)
    else:
        position = 0
    print("status", status, "position", position)


def _receive_thread():
    socketIO.wait()


receive_thread = Thread(target=_receive_thread, daemon=True)
receive_thread.start()

socketIO.on('pushState', on_push_state)

# issue this and the socketIO.wait in the background will push the reply
socketIO.emit('getState', '', on_push_state)
29.862069
72
0.674365
0
0
0
0
0
0
0
0
224
0.258661