Dataset schema (column, dtype, value range):

column                                        dtype           range
hexsha                                        stringlengths   40 – 40
size                                          int64           5 – 2.06M
ext                                           stringclasses   10 values
lang                                          stringclasses   1 value
max_stars_repo_path                           stringlengths   3 – 248
max_stars_repo_name                           stringlengths   5 – 125
max_stars_repo_head_hexsha                    stringlengths   40 – 78
max_stars_repo_licenses                       listlengths     1 – 10
max_stars_count                               int64           1 – 191k
max_stars_repo_stars_event_min_datetime       stringlengths   24 – 24
max_stars_repo_stars_event_max_datetime       stringlengths   24 – 24
max_issues_repo_path                          stringlengths   3 – 248
max_issues_repo_name                          stringlengths   5 – 125
max_issues_repo_head_hexsha                   stringlengths   40 – 78
max_issues_repo_licenses                      listlengths     1 – 10
max_issues_count                              int64           1 – 67k
max_issues_repo_issues_event_min_datetime     stringlengths   24 – 24
max_issues_repo_issues_event_max_datetime     stringlengths   24 – 24
max_forks_repo_path                           stringlengths   3 – 248
max_forks_repo_name                           stringlengths   5 – 125
max_forks_repo_head_hexsha                    stringlengths   40 – 78
max_forks_repo_licenses                       listlengths     1 – 10
max_forks_count                               int64           1 – 105k
max_forks_repo_forks_event_min_datetime       stringlengths   24 – 24
max_forks_repo_forks_event_max_datetime       stringlengths   24 – 24
content                                       stringlengths   5 – 2.06M
avg_line_length                               float64         1 – 1.02M
max_line_length                               int64           3 – 1.03M
alphanum_fraction                             float64         0 – 1
count_classes                                 int64           0 – 1.6M
score_classes                                 float64         0 – 1
count_generators                              int64           0 – 651k
score_generators                              float64         0 – 1
count_decorators                              int64           0 – 990k
score_decorators                              float64         0 – 1
count_async_functions                         int64           0 – 235k
score_async_functions                         float64         0 – 1
count_documentation                           int64           0 – 1.04M
score_documentation                           float64         0 – 1
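The schema above describes one row per Python source file: repository metadata, the raw file contents, and per-file structure and documentation scores. As a minimal, hypothetical sketch (not part of the dataset itself), the snippet below assumes the rows have been exported as a JSON Lines file and uses pandas to load them and keep the most heavily documented files; the file name rows.jsonl and the 0.4 threshold are illustrative assumptions.

import pandas as pd

# Load rows exported with the schema above (hypothetical file name).
df = pd.read_json("rows.jsonl", lines=True)

# Sanity checks against the declared column ranges: 40-character hexsha,
# single-language (Python) lang column.
assert df["hexsha"].str.len().eq(40).all()
assert (df["lang"] == "Python").all()

# Keep rows with a relatively high documentation score (threshold assumed).
well_documented = df[df["score_documentation"] > 0.4]
print(well_documented[["max_stars_repo_path", "size", "score_documentation"]].head())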
c7300e0d4920ea9bf3233fb48ec01feb851a08ad
4,125
py
Python
code/network/__init__.py
michalochman/complex-networks
49337376e32fac253d8de9919d5acd00a9b566bb
[ "MIT" ]
null
null
null
code/network/__init__.py
michalochman/complex-networks
49337376e32fac253d8de9919d5acd00a9b566bb
[ "MIT" ]
null
null
null
code/network/__init__.py
michalochman/complex-networks
49337376e32fac253d8de9919d5acd00a9b566bb
[ "MIT" ]
null
null
null
import fractions


class Network(object):
    def __init__(self, network):
        self.network = network

    def degree(self, link_type, key):
        return len(self.network.get(link_type).get(key))

    def average_degree(self, link_type):
        degree = 0
        for link in self.network.get(link_type).itervalues():
            degree += len(link)
        return float(degree) / float(len(self.network.get(link_type)))

    def nn_degree(self, link_type, link_n_type, key):
        degree = self.degree(link_type, key)
        nn_degree = 0
        for n_key in self.network.get(link_type, key):
            nn_degree += self.degree(link_n_type, n_key)
        return '%d/%d' % (nn_degree, degree)

    def jaccard_index(self, set_a, set_b):
        n = len(set_a & set_b)
        return float(n)/float(len(set_a) + len(set_b) - n)

    def jaccard_similarity(self, link_type, key_a, key_b, return_string=False):
        key_a = int(key_a)
        key_b = int(key_b)
        set_a = set(self.network.get(link_type).get(key_a).values())
        set_b = set(self.network.get(link_type).get(key_b).values())
        if return_string:
            intersection = len(set_a & set_b)
            union = len(set_a | set_b)
            gcd = fractions.gcd(intersection, union)
            return '%d/%d' % (intersection/gcd, union/gcd)
        return self.jaccard_index(set_a, set_b)

    def collaborative_similarity(self, link_type, link_n_type, key, return_string=False):
        degree = self.degree(link_type, key)
        if degree <= 1:
            return 0
        similarity_sum = 0
        for n_key_1 in self.network.get(link_type).get(key).itervalues():
            for n_key_2 in self.network.get(link_type).get(key).itervalues():
                if n_key_1 == n_key_2:
                    continue
                similarity_sum += self.jaccard_similarity(link_n_type, n_key_1, n_key_2)
        if return_string:
            precision = 1e3
            new_similarity_sum = round(similarity_sum * degree*(degree-1) * precision)
            gcd = fractions.gcd(new_similarity_sum, degree*(degree-1) * precision)
            new_similarity_sum /= gcd
            return '%d/%d' % (new_similarity_sum, degree*(degree-1)*round(new_similarity_sum/similarity_sum))
        return similarity_sum / (degree*(degree-1))

    def average_jaccard_similarity(self, link_type, link_n_type, return_string=False):
        nodes = 0
        similarity_sum = 0
        for key_links in self.network.get(link_type).itervalues():
            for n_key_1 in key_links.itervalues():
                for n_key_2 in key_links.itervalues():
                    if n_key_1 == n_key_2:
                        continue
                    nodes += 1
                    similarity_sum += self.jaccard_similarity(link_n_type, n_key_1, n_key_2)
        if nodes == 0:
            return 0
        if return_string:
            precision = 1e3
            new_similarity_sum = round(similarity_sum * nodes * precision)
            gcd = fractions.gcd(new_similarity_sum, nodes * precision)
            new_similarity_sum /= gcd
            return '%d/%d' % (new_similarity_sum, nodes*round(new_similarity_sum/similarity_sum))
        return similarity_sum / nodes

    def network_collaborative_similarity(self, link_type, link_n_type, return_string=False):
        nodes = 0
        similarity_sum = 0
        for key, key_links in self.network.get(link_type).iteritems():
            if self.degree(link_type, key) <= 1:
                continue
            nodes += 1
            collaborative_similarity = self.collaborative_similarity(link_type, link_n_type, key)
            similarity_sum += collaborative_similarity
        if nodes == 0:
            return 0
        if return_string:
            precision = 1e3
            new_similarity_sum = round(similarity_sum * nodes * precision)
            gcd = fractions.gcd(new_similarity_sum, nodes * precision)
            new_similarity_sum /= gcd
            return '%d/%d' % (new_similarity_sum, nodes*(new_similarity_sum/similarity_sum))
        return similarity_sum/nodes
42.96875
109
0.615758
4,105
0.995152
0
0
0
0
0
0
35
0.008485
c730483de9837a25bc1e629091819a776f0b1ff3
3,055
py
Python
invoke_ansible.py
samvarankashyap/ansible_api_usage
d03c67b4606d2e101ef7341bd31161b4db39cd5b
[ "Apache-2.0" ]
null
null
null
invoke_ansible.py
samvarankashyap/ansible_api_usage
d03c67b4606d2e101ef7341bd31161b4db39cd5b
[ "Apache-2.0" ]
null
null
null
invoke_ansible.py
samvarankashyap/ansible_api_usage
d03c67b4606d2e101ef7341bd31161b4db39cd5b
[ "Apache-2.0" ]
null
null
null
import ansible
import pprint
from ansible import utils
from jinja2 import Environment, PackageLoader
from collections import namedtuple
from ansible import utils
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.plugins.callback import CallbackBase
from callbacks import PlaybookCallback


def invoke_ansible_playbook(module_path, e_vars, playbook_path="site.yml", console=True):
    """ Invokes playbook """
    loader = DataLoader()
    variable_manager = VariableManager()
    variable_manager.extra_vars = e_vars
    inventory = Inventory(loader=loader,
                          variable_manager=variable_manager,
                          host_list=['localhost'])
    passwords = {}
    utils.VERBOSITY = 4
    Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax',
                                     'connection', 'module_path', 'forks', 'remote_user',
                                     'private_key_file', 'ssh_common_args', 'ssh_extra_args',
                                     'sftp_extra_args', 'scp_extra_args', 'become',
                                     'become_method', 'become_user', 'verbosity', 'check'])
    options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False,
                      connection='ssh', module_path=module_path, forks=100,
                      remote_user='root', private_key_file=None, ssh_common_args=None,
                      ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None,
                      become=False, become_method=None, become_user='root',
                      verbosity=utils.VERBOSITY, check=False)
    pbex = PlaybookExecutor(playbooks=[playbook_path],
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            options=options,
                            passwords=passwords)
    if not console:
        cb = PlaybookCallback()
        pbex._tqm._stdout_callback = cb
        return_code = pbex.run()
        results = cb.results
    else:
        results = pbex.run()
    return results
40.197368
89
0.466776
0
0
0
0
0
0
0
0
294
0.096236
c733c87e85c1c4f5626af759efe7bb3290f415c6
2,336
py
Python
bin/python/csv2es.py
reid-wagner/proteomics-pipelines
2214c2ad4c14fabcb50a3c0800e9d383ce73df3d
[ "MIT" ]
2
2018-09-06T14:05:59.000Z
2022-02-18T10:09:06.000Z
bin/python/csv2es.py
reid-wagner/proteomics-pipelines
2214c2ad4c14fabcb50a3c0800e9d383ce73df3d
[ "MIT" ]
7
2018-09-30T00:49:04.000Z
2022-01-27T07:55:26.000Z
bin/python/csv2es.py
reid-wagner/proteomics-pipelines
2214c2ad4c14fabcb50a3c0800e9d383ce73df3d
[ "MIT" ]
3
2019-10-29T12:20:45.000Z
2021-10-06T14:38:43.000Z
#!/usr/bin/env python3
import itertools
import string
from elasticsearch import Elasticsearch, helpers
import sys
import os
from glob import glob
import pandas as pd
import json

host = sys.argv[1]
port = int(sys.argv[2])
alias = sys.argv[3]

print(host)
print(port)
print(alias)

es = Elasticsearch([{'host': host, 'port': port}])

# create our test index
# Get all csv files in /root/data
files = [y for x in os.walk('/root/data') for y in glob(os.path.join(x[0], '*.csv'))]

count = 0


def clean_field(val):
    val = val.split('.')
    val = [i for i in val if i != '']
    val = '_'.join(val)
    val = val.split()
    val = [i for i in val if i != '']
    val = '_'.join(val)
    val = val.split('/')
    val = [i for i in val if i != '']
    val = '_'.join(val)
    return val


es.indices.delete(index=alias + '*', ignore=[400, 404])

indices = []
for file in files:
    data = pd.read_csv(file, sep=None, engine='python')
    index = alias + '_'.join(file.split('/'))
    index = clean_field(index).lower().split('_csv')[0]
    indices.append(index)
    es.indices.create(index)
    for col in data.columns:
        if col.startswith('Unnamed'):
            del data[col]
        else:
            data.rename(columns={col: clean_field(col)}, inplace=True)
    data = data.reset_index()  # Make sure there is no duplicate indexing
    data.rename(columns={'index': 'row'}, inplace=True)
    data['File'] = file
    data['_id'] = data['File'] + '.{}.'.format(str(count)) + data.reset_index()['index'].apply(str)
    data['_type'] = "document"
    data['_index'] = index
    records = data.to_json(orient='records')
    records = json.loads(records)
    helpers.bulk(es, records, chunk_size=100)
    count += 1
    print(es.count(index=index))

# Create an index table in elasticsearch to locate the files
indices_table = pd.DataFrame()
indices_table['Index'] = pd.Series(indices)
indices_table['File'] = pd.Series(files)
indices_table['Alias'] = alias
indices_table['_id'] = indices_table['Alias'] + '.' + indices_table['File']
indices_table['_type'] = "document"
indices_table['_index'] = alias + '_indices'
es.indices.create(alias + '_indices')
records = indices_table.to_json(orient='records')
records = json.loads(records)
helpers.bulk(es, records, chunk_size=100)
print(es.count(index=alias + '_indices'))
28.144578
99
0.644264
0
0
0
0
0
0
0
0
445
0.190497
c7345842917a4fbe78846b66040cbcd50b2fa112
45
py
Python
main/src/preparation/parsers/tree-sitter-python/examples/crlf-line-endings.py
jason424217/Artificial-Code-Gen
a6e2c097c5ffe8cb0929e6703035b526f477e514
[ "MIT" ]
null
null
null
main/src/preparation/parsers/tree-sitter-python/examples/crlf-line-endings.py
jason424217/Artificial-Code-Gen
a6e2c097c5ffe8cb0929e6703035b526f477e514
[ "MIT" ]
null
null
null
main/src/preparation/parsers/tree-sitter-python/examples/crlf-line-endings.py
jason424217/Artificial-Code-Gen
a6e2c097c5ffe8cb0929e6703035b526f477e514
[ "MIT" ]
null
null
null
print a

if b:
    if c:
        d

e
6.428571
9
0.311111
0
0
0
0
0
0
0
0
0
0
c7349ec685ce1af0110178abaaf2eb1878a5bd71
106
py
Python
Src/main.py
DukeA/DAT02X-19-03-MachineLearning-Starcraft2
ade31deb4cf6cacd0c411c39310aeb1300561936
[ "MIT" ]
null
null
null
Src/main.py
DukeA/DAT02X-19-03-MachineLearning-Starcraft2
ade31deb4cf6cacd0c411c39310aeb1300561936
[ "MIT" ]
null
null
null
Src/main.py
DukeA/DAT02X-19-03-MachineLearning-Starcraft2
ade31deb4cf6cacd0c411c39310aeb1300561936
[ "MIT" ]
null
null
null
from absl import app
from mainLoop import main


if __name__ == '__main__':
    app.run(main)
13.25
27
0.632075
0
0
0
0
0
0
0
0
10
0.09434
c735745b02553eb9e477617ad9c63df5e4730b1c
3,793
py
Python
bos_sarcat_scraper/__main__.py
hysds/bos_sarcat_scraper
1bf3612e7d8fad80c8704a909087be19cc3e1db2
[ "Apache-2.0" ]
1
2020-06-24T00:25:30.000Z
2020-06-24T00:25:30.000Z
bos_sarcat_scraper/__main__.py
aria-jpl/bos_sarcat_scraper
1bf3612e7d8fad80c8704a909087be19cc3e1db2
[ "Apache-2.0" ]
null
null
null
bos_sarcat_scraper/__main__.py
aria-jpl/bos_sarcat_scraper
1bf3612e7d8fad80c8704a909087be19cc3e1db2
[ "Apache-2.0" ]
1
2019-05-08T17:15:00.000Z
2019-05-08T17:15:00.000Z
from __future__ import absolute_import
from builtins import str
from builtins import input
import sys
import argparse
from . import bosart_scrape
import datetime
import json


def valid_date(s):
    try:
        try:
            date = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%fZ")
        except:
            date = datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
        return date
    except ValueError:
        msg = "Not a valid date: '{0}'.".format(s)
        raise argparse.ArgumentTypeError(msg)


def geojson(spatial_extent):
    if type(json.loads(spatial_extent)) is dict:
        return spatial_extent


def sort_field(s_f):
    if s_f == "start_time" or s_f == "stop_time" or s_f == "bos_ingest":
        return s_f
    else:
        raise argparse.ArgumentError("The value for sortBy should be either start_time, stop_time or bos_ingest not %s." % s_f)


def sort_order(order):
    if order == "asc" or order == "des":
        return order
    else:
        raise argparse.ArgumentError("The value for sort should be either asc or des not %s," % order)


def check_inputs(args):
    yes = "y"
    no = "n"
    if not args.fromTime and not args.fromBosIngestTime:
        print("You have NOT specified any start time using --fromTime, -from or --fromBosIngestTime. \nYou are asking to find all acquisitions from the beginning of time! \nThis query will take a very long time.\nTHIS IS NOT RECOMMENDED.")
        resp = str(eval(input('Are you sure you want to proceed? (y/n):')))
        if resp.lower() == yes.lower():
            print("Okay! Please wait...")
            return True
        elif resp.lower() == no.lower():
            print("Please try again with the start time specified using --fromTime, -from or --fromBosIngestTime.")
            exit()
        else:
            print("Please specify y/n\n")
            return False
    return True


def main():
    parser = argparse.ArgumentParser(description='Query BOS SarCat for acquisitions.')
    parser.add_argument("-from", "--fromTime", help='specify the temporal start point in format , to get acquisitions starting after the given timestamp in the format yyyy-mm-ddThh:mm:ss.sssZ', type=valid_date)
    parser.add_argument("--fromBosIngestTime", help='provide date and time in format , to get acquisitions acquired by BOS after the given timestamp in the format yyyy-mm-ddThh:mm:ss.sssZ', type=valid_date)
    parser.add_argument("-to", "--toTime", help='specify the temporal end point in format , to get acquisitions ending before the given timestamp in the format yyyy-mm-ddThh:mm:ss.sssZ', type=valid_date)
    parser.add_argument("--spatialExtent", help='specify the area of interest in GeoJSON format', type=geojson)
    parser.add_argument("--sortBy", help='type "start_time" , "stop_time" or "bos_ingest" to sort results by field', type=sort_field)
    parser.add_argument("--sort", help='type "asc" or "des" to get results in ascending or descending order of time respectively. If sortBy is specified but sort is not, then defaults to ascending', type=sort_order)
    args = parser.parse_args()

    checked = False
    while not checked:
        checked = check_inputs(args)

    # construct the parameter list based on user specified restrictions
    params = {}
    if args.fromTime:
        params["fromTime"] = args.fromTime
    if args.fromBosIngestTime:
        params["fromBosIngestTime"] = args.fromBosIngestTime
    if args.toTime:
        params["toTime"] = args.toTime
    if args.spatialExtent:
        params["spatialExtent"] = json.dumps(args.spatialExtent)
    if args.sortBy:
        params["sortBy"] = args.sortBy
    if args.sort:
        params["sort"] = args.sort

    print(bosart_scrape.make_api_call(parameters=params))


if __name__ == '__main__':
    main()
39.926316
240
0.675718
0
0
0
0
0
0
0
0
1,627
0.428948
c73803a506dad8312572b3d3624ec1ddd2985a19
23,181
py
Python
vgm2electron.py
simondotm/vgm2electron
38e340d2baeaa3e5722ac982c82e58fb9858f9d9
[ "MIT" ]
2
2021-03-08T13:55:02.000Z
2021-05-02T12:50:38.000Z
vgm2electron.py
simondotm/vgm2electron
38e340d2baeaa3e5722ac982c82e58fb9858f9d9
[ "MIT" ]
null
null
null
vgm2electron.py
simondotm/vgm2electron
38e340d2baeaa3e5722ac982c82e58fb9858f9d9
[ "MIT" ]
null
null
null
#!/usr/bin/env python # vgm2electron.py # Tool for converting SN76489-based PSG VGM data to Acorn Electron # By Simon Morris (https://github.com/simondotm/) # See https://github.com/simondotm/vgm-packer # # Copyright (c) 2019 Simon Morris. All rights reserved. # # "MIT License": # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the Software # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import functools import itertools import struct import sys import time import binascii import math import operator import os from modules.vgmparser import VgmStream class VgmElectron: OUTPUT_RAWDATA = False # output raw dumps of the data that was compressed by LZ4/Huffman VERBOSE = True # 0-3 represents approx the loudest 50% of volumes (=ON), 4-15 are the quietest 50% (=OFF) ATTENTUATION_THRESHOLD1 = 10 ATTENTUATION_THRESHOLD2 = 10 ATTENTUATION_THRESHOLD3 = 10 # define the number of octaves to transpose whole song by, in case too much bass getting lost TRANSPOSE_OCTAVES1 = 0 TRANSPOSE_OCTAVES2 = 0 TRANSPOSE_OCTAVES3 = 0 #-1 ENABLE_CHANNEL1 = True ENABLE_CHANNEL2 = True ENABLE_CHANNEL3 = True USE_TECHNIQUE = 2 def __init__(self): print("init") #---------------------------------------------------------- # Utilities #---------------------------------------------------------- # split the packed raw data into 11 separate streams # returns array of 11 bytearrays def split_raw(self, rawData, stripCommands = True): registers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] registers_opt = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] latched_channel = -1 output_block = bytearray() output_blocks = [] for o in range(11): output_blocks.append( bytearray() ) if stripCommands: register_mask = 15 else: register_mask = 255 # unpack the raw binary data in 11 arrays of register data without any deltas between them # eg. 
the raw chip writes to all 11 registers every frame n = 0 Packet = True verbose = False while (Packet): packet_size = rawData[n] if verbose: print("packet_size=" + str(packet_size)) n += 1 if packet_size == 255: Packet = False else: for x in range(packet_size): d = rawData[n+x] #if verbose: # print " frame byte number=" +str(x) # print " frame byte=" +str(d) if d & 128: # latch c = (d>>5)&3 latched_channel = c if d & 16: # volume if verbose: print(" volume on channel " + str(c)) registers[c+7] = d & register_mask else: # tone if verbose: print(" tone on channel " + str(c)) registers[c*2+0] = d & register_mask else: if verbose: print(" tone data on latched channel " + str(latched_channel)) registers[latched_channel*2+1] = d # we no longer do any masking here # d & 63 # tone data only contains 6 bits of info anyway, so no need for mask if latched_channel == 3: print("ERROR CHANNEL") # emit current state of each of the 11 registers to 11 different bytearrays for x in range(11): output_blocks[x].append( registers[x] ) # next packet n += packet_size #print(output_blocks[6]) #IGNORE we no longer do this - let the decoder do it instead. if False: # make sure we only emit tone3 when it changes, or 15 for no-change # this prevents the LFSR from being reset lastTone3 = 255 for x in range(len(output_blocks[6])): t = output_blocks[6][x] if t == lastTone3: output_blocks[6][x] = 15 lastTone3 = t # print(output_blocks[6]) # Add EOF marker (0x08) to tone3 byte stream output_blocks[6].append(0x08) # 0x08 is an invalid noise tone. # return the split blocks return output_blocks # given an array of data points, serialize it to a bytearray # size is the number of bytes to be used to represent each element in the source array. def toByteArray(self, array, size = 1): r = bytearray() for v in array: if size < 2: r.append(v & 255) else: r.append(v & 255) r.append(v >> 8) return r #---------------------------------------------------------- # Process(filename) # Convert the given VGM file to an electron VGM file #---------------------------------------------------------- def process(self, src_filename, dst_filename): # load the VGM file, or alternatively interpret as a binary if src_filename.lower()[-4:] != ".vgm": print("ERROR: Not a VGM source") return vgm = VgmStream(src_filename) data_block = vgm.as_binary() data_offset = 0 # parse the header header_size = data_block[0] # header size play_rate = data_block[1] # play rate if header_size == 5 and play_rate == 50: packet_count = data_block[2] + data_block[3]*256 # packet count LO duration_mm = data_block[4] # duration mm duration_ss = data_block[5] # duration ss data_offset = header_size+1 data_offset += data_block[data_offset]+1 data_offset += data_block[data_offset]+1 print("header_size=" +str(header_size)) print("play_rate="+str(play_rate)) print("packet_count="+str(packet_count)) print("duration_mm="+str(duration_mm)) print("duration_ss="+str(duration_ss)) print("data_offset="+str(data_offset)) else: print("No header.") print("") # Trim off the header data. The rest is raw data. 
data_block = data_block[data_offset:] #---------------------------------------------------------- # Unpack the register data into 11 separate data streams #---------------------------------------------------------- registers = self.split_raw(data_block, True) #---------------------------------------------------------- # Begin VGM conversion to Electron #---------------------------------------------------------- # Filter out channels we do not need # Modify all volumes to full or none # Interleave sound to a single channel # output final VGM vgm_stream = bytearray() vgm_time = 0 electron_data = bytearray() # given an SN76489 tone register value, return the equivalent Electron ULA register setting def sn_to_electron(tone_value): # hack to protect against divbyzero if (tone_value == 0): tone_value = 1 hz = float(vgm.vgm_source_clock) / ( 2.0 * float(tone_value) * 16.0) print(" sn_to_electron freq " + str(hz) + "hz") # electron # Sound frequency = 1 MHz / [32 * (S + 1)] # f * 32*(S+1) = 1Mhz # 32*(S+1) = 1Mhz / f # (S+1) = 1Mhz / f*32 #print ("SN freq is " + str(hz)) ula6 = int( 1000000.0 / (hz * 32.0) ) - 1 # check we are within range if ula6 < 0: print(" WARNING: Electron freqency '" + str(ula6) + "' too high (" + str(hz) + ")") ula6 = 0 if ula6 > 255: print(" WARNING: Electron frequency '" + str(ula6) + "' too low (" + str(hz) + ")") ula6 = 255 return ula6 #-------------------------------------------------------------- # conversion settings #-------------------------------------------------------------- # convert the register data to a vgm stream sample_interval = int(44100 / vgm.metadata['rate']) # 882 # 50hz - TODO: use frame rate print("sample_interval=" + str(sample_interval)) USE_TONE3 = VgmElectron.ENABLE_CHANNEL3 # True # TODO: make these all parameters # Add channel filter option # Add mix type options # --attentuation 468 --filter 123 --transpose 00F --mix 123 --arpeggio 2 --rate 50 # Add option to clamp or transpose out of range frequencies # Make the .ula output file filename.electron.ula # Add 0x01 as a terminating byte in the output ULA MIX_RATE = 2 # modulo 2 for interleaving channels # other options # bias for channels # transpose or silence out of range notes channel_mix = 0 #-------------------------------------------------------------- # pre-process music to suit Electron capabilities #-------------------------------------------------------------- for i in range(len(registers[0])): print("Frame " + str(i)) #-------------------------------------------------------------- # step 1- map volumes to 1-bit precision #-------------------------------------------------------------- # 11 registers per frame # Tone 0 HL Tone 1 HL Tone 2 HL Tone 3 Vol 0123 for r in range(11): if r > 6: register_data = registers[r][i] # apply the threshold for each channel threshold = VgmElectron.ATTENTUATION_THRESHOLD1 if r == 8: threshold = VgmElectron.ATTENTUATION_THRESHOLD2 if r == 9: threshold = VgmElectron.ATTENTUATION_THRESHOLD3 # if its a volume, map to loudest volume or no volume (using logarithmic scale) if register_data < threshold: register_data = 0 # full volume else: register_data = 15 # zero volume if r == 7 and VgmElectron.ENABLE_CHANNEL1 == False: register_data = 15 # zero volume if r == 8 and VgmElectron.ENABLE_CHANNEL2 == False: register_data = 15 # zero volume if r == 9 and VgmElectron.ENABLE_CHANNEL3 == False: register_data = 15 # zero volume registers[r][i] = register_data #-------------------------------------------------------------- # step 2 - transpose to fit frequency range 
#-------------------------------------------------------------- # final step - bring tone1 into the frequency range of the electron # if the frequency goes below the range of the ULA capabilities, add an octave def retune(octaves, l,h,v): #if (octaves == 0): # print(" No transpose performed, octaves set to 0") # return print( " tonehi=" + str(registers[h][i]) + ", tonelo=" + str(registers[l][i])) tone_value = (registers[h][i] << 4) + registers[l][i] if tone_value > 0: tone_freq = float(vgm.vgm_source_clock) / ( 2.0 * float(tone_value) * 16.0) print(" Retune, Channel " + str(int(l/2)) + " tone=" + str(tone_value) + ", freq=" + str(tone_freq)) # electron baseline is 122Hz not 244Hz as the AUG states. baseline_freq = 1000000.0 / (32.0*256.0) target_freq = tone_freq retuned = 0 transpose = abs(octaves) while retuned != transpose: # target_freq < baseline_freq: if (octaves < 0): target_freq /= 2.0 else: target_freq *= 2.0 retuned += 1 # if cant reach baseline freq, transpose once, then silence if still too low :( if target_freq < baseline_freq: print(" WARNING: Freq too low - Added " + str(1) + " octave(s) - from " + str(target_freq) + " to " + str(target_freq*2.0) + "Hz") # better to just clamp low frequencies at the bottom, and risk tuning issues rather than transposition jumps target_freq = baseline_freq #*= 2.0 retuned = 1 if target_freq < baseline_freq: registers[v][i] = 15 print(" Tone " + str(i) + " silenced because frequency too low - " + str(target_freq)) #target_freq *= 2.0 #retuned += 1 if retuned: #print(" WARNING: Freq too low - Added " + str(retuned) + " octave(s) - from " + str(tone_freq) + " to " + str(target_freq) + "Hz") tone_value = int( round( float(vgm.vgm_source_clock) / (2.0 * target_freq * 16.0 ) ) ) registers[h][i] = tone_value >> 4 registers[l][i] = tone_value & 15 # transpose #if TRANSPOSE_OCTAVES > 0: print(" Transposing ") retune(VgmElectron.TRANSPOSE_OCTAVES1, 0,1,7) retune(VgmElectron.TRANSPOSE_OCTAVES2, 2,3,8) retune(VgmElectron.TRANSPOSE_OCTAVES3, 4,5,9) #-------------------------------------------------------------- # Step 3 - mix the 2 primary channels down to 1 channel #-------------------------------------------------------------- # map channel 2 to channel 1 # noise channel is completely ignored ENABLE_DOWNMIX = True if ENABLE_DOWNMIX: print(" Downmix channels ") #print("Frame " + str(i)) vol1 = registers[7][i] vol2 = registers[8][i] vol3 = registers[9][i] tone1_active = vol1 != 15 tone2_active = vol2 != 15 tone3_active = vol3 != 15 tone_active = tone1_active or tone2_active or tone3_active if tone_active: print(" Tone active, mixing") output_tone = 1 if self.USE_TECHNIQUE == 2: c1f = (registers[1][i] << 4) + registers[0][i] c2f = (registers[3][i] << 4) + registers[2][i] c3f = (registers[5][i] << 4) + registers[4][i] active_channels = [ False, False, False ] if tone1_active: active_channels[0] = True print("Channel 1 is active volume") if tone2_active: active_channels[1] = True print("Channel 2 is active volume") if tone3_active: active_channels[2] = True print("Channel 3 is active volume") # any channels playing the same frequency are filtered out if tone1_active and tone2_active and c2f == c1f: active_channels[1] = False print("Channel 2 is same freq as Channel 1, filtered") if tone1_active and tone3_active and c3f == c1f: active_channels[2] = False print("Channel 3 is same freq as Channel 1, filtered") if tone2_active and tone3_active and c2f == c3f: active_channels[2] = False print("Channel 3 is same freq as Channel 2, filtered") channel_count = 0 if 
active_channels[0]: channel_count += 1 if active_channels[1]: channel_count += 1 if active_channels[2]: channel_count += 1 print("channel_count=" + str(channel_count)) output_mix = [] if active_channels[0]: output_mix.append(1) if active_channels[1]: output_mix.append(2) if active_channels[2]: output_mix.append(3) mix = (i % channel_count) output_tone = output_mix[mix] if self.USE_TECHNIQUE == 1: # interleaving of channels 1+2 is done on odd/even frames for a consistent effect mix = (i % MIX_RATE) == 0 #(i & 1) == 0 # random is no good, thought it might average out but it sounds , well random #mix = random.random() < 0.5 # test code to see if modulo 3 any good, it wasn't if False: if channel_mix == 0 and vol1 != 0: channel_mix = (channel_mix + 1) % 3 if channel_mix == 1 and vol2 != 0: channel_mix = (channel_mix + 1) % 3 if channel_mix == 1 and vol3 != 0: channel_mix = (channel_mix + 1) % 3 output_tone = (channel_mix % 3) + 1 print("output tone=" + str(output_tone)) channel_mix = (channel_mix + 1) % 3 if True: # detect if channel 1 needs priority this frame # - its volume is on, and the alternative frame mix flag is good c1p = vol1 == 0 and mix # don't give channel 2 priority if tone is the same and channel1 is playing c1f = (registers[1][i] << 4) + registers[0][i] c2f = (registers[3][i] << 4) + registers[2][i] sametone = (c1f == c2f/2) or (c1f == c2f * 2) or (c1f == c2f) sametone = sametone and (vol1 == vol2) and (vol1 == 0) if vol1 == 0 and sametone: #diff < 100: #registers[0][i] == registers[2][i] and registers[1][i] == registers[2][i] and vol1 == 0: c1p = True print(" NOTE: channel 1 & channel 2 have same tone") # replace channel 1 data with channel 2 data # if, channel2 is active, but c1 doesn't have priority this frame if vol2 == 0 and not c1p:# and vol1 != 0: output_tone = 2 # if no volume on tone1, we can look at channel 3 too if USE_TONE3: #if registers[7][i] == 15: if vol1 == 15 and vol2 == 15 and vol3 == 0 and not mix:# and not c1p and output_tone != 2: print("tone3 active") output_tone = 3 # pick which tone to output if output_tone == 1: # do nothing, because tone1 register frequency already setup output_tone = 1 elif output_tone == 2: # replace tone 1 frequency with tone 2 frequency registers[0][i] = registers[2][i] registers[1][i] = registers[3][i] registers[7][i] = registers[8][i] elif output_tone == 3: # replace tone 1 frequency with tone 3 frequency registers[0][i] = registers[4][i] registers[1][i] = registers[5][i] registers[7][i] = registers[9][i] else: print("UNHANDLED CASE - output_tone not set") # output ULA data final_volume = registers[7][i] ula_tone = 0 # zero is highest freq. 
so inaudible, so thats how we handle volume if final_volume == 0: final_tone1 = (registers[1][i] << 4) + registers[0][i] ula_tone = sn_to_electron(final_tone1) electron_data.append( ula_tone ) # write to output ULA file ula_file = open(dst_filename + ".ula.bin", 'wb') ula_file.write(electron_data) ula_file.close() #-------------------------------------------------------------- # Final stage - output to vgm #-------------------------------------------------------------- # Tone1----- Tone2----- Tone3----- Tone4 Vol1 Vol2 Vol3 Vol4 control = [ 0x80, 0x00, 0xa0, 0x00, 0xc0, 0x00, 0xe0, 0x90, 0xb0, 0xd0, 0xf0 ] #filter = [ 0,1,2,3,7,8 ] #filter = [ 2,3,8 ] #filter = [ 0,1,2,3,4,5,6,7,8,9,10 ] filter = [ 0,1,2,3,4,5,7,8,9 ] if ENABLE_DOWNMIX: filter = [ 0,1,7 ] last_tone3 = 255 for i in range(len(registers[0])): # 11 registers per frame # Tone 0 HL Tone 1 HL Tone 2 HL Tone 3 Vol 0123 for r in range(11): register_data = registers[r][i] # dont update noise register unless different update = True if r == 6: if register_data == last_tone3: update = False else: last_tone3 = register_data if not r in filter: update = False if update: register_data |= control[r] vgm_stream.extend( struct.pack('B', 0x50) ) # COMMAND vgm_stream.extend( struct.pack('B', register_data) ) # DATA # next frame if sample_interval == 882: # wait 50 vgm_stream.extend( struct.pack('B', 0x63) ) elif sample_interval == 735: # wait 60 vgm_stream.extend( struct.pack('B', 0x62) ) else: vgm_stream.extend( struct.pack('B', 0x61) ) vgm_stream.extend( struct.pack('B', int(sample_interval % 256)) ) vgm_stream.extend( struct.pack('B', int(sample_interval / 256)) ) # END command vgm_stream.extend( struct.pack('B', 0x66) ) vgm.write_vgm(vgm_stream, dst_filename) #output = bytearray() # write the electron vgm file #open(dst_filename, "wb").write( output ) #------------------------------------------------------------------------ # Main() #------------------------------------------------------------------------ import argparse # Determine if running as a script if __name__ == '__main__': print("Vgm2Electron.py : VGM music converter for Acorn Electron") print("Written in 2019 by Simon Morris, https://github.com/simondotm/vgm-packer") print("") epilog_string = "" parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog_string) parser.add_argument("input", help="VGM source file (must be single SN76489 PSG format) [input]") parser.add_argument("-o", "--output", metavar="<output>", help="write VGC file <output> (default is '[input].vgc')") parser.add_argument("-v", "--verbose", help="Enable verbose mode", action="store_true") parser.add_argument("-a", "--attenuation", default="444", metavar="<nnn>", help="Set attenuation threshold for each channel, 3 character string where each character is 0-F and 0 is loudest, 4 is 50%, F is quietest, default: 444") parser.add_argument("-t", "--transpose", default="000", metavar="<nnn>", help="Set octaves to transpose for each channel, where 1 is +1 octave and F is -1 octave.") parser.add_argument("-c", "--channels", default="123", metavar="[1][2][3]", help="Set which channels will be included in the conversion, default 123, which means all 3 channels") parser.add_argument("-q", "--technique", default=2, metavar="<n>", help="Set which downmix technique to use 1 or 2.") args = parser.parse_args() src = args.input dst = args.output if dst == None: dst = os.path.splitext(src)[0] + ".electron.vgm" # attenuation options attenuation = args.attenuation if (len(attenuation) != 
3): print("ERROR: attenuation must be 3 values eg. '444'") sys.exit() #print("attenuation=" + attenuation) VgmElectron.ATTENTUATION_THRESHOLD1 = int(attenuation[0],16) VgmElectron.ATTENTUATION_THRESHOLD2 = int(attenuation[1],16) VgmElectron.ATTENTUATION_THRESHOLD3 = int(attenuation[2],16) # transpose options transpose = args.transpose if (len(transpose) != 3): print("ERROR: transpose must be 3 values eg. '000'") sys.exit() #print("transpose=" + transpose) # 0 1 2 3 4 5 6 7 8 9 a b c d e f ttable = [0,1,2,3,4,5,6,7,-8,-7,-6,-5,-4,-3,-2,-1] VgmElectron.TRANSPOSE_OCTAVES1 = ttable[ int(transpose[0],16) ] VgmElectron.TRANSPOSE_OCTAVES2 = ttable[ int(transpose[1],16) ] VgmElectron.TRANSPOSE_OCTAVES3 = ttable[ int(transpose[2],16) ] # channel options print(args.channels) VgmElectron.ENABLE_CHANNEL1 = args.channels.find("1") >= 0 VgmElectron.ENABLE_CHANNEL2 = args.channels.find("2") >= 0 VgmElectron.ENABLE_CHANNEL3 = args.channels.find("3") >= 0 print("Channel 1: Enabled=" + str(VgmElectron.ENABLE_CHANNEL1) + ", Transpose=" + str(VgmElectron.TRANSPOSE_OCTAVES1) + ", Attenuation="+str(VgmElectron.ATTENTUATION_THRESHOLD1)) print("Channel 2: Enabled=" + str(VgmElectron.ENABLE_CHANNEL2) + ", Transpose=" + str(VgmElectron.TRANSPOSE_OCTAVES2) + ", Attenuation="+str(VgmElectron.ATTENTUATION_THRESHOLD2)) print("Channel 3: Enabled=" + str(VgmElectron.ENABLE_CHANNEL3) + ", Transpose=" + str(VgmElectron.TRANSPOSE_OCTAVES3) + ", Attenuation="+str(VgmElectron.ATTENTUATION_THRESHOLD3)) # technique VgmElectron.USE_TECHNIQUE = int(args.technique) print("Using technique " + str(VgmElectron.USE_TECHNIQUE)) # check for missing files if not os.path.isfile(src): print("ERROR: File '" + src + "' not found") sys.exit() packer = VgmElectron() packer.VERBOSE = args.verbose packer.process(src, dst)
31.798354
230
0.60981
18,054
0.778827
0
0
0
0
0
0
10,514
0.453561
c739f9c426d2980ab50d3acc428d5d636d5dd280
14,198
py
Python
twitter_sent.py
rthorst/TwitterSentiment
b719feffbfed1dfe9028db0900b3158d19322284
[ "MIT" ]
6
2020-02-21T15:50:34.000Z
2021-11-09T19:45:50.000Z
twitter_sent.py
rthorst/TwitterSentiment
b719feffbfed1dfe9028db0900b3158d19322284
[ "MIT" ]
null
null
null
twitter_sent.py
rthorst/TwitterSentiment
b719feffbfed1dfe9028db0900b3158d19322284
[ "MIT" ]
null
null
null
import webapp2 import tweepy import json import csv import os import statistics import bokeh from bokeh.io import show, output_file from bokeh.plotting import figure from bokeh.models import HoverTool, ColumnDataSource from bokeh.embed import components, json_item from bokeh.resources import INLINE from bokeh.models.glyphs import Line, Text import numpy as np import random import operator from collections import Counter from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer """ ---AUTHOR: --- Robert Thorstad [email protected] ---LICENSE: --- MIT License. ---ABOUT: --- Application to get the sentiment of recent tweets based on a keyword. Example: keyword -> "taco bell" retrieve 300 recent tweets mentioning taco bell. get average sentiment. plot distribution of tweets and sentiment. plot most informative words for this application. This script runs based on google app server. Expects Python 2.7 Depenencies need to be included in the lib/ directory (pip install -t lib [PACKAGE_NAME]) The main work is done by the MainPage class. The get() method runs the main pipeline of code and returns HTML as a string. Working online version: https://twittersentiment-247018.appspot.com/ """ def get_tweets(keyword, max_tweets=200): """ Given a keyword as a string (e.g. "data science"), get recent tweets matching that string up to # max_tweets. Return a list of tweets, represented as strings. """ # API keys. consumer_key = "kNOG1klRMMUYbsjMuY5TKl4lE" consumer_secret = "ieghv6WI1qseYly43A0Ra1MPksEw1i5Onma0txfEu5aHantD2v" access_key = "3291622062-15ssVc0qpJXf2SFXbA7vgfl1Sooz4Ueo2DGPQVz" access_secret = "9XJuzgGSVLnx93tq6NfRzMT07S6o2lzjmHfjt3VRlkqXn" # Initialize tweepy API object and authorize using API key. auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_key, access_secret) api = tweepy.API(auth) """ Get tweets.""" alltweets = [] for status in tweepy.Cursor( api.search, q=keyword + " -RT", # the -RT flag excludes retweets. count=1000, result_type="recent", include_entities=True, monitor_rate_limit=True, wait_on_rate_limit=True, lang="en", ).items(): # get text of the tweet, encoding as utf-8. text = str(status.text.encode("utf-8")) # add to the data structure, alltweets, holding the tweets. alltweets.append(text) # if we've reached max_tweets, break. if len(alltweets) >= max_tweets: break return alltweets class VaderSentimentModel: """ Calculate sentiment using a mostly lexicon-based approach that is optimized for social media. Approach is social media aware, for example emoticons are part of the lexicon and tokenization is twitter-sensitive. There are also some basic rules, e.g. it's sensitive to negations. """ def __init__(self): # Initialize a vader_analyzer object which does the work of sentiment analysis. self.vader_analyzer = SentimentIntensityAnalyzer() pass def classify_sentiment(self, tweet): # Classify sentiment of a single tweet. # Input tweet: as string. # Return sentiment score : # range -1 (very negaitve) to +1 (very positive). # score is calculated as p(positive) - p(negative) # normalizing to range from -1 to 1. # calculate sentiment in a dictionary. key is polarity ("pos", "neg", "neut") and value is probability. 
sentiment_dict = self.vader_analyzer.polarity_scores(tweet) # retrieve the compound sentiment score, which is p(pos) - p(neg), but normalized to range from {-1, 1} score = sentiment_dict["compound"] # compound is the combined score scaled to {-1, 1} return score def plot_tweets(tweets, sentiment_scores): """ Create a histogram-style barplot of tweets and their sentiment. Return a bokeh plot object, expressed as a tuple of (resources, script, div). Where : resources: some CSS, etc. that goes in the head of the webpage for styling the plot. script: javascript for the plot to function. expressed as string. div: html div container for the plot. expressed as string. """ # Sort tweets from negative to positive. # This step is not strictly necessary, but makes it easier to see the overall shape of the data. sorted_indices = np.argsort(sentiment_scores) sentiment_scores = np.array(sentiment_scores)[sorted_indices] tweets = np.array(tweets)[sorted_indices] # Express the data as a bokeh data source object. source = ColumnDataSource(data={ "text": tweets, "sentiment": sentiment_scores, "x": np.arange(len(tweets)), }) """ Create plot. """ # Create plot object. width = 0.9 p = figure(x_axis_label="Tweet", y_axis_label="Sentiment (0 = Neutral)") p.vbar(source=source, x="x", top="sentiment", width=width) # Add hover tool, allowing mouseover to view text and sentiment. hover = HoverTool( tooltips=[ ("text", "@text"), ("sentiment", "@sentiment") ], formatters={ "text": "printf", "sentiment": "printf" }, mode="vline" ) p.add_tools(hover) """ Format plot. """ # axis font size p.xaxis.axis_label_text_font_size = "15pt" p.yaxis.axis_label_text_font_size = "15pt" # remove tick marks from axes p.xaxis.major_tick_line_color = None p.xaxis.minor_tick_line_color = None p.yaxis.major_tick_line_color = None p.yaxis.minor_tick_line_color = None # adjust plot width, height scale = 1.5 p.plot_height = int(250 * scale) p.plot_width = int(450 * scale) # remove toolbar (e.g. move, resize, etc) from right of plot. p.toolbar.logo = None p.toolbar_location = None # remove gridlines p.xgrid.visible = False p.ygrid.visible = False # remove x axis tick labels (done by setting label fontsize to 0 pt) p.xaxis.major_label_text_font_size = '0pt' """ Export plot """ # Create resources string, which is CSS, etc. that goes in the head of resources = INLINE.render() # Get javascript (script) and HTML div (div) for the plot. script, div = components(p) return (resources, script, div) def plot_reason(tweets, sentiment_scores): """ Plot the top words that lead us to the classification as positive or negative. Return: script : javascript for the plot, expressed as string. div : html container for the plot, expressed as string. NOTE: requires the shared resources attribute from plot_tweets() in the HTML header. """ """ Calculate the sentiment of each individual token in the tweets. """ # list tokens, keeping only unique tokens (e.g. remove repeated words). all_toks = [] for tweet in tweets: toks = tweet.lower().split() all_toks.extend(toks) all_toks = [tok for tok in set(all_toks)] # remove duplicates. # calculate sentiment of each token. sm = VaderSentimentModel() toks_sentiment = [sm.classify_sentiment(tok) for tok in all_toks] """ sort tokens by sentiment. if overall valence is negative, sort negative to postitive. if overall valence is positive, sort positive to negative. thus, in any case, the earliest elements in the list are the most informative words. """ nwords = 20 # negative? sort neg -> positive. 
if np.mean(sentiment_scores) < 0: sorted_indices = np.argsort(toks_sentiment) # else (positive)? sort positive -> negative else: sorted_indices = np.argsort(toks_sentiment)[::-1] # toks_to_plot: shape (nwords, ) list of informative tokens. # sentiment_to_plot: shape (nwords, ) list of sentiment of these tokens. toks_to_plot = np.array(all_toks)[sorted_indices][:nwords] sentiment_to_plot = np.array(toks_sentiment)[sorted_indices][:nwords] # convert all sentiment scores to positive values. # this is for DISPLAY only, to make all plots go from left to right. # we still retain the correct tokens and sorting order. sentiment_to_plot = np.array([abs(v) for v in sentiment_to_plot]) """ Set up plot. - create data source object. - define formatting variables. """ text_offset = 0.1 source = ColumnDataSource(data={ "token": toks_to_plot, "sentiment": sentiment_to_plot, "x": np.arange(len(toks_to_plot))[::-1], "label_x": sentiment_to_plot + text_offset }) """ Make plot. """ # Create initial plot. width = 0.9 xrange = [0, max(sentiment_to_plot) + 1] p2 = figure(x_axis_label="Sentiment", y_axis_label="Word", x_range=xrange) p2.hbar(source=source, y="x", right="sentiment", height=width) """ Format plot. """ # Annotate each bar with the word being represented. glyph = Text(x="label_x", y="x", text="token") p2.add_glyph(source, glyph) # Axis labels. p2.xaxis.axis_label_text_font_size = "15pt" p2.yaxis.axis_label_text_font_size = "15pt" # Remove ticks. p2.xaxis.major_tick_line_color = None p2.xaxis.minor_tick_line_color = None p2.yaxis.major_tick_line_color = None p2.yaxis.minor_tick_line_color = None # Remove y axis tick labels. p2.yaxis.major_label_text_font_size = '0pt' # Plot width, height. scale = 1.5 p2.plot_height = int(250 * scale) p2.plot_width = int(250 * scale) # remove toolbar (e.g. move, resize, etc) from right of plot. p2.toolbar.logo = None p2.toolbar_location = None # remove gridlines p2.xgrid.visible = False p2.ygrid.visible = False # remove x axis tick labels (set font to 0pt) p2.xaxis.major_label_text_font_size = '0pt' # get bokeh component for plot 2. script2, div2 = components(p2) return (script2, div2) class MainPage(webapp2.RequestHandler): """ This class does the work of writing HTML to the google app server. Thus, we allow the get() method to incorporate: our main pipeline (getting tweets, analyzing sentiment, producing graphs) writing html """ def get(self): """ Get tweets and sentiment scores. """ # Retrieve keyword from the HTML form. If no keyword provided, use a random suggested keyword. keyword = self.request.get("keyword") if not keyword: suggested_keywords = ["alarm clocks", "the future", "miller lite", "taco bell", "yoga", "netflix", "life", "traffic", "elon musk", "beards", "world trade", "pepsi", "amazon"] indices = np.arange(len(suggested_keywords)) random.shuffle(indices) keyword = suggested_keywords[indices[0]] # Get recent tweets based on the keyword, up to 300 maximum tweets. tweets = get_tweets(keyword, max_tweets=300) # Compute the sentiment of each tweet. v = VaderSentimentModel() sentiment_scores = [v.classify_sentiment(tw) for tw in tweets] # shape (ntweets,) # Label sentiment categorically, e.g. "negative" or "positive" M_sent = np.mean(sentiment_scores) map = {1 : "positive", 0 : "negative"} valence = map[int(M_sent > 0)] """ Create plots. """ ############# # Plot #1: ############ # Plot the distribution of tweets and sentiment. # Resources is CSS code that goes in the header of the HTML. Shared across all bokeh plots. # Script1 is javascript for this plot. 
# Div1 is an HTML container for the plot. Goes where you want the plot to appear. resources, script1, div1 = plot_tweets(tweets=tweets, sentiment_scores=sentiment_scores) ############# # Plot #2: ############ # Plot the key words that lead us to this classification. # Script2 is javascript for this plot. # Div2 is an HTML container for this plot. Goes where you want the plot to appear. # Requires the HTML to include the shared resources, generated above, in the <HEAD> script2, div2 = plot_reason(tweets=tweets, sentiment_scores=sentiment_scores) """ Create HTML output. """ # Load HTML template. # This is a functioning webpage, with some placeholders for the keywords and plots we have created. html_p = os.path.join("html", "index.html") html = open(html_p, "r").read() # Fill in placeholders in the HTML with varibles we have created. term_to_value = { "[[!KEYWORD]]" : keyword, "[[!VALENCE]]" : valence, "[[!BOKEH_SCRIPT]]" : script1, "[[!BOKEH_SCRIPT2]]": script2, "[[!BOKEH_DIV]]" : div1, "[[!BOKEH_RESOURCES]]" : resources, "[[!BOKEH_DIV2]]" : div2 } for term, val in term_to_value.items(): html = html.replace(term, val) """ Write a response. This essentially returns HTML to the google app engine. This will render a webpage visible to the user. """ self.response.headers["Content-Type"] = "text/html" self.response.write(html) # Run application. routes = [('/', MainPage)] my_app = webapp2.WSGIApplication(routes, debug=True)
33.885442
120
0.623257
4,656
0.327934
0
0
0
0
0
0
7,355
0.518031
c73a657eabaaa5580cd95fd8f430b160b1e8e216
8,956
py
Python
tests/testcgatools.py
ereide/pyga-camcal
fd25748ddb11c5b05ef24a2deca2689e0d899875
[ "MIT" ]
5
2018-05-22T09:11:31.000Z
2022-03-11T02:32:01.000Z
tests/testcgatools.py
ereide/pyga-camcal
fd25748ddb11c5b05ef24a2deca2689e0d899875
[ "MIT" ]
null
null
null
tests/testcgatools.py
ereide/pyga-camcal
fd25748ddb11c5b05ef24a2deca2689e0d899875
[ "MIT" ]
null
null
null
import unittest import clifford as cl from clifford import g3c from numpy import pi, e import numpy as np from scipy.sparse.linalg.matfuncs import _sinch as sinch from clifford import MultiVector from pygacal.common.cgatools import ( Sandwich, Dilator, Translator, Reflector, inversion, Rotor, Transversor, I3, I5, VectorEquality, Distance, ga_log, ga_exp, MVEqual, Meet, extractBivectorParameters_complicated, ga_exp_complicated, one) from pygacal.geometry import createRandomBivector, createRandomVector, createRandomPoints from pygacal.geometry.lines import createLine from pygacal.geometry.planes import createPlane layout = g3c.layout locals().update(g3c.blades) ep, en, up, down, homo, E0, ninf, no = (g3c.stuff["ep"], g3c.stuff["en"], g3c.stuff["up"], g3c.stuff["down"], g3c.stuff["homo"], g3c.stuff["E0"], g3c.stuff["einf"], -g3c.stuff["eo"]) np.random.seed(2512) def AssertMVEqual(actual, expected, rtol = 1e-5, atol = 1e-6, verbose = False): assert(MVEqual(actual, expected, rtol, atol, verbose)) def AssertMVUnEqual(actual, expected, rtol = 1e-5, atol = 1e-6, verbose = False): assert(not MVEqual(actual, expected, rtol, atol, verbose)) class TestCGAOperators(unittest.TestCase): def testDilator(self): x = 2*e1 + 3* e2 + 4*e3 X = up(x) assert(down(Sandwich(X, Dilator(0.1))) == x * 0.1) def testTranslation(self): x = 2*e1 + 3* e2 + 4*e3 X = up(x) a = 2 * e1 + e3 assert(down(Sandwich(X, Translator(a))) == x + a) def testRotation(self): x = 2*e1 + 3* e2 + 4*e3 X = up(x) actual = down(Sandwich(X, Rotor(e12, pi/2))) expected = (-3.0)*e1 + 2.0*e2 + 4.0 * e3 assert(actual == expected) def testInversion(self): x = 2*e1 + 3* e2 + 4*e3 X = up(x) assert(down(inversion(X)) * x == 1) def testDistance(self): a = e1 b = e2 A, B = up(a), up(b) assert(Distance(A, B) == np.sqrt(2)) def testMeet(self): A, B, C, D = createRandomPoints(N = 4, scale = 50) L = createLine(A, B) L2 = createLine(A, C) P1 = createPlane(A, B, C) P2 = createPlane(A, B, D) L_actual = Meet(P1, P2) assert(MVEqual(L, L_actual)) #Plane to line Q = (ninf ^ A).normal() P3 = A ^ C ^ D ^ ninf Q_actual = Meet(P3, L).normal() #How do we define order/direction? 
assert(MVEqual(Q, Q_actual)) def testAssertEqual(self): verbose = False a = createRandomBivector() b = a + 0.01 a2 = b - 0.01 c = a + 1 d = c - a AssertMVEqual(a, a2, verbose = verbose) AssertMVUnEqual(a, b, verbose = verbose) AssertMVEqual(d, 1, verbose = verbose) def testLogarithm(self): verbose = False if verbose: print("\nTest Logarithms and exponents") phi = 0.5 #Rotation amount P = (e12 + 2*e23 + 3*e13).normal() #Rotation Plane P_n = P*I3 t = 2.73 * e1 + 3.14*e2 #Translation vector t_nor = (P_n | t) * P_n #Decomposition into normal component t_par = t - t_nor #Decomposition into paralel component assert(t_par + t_nor == t) if verbose: print("P = ", P) print("phi = ", phi) print("t = ", t) print("t_nor = ", t_nor) print("t_par = ", t_par) print("") assert(P|t_nor == 0) #Normal to P assert(P^t_nor != 0) #Normal to P assert(P|t_par != 0) #Parallel to P assert(P^t_par == 0) #Parallel to P assert(P*t != 0) #Non zero product R_expected = (np.cos(phi) + (np.sin(phi) * P))*(1 + (t_nor*ninf)) + np.sinc(phi/np.pi)*t_par * ninf B_expected = phi * P + t*ninf R_exponential = np.exp(B_expected) R_actual = ga_exp(B_expected, verbose = verbose) B_new = ga_log(R_expected, verbose = verbose) R_ga = ga_exp(B_new) if verbose: print("R_old ", R_expected) print("R_expected ", R_actual) print("R_exponential", R_exponential) print("R_ga ", R_ga) print("B_new ", B_new) print("B_expected ", B_expected) #Rotor properties AssertMVEqual(R_expected * ~R_expected, 1, verbose = verbose) AssertMVEqual(R_ga * ~R_ga, 1, verbose = verbose) #Equalities AssertMVEqual(R_actual, R_expected, verbose = verbose) AssertMVEqual(R_exponential, R_expected, verbose = verbose) AssertMVEqual(B_new, B_expected, verbose = verbose) AssertMVEqual(R_ga, R_expected, verbose = verbose) N = 100 #Random bivectors to test this as well for i in range(N): B = createRandomBivector() AssertMVEqual(B, ga_log(ga_exp(B, verbose = verbose), verbose = verbose), verbose = verbose) def testComplicatedLogarithm(self): verbose = True if verbose: print("\nTest Complicated Logarithms and exponents") phi = 0.2 #Rotation amount P = (e12 + 2*e23 + 3*e13).normal() #Rotation Plane P_n = P*I3 #t = 0 t = 2.73 * e1 + 3.14*e2 #Translation vector t_nor = (P_n | t) * P_n #Decomposition into normal component t_par = t - t_nor #Decomposition into paralel component omega = 0.1 assert(t_par + t_nor == t) if verbose: print("P = ", P) print("phi = ", phi) print("t = ", t) print("t_nor = ", t_nor) print("t_par = ", t_par) print("omega = ", omega) print("") """ assert(P|t_nor == 0) #Normal to P assert(P^t_nor != 0) #Normal to P assert(P|t_par != 0) #Parallel to P assert(P^t_par == 0) #Parallel to P assert(P*t != 0) #Non zero product assert(t_par|t_nor == 0) #Non zero product """ B_expected = (phi * P) + (t*ninf) + (omega * E0) k = (omega * omega + phi * phi) R_expected = (np.cos(phi) + np.sin(phi) * P)*(np.cosh(omega) + np.sinh(omega) * E0 + sinch(omega) * t_nor*ninf) if (k > 0): R_expected += 1/k* ( (-omega * np.sin(phi) * np.cosh(omega) + phi * np.cos(phi) * np.sinh(omega)) * P + ( omega * np.cos(phi) * np.sinh(omega) + phi * np.sin(phi) * np.cosh(omega))) * t_par * ninf else: R_expected += t_par * ninf phi_test, P_test, t_nor_test, t_par_test, omega_test = extractBivectorParameters_complicated(B_expected) B_actual = phi_test * P_test + (t_nor_test + t_par_test)*ninf + omega_test * E0 #Testing some basic properties of the extraction AssertMVEqual(phi*(P * ~P), phi*one, verbose = False) AssertMVEqual(phi*P, phi*P_test, verbose = False) R_exponential = np.exp(B_expected) 
R_actual = ga_exp_complicated(B_expected, verbose = verbose) #B_new = ga_log(R_expected, verbose = verbose) #R_ga = ga_exp(B_new) if verbose: print("R_expected ", R_expected) print("R_actual ", R_actual) print("R_exponential ", R_exponential) #print("R_ga ", R_ga) #print("B_new ", B_new) print("B_expected ", B_expected) print() #BivectorExtraction AssertMVEqual(B_actual, B_expected, verbose = verbose) AssertMVEqual(R_expected * ~R_expected, one, verbose = verbose) #Rotor properties AssertMVEqual(R_actual * ~R_actual, one, verbose = verbose) #Only an approximation AssertMVEqual(R_exponential * ~R_exponential, one, verbose = verbose) #AssertMVEqual(R_expected * ~R_expected, 1, verbose = verbose) #AssertMVEqual(R_ga * ~R_ga, 1, verbose = verbose) #Equalities #AssertMVEqual(R_actual, R_expected, verbose = verbose) AssertMVEqual(R_exponential, R_actual, rtol = 1e-2, atol = 1e-3, verbose = verbose) #AssertMVEqual(B_new, B_expected, verbose = verbose) #AssertMVEqual(R_ga, R_expected, verbose = verbose) #N = 100 #Random bivectors to test this as well #for i in range(N): # B = createRandomBivector() # AssertMVEqual(B, ga_log(ga_exp(B, verbose = verbose), verbose = verbose), verbose = verbose) if __name__ == "__main__": unittest.main()
33.17037
125
0.546226
7,571
0.845355
0
0
0
0
0
0
1,861
0.207794
c73c3d02ecdfac6eb2c791e1853c9f4bcf52f552
6,909
py
Python
router/posts.py
DiegoLing33/prestij.xyz-api
69a11a2c93dd98975f9becbc4b8f596e4941a05f
[ "MIT" ]
null
null
null
router/posts.py
DiegoLing33/prestij.xyz-api
69a11a2c93dd98975f9becbc4b8f596e4941a05f
[ "MIT" ]
null
null
null
router/posts.py
DiegoLing33/prestij.xyz-api
69a11a2c93dd98975f9becbc4b8f596e4941a05f
[ "MIT" ]
null
null
null
# ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
#
# Developed by Yakov V. Panov (C) Ling • Black 2020
# @site http://ling.black
# ██╗░░░░░██╗███╗░░██╗░██████╗░░░░██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗
# ██║░░░░░██║████╗░██║██╔════╝░░░░██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝
# ██║░░░░░██║██╔██╗██║██║░░██╗░░░░██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░
# ██║░░░░░██║██║╚████║██║░░╚██╗░░░██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░
# ███████╗██║██║░╚███║╚██████╔╝░░░██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗
# ╚══════╝╚═╝╚═╝░░╚══╝░╚═════╝░░░░╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝
#
# Developed by Yakov V. Panov (C) Ling • Black 2020
# @site http://ling.black
from typing import List

from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel

from core.response import RequestLimit
from database import get_db, DatabaseUtils
from database.wow.models import PostModel, PostCommentsModel
from wow.interface.entity import PostCategory, Post, PostCategoryCreate, PostCreate, PostLikeCreate, PostCommentCreate
from wow.utils.posts import PostsUtils
from wow.utils.users import BlizzardUsersUtils

router = APIRouter()


class TokenArgs(BaseModel):
    token: str


class TokenPostIdArgs(BaseModel):
    token: str
    post_id: int


class CommentIdAndToken(TokenArgs):
    comment_id: int


class PostAPIList(BaseModel):
    items: List[Post]
    count: int


class PostAPIListResponse(BaseModel):
    response: PostAPIList
    request: RequestLimit


# -----------------------------------
# CATEGORIES
# -----------------------------------

@router.post(
    "/categories",
    response_model=PostCategory,
    summary='Adds the category'
)
def add_category(body: PostCategoryCreate):
    """
    Adds the category
    :param body:
    :return:
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_category(user_id=blizzard_id, url=body.url, title=body.title)


@router.get(
    "/categories",
    response_model=List[PostCategory],
    summary='Returns the categories'
)
def get_categories():
    """
    Returns the categories list
    :return:
    """
    return PostsUtils.get_categories()


# -----------------------------------
# POSTS
# -----------------------------------

@router.get(
    "/",
    response_model=PostAPIListResponse,
    summary='Returns all the posts'
)
def get_posts_all(limit: int = 100, offset: int = 0):
    return PostsUtils.get_posts_limit(
        limit=limit,
        offset=offset
    )


@router.get(
    "/category/{category_url}",
    response_model=PostAPIListResponse,
    summary='Returns the posts in category'
)
def get_posts_all(category_url: int, limit: int = 100, offset: int = 0):
    """
    Returns all the posts by category
    :param category_url:
    :param limit:
    :param offset:
    :return:
    """
    return PostsUtils.get_posts_by_category_limit(
        category_id=category_url,
        limit=limit,
        offset=offset
    )


@router.get(
    "/user/{blizzard_id}",
    response_model=PostAPIListResponse,
    summary='Returns the posts by users'
)
def get_posts_all(blizzard_id: int, limit: int = 100, offset: int = 0):
    """
    Returns all the posts by category
    :param blizzard_id:
    :param limit:
    :param offset:
    :return:
    """
    return PostsUtils.get_posts_by_blizzard_id(
        blizzard_id=blizzard_id,
        limit=limit,
        offset=offset
    )


@router.post(
    "/like",
    summary='Likes the post',
    tags=['Лайки']
)
def like_post(body: PostLikeCreate):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_like(
        user_id=blizzard_id,
        post_id=body.post_id,
    )


@router.post(
    "/unlike",
    summary='Unlikes the post',
    tags=['Лайки']
)
def like_post(body: PostLikeCreate):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.remove_like(
        user_id=blizzard_id,
        post_id=body.post_id,
    )


@router.post(
    "/comment",
    summary='Adds the comment',
    tags=['Комментарии']
)
def like_post(body: PostCommentCreate):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_comment(
        user_id=blizzard_id,
        post_id=body.post_id,
        reply_id=body.reply_id,
        text=body.text,
    )


@router.delete(
    "/comment",
    summary='Removes the comment',
    tags=['Комментарии']
)
def removes_post(body: CommentIdAndToken, db=Depends(get_db)):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    com = db.query(PostCommentsModel).filter(PostCommentsModel.id == body.comment_id).filter(
        PostCommentsModel.user_id == blizzard_id)
    if com.count() > 0:
        com.delete()
        db.commit()
        return True
    return False


@router.post(
    "/",
    response_model=Post,
    summary='Adds the post'
)
def add_post(body: PostCreate):
    """
    Adds the post item
    :param body:
    :return:
    """
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    return PostsUtils.add_post(
        user_id=blizzard_id,
        category_id=body.category_id,
        title=body.title,
        content=body.content,
        tags=body.tags,
        image=body.image
    )


@router.delete(
    "/{post_id}",
    summary='Deletes the post'
)
def delete_post(post_id: int, body: TokenArgs, db=Depends(get_db)):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    q = db.query(PostModel).filter(PostModel.id == post_id).filter(PostModel.user_id == blizzard_id)
    if q.count() == 0:
        raise HTTPException(status_code=404, detail='Post is undefined')
    return DatabaseUtils.remove_query(db, q)


@router.post(
    "/{post_id}",
    summary='Edits the post'
)
def edit_post(post_id: int, body: PostCreate, db=Depends(get_db)):
    blizzard_id = BlizzardUsersUtils.id__safe(body.token)
    q = db.query(PostModel).filter(PostModel.id == post_id).filter(PostModel.user_id == blizzard_id)
    if q.count() == 0:
        raise HTTPException(status_code=404, detail='Post is undefined')
    q.update({
        'title': body.title,
        'content': body.content,
        'category_id': body.category_id,
        'image': body.image,
        'tags': body.tags,
    })
    db.commit()
    return True


@router.get(
    "/{post_id}",
    response_model=Post,
    summary='Returns the post'
)
def get_post(post_id: int, db=Depends(get_db)):
    return db.query(PostModel).filter(PostModel.id == post_id).first()
25.876404
118
0.568823
317
0.03655
0
0
4,813
0.554941
0
0
3,981
0.459011
c73c5c8e9b60dd28827b865f9cd0c2682cc0cd16
3,216
py
Python
toontown/catalog/CatalogChatBalloon.py
CrankySupertoon01/Toontown-2
60893d104528a8e7eb4aced5d0015f22e203466d
[ "MIT" ]
1
2021-02-13T22:40:50.000Z
2021-02-13T22:40:50.000Z
toontown/catalog/CatalogChatBalloon.py
CrankySupertoonArchive/Toontown-2
60893d104528a8e7eb4aced5d0015f22e203466d
[ "MIT" ]
1
2018-07-28T20:07:04.000Z
2018-07-30T18:28:34.000Z
toontown/catalog/CatalogChatBalloon.py
CrankySupertoonArchive/Toontown-2
60893d104528a8e7eb4aced5d0015f22e203466d
[ "MIT" ]
2
2019-12-02T01:39:10.000Z
2021-02-13T22:41:00.000Z
from pandac.PandaModules import *

class CatalogChatBalloon:
    TEXT_SHIFT = (0.1, -0.05, 1.1)
    TEXT_SHIFT_REVERSED = -0.05
    TEXT_SHIFT_PROP = 0.08
    NATIVE_WIDTH = 10.0
    MIN_WIDTH = 2.5
    MIN_HEIGHT = 1
    BUBBLE_PADDING = 0.3
    BUBBLE_PADDING_PROP = 0.05
    BUTTON_SCALE = 6
    BUTTON_SHIFT = (-0.2, 0, 0.6)
    FRAME_SHIFT = (0.2, 1.4)

    def __init__(self, model):
        self.model = model

    def generate(self, text, font, textColor=(0,0,0,1), balloonColor=(1,1,1,1),
                 wordWrap = 10.0, button=None, reversed=False):
        root = NodePath('balloon')

        # Add balloon geometry:
        balloon = self.model.copyTo(root)
        top = balloon.find('**/top')
        middle = balloon.find('**/middle')
        bottom = balloon.find('**/bottom')
        balloon.setColor(balloonColor)
        if balloonColor[3] < 1.0:
            balloon.setTransparency(1)

        # Render the text into a TextNode, using the font:
        t = root.attachNewNode(TextNode('text'))
        t.node().setFont(font)
        t.node().setWordwrap(wordWrap)
        t.node().setText(text)
        t.node().setTextColor(textColor)
        width, height = t.node().getWidth(), t.node().getHeight()

        # Turn off depth write for the text: The place in the depth buffer is
        # held by the chat bubble anyway, and the text renders after the bubble
        # so there's no risk of the bubble overwriting the text's pixels.
        t.setAttrib(DepthWriteAttrib.make(0))
        t.setPos(self.TEXT_SHIFT)
        t.setX(t, self.TEXT_SHIFT_PROP*width)
        t.setZ(t, height)

        if reversed:
            # The nametag code wants the text on the left side of the axis,
            # rather than on the right side. Therefore, we move the text to the
            # opposite side:
            t.setX(self.TEXT_SHIFT_REVERSED - self.TEXT_SHIFT_PROP*width - width)

        # Give the chat bubble a button, if one is requested:
        if button:
            np = button.copyTo(root)
            np.setPos(t, width, 0, -height)
            np.setPos(np, self.BUTTON_SHIFT)
            np.setScale(self.BUTTON_SCALE)

        # Set a minimum width and height for short or empty messages
        if width < self.MIN_WIDTH:
            width = self.MIN_WIDTH
            if reversed:
                t.setX(t, -width/2.0)
            else:
                t.setX(t, width/2.0)
            t.node().setAlign(TextNode.ACenter)
        if height < self.MIN_HEIGHT:
            height = self.MIN_HEIGHT
            t.setX(t, height/2.0)
            t.node().setAlign(TextNode.ACenter)

        # Set the balloon's size:
        width *= 1+self.BUBBLE_PADDING_PROP
        width += self.BUBBLE_PADDING
        balloon.setSx(width/self.NATIVE_WIDTH)
        if reversed:
            balloon.setSx(-balloon.getSx())
            balloon.setTwoSided(True) # Render the backface of the balloon
        middle.setSz(height)
        top.setZ(top, height-1)

        # Calculate the frame occupied by the balloon:
        left, bottom = self.FRAME_SHIFT
        if reversed:
            left = -left - width
        frame = (left, left+width, bottom, bottom+height+1)

        return root, frame
34.212766
81
0.589552
3,180
0.988806
0
0
0
0
0
0
689
0.214241
c73c9cd86a4a585bb09b4cbd3f15cf16c3ddc42d
831
py
Python
TTS/vocoder/tf/utils/io.py
mightmay/Mien-TTS
8a22ff0a79558b3cf4981ce1b63f4d1485ea6338
[ "MIT" ]
null
null
null
TTS/vocoder/tf/utils/io.py
mightmay/Mien-TTS
8a22ff0a79558b3cf4981ce1b63f4d1485ea6338
[ "MIT" ]
null
null
null
TTS/vocoder/tf/utils/io.py
mightmay/Mien-TTS
8a22ff0a79558b3cf4981ce1b63f4d1485ea6338
[ "MIT" ]
1
2021-04-28T17:30:03.000Z
2021-04-28T17:30:03.000Z
import datetime
import pickle

import tensorflow as tf


def save_checkpoint(model, current_step, epoch, output_path, **kwargs):
    """ Save TF Vocoder model """
    state = {
        'model': model.weights,
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    state.update(kwargs)
    pickle.dump(state, open(output_path, 'wb'))


def load_checkpoint(model, checkpoint_path):
    """ Load TF Vocoder model """
    checkpoint = pickle.load(open(checkpoint_path, 'rb'))
    chkp_var_dict = {var.name: var.numpy() for var in checkpoint['model']}
    tf_vars = model.weights
    for tf_var in tf_vars:
        layer_name = tf_var.name
        chkp_var_value = chkp_var_dict[layer_name]
        tf.keras.backend.set_value(tf_var, chkp_var_value)
    return model
29.678571
74
0.65704
0
0
0
0
0
0
0
0
110
0.132371
c73caaa0e2719e60ad785aecaaee84cf63518c02
1,497
py
Python
tests/test_path_choice.py
jataware/flee
67c00c4572e71dd2bbfb390d7d7ede13ffb9594e
[ "BSD-3-Clause" ]
3
2021-05-24T14:07:48.000Z
2022-01-10T03:20:36.000Z
tests/test_path_choice.py
jataware/flee
67c00c4572e71dd2bbfb390d7d7ede13ffb9594e
[ "BSD-3-Clause" ]
15
2020-06-05T11:42:23.000Z
2022-03-09T20:17:29.000Z
tests/test_path_choice.py
jataware/flee
67c00c4572e71dd2bbfb390d7d7ede13ffb9594e
[ "BSD-3-Clause" ]
3
2020-05-29T15:10:28.000Z
2022-03-09T19:51:41.000Z
from flee import flee

"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""


def test_path_choice():
    print("Testing basic data handling and simulation kernel.")

    flee.SimulationSettings.MinMoveSpeed = 5000.0
    flee.SimulationSettings.MaxMoveSpeed = 5000.0
    flee.SimulationSettings.MaxWalkSpeed = 5000.0

    e = flee.Ecosystem()

    l1 = e.addLocation(name="A", movechance=1.0)
    _ = e.addLocation(name="B", movechance=1.0)
    _ = e.addLocation(name="C1", movechance=1.0)
    _ = e.addLocation(name="C2", movechance=1.0)
    _ = e.addLocation(name="D1", movechance=1.0)
    _ = e.addLocation(name="D2", movechance=1.0)
    _ = e.addLocation(name="D3", movechance=1.0)

    # l2 = e.addLocation(name="B", movechance=1.0)
    # l3 = e.addLocation(name="C1", movechance=1.0)
    # l4 = e.addLocation(name="C2", movechance=1.0)
    # l5 = e.addLocation(name="D1", movechance=1.0)
    # l6 = e.addLocation(name="D2", movechance=1.0)
    # l7 = e.addLocation(name="D3", movechance=1.0)

    e.linkUp(endpoint1="A", endpoint2="B", distance=10.0)
    e.linkUp(endpoint1="A", endpoint2="C1", distance=10.0)
    e.linkUp(endpoint1="A", endpoint2="D1", distance=10.0)
    e.linkUp(endpoint1="C1", endpoint2="C2", distance=10.0)
    e.linkUp(endpoint1="D1", endpoint2="D2", distance=10.0)
    e.linkUp(endpoint1="D2", endpoint2="D3", distance=10.0)

    e.addAgent(location=l1)

    print("Test successful!")


if __name__ == "__main__":
    test_path_choice()
33.266667
75
0.663327
0
0
0
0
0
0
0
0
514
0.343353
c73dae2399d233b79b4e4ba84ebee8f7d71a6c22
10,463
py
Python
archive/old_plots/plot_supplemental_divergence_correlations.py
garudlab/mother_infant
98a27c83bf5ece9497d5a030c6c9396a8c514781
[ "BSD-2-Clause" ]
2
2020-08-09T06:19:11.000Z
2021-08-18T17:12:23.000Z
archive/old_plots/plot_supplemental_divergence_correlations.py
garudlab/mother_infant
98a27c83bf5ece9497d5a030c6c9396a8c514781
[ "BSD-2-Clause" ]
null
null
null
archive/old_plots/plot_supplemental_divergence_correlations.py
garudlab/mother_infant
98a27c83bf5ece9497d5a030c6c9396a8c514781
[ "BSD-2-Clause" ]
8
2019-02-20T22:21:55.000Z
2021-02-13T00:55:40.000Z
import matplotlib matplotlib.use('Agg') import config import parse_midas_data import parse_HMP_data import os.path import pylab import sys import numpy import diversity_utils import gene_diversity_utils import calculate_substitution_rates import stats_utils import matplotlib.colors as colors import matplotlib.cm as cmx from math import log10,ceil import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from numpy.random import randint from scipy.cluster.hierarchy import dendrogram, linkage from scipy.cluster.hierarchy import cophenet from scipy.cluster.hierarchy import fcluster from scipy.stats import gaussian_kde mpl.rcParams['font.size'] = 6 mpl.rcParams['lines.linewidth'] = 0.5 mpl.rcParams['legend.frameon'] = False mpl.rcParams['legend.fontsize'] = 'small' ################################################################################ # # Standard header to read in argument information # ################################################################################ import argparse parser = argparse.ArgumentParser() parser.add_argument("--debug", help="Loads only a subset of SNPs for speed", action="store_true") parser.add_argument("--chunk-size", type=int, help="max number of records to load", default=1000000000) args = parser.parse_args() debug = args.debug chunk_size = args.chunk_size ################################################################################ good_species_list = ['Bacteroides_vulgatus_57955', 'Bacteroides_uniformis_57318', 'Alistipes_putredinis_61533'] #################################################### # # Set up Figure (3 panels, arranged in 1x3 grid) # #################################################### pylab.figure(1,figsize=(7,1.5)) fig = pylab.gcf() # make three panels panels outer_grid = gridspec.GridSpec(1,3,width_ratios=[1,1,1],wspace=0.1) ####### # # SNP divergence vs Gene divergence in B. 
vulgatus # ####### gene_axis = plt.Subplot(fig, outer_grid[0]) fig.add_subplot(gene_axis) gene_axis.set_ylabel('SNP divergence\n %s' % (good_species_list[0])) gene_axis.set_xlabel('Gene divergence\n %s' % (good_species_list[0])) gene_axis.set_ylim([1e-06,1e-01]) #gene_axis.set_xlim([1e-02,1]) gene_axis.spines['top'].set_visible(False) gene_axis.spines['right'].set_visible(False) gene_axis.get_xaxis().tick_bottom() gene_axis.get_yaxis().tick_left() ####### # # SNP divergence (B vulgatus) vs SNP divergence (A putredinis) # ####### species_axis_1 = plt.Subplot(fig, outer_grid[1]) fig.add_subplot(species_axis_1) species_axis_1.set_xlabel('SNP divergence\n %s' % (good_species_list[1])) species_axis_1.set_ylim([1e-06,1e-01]) species_axis_1.set_xlim([1e-06,1e-01]) species_axis_1.spines['top'].set_visible(False) species_axis_1.spines['right'].set_visible(False) species_axis_1.get_xaxis().tick_bottom() species_axis_1.get_yaxis().tick_left() ####### # # SNP divergence (B vulgatus) vs SNP divergence (A putredinis) # ####### species_axis_2 = plt.Subplot(fig, outer_grid[2]) fig.add_subplot(species_axis_2) species_axis_2.set_xlabel('SNP divergence\n %s' % (good_species_list[2])) species_axis_2.set_ylim([1e-06,1e-01]) species_axis_2.set_xlim([1e-06,1e-01]) species_axis_2.spines['top'].set_visible(False) species_axis_2.spines['right'].set_visible(False) species_axis_2.get_xaxis().tick_bottom() species_axis_2.get_yaxis().tick_left() ######## # # Now do calculation and plot figures # ######## sys.stderr.write("Loading sample metadata...\n") subject_sample_map = parse_HMP_data.parse_subject_sample_map() sample_order_map = parse_HMP_data.parse_sample_order_map() sys.stderr.write("Done!\n") snp_divergence_map = {species_name: {} for species_name in good_species_list} gene_divergence_map = {species_name: {} for species_name in good_species_list} for species_name in good_species_list: sys.stderr.write("Loading haploid samples...\n") snp_samples = diversity_utils.calculate_haploid_samples(species_name, debug=debug) sys.stderr.write("Calculating unique samples...\n") # Only consider one sample per person snp_samples = snp_samples[parse_midas_data.calculate_unique_samples(subject_sample_map, sample_list=snp_samples)] sys.stderr.write("Loading pre-computed substitution rates for %s...\n" % species_name) substitution_rate_map = calculate_substitution_rates.load_substitution_rate_map(species_name) sys.stderr.write("Calculating snp matrix...\n") dummy_samples, snp_difference_matrix, snp_opportunity_matrix = calculate_substitution_rates.calculate_matrices_from_substitution_rate_map(substitution_rate_map, 'core', allowed_samples=snp_samples) snp_samples = dummy_samples sys.stderr.write("Done!\n") sys.stderr.write("Calculating gene matrix...\n") gene_samples, gene_difference_matrix, gene_opportunity_matrix = calculate_substitution_rates.calculate_matrices_from_substitution_rate_map(substitution_rate_map, 'genes', allowed_samples=snp_samples) snp_samples = gene_samples sys.stderr.write("Done!\n") # Focus on the subset of samples that have sufficient gene depth and snp depth desired_samples = gene_samples # Figure out which pairs of indices in desired_samples belong to diff subjects desired_same_sample_idxs, desired_same_subject_idxs, desired_diff_subject_idxs = parse_midas_data.calculate_subject_pairs( subject_sample_map, desired_samples) # Turn these into indices for snp and gene matrices snp_sample_idx_map = parse_midas_data.calculate_sample_idx_map(desired_samples, snp_samples) gene_sample_idx_map = 
parse_midas_data.calculate_sample_idx_map(desired_samples, gene_samples) same_subject_snp_idxs = parse_midas_data.apply_sample_index_map_to_indices(snp_sample_idx_map, desired_same_subject_idxs) same_subject_gene_idxs = parse_midas_data.apply_sample_index_map_to_indices(gene_sample_idx_map, desired_same_subject_idxs) diff_subject_snp_idxs = parse_midas_data.apply_sample_index_map_to_indices(snp_sample_idx_map, desired_diff_subject_idxs) diff_subject_gene_idxs = parse_midas_data.apply_sample_index_map_to_indices(gene_sample_idx_map, desired_diff_subject_idxs) for sample_pair_idx in xrange(0,len(diff_subject_snp_idxs[0])): snp_i = diff_subject_snp_idxs[0][sample_pair_idx] snp_j = diff_subject_snp_idxs[1][sample_pair_idx] gene_i = diff_subject_gene_idxs[0][sample_pair_idx] gene_j = diff_subject_gene_idxs[1][sample_pair_idx] sample_i = desired_samples[gene_i] sample_j = desired_samples[gene_j] # This will serve as a key in snp_divergence_map sample_pair = frozenset([sample_i,sample_j]) # Focus on pairs of samples with sufficient coverage if snp_opportunity_matrix[snp_i,snp_j]>0: snp_d = snp_difference_matrix[snp_i,snp_j]*1.0/snp_opportunity_matrix[snp_i,snp_j] snp_divergence_map[species_name][sample_pair] = snp_d if gene_opportunity_matrix[gene_i, gene_j]>0: gene_d = gene_difference_matrix[gene_i, gene_j]*1.0/gene_opportunity_matrix[gene_i, gene_j] gene_divergence_map[species_name][sample_pair] = gene_d ################# # # Plot figures! # ################# # First calculate SNP vs gene divergence in B. vulgatus species_name = good_species_list[0] snp_divergences = [] gene_divergences = [] # Loop over sample pairs that are in both snp_divergence_map and gene_divergence_map for sample_pair in (set(snp_divergence_map[species_name].keys()) & set(gene_divergence_map[species_name].keys()) ): snp_divergences.append( snp_divergence_map[species_name][sample_pair] ) gene_divergences.append( gene_divergence_map[species_name][sample_pair] ) snp_divergences = numpy.array(snp_divergences) gene_divergences = numpy.array(gene_divergences) # Null expectation (medians line up) median_ratio = numpy.median(snp_divergences)/numpy.median(gene_divergences) gene_axis.loglog([1e-02,1],[1e-02*median_ratio,1*median_ratio],'k-',linewidth=0.25) gene_axis.loglog(gene_divergences, snp_divergences, 'r.', markersize=2,alpha=0.5,markeredgewidth=0, rasterized=True) # Then SNP divergence between two species species_1 = good_species_list[0] species_2 = good_species_list[1] snp_divergences_1 = [] snp_divergences_2 = [] # Loop over sample pairs that are in both snp_divergence_map and gene_divergence_map for sample_pair in (set(snp_divergence_map[species_1].keys()) & set(snp_divergence_map[species_2].keys()) ): snp_divergences_1.append( snp_divergence_map[species_1][sample_pair] ) snp_divergences_2.append( snp_divergence_map[species_2][sample_pair] ) snp_divergences_1 = numpy.array(snp_divergences_1) snp_divergences_2 = numpy.array(snp_divergences_2) # Null expectation (medians line up) median_ratio = numpy.median(snp_divergences_1)/numpy.median(snp_divergences_2) species_axis_1.loglog([1e-06,1e-01],[1e-06*median_ratio,1e-01*median_ratio],'k-',linewidth=0.25) # Observed values species_axis_1.loglog(snp_divergences_2, snp_divergences_1, 'r.', markersize=2,alpha=0.5,markeredgewidth=0, rasterized=True) # Then SNP divergence between other two species species_1 = good_species_list[0] species_2 = good_species_list[2] snp_divergences_1 = [] snp_divergences_2 = [] # Loop over sample pairs that are in both snp_divergence_map and 
gene_divergence_map for sample_pair in (set(snp_divergence_map[species_1].keys()) & set(snp_divergence_map[species_2].keys()) ): snp_divergences_1.append( snp_divergence_map[species_1][sample_pair] ) snp_divergences_2.append( snp_divergence_map[species_2][sample_pair] ) snp_divergences_1 = numpy.array(snp_divergences_1) snp_divergences_2 = numpy.array(snp_divergences_2) # Null expectation (medians line up) median_ratio = numpy.median(snp_divergences_1)/numpy.median(snp_divergences_2) species_axis_2.loglog([1e-06,1e-01],[1e-06*median_ratio,1e-01*median_ratio],'k-',linewidth=0.25) species_axis_2.loglog(snp_divergences_2, snp_divergences_1, 'r.', markersize=2,alpha=0.5,markeredgewidth=0,rasterized=True) # Since y-axes are shared, do not duplicate ticklables species_axis_1.set_yticklabels([]) species_axis_2.set_yticklabels([]) sys.stderr.write("Saving figure...\t") fig.savefig('%s/supplemental_divergence_correlations.pdf' % (parse_midas_data.analysis_directory),bbox_inches='tight',dpi=600) sys.stderr.write("Done!\n")
38.047273
203
0.750454
0
0
0
0
0
0
0
0
2,482
0.237217
c73e6e9b07e0e5afa67a521f170e1521081ec4b3
34,246
py
Python
multivis/plotFeatures.py
brettChapman/cimcb_vis
b373ed426b24ece1dcc20febd7c8023921b024d6
[ "MIT" ]
1
2021-06-27T23:52:40.000Z
2021-06-27T23:52:40.000Z
multivis/plotFeatures.py
brettChapman/cimcb_vis
b373ed426b24ece1dcc20febd7c8023921b024d6
[ "MIT" ]
null
null
null
multivis/plotFeatures.py
brettChapman/cimcb_vis
b373ed426b24ece1dcc20febd7c8023921b024d6
[ "MIT" ]
2
2021-06-27T23:53:03.000Z
2021-07-12T12:59:23.000Z
import sys import copy import matplotlib import matplotlib.pyplot as plt import seaborn as sns from collections import Counter from .utils import * import numpy as np import pandas as pd class plotFeatures: usage = """Produces different feature plots given a data table and peak table. Initial_Parameters ---------- peaktable : Pandas dataframe containing peak data. Must contain 'Name' and 'Label'. datatable : Pandas dataframe containing matrix of values to plot (N samples x N features). Columns/features must be same as 'Name' from Peak Table. Methods ------- set_params : Set parameters - plot_type: The type of plot. Either "point", "violin", "box", "swarm", "violin-swarm" or "box-swarm" (default: 'point') column_numbers: The number of columns to display in the plots (default: 4) log_data: Perform a log ('natural', base 2 or base 10) on all data (default: (True, 2)) scale_data: Scale the data ('standard' (centers to the mean and scales to unit variance), 'minmax' (scales between 0 and 1), 'maxabs' (scales to the absolute maximum value), 'robust' (centers to the median and scales to between 25th and 75th quantile range) (default: (True, 'minmax')) impute_data: Impute any missing values using KNN impute with a set number of nearest neighbours (default: (True, 3)) style: Set the matplotlib style (see https://matplotlib.org/stable/tutorials/introductory/customizing.html) (default: 'seaborn-white') transparent: Setting to 'True' will make the background transparent (default: False) figSize: The figure size as a tuple (width,height) (default: (15,10)) fontSize: The font size for all text (default: 12) colour_palette: The colour palette to use for the plot (default: None) y_axis_label: The label to customise the y axis (default: None) x_axis_rotation: Rotate the x axis labels this number of degrees (default: 0) group_column_name: The group column name used in the datatable (e.g. 'Class') (default: None) point_estimator: The statistical function to use for the point plot. Either "mean" or "median" (default: 'mean') point_ci: The bootstrapped confidence interval for the point plot. Can also be standard deviation ("sd") (default: 95) violin_distribution_type: The representation of the distribution of data points within the violin plot. Either "quartile", "box", "point", "stick" or None (default: 'box') violin_width_scale: The method used to scale the width of the violin plot. Either "area", "count" or "width" (default: "width") box_iqr: The proportion past the lower and upper quartiles to extend the plot whiskers for the box plot. 
Points outside this range will be identified as outliers (default: 1.5) saveImage: Setting to 'True' will save the image to file (default: True) imageFileName: The image file name to save to (default: [plot_type]_features.png') dpi: The number of Dots Per Inch (DPI) for the image (default: 200) help : Print this help text plot : Generates feature plots """ def __init__(self, peaktable, datatable): peaktable = self.__checkPeakTable(self.__checkData(peaktable)) datatable = self.__checkData(datatable) # Slice the meta-data, and select only peaks from the peaktable for processing, and add the meta-data back meta = datatable.T[~datatable.T.index.isin(peaktable['Name'])].T.reset_index(drop=True) dat = datatable[peaktable['Name']].reset_index() datatable = pd.concat([meta, dat], axis=1).set_index(['index']) datatable.index.name = None self.__peaktable = peaktable # Search for duplicate labels and amend with a suffix, to avoid issues when relabelling the datatable labels = copy.deepcopy(list(peaktable['Label'])) label_counts = {k: v for k, v in Counter(labels).items() if v > 1} for i in reversed(range(len(labels))): item = str(labels[i]) if item in label_counts and label_counts[item]: labels[i] += "_" + str(label_counts[item]) label_counts[item] -= 1 #Label datatable with peak labels instead of names for ease of feature plotting col_label_dict = dict(zip(list(peaktable['Name']), labels)) datatable.rename(columns=col_label_dict, inplace=True) self.__peak_labels = labels self.__datatable = datatable self.set_params() def help(self): print(plotFeatures.usage) def set_params(self, plot_type='point', column_numbers=4, log_data=(True, 2), scale_data=(True, 'minmax'), impute_data=(True, 3), style='seaborn-white', transparent=False, figSize = (15, 10), fontSize = 12, colour_palette=None, y_axis_label=None, x_axis_rotation=0, group_column_name=None, point_estimator='mean', point_ci=95, violin_distribution_type='box', violin_width_scale='width', box_iqr=1.5, saveImage=True, imageFileName='_features.png', dpi = 200): plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent, figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name, point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr, saveImage, imageFileName, dpi = self.__paramCheck(plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent, figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name, point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr, saveImage, imageFileName, dpi) self.__plot_type = plot_type; self.__column_numbers = column_numbers; self.__log_data = log_data; self.__scale_data = scale_data; self.__impute_data = impute_data; self.__style = style; self.__transparent = transparent; self.__figSize = figSize; self.__fontSize = fontSize; self.__colour_palette = colour_palette; self.__y_axis_label = y_axis_label; self.__x_axis_rotation = x_axis_rotation; self.__group_column_name = group_column_name; self.__point_estimator = point_estimator; self.__point_ci = point_ci; self.__violin_distribution_type = violin_distribution_type; self.__violin_width_scale = violin_width_scale; self.__box_iqr = box_iqr; self.__saveImage = saveImage; self.__imageFileName = imageFileName; self.__dpi = dpi; def plot(self): datatable = copy.deepcopy(self.__datatable) labels = self.__peak_labels plot_type = self.__plot_type group_column_name = self.__group_column_name column_numbers = self.__column_numbers 
colour_palette = self.__colour_palette point_ci = self.__point_ci point_estimator = self.__point_estimator log_data = self.__log_data scale_data = self.__scale_data impute_data = self.__impute_data x_axis_rotation = self.__x_axis_rotation y_axis_label = self.__y_axis_label violin_distribution_type = self.__violin_distribution_type violin_width_scale = self.__violin_width_scale box_iqr = self.__box_iqr imageFileName = self.__imageFileName saveImage = self.__saveImage fontSize = self.__fontSize style = self.__style transparent = self.__transparent dpi = self.__dpi figSize = self.__figSize meta = datatable.T[~datatable.T.index.isin(labels)].T.reset_index(drop=True) X = datatable[labels].reset_index(drop=True) (log_bool, log_base) = log_data; if log_bool: if isinstance(log_base, str) and log_base.lower() == 'natural': X = X.applymap(np.log); elif log_base == 2: X = X.applymap(np.log2); elif log_base == 10: X = X.applymap(np.log10); else: print("Error: The chosen log type is invalid.") sys.exit() (scale_bool, scale_type) = scale_data if scale_bool: if isinstance(scale_type, str) and scale_type.lower() == 'standard': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) elif isinstance(scale_type, str) and scale_type.lower() == 'minmax': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) elif isinstance(scale_type, str) and scale_type.lower() == 'maxabs': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) elif isinstance(scale_type, str) and scale_type.lower() == 'robust': X = scaler(X, type=scale_type.lower()).reset_index(drop=True) else: print("Error: The chosen scale type is invalid.") sys.exit() (impute_bool, k) = impute_data; if impute_bool: X = imputeData(X, k=k).reset_index(drop=True) if not isinstance(X, pd.DataFrame): X = pd.DataFrame(X, columns=labels) # Add the meta data back in with the logged, scaled, or imputed data datatable = pd.concat([meta, X], axis=1).reset_index(drop=True) with plt.style.context(style): fig, axes = plt.subplots(nrows=int(np.ceil(float(len(labels) / column_numbers))), ncols=column_numbers, sharey=True, figsize=figSize) if plot_type == 'point': for peak_index, peak in enumerate(labels): if point_estimator.lower() == 'mean': point_estimator = 'Mean' ax = sns.pointplot(data=datatable, x=group_column_name, y=peak, estimator=np.nanmean, capsize=0.1, ci=point_ci, palette=colour_palette, ax=axes.flat[peak_index]) elif point_estimator.lower() == 'median': point_estimator = 'Median' ax = sns.pointplot(data=datatable, x=group_column_name, y=peak, estimator=np.nanmedian, capsize=0.1, ci=point_ci, palette=colour_palette, ax=axes.flat[peak_index]) else: print("Error: Invalid point plot estimator type.") sys.exit() ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) if log_bool: if scale_data: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) {} Peak Area within SD'.format(log_base, scale_type, point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) {} Peak Area & {}% CI'.format(log_base, scale_type, point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) 
ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) {} Peak Area within SD'.format(log_base, point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Log({}) {} Peak Area & {}% CI'.format(log_base, point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Scaled ({}) {} Peak Area within SD'.format(scale_type, point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('Scaled ({}) {} Peak Area & {}% CI'.format(scale_type, point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if isinstance(point_ci, str): if point_ci == 'sd': ax.set_title(peak + ' within SD', fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('{} Peak Area within SD'.format(point_estimator), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: ax.set_title(peak + ' with {}% CI'.format(point_ci), fontsize=fontSize) ax.set_xlabel('') if y_axis_label is None: ax.set_ylabel('{} Peak Area & {}% CI'.format(point_estimator, point_ci), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'violin': for peak_index, peak in enumerate(labels): ax = sns.violinplot(data=datatable, x=group_column_name, y=peak, linewidth=1, inner=violin_distribution_type, scale=violin_width_scale, palette=colour_palette, ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'box': for peak_index, peak in enumerate(labels): ax = sns.boxplot(data=datatable, x=group_column_name, y=peak, palette=colour_palette, whis=box_iqr, ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, 
fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'swarm': for peak_index, peak in enumerate(labels): ax = sns.swarmplot(data=datatable, x=group_column_name, y=peak, size=10, palette=colour_palette, ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'violin-swarm': for peak_index, peak in enumerate(labels): ax = sns.violinplot(data=datatable, x=group_column_name, y=peak, linewidth=1, inner=None, scale=violin_width_scale, palette=colour_palette, ax=axes.flat[peak_index]) ax = sns.swarmplot(data=datatable, x=group_column_name, y=peak, color="white", edgecolor="gray", ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) elif plot_type.lower() == 'box-swarm': for peak_index, peak in enumerate(labels): ax = sns.boxplot(data=datatable, x=group_column_name, y=peak, palette=colour_palette, whis=np.inf, ax=axes.flat[peak_index]) ax = sns.swarmplot(data=datatable, x=group_column_name, y=peak, color="0.2", ax=axes.flat[peak_index]) ax.tick_params(labelrotation=x_axis_rotation, labelsize=fontSize) ax.set_title(peak, fontsize=fontSize) ax.set_xlabel('') if log_bool: if scale_data: if y_axis_label is None: ax.set_ylabel('Log({}) scaled ({}) Peak Area'.format(log_base, scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Log({}) Peak Area'.format(log_base), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if scale_data: if y_axis_label is None: ax.set_ylabel('Scaled ({}) Peak Area'.format(scale_type), fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) else: if y_axis_label is None: ax.set_ylabel('Peak Area', fontsize=fontSize) else: ax.set_ylabel(y_axis_label, fontsize=fontSize) fig.tight_layout(h_pad=5, w_pad=2) if saveImage: plt.savefig(plot_type + 'Plot' + imageFileName, dpi=dpi, transparent=transparent) plt.show() def __paramCheck(self, 
plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent, figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name, point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr, saveImage, imageFileName, dpi): cmap_list = list(matplotlib.cm.cmaps_listed) + list(matplotlib.cm.datad) cmap_list_r = [cmap + '_r' for cmap in cmap_list] cmap_list = cmap_list + cmap_list_r plot_types = ['point', 'violin', 'box', 'swarm', 'violin-swarm', 'box-swarm'] estimator_types = ['mean', 'median'] datatable = self.__datatable if plot_type.lower() not in plot_types: print("Error: Plot type is not valid. Choose one of the following: {}.".format(', '.join(plot_types))) sys.exit() if not isinstance(column_numbers, int): print("Error: Column numbers is not valid. Choose a integer value.") sys.exit() if not isinstance(log_data, tuple): print("Error: Log data type if not a tuple. Please ensure the value is a tuple (e.g. (True, 2).") sys.exit() else: (log_bool, log_base) = log_data if not isinstance(log_bool, bool): print("Error: Log data first tuple item is not a boolean value. Choose either \"True\" or \"False\".") sys.exit() base_types = ['natural', 2, 10] if isinstance(log_base, str): log_base = log_base.lower() if log_base not in base_types: print("Error: Log data second tuple item is not valid. Choose one of {}.".format(', '.join(base_types))) sys.exit() if not isinstance(scale_data, tuple): print("Error: Scale data type if not a tuple. Please ensure the value is a tuple (e.g. (True, 'standard').") sys.exit() else: (scale_bool, scale_type) = scale_data if not isinstance(scale_bool, bool): print("Error: Scale data first tuple item is not a boolean value. Choose either \"True\" or \"False\".") sys.exit() scale_types = ['standard', 'minmax', 'maxabs', 'robust'] if isinstance(scale_type, str): scale_type = scale_type.lower() if scale_type not in scale_types: print("Error: Scale data second tuple item is not valid. Choose one of {}.".format(', '.join(scale_types))) sys.exit() if not isinstance(impute_data, tuple): print("Error: Impute data type if not a tuple. Please ensure the value is a tuple (e.g. (True, 3).") sys.exit() else: (impute_bool, k) = impute_data if not isinstance(impute_bool, bool): print("Error: Impute data first tuple item is not a boolean value. Choose either \"True\" or \"False\".") sys.exit() if not isinstance(k, float): if not isinstance(k, int): print("Error: Impute data second tuple item, the nearest neighbours k value, is not valid. Choose a float or integer value.") sys.exit() if not isinstance(style, str): print("Error: Seaborn style is not valid. Choose a string value.") sys.exit() else: styleList = list(plt.style.available) if style not in styleList: print("Error: Chosen style is not valid. Choose one of the following: {}.".format(', '.join(styleList))) sys.exit() if not isinstance(transparent, bool): print("Error: The transparent value is not valid. Choose either \"True\" or \"False\".") sys.exit() if not isinstance(figSize, tuple): print("Error: Figure size is not valid. Choose a tuple of length 2.") sys.exit() else: for length in figSize: if not isinstance(length, float): if not isinstance(length, int): print("Error: Figure size value is not valid. Choose a float or integer value.") sys.exit() if not isinstance(fontSize, float): if not isinstance(fontSize, int): print("Error: Font size is not valid. 
Choose a float or integer value.") sys.exit() if colour_palette is not None: if not isinstance(colour_palette, str): print("Error: The colour palette is not valid. Choose a string value.") sys.exit() else: if colour_palette not in cmap_list: print("Error: The colour palette is not valid. Choose one of the following: {}.".format(', '.join(cmap_list))) sys.exit() if y_axis_label is not None: if isinstance(y_axis_label, str): print("Error: The y axis label is not valid. Choose a string value.") sys.exit() if not isinstance(x_axis_rotation, float): if not isinstance(x_axis_rotation, int): print("Error: The x axis rotation value is not valid. Choose a float or integer value.") sys.exit() if ((x_axis_rotation < 0) or (x_axis_rotation > 360)): print("Error: The x axis rotation value is not valid. Choose a value >=0 or <= 360.") sys.exit() if group_column_name is not None: if not isinstance(group_column_name, str): print("Error: Group column name is not valid. Choose a string value.") sys.exit() else: if group_column_name not in list(datatable.columns): print("Error: Group column name not valid. Choose one of {}.".format(', '.join(list(datatable.columns)))) sys.exit() if point_estimator.lower() not in estimator_types: print("Error: The chosen point plot estimator is invalid. Choose one of \"{}\".".format('\" or \"'.join(estimator_types))) sys.exit() if isinstance(point_ci, str): if point_ci != 'sd': print("Error: The string value for point plot ci is invalid. Choose a float, integer or 'sd' value for standard deviation.") sys.exit() else: if not isinstance(point_ci, float): if not isinstance(point_ci, int): print("Error: The value for point plot ci is invalid. Choose a float, integer or 'sd' value for standard deviation.") sys.exit() violin_distribution_types = ['quartile', 'box', 'point', 'stick', None] violin_width_scale_types = ['area', 'count', 'width'] if plot_type.lower() == "violin": if violin_distribution_type not in violin_distribution_types: print("Error: Violin distribution type not valid. Choose one of the following: {}.".format(', '.join(violin_distribution_types))) sys.exit() if violin_width_scale not in violin_width_scale_types: print("Error: Violin width scale type not valid. Choose one of the following: {}.".format(', '.join(violin_width_scale_types))) sys.exit() if plot_type.lower == "box": if not isinstance(box_iqr, float): if not isinstance(box_iqr, int): print( "Error: The box plot interquartile range extension beyond whiskers is not valid. Choose a float or integer value.") sys.exit() if not isinstance(saveImage, bool): print("Error: Save image is not valid. Choose either \"True\" or \"False\".") sys.exit() if not isinstance(imageFileName, str): print("Error: Image file name is not valid. Choose a string value.") sys.exit() if not isinstance(dpi, float): if not isinstance(dpi, int): print("Error: Dpi is not valid. Choose a float or integer value.") sys.exit() return plot_type, column_numbers, log_data, scale_data, impute_data, style, transparent, figSize, fontSize, colour_palette, y_axis_label, x_axis_rotation, group_column_name, point_estimator, point_ci, violin_distribution_type, violin_width_scale, box_iqr, saveImage, imageFileName, dpi def __checkData(self, df): if not isinstance(df, pd.DataFrame): print("Error: A dataframe was not entered. Please check your data.") return df def __checkPeakTable(self, PeakTable): if "Name" not in PeakTable.columns: print("Error: \"Name\" column not in Peak Table. 
Please check your data.") sys.exit() if "Label" not in PeakTable.columns: print("Error: \"Label\" column not in Peak Table. Please check your data.") sys.exit() # Do not assume the peaks/nodes have been indexed correctly. Remove any index columns and reindex. column_list = [column.lower() for column in PeakTable.columns] if 'idx' in column_list: index = column_list.index('idx') column_name = PeakTable.columns[index] PeakTable = PeakTable.drop(columns=[column_name]) if 'index' in column_list: index = column_list.index('index') column_name = PeakTable.columns[index] PeakTable = PeakTable.drop(columns=[column_name]) PeakTable = PeakTable.reset_index(drop=True) PeakTable.index.name = 'Idx' PeakTable = PeakTable.reset_index() return PeakTable
52.605223
586
0.5464
34,058
0.99451
0
0
0
0
0
0
7,936
0.231735
c73eca01ba5620a706110aaabb7ea66ae754f7f0
1,183
py
Python
core/data/DataWriter.py
berendkleinhaneveld/Registrationshop
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
[ "MIT" ]
25
2015-11-08T16:36:54.000Z
2022-01-20T16:03:28.000Z
core/data/DataWriter.py
berendkleinhaneveld/Registrationshop
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
[ "MIT" ]
2
2016-12-01T23:13:08.000Z
2017-07-25T02:40:49.000Z
core/data/DataWriter.py
berendkleinhaneveld/Registrationshop
0d6f3ee5324865cdcb419369139f37c39dfe9a1c
[ "MIT" ]
10
2016-07-05T14:39:16.000Z
2022-01-01T02:05:55.000Z
""" DataWriter.py """ from DataController import DataController from DataReader import DataReader from vtk import vtkMetaImageWriter from vtk import vtkXMLImageDataWriter class DataWriter(DataController): """ DataWriter writes an image data object to disk using the provided format. """ def __init__(self): super(DataWriter, self).__init__() self.supportedExtensions = [DataReader.TypeMHD, DataReader.TypeVTI, DataReader.TypeMHA] def WriteToFile(self, imageData, exportFileName, fileType): if fileType == DataReader.TypeMHD: if not exportFileName.endswith(".mhd"): exportFileName = exportFileName + ".mhd" writer = vtkMetaImageWriter() writer.SetFileName(exportFileName) writer.SetInputData(imageData) writer.Write() elif fileType == DataReader.TypeVTI: writer = vtkXMLImageDataWriter() writer.SetFileName(exportFileName) writer.SetInputData(imageData) writer.Write() elif fileType == DataReader.TypeMHA: writer = vtkMetaImageWriter() writer.SetFileName(exportFileName) writer.SetInputData(imageData) writer.Write() else: raise NotImplementedError("No writing support for type " + str(fileType))
27.511628
76
0.752325
1,008
0.852071
0
0
0
0
0
0
147
0.12426
c73ff4534e3b71c1974b4bf7835f8ec9472d9d62
7,483
py
Python
parkings/models/permit.py
klemmari1/parkkihubi
93218c6046c0910e8a4c723dc7128c6eec085b8c
[ "MIT" ]
12
2016-11-29T15:13:10.000Z
2021-06-12T06:45:38.000Z
parkings/models/permit.py
niuzhipeng123/parkkihubi
93218c6046c0910e8a4c723dc7128c6eec085b8c
[ "MIT" ]
154
2016-11-30T09:07:58.000Z
2022-02-12T08:29:36.000Z
parkings/models/permit.py
niuzhipeng123/parkkihubi
93218c6046c0910e8a4c723dc7128c6eec085b8c
[ "MIT" ]
15
2016-11-29T19:32:48.000Z
2022-01-05T11:31:39.000Z
from itertools import chain from django.conf import settings from django.contrib.gis.db import models as gis_models from django.db import models, router, transaction from django.utils import timezone from django.utils.translation import gettext_lazy as _ from ..fields import CleaningJsonField from ..validators import DictListValidator, TextField, TimestampField from .constants import GK25FIN_SRID from .enforcement_domain import EnforcementDomain from .mixins import TimestampedModelMixin from .parking import Parking class PermitArea(TimestampedModelMixin): name = models.CharField(max_length=40, verbose_name=_('name')) domain = models.ForeignKey( EnforcementDomain, on_delete=models.PROTECT, related_name='permit_areas') identifier = models.CharField(max_length=10, verbose_name=_('identifier')) geom = gis_models.MultiPolygonField( srid=GK25FIN_SRID, verbose_name=_('geometry')) permitted_user = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("permitted_user")) class Meta: unique_together = [('domain', 'identifier')] ordering = ('identifier',) def __str__(self): return '{}/{}: {}'.format(self.domain.code, self.identifier, self.name) class PermitSeriesQuerySet(models.QuerySet): def active(self): return self.filter(active=True) def latest_active(self): return self.active().order_by('-modified_at').first() def prunable(self, time_limit=None): limit = time_limit or ( timezone.now() - settings.PARKKIHUBI_PERMITS_PRUNABLE_AFTER) return self.filter(created_at__lt=limit, active=False) class PermitSeries(TimestampedModelMixin, models.Model): active = models.BooleanField(default=False) owner = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.PROTECT, verbose_name=_("owner")) objects = PermitSeriesQuerySet.as_manager() class Meta: ordering = ('created_at', 'id') verbose_name = _("permit series") verbose_name_plural = _("permit series") @classmethod def delete_prunable_series(cls, time_limit=None): prunable = cls.objects.prunable(time_limit) Permit.objects.filter(series__in=prunable).delete() prunable.delete() def __str__(self): return str(self.id) class PermitQuerySet(models.QuerySet): def active(self): return self.filter(series__active=True) def by_time(self, timestamp): lookup_items = PermitLookupItem.objects.by_time(timestamp) return self.filter(lookup_items__in=lookup_items).distinct() def by_subject(self, registration_number): lookup_items = PermitLookupItem.objects.by_subject(registration_number) return self.filter(lookup_items__in=lookup_items).distinct() def by_area(self, area): lookup_items = PermitLookupItem.objects.by_area(area) return self.filter(lookup_items__in=lookup_items).distinct() def bulk_create(self, permits, *args, **kwargs): for permit in permits: assert isinstance(permit, Permit) permit.full_clean() with transaction.atomic(using=self.db, savepoint=False): created_permits = super().bulk_create(permits, *args, **kwargs) PermitLookupItem.objects.using(self.db).bulk_create( chain(*(x._make_lookup_items() for x in created_permits))) return created_permits class Permit(TimestampedModelMixin, models.Model): domain = models.ForeignKey( EnforcementDomain, on_delete=models.PROTECT, related_name='permits') series = models.ForeignKey(PermitSeries, on_delete=models.PROTECT) external_id = models.CharField(max_length=50, null=True, blank=True) subjects = CleaningJsonField(blank=True, validators=[DictListValidator({ 'start_time': TimestampField(), 'end_time': TimestampField(), 'registration_number': TextField(max_length=20), })]) areas = 
CleaningJsonField(blank=True, validators=[DictListValidator({ 'start_time': TimestampField(), 'end_time': TimestampField(), 'area': TextField(max_length=10), })]) objects = PermitQuerySet.as_manager() class Meta: unique_together = [('series', 'external_id')] indexes = [ models.Index(fields=['series', 'id']), ] ordering = ('series', 'id') def __str__(self): return 'Permit {id} ({series}{active}/{external_id} {dom})'.format( id=self.id, dom=self.domain.code, series=self.series, active='*' if self.series.active else '', external_id=self.external_id) def save(self, using=None, *args, **kwargs): self.full_clean() using = using or router.db_for_write(type(self), instance=self) with transaction.atomic(using=using, savepoint=False): super(Permit, self).save(using=using, *args, **kwargs) self.lookup_items.all().using(using).delete() new_lookup_items = self._make_lookup_items() PermitLookupItem.objects.using(using).bulk_create(new_lookup_items) def _make_lookup_items(self): for area in self.areas: for subject in self.subjects: max_start_time = max(subject['start_time'], area['start_time']) min_end_time = min(subject['end_time'], area['end_time']) if max_start_time >= min_end_time: continue yield PermitLookupItem( permit=self, registration_number=Parking.normalize_reg_num( subject['registration_number']), area=PermitArea.objects.get(identifier=area['area'], domain=self.domain), start_time=max_start_time, end_time=min_end_time ) class PermitLookupItemQuerySet(models.QuerySet): def active(self): return self.filter(permit__series__active=True) def by_time(self, timestamp): return self.filter(start_time__lte=timestamp, end_time__gte=timestamp) def by_subject(self, registration_number): normalized_reg_num = Parking.normalize_reg_num(registration_number) return self.filter(registration_number=normalized_reg_num) def by_area(self, area): return self.filter(area=area) class PermitLookupItem(models.Model): permit = models.ForeignKey( Permit, related_name="lookup_items", on_delete=models.CASCADE) registration_number = models.CharField(max_length=20) area = models.ForeignKey(PermitArea, on_delete=models.PROTECT, default=None, null=True, blank=True) start_time = models.DateTimeField() end_time = models.DateTimeField() objects = PermitLookupItemQuerySet.as_manager() class Meta: indexes = [ models.Index(fields=[ 'registration_number', 'start_time', 'end_time', 'area', 'permit']), ] ordering = ('registration_number', 'start_time', 'end_time') def __str__(self): return ( '{start_time:%Y-%m-%d %H:%M} -- {end_time:%Y-%m-%d %H:%M} / ' '{registration_number} / {area}' ).format( start_time=self.start_time, end_time=self.end_time, registration_number=self.registration_number, area=self.area.identifier)
37.415
103
0.667379
6,939
0.927302
736
0.098356
204
0.027262
0
0
628
0.083924
c744286930e6918cebec7544521adbaf000c03cc
4,265
py
Python
poi_mining/biz/LSA/logEntropy.py
yummydeli/machine_learning
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
[ "MIT" ]
1
2019-09-29T13:36:29.000Z
2019-09-29T13:36:29.000Z
poi_mining/biz/LSA/logEntropy.py
yummydeli/machine_learning
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
[ "MIT" ]
null
null
null
poi_mining/biz/LSA/logEntropy.py
yummydeli/machine_learning
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# encoding:utf-8
# ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) [2015] [baidu.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ##############################################################################
"""
Generate the LogEntropy matrix and select suitable keywords
"""
import glob
import collections

import pandas
from sklearn.feature_extraction.text import CountVectorizer

import math


class LogEntropy(object):
    """Compute log-entropy weights to obtain category keywords"""

    def __init__(self):
        self.fnames = glob.glob('data/segs/names.*')

    def extract_segs(self):
        """Collect the word-segmentation results from the segmented files"""
        idx = []
        words = []
        for f in self.fnames:
            lines = []
            for i, line in enumerate(open(f)):
                if i % 2 == 1:
                    non_int = '\t'.join([e for e in line.decode('GBK').rstrip('\n').split('\t') \
                                         if not e.isdigit()])
                    lines.append(non_int)
            words.append('\t'.join(lines))
            idx.append(f.split('.')[1][1:])
        return words, idx

    def mk_document_term_matrix(self):
        """Build the term-document (TDM) matrix"""
        words, idx = self.extract_segs()
        countvec = CountVectorizer()
        dtm = pandas.DataFrame(countvec.fit_transform(words).toarray(),
                               columns=countvec.get_feature_names(), index=idx)
        """
              canting  faguo  riben  zhongwen
        1001        1      0      0         1
        991         1      0      1         0
        203         1      1      0         0
        """
        return dtm

    def global_weighting(self, dtm):
        """ 1 - Entropy(words) / log(N) """
        # normalized entropy for word
        pdtm = (dtm / dtm.sum(axis=0))
        ndocs = pdtm.shape[0]
        gw = 1 + (pdtm.applymap(lambda x: x * math.log(x) if x != 0 else 0).sum() / math.log(ndocs))
        """
        canting     2.220446e-16
        faguo       1.000000e+00
        riben       1.000000e+00
        zhongwen    1.000000e+00
        """
        return gw

    def local_weighting(self, dtm):
        """ math.log(freq + 1)"""
        lw = dtm.applymap(lambda freq: math.log(freq + 1))
        """
               canting     faguo     riben  zhongwen
        1001  0.693147  0.000000  0.000000  0.693147
        991   0.693147  0.000000  0.693147  0.000000
        203   0.693147  0.693147  0.000000  0.000000
        """
        return lw

    def logEntropyWeighting(self):
        """Compute the final log-entropy scores"""
        dtm = self.mk_document_term_matrix()
        """
                   canting     faguo     riben  zhongwen
        1001  1.539096e-16  0.000000  0.000000  0.693147
        991   1.539096e-16  0.000000  0.693147  0.000000
        203   1.539096e-16  0.693147  0.000000  0.000000
        """
        logEntro = (self.global_weighting(dtm.copy()) * self.local_weighting(dtm)).applymap(
            lambda x: 0 if x < 0.001 else x
        )
        logEntro.T.to_csv('data/keyWords.cates', sep='\t', encoding='UTF-8')


if __name__ == '__main__':
    lsaEntropy = LogEntropy()
    lsaEntropy.logEntropyWeighting()
35.541667
100
0.557562
2,759
0.633525
0
0
0
0
0
0
2,487
0.571068
c7465ff1ea985cda2b457c6697cd774f312adad2
40
py
Python
Python/swap_numbers.py
saurabhcommand/Hello-world
647bad9da901a52d455f05ecc37c6823c22dc77e
[ "MIT" ]
1,428
2018-10-03T15:15:17.000Z
2019-03-31T18:38:36.000Z
Python/swap_numbers.py
saurabhcommand/Hello-world
647bad9da901a52d455f05ecc37c6823c22dc77e
[ "MIT" ]
1,162
2018-10-03T15:05:49.000Z
2018-10-18T14:17:52.000Z
Python/swap_numbers.py
saurabhcommand/Hello-world
647bad9da901a52d455f05ecc37c6823c22dc77e
[ "MIT" ]
3,909
2018-10-03T15:07:19.000Z
2019-03-31T18:39:08.000Z
a = 5
b = 7

a,b = b,a

print a
print b
5
9
0.5
0
0
0
0
0
0
0
0
0
0
c746b2ee9cd86b479c95bc6e51b1c40a08b1d7da
2,162
py
Python
algorithms/tests/test_unionfind.py
tommyod/PythonAlgorithms
f0a0f67be069fc9e9fa3027ed83942d6401223fe
[ "MIT" ]
1
2021-08-23T17:15:06.000Z
2021-08-23T17:15:06.000Z
algorithms/tests/test_unionfind.py
tommyod/PythonAlgorithms
f0a0f67be069fc9e9fa3027ed83942d6401223fe
[ "MIT" ]
1
2018-05-02T17:29:42.000Z
2018-05-02T17:31:18.000Z
algorithms/tests/test_unionfind.py
tommyod/PythonAlgorithms
f0a0f67be069fc9e9fa3027ed83942d6401223fe
[ "MIT" ]
1
2018-05-02T12:31:52.000Z
2018-05-02T12:31:52.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests for the union find data structure.
"""

try:
    from ..unionfind import UnionFind
except ValueError:
    pass


def test_unionfind_basics():
    """
    Test the basic properties of unionfind.
    """
    u = UnionFind([1, 2, 3])

    assert u.in_same_set(1, 2) is False
    assert u.in_same_set(2, 3) is False

    u.union(1, 3)

    assert u.in_same_set(1, 2) is False
    assert u.in_same_set(3, 1)
    assert u.get_root(1) == u.get_root(3)


def test_unionfind_adding_elements():
    """
    Test adding operations, mostly syntactic sugar.
    """
    u = UnionFind([1, 2])
    u.add(['a', 'b'])

    assert 1 in u
    assert 'a' in u


def test_unionfind_example():
    """
    Test on a slightly more invovled example.
    """
    u = UnionFind([1, 2, 3, 4, 5])

    u.union(1, 3)
    u.union(2, 4)

    assert u.in_same_set(1, 3)
    assert u.in_same_set(4, 2)
    assert not u.in_same_set(2, 5)
    assert not u.in_same_set(2, 1)
    assert not u.in_same_set(1, 4)

    u.union(5, 1)

    assert u.in_same_set(3, 5)


def test_unionfind_several():
    """
    Test that we can take union of more than two elements.
    """
    u = UnionFind([1, 2, 3, 4, 5, 6, 7, 8])

    u.union([1, 2, 3])
    u.union([4, 5, 6])
    u.union([7, 8])

    assert u.in_same_set(1, 3)
    assert u.in_same_set(6, 4)
    assert u.in_same_set(7, 8)
    assert not u.in_same_set(2, 5)
    assert not u.in_same_set(4, 8)


def test_unionfind_compression():
    """
    Test path compression and the union by rank.
    """
    # Test the ranking
    elements = list(range(100))
    u = UnionFind(elements)
    for i in range(len(elements) - 1):
        u.union(elements[i], elements[i + 1])

    assert max(u._rank.values()) == 1

    # Test path compression
    parent_nodes = list(u._parent.values())
    assert all(parent == parent_nodes[0] for parent in parent_nodes)


if __name__ == "__main__":
    import pytest

    # --durations=10 <- May be used to show potentially slow tests
    pytest.main(args=['.', '--doctest-modules', '-v'])
21.62
68
0.584181
0
0
0
0
0
0
0
0
547
0.253006
c746ec91b306e818609b2388a6f07e590b53157d
10,961
py
Python
a3/ga.py
mishless/LearningSystems
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
[ "MIT" ]
1
2021-08-01T03:30:49.000Z
2021-08-01T03:30:49.000Z
a3/ga.py
mishless/LearningSystems
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
[ "MIT" ]
null
null
null
a3/ga.py
mishless/LearningSystems
635d9af9d00ae0360d7ca8571bf47f782fdcdfe9
[ "MIT" ]
null
null
null
# Genetic Algorithm for solving the Traveling Salesman problem # Authors: Mihaela Stoycheva, Vukan Turkulov # Includes import configparser import math import matplotlib.pyplot as plt import numpy import random import sys from operator import itemgetter #Global variables(yay!) # Configuration variables(read from config.txt) mutation_rate = 0; population_size = 0; elitism_rate = 0; tournament_rate = 0; max_iterations = 0; input_file_name = ""; parent_rate = 0; # General global variables cities = {}; number_of_cities = 0; parent_number = 0; tournament_size = 0; elite_number = 0; crossover_number = 0; def read_config(): global mutation_rate; global elitism_rate; global tournament_rate; global population_size; global input_file_name; global max_iterations; global parent_rate; global parent_number; global tournament_size; global elite_number; global crossover_number; config = configparser.ConfigParser(); config.read("config.txt"); mutation_rate = float(config['general']['mutation_rate']); population_size = int(config['general']['population_size']); elitism_rate = float(config['general']['elitism_rate']); tournament_rate = float(config['general']['tournament_rate']); max_iterations = int(config['general']['max_iterations']); parent_rate = float(config['general']['parent_rate']); input_file_name = config['general']['input_file_name']; parent_number = int(population_size * parent_rate); elite_number = int(population_size * elitism_rate); tournament_size = int(population_size * tournament_rate); crossover_number = population_size - elite_number; def print_config(): print("***** CONFIGURATION *****"); print_var("Population size", population_size); print_var("Elitism rate", elitism_rate); print_var("Tournament rate", tournament_rate); print_var("Mutation rate", mutation_rate); print_var("Parent rate", parent_rate); print_var("Iteration number", max_iterations); print(""); print_var("Tournament size", tournament_size); print_var("Parent number", parent_number); print_var("Elite number", elite_number); print_var("Crossover number", crossover_number); print(""); def read_input_file(): global number_of_cities; file = open(input_file_name, "r"); file_lines = file.readlines(); file.close(); for file_line in file_lines: temp = file_line.split(); cities[int(temp[0])] = {'x' : float(temp[1]), 'y' : float(temp[2])}; number_of_cities = len(cities); def get_distance(city1, city2): return math.sqrt( ((city1['x']-city2['x'])**2) + ((city1['y']-city2['y'])**2)); def print_cities(): print("***** CITIES *****"); for key, city in cities.items(): print("#" + "%2s" % str(key) + ": (" + "%6s" % str(city['x']) + ', ' + "%6s" % str(city['y']) + ')'); print(""); def print_var(name, var): print(name + ":" + " "*(17-len(name)) + str(var)); def init(): read_config(); read_input_file(); print_config(); def create_random_individual(): individual = []; # We must begin at first city individual.append(1); # Create list of city indexes indexes = list(range(2,number_of_cities+1)); while len(indexes) > 0: picked_index = random.choice(indexes); indexes.remove(picked_index); individual.append(picked_index); # We must end at first city individual.append(1); return individual; def print_population(population, name): print("***** POPULATION: " + name + " *****"); print("Population size = " + str(len(population))); i = 0; for individual in population: print("IND #" + str(i) + ": " + str(individual)); i += 1; def print_population_2(population, name): print("***** POPULATION: " + name + " *****"); print("Population size = " + 
str(len(population))); i = 0; for individual in population: print("IND #" + str(i) + " distance = " + str(evaluate_individual(individual))); i += 1; print(""); def print_population_3(population, name): print("***** POPULATION: " + name + " *****"); print("Population size = " + str(len(population))); for individual in population: print(str(individual) + ": distance = " + str(evaluate_individual(individual))); print(""); def create_random_population(population_size): population = []; for i in range(0, population_size): population.append(create_random_individual()); return population; def evaluate_individual(individual): distance_traveled = 0; for i in range(0, len(individual)-1): distance_traveled = (distance_traveled + get_distance(cities[individual[i]], cities[individual[i+1]])); return distance_traveled; def evaluate_population(population): evaluations = []; for individual in population: evaluations.append((evaluate_individual(individual), individual)); return evaluations; def select_tournament_pool(data): tournament_pool = []; indexes = list(range(0, len(data))); for i in range(0, tournament_size): chosen_index = random.choice(indexes); tournament_pool.append(data[chosen_index]); indexes.remove(chosen_index); return tournament_pool; def best_solution(pool): best_individual = {'eval' : sys.float_info.max}; for individual in pool: if individual['eval'] < best_individual['eval']: best_individual = individual; return best_individual; def run_tournament(pool): return best_solution(pool); def merge_popul_and_eval(population, evaluations): data = []; for i in range(0, len(population)): data.append({'ind' : population[i], 'eval' : evaluations[i]}); return data; def select_parent_pool(population, evaluations): parent_pool = []; data = merge_popul_and_eval(population, evaluations); for i in range(0, parent_number): tournament_pool = select_tournament_pool(data); parent = run_tournament(tournament_pool); parent_pool.append(parent['ind']); data.remove(parent); return parent_pool; def is_individual_valid(individual): if(len(individual) != (number_of_cities+1)): print("INVALID " + str(individual)); return False; if(individual[0] != 1): print("INVALID " + str(individual)); return False; if(individual[-1] != 1): print("INVALID " + str(individual)); return False; for city in individual: if city == 1: if individual.count(city) != 2: print("INVALID " + str(individual)); return False; else: if individual.count(city) != 1: print("INVALID " + str(individual)); return False; return True; def is_population_valid(population): for individual in population: if is_individual_valid(individual) == False: return False; return True; def create_child(parent1, parent2): l = len(parent1); x = random.randint(1, l-1); y = random.randint(x, l-1); child = []; extract = parent1[x:y]; """print_var("P1", parent1); print_var("P2", parent2); print_var("x", x); print_var("y", y); print_var("Extract", extract);""" i = 0; for j in range(0, x): while(parent2[i] in extract): i += 1; child.append(parent2[i]); i += 1; child.extend(extract); for j in range(y, l): while(parent2[i] in extract): i += 1; child.append(parent2[i]); i += 1; return child; def generate_children(parent_pool, child_num): children = []; for i in range(0, child_num): parent1 = random.choice(parent_pool); parent_pool.remove(parent1); parent2 = random.choice(parent_pool); parent_pool.append(parent1); new_child = create_child(parent1, parent2); children.append(new_child); return children; def generate_elites(population, evaluations, number): data = 
merge_popul_and_eval(population, evaluations); elites = []; for i in range(0, number): best = best_solution(data); elites.append(best['ind']); data.remove(best); return elites; def mutate_individual(individual): i = random.randint(1, len(individual)-2); j = i; while j == i: j = random.randint(1, len(individual)-2); individual[i], individual[j] = individual[j], individual[i]; def mutate_population(population): for individual in population: if random.random() < mutation_rate: mutate_individual(individual); def test_stuff(): """ p1 = "abcdefg"; p2 = "1234567"; for i in range(0,10): print(create_child(p1,p2)); ind = [1,2,3,4,5,6]; print("Before", ind); mutate_individual(ind); print("After", ind); exit();""" def perform_GA(): best_solutions = []; best_individuals = []; best_solution = None; #print("***** ALGORITHM START *****"); population = create_random_population(population_size); iteration_counter = 1; while True: #print("Running iteration " + str(iteration_counter) + ":"); evaluations = evaluate_population(population); best_solution = min(evaluations, key=lambda evaluation:evaluation[0]) best_solutions.append(best_solution[0]); best_individuals.append(best_solution[1]); evaluations = [evaluation[0] for evaluation in evaluations] if iteration_counter == max_iterations: break; parent_pool = select_parent_pool(population, evaluations); children = generate_children(parent_pool, crossover_number); mutate_population(children); elites = generate_elites(population, evaluations, elite_number); # Prepare population for the next iteration population = children + elites; iteration_counter += 1; if is_population_valid(population) == False: break; return (best_solutions, best_individuals); def do_what_needs_to_be_done(): results = []; bests = []; print("***** ALGORITHM START *****"); sys.stdout.flush() for i in range(0, 10): print("Starting cycle " + str(i+1)); results.append(perform_GA()); bests.append((results[i][0][-1], results[i][1][-1])); best_ind = bests.index(min(bests, key=lambda best:best[0])); print(str(best_ind)); print("***** RESULTS *****"); print("Best result is " + str(bests[best_ind][0])); print("Best result is " + str(bests[best_ind][1])); plt.plot(results[best_ind][0]); plt.show(); #main init(); do_what_needs_to_be_done()
26.159905
77
0.624487
0
0
0
0
0
0
0
0
1,666
0.151993
c7477304b232543e959b4e41d7f4db3d8d55814b
334
py
Python
products/migrations/0010_remove_product_updated_at.py
UB-ES-2021-A1/wannasell-backend
84360b2985fc28971867601373697f39303e396b
[ "Unlicense" ]
null
null
null
products/migrations/0010_remove_product_updated_at.py
UB-ES-2021-A1/wannasell-backend
84360b2985fc28971867601373697f39303e396b
[ "Unlicense" ]
62
2021-11-22T21:52:44.000Z
2021-12-17T15:07:02.000Z
products/migrations/0010_remove_product_updated_at.py
UB-ES-2021-A1/wannasell-backend
84360b2985fc28971867601373697f39303e396b
[ "Unlicense" ]
null
null
null
# Generated by Django 3.2.8 on 2021-11-25 17:50

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('products', '0009_auto_20211125_1846'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='updated_at',
        ),
    ]
18.555556
48
0.598802
249
0.745509
0
0
0
0
0
0
103
0.308383
c74852ff0006431dcf627c07119eece06aae36cb
160
py
Python
ResumeAnalyser/apps.py
samyakj2307/recruitai_resume_backend
52f8eda63d479b28fc19fe2d7149ab9ee9be122f
[ "MIT" ]
null
null
null
ResumeAnalyser/apps.py
samyakj2307/recruitai_resume_backend
52f8eda63d479b28fc19fe2d7149ab9ee9be122f
[ "MIT" ]
null
null
null
ResumeAnalyser/apps.py
samyakj2307/recruitai_resume_backend
52f8eda63d479b28fc19fe2d7149ab9ee9be122f
[ "MIT" ]
1
2021-06-03T13:56:53.000Z
2021-06-03T13:56:53.000Z
from django.apps import AppConfig


class ResumeanalyserConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'ResumeAnalyser'
22.857143
56
0.78125
123
0.76875
0
0
0
0
0
0
47
0.29375
c748ba40f4f42a2340be17f0209db3df304f6bd7
196
py
Python
plugins/core/player_manager_plugin/__init__.py
StarryPy/StarryPy-Historic
b9dbd552b8c4631a5a8e9dda98b7ba447eca59da
[ "WTFPL" ]
38
2015-02-12T11:57:59.000Z
2018-11-15T16:03:45.000Z
plugins/core/player_manager_plugin/__init__.py
StarryPy/StarryPy-Historic
b9dbd552b8c4631a5a8e9dda98b7ba447eca59da
[ "WTFPL" ]
68
2015-02-05T23:29:47.000Z
2017-12-27T08:26:25.000Z
plugins/core/player_manager_plugin/__init__.py
StarryPy/StarryPy-Historic
b9dbd552b8c4631a5a8e9dda98b7ba447eca59da
[ "WTFPL" ]
21
2015-02-06T18:58:21.000Z
2017-12-24T20:08:59.000Z
from plugins.core.player_manager_plugin.plugin import PlayerManagerPlugin
from plugins.core.player_manager_plugin.manager import (
    Banned,
    UserLevels,
    permissions,
    PlayerManager
)
24.5
73
0.795918
0
0
0
0
0
0
0
0
0
0
c74916514901ff1d3dbfb832b264c70329520805
3,063
py
Python
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/custom_attributes/haproxy_validator.py
jnpr-pranav/contrail-controller
428eee37c28c31830fd764315794e1a6e52720c1
[ "Apache-2.0" ]
37
2020-09-21T10:42:26.000Z
2022-01-09T10:16:40.000Z
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/custom_attributes/haproxy_validator.py
jnpr-pranav/contrail-controller
428eee37c28c31830fd764315794e1a6e52720c1
[ "Apache-2.0" ]
null
null
null
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/ha_proxy/custom_attributes/haproxy_validator.py
jnpr-pranav/contrail-controller
428eee37c28c31830fd764315794e1a6e52720c1
[ "Apache-2.0" ]
21
2020-08-25T12:48:42.000Z
2022-03-22T04:32:18.000Z
from builtins import str
from builtins import range
from builtins import object
import logging
import inspect
import os


class CustomAttr(object):
    """This type handles non-flat data-types like
    int, str, bool.
    """
    def __init__(self, key, value):
        self._value = value
        self._key = key

    def validate(self):
        pass

    def post_validation(self):
        pass


class CustomAttrTlsContainer(CustomAttr):
    def __init__(self, key, value):
        super(CustomAttrTlsContainer, self).__init__(key, value)

    def validate(self):
        return True

    def post_validation(self):
        return self._value


def validate_custom_attributes(custom_attributes_dict, section,
                               custom_attributes):
    section_dict = {}
    if custom_attributes and section in custom_attributes_dict:
        for key, value in list(custom_attributes.items()):
            if key in custom_attributes_dict[section]:
                #Sanitize the value
                try:
                    type_attr = custom_attributes_dict[section][key]['type']
                    limits = custom_attributes_dict[section][key]['limits']
                    if type_attr == 'int':
                        value = int(value)
                        if value in range(limits[0], limits[1]):
                            section_dict.update({key:value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to" \
                                "validation failure" % (key, value))
                    elif type_attr == 'str':
                        if len(value) in range(limits[0], limits[1]):
                            section_dict.update({key:value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to" \
                                "validation failure" % (key, value))
                    elif type_attr == 'bool':
                        if value in limits:
                            if value == 'True':
                                value = ''
                            elif value == 'False':
                                value = 'no '
                            section_dict.update({key:value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to" \
                                "validation failure" % (key, value))
                    elif inspect.isclass(eval(type_attr)):
                        new_custom_attr = eval(type_attr)(key, value)
                        if new_custom_attr.validate():
                            value = new_custom_attr.post_validation()
                            section_dict.update({key:value})
                        else:
                            logging.info("Skipping key: %s, value: %s due to" \
                                "validation failure" % (key, value))
                except Exception as e:
                    logging.error(str(e))
                    continue

    return section_dict
39.269231
79
0.479595
524
0.171074
0
0
0
0
0
0
369
0.12047
c74949362f59fa0673a80dd80fbdd7f5a0af70d8
1,405
py
Python
python/janitor/typecache.py
monkeyman79/janitor
a41187c1b58b736a5de2b0b30eb51d85a65b17c3
[ "MIT" ]
2
2018-11-06T13:02:27.000Z
2021-02-22T19:07:22.000Z
python/janitor/typecache.py
monkeyman79/janitor
a41187c1b58b736a5de2b0b30eb51d85a65b17c3
[ "MIT" ]
1
2016-09-28T12:24:43.000Z
2016-09-28T13:47:35.000Z
python/janitor/typecache.py
monkeyman79/janitor
a41187c1b58b736a5de2b0b30eb51d85a65b17c3
[ "MIT" ]
null
null
null
import gdb


class TypeCache(object):
    def __init__(self):
        self.cache = {}
        self.intptr_type = False

    def clear(self):
        self.cache = {}
        self.intptr_type = False

    def get_type(self, typename):
        if typename in self.cache:
            return self.cache[typename]
        try:
            gdb_type = gdb.lookup_type(typename)
            self.cache[typename] = gdb_type
            return gdb_type
        except:
            pass
        try:
            proto = gdb.parse_and_eval("(%s*)0" % typename)
            gdb_type = proto.type.target()
            self.cache[typename] = gdb_type
            return gdb_type
        except:
            pass
        return None

    def get_intptr_type(self):
        if self.intptr_type != False:
            return self.intptr_type
        ptr_type = self.get_type("void*")
        if ptr_type == None:
            self.intptr_type = None
            return None
        ulong_type = self.get_type("unsigned long")
        if ulong_type == None:
            self.intptr_type = None
            return None
        if ulong_type.sizeof >= ptr_type.sizeof:
            self.intptr_type = ulong_type
            return ulong_type
        ullong_type = self.get_type("unsigned long long")
        self.intptr_type = ullong_type
        return ullong_type


cache = TypeCache()
26.509434
59
0.540925
1,366
0.972242
0
0
0
0
0
0
50
0.035587
c74a04a139575fe8c546ea452d0215d058b4fa6f
805
py
Python
key_phrase.py
Santara/autoSLR
8c524b8a0023d1434cb7be4e110103605d0d2cab
[ "MIT" ]
1
2020-08-12T23:17:38.000Z
2020-08-12T23:17:38.000Z
key_phrase.py
Santara/autoSLR
8c524b8a0023d1434cb7be4e110103605d0d2cab
[ "MIT" ]
null
null
null
key_phrase.py
Santara/autoSLR
8c524b8a0023d1434cb7be4e110103605d0d2cab
[ "MIT" ]
1
2019-08-29T09:36:46.000Z
2019-08-29T09:36:46.000Z
import os
import sys

directory = sys.argv[1]
outfile = open("key_phrases.csv","w")
files = {}

for filename in os.listdir(directory):
    text=[]
    with open(os.path.join(directory, filename)) as f:
        text=[l.strip() for l in f if len(l.strip())>2]
    data=''
    for t in text:
        if len(t.split()) > 1:
            data = data+'. '+t.strip()

    whitelist = set('abcdefghijklmnopqrstuvwxy ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    answer = ''.join(filter(whitelist.__contains__, data))
    answer=' '.join(answer.split())

    import rake
    import operator
    rake_object = rake.Rake("/home/ashutosh/Sudeshna/RAKE-tutorial/data/stoplists/SmartStoplist.txt", 3,3,1)

    import pprint
    pp = pprint.PrettyPrinter()

    keywords = rake_object.run(answer)
    for entry in keywords:
        outfile.write("%s, %s\n" % (entry[0], str(entry[1])) )

outfile.close()
25.15625
105
0.695652
0
0
0
0
0
0
0
0
167
0.207453
c74ab0b0f80631d9cb06c8040217e1f860dd10c2
1,127
py
Python
tests/test_utils.py
aced-differentiate/dft-input-gen
14bee323517714c433682bad2dcb897b223dd5ec
[ "Apache-2.0" ]
1
2021-04-15T09:54:52.000Z
2021-04-15T09:54:52.000Z
tests/test_utils.py
CitrineInformatics/dft-input-gen
14bee323517714c433682bad2dcb897b223dd5ec
[ "Apache-2.0" ]
1
2021-01-28T22:12:07.000Z
2021-01-28T22:12:07.000Z
tests/test_utils.py
aced-differentiate/dft-input-gen
14bee323517714c433682bad2dcb897b223dd5ec
[ "Apache-2.0" ]
2
2020-12-08T18:14:13.000Z
2020-12-18T19:01:11.000Z
"""Unit tests for helper utilities in :mod:`dftinputgen.utils`.""" import os import pytest from ase import io as ase_io from dftinputgen.utils import get_elem_symbol from dftinputgen.utils import read_crystal_structure from dftinputgen.utils import get_kpoint_grid_from_spacing from dftinputgen.utils import DftInputGeneratorUtilsError test_base_dir = os.path.dirname(__file__) feo_conv_file = os.path.join(test_base_dir, "qe", "files", "feo_conv.vasp") feo_conv = ase_io.read(feo_conv_file) def test_get_elem_symbol(): assert get_elem_symbol("Fe-34") == "Fe" assert get_elem_symbol("3RGe-34") == "Ge" with pytest.raises(DftInputGeneratorUtilsError): get_elem_symbol("G23") def test_read_crystal_structure(): # str with path to crystal structure file is OK cs = read_crystal_structure(feo_conv_file) assert cs == feo_conv # any other type of input should throw an error with pytest.raises(TypeError): read_crystal_structure(feo_conv) def test_kpoint_grid_from_spacing(): assert get_kpoint_grid_from_spacing(feo_conv, 0.2) == pytest.approx( [7, 7, 7] )
28.897436
75
0.754215
0
0
0
0
0
0
0
0
215
0.190772
c74b3631946b737bd9c4684c29b89101e0d8c544
6,044
py
Python
core/models.py
nforesperance/Django-Channels-ChatApp
b244954206214f7dc1b8793291d957a5bf80f0e2
[ "MIT" ]
2
2020-07-18T05:19:36.000Z
2020-07-18T05:19:38.000Z
core/models.py
nforesperance/Django-Channels-ChatApp
b244954206214f7dc1b8793291d957a5bf80f0e2
[ "MIT" ]
4
2021-03-19T02:37:45.000Z
2021-06-04T23:02:41.000Z
core/models.py
nforesperance/Django-Channels-ChatApp
b244954206214f7dc1b8793291d957a5bf80f0e2
[ "MIT" ]
null
null
null
from django.contrib.auth.models import User from django.db.models import (Model, TextField, DateTimeField, ForeignKey, CASCADE) from asgiref.sync import async_to_sync from channels.layers import get_channel_layer from django.db import models import json class MessageModel(Model): """ This class represents a chat message. It has a owner (user), timestamp and the message body. """ user = ForeignKey(User, on_delete=CASCADE, verbose_name='user', related_name='from_user', db_index=True) recipient = ForeignKey(User, on_delete=CASCADE, verbose_name='recipient', related_name='to_user', db_index=True) timestamp = DateTimeField('timestamp', auto_now_add=True, editable=False, db_index=True) body = TextField('body') def __str__(self): return str(self.id) def characters(self): """ Toy function to count body characters. :return: body's char number """ return len(self.body) def notify_ws_clients(self): """ Inform client there is a new message. """ notification = { 'type': 'chat_message', 'message': '{}'.format(self.id) } channel_layer = get_channel_layer() print("user.id {}".format(self.user.id)) print("user.id {}".format(self.recipient.id)) async_to_sync(channel_layer.group_send)("{}".format(self.user.id), notification) async_to_sync(channel_layer.group_send)("{}".format(self.recipient.id), notification) def save(self, *args, **kwargs): """ Trims white spaces, saves the message and notifies the recipient via WS if the message is new. """ new = self.id self.body = self.body.strip() # Trimming whitespaces from the body super(MessageModel, self).save(*args, **kwargs) if new is None: self.notify_ws_clients() # Meta class Meta: app_label = 'core' verbose_name = 'message' verbose_name_plural = 'messages' ordering = ('-timestamp',) class Group(models.Model): name = models.CharField(max_length = 20) members = models.TextField() messages = models.TextField () def set_members(self,user_id_list): self.members = json.dumps(user_id_list) def get_members(self): return json.loads(self.members) def add(self,user_id): current_list = self.get_members() if user_id in current_list: print("user is already in the group") else: new_list = current_list.append(user_id) self.set_members(new_list) def remove(self,user_id): current_list = self.get_members() if user_id in current_list: new_list = current_list.remove(user_id) self.set_members(new_list) else: print("User is not a member of theis group") def has(self,user_id): current_list = self.get_members() return(user_id in current_list) # Set of functions for dealing with group messages def set_messages(self,message_id_list): self.messages = json.dumps(message_id_list) def get_messages(self): return json.loads(self.messages) def add_message(self,message_id): current_list = self.get_messages() new_list = current_list.append(message_id) self.set_messages(new_list) def delete_message(self,message_id): current_list = self.get_messages() if message_id in current_list: new_list = current_list.remove(message_id) self.set_messages(new_list) def save(self, *args, **kwargs): if self.pk is None or self.members is None or self.members == '': self.set_members([]) if self.pk is None or self.messages is None or self.messages == '': self.set_messages([]) super(Group, self).save(*args, **kwargs) def __str__(self): return self.name+" ID: "+str(self.id) # Meta class Meta: app_label = 'core' verbose_name = 'Group' verbose_name_plural = 'Groups' ordering = ('name',) class GroupMessage(Model): """ This class represents a chat message. It has a owner (user), timestamp and the message body. 
""" sender = ForeignKey(User, on_delete=CASCADE, verbose_name='sender', related_name='from_sender', db_index=True) group = ForeignKey(Group, on_delete=CASCADE, verbose_name='group', related_name='to_group', db_index=True) time = DateTimeField('time', auto_now_add=True, editable=False, db_index=True) body = TextField('body') def __str__(self): return str(self.id) def characters(self): """ Toy function to count body characters. :return: body's char number """ return len(self.body) def notify_ws_clients(self): """ Inform client there is a new message. """ notification = { 'type': 'group_message', 'group': '{}'.format(self.id) } channel_layer = get_channel_layer() group_id = "group"+str(self.group.id) print("group.id {}".format(group_id)) async_to_sync(channel_layer.group_send)(group_id, notification) def save(self, *args, **kwargs): """ Trims white spaces, saves the message and notifies the recipient via WS if the message is new. """ new = self.id self.body = self.body.strip() # Trimming whitespaces from the body super(GroupMessage, self).save(*args, **kwargs) if new is None: self.notify_ws_clients() # Meta class Meta: app_label = 'core' verbose_name = 'group message' verbose_name_plural = 'group messags' ordering = ('-time',)
32.67027
93
0.603077
5,752
0.951688
0
0
0
0
0
0
1,342
0.222038
c74bed1c84a21dce43450d469d8869b0372e61e0
15,798
py
Python
backup/model.py
jsikyoon/ASNP-RMR
ddd3e586b01ba3a7f8b3721582aca7403649400e
[ "MIT" ]
8
2020-07-21T02:49:54.000Z
2021-09-28T02:22:37.000Z
backup/model.py
jsikyoon/ASNP-RMR
ddd3e586b01ba3a7f8b3721582aca7403649400e
[ "MIT" ]
null
null
null
backup/model.py
jsikyoon/ASNP-RMR
ddd3e586b01ba3a7f8b3721582aca7403649400e
[ "MIT" ]
1
2020-09-02T06:39:49.000Z
2020-09-02T06:39:49.000Z
import tensorflow as tf import numpy as np # utility methods def batch_mlp(input, output_sizes, variable_scope): """Apply MLP to the final axis of a 3D tensor (reusing already defined MLPs). Args: input: input tensor of shape [B,n,d_in]. output_sizes: An iterable containing the output sizes of the MLP as defined in `basic.Linear`. variable_scope: String giving the name of the variable scope. If this is set to be the same as a previously defined MLP, then the weights are reused. Returns: tensor of shape [B,n,d_out] where d_out=output_sizes[-1] """ # Get the shapes of the input and reshape to parallelise across observations batch_size, _, filter_size = input.shape.as_list() output = tf.reshape(input, (-1, filter_size)) output.set_shape((None, filter_size)) # Pass through MLP with tf.variable_scope(variable_scope, reuse=tf.AUTO_REUSE): for i, size in enumerate(output_sizes[:-1]): output = tf.nn.relu( tf.layers.dense(output, size, name="layer_{}".format(i))) # Last layer without a ReLu output = tf.layers.dense( output, output_sizes[-1], name="layer_{}".format(i + 1)) # Bring back into original shape output = tf.reshape(output, (batch_size, -1, output_sizes[-1])) return output class DeterministicEncoder(object): """The Deterministic Encoder.""" def __init__(self, output_sizes, attention): """(A)NP deterministic encoder. Args: output_sizes: An iterable containing the output sizes of the encoding MLP. attention: The attention module. """ self._output_sizes = output_sizes self._attention = attention def __call__(self, context_x, context_y, target_x): """Encodes the inputs into one representation. Args: context_x: Tensor of shape [B,observations,d_x]. For this 1D regression task this corresponds to the x-values. context_y: Tensor of shape [B,observations,d_y]. For this 1D regression task this corresponds to the y-values. target_x: Tensor of shape [B,target_observations,d_x]. For this 1D regression task this corresponds to the x-values. Returns: The encoded representation. Tensor of shape [B,target_observations,d] """ # Concatenate x and y along the filter axes encoder_input = tf.concat([context_x, context_y], axis=-1) # Pass final axis through MLP hidden = batch_mlp(encoder_input, self._output_sizes, "deterministic_encoder") # Apply attention with tf.variable_scope("deterministic_encoder", reuse=tf.AUTO_REUSE): hidden = self._attention(context_x, target_x, hidden) return hidden class LatentEncoder(object): """The Latent Encoder.""" def __init__(self, output_sizes, num_latents): """(A)NP latent encoder. Args: output_sizes: An iterable containing the output sizes of the encoding MLP. num_latents: The latent dimensionality. """ self._output_sizes = output_sizes self._num_latents = num_latents def __call__(self, x, y): """Encodes the inputs into one representation. Args: x: Tensor of shape [B,observations,d_x]. For this 1D regression task this corresponds to the x-values. y: Tensor of shape [B,observations,d_y]. For this 1D regression task this corresponds to the y-values. 
Returns: A normal distribution over tensors of shape [B, num_latents] """ # Concatenate x and y along the filter axes encoder_input = tf.concat([x, y], axis=-1) # Pass final axis through MLP hidden = batch_mlp(encoder_input, self._output_sizes, "latent_encoder") # Aggregator: take the mean over all points hidden = tf.reduce_mean(hidden, axis=1) # Have further MLP layers that map to the parameters of the Gaussian latent with tf.variable_scope("latent_encoder", reuse=tf.AUTO_REUSE): # First apply intermediate relu layer hidden = tf.nn.relu( tf.layers.dense(hidden, (self._output_sizes[-1] + self._num_latents)/2, name="penultimate_layer")) # Then apply further linear layers to output latent mu and log sigma mu = tf.layers.dense(hidden, self._num_latents, name="mean_layer") log_sigma = tf.layers.dense(hidden, self._num_latents, name="std_layer") # Compute sigma sigma = 0.1 + 0.9 * tf.sigmoid(log_sigma) return tf.contrib.distributions.Normal(loc=mu, scale=sigma) class Decoder(object): """The Decoder.""" def __init__(self, output_sizes): """(A)NP decoder. Args: output_sizes: An iterable containing the output sizes of the decoder MLP as defined in `basic.Linear`. """ self._output_sizes = output_sizes def __call__(self, representation, target_x): """Decodes the individual targets. Args: representation: The representation of the context for target predictions. Tensor of shape [B,target_observations,?]. target_x: The x locations for the target query. Tensor of shape [B,target_observations,d_x]. Returns: dist: A multivariate Gaussian over the target points. A distribution over tensors of shape [B,target_observations,d_y]. mu: The mean of the multivariate Gaussian. Tensor of shape [B,target_observations,d_x]. sigma: The standard deviation of the multivariate Gaussian. Tensor of shape [B,target_observations,d_x]. """ # concatenate target_x and representation hidden = tf.concat([representation, target_x], axis=-1) # Pass final axis through MLP hidden = batch_mlp(hidden, self._output_sizes, "decoder") # Get the mean an the variance mu, log_sigma = tf.split(hidden, 2, axis=-1) # Bound the variance sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma) # Get the distribution dist = tf.contrib.distributions.MultivariateNormalDiag( loc=mu, scale_diag=sigma) return dist, mu, sigma class LatentModel(object): """The (A)NP model.""" def __init__(self, latent_encoder_output_sizes, num_latents, decoder_output_sizes, use_deterministic_path=True, deterministic_encoder_output_sizes=None, attention=None): """Initialises the model. Args: latent_encoder_output_sizes: An iterable containing the sizes of hidden layers of the latent encoder. num_latents: The latent dimensionality. decoder_output_sizes: An iterable containing the sizes of hidden layers of the decoder. The last element should correspond to d_y * 2 (it encodes both mean and variance concatenated) use_deterministic_path: a boolean that indicates whether the deterministic encoder is used or not. deterministic_encoder_output_sizes: An iterable containing the sizes of hidden layers of the deterministic encoder. The last one is the size of the deterministic representation r. attention: The attention module used in the deterministic encoder. Only relevant when use_deterministic_path=True. 
""" self._latent_encoder = LatentEncoder(latent_encoder_output_sizes, num_latents) self._decoder = Decoder(decoder_output_sizes) self._use_deterministic_path = use_deterministic_path if use_deterministic_path: self._deterministic_encoder = DeterministicEncoder( deterministic_encoder_output_sizes, attention) def __call__(self, query, num_targets, target_y=None): """Returns the predicted mean and variance at the target points. Args: query: Array containing ((context_x, context_y), target_x) where: context_x: Tensor of shape [B,num_contexts,d_x]. Contains the x values of the context points. context_y: Tensor of shape [B,num_contexts,d_y]. Contains the y values of the context points. target_x: Tensor of shape [B,num_targets,d_x]. Contains the x values of the target points. num_targets: Number of target points. target_y: The ground truth y values of the target y. Tensor of shape [B,num_targets,d_y]. Returns: log_p: The log_probability of the target_y given the predicted distribution. Tensor of shape [B,num_targets]. mu: The mean of the predicted distribution. Tensor of shape [B,num_targets,d_y]. sigma: The variance of the predicted distribution. Tensor of shape [B,num_targets,d_y]. """ (context_x, context_y), target_x = query # Pass query through the encoder and the decoder prior = self._latent_encoder(context_x, context_y) # For training, when target_y is available, use targets for latent encoder. # Note that targets contain contexts by design. if target_y is None: latent_rep = prior.sample() # For testing, when target_y unavailable, use contexts for latent encoder. else: posterior = self._latent_encoder(target_x, target_y) latent_rep = posterior.sample() latent_rep = tf.tile(tf.expand_dims(latent_rep, axis=1), [1, num_targets, 1]) if self._use_deterministic_path: deterministic_rep = self._deterministic_encoder(context_x, context_y, target_x) representation = tf.concat([deterministic_rep, latent_rep], axis=-1) else: representation = latent_rep dist, mu, sigma = self._decoder(representation, target_x) # If we want to calculate the log_prob for training we will make use of the # target_y. At test time the target_y is not available so we return None. if target_y is not None: log_p = dist.log_prob(target_y) posterior = self._latent_encoder(target_x, target_y) kl = tf.reduce_sum( tf.contrib.distributions.kl_divergence(posterior, prior), axis=-1, keepdims=True) kl = tf.tile(kl, [1, num_targets]) loss = - tf.reduce_mean(log_p - kl / tf.cast(num_targets, tf.float32)) else: log_p = None kl = None loss = None return mu, sigma, log_p, kl, loss def uniform_attention(q, v): """Uniform attention. Equivalent to np. Args: q: queries. tensor of shape [B,m,d_k]. v: values. tensor of shape [B,n,d_v]. Returns: tensor of shape [B,m,d_v]. """ total_points = tf.shape(q)[1] rep = tf.reduce_mean(v, axis=1, keepdims=True) # [B,1,d_v] rep = tf.tile(rep, [1, total_points, 1]) return rep def laplace_attention(q, k, v, scale, normalise): """Computes laplace exponential attention. Args: q: queries. tensor of shape [B,m,d_k]. k: keys. tensor of shape [B,n,d_k]. v: values. tensor of shape [B,n,d_v]. scale: float that scales the L1 distance. normalise: Boolean that determines whether weights sum to 1. Returns: tensor of shape [B,m,d_v]. 
""" k = tf.expand_dims(k, axis=1) # [B,1,n,d_k] q = tf.expand_dims(q, axis=2) # [B,m,1,d_k] unnorm_weights = - tf.abs((k - q) / scale) # [B,m,n,d_k] unnorm_weights = tf.reduce_sum(unnorm_weights, axis=-1) # [B,m,n] if normalise: weight_fn = tf.nn.softmax else: weight_fn = lambda x: 1 + tf.tanh(x) weights = weight_fn(unnorm_weights) # [B,m,n] rep = tf.einsum('bik,bkj->bij', weights, v) # [B,m,d_v] return rep def dot_product_attention(q, k, v, normalise): """Computes dot product attention. Args: q: queries. tensor of shape [B,m,d_k]. k: keys. tensor of shape [B,n,d_k]. v: values. tensor of shape [B,n,d_v]. normalise: Boolean that determines whether weights sum to 1. Returns: tensor of shape [B,m,d_v]. """ d_k = tf.shape(q)[-1] scale = tf.sqrt(tf.cast(d_k, tf.float32)) unnorm_weights = tf.einsum('bjk,bik->bij', k, q) / scale # [B,m,n] if normalise: weight_fn = tf.nn.softmax else: weight_fn = tf.sigmoid weights = weight_fn(unnorm_weights) # [B,m,n] rep = tf.einsum('bik,bkj->bij', weights, v) # [B,m,d_v] return rep def multihead_attention(q, k, v, num_heads=8): """Computes multi-head attention. Args: q: queries. tensor of shape [B,m,d_k]. k: keys. tensor of shape [B,n,d_k]. v: values. tensor of shape [B,n,d_v]. num_heads: number of heads. Should divide d_v. Returns: tensor of shape [B,m,d_v]. """ d_k = q.get_shape().as_list()[-1] d_v = v.get_shape().as_list()[-1] head_size = d_v / num_heads key_initializer = tf.random_normal_initializer(stddev=d_k**-0.5) value_initializer = tf.random_normal_initializer(stddev=d_v**-0.5) rep = tf.constant(0.0) for h in range(num_heads): o = dot_product_attention( tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer, name='wq%d' % h, use_bias=False, padding='VALID')(q), tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer, name='wk%d' % h, use_bias=False, padding='VALID')(k), tf.layers.Conv1D(head_size, 1, kernel_initializer=key_initializer, name='wv%d' % h, use_bias=False, padding='VALID')(v), normalise=True) rep += tf.layers.Conv1D(d_v, 1, kernel_initializer=value_initializer, name='wo%d' % h, use_bias=False, padding='VALID')(o) return rep class Attention(object): """The Attention module.""" def __init__(self, rep, output_sizes, att_type, scale=1., normalise=True, num_heads=8): """Create attention module. Takes in context inputs, target inputs and representations of each context input/output pair to output an aggregated representation of the context data. Args: rep: transformation to apply to contexts before computing attention. One of: ['identity','mlp']. output_sizes: list of number of hidden units per layer of mlp. Used only if rep == 'mlp'. att_type: type of attention. One of the following: ['uniform','laplace','dot_product','multihead'] scale: scale of attention. normalise: Boolean determining whether to: 1. apply softmax to weights so that they sum to 1 across context pts or 2. apply custom transformation to have weights in [0,1]. num_heads: number of heads for multihead. """ self._rep = rep self._output_sizes = output_sizes self._type = att_type self._scale = scale self._normalise = normalise if self._type == 'multihead': self._num_heads = num_heads def __call__(self, x1, x2, r): """Apply attention to create aggregated representation of r. Args: x1: tensor of shape [B,n1,d_x]. x2: tensor of shape [B,n2,d_x]. r: tensor of shape [B,n1,d]. Returns: tensor of shape [B,n2,d] Raises: NameError: The argument for rep/type was invalid. 
""" if self._rep == 'identity': k, q = (x1, x2) elif self._rep == 'mlp': # Pass through MLP k = batch_mlp(x1, self._output_sizes, "attention") q = batch_mlp(x2, self._output_sizes, "attention") else: raise NameError("'rep' not among ['identity','mlp']") if self._type == 'uniform': rep = uniform_attention(q, r) elif self._type == 'laplace': rep = laplace_attention(q, k, r, self._scale, self._normalise) elif self._type == 'dot_product': rep = dot_product_attention(q, k, r, self._normalise) elif self._type == 'multihead': rep = multihead_attention(q, k, r, self._num_heads) else: raise NameError(("'att_type' not among ['uniform','laplace','dot_product'" ",'multihead']")) return rep
36.068493
81
0.660653
11,330
0.717179
0
0
0
0
0
0
8,469
0.536081
c74e4682a52e8afc4e35ad4f69f1a64dccbd1416
3,520
py
Python
minotaur/_minotaur.py
giannitedesco/minotaur
1a043818775e14054cc3467ba6d1c07cbf128c6b
[ "Apache-2.0" ]
172
2020-08-24T14:34:00.000Z
2021-12-29T21:56:33.000Z
minotaur/_minotaur.py
giannitedesco/minotaur
1a043818775e14054cc3467ba6d1c07cbf128c6b
[ "Apache-2.0" ]
3
2020-08-25T13:46:30.000Z
2021-02-27T01:25:38.000Z
minotaur/_minotaur.py
giannitedesco/minotaur
1a043818775e14054cc3467ba6d1c07cbf128c6b
[ "Apache-2.0" ]
4
2020-08-24T17:21:18.000Z
2021-12-29T21:57:42.000Z
from typing import Dict, Tuple, Optional from pathlib import Path import asyncio from ._mask import Mask from ._event import Event from ._base import InotifyBase __all__ = ('Minotaur',) class Notification: __slots__ = ( '_path', '_type', '_isdir', '_unmount', '_qoverflow', ) def __init__(self, path: Path, type: Mask, isdir: bool, unmount: bool, qoverflow: bool = False): self._path = path self._type = type self._isdir = bool(isdir) self._unmount = bool(unmount) self._qoverflow = bool(qoverflow) @property def isdir(self) -> bool: return self._isdir @property def unmount(self) -> bool: return self._unmount @property def qoverflow(self) -> bool: return self._qoverflow @property def path(self) -> Path: return self._path def __repr__(self) -> str: t = self._isdir and 'dir' or 'file' return f'{type(self).__name__}({self._type.name} {t} {self._path})' @classmethod def create(cls, path: Path, mask: Mask) -> 'Notification': return cls(path, mask & Mask.EVENT_TYPE, bool(mask & Mask.ISDIR), bool(mask & Mask.UNMOUNT), bool(mask & Mask.Q_OVERFLOW)) class Minotaur(InotifyBase): """ Fancy interface for Inotify which does questionable things like: 1. Resolve watch-descriptors back to paths (which races with renames of original paths and can't be used safely, but other inotify packages provide this feature, so here it is for your delectation). 2. Link rename_from/rename_to events together. This feature would be useful but isn't yet actually implemented. Working on it... """ __slots__ = ( '_wdmap', '_cmap', ) _wdmap: Dict[int, Path] _cmap: Dict[Tuple[int, int], Event] def __init__(self, blocking: bool = True, cloexec: bool = True, loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: super().__init__(blocking, cloexec, loop) self._wdmap = {} self._cmap = {} def add_watch(self, p: Path, mask: Mask) -> int: try: wd = super().add_watch(p, mask) except Exception: raise else: self._wdmap[wd] = p.resolve() return wd def rm_watch(self, wd: int) -> int: try: return super().rm_watch(wd) except Exception: raise else: del self._wdmap[wd] def _resolve_path(self, wd: int, name: Path) -> Path: try: base_dir = self._wdmap[wd] except KeyError: path = name else: path = base_dir / name return path def __next__(self) -> Notification: evt = super()._next_event() if evt is None: raise StopIteration # TODO: Link rename_from/rename_to together if we have them path = self._resolve_path(evt.wd, evt.name) return Notification.create(path, evt.mask) async def __anext__(self) -> Notification: evt = await super()._next_event_async() if evt is None: raise StopIteration path = self._resolve_path(evt.wd, evt.name) return Notification.create(path, evt.mask)
26.268657
75
0.559659
3,325
0.944602
0
0
552
0.156818
250
0.071023
651
0.184943
c7508c28b649dccba896625618759517bbe0fd13
161
py
Python
pyclustering/container/examples/__init__.py
JosephChataignon/pyclustering
bf4f51a472622292627ec8c294eb205585e50f52
[ "BSD-3-Clause" ]
1,013
2015-01-26T19:50:14.000Z
2022-03-31T07:38:48.000Z
pyclustering/container/examples/__init__.py
peterlau0626/pyclustering
bf4f51a472622292627ec8c294eb205585e50f52
[ "BSD-3-Clause" ]
542
2015-01-20T16:44:32.000Z
2022-01-29T14:57:20.000Z
pyclustering/container/examples/__init__.py
peterlau0626/pyclustering
bf4f51a472622292627ec8c294eb205585e50f52
[ "BSD-3-Clause" ]
262
2015-03-19T07:28:12.000Z
2022-03-30T07:28:24.000Z
"""! @brief Collection of examples devoted to containers. @authors Andrei Novikov ([email protected]) @date 2014-2020 @copyright BSD-3-Clause """
17.888889
53
0.714286
0
0
0
0
0
0
0
0
161
1
c751066d68d4e91afb71f1ee11d13e9bcbb998a8
8,802
py
Python
novelty-detection/train_wood_vgg19.py
matherm/python-data-science
bdb49b18c5ef6044f8a9e6f95c81d5f7bb1d511f
[ "MIT" ]
1
2020-03-24T09:22:04.000Z
2020-03-24T09:22:04.000Z
novelty-detection/train_wood_vgg19.py
matherm/python-data-science
bdb49b18c5ef6044f8a9e6f95c81d5f7bb1d511f
[ "MIT" ]
1
2020-06-16T14:42:29.000Z
2020-06-16T14:42:29.000Z
novelty-detection/train_wood_vgg19.py
matherm/python-data-science
bdb49b18c5ef6044f8a9e6f95c81d5f7bb1d511f
[ "MIT" ]
null
null
null
import argparse import sys import torch import numpy as np import torch.nn as nn from torch.utils.data import DataLoader from torchvision.datasets import MNIST from torchvision.datasets import CIFAR10 import torchvision.transforms as transforms import matplotlib.pyplot as plt parser = argparse.ArgumentParser(description='PyTorch Novelty Detection') # TRAINING PARAMS parser.add_argument('--epochs', type=int, default=100, metavar='', help='Amount of epochs for training (default: 100)') parser.add_argument('--batch_size', type=int, default=1000, metavar='', help='Batch size for SGD (default: 100)') parser.add_argument('--lrate', type=float, default=0.0001, metavar="", help="Learning rate (default: 0.001") parser.add_argument('--with_cuda', action='store_true', dest='use_cuda', help="Shall cuda be used (default: False)") parser.add_argument('--model', type=int, default=0, help="Which model to train (0=KLminimizer, 1=Euclidean-Minimizer) (default: 0)") parser.add_argument('--plots', action='store_true', dest='plots', help="Shall matplotlib be used (default: False)") parser.add_argument('--grid', action='store_true', dest='grid', help="Grid search (default: False)") argv = parser.parse_args() sys.argv = [sys.argv[0]] from ummon import * from negvarbound import * from model import * from helpers import Evaluator import helpers torch.manual_seed(4) if __name__ == '__main__': # WOOD transform = transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), VGG19Features("pool4"), helpers.flatten_transform]) wood_data = ImagePatches("/ext/data/Wood-0035.png", mode='rgb', train=True, stride_y=14, stride_x=14, window_size=28, transform=transform) wood_data_test = AnomalyImagePatches("/ext/data/Wood-0035.png", mode='rgb', train=True, stride_y=14, stride_x=14, window_size=28, transform=transform, propability=1.0, anomaly=SquareAnomaly(size=8, color=255)) wood_data = [wood_data[i][0].data for i in range(len(wood_data))] wood_data = torch.stack(wood_data).numpy() / 10 wood_data_test = [wood_data_test[i][0].data for i in range(len(wood_data_test))] wood_data_test = torch.stack(wood_data_test).numpy() / 10 # Novelty data_novelty = wood_data_test # Train data_train = wood_data # Val data_val = data_train ###################################################### # NORMAL DISTRIBUTION ###################################################### # Model model = ModelNormal(input_features = data_train.shape[1], hidden_layer=20, latent_features=20) torch.manual_seed(4) # LOSS criterion = KLLoss(model=model, size_average=False) # INSTANTIATE OPTIMIZER optimizer = torch.optim.SGD(model.parameters(), lr=argv.lrate, weight_decay=1) #Evaluator evaluator = Evaluator(model, data_train, data_val, data_novelty) # Activate matplotlib argv.plots = True with Logger(loglevel=10, log_batch_interval=601) as lg: # CREATE A TRAINER my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer, trainingstate = Trainingstate(), model_filename="KL_MIN", use_cuda= argv.use_cuda, profile = False, convergence_eps = 1e-5) # START TRAINING my_trainer.fit(dataloader_training=(wood_data, 20), epochs=200) evaluator.evaluate_model(argv) ###################################################### # LOGNORMAL ###################################################### # Model model = ModelLogNormal(input_features = data_train.shape[1], hidden_layer=20, latent_features=20) torch.manual_seed(4) # LOSS criterion = KLLoss_lognormal(model=model, size_average=False) # INSTANTIATE OPTIMIZER optimizer 
= torch.optim.SGD(model.parameters(), lr=argv.lrate, weight_decay=1) #Evaluator evaluator = Evaluator(model, data_train, data_val, data_novelty) # Activate matplotlib argv.plots = True with Logger(loglevel=10, log_batch_interval=601) as lg: # CREATE A TRAINER my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer, trainingstate = Trainingstate(), model_filename="KL_MIN", use_cuda= argv.use_cuda, profile = False, convergence_eps = 1e-5) # START TRAINING my_trainer.fit(dataloader_training=(data_train, 20), epochs=argv.epochs) evaluator.evaluate_model(argv) ###################################################### # LAPLACE ###################################################### # Model model = ModelLaplace(input_features = data_train.shape[1], hidden_layer=20, latent_features=20) torch.manual_seed(4) # LOSS criterion = KLLoss_laplace(model=model, size_average=False, mean=2, scale=0.5) # INSTANTIATE OPTIMIZER optimizer = torch.optim.SGD(model.parameters(), lr=0.000001, weight_decay=1) #Evaluator evaluator = Evaluator(model, data_train, data_val, data_novelty) # Activate matplotlib argv.plots = True with Logger(loglevel=10, log_batch_interval=601) as lg: # CREATE A TRAINER my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer, trainingstate = Trainingstate(), model_filename="KL_MIN", use_cuda= argv.use_cuda, profile = False, convergence_eps = 1e-1) # START TRAINING my_trainer.fit(dataloader_training=(data_train, 20), epochs=300) evaluator.evaluate_model(argv) # {'AUROC LAT (TRAIN)': 0.8743801652892562, # 'AUROC LAT (VAL)': 0.8661157024793389, # 'AUROC REC (TRAIN)': 0.86900826446281, # 'AUROC REC (VAL)': 0.8528925619834712} ###################################################### # LAPLACE WITH R-SHIFT ###################################################### class CombinedLoss(nn.Module): def __init__(self, model, *args, **kwargs): super(CombinedLoss, self).__init__() self.model = model self.r_shift = KLLoss_shift_r(model=model, size_average=False) self.kl_loss = KLLoss_laplace(model=model, size_average=False, mean=10, scale=0.3) def forward(self, inpt, outpt): self.r_shift() return self.kl_loss(inpt,outpt) # Model model = ModelLaplace(input_features = data_train.shape[1], hidden_layer=20, latent_features=20) torch.manual_seed(4) # LOSS criterion = CombinedLoss(model) # INSTANTIATE OPTIMIZER optimizer = torch.optim.SGD(model.parameters(), lr=argv.lrate, weight_decay=1) #Evaluator evaluator = Evaluator(model, data_train, data_val, data_novelty) # Activate matplotlib argv.plots = True with Logger(loglevel=10, log_batch_interval=601) as lg: # CREATE A TRAINER my_trainer = UnsupervisedTrainer(lg, model, criterion, optimizer, trainingstate = Trainingstate(), model_filename="KL_MIN", use_cuda= argv.use_cuda, profile = False, convergence_eps = 1e-3) # START TRAINING my_trainer.fit(dataloader_training=(data_train, 20), epochs=200) evaluator.evaluate_model(argv) # {'AUROC LAT (TRAIN)': 0.8590909090909091, # 'AUROC LAT (VAL)': 0.8752066115702479, # 'AUROC REC (TRAIN)': 0.8677685950413224, # 'AUROC REC (VAL)': 0.8619834710743801}
34.249027
213
0.546353
464
0.052715
0
0
0
0
0
0
1,888
0.214497
c7511256bf0b0f8d7c0f1ccc084e2e9144ad8ab3
2,948
py
Python
sample_architectures/cnn.py
hvarS/PyTorch-Refer
020445e3ae1f3627f39e1ab957cdff44a2127289
[ "MIT" ]
null
null
null
sample_architectures/cnn.py
hvarS/PyTorch-Refer
020445e3ae1f3627f39e1ab957cdff44a2127289
[ "MIT" ]
null
null
null
sample_architectures/cnn.py
hvarS/PyTorch-Refer
020445e3ae1f3627f39e1ab957cdff44a2127289
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """CNN.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1Tq6HUya2PrC0SmyOIFo2c_eVtguRED2q """ import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.utils.data import DataLoader import torchvision.datasets as datasets import torchvision.transforms as transforms class CNN(nn.Module): def __init__(self,in_channels = 1,num_classes = 10): super(CNN,self).__init__() self.conv1 = nn.Conv2d(in_channels= in_channels,out_channels = 8,kernel_size =(3,3),stride = (1,1),padding = (1,1)) self.pool1 = nn.MaxPool2d(kernel_size=(2,2),stride=(2,2)) self.conv2 = nn.Conv2d(in_channels= 8,out_channels = 16,kernel_size =(3,3),stride = (1,1),padding = (1,1)) self.pool2 = nn.MaxPool2d(kernel_size=(2,2),stride=(2,2)) self.fc1 = nn.Linear(16*7*7,num_classes) def forward(self,x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = x.reshape(x.shape[0],-1) x = self.fc1(x) return x model = CNN(1,10) x = torch.randn((64,1,28,28)) print(model(x).shape) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device in_channels = 1 num_classes = 10 learning_rate = 0.001 batch_size = 64 num_epochs = 4 train_dataset = datasets.MNIST(root = "dataset/",train = True,transform = transforms.ToTensor(),download = True) train_loader = DataLoader(dataset=train_dataset,batch_size=64,shuffle=True) test_dataset = train_dataset = datasets.MNIST(root = "dataset/",train = False,transform = transforms.ToTensor(),download = True) test_loader = DataLoader(dataset = test_dataset,batch_size = batch_size,shuffle = True) model = CNN(1,10).to(device = device) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(),lr = learning_rate) for epoch in range(num_epochs): for batch_idx,(data,targets) in enumerate(train_loader): #get data to cuda if possible data = data.cuda() targets = targets.cuda() scores = model(data) loss = criterion(scores,targets) #backward optimizer.zero_grad() loss.backward() #gradient_descent or adam-step optimizer.step() # Check the accuracy for the training step def check_accuracy(loader,model): if loader.dataset.train: print("Checking accuracy on training data") else: print("Checking accuracy on test data") num_correct = 0 num_samples = 0 model.eval() with torch.no_grad(): for x,y in loader: x = x.cuda() y = y.cuda() scores = model(x) _,predictions = scores.max(1) num_correct += (predictions == y).sum() num_samples += predictions.size(0) print(f' Got {num_correct}/{num_samples} with accuracy ={float(num_correct)/float(num_samples)*100:.2f} ') model.train() check_accuracy(train_loader,model) check_accuracy(test_loader,model)
28.07619
128
0.700136
701
0.237788
0
0
0
0
0
0
497
0.168589
c7551a216f55773fcf2668fcef4ad367660f3169
21,599
py
Python
aispace/layers/callbacks/qa_evaluators.py
SmileGoat/AiSpace
35fc120667e4263c99b300815e0bf018f5064a40
[ "Apache-2.0" ]
32
2020-01-16T07:59:03.000Z
2022-03-31T09:24:00.000Z
aispace/layers/callbacks/qa_evaluators.py
SmileGoat/AiSpace
35fc120667e4263c99b300815e0bf018f5064a40
[ "Apache-2.0" ]
9
2020-06-05T03:27:06.000Z
2022-03-12T01:00:17.000Z
aispace/layers/callbacks/qa_evaluators.py
SmileGoat/AiSpace
35fc120667e4263c99b300815e0bf018f5064a40
[ "Apache-2.0" ]
3
2020-06-09T02:22:50.000Z
2021-07-19T06:07:07.000Z
# -*- coding: utf-8 -*- # @Time : 2020-07-30 15:06 # @Author : yingyuankai # @Email : [email protected] # @File : qa_evaluators.py import os import logging import numpy as np import tensorflow as tf import json from pprint import pprint from collections import defaultdict from aispace.utils.eval_utils import calc_em_score, calc_f1_score from aispace.utils.io_utils import save_json from aispace.utils.print_utils import print_boxed from aispace.utils.metrics_utils import ConfusionMatrix __all__ = [ 'EvaluatorForQaSimple', 'EvaluatorForQaWithImpossible' ] logger = logging.getLogger(__name__) class EvaluatorForQaSimple(tf.keras.callbacks.Callback): """ start_top_log_prob and end_top_log_prob's shape is [batch, k] ref: https://keras.io/examples/nlp/text_extraction_with_bert/ """ def __init__(self, validation_dataset, validation_steps, test_dataset, test_steps, report_dir, max_answer_length=64, n_best_size=5): self.validation_dataset = validation_dataset self.validation_steps = validation_steps self.test_dataset = test_dataset self.test_steps = test_steps self.max_answer_length = max_answer_length self.n_best_size = n_best_size self.report_dir = report_dir def on_epoch_end(self, epoch, logs=None): new_logs = self.eval_process(self.validation_dataset, self.validation_steps) logs = logs or {} logs.update(new_logs) print(f"Epoch: {epoch + 1}, val_f1_score: {logs['val_f1_score']:.4f}, val_em_score: {logs['val_em_score']:.4f}, " f"val_f1_em_avg_score: {logs['val_f1_em_avg_score']:.4f}") def on_train_end(self, logs=None): logger.info("Start Evaluate.") if not os.path.exists(self.report_dir): os.makedirs(self.report_dir) new_logs = self.eval_process(self.test_dataset, self.test_steps) save_json(os.path.join(self.report_dir, 'performance.json'), new_logs) print_boxed(f"Question Answer Evaluation") pprint(new_logs) logger.info(f"Save question answer reports in {self.report_dir}") def eval_process(self, dataset, n_steps=None): f1 = 0 em = 0 total_count = 0 skip_count = 0 start_top_res, end_top_res, unique_id_res = self.model.predict(dataset, steps=n_steps) start_top_log_prob, start_top_index = start_top_res[:, :, 0], start_top_res[:, :, 1].astype(np.int) # [b, k] end_top_log_prob, end_top_index = end_top_res[:, :, 0], end_top_res[:, :, 1].astype(np.int) # [b, k] unique_id_res = unique_id_res.astype(np.int) # predict results results = {} for i in range(end_top_index.shape[0]): unique_id = unique_id_res[i][0] itm = { 'unique_id': unique_id, 'start_top_log_prob': start_top_log_prob[i], 'start_top_index': start_top_index[i], 'end_top_log_prob': end_top_log_prob[i], 'end_top_index': end_top_index[i], } results[unique_id] = itm # raw inputs start_n_top, end_n_top = start_top_index.shape[-1], end_top_index.shape[-1] qas_id_to_examples = defaultdict(list) unique_id_to_examples = {} for idx, (inputs, outputs) in enumerate(dataset): if n_steps is not None and idx >= n_steps: break unique_ids = inputs['unique_id'].numpy().astype(np.int).tolist() offsets = inputs['offset'].numpy().astype(np.int).tolist() qas_ids = inputs['qas_id'].numpy().astype(str).tolist() doc_token2char_raw_start_indexs = inputs['doc_token2char_raw_start_index'].numpy().astype(str).tolist() doc_token2char_raw_end_indexs = inputs['doc_token2char_raw_end_index'].numpy().astype(str).tolist() doc_token2doc_indexs = inputs['doc_token2doc_index'].numpy().astype(str).tolist() all_answers = inputs['all_answers'].numpy().astype(str).tolist() answer_texts = inputs['answer_text'].numpy().tolist() context_texts = inputs['context_text'].numpy().tolist() 
question_texts = inputs['question_text'].numpy().tolist() is_impossibles = inputs['is_impossible'].numpy().tolist() p_masks = inputs['p_mask'].numpy().astype(np.int).tolist() for t in range(len(unique_ids)): itm = { 'unique_id': unique_ids[t], 'qas_id': qas_ids[t], 'question_text': question_texts[t].decode("utf8"), 'context_text': context_texts[t].decode("utf8"), 'answer_text': answer_texts[t].decode("utf8"), 'all_answers': json.loads(all_answers[t]), 'doc_token2char_raw_start_index': json.loads(doc_token2char_raw_start_indexs[t]), 'doc_token2char_raw_end_index': json.loads(doc_token2char_raw_end_indexs[t]), 'doc_token2doc_index': json.loads(doc_token2doc_indexs[t]), 'is_impossible': is_impossibles[t], 'p_mask': p_masks[t], 'offset': offsets[t] } unique_id_to_examples[unique_ids[t]] = itm qas_id_to_examples[qas_ids[t]].append(itm) for qas_id, examples in qas_id_to_examples.items(): example_all_predicts = [] answers = set() for example in examples: cur_unique_id = example['unique_id'] if cur_unique_id not in results: continue if example['is_impossible'] == 1: continue # if example['answer_text'] not in answers: # answers.append(example['answer_text']) answers |= set(example['all_answers']) cur_result = results.get(cur_unique_id) cur_start_top_log_prob = cur_result['start_top_log_prob'] cur_start_top_index = cur_result['start_top_index'] cur_end_top_log_prob = cur_result['end_top_log_prob'] cur_end_top_index = cur_result['end_top_index'] cur_p_mask = example['p_mask'] for i in range(start_n_top): start_prob = cur_start_top_log_prob[i] start_index = cur_start_top_index[i] if not cur_p_mask[start_index]: continue for j in range(end_n_top): end_prob = cur_end_top_log_prob[j] end_index = cur_end_top_index[j] if not cur_p_mask[end_index]: continue answer_length = end_index - start_index + 1 if end_index < start_index or answer_length > self.max_answer_length: continue itm = { 'unique_id': cur_unique_id, 'start_prob': start_prob, 'start_index': start_index, 'end_prob': end_prob, 'end_index': end_index, 'predict_score': np.log(start_prob) + np.log(end_prob) } example_all_predicts.append(itm) if len(answers) != 0: total_count += 1 else: skip_count += 1 continue example_all_predicts.sort(key=lambda s: s['predict_score'], reverse=True) example_top_predicts = [] is_visited = set() for example_predict in example_all_predicts: if len(example_top_predicts) >= self.n_best_size: break example_feature = unique_id_to_examples[example_predict['unique_id']] if example_predict['start_index'] - example_feature['offset'] < 0 or example_predict['end_index'] - example_feature['offset'] < 0: predict_text = "" else: predict_start = example_feature['doc_token2char_raw_start_index'][ example_predict['start_index'] - example_feature['offset']] predict_end = example_feature['doc_token2char_raw_end_index'][ example_predict['end_index'] - example_feature['offset']] predict_text = example_feature['context_text'][predict_start: predict_end + 1].strip() if predict_text in is_visited: continue is_visited.add(predict_text) itm = { 'predict_text': predict_text, 'start_prob': example_predict['start_prob'], 'end_prob': example_predict['end_prob'], 'predict_score': example_predict['predict_score'] } example_top_predicts.append(itm) if len(example_top_predicts) == 0: example_top_predicts.append( { 'predict_text': "", 'start_prob': 0., 'end_prob': 0., 'predict_score': 0. 
} ) example_best_predict = example_top_predicts[0] cur_f1 = calc_f1_score(list(answers), example_best_predict['predict_text']) cur_em = calc_em_score(list(answers), example_best_predict['predict_text']) f1 += cur_f1 em += cur_em # debug if cur_f1 == 0 or cur_em == 0: example_output = {} example_output.update(example_best_predict) example_output['question'] = examples[0]['question_text'] example_output['answer'] = answers example_output['f1'] = cur_f1 example_output['em'] = cur_em print(example_output) # total_count = len(qas_id_to_examples) f1_score = f1 / total_count em_score = em / total_count logs = {} logs['skip_count'] = skip_count logs['total'] = total_count logs['val_f1_score'] = f1_score logs['val_em_score'] = em_score logs['val_f1_em_avg_score'] = (em_score + f1_score) / 2. return logs class EvaluatorForQaWithImpossible(tf.keras.callbacks.Callback): """ start_top_log_prob and end_top_log_prob's shape is [batch, k, k] ref: https://keras.io/examples/nlp/text_extraction_with_bert/ """ def __init__(self, validation_dataset, validation_steps, test_dataset, test_steps, report_dir, max_answer_length=64, n_best_size=5, is_impossible_threshold=0.5, weights=[1., 1., 1.]): self.validation_dataset = validation_dataset self.validation_steps = validation_steps self.test_dataset = test_dataset self.test_steps = test_steps self.max_answer_length = max_answer_length self.n_best_size = n_best_size self.report_dir = report_dir self.is_impossible_threshold = is_impossible_threshold self.weights = weights def on_epoch_end(self, epoch, logs=None): new_logs = self.eval_process(self.validation_dataset, self.validation_steps) logs = logs or {} logs.update(new_logs) print(f"\nEpoch: {epoch + 1}, val_f1_score: {logs['val_f1_score']:.4f}, " f"val_em_score: {logs['val_em_score']:.4f}, " f"val_f1_em_avg_score: {logs['val_f1_em_avg_score']:.4f}," f" val_f1_for_impossible: {logs['val_f1_for_impossible']:.4f}," f" val_f1_avg_score: {logs['val_f1_avg_score']:.4f},") def on_train_end(self, logs=None): logger.info("Start Evaluate.") if not os.path.exists(self.report_dir): os.makedirs(self.report_dir) new_logs = self.eval_process(self.test_dataset, self.test_steps) save_json(os.path.join(self.report_dir, 'performance.json'), new_logs) print_boxed(f"Question Answer Evaluation") pprint(new_logs) logger.info(f"Save question answer reports in {self.report_dir}") def eval_process(self, dataset, n_steps=None): f1 = 0 em = 0 total_count = 0 skip_count = 0 start_top_res, end_top_res, answer_prob, unique_id_res = self.model.predict(dataset, steps=n_steps) start_top_log_prob, start_top_index = start_top_res[:, :, 0], start_top_res[:, :, 1].astype(np.int) # [b, k] end_top_log_prob, end_top_index = end_top_res[:, :, :, 0], end_top_res[:, :, :, 1].astype(np.int) # [b, k, k] unique_id_res = unique_id_res.astype(np.int) # predict results results = {} for i in range(end_top_index.shape[0]): unique_id = unique_id_res[i][0] itm = { 'unique_id': unique_id, 'start_top_log_prob': start_top_log_prob[i], 'start_top_index': start_top_index[i], 'end_top_log_prob': end_top_log_prob[i], 'end_top_index': end_top_index[i], 'is_impossible_prob': answer_prob[i][0] } results[unique_id] = itm # raw inputs start_n_top, end_n_top = end_top_index.shape[1:] qas_id_to_examples = defaultdict(list) unique_id_to_examples = {} for idx, (inputs, outputs) in enumerate(dataset): if n_steps is not None and idx >= n_steps: break unique_ids = inputs['unique_id'].numpy().astype(np.int).tolist() offsets = inputs['offset'].numpy().astype(np.int).tolist() qas_ids = 
inputs['qas_id'].numpy().astype(str).tolist() doc_token2char_raw_start_indexs = inputs['doc_token2char_raw_start_index'].numpy().astype(str).tolist() doc_token2char_raw_end_indexs = inputs['doc_token2char_raw_end_index'].numpy().astype(str).tolist() doc_token2doc_indexs = inputs['doc_token2doc_index'].numpy().astype(str).tolist() all_answers = inputs['all_answers'].numpy().astype(str).tolist() answer_texts = inputs['answer_text'].numpy().tolist() context_texts = inputs['context_text'].numpy().tolist() question_texts = inputs['question_text'].numpy().tolist() is_impossibles = inputs['is_impossible'].numpy().tolist() p_masks = inputs['p_mask'].numpy().astype(np.int).tolist() for t in range(len(unique_ids)): itm = { 'unique_id': unique_ids[t], 'qas_id': qas_ids[t], 'question_text': question_texts[t].decode("utf8"), 'context_text': context_texts[t].decode("utf8"), 'answer_text': answer_texts[t].decode("utf8"), 'all_answers': json.loads(all_answers[t]), 'doc_token2char_raw_start_index': json.loads(doc_token2char_raw_start_indexs[t]), 'doc_token2char_raw_end_index': json.loads(doc_token2char_raw_end_indexs[t]), 'doc_token2doc_index': json.loads(doc_token2doc_indexs[t]), 'is_impossible': is_impossibles[t], 'p_mask': p_masks[t], 'offset': offsets[t] } unique_id_to_examples[unique_ids[t]] = itm qas_id_to_examples[qas_ids[t]].append(itm) ground_truth_for_impossible, predictions_for_impossible = [], [] for qas_id, examples in qas_id_to_examples.items(): example_all_predicts = [] answers = set() for example in examples: cur_unique_id = example['unique_id'] if cur_unique_id not in results: continue # if example['answer_text'] not in answers: # answers.append(example['answer_text']) answers |= set(example['all_answers']) cur_result = results.get(cur_unique_id) cur_start_top_log_prob = cur_result['start_top_log_prob'] cur_start_top_index = cur_result['start_top_index'] cur_end_top_log_prob = cur_result['end_top_log_prob'] cur_end_top_index = cur_result['end_top_index'] ground_truth_for_impossible.append(example['is_impossible']) predictions_for_impossible.append(int(cur_result['is_impossible_prob'] >= self.is_impossible_threshold)) if example['is_impossible'] == 1: continue cur_p_mask = example['p_mask'] for i in range(start_n_top): start_prob = cur_start_top_log_prob[i] start_index = cur_start_top_index[i] if not cur_p_mask[start_index]: continue for j in range(end_n_top): end_prob = cur_end_top_log_prob[i, j] end_index = cur_end_top_index[i, j] if not cur_p_mask[end_index]: continue answer_length = end_index - start_index + 1 if end_index < start_index or answer_length > self.max_answer_length: continue itm = { 'unique_id': cur_unique_id, 'start_prob': start_prob, 'start_index': start_index, 'end_prob': end_prob, 'end_index': end_index, 'predict_score': np.log(end_prob) } example_all_predicts.append(itm) if len(answers) != 0 and "" not in answers: total_count += 1 else: skip_count += 1 continue example_all_predicts.sort(key=lambda s: s['predict_score'], reverse=True) example_top_predicts = [] is_visited = set() for example_predict in example_all_predicts: if len(example_top_predicts) >= self.n_best_size: break example_feature = unique_id_to_examples[example_predict['unique_id']] if example_predict['start_index'] - example_feature['offset'] < 0 or example_predict['end_index'] - example_feature['offset'] < 0: predict_text = "" else: predict_start = example_feature['doc_token2char_raw_start_index'][ example_predict['start_index'] - example_feature['offset']] predict_end = 
example_feature['doc_token2char_raw_end_index'][ example_predict['end_index'] - example_feature['offset']] predict_text = example_feature['context_text'][predict_start: predict_end + 1].strip() if predict_text in is_visited: continue is_visited.add(predict_text) itm = { 'predict_text': predict_text, 'start_prob': example_predict['start_prob'], 'end_prob': example_predict['end_prob'], 'predict_score': example_predict['predict_score'] } example_top_predicts.append(itm) if len(example_top_predicts) == 0: example_top_predicts.append( { 'predict_text': "", 'start_prob': 0., 'end_prob': 0., 'predict_score': 0. } ) example_best_predict = example_top_predicts[0] cur_f1 = calc_f1_score(list(answers), example_best_predict['predict_text']) cur_em = calc_em_score(list(answers), example_best_predict['predict_text']) f1 += cur_f1 em += cur_em # debug if cur_f1 == 0 or cur_em == 0: example_output = {} example_output.update(example_best_predict) example_output['question'] = examples[0]['question_text'] example_output['answer'] = answers example_output['f1'] = cur_f1 example_output['em'] = cur_em print(example_output) # total_count = len(qas_id_to_examples) f1_score = f1 / total_count em_score = em / total_count cm = ConfusionMatrix(ground_truth_for_impossible, predictions_for_impossible) logs = {} logs['skip_count'] = skip_count logs['total'] = total_count logs['val_f1_score'] = f1_score logs['val_em_score'] = em_score logs['val_f1_em_avg_score'] = (em_score * self.weights[0] + f1_score * self.weights[1]) / sum(self.weights[:2]) logs['val_f1_for_impossible'] = cm.avg_f1_score(average='macro') logs['val_accuracy_for_impossible'] = cm.overall_accuracy() logs['val_f1_avg_score'] = (em_score * self.weights[0] + f1_score * self.weights[1] + logs['val_f1_for_impossible'] * self.weights[2]) / sum(self.weights) return logs
44.997917
146
0.570582
20,969
0.970832
0
0
0
0
0
0
3,931
0.181999
c75685d19bc8be9c76eb30777f9bd2a54b73db11
682
py
Python
tests/conftest.py
junjunjunk/torchgpipe
3db11e1da0fc432eb3f3807ddcb22967973c8b28
[ "Apache-2.0" ]
532
2019-05-27T09:23:04.000Z
2022-03-31T04:07:55.000Z
tests/conftest.py
junjunjunk/torchgpipe
3db11e1da0fc432eb3f3807ddcb22967973c8b28
[ "Apache-2.0" ]
29
2019-07-01T19:49:54.000Z
2021-11-28T00:51:00.000Z
tests/conftest.py
junjunjunk/torchgpipe
3db11e1da0fc432eb3f3807ddcb22967973c8b28
[ "Apache-2.0" ]
68
2019-05-27T09:27:32.000Z
2022-03-27T13:52:18.000Z
import pytest
import torch


@pytest.fixture(autouse=True)
def manual_seed_zero():
    torch.manual_seed(0)


@pytest.fixture(scope='session')
def cuda_sleep():
    # Warm-up CUDA.
    torch.empty(1, device='cuda')

    # From test/test_cuda.py in PyTorch.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    torch.cuda._sleep(1000000)
    end.record()
    end.synchronize()
    cycles_per_ms = 1000000 / start.elapsed_time(end)

    def cuda_sleep(seconds):
        torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))
    return cuda_sleep


def pytest_report_header():
    return f'torch: {torch.__version__}'
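A minimal sketch, assuming a CUDA-capable test machine, of how a test might consume the `cuda_sleep` fixture above; the test name and timing bound are illustrative and not from the repository.

# Hypothetical consumer of the cuda_sleep fixture (not part of the repository).
import torch

def test_cuda_sleep_enqueues_gpu_work(cuda_sleep):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    cuda_sleep(0.1)            # busy-wait roughly 0.1 s on the current CUDA stream
    end.record()
    end.synchronize()
    assert start.elapsed_time(end) > 50   # elapsed_time is in milliseconds; loose bound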
22
62
0.696481
0
0
0
0
578
0.847507
0
0
95
0.139296
c756e2f724651746fcaf020b50f3e0f2bdeb6442
54,090
py
Python
lib/python/treadmill/scheduler/__init__.py
drienyov/treadmill
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
[ "Apache-2.0" ]
null
null
null
lib/python/treadmill/scheduler/__init__.py
drienyov/treadmill
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
[ "Apache-2.0" ]
null
null
null
lib/python/treadmill/scheduler/__init__.py
drienyov/treadmill
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
[ "Apache-2.0" ]
null
null
null
"""Treadmill hierarchical scheduler. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import abc import collections import datetime import heapq import itertools import logging import operator import sys import time import enum import numpy as np import six _LOGGER = logging.getLogger(__name__) MAX_PRIORITY = 100 DEFAULT_RANK = 100 _UNPLACED_RANK = sys.maxsize DIMENSION_COUNT = None _MAX_UTILIZATION = float('inf') _GLOBAL_ORDER_BASE = time.mktime((2014, 1, 1, 0, 0, 0, 0, 0, 0)) # 21 day DEFAULT_SERVER_UPTIME = 21 * 24 * 60 * 60 # 1 day MIN_SERVER_UPTIME = 1 * 24 * 60 * 60 # 7 days DEFAULT_MAX_APP_LEASE = 7 * 24 * 60 * 60 # Default partition threshold DEFAULT_THRESHOLD = 0.9 # pylint: disable=C0302,too-many-lines def _bit_count(value): """Returns number of bits set. """ count = 0 while value: value &= value - 1 count += 1 return count def zero_capacity(): """Returns zero capacity vector. """ assert DIMENSION_COUNT is not None, 'Dimension count not set.' return np.zeros(DIMENSION_COUNT) def eps_capacity(): """Returns eps capacity vector. """ assert DIMENSION_COUNT is not None, 'Dimension count not set.' return np.array( [np.finfo(float).eps for _x in range(0, DIMENSION_COUNT)] ) def _global_order(): """Use timestamp in nanoseconds, from Jan 1st 2014, to break tie in scheduling conflicts for apps of the same priority, in a FIFO fashion. """ # Take the current EPOCH in nanosec global_order = int(time.time() * 1000000) - _GLOBAL_ORDER_BASE return global_order def utilization(demand, allocated, available): """Calculates utilization score. """ return np.max(np.subtract(demand, allocated) / available) def _all(oper, left, right): """Short circuit all for ndarray. """ return all( oper(ai, bi) for ai, bi in six.moves.zip(left, right) ) def _any(oper, left, right): """Short circuit any for ndarray. """ return any( oper(ai, bi) for ai, bi in six.moves.zip(left, right) ) def _any_eq(left, right): """Short circuit any eq for ndarray. """ return _any(operator.eq, left, right) def _any_isclose(left, right): """Short circuit any isclose for ndarray. """ return _any(np.isclose, left, right) def _any_lt(left, right): """Short circuit any lt for ndarray. """ return _any(operator.lt, left, right) def _any_le(left, right): """Short circuit any le for ndarray. """ return _any(operator.le, left, right) def _any_gt(left, right): """Short circuit any gt for ndarray. """ return _any(operator.gt, left, right) def _any_ge(left, right): """Short circuit any ge for ndarray. """ return _any(operator.ge, left, right) def _all_eq(left, right): """Short circuit all eq for ndarray. """ return _all(operator.eq, left, right) def _all_isclose(left, right): """Short circuit all isclose for ndarray. """ return _all(np.isclose, left, right) def _all_lt(left, right): """Short circuit all lt for ndarray. """ return _all(operator.lt, left, right) def _all_le(left, right): """Short circuit all le for ndarray. """ return _all(operator.le, left, right) def _all_gt(left, right): """Short circuit all gt for ndarray. """ return _all(operator.gt, left, right) def _all_ge(left, right): """Short circuit all ge for ndarray. """ return _all(operator.ge, left, right) class IdentityGroup: """Identity group. """ __slots__ = ( 'available', 'count', ) def __init__(self, count=0): self.count = count self.available = set(range(0, count)) def acquire(self): """Return next available identity or None. 
""" if self.available: return self.available.pop() else: return None def release(self, ident): """Mark identity as available. """ if ident < self.count: self.available.add(ident) def adjust(self, count): """Adjust identities with new count. If count is larger, add additional identities to the set. If count is lower, remove identities that are no longer valid. All apps that have invalid identities will be adjusted in the schedule cycle. """ if count >= self.count: self.available ^= set(six.moves.xrange(self.count, count)) else: self.available -= set(six.moves.xrange(count, self.count)) self.count = count class State(enum.Enum): """Enumeration of node/server states. """ # Ready to accept new applications. # TODO: Fix attribute name up = 'up' # pylint: disable=invalid-name # Applications need to be migrated. down = 'down' # Existing applications can stay, but will not accept new. frozen = 'frozen' class Affinity: """Model affinity and affinity limits. """ __slots__ = ( 'name', 'limits', 'constraints', ) def __init__(self, name, limits=None): self.name = name self.limits = collections.defaultdict(lambda: float('inf')) if limits: self.limits.update(limits) # freeze affinity shape constraints. self.constraints = tuple([self.name] + sorted(self.limits.values())) class Application: """Application object. """ __slots__ = ( 'global_order', 'name', 'demand', 'affinity', 'priority', 'allocation', 'data_retention_timeout', 'server', 'lease', 'identity', 'identity_group', 'identity_group_ref', 'schedule_once', 'evicted', 'placement_expiry', 'renew', 'unschedule', 'final_rank', 'final_util', 'constraints', ) def __init__(self, name, priority, demand, affinity, affinity_limits=None, data_retention_timeout=0, lease=0, identity_group=None, identity=None, schedule_once=False): self.global_order = _global_order() self.allocation = None self.server = None self.name = name self.affinity = Affinity(affinity, affinity_limits) self.priority = priority self.demand = np.array(demand, dtype=float) self.data_retention_timeout = data_retention_timeout self.lease = lease self.identity_group = identity_group self.identity = identity self.identity_group_ref = None self.schedule_once = schedule_once self.evicted = False self.unschedule = False self.placement_expiry = None self.renew = False def shape(self): """Return tuple of application (constraints, demand). Application shape is tuple of constraints that affect application placement. Currently this includes affinity constraints and app lease time. """ constraints = (self.affinity.constraints + (self.lease,)) if self.allocation: constraints += self.allocation.constraints return constraints, self.demand def acquire_identity(self): """Try to acquire identity if belong to the group. Returns True if successfull or if identity group is none. """ if not self.identity_group_ref: return True if self.identity is None: self.identity = self.identity_group_ref.acquire() _LOGGER.info('Acquired identity: %s: %s - %s', self.name, self.identity_group, self.identity) return self.identity is not None def release_identity(self): """Release app identity. """ if self.identity_group_ref and self.identity is not None: self.identity_group_ref.release(self.identity) self.identity = None def force_set_identity(self, identity): """Force identity of the app. """ if identity is not None: assert self.identity_group_ref self.identity = identity self.identity_group_ref.available.discard(identity) def has_identity(self): """Checks if app has identity if identity group is specified. 
""" return self.identity_group_ref is None or self.identity is not None @property def traits(self): """The app traits are derived from allocation. """ if self.allocation is None: return 0 else: return self.allocation.traits @six.add_metaclass(abc.ABCMeta) class Strategy: """Base class for all placement strategies. """ @abc.abstractmethod def suggested_node(self): """Suggested node that should be tried first. """ pass @abc.abstractmethod def next_node(self): """Next node to try, if previous suggestion was rejected. """ pass class SpreadStrategy(Strategy): """Spread strategy will suggest new node for each subsequent placement. """ __slots__ = ( 'current_idx', 'node', ) def __init__(self, node): self.current_idx = 0 self.node = node def suggested_node(self): """Suggest next node from the cycle. """ for _ in six.moves.xrange(0, len(self.node.children)): if self.current_idx == len(self.node.children): self.current_idx = 0 current = self.node.children[self.current_idx] self.current_idx += 1 if current: return current # Not a single non-none node. return None def next_node(self): """Suggest next node from the cycle. """ return self.suggested_node() class PackStrategy(Strategy): """Pack strategy will suggest same node until it is full. """ __slots__ = ( 'current_idx', 'node', ) def __init__(self, node): self.current_idx = 0 self.node = node def suggested_node(self): """Suggest same node as previous placement. """ for _ in six.moves.xrange(0, len(self.node.children)): if self.current_idx == len(self.node.children): self.current_idx = 0 node = self.node.children[self.current_idx] if node: return node return None def next_node(self): """Suggest next node from the cycle. """ self.current_idx += 1 return self.suggested_node() class TraitSet: """Hierarchical set of traits. """ __slots__ = ( 'self_traits', 'children_traits', 'traits', ) def __init__(self, traits=0): if not traits: traits = 0 # Private traits. assert isinstance(traits, six.integer_types) self.self_traits = traits # Union of all children traits. self.children_traits = dict() self._recalculate() def _recalculate(self): """Calculate combined set of all traits. """ self.traits = self.self_traits for trait in six.itervalues(self.children_traits): self.traits |= trait def has(self, traits): """Check if all traits are present. """ return (self.traits & traits) == traits def add(self, child, traits): """Add a child with given traits. """ # Update children traits. self.children_traits[child] = traits self._recalculate() def remove(self, child): """Remove child traits from the list. """ if child in self.children_traits: del self.children_traits[child] self._recalculate() def is_same(self, other): """Compares own traits, ignore child. """ return self.self_traits == other.self_traits class AffinityCounter: """Manages affinity count. """ __slots__ = ( 'affinity_counter', ) def __init__(self): self.affinity_counter = collections.Counter() class Node: """Abstract placement node. 
""" __slots__ = ( 'name', 'level', 'free_capacity', 'parent', 'children', 'children_by_name', 'traits', 'labels', 'affinity_counters', 'valid_until', '_state', '_state_since', ) def __init__(self, name, traits, level, valid_until=0): self.name = name self.level = level self.free_capacity = zero_capacity() self.parent = None self.children = list() self.children_by_name = dict() self.traits = TraitSet(traits) self.labels = set() self.affinity_counters = collections.Counter() self.valid_until = valid_until self._state = State.up self._state_since = time.time() def empty(self): """Return true if there are no children. """ return not bool(self.children_by_name) def children_iter(self): """Iterate over active children. """ for child in self.children: if child: yield child def get_state(self): """Returns tuple of (state, since). """ return self. _state, self._state_since def set_state(self, state, since): """Sets the state and time since. """ if self._state is not state: self._state_since = since self._state = state _LOGGER.debug('state: %s - (%s, %s)', self.name, self._state, self._state_since) @property def state(self): """Return current state. """ return self._state @state.setter def state(self, new_state): """Set node state and records time. """ self.set_state(new_state, time.time()) def add_child_traits(self, node): """Recursively add child traits up. """ self.traits.add(node.name, node.traits.traits) if self.parent: self.parent.remove_child_traits(self.name) self.parent.add_child_traits(self) def adjust_valid_until(self, child_valid_until): """Recursively adjust valid until time. """ if child_valid_until: self.valid_until = max(self.valid_until, child_valid_until) else: if self.empty(): self.valid_until = 0 else: self.valid_until = max([node.valid_until for node in self.children_iter()]) if self.parent: self.parent.adjust_valid_until(child_valid_until) def remove_child_traits(self, node_name): """Recursively remove child traits up. """ self.traits.remove(node_name) if self.parent: self.parent.remove_child_traits(self.name) self.parent.add_child_traits(self) def reset_children(self): """Reset children to empty list. """ for child in self.children_iter(): child.parent = None self.children = list() self.children_by_name = dict() def add_node(self, node): """Add child node, set the traits and propagate traits up. """ assert node.parent is None assert node.name not in self.children_by_name node.parent = self self.children.append(node) self.children_by_name[node.name] = node self.add_child_traits(node) self.increment_affinity(node.affinity_counters) self.add_labels(node.labels) self.adjust_valid_until(node.valid_until) def add_labels(self, labels): """Recursively add labels to self and parents. """ self.labels.update(labels) if self.parent: self.parent.add_labels(self.labels) def remove_node(self, node): """Remove child node and adjust the traits. """ assert node.name in self.children_by_name del self.children_by_name[node.name] for idx in six.moves.xrange(0, len(self.children)): if self.children[idx] == node: self.children[idx] = None self.remove_child_traits(node.name) self.decrement_affinity(node.affinity_counters) self.adjust_valid_until(None) node.parent = None return node def remove_node_by_name(self, nodename): """Removes node by name. """ assert nodename in self.children_by_name return self.remove_node(self.children_by_name[nodename]) def check_app_constraints(self, app): """Find app placement on the node. 
""" if app.allocation is not None: if app.allocation.label not in self.labels: _LOGGER.info('Missing label: %s on %s', app.allocation.label, self.name) return False if app.traits != 0 and not self.traits.has(app.traits): _LOGGER.info('Missing traits: %s on %s', app.traits, self.name) return False if not self.check_app_affinity_limit(app): return False if _any_gt(app.demand, self.free_capacity): _LOGGER.info('Not enough free capacity: %s', self.free_capacity) return False return True def check_app_affinity_limit(self, app): """Check app affinity limits """ count = self.affinity_counters[app.affinity.name] limit = app.affinity.limits[self.level] return count < limit def put(self, _app): """Abstract method, should never be called. """ raise Exception('Not implemented.') def size(self, label): """Returns total capacity of the children. """ if self.empty() or label not in self.labels: return eps_capacity() return np.sum([ n.size(label) for n in self.children_iter()], 0) def members(self): """Return set of all leaf node names. """ names = dict() for node in self.children_iter(): names.update(node.members()) return names def increment_affinity(self, counters): """Increment affinity counters recursively. """ self.affinity_counters.update(counters) if self.parent: self.parent.increment_affinity(counters) def decrement_affinity(self, counters): """Decrement affinity counters recursively. """ self.affinity_counters.subtract(counters) if self.parent: self.parent.decrement_affinity(counters) class Bucket(Node): """Collection of nodes/buckets. """ __slots__ = ( 'affinity_strategies', 'traits', ) _default_strategy_t = SpreadStrategy def __init__(self, name, traits=0, level=None): super(Bucket, self).__init__(name, traits, level) self.affinity_strategies = dict() self.traits = TraitSet(traits) def set_affinity_strategy(self, affinity, strategy_t): """Initilaizes placement strategy for given affinity. """ self.affinity_strategies[affinity] = strategy_t(self) def get_affinity_strategy(self, affinity): """Returns placement strategy for the affinity, defaults to spread. """ if affinity not in self.affinity_strategies: self.set_affinity_strategy(affinity, Bucket._default_strategy_t) return self.affinity_strategies[affinity] def adjust_capacity_up(self, new_capacity): """Node can only increase capacity. """ self.free_capacity = np.maximum(self.free_capacity, new_capacity) if self.parent: self.parent.adjust_capacity_up(self.free_capacity) def adjust_capacity_down(self, prev_capacity=None): """Called when capacity is decreased. """ if self.empty(): self.free_capacity = zero_capacity() if self.parent: self.parent.adjust_capacity_down() else: if prev_capacity is not None and _all_lt(prev_capacity, self.free_capacity): return free_capacity = zero_capacity() for child_node in self.children_iter(): if child_node.state is not State.up: continue free_capacity = np.maximum(free_capacity, child_node.free_capacity) # If resulting free_capacity is less the previous, we need to # adjust the parent, otherwise, nothing needs to be done. prev_capacity = self.free_capacity.copy() if _any_lt(free_capacity, self.free_capacity): self.free_capacity = free_capacity if self.parent: self.parent.adjust_capacity_down(prev_capacity) def add_node(self, node): """Adds node to the bucket. """ super(Bucket, self).add_node(node) self.adjust_capacity_up(node.free_capacity) def remove_node(self, node): """Removes node from the bucket. 
""" super(Bucket, self).remove_node(node) # if _any_isclose(self.free_capacity, node.free_capacity): self.adjust_capacity_down(node.free_capacity) return node def put(self, app): """Try to put app on one of the nodes that belong to the bucket. """ # Check if it is feasible to put app on some node low in the # hierarchy _LOGGER.debug('bucket.put: %s => %s', app.name, self.name) if not self.check_app_constraints(app): return False strategy = self.get_affinity_strategy(app.affinity.name) node = strategy.suggested_node() if node is None: _LOGGER.debug('All nodes in the bucket deleted.') return False nodename0 = node.name first = True while True: # End of iteration. if not first and node.name == nodename0: _LOGGER.debug('Finished iterating on: %s.', self.name) break first = False _LOGGER.debug('Trying node: %s:', node.name) if node.state is not State.up: _LOGGER.debug('Node not up: %s, %s', node.name, node.state) else: if node.put(app): return True node = strategy.next_node() return False class Server(Node): """Server object, final app placement. """ __slots__ = ( 'init_capacity', 'apps', 'up_since', 'presence_id', ) def __init__(self, name, capacity, up_since=0, valid_until=0, traits=0, label=None, presence_id=None): super(Server, self).__init__(name, traits=traits, level='server', valid_until=valid_until) self.labels = set([label]) self.init_capacity = np.array(capacity, dtype=float) self.free_capacity = self.init_capacity.copy() self.apps = dict() self.up_since = up_since self.presence_id = presence_id def __str__(self): return 'server: %s %s' % (self.name, self.init_capacity) def is_same(self, other): """Compares capacity and traits against another server. valid_until is ignored, as server comes up after reboot will have different valid_until value. """ return (self.labels == other.labels and _all_eq(self.init_capacity, other.init_capacity) and self.traits.is_same(other.traits)) def put(self, app): """Tries to put the app on the server. """ assert app.name not in self.apps _LOGGER.debug('server.put: %s => %s', app.name, self.name) if not self.check_app_lifetime(app): return False if not self.check_app_constraints(app): return False prev_capacity = self.free_capacity.copy() self.free_capacity -= app.demand self.apps[app.name] = app self.increment_affinity([app.affinity.name]) app.server = self.name if self.parent: self.parent.adjust_capacity_down(prev_capacity) if app.placement_expiry is None: app.placement_expiry = time.time() + app.lease return True def restore(self, app, placement_expiry=None): """Put app back on the server, ignore app lifetime. """ _LOGGER.debug('server.restore: %s => %s (%s)', app.name, self.name, placement_expiry) lease = app.lease # If not explicit if placement_expiry is None: placement_expiry = app.placement_expiry app.lease = 0 rc = self.put(app) app.lease = lease app.placement_expiry = placement_expiry return rc def renew(self, app): """Try to extend the placement for app lease. """ can_renew = self.check_app_lifetime(app) if can_renew: app.placement_expiry = time.time() + app.lease return can_renew def check_app_lifetime(self, app): """Check if the app lease fits until server is rebooted. """ # app with 0 lease can be placed anywhere (ignore potentially # expired servers) if not app.lease: return True return time.time() + app.lease < self.valid_until def remove(self, app_name): """Removes app from the server. 
""" assert app_name in self.apps app = self.apps[app_name] del self.apps[app_name] app.server = None app.evicted = True app.unschedule = False app.placement_expiry = None self.free_capacity += app.demand self.decrement_affinity([app.affinity.name]) if self.parent: self.parent.adjust_capacity_up(self.free_capacity) def remove_all(self): """Remove all apps. """ # iterate over copy of the keys, as we are removing them in the loop. for appname in list(self.apps): self.remove(appname) def size(self, label): """Return server capacity. """ if label not in self.labels: return eps_capacity() return self.init_capacity def members(self): """Return set of all leaf node names. """ return {self.name: self} def set_state(self, state, since): """Change host state. """ if self.state is state: return super(Server, self).set_state(state, since) if state == State.up: if self.parent: self.parent.adjust_capacity_up(self.free_capacity) elif state in (State.down, State.frozen): if self.parent: self.parent.adjust_capacity_down(self.free_capacity) else: raise Exception('Invalid state: ' % state) class Allocation: """Allocation manages queue of apps sharing same reserved capacity. In reality allocation is tied to grn via application proid. Applications within the allocation are organized by application priority. Allocations are ranked, and the rank is used to globally order applications from different allocations into global queue. Default allocation has rank 100. Defining allocation with lower rank will result in all it's applications to be evaluated first regardless of utilization. This is used to model "system" applications that should be always present regardless of utilization. Allocation queue can be capped with max_utilization parameter. If set, it will specify the max_utilization which will be considered for scheduling. """ __slots__ = ( 'reserved', 'rank', 'rank_adjustment', 'traits', 'label', 'max_utilization', 'apps', 'sub_allocations', 'path', 'constraints', ) def __init__(self, reserved=None, rank=None, traits=None, max_utilization=None, partition=None): self.set_reserved(reserved) self.rank = None self.rank_adjustment = 0 self.traits = 0 self.label = partition self.max_utilization = _MAX_UTILIZATION self.reserved = zero_capacity() self.set_max_utilization(max_utilization) self.set_traits(traits) self.update(reserved, rank, 0) self.apps = dict() self.sub_allocations = dict() self.path = [] # Freeze shape constraintes. self.constraints = (self.label, self.traits,) @property def name(self): """Returns full allocation name. """ return '/'.join(self.path) def set_reserved(self, reserved): """Update reserved capacity. """ if reserved is None: self.reserved = zero_capacity() elif isinstance(reserved, int): assert reserved == 0 self.reserved = zero_capacity() elif isinstance(reserved, float): assert reserved == 0.0 self.reserved = zero_capacity() elif isinstance(reserved, list): assert len(reserved) == DIMENSION_COUNT self.reserved = np.array(reserved, dtype=float) elif isinstance(reserved, np.ndarray): self.reserved = reserved else: assert 'Unsupported type: %r' % type(reserved) def update(self, reserved, rank, rank_adjustment, max_utilization=None): """Updates allocation. """ if rank is not None: self.rank = rank else: self.rank = DEFAULT_RANK if rank_adjustment is not None: self.rank_adjustment = rank_adjustment self.set_reserved(reserved) self.set_max_utilization(max_utilization) def set_max_utilization(self, max_utilization): """Sets max_utilization, accounting for default None value. 
""" if max_utilization is not None: self.max_utilization = max_utilization else: self.max_utilization = _MAX_UTILIZATION def set_traits(self, traits): """Set traits, account for default None value. """ if not traits: self.traits = 0 else: self.traits = traits def add(self, app): """Add application to the allocation queue. Once added, the scheduler will make an attempt to place the app on one of the cell nodes. """ # Check that there are no duplicate app names. if app.name in self.apps: _LOGGER.warning( 'Duplicate app on alllocation queue: %s', app.name ) return app.allocation = self self.apps[app.name] = app def remove(self, name): """Remove application from the allocation queue. """ if name in self.apps: self.apps[name].allocation = None del self.apps[name] def priv_utilization_queue(self): """Returns tuples for sorted by global utilization. Apps in the queue are ordered by priority, insertion order. Adding or removing maintains invariant that apps utilization monotonically increases as well. Returns local prioritization queue in a tuple where first element is utilization ratio, so that this queue is suitable for merging into global priority queue. """ def _app_key(app): """Compares apps by priority, state, global index """ return (-app.priority, 0 if app.server else 1, app.global_order, app.name) prio_queue = sorted(six.viewvalues(self.apps), key=_app_key) acc_demand = zero_capacity() available = self.reserved + np.finfo(float).eps util_before = utilization(acc_demand, self.reserved, available) for app in prio_queue: acc_demand = acc_demand + app.demand util_after = utilization(acc_demand, self.reserved, available) # Priority 0 apps are treated specially - utilization is set to # max float. # # This ensures that they are at the end of the all queues. if app.priority == 0: util_before = _MAX_UTILIZATION util_after = _MAX_UTILIZATION # All things equal, already scheduled applications have priority # over pending. pending = 0 if app.server else 1 if util_after <= self.max_utilization - 1: rank = self.rank if util_before < 0: rank -= self.rank_adjustment else: rank = _UNPLACED_RANK entry = (rank, util_before, util_after, pending, app.global_order, app) util_before = util_after yield entry def utilization_queue(self, free_capacity, visitor=None): """Returns utilization queue including the sub-allocs. All app queues from self and sub-allocs are merged in standard order, and then utilization is recalculated based on total reserved capacity of this alloc and sub-allocs combined. The function maintains invariant that any app (self or inside sub-alloc with utilization < 1 will remain with utilzation < 1. """ total_reserved = self.total_reserved() queues = [ alloc.utilization_queue(free_capacity, visitor) for alloc in six.itervalues(self.sub_allocations) ] queues.append(self.priv_utilization_queue()) acc_demand = zero_capacity() available = total_reserved + free_capacity + np.finfo(float).eps util_before = utilization(acc_demand, total_reserved, available) for item in heapq.merge(*queues): rank, _u_before, _u_after, pending, order, app = item acc_demand = acc_demand + app.demand util_after = utilization(acc_demand, total_reserved, available) if app.priority == 0: util_before = _MAX_UTILIZATION util_after = _MAX_UTILIZATION # - lower rank allocations take precedence. 
# - for same rank, utilization takes precedence # - False < True, so for apps with same utilization we prefer # those that already running (False == not pending) # - Global order entry = (rank, util_before, util_after, pending, order, app) if visitor: visitor(self, entry, acc_demand) util_before = util_after yield entry def total_reserved(self): """Total reserved capacity including sub-allocs. """ return six.moves.reduce( lambda acc, alloc: acc + alloc.total_reserved(), six.itervalues(self.sub_allocations), self.reserved ) def add_sub_alloc(self, name, alloc): """Add child allocation. """ self.sub_allocations[name] = alloc assert not alloc.path alloc.path = self.path + [name] alloc.label = self.label def remove_sub_alloc(self, name): """Remove chlid allocation. """ if name in self.sub_allocations: del self.sub_allocations[name] def get_sub_alloc(self, name): """Return sub allocation, create empty if it does not exist. """ if name not in self.sub_allocations: self.add_sub_alloc(name, Allocation()) return self.sub_allocations[name] def all_apps(self): """Return all apps in allocation and sub-allocations.""" all_apps = list(six.itervalues(self.apps)) for alloc in six.itervalues(self.sub_allocations): all_apps.extend(alloc.all_apps()) return all_apps class Partition: """Cell partition. """ __slots__ = ( 'allocation', 'max_server_uptime', 'max_lease', 'threshold', 'label', '_reboot_buckets', '_reboot_dates', '_reboot_last', ) def __init__(self, max_server_uptime=None, max_lease=None, threshold=None, label=None, reboot_schedule=None, now=None): self.label = label self.allocation = Allocation(partition=label) # Default - if not max_server_uptime: max_server_uptime = DEFAULT_SERVER_UPTIME if not max_lease: max_lease = DEFAULT_MAX_APP_LEASE if not threshold: threshold = DEFAULT_THRESHOLD self.max_server_uptime = max_server_uptime self.max_lease = max_lease self.threshold = threshold if not reboot_schedule: # reboot every day reboot_schedule = {day: (23, 59, 59) for day in range(7)} if not now: now = time.time() self._reboot_dates = reboot_dates( reboot_schedule, start_date=datetime.date.fromtimestamp(now) ) self._reboot_buckets = [] self._reboot_last = now self.tick(now) def _find_bucket(self, timestamp): """Try to find bucket with given timestamp. """ for bucket in self._reboot_buckets: if bucket.timestamp == timestamp: return bucket return None def add(self, server, timestamp=None): """Add server. """ bucket = None if timestamp: bucket = self._find_bucket(timestamp) # servers with larger than max lifetime should be rebooted at # the next opportunity if (self._reboot_buckets[0].timestamp > server.up_since + DEFAULT_SERVER_UPTIME): bucket = self._reboot_buckets[0] if not bucket: bucket = min(reversed(self._reboot_buckets), key=lambda b: b.cost(server)) bucket.add(server) def remove(self, server): """Remove server. """ for bucket in self._reboot_buckets: bucket.remove(server) def tick(self, now): """Do per-tick-bookkeeping. """ while self._reboot_last <= now + DEFAULT_SERVER_UPTIME: bucket = RebootBucket(next(self._reboot_dates)) self._reboot_buckets.append(bucket) self._reboot_last = bucket.timestamp while self._reboot_buckets[0].timestamp < now: self._reboot_buckets.pop(0) class PartitionDict(dict): """Dict that creates partitions on demand. We use this instead of collections.defaultdict so that we can provide the new partition with its label, to be propagated to its allocations. """ def __missing__(self, label): """Create a new partition, passing the label to its constructor. 
""" self[label] = Partition(label=label) return self[label] # pylint: disable=invalid-name def reboot_dates(schedule, start_date=None): """Generate list of valid reboot dates. """ date = datetime.date.today() if start_date: date = start_date while True: weekday = date.weekday() if weekday in schedule: h, m, s = schedule[weekday] yield time.mktime((date.year, date.month, date.day, h, m, s, 0, 0, 0)) date += datetime.timedelta(days=1) class RebootBucket: """Bucket of servers to be rebooted at the same time. """ __slots__ = ( 'timestamp', 'servers', ) def __init__(self, timestamp): self.timestamp = timestamp self.servers = [] def add(self, server): """Add server to this bucket. """ self.servers.append(server) server.valid_until = self.timestamp _LOGGER.info('Setting valid until on server: %s %s', server.name, server.valid_until) def remove(self, server): """Remove server from this bucket. """ try: self.servers.remove(server) except ValueError: pass def cost(self, server): """The cost of adding server to this bucket. """ if self.timestamp > server.up_since + DEFAULT_SERVER_UPTIME: return float('inf') if self.timestamp < server.up_since + MIN_SERVER_UPTIME: return float('inf') return len(self.servers) class PlacementFeasibilityTracker: """Tracks similar apps placement failures.""" def __init__(self): self.recorder = dict() def feasible(self, app): """Checks if it is feasible to satisfy demand.""" constraints, demand = app.shape() if constraints in self.recorder: # If demand is >= than recorded failure, placement is not feasible. if _all_ge(demand, self.recorder[constraints]): return False return True def adjust(self, app): """Adjust info about failed placement.""" constraints, demand = app.shape() if constraints not in self.recorder: self.recorder[constraints] = demand else: if _all_le(demand, self.recorder[constraints]): self.recorder[constraints] = demand class Cell(Bucket): """Top level node. """ __slots__ = ( 'partitions', 'next_event_at', 'apps', 'identity_groups', ) def __init__(self, name): super(Cell, self).__init__(name, traits=0, level='cell') self.partitions = PartitionDict() self.apps = dict() self.identity_groups = collections.defaultdict(IdentityGroup) self.next_event_at = np.inf def add_app(self, allocation, app): """Adds application to the scheduled list. """ assert allocation is not None if app.allocation: app.allocation.remove(app.name) allocation.add(app) self.apps[app.name] = app if app.identity_group: app.identity_group_ref = self.identity_groups[app.identity_group] def remove_app(self, appname): """Remove app from scheduled list. """ if appname not in self.apps: return app = self.apps[appname] servers = self.members() if app.server in servers: servers[app.server].remove(app.name) if app.allocation: app.allocation.remove(app.name) app.release_identity() del self.apps[appname] def configure_identity_group(self, name, count): """Add identity group to the cell. """ if name not in self.identity_groups: self.identity_groups[name] = IdentityGroup(count) else: self.identity_groups[name].adjust(count) def remove_identity_group(self, name): """Remove identity group. """ ident_group = self.identity_groups.get(name) if ident_group: in_use = False for app in six.itervalues(self.apps): if app.identity_group_ref == ident_group: ident_group.adjust(0) in_use = True break if not in_use: del self.identity_groups[name] def _fix_invalid_placements(self, queue, servers): """If app is placed on non-existent server, set server to None. 
""" for app in queue: if app.server and app.server not in servers: app.server = None app.evicted = True app.release_identity() def _record_rank_and_util(self, queue): """Set final rank and utilization for all apps in the queue. """ for item in queue: rank = item[0] util = item[1] app = item[-1] app.final_rank = rank app.final_util = util def _fix_invalid_identities(self, queue, servers): """Check that app identity is valid for given identity group. """ for app in queue: if app.identity is not None and app.identity_group_ref is not None: # Can happen if identity group was adjusted to lower count. if app.identity >= app.identity_group_ref.count: # Can't release identity as it is invalid. _LOGGER.info('Identity exceeds limit: %s - %s, limit %s', app.name, app.identity, app.identity_group_ref.count) app.identity = None # Invalidate any existing placement. if app.server: servers[app.server].remove(app.name) def _handle_inactive_servers(self, servers): """Migrate apps from inactive servers. """ self.next_event_at = np.inf for server in six.itervalues(servers): state, since = server.get_state() to_be_moved = [] if state == State.down: _LOGGER.debug('Server state is down: %s', server.name) for name, app in six.iteritems(server.apps): if app.data_retention_timeout is None: expires_at = 0 else: expires_at = since + app.data_retention_timeout if expires_at <= time.time(): _LOGGER.debug('Expired placement: %s', name) app.release_identity() to_be_moved.append(name) else: _LOGGER.debug('Keep placement: %s until %s', name, expires_at) self.next_event_at = min(expires_at, self.next_event_at) elif state == State.frozen: _LOGGER.debug('Server state is frozen: %s', server.name) to_be_moved = [ name for name, app in six.iteritems(server.apps) if app.unschedule ] for name in to_be_moved: server.remove(name) def _find_placements(self, queue, servers): """Run the queue and find placements. """ # TODO: refactor to get rid of warnings. # # pylint: disable=too-many-branches,too-many-statements # # At this point, if app.server is defined, it points to attached # server. evicted = dict() reversed_queue = queue[::-1] placement_tracker = PlacementFeasibilityTracker() for app in queue: _LOGGER.debug('scheduling %s', app.name) if app.final_rank == _UNPLACED_RANK: if app.server: assert app.server in servers assert app.has_identity() servers[app.server].remove(app.name) app.release_identity() continue restore = {} if app.renew: assert app.server assert app.has_identity() assert app.server in servers server = servers[app.server] if not server.renew(app): # Save information that will be used to restore placement # in case renewal fails. _LOGGER.debug('Cannot renew app %s on server %s', app.name, app.server) restore['server'] = server restore['placement_expiry'] = app.placement_expiry server.remove(app.name) # At this point app was either renewed on the same server, or # temporarily removed from server if renew failed. # # If placement will be found, renew should remain False. If # placement will not be found, renew will be set to True when # placement is restored to the server it was running. app.renew = False if app.server: assert app.server in servers assert app.has_identity() continue assert app.server is None if not app.acquire_identity(): _LOGGER.info('Unable to acquire identity: %s, %s', app.name, app.identity_group) continue # If app was evicted before, try to restore to the same node. 
if app in evicted: assert app.has_identity() evicted_from, app_expiry = evicted[app] del evicted[app] if evicted_from.restore(app, app_expiry): app.evicted = False continue assert app.server is None if app.schedule_once and app.evicted: continue # Check if placement is feasible. if not placement_tracker.feasible(app): _LOGGER.info( 'Placement not feasible: %s %r', app.name, app.shape() ) continue if not self.put(app): # There is not enough capacity, from the end of the queue, # evict apps, freeing capacity. for evicted_app in reversed_queue: # We reached the app we can't place if evicted_app == app: break # The app is not yet placed, skip if not evicted_app.server: continue assert evicted_app.server in servers evicted_app_server = servers[evicted_app.server] # Do not consider servers that are not up. if evicted_app_server.state is not State.up: continue evicted[evicted_app] = (evicted_app_server, evicted_app.placement_expiry) evicted_app_server.remove(evicted_app.name) # TODO: we need to check affinity limit constraints on # each level, all the way to the top. if evicted_app_server.put(app): break # Placement failed. if not app.server: # If renewal attempt failed, restore previous placement and # expiry date. if restore: restore['server'].restore(app, restore['placement_expiry']) app.renew = True else: app.release_identity() placement_tracker.adjust(app) def schedule_alloc(self, allocation, servers): """Run the scheduler for given allocation. """ begin = time.time() size = self.size(allocation.label) util_queue = list(allocation.utilization_queue(size)) self._record_rank_and_util(util_queue) queue = [item[-1] for item in util_queue] self._find_placements(queue, servers) _LOGGER.info('Scheduled %s (%d) apps in %r', allocation.label, len(queue), time.time() - begin) def schedule(self): """Run the scheduler. """ begin = time.time() all_apps = [] for label, partition in six.iteritems(self.partitions): allocation = partition.allocation all_apps.extend(allocation.all_apps()) before = [(app.name, app.server, app.placement_expiry) for app in all_apps] servers = self.members() self._fix_invalid_placements(six.viewvalues(self.apps), servers) self._handle_inactive_servers(servers) self._fix_invalid_identities(six.viewvalues(self.apps), servers) for label, partition in six.iteritems(self.partitions): allocation = partition.allocation allocation.label = label self.schedule_alloc(allocation, servers) after = [(app.server, app.placement_expiry) for app in all_apps] placement = [ tuple(itertools.chain(b, a)) for b, a in six.moves.zip(before, after) ] for appname, s_before, exp_before, s_after, exp_after in placement: if s_before != s_after: _LOGGER.info('New placement: %s - %s => %s', appname, s_before, s_after) else: if exp_before != exp_after: _LOGGER.info('Renewed: %s [%s] - %s => %s', appname, s_before, exp_before, exp_after) _LOGGER.info('Total scheduler time for %s apps: %r (sec)', len(all_apps), time.time() - begin) return placement def resolve_reboot_conflicts(self): """Adjust server exipiration time to avoid conflicts. """ pass def dumps(cell): """Serializes cell to string. """ del cell return '' def loads(data): """Loads scheduler from string. """ del data assert False, 'not implemented.'
30.016648
79
0.578203
49,666
0.91821
4,322
0.079904
956
0.017674
0
0
13,729
0.253818
c758c753c3644ae1a4c381597cfe0cc82c7e378b
1,260
py
Python
banners/bannerRan.py
gothyyy/AIDungeon
c198371c34d914e9d996559ef850c87a76f572c4
[ "MIT" ]
1
2019-12-30T21:45:06.000Z
2019-12-30T21:45:06.000Z
banners/bannerRan.py
gothyyy/AIDungeon
c198371c34d914e9d996559ef850c87a76f572c4
[ "MIT" ]
null
null
null
banners/bannerRan.py
gothyyy/AIDungeon
c198371c34d914e9d996559ef850c87a76f572c4
[ "MIT" ]
null
null
null
import random
import sys
import time
import json
import os
import warnings
import numpy as np
import glob, os

stat_mini = 1
stat_max = 0
listBanners = []

#HOW TO USE IT:
#1 copy the opening.txt
#2 remove the graphic (but do keep top logo for consistency)
#3 add ASCII art that is 78 or less characters in width
#4 save txt file under a completely new name

class bannerRan:
    def __init__(self):
        banner_number = load_banner()  #insert function to get random
        self.banner_number = banner_number

def load_banner():
    global stat_max
    global stat_mini
    global listBanners
    hey = scanBanners()  #load text and get proper numbers
    choose_between = r(stat_mini, stat_max)
    x = random.choice(listBanners)
    return x

def r(x, y):  #random, picks between X and Y
    return int(str(random.randint(x, y)))

def scanBanners():
    global stat_max
    global listBanners
    dir_path = os.path.dirname(os.path.realpath(__file__))  # directory of banners path
    #os.chdir("")
    i = 0
    for file in glob.glob("banners/*.txt"):
        i += 1
        listBanners.append(file)
        #print(str(i), file)
    stat_max = i
    x = dir_path
    return x
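# Illustrative usage sketch (not part of the original file): assuming the
# script is run from the repository root so that "banners/*.txt" resolves to
# at least one banner file, picking and printing one could look like this.
if __name__ == "__main__":
    chosen = bannerRan()
    # banner_number holds the path of the randomly selected banner file
    with open(chosen.banner_number) as banner_file:
        print(banner_file.read())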
14.823529
86
0.640476
157
0.124603
0
0
0
0
0
0
366
0.290476
c758e049e83a8786ae62f5c9ab2545ec4624de3e
511
py
Python
BondMarket/app/theme_lib.py
Meith0717/BondMarket
83d99bd5930758e73b4fe74a92e706c7bc0eadb6
[ "Apache-2.0" ]
null
null
null
BondMarket/app/theme_lib.py
Meith0717/BondMarket
83d99bd5930758e73b4fe74a92e706c7bc0eadb6
[ "Apache-2.0" ]
null
null
null
BondMarket/app/theme_lib.py
Meith0717/BondMarket
83d99bd5930758e73b4fe74a92e706c7bc0eadb6
[ "Apache-2.0" ]
null
null
null
from dataclasses import dataclass @dataclass class theme: name : str bg_color : str fg_color : str lb_color : str ttk_theme : str LIGHT = theme( name='LIGHT', bg_color=None, fg_color='black', lb_color='#f0f0f0', ttk_theme='xpnative' ) DARK = theme( name='DARK', bg_color='#424242', fg_color='white', lb_color='#424242', ttk_theme='black' )
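# Illustrative usage sketch (not part of the original file): one way a theme
# record defined above might be applied to a Tk window. The widget/style names
# and the plain tkinter/ttk calls are assumptions for illustration only.
import tkinter as tk
from tkinter import ttk

def apply_theme(root: tk.Tk, chosen: theme) -> None:
    # Window background (LIGHT leaves bg_color unset, so guard against None)
    if chosen.bg_color is not None:
        root.configure(background=chosen.bg_color)
    # Default label colours via a ttk style; the named ttk_theme itself may
    # require extra packages (e.g. 'black'), so it is not activated here.
    style = ttk.Style(root)
    style.configure("TLabel", background=chosen.lb_color, foreground=chosen.fg_color)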
19.653846
35
0.485323
127
0.248532
0
0
139
0.272016
0
0
71
0.138943
c7592054e40573b08b4d8a7a1efd9326b5695f4f
3,877
py
Python
run.py
rimijoker/CA-MTL
068e25e0860a8ec81462018126eace4c004bacd4
[ "MIT" ]
1
2021-08-03T03:54:02.000Z
2021-08-03T03:54:02.000Z
run.py
rimijoker/CA-MTL
068e25e0860a8ec81462018126eace4c004bacd4
[ "MIT" ]
null
null
null
run.py
rimijoker/CA-MTL
068e25e0860a8ec81462018126eace4c004bacd4
[ "MIT" ]
1
2021-07-31T09:44:00.000Z
2021-07-31T09:44:00.000Z
import os import sys import re import json import logging import torch from transformers import ( HfArgumentParser, set_seed, AutoTokenizer, AutoConfig, EvalPrediction, ) from src.model.ca_mtl import CaMtl, CaMtlArguments from src.utils.misc import MultiTaskDataArguments, Split from src.mtl_trainer import MultiTaskTrainer, MultiTaskTrainingArguments from src.data.mtl_dataset import MultiTaskDataset from src.data.task_dataset import TaskDataset logger = logging.getLogger(__name__) def setup_logging(training_args): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) def parse_cmd_args(): parser = HfArgumentParser( ( CaMtlArguments, MultiTaskDataArguments, MultiTaskTrainingArguments, ) ) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1]) ) else: ( model_args, data_args, training_args, ) = parser.parse_args_into_dataclasses() logger.info("Training/evaluation parameters %s", training_args) return model_args, data_args, training_args def create_eval_datasets(mode, data_args, tokenizer): eval_datasets = {} for task_id, task_name in enumerate(data_args.tasks): eval_datasets[task_name] = TaskDataset( task_name, task_id, data_args, tokenizer, mode=mode ) if task_name == "mnli": # Loop to handle MNLI double evaluation (matched, mis-matched) eval_datasets["mnli-mm"] = TaskDataset( "mnli-mm", task_id, data_args, tokenizer, mode=mode ) return eval_datasets def main(): model_args, data_args, training_args = parse_cmd_args() setup_logging(training_args) set_seed(training_args.seed) config = AutoConfig.from_pretrained( CaMtl.get_base_model(model_args.model_name_or_path), ) model = CaMtl.from_pretrained( CaMtl.get_base_model(model_args.model_name_or_path), model_args, data_args, config=config) model.freeze_encoder_layers(model_args) logger.info(model) tokenizer = AutoTokenizer.from_pretrained( CaMtl.get_base_model(model_args.model_name_or_path), ) logger.info("Training tasks: %s", ", ".join([t for t in data_args.tasks])) trainer = MultiTaskTrainer( tokenizer, data_args, model=model, args=training_args, train_dataset=MultiTaskDataset(data_args, tokenizer, limit_length=50) if training_args.do_train else None, eval_datasets=create_eval_datasets(Split.dev, data_args, tokenizer) if training_args.do_eval or training_args.evaluate_during_training else None, test_datasets=create_eval_datasets(Split.test, data_args, tokenizer) if training_args.do_predict else None, ) if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) if training_args.do_eval: trainer.evaluate() if training_args.do_predict: trainer.predict() def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
26.923611
98
0.660562
0
0
0
0
0
0
0
0
348
0.08976
c75af988694e7b9961b260a9f014fab177797bfa
1,033
py
Python
examples/readWebsocket.py
uadlq/PhyPiDAQ-PiOS11
fc6060551be2cc0143a157081341bf3c338d9fbd
[ "BSD-2-Clause" ]
null
null
null
examples/readWebsocket.py
uadlq/PhyPiDAQ-PiOS11
fc6060551be2cc0143a157081341bf3c338d9fbd
[ "BSD-2-Clause" ]
null
null
null
examples/readWebsocket.py
uadlq/PhyPiDAQ-PiOS11
fc6060551be2cc0143a157081341bf3c338d9fbd
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 """Read data in CSV format from websocket """ import sys import asyncio import websockets # read url from command line if len(sys.argv) >= 2: uri = sys.argv[1] else: # host url and port uri = "ws://localhost:8314" print("*==* ", sys.argv[0], " Lese Daten von url ", uri) async def read_ws(): """asynchronous read from websocket """ async with websockets.connect(uri, ping_interval=None) as websocket: # test connection await websocket.send("req_connect") answ = await websocket.recv() if answ == "ack_connect": print("** connected to websocket ", uri) # get data await websocket.send("getData") while True: inp = await websocket.recv() if inp == '\n': # empty record, end print("empty input - closing") sys.exit(0) else: print('read: %s ' % inp, end='') # run web client asyncio.get_event_loop().run_until_complete(read_ws())
25.195122
72
0.580833
0
0
0
0
0
0
648
0.627299
372
0.360116
c75b6da97a2671884ced55ad3cbef590baf2e5c6
2,187
py
Python
settings/__init__.py
arcana261/python-grpc-boilerplate
dd20767ad5540a49e1db802ce578c7b8e416ccbb
[ "Unlicense" ]
null
null
null
settings/__init__.py
arcana261/python-grpc-boilerplate
dd20767ad5540a49e1db802ce578c7b8e416ccbb
[ "Unlicense" ]
null
null
null
settings/__init__.py
arcana261/python-grpc-boilerplate
dd20767ad5540a49e1db802ce578c7b8e416ccbb
[ "Unlicense" ]
null
null
null
import os import sys import itertools import json _NONE = object() class SettingManager: _sentry = object() def __init__(self): self.env = os.getenv('ENV', 'prd') try: self._default = __import__('settings.default', fromlist=['*']) except ModuleNotFoundError: self._default = object() try: self._env = __import__('settings.{}'.format(self.env), fromlist=['*']) except ModuleNotFoundError: self._env = object() self._loaded = [] def load(self, filename, fmt='json'): filename = os.path.abspath(filename) if fmt == 'json': with open(filename) as f: self._loaded.append((filename, json.load(f))) def unload(self, filename): filename = os.path.abspath(filename) self._loaded = [(f, v) for f, v in self._loaded if f != filename] def __getattr__(self, item): result = SettingManager._sentry for _, values in self._loaded: if item in values: result = values[item] result = os.getenv(item, result) if result is SettingManager._sentry: result = getattr(self._env, item, getattr(self._default, item, SettingManager._sentry)) if result is SettingManager._sentry: raise AttributeError return result def __contains__(self, item): try: self.__getattr__(item) return True except AttributeError: return False def get(self, item, default=_NONE): try: return self.__getattr__(item) except AttributeError: if default is not _NONE: return default raise AttributeError def __iter__(self): chained = itertools.chain(getattr(self._default, '__dict__', dict()).keys(), getattr(self._env, '__dict__', dict()).keys()) for _, values in self._loaded: chained = itertools.chain(chained, values.keys()) return iter(filter(lambda x: not x.startswith('_'), set(chained))) sys.modules[__name__] = SettingManager()
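# Illustrative usage sketch (not part of the original file): because the line
# above swaps this module for a SettingManager instance, importing code talks
# to the package as if it were a plain settings object. The setting names and
# the 'local.json' file below are assumptions for illustration only.
#
#   import settings
#
#   settings.load('local.json')                  # layer file-based overrides on top
#   db_url = settings.get('DB_URL', 'sqlite://') # env var > loaded file > settings.<ENV> > settings.default
#   if 'DEBUG' in settings:
#       print(settings.DEBUG)
#   print(sorted(settings))                      # every known setting name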
27.683544
99
0.577503
2,073
0.947874
0
0
0
0
0
0
82
0.037494
c75c60f75fce7285b991ad22486e1b1b13a02fed
1,990
py
Python
roblox/partials/partialgroup.py
speer-kinjo/ro.py
2d5b80aec8fd143b11101fbbfdf3b557f798a27f
[ "MIT" ]
28
2021-11-04T11:13:38.000Z
2022-03-11T05:00:16.000Z
roblox/partials/partialgroup.py
speer-kinjo/ro.py
2d5b80aec8fd143b11101fbbfdf3b557f798a27f
[ "MIT" ]
12
2021-11-24T06:25:24.000Z
2022-03-18T14:37:01.000Z
roblox/partials/partialgroup.py
speer-kinjo/ro.py
2d5b80aec8fd143b11101fbbfdf3b557f798a27f
[ "MIT" ]
21
2021-10-20T16:36:55.000Z
2022-03-27T21:43:53.000Z
""" This file contains partial objects related to Roblox groups. """ from __future__ import annotations from typing import TYPE_CHECKING from ..bases.basegroup import BaseGroup from ..bases.baseuser import BaseUser if TYPE_CHECKING: from ..client import Client class AssetPartialGroup(BaseGroup): """ Represents a partial group in the context of a Roblox asset. Intended to parse the `data[0]["creator"]` data from https://games.roblox.com/v1/games. Attributes: _client: The Client object, which is passed to all objects this Client generates. id: The group's name. creator: The group's owner. name: The group's name. """ def __init__(self, client: Client, data: dict): """ Arguments: client: The Client. data: The data from the endpoint. """ self._client: Client = client self.creator: BaseUser = BaseUser(client=client, user_id=data["Id"]) self.id: int = data["CreatorTargetId"] self.name: str = data["Name"] super().__init__(client, self.id) def __repr__(self): return f"<{self.__class__.__name__} id={self.id} name={self.name!r}>" class UniversePartialGroup(BaseGroup): """ Represents a partial group in the context of a Roblox universe. Attributes: _data: The data we get back from the endpoint. _client: The client object, which is passed to all objects this client generates. id: Id of the group name: Name of the group """ def __init__(self, client: Client, data: dict): """ Arguments: client: The ClientSharedObject. data: The data from the endpoint. """ self._client: Client = client self.id = data["id"] self.name: str = data["name"] super().__init__(client, self.id) def __repr__(self): return f"<{self.__class__.__name__} id={self.id} name={self.name!r}>"
28.028169
91
0.628643
1,715
0.861809
0
0
0
0
0
0
1,141
0.573367
c75d41f3ecd90250dc9544657aba89378f5765d0
2,150
py
Python
services/UserService.py
erginbalta/FarmChain
a542d19212f176b7b5d12806078459da105e5afa
[ "Apache-2.0" ]
1
2021-01-16T14:38:21.000Z
2021-01-16T14:38:21.000Z
services/UserService.py
erginbalta/FarmChain
a542d19212f176b7b5d12806078459da105e5afa
[ "Apache-2.0" ]
null
null
null
services/UserService.py
erginbalta/FarmChain
a542d19212f176b7b5d12806078459da105e5afa
[ "Apache-2.0" ]
1
2020-07-23T04:00:07.000Z
2020-07-23T04:00:07.000Z
import mysql.connector import socket from contextlib import closing import json import random packetType= ["INF","TRN","USR"] database = mysql.connector.connect( host="localhost", user="root", port="3307", passwd="ergin00000", database="farmchain" ) def userIdCreator(): data = [] numericId = 0 id = "" with open("/datas/userInformation.json",'r') as f: user = json.load(f) numericId = len(user) + 1 id = str(packetType[2])+str(numericId) return id def transactionIdCreator(): idKey = packetType[1] numericId = random.randint(10000,99999) id = idKey+str(numericId) return id def getUserConnectionInfo(): hst = socket.gethostname() usrHost = socket.gethostbyname(hst) usrPort = findFreePort() return [usrHost,usrPort] def findFreePort(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: s.bind(('', 0)) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) return s.getsockname()[1] def checkOnlineMiners(): mycursor = database.cursor() sql = "select * from miners where status = 1;" mycursor.execute(sql) result = mycursor.fetchall() return result def minerInfo(): result = checkOnlineMiners() info = result[0] host = result[1] port = result[2] return [host,port] def userInfoPacket(password,name,surname,company,status): info = getUserConnectionInfo() userId = userIdCreator() name = str(name).lower() surname = str(surname).lower() company = str(company).lower() status = str(status).lower() packet = [packetType[0],[userId,password,name,surname,company,status],info[0],info[1]] return packet def transactionPacketCreator(productId,productName,productNumber,fromPlace,toPlace,date): info = getUserConnectionInfo() transactionId = transactionIdCreator() productName = str(productName).lower() fromPlace = str(fromPlace).lower() toPlace = str(toPlace).lower() packet = [packetType[1],[transactionId,productId,productName,productNumber,fromPlace,toPlace,date],info[0],info[1]] return packet
26.875
119
0.676744
0
0
0
0
0
0
0
0
137
0.063721
c75e39b34cd2c6335e68141ae306111fa4b684be
10,238
py
Python
tests/blackbox/access_settings/test_bb_access_settings.py
csanders-git/waflz
ec8fc7c845f20a2a8c757d13845ba22a6d7c5b28
[ "Apache-2.0" ]
1
2019-03-16T09:02:58.000Z
2019-03-16T09:02:58.000Z
tests/blackbox/access_settings/test_bb_access_settings.py
csanders-git/waflz
ec8fc7c845f20a2a8c757d13845ba22a6d7c5b28
[ "Apache-2.0" ]
null
null
null
tests/blackbox/access_settings/test_bb_access_settings.py
csanders-git/waflz
ec8fc7c845f20a2a8c757d13845ba22a6d7c5b28
[ "Apache-2.0" ]
1
2021-04-22T09:43:46.000Z
2021-04-22T09:43:46.000Z
#!/usr/bin/python '''Test WAF Access settings''' #TODO: make so waflz_server only runs once and then can post to it # ------------------------------------------------------------------------------ # Imports # ------------------------------------------------------------------------------ import pytest import subprocess import os import sys import json from pprint import pprint import time import requests # ------------------------------------------------------------------------------ # Constants # ------------------------------------------------------------------------------ G_TEST_HOST = 'http://127.0.0.1:12345/' # ------------------------------------------------------------------------------ # globals # ------------------------------------------------------------------------------ g_server_pid = -1 # ------------------------------------------------------------------------------ # # ------------------------------------------------------------------------------ def run_command(command): p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() return (p.returncode, stdout, stderr) # ------------------------------------------------------------------------------ #setup_func # ------------------------------------------------------------------------------ @pytest.fixture() def setup_func(): global g_server_pid l_cwd = os.getcwd() l_file_path = os.path.dirname(os.path.abspath(__file__)) l_ruleset_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/ruleset')) l_geoip2city_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-City.mmdb')); l_geoip2ISP_path = os.path.realpath(os.path.join(l_file_path, '../../data/waf/db/GeoLite2-ASN.mmdb')); l_profile_path = os.path.realpath(os.path.join(l_file_path, 'test_bb_access_settings.waf.prof.json')) l_waflz_server_path = os.path.abspath(os.path.join(l_file_path, '../../../build/util/waflz_server/waflz_server')) l_subproc = subprocess.Popen([l_waflz_server_path, '-f', l_profile_path, '-r', l_ruleset_path, '-g', l_geoip2city_path, '-s', l_geoip2ISP_path]) time.sleep(1) g_server_pid = l_subproc.pid time.sleep(1) print 'setup g_server_pid: %d'%(g_server_pid) time.sleep(1) # ------------------------------------------------------------------------------ #teardown_func # ------------------------------------------------------------------------------ def teardown_func(): global g_server_pid time.sleep(.5) print 'teardown g_server_pid: %d'%(g_server_pid) if g_server_pid != -1: l_code, l_out, l_err = run_command('kill -9 %d'%(g_server_pid)) time.sleep(.5) # ------------------------------------------------------------------------------ # test_bb_modsecurity_ec_access_settings_ignore_args # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_01_block_not_in_ignore_args(setup_func): #"ignore_query_args": ["ignore", "this", "crap"] l_uri = G_TEST_HOST + '?' 
+ 'arg1&arg2&arg3&arg4&arg5' l_headers = {"host": "myhost.com"} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) > 0 print json.dumps(l_r_json,indent=4) assert l_r_json['rule_intercept_status'] == 403 #assert 'modsecurity_crs_23_request_limits.conf' in l_r_json['sub_event'][0]['rule_file'] # ensure 403 because exceeded max_num_args assert 'Too many arguments in' in l_r_json['rule_msg'] # ------------------------------------------------------------------------------ # test_bb_modsec_ec_access_settings_02_bypass_in_ignore_args # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_02_bypass_in_ignore_args(): #Test that passing ignore args lets it bypass #Max arg limit it 4, we pass 7 l_uri = G_TEST_HOST + '?' + 'arg1&arg2&arg3&arg4&ignore&this&crap' l_headers = {"host": "myhost.com"} l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) == 0 # ------------------------------------------------------------------------------ # test_bb_modsec_ec_access_settings_03_block_headers_not_in_ignore_header_list # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_03_block_headers_not_in_ignore_header_list(): #ignore_header": ["(?i)(benign-header)", "super-whatever-header", "^D.*"] l_uri = G_TEST_HOST l_headers = {"host": "myhost.com", "kooky-Header" : "function () { doing this is kinda dumb" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() print l_r_json #We got an event assert len(l_r_json) > 0 # detect a bash shellshock assert 'Bash shellshock attack detected' in l_r_json['sub_event'][0]['rule_msg'] assert 'REQUEST_HEADERS' in l_r_json['sub_event'][0]['matched_var']['name'] assert 'ZnVuY3Rpb24gKCkgeyBkb2luZyB0aGlzIGlzIGtpbmRhIGR1bWI=' in l_r_json['sub_event'][0]['matched_var']['value'] # ------------------------------------------------------------------------------ # test_bb_modsec_ec_access_settings_04_bypass_headers_in_ignore_header_list # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_04_bypass_headers_in_ignore_header_list(): #Test ignore headers are ignored l_uri = G_TEST_HOST l_headers = {"host": "myhost.com", "Benign-Header" : "function () { doing this is kinda dumb", "super-whatever-header" : "function () { doing this is kinda dumb" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) == 0 # ------------------------------------------------------------------------------- # test_bb_modsec_ec_access_settings_05_bypass_headers_in_ignore_header_list_regex # ------------------------------------------------------------------------------- def test_bb_modsec_ec_access_settings_05_bypass_headers_in_ignore_header_list_regex(): ######################################## # Test regex "^D.*" ######################################## l_uri = G_TEST_HOST #anything that starts with D should be ignored l_headers = {"host": "myhost.com", "Doopdoop" : "function () { doing this is kinda dumb", "Duper-duper-deader" : "function () { doing this is kinda dumb" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) == 0 # ------------------------------------------------------------------------------ # 
test_bb_modsec_ec_access_settings_06_block_cookie_not_in_ignore_cookie_list # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_06_block_cookie_not_in_ignore_cookie_list(): #"ignore_cookie": ["(?i)(sketchy_origin)", "(?i)(yousocrazy)"] l_uri = G_TEST_HOST l_headers = {"host": "myhost.com", "Cookie": "blahblah=function () { asdf asdf asdf" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) > 0 # detect a bash shellshock assert 'Bash shellshock attack detected' in l_r_json['sub_event'][0]['rule_msg'] assert 'REQUEST_HEADERS' in l_r_json['sub_event'][0]['matched_var']['name'] # ------------------------------------------------------------------------------ # test_bb_modsec_ec_access_settings_07_bypass_cookie_in_ignore_cookie_list # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_07_bypass_cookie_in_ignore_cookie_list(): #"ignore_cookie": ["(?i)(sketchy_origin)", "(?i)(yousocrazy)"] l_uri = G_TEST_HOST l_headers = {"host" : "myhost.com", "Cookie" : "SkeTchy_Origin=function () { asdf asdf asdf" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() #We get no event assert len(l_r_json) == 0 l_uri = G_TEST_HOST l_headers = {"host" : "myhost.com", "Cookie" : "SkeTchy_Origin=function () { asdf asdf asdf" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) == 0 # ------------------------------------------------------------------------------ # test_bb_modsec_ec_access_settings_08_ignore_cookie_in_ignore_cookie_list # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_08_bypass_cookie_in_ignore_cookie_list_regex(): ######################################## # Test regex "^[0-9_].*$" ######################################## l_uri = G_TEST_HOST l_headers = {"host" : "myhost.com", "Cookie" : "0_123_ADB__bloop=function () { asdf asdf asdf" } l_r = requests.get(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) == 0 # ------------------------------------------------------------------------------ # test_bb_modsec_ec_access_settings_09_block_disallowed_http_method # ------------------------------------------------------------------------------ def test_bb_modsec_ec_access_settings_09_block_disallowed_http_method(): l_uri = G_TEST_HOST l_headers = {"host" : "myhost.com" } l_r = requests.put(l_uri, headers=l_headers) assert l_r.status_code == 200 l_r_json = l_r.json() assert len(l_r_json) > 0 assert 'Method is not allowed by policy' in l_r_json['rule_msg'] teardown_func()
49.458937
117
0.511428
0
0
0
0
1,097
0.10715
0
0
5,441
0.531451
c75ec65b0817a875da33fd517bd4f04f459ffba4
2,852
py
Python
cosmosis/runtime/analytics.py
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra
07e5d308c6a8641a369a3e0b8d13c4104988cd2b
[ "BSD-2-Clause" ]
1
2021-09-15T10:10:26.000Z
2021-09-15T10:10:26.000Z
cosmosis/runtime/analytics.py
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra
07e5d308c6a8641a369a3e0b8d13c4104988cd2b
[ "BSD-2-Clause" ]
null
null
null
cosmosis/runtime/analytics.py
ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra
07e5d308c6a8641a369a3e0b8d13c4104988cd2b
[ "BSD-2-Clause" ]
1
2021-06-11T15:29:43.000Z
2021-06-11T15:29:43.000Z
#coding: utf-8 from __future__ import print_function from builtins import zip from builtins import object from cosmosis import output as output_module import numpy as np import sys import os class Analytics(object): def __init__(self, params, pool=None): self.params = params self.pool = pool self.total_steps = 0 nparam = len(params) self.means = np.zeros(nparam) self.m2 = np.zeros(nparam) self.cov_times_n = np.zeros((nparam,nparam)) def add_traces(self, traces): if traces.shape[1] != len(self.params): raise RuntimeError("The number of traces added to Analytics " "does not match the number of varied " "parameters!") num = float(self.total_steps) for x in traces: num += 1.0 delta = x - self.means old_means = self.means.copy() self.means += delta/num self.m2 += delta*(x - self.means) self.cov_times_n += np.outer(x-self.means, x-old_means) self.total_steps += traces.shape[0] def trace_means(self): if self.pool: return np.array(self.pool.gather(self.means)).T else: return self.means def trace_variances(self): if self.total_steps > 1: local_variance = self.m2 / float(self.total_steps-1) if self.pool: return np.array(self.pool.gather(local_variance)).T else: return local_variance return None def gelman_rubin(self, quiet=True): # takes current traces and returns if self.pool is None or not self.pool.size > 1: raise RuntimeError("Gelman-Rubin statistic is only " "valid for multiple chains.") if self.total_steps == 0: raise RuntimeError("Gelman-Rubin statistic not " "defined for 0-length chains.") # gather trace statistics to master process means = self.trace_means() variances = self.trace_variances() if self.pool.is_master(): B_over_n = np.var(means, ddof=1, axis=1) B = B_over_n * self.total_steps W = np.mean(variances, axis=1) V = ((1. - 1./self.total_steps) * W + (1. + 1./self.pool.size) * B_over_n) # TODO: check for 0-values in W Rhat = np.sqrt(V/W) else: Rhat = None Rhat = self.pool.bcast(Rhat) if not quiet and self.pool.is_master(): print() print("Gelman-Rubin:") for (p,R) in zip(self.params, Rhat): print(" ", p, " ", R) print("Worst = ", Rhat.max()) print() return Rhat
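# Illustrative usage sketch (not part of the original file): feeding one chain
# of synthetic samples through the running-moment updates above. The parameter
# names and sample count are assumptions for illustration only.
if __name__ == "__main__":
    param_names = ["omega_m", "sigma_8"]
    analytics = Analytics(param_names)            # no pool: single local chain
    samples = np.random.normal(size=(1000, len(param_names)))
    analytics.add_traces(samples)
    print("means:     ", analytics.trace_means())
    print("variances: ", analytics.trace_variances())
    # gelman_rubin() needs a pool with several chains, so it is not called here.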
31.688889
73
0.543829
2,657
0.931627
0
0
0
0
0
0
371
0.130084
c76014b2a087d9f2456ffc8e8847fb9b397481a4
8,148
py
Python
sdcc2elf.py
Vector35/llil_transpiler
6f6f368d34cb872460ad1634ddcbc4207276feb6
[ "MIT" ]
14
2019-08-23T13:49:07.000Z
2021-12-24T20:09:57.000Z
sdcc2elf.py
Vector35/llil_transpiler
6f6f368d34cb872460ad1634ddcbc4207276feb6
[ "MIT" ]
null
null
null
sdcc2elf.py
Vector35/llil_transpiler
6f6f368d34cb872460ad1634ddcbc4207276feb6
[ "MIT" ]
1
2021-12-24T20:10:00.000Z
2021-12-24T20:10:00.000Z
#!/usr/bin/env python # # convert SDCC .rel files to 32-bit ELF relocatable # # resulting file is simple: # # ------------------------ # ELF header # ------------------------ # .text section # .shstrtab section # .strtab section # .symtab section # ------------------------ # NULL elf32_shdr # .text elf32_shdr # .shstrtab elf32_shdr # .symtab elf32_shdr # .strtab elf32_shdr # ------------------------ import os import re import sys from struct import pack #------------------------------------------------------------------------------ # ELF helpers #------------------------------------------------------------------------------ (PF_X, PF_W, PF_R) = (1,2,4) (SHT_NULL, SHT_PROGBITS, SHT_STRTAB) = (0,1,3) sz_ehdr = 0x34 sz_shdr = 0x28 def align(fp, to=4, pad=b'\x00'): while fp.tell() % to: fp.write(pad) #------------------------------------------------------------------------------ # read .map file for symbols #------------------------------------------------------------------------------ fpath_map = sys.argv[2] assert fpath_map.endswith('.map') with open(fpath_map) as fp: lines = fp.readlines() (_CODE_ADDR, _CODE_SZ) = (None, None) (i_code, i_header) = (None, None) for (i, line) in enumerate(lines): if line.startswith('_CODE'): m = re.match(r'^_CODE\s+([A-F0-9]{8})\s+([A-F0-9]{8})', line) (addr, size) = map(lambda x: int(x, 16), m.group(1,2)) if not i_code: i_code = i _CODE_ADDR = addr _CODE_SZ = size else: if addr != _CODE_ADDR: raise Exception('conflicting code segment addresses') if size != _CODE_SZ: raise Exception('conflicting code segment sizes') if line.startswith('_HEADER0'): i_header = i break assert i_code and i_header and i_code < i_header syms = [] for line in lines[i_code:i_header]: m = re.search(r'([A-F0-9]{8})\s+(_\w+)', line) if m: (addr, symname) = m.group(1, 2) print('found %s: %s' % (addr, symname)) syms.append((symname, int(addr, 16))); assert syms print('_CODE [%08X, %08X)' % (_CODE_ADDR, _CODE_ADDR+_CODE_SZ)) print('_CODE symbols from') for (name, addr) in syms: print('%08X: %s' % (addr, name)) #------------------------------------------------------------------------------ # read .ihx file #------------------------------------------------------------------------------ fpath_ihx = sys.argv[1] assert fpath_ihx.endswith('.ihx') code_area = [b'\x00'] * (_CODE_ADDR + _CODE_SZ) with open(fpath_ihx) as fp: for line in fp.readlines(): m = re.match(r'^:(..)(....)00(.*)(..)', line) if m: (count, addr, data, csum) = m.group(1,2,3,4) count = int(count,16) assert count == len(data)/2 addr = int(addr,16) if not (addr >= _CODE_ADDR and addr < (_CODE_ADDR + _CODE_SZ)): continue print('%08X: ' % addr, end='') for i in range(count): byte_str = data[2*i]+data[2*i+1] print('%s ' % byte_str, end='') code_area[addr + i] = pack('B', int(byte_str, 16)) print('') continue m = re.match(r'^:00000001FF', line) if m: break raise Exception('got unexpected IHX line: %s' % line) assert code_area #print(code_area) #------------------------------------------------------------------------------ # write ELF #------------------------------------------------------------------------------ # process symbols, build string table syms = sorted(syms, key=lambda name_addr: name_addr[1]) func2size = {} func2stroffs = {} strtab = b'\x00' for i in range(len(syms)): (name, addr) = syms[i] if i == len(syms)-1: func2size[name] = len(code_area) - addr else: func2size[name] = syms[i+1][1] - addr func2stroffs[name] = len(strtab) strtab = strtab + name.encode('utf-8') + b'\x00' print('%04X: %s size %X' % (addr, name, 
func2size[name])) fp = open('tests.elf', 'wb') # elf32_hdr (placeholder, we'll come back to fill in offsets) print('elf32_hdr @ %X' % fp.tell()) fp.write(b'\x00' * sz_ehdr) # .text section contents o_text = fp.tell() print('placing .text @ %X' % o_text) for byte in code_area: fp.write(byte) sz_text = fp.tell() - o_text # .shstrtab section contents scn_shstrtab = b'\x00.text\x00.shstrtab\x00.symtab\x00.strtab\x00' align(fp) o_shstrtab = fp.tell() print('placing .shstrtab @ %X' % o_shstrtab) fp.write(scn_shstrtab) sz_shstrtab = fp.tell() - o_shstrtab # .symtab section contents align(fp) o_symtab = fp.tell() print('placing .symtab @ %X' % o_symtab) for (name, addr) in syms: st_name = func2stroffs[name] st_value = addr st_size = func2size[name] st_info = 0x12 # bind:1(GLOBAL) type:2(FUNC) st_other = 0 st_shndx = 0x1 # section header index: 0'th: NULL 1'th: .text Elf32_Sym = pack('<IIIBBH', st_name, st_value, st_size, st_info, st_other, st_shndx) fp.write(Elf32_Sym) sz_symtab = fp.tell() - o_symtab # .strtab section contents align(fp) o_strtab = fp.tell() print('placing .strtab @ %X' % o_strtab) fp.write(strtab) sz_strtab = fp.tell() - o_strtab # null section header (index 0) align(fp) o_shdr_null = fp.tell() print('placing shdr NULL @ %X' % o_shdr_null) fp.write(b'\x00' * sz_shdr) # .text section header (index 1) o_shdr_text = fp.tell() print('placing shdr .text @ %X' % fp.tell()) sh_name = scn_shstrtab.index(b'.text') sh_type = 1 # SHT_PROGBITS sh_flags = 6 # ALLOC|EXECINSTR sh_addr = 0 sh_offset = o_text sh_size = sz_text sh_link = 0 sh_info = 0 sh_addralign = 4 sh_entsize = 0 tmp = pack('<IIIIIIIIII', \ sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link, sh_info, \ sh_addralign, sh_entsize) fp.write(tmp) # .shstrtab section header (index 2) o_shdr_shstrtab = fp.tell() print('placing shdr .shstrtab @ %X' % fp.tell()) sh_name = scn_shstrtab.index(b'.shstrtab') sh_type = 3 #SHT_STRTAB sh_flags = 0 sh_addr = 0 sh_offset = o_shstrtab sh_size = sz_shstrtab sh_link = 0 sh_info = 0 sh_addralign = 1 sh_entsize = 0 tmp = pack('<IIIIIIIIII', \ sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link, sh_info, \ sh_addralign, sh_entsize) fp.write(tmp) # .symtab section header (index 3) o_shdr_symtab = fp.tell() print('placing shdr .symtab @ %X' % fp.tell()) sh_name = scn_shstrtab.index(b'.symtab') sh_type = 2 #SHT_SYMTAB sh_flags = 0 sh_addr = 0 sh_offset = o_symtab sh_size = sz_symtab sh_link = 4 # link to scn #4 (find strings in .strtab) sh_info = 0 sh_addralign = 4 sh_entsize = 0 tmp = pack('<IIIIIIIIII', \ sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link, sh_info, \ sh_addralign, sh_entsize) fp.write(tmp) # .strtab section header (index 4) o_shdr_strtab = fp.tell() print('placing shdr .strtab @ %X' % fp.tell()) sh_name = scn_shstrtab.index(b'.strtab') sh_type = 3 #SHT_STRTAB sh_flags = 0 sh_addr = 0 sh_offset = o_strtab sh_size = sz_strtab sh_link = 0 sh_info = 0 sh_addralign = 1 sh_entsize = 0 tmp = pack('<IIIIIIIIII', \ sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link, sh_info, \ sh_addralign, sh_entsize) fp.write(tmp) # seek back, write real elf header hdr = b'\x7FELF' hdr += b'\x01' # e_ident[EI_CLASS] 32-bit hdr += b'\x01' # e_ident[EI_DATA] LSB (little-end) hdr += b'\x01\x00\x00' # version, osabi, abiversion hdr += b'\x00'*7 assert len(hdr) == 16 hdr += pack('<H', 1) # e_type = ET_REL hdr += pack('<H', 220) # e_machine = EM_Z80 hdr += pack('<I', 1) # e_version = EV_CURRENT hdr += pack('<I', 0) # e_entry hdr += pack('<I', 0) # e_phoff hdr 
+= pack('<I', o_shdr_null) # e_shoff hdr += pack('<I', 0) # e_flags hdr += pack('<H', sz_ehdr) # e_ehsize hdr += pack('<H', 0) # e_phentsize hdr += pack('<H', 0) # e_phnum hdr += pack('<H', sz_shdr) # e_shentsize hdr += pack('<H', 5) # e_shnum hdr += pack('<H', 2) # e_shstrndx = index of .shstrtab assert len(hdr) == sz_ehdr fp.seek(0, os.SEEK_SET) fp.write(hdr) # done! fp.close()
27.714286
88
0.567624
0
0
0
0
0
0
0
0
2,850
0.349779
c760d11b6bcb337986c7f02b8372675729e8a684
3,743
py
Python
eval.py
nikinsta/deep-siamese-text-similarity-on-python-3
80fffd86da1d9f6bc0cb154a9415ff767d944777
[ "MIT" ]
null
null
null
eval.py
nikinsta/deep-siamese-text-similarity-on-python-3
80fffd86da1d9f6bc0cb154a9415ff767d944777
[ "MIT" ]
null
null
null
eval.py
nikinsta/deep-siamese-text-similarity-on-python-3
80fffd86da1d9f6bc0cb154a9415ff767d944777
[ "MIT" ]
null
null
null
#! /usr/bin/env python

import tensorflow as tf
import numpy as np
import os
import time
import datetime
from tensorflow.contrib import learn
from input_helpers import InputHelper

# Parameters
# ==================================================

# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_string("eval_filepath", "match_valid.tsv", "Evaluate on this data (Default: None)")
tf.flags.DEFINE_string("vocab_filepath", "runs/1479874609/checkpoints/vocab", "Load training time vocabulary (Default: None)")
tf.flags.DEFINE_string("model", "runs/1479874609/checkpoints/model-32000", "Load trained model checkpoint (Default: None)")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")

if FLAGS.eval_filepath==None or FLAGS.vocab_filepath==None or FLAGS.model==None:
    print("Eval or Vocab filepaths are empty.")
    exit()

# load data and map id-transform based on training time vocabulary
inpH = InputHelper()
x1_test, x2_test, y_test = inpH.getTestDataSet(FLAGS.eval_filepath, FLAGS.vocab_filepath, 30)

print("\nEvaluating...\n")

# Evaluation
# ==================================================
checkpoint_file = FLAGS.model
print(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x1 = graph.get_operation_by_name("input_x1").outputs[0]
        input_x2 = graph.get_operation_by_name("input_x2").outputs[0]
        input_y = graph.get_operation_by_name("input_y").outputs[0]

        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/distance").outputs[0]
        accuracy = graph.get_operation_by_name("accuracy/accuracy").outputs[0]
        sim = graph.get_operation_by_name("accuracy/temp_sim").outputs[0]

        #emb = graph.get_operation_by_name("embedding/W").outputs[0]
        #embedded_chars = tf.nn.embedding_lookup(emb,input_x)

        # Generate batches for one epoch
        batches = inpH.batch_iter(list(zip(x1_test, x2_test, y_test)), 2*FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []
        all_d = []
        for db in batches:
            x1_dev_b, x2_dev_b, y_dev_b = zip(*db)
            # Fetch the per-batch similarity into its own variable so the
            # `sim` tensor is not overwritten after the first batch.
            batch_predictions, batch_acc, batch_sim = sess.run([predictions, accuracy, sim], {input_x1: x1_dev_b, input_x2: x2_dev_b, input_y: y_dev_b, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])
            print(batch_predictions)
            all_d = np.concatenate([all_d, batch_sim])
            print("DEV acc {}".format(batch_acc))
        for ex in all_predictions:
            print(ex)
        correct_predictions = float(np.mean(all_d == y_test))
        print("Accuracy: {:g}".format(correct_predictions))
42.05618
167
0.696767
0
0
0
0
0
0
0
0
1,240
0.331285
c76173ed74a504071f1116fc3a7dc17a1c832c39
4,626
py
Python
accounts/views.py
nikhiljohn10/django-auth
01d97e8173436c3446f039cfa6472ece3cd9f96a
[ "MIT" ]
null
null
null
accounts/views.py
nikhiljohn10/django-auth
01d97e8173436c3446f039cfa6472ece3cd9f96a
[ "MIT" ]
null
null
null
accounts/views.py
nikhiljohn10/django-auth
01d97e8173436c3446f039cfa6472ece3cd9f96a
[ "MIT" ]
null
null
null
from django.urls import reverse from django.conf import settings from django.contrib import messages from django.shortcuts import render, redirect from django.core.mail import send_mail from django.contrib.auth import login, logout, views, authenticate from django.views.generic.edit import CreateView from django.contrib.sessions.models import Session from django.contrib.auth.decorators import login_required, permission_required from accounts.tools import activater, mailer from accounts.forms import SignUpForm, LoginForm from accounts.models import User @login_required @permission_required("is_staff", login_url='/dashboard/') def gmail(request): request.session['oauth_state'] = mailer.auth_state return redirect(mailer.auth_uri) @login_required @permission_required("is_staff", login_url='/dashboard/') def gmail_verify(request): code = request.GET.get('code','') state = request.GET.get('state','') if code and state == request.session['oauth_state']: mailer.verify(code) return redirect('dash:gmail') class UserLogin(views.LoginView): template_name = 'auth/login.html' authentication_form = LoginForm def form_valid(self, form): user = form.get_user() login(self.request, user) if not self.request.POST.get('remember_me', None): self.request.session.set_expiry(0) messages.info(self.request, f"You are now logged in as {user}") return redirect(self.get_success_url()) class SignUpView(CreateView): form_class = SignUpForm template_name = 'auth/signup.html' def form_valid(self, form): if mailer.activated: user = form.save() mailer.send_mail( "Django Verification Code", "Hi "+str(user)+",\nClick this link to activate: " + reverse('auth:verify_email', args=( user, activater.make_token(user))), [user.email]) login(self.request, user) else: messages.error(self.request, "Gmail is not activate. 
Contact site administrator.") return redirect('auth:signup') return redirect('core:home') def user_manage_permission(user, username): if not user.is_staff: if user.username == username: return True else: if user.username != username: return True return False @login_required @permission_required("is_staff", login_url='/dashboard/') def user_force_logout(request, username): user = User.objects.get(username=username) sessions = [s.delete() for s in Session.objects.all() if s.get_decoded().get('_auth_user_id') == str(user.id)] print(sessions) return redirect('dash:users') def user_verify_email(request, username, token): user = User.objects.get(username=username) if activater.check_token(user, token): print(user, "is verified") user.email_verified = True user.save() return redirect('dash:users') @login_required def user_disable(request, username): if user_manage_permission(request.user, username): user = User.objects.get(username=username) user.is_active = False user.save() messages.error(request, 'Profile successfully disabled.') else: messages.error( request, 'You are not allowed to perform this operation.') if request.user.is_staff: return redirect('dash:users') else: return redirect('dash:profile') @login_required def user_enable(request, username): if user_manage_permission(request.user, username): user = User.objects.get(username=username) user.is_active = True user.save() messages.success(request, 'Profile successfully enabled.') else: messages.error( request, 'You are not allowed to perform this operation.') if request.user.is_staff: return redirect('dash:users') else: return redirect('dash:profile') @login_required def user_delete(request, username): if user_manage_permission(request.user, username): user = User.objects.get(username=username) user.delete() messages.error(request, 'Profile successfully deleted.') else: messages.error( request, 'You are not allowed to perform this operation.') if request.user.is_staff: return redirect('dash:users') else: return redirect('dash:profile') user_login = UserLogin.as_view() user_signup = SignUpView.as_view() user_logout = views.LogoutView.as_view()
31.684932
78
0.671422
1,152
0.249027
0
0
2,291
0.495244
0
0
734
0.158668
c7651286d18c5a48356115767024669710aad666
29
py
Python
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationTupleType.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationTupleType.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/intentions/PyAnnotateVariableTypeIntentionTest/annotationTupleType.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
v<caret>ar = (1, 'foo', None)
29
29
0.551724
0
0
0
0
0
0
0
0
5
0.172414
c7655b5b6676c78858c562c5f53a9ea086d6bb5c
228
py
Python
bot/venv/lib/python3.7/site-packages/scipy/version.py
manaccac/sc2_bot
3aa8b3711378b71fd0a44662cdd7148846e39530
[ "MIT" ]
76
2020-07-06T14:44:05.000Z
2022-02-14T15:30:21.000Z
bot/venv/lib/python3.7/site-packages/scipy/version.py
manaccac/sc2_bot
3aa8b3711378b71fd0a44662cdd7148846e39530
[ "MIT" ]
37
2020-10-20T08:30:53.000Z
2020-12-22T13:15:45.000Z
bot/venv/lib/python3.7/site-packages/scipy/version.py
manaccac/sc2_bot
3aa8b3711378b71fd0a44662cdd7148846e39530
[ "MIT" ]
15
2020-11-30T22:12:22.000Z
2020-12-09T01:32:48.000Z
# THIS FILE IS GENERATED FROM SCIPY SETUP.PY short_version = '1.5.4' version = '1.5.4' full_version = '1.5.4' git_revision = '19acfed431060aafaa963f7e530c95e70cd4b85c' release = True if not release: version = full_version
20.727273
57
0.745614
0
0
0
0
0
0
0
0
107
0.469298
c765b8fb3017f33809adece1e8c0d5771ccc24b7
356
py
Python
emrichen/input/__init__.py
jbek7/emrichen
b6b8327e35cb2b9f3da49519110ecc766a9ad741
[ "MIT" ]
null
null
null
emrichen/input/__init__.py
jbek7/emrichen
b6b8327e35cb2b9f3da49519110ecc766a9ad741
[ "MIT" ]
null
null
null
emrichen/input/__init__.py
jbek7/emrichen
b6b8327e35cb2b9f3da49519110ecc766a9ad741
[ "MIT" ]
null
null
null
from typing import TextIO, Union from .json import load_json from .yaml import load_yaml PARSERS = { 'yaml': load_yaml, 'json': load_json, } def parse(data: Union[TextIO, str], format: str): if format in PARSERS: return PARSERS[format](data) else: raise ValueError('No parser for format {format}'.format(format=format))
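# Illustrative usage sketch (not part of the original file): consumer-side
# dispatch through the PARSERS table above. The inline documents are
# assumptions for illustration only.
#
#   from emrichen.input import parse
#
#   docs_from_json = parse('{"name": "example"}', 'json')
#   docs_from_yaml = parse('name: example', 'yaml')
#   parse('anything', 'toml')   # raises ValueError: No parser for format toml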
20.941176
79
0.676966
0
0
0
0
0
0
0
0
43
0.120787
c7675ba7953da5231174f58bf3d8e9f9039a7d72
5,668
py
Python
sdk/python/pulumi_aws_native/workspaces/get_workspace.py
pulumi/pulumi-aws-native
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
[ "Apache-2.0" ]
29
2021-09-30T19:32:07.000Z
2022-03-22T21:06:08.000Z
sdk/python/pulumi_aws_native/workspaces/get_workspace.py
pulumi/pulumi-aws-native
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
[ "Apache-2.0" ]
232
2021-09-30T19:26:26.000Z
2022-03-31T23:22:06.000Z
sdk/python/pulumi_aws_native/workspaces/get_workspace.py
pulumi/pulumi-aws-native
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
[ "Apache-2.0" ]
4
2021-11-10T19:42:01.000Z
2022-02-05T10:15:49.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'GetWorkspaceResult', 'AwaitableGetWorkspaceResult', 'get_workspace', 'get_workspace_output', ] @pulumi.output_type class GetWorkspaceResult: def __init__(__self__, bundle_id=None, directory_id=None, id=None, root_volume_encryption_enabled=None, tags=None, user_volume_encryption_enabled=None, volume_encryption_key=None, workspace_properties=None): if bundle_id and not isinstance(bundle_id, str): raise TypeError("Expected argument 'bundle_id' to be a str") pulumi.set(__self__, "bundle_id", bundle_id) if directory_id and not isinstance(directory_id, str): raise TypeError("Expected argument 'directory_id' to be a str") pulumi.set(__self__, "directory_id", directory_id) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if root_volume_encryption_enabled and not isinstance(root_volume_encryption_enabled, bool): raise TypeError("Expected argument 'root_volume_encryption_enabled' to be a bool") pulumi.set(__self__, "root_volume_encryption_enabled", root_volume_encryption_enabled) if tags and not isinstance(tags, list): raise TypeError("Expected argument 'tags' to be a list") pulumi.set(__self__, "tags", tags) if user_volume_encryption_enabled and not isinstance(user_volume_encryption_enabled, bool): raise TypeError("Expected argument 'user_volume_encryption_enabled' to be a bool") pulumi.set(__self__, "user_volume_encryption_enabled", user_volume_encryption_enabled) if volume_encryption_key and not isinstance(volume_encryption_key, str): raise TypeError("Expected argument 'volume_encryption_key' to be a str") pulumi.set(__self__, "volume_encryption_key", volume_encryption_key) if workspace_properties and not isinstance(workspace_properties, dict): raise TypeError("Expected argument 'workspace_properties' to be a dict") pulumi.set(__self__, "workspace_properties", workspace_properties) @property @pulumi.getter(name="bundleId") def bundle_id(self) -> Optional[str]: return pulumi.get(self, "bundle_id") @property @pulumi.getter(name="directoryId") def directory_id(self) -> Optional[str]: return pulumi.get(self, "directory_id") @property @pulumi.getter def id(self) -> Optional[str]: return pulumi.get(self, "id") @property @pulumi.getter(name="rootVolumeEncryptionEnabled") def root_volume_encryption_enabled(self) -> Optional[bool]: return pulumi.get(self, "root_volume_encryption_enabled") @property @pulumi.getter def tags(self) -> Optional[Sequence['outputs.WorkspaceTag']]: return pulumi.get(self, "tags") @property @pulumi.getter(name="userVolumeEncryptionEnabled") def user_volume_encryption_enabled(self) -> Optional[bool]: return pulumi.get(self, "user_volume_encryption_enabled") @property @pulumi.getter(name="volumeEncryptionKey") def volume_encryption_key(self) -> Optional[str]: return pulumi.get(self, "volume_encryption_key") @property @pulumi.getter(name="workspaceProperties") def workspace_properties(self) -> Optional['outputs.WorkspaceProperties']: return pulumi.get(self, "workspace_properties") class AwaitableGetWorkspaceResult(GetWorkspaceResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetWorkspaceResult( bundle_id=self.bundle_id, 
directory_id=self.directory_id, id=self.id, root_volume_encryption_enabled=self.root_volume_encryption_enabled, tags=self.tags, user_volume_encryption_enabled=self.user_volume_encryption_enabled, volume_encryption_key=self.volume_encryption_key, workspace_properties=self.workspace_properties) def get_workspace(id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult: """ Resource Type definition for AWS::WorkSpaces::Workspace """ __args__ = dict() __args__['id'] = id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:workspaces:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value return AwaitableGetWorkspaceResult( bundle_id=__ret__.bundle_id, directory_id=__ret__.directory_id, id=__ret__.id, root_volume_encryption_enabled=__ret__.root_volume_encryption_enabled, tags=__ret__.tags, user_volume_encryption_enabled=__ret__.user_volume_encryption_enabled, volume_encryption_key=__ret__.volume_encryption_key, workspace_properties=__ret__.workspace_properties) @_utilities.lift_output_func(get_workspace) def get_workspace_output(id: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkspaceResult]: """ Resource Type definition for AWS::WorkSpaces::Workspace """ ...
41.985185
211
0.711009
3,904
0.688779
512
0.090332
3,611
0.637085
0
0
1,336
0.235709
c768daaabaf9920391bc3d2ee09b1a53e4d2788c
24
py
Python
testtools/__init__.py
afy2103/spambayes-9-10-Frozen
383db71e3b7b2141975cf66e6d223bb437511776
[ "PSF-2.0" ]
null
null
null
testtools/__init__.py
afy2103/spambayes-9-10-Frozen
383db71e3b7b2141975cf66e6d223bb437511776
[ "PSF-2.0" ]
null
null
null
testtools/__init__.py
afy2103/spambayes-9-10-Frozen
383db71e3b7b2141975cf66e6d223bb437511776
[ "PSF-2.0" ]
null
null
null
__author__ = 'AlexYang'
12
23
0.75
0
0
0
0
0
0
0
0
10
0.416667
c768fa044e6b10f72fbfbfa85435ada393a83af3
673
py
Python
tests/test_distance.py
mkclairhong/quail
a6d6502746c853518a670d542222eb5fc2b05542
[ "MIT" ]
1
2018-05-30T15:33:26.000Z
2018-05-30T15:33:26.000Z
tests/test_distance.py
mkclairhong/quail
a6d6502746c853518a670d542222eb5fc2b05542
[ "MIT" ]
null
null
null
tests/test_distance.py
mkclairhong/quail
a6d6502746c853518a670d542222eb5fc2b05542
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from quail.distance import * import numpy as np import pytest from scipy.spatial.distance import cdist def test_match(): a = 'A' b = 'B' assert np.equal(match(a, b), 1) def test_euclidean_list(): a = [0, 1, 0] b = [0, 1, 0] assert np.equal(euclidean(a, b), 0) def test_euclidean_array(): a = np.array([0, 1, 0]) b = np.array([0, 1, 0]) assert np.equal(euclidean(a, b), 0) def test_correlation_list(): a = [0, 1, 0] b = [0, 1, 0] assert np.equal(correlation(a, b), 1) def test_correlation_array(): a = np.array([0, 1, 0]) b = np.array([0, 1, 0]) assert np.equal(correlation(a, b), 1)
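# Illustrative extra check (not part of the original file): the cdist import
# above is otherwise unused; this sketch assumes quail's euclidean() is the
# standard Euclidean metric and compares it against scipy's cdist reference.
def test_euclidean_vs_scipy_cdist():
    a = np.array([0.0, 1.0, 0.0])
    b = np.array([1.0, 1.0, 0.0])
    expected = cdist(a.reshape(1, -1), b.reshape(1, -1))[0, 0]
    assert np.isclose(euclidean(a, b), expected)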
21.03125
41
0.580981
0
0
0
0
0
0
0
0
29
0.043091
c769abd3fe7f81479f81afe9e3156873d7f5b0e2
17,050
py
Python
utils/manisfestManager.py
ovitrac/pizza3
0f4dc6e362fd8665c72ec13328df05f9119dfbc3
[ "MIT" ]
1
2022-02-07T14:10:10.000Z
2022-02-07T14:10:10.000Z
utils/manisfestManager.py
ovitrac/Pizza3
0f4dc6e362fd8665c72ec13328df05f9119dfbc3
[ "MIT" ]
null
null
null
utils/manisfestManager.py
ovitrac/Pizza3
0f4dc6e362fd8665c72ec13328df05f9119dfbc3
[ "MIT" ]
null
null
null
#!/usr/bin/env python ############################################################################### # # # manifestManager.py # # # # Work with online data manifests (creating / syncing / validating) # # # # Copyright (C) Michael Imelfort # # # ############################################################################### # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # ############################################################################### __author__ = "Michael Imelfort" __copyright__ = "Copyright 2014" __credits__ = ["Michael Imelfort"] __license__ = "GPLv3" __maintainer__ = "Michael Imelfort" __email__ = "[email protected]" __version__ = "0.35" ############################################################################### ############################################################################### ############################################################################### ############################################################################### __MANIFEST__ = ".dmanifest" ############################################################################### ############################################################################### ############################################################################### ############################################################################### # system includes import os import hashlib import urllib.request, urllib.error, urllib.parse import urllib.request, urllib.parse, urllib.error import shutil import errno # local includes from fileEntity import FileEntity as FE ############################################################################### ############################################################################### ############################################################################### ############################################################################### class ManifestManager(object): """Use this interface for storing and managing file and paths""" def __init__(self, manType=None, timeout=30): self.timeout = timeout self.myExtensions = [".py",".sh"] self.files = [] if manType is not None: self.type = manType else: self.type = "generic" def createManifest(self, path, manifestName=None): """inventory all files in path and create a manifest file""" if manifestName is None: manifestName = __MANIFEST__ # make the root file entity root_path = os.path.abspath(path) root_fe = FE('root', ".", None, "-", 0) self.files.append(root_fe) # now make all the ones below parents = [root_fe] dirs, files = self.listdir(path)[:2] self.walk(parents, root_path, '', dirs, files, skipFile=manifestName) with open(os.path.join(path, manifestName), 'w') as man_fh: # print the header man_fh.write("#\t::: %s ::: \tPizza3 manifest version %s\n\n" % (self.type, __version__)) for f in self.files: if f.parent is not None: man_fh.write("%s\n" % f) def diffManifests(self, localManifestLocation, sourceManifestLocation, 
localManifestName=None, sourceManifestName=None, printDiffs=False): """check for any differences between two manifests if remote is true then sourceManifestLocation is a URL returns a list of files that need to be updated """ if localManifestName is None: localManifestName = __MANIFEST__ if sourceManifestName is None: sourceManifestName = __MANIFEST__ # get the "type" of the local manifest l_type = "generic" with open(os.path.join(localManifestLocation, localManifestName)) as l_man: for line in l_man: if line[0] == "#": l_type = self.getManType(line) break # load the source manifest s_type = "generic" source_man = {} source = "" # first we assume it is remote try: s_man = urllib.request.urlopen(sourceManifestLocation + "/" + sourceManifestName, None, self.timeout) source = sourceManifestLocation + "/" except ValueError: # then it is probably a file s_man = open(os.path.join(sourceManifestLocation, sourceManifestName)) source = os.path.join(sourceManifestLocation) + os.path.sep except urllib.error.URLError: # problems connecting to server, perhaps user is behind a proxy or firewall print("Error: failed to connect to server.") return (None, None, None, None, None) first_line = True for line in s_man: if first_line: first_line = False if line[0] == "#": # get the type of the manifest s_type = self.getManType(line) if s_type != l_type: print("Error: type of source manifest (%s) does not match type of local manifest (%s)" % (s_type, l_type)) return (None, None, None, None, None) else: # no type specified print("Error: type of source manifest is not specified. Is this a valid manifest file?") return (None, None, None, None, None) self.type = l_type if line[0] != "#": fields = line.rstrip().split("\t") # set the dict up as {path => [hash, size, seenLocal] source_man[fields[0]] = [fields[1], fields[2], False] # keep lists of modifications deleted = [] addedDirs = [] addedFiles = [] modified = [] with open(os.path.join(localManifestLocation, localManifestName)) as l_man: for line in l_man: if line[0] != "#": fields = line.rstrip().split("\t") try: if source_man[fields[0]][0] != fields[1]: # hashes don't match modified.append(fields[0]) # seen this file source_man[fields[0]][2] = True except KeyError: # this file has been deleted from the source manifest deleted.append(fields[0]) # check for new files for f in list(source_man.keys()): if source_man[f][2] == False: if source_man[f][0] == '-': addedDirs.append(f) else: addedFiles.append(f) if printDiffs: new_size = 0 modified_size = 0 for f in addedFiles: new_size += int(source_man[f][1]) for f in modified: modified_size += int(source_man[f][1]) if len(addedFiles) > 0: print("#------------------------------------------------------") print("# Source contains %d new file(s) (%s)" % (len(addedFiles), self.formatData(new_size))) for f in addedFiles: print("\t".join([self.formatData(int(source_man[f][1])), f])) if len(addedDirs) > 0: print("#------------------------------------------------------") print("# Source contains %d new folders(s)" % (len(addedDirs))) for f in addedDirs: print(f) if len(modified) > 0: print("#------------------------------------------------------") print("# Source contains %d modified file(s) (%s)" % (len(modified), self.formatData(modified_size))) for f in modified: print(f) if len(deleted) > 0: print("#------------------------------------------------------") print("# %d files have been deleted in the source:" % len(deleted)) for f in deleted: print(f) else: return (source, [(a, source_man[a]) for a in addedFiles], [(a, 
source_man[a]) for a in addedDirs], deleted, [(m, source_man[m]) for m in modified]) def updateManifest(self, localManifestLocation, sourceManifestLocation, localManifestName=None, sourceManifestName=None, prompt=True): """Update local files based on remote changes""" # get the diffs source, added_files, added_dirs, deleted, modified = self.diffManifests(localManifestLocation, sourceManifestLocation, localManifestName, sourceManifestName) # bail if the diff failed if source is None: return False # no changes by default do_down = False if prompt: total_size = 0 for f in added_files: total_size += int(f[1][1]) for f in modified: total_size += int(f[1][1]) if total_size != 0: print("****************************************************************") print("%d new file(s) to be downloaded from source" % len(added_files)) print("%d existing file(s) to be updated" % len(modified)) print("%s will need to be downloaded" % self.formatData(total_size)) do_down = self.promptUserDownload() if not do_down: print("Download aborted") update_manifest = False if do_down: update_manifest = True for add in added_dirs: # make the dirs first full_path = os.path.abspath(os.path.join(localManifestLocation, add[0])) self.makeSurePathExists(full_path) for add in added_files: full_path = os.path.abspath(os.path.join(localManifestLocation, add[0])) urllib.request.urlretrieve(source+add[0], full_path) for modify in modified: full_path = os.path.abspath(os.path.join(localManifestLocation, modify[0])) urllib.request.urlretrieve(source+modify[0], full_path) if update_manifest: print("(re) creating manifest file (please be patient)") self.createManifest(localManifestLocation, manifestName=localManifestName) return True def getManType(self, line): """Work out the manifest type from the first line of the file""" return line.rstrip().split("##")[1] def formatData(self, amount): """Pretty print file sizes""" if amount < 1024*1024: return "%d B" % amount elif amount < 1024*1024*1024: return "%0.2f MB" % (float(amount)/(1024.*1024.)) elif amount < 1024*1024*1024*1024: return "%0.2f GB" % (float(amount)/(1024.*1024.*1024.)) elif amount < 1024*1024*1024*1024*1024: return "%0.2f TB" % (float(amount)/(1024.*1024.*1024.*1024.)) #----------------------------------------------------------------------------- # FS utilities def makeSurePathExists(self, path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise def promptUserDownload(self): """Check that the user is OK with making changes""" input_not_ok = True minimal=False valid_responses = {'Y':True,'N':False} vrs = ",".join([x.lower() for x in list(valid_responses.keys())]) while(input_not_ok): if(minimal): option = input("Download? ("+vrs+") : ").upper() else: option = input("Confirm you want to download this data\n" \ "Changes *WILL* be permanent\n" \ "Continue? 
("+vrs+") : ").upper() if(option in valid_responses): print("****************************************************************") return valid_responses[option] else: print("ERROR: unrecognised choice '"+option+"'") minimal = True def walk(self, parents, full_path, rel_path, dirs, files, skipFile=__MANIFEST__): """recursive walk through directory tree""" # first do files here for f in files: if (f != skipFile) and os.path.splitext(f)[1] in self.myExtensions: path = os.path.join(full_path, f) self.files.append(FE(f, rel_path, parents[-1], self.hashfile(path), os.path.getsize(path) ) ) for d in dirs: # the walk will go into these dirs first tmp_fe = FE(d, rel_path, parents[-1], "-", 0) self.files.append(tmp_fe) parents.append(tmp_fe) new_full_path = os.path.join(full_path, d) new_rel_path = os.path.join(rel_path, d) new_dirs, new_files = self.listdir(new_full_path)[:2] self.walk(parents, new_full_path, new_rel_path, new_dirs, new_files) parents.pop() def listdir(self, path): """List dirs, files etc in path (one dir deep)""" dirs, files, links = [], [], [] for name in os.listdir(path): path_name = os.path.join(path, name) if os.path.isdir(path_name): dirs.append(name) elif os.path.isfile(path_name): files.append(name) elif os.path.islink(path_name): links.append(name) return dirs, files, links def hashfile(self, fileName, blocksize=65536): """Hash a file and return the digest""" hasher = hashlib.sha256() with open(fileName,"rb") as fh: buf = fh.read(blocksize) while len(buf) > 0: hasher.update(buf.strip()) buf = fh.read(blocksize) return hasher.hexdigest() return "?" ############################################################################### ############################################################################### ############################################################################### ############################################################################### # %% DEBUG # =================================================== # main() # =================================================== # for debugging purposes (code called as a script) # the code is called from here # =================================================== if __name__ == '__main__': man = ManifestManager() man.createManifest("/home/olivi/billy/python",manifestName="Pizza3.manifest")
44.285714
130
0.436716
12,937
0.758768
0
0
0
0
0
0
6,275
0.368035
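For orientation, a minimal usage sketch of the ManifestManager class contained in the record above. The directory "./mirror" and the URL are hypothetical placeholders (not values from the source); only the constructor and the createManifest, diffManifests and updateManifest methods that appear verbatim in the record are exercised.

# Sketch only: exercises the ManifestManager API shown in the record above.
# "./mirror" and "http://example.org/data" are hypothetical placeholders.
from manifestManager import ManifestManager

man = ManifestManager(manType="generic", timeout=30)

# Inventory a local directory and write a .dmanifest file into it.
man.createManifest("./mirror")

# Print the differences between the local manifest and a remote one.
man.diffManifests("./mirror", "http://example.org/data", printDiffs=True)

# Download new/changed files (prompts first) and refresh the local manifest.
man.updateManifest("./mirror", "http://example.org/data", prompt=True)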
c76b9236e24c24d26fa468bcec0fccac39b536c2
27,999
py
Python
pysnmp/ZYXEL-AclV2-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/ZYXEL-AclV2-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/ZYXEL-AclV2-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module ZYXEL-AclV2-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-AclV2-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 21:43:03 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint") InetAddress, = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress") EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus") PortList, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "PortList") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Counter32, Integer32, Counter64, NotificationType, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, TimeTicks, iso, Gauge32, Unsigned32, IpAddress, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Integer32", "Counter64", "NotificationType", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "TimeTicks", "iso", "Gauge32", "Unsigned32", "IpAddress", "ObjectIdentity") RowStatus, MacAddress, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "MacAddress", "DisplayString", "TextualConvention") esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt") zyxelAclV2 = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105)) if mibBuilder.loadTexts: zyxelAclV2.setLastUpdated('201207010000Z') if mibBuilder.loadTexts: zyxelAclV2.setOrganization('Enterprise Solution ZyXEL') zyxelAclV2ClassifierStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1)) zyxelAclV2PolicyStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2)) zyxelAclV2TrapInfoObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 3)) zyxelAclV2Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 4)) zyxelAclV2ClassifierTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1), ) if mibBuilder.loadTexts: zyxelAclV2ClassifierTable.setStatus('current') zyxelAclV2ClassifierEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1), ).setIndexNames((0, "ZYXEL-AclV2-MIB", "zyAclV2ClassifierName")) if mibBuilder.loadTexts: zyxelAclV2ClassifierEntry.setStatus('current') zyAclV2ClassifierName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 1), DisplayString()) if mibBuilder.loadTexts: zyAclV2ClassifierName.setStatus('current') zyAclV2ClassifierState = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 2), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierState.setStatus('current') zyAclV2ClassifierWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierWeight.setStatus('current') zyAclV2ClassifierCountState = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 4), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: 
zyAclV2ClassifierCountState.setStatus('current') zyAclV2ClassifierLogState = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 5), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierLogState.setStatus('current') zyAclV2ClassifierTimeRange = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierTimeRange.setStatus('current') zyAclV2ClassifierMatchCount = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 1, 1, 7), Counter64()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierMatchCount.setStatus('current') zyxelAclV2ClassifierEthernetTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2), ) if mibBuilder.loadTexts: zyxelAclV2ClassifierEthernetTable.setStatus('current') zyxelAclV2ClassifierEthernetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1), ).setIndexNames((0, "ZYXEL-AclV2-MIB", "zyAclV2ClassifierName")) if mibBuilder.loadTexts: zyxelAclV2ClassifierEthernetEntry.setStatus('current') zyAclV2ClassifierEthernetSourcePorts = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 1), PortList()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetSourcePorts.setStatus('current') zyAclV2ClassifierEthernetSourceTrunks = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 2), PortList()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetSourceTrunks.setStatus('current') zyAclV2ClassifierEthernetPacketFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("all", 1), ("ethernetIIUntagged", 2), ("ethernetIITagged", 3), ("ethernet802dot3Untagged", 4), ("ethernet802dot3Tagged", 5)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetPacketFormat.setStatus('current') zyAclV2ClassifierEthernet8021pPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernet8021pPriority.setStatus('current') zyAclV2ClassifierEthernetInner8021pPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetInner8021pPriority.setStatus('current') zyAclV2ClassifierEthernetType = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetType.setStatus('current') zyAclV2ClassifierEthernetSourceMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 7), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetSourceMacAddress.setStatus('current') zyAclV2ClassifierEthernetSourceMACMask = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 8), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetSourceMACMask.setStatus('current') zyAclV2ClassifierEthernetDestinationMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 9), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetDestinationMacAddress.setStatus('current') zyAclV2ClassifierEthernetDestinationMACMask = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 2, 1, 10), 
MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierEthernetDestinationMACMask.setStatus('current') zyxelAclV2ClassifierVlanTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 3), ) if mibBuilder.loadTexts: zyxelAclV2ClassifierVlanTable.setStatus('current') zyxelAclV2ClassifierVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 3, 1), ).setIndexNames((0, "ZYXEL-AclV2-MIB", "zyAclV2ClassifierName")) if mibBuilder.loadTexts: zyxelAclV2ClassifierVlanEntry.setStatus('current') zyAclV2ClassifierVlanMap1k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 3, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierVlanMap1k.setStatus('current') zyAclV2ClassifierVlanMap2k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierVlanMap2k.setStatus('current') zyAclV2ClassifierVlanMap3k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 3, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierVlanMap3k.setStatus('current') zyAclV2ClassifierVlanMap4k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierVlanMap4k.setStatus('current') zyxelAclV2ClassifierInnerVlanTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 4), ) if mibBuilder.loadTexts: zyxelAclV2ClassifierInnerVlanTable.setStatus('current') zyxelAclV2ClassifierInnerVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 4, 1), ).setIndexNames((0, "ZYXEL-AclV2-MIB", "zyAclV2ClassifierName")) if mibBuilder.loadTexts: zyxelAclV2ClassifierInnerVlanEntry.setStatus('current') zyAclV2ClassifierInnerVlanMap1k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 4, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierInnerVlanMap1k.setStatus('current') zyAclV2ClassifierInnerVlanMap2k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 4, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierInnerVlanMap2k.setStatus('current') zyAclV2ClassifierInnerVlanMap3k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierInnerVlanMap3k.setStatus('current') zyAclV2ClassifierInnerVlanMap4k = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 4, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierInnerVlanMap4k.setStatus('current') zyxelAclV2ClassifierIpTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5), ) if mibBuilder.loadTexts: zyxelAclV2ClassifierIpTable.setStatus('current') zyxelAclV2ClassifierIpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1), ).setIndexNames((0, "ZYXEL-AclV2-MIB", "zyAclV2ClassifierName")) if mibBuilder.loadTexts: zyxelAclV2ClassifierIpEntry.setStatus('current') zyAclV2ClassifierIpPacketLenRangeStart = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 1), 
Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpPacketLenRangeStart.setStatus('current') zyAclV2ClassifierIpPacketLenRangeEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpPacketLenRangeEnd.setStatus('current') zyAclV2ClassifierIpDSCP = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpDSCP.setStatus('current') zyAclV2ClassifierIpPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpPrecedence.setStatus('current') zyAclV2ClassifierIpToS = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpToS.setStatus('current') zyAclV2ClassifierIpProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpProtocol.setStatus('current') zyAclV2ClassifierIpEstablishOnly = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 7), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpEstablishOnly.setStatus('current') zyAclV2ClassifierIpSourceIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 8), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpSourceIpAddress.setStatus('current') zyAclV2ClassifierIpSourceIpMaskBits = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpSourceIpMaskBits.setStatus('current') zyAclV2ClassifierIpDestinationIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 10), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpDestinationIpAddress.setStatus('current') zyAclV2ClassifierIpDestinationIpMaskBits = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 11), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpDestinationIpMaskBits.setStatus('current') zyAclV2ClassifierIpSourceSocketRangeStart = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 12), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpSourceSocketRangeStart.setStatus('current') zyAclV2ClassifierIpSourceSocketRangeEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 13), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpSourceSocketRangeEnd.setStatus('current') zyAclV2ClassifierIpDestinationSocketRangeStart = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 14), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpDestinationSocketRangeStart.setStatus('current') zyAclV2ClassifierIpDestinationSocketRangeEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 5, 1, 15), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIpDestinationSocketRangeEnd.setStatus('current') zyxelAclV2ClassifierIpv6Table = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6), ) if mibBuilder.loadTexts: zyxelAclV2ClassifierIpv6Table.setStatus('current') zyxelAclV2ClassifierIpv6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1), ).setIndexNames((0, 
"ZYXEL-AclV2-MIB", "zyAclV2ClassifierName")) if mibBuilder.loadTexts: zyxelAclV2ClassifierIpv6Entry.setStatus('current') zyAclV2ClassifierIPv6DSCP = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6DSCP.setStatus('current') zyAclV2ClassifierIPv6NextHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6NextHeader.setStatus('current') zyAclV2ClassifierIPv6EstablishOnly = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 3), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6EstablishOnly.setStatus('current') zyAclV2ClassifierIPv6SourceIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 4), InetAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6SourceIpAddress.setStatus('current') zyAclV2ClassifierIPv6SourceIpPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6SourceIpPrefixLength.setStatus('current') zyAclV2ClassifierIPv6DestinationIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 6), InetAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6DestinationIpAddress.setStatus('current') zyAclV2ClassifierIPv6DestinationIpPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 6, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2ClassifierIPv6DestinationIpPrefixLength.setStatus('current') zyxelAclV2ClassifierMatchOrder = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("auto", 1), ("manual", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyxelAclV2ClassifierMatchOrder.setStatus('current') zyxelAclV2ClassifierLoggingState = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 8), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyxelAclV2ClassifierLoggingState.setStatus('current') zyxelAclV2ClassifierLoggingInterval = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyxelAclV2ClassifierLoggingInterval.setStatus('current') zyxelAclV2PolicyTable = MibTable((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1), ) if mibBuilder.loadTexts: zyxelAclV2PolicyTable.setStatus('current') zyxelAclV2PolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1), ).setIndexNames((0, "ZYXEL-AclV2-MIB", "zyAclV2PolicyName")) if mibBuilder.loadTexts: zyxelAclV2PolicyEntry.setStatus('current') zyAclV2PolicyName = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 1), DisplayString()) if mibBuilder.loadTexts: zyAclV2PolicyName.setStatus('current') zyAclV2PolicyState = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 2), EnabledStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyState.setStatus('current') zyAclV2PolicyClassifier = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 3), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyClassifier.setStatus('current') zyAclV2PolicyVid = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly") if 
mibBuilder.loadTexts: zyAclV2PolicyVid.setStatus('current') zyAclV2PolicyEgressPort = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyEgressPort.setStatus('current') zyAclV2Policy8021pPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2Policy8021pPriority.setStatus('current') zyAclV2PolicyDSCP = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyDSCP.setStatus('current') zyAclV2PolicyTOS = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyTOS.setStatus('current') zyAclV2PolicyBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyBandwidth.setStatus('current') zyAclV2PolicyOutOfProfileDSCP = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyOutOfProfileDSCP.setStatus('current') zyAclV2PolicyForwardingAction = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noChange", 1), ("discardThePacket", 2), ("doNotDropTheMatchingFramePreviouslyMarkedForDropping", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyForwardingAction.setStatus('current') zyAclV2PolicyPriorityAction = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("noChange", 1), ("setThePackets802dot1Priority", 2), ("sendThePacketToPriorityQueue", 3), ("replaceThe802dot1PriorityFieldWithTheIpTosValue", 4), ("replaceThe802dot1PriorityByInner802dot1Priority", 5)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyPriorityAction.setStatus('current') zyAclV2PolicyDiffServAction = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("noChange", 1), ("setThePacketsTosField", 2), ("replaceTheIpTosFieldWithThe802dot1PriorityValue", 3), ("setTheDiffservCodepointFieldInTheFrame", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyDiffServAction.setStatus('current') zyAclV2PolicyOutgoingAction = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 14), Bits().clone(namedValues=NamedValues(("sendThePacketToTheMirrorPort", 0), ("sendThePacketToTheEgressPort", 1), ("sendTheMatchingFramesToTheEgressPort", 2), ("setThePacketVlanId", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyOutgoingAction.setStatus('current') zyAclV2PolicyMeteringState = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 15), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyMeteringState.setStatus('current') zyAclV2PolicyOutOfProfileAction = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 16), Bits().clone(namedValues=NamedValues(("dropThePacket", 0), ("changeTheDscpValue", 1), ("setOutDropPrecedence", 2), ("doNotDropTheMatchingFramePreviouslyMarkedForDropping", 3)))).setMaxAccess("readonly") 
if mibBuilder.loadTexts: zyAclV2PolicyOutOfProfileAction.setStatus('current') zyAclV2PolicyRowstatus = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 17), RowStatus()).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyRowstatus.setStatus('current') zyAclV2PolicyQueueAction = MibTableColumn((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 2, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noChange", 1), ("sendThePacketToPriorityQueue", 2)))).setMaxAccess("readonly") if mibBuilder.loadTexts: zyAclV2PolicyQueueAction.setStatus('current') zyAclV2TrapClassifierLogMatchCount = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 3, 1), Integer32()) if mibBuilder.loadTexts: zyAclV2TrapClassifierLogMatchCount.setStatus('current') zyAclV2ClassifierLogNotification = NotificationType((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 105, 4, 1)).setObjects(("ZYXEL-AclV2-MIB", "zyAclV2ClassifierName"), ("ZYXEL-AclV2-MIB", "zyAclV2TrapClassifierLogMatchCount")) if mibBuilder.loadTexts: zyAclV2ClassifierLogNotification.setStatus('current') mibBuilder.exportSymbols("ZYXEL-AclV2-MIB", zyAclV2ClassifierInnerVlanMap1k=zyAclV2ClassifierInnerVlanMap1k, zyAclV2ClassifierIPv6DSCP=zyAclV2ClassifierIPv6DSCP, zyAclV2ClassifierEthernetInner8021pPriority=zyAclV2ClassifierEthernetInner8021pPriority, zyAclV2ClassifierInnerVlanMap4k=zyAclV2ClassifierInnerVlanMap4k, zyAclV2ClassifierEthernetPacketFormat=zyAclV2ClassifierEthernetPacketFormat, zyAclV2ClassifierVlanMap2k=zyAclV2ClassifierVlanMap2k, zyxelAclV2PolicyStatus=zyxelAclV2PolicyStatus, zyAclV2PolicyClassifier=zyAclV2PolicyClassifier, zyxelAclV2ClassifierInnerVlanTable=zyxelAclV2ClassifierInnerVlanTable, zyAclV2ClassifierIpEstablishOnly=zyAclV2ClassifierIpEstablishOnly, zyAclV2ClassifierEthernetType=zyAclV2ClassifierEthernetType, zyAclV2ClassifierEthernetSourceMacAddress=zyAclV2ClassifierEthernetSourceMacAddress, zyAclV2ClassifierIpSourceIpMaskBits=zyAclV2ClassifierIpSourceIpMaskBits, zyAclV2ClassifierEthernetDestinationMacAddress=zyAclV2ClassifierEthernetDestinationMacAddress, zyAclV2PolicyOutOfProfileDSCP=zyAclV2PolicyOutOfProfileDSCP, zyAclV2ClassifierIpDestinationSocketRangeEnd=zyAclV2ClassifierIpDestinationSocketRangeEnd, zyAclV2PolicyEgressPort=zyAclV2PolicyEgressPort, zyAclV2PolicyRowstatus=zyAclV2PolicyRowstatus, zyAclV2ClassifierEthernetSourceTrunks=zyAclV2ClassifierEthernetSourceTrunks, zyxelAclV2ClassifierInnerVlanEntry=zyxelAclV2ClassifierInnerVlanEntry, zyAclV2ClassifierLogNotification=zyAclV2ClassifierLogNotification, zyAclV2PolicyOutgoingAction=zyAclV2PolicyOutgoingAction, zyAclV2ClassifierIpDestinationIpAddress=zyAclV2ClassifierIpDestinationIpAddress, zyAclV2PolicyMeteringState=zyAclV2PolicyMeteringState, zyAclV2ClassifierInnerVlanMap2k=zyAclV2ClassifierInnerVlanMap2k, zyAclV2ClassifierIpPrecedence=zyAclV2ClassifierIpPrecedence, zyAclV2PolicyVid=zyAclV2PolicyVid, zyxelAclV2ClassifierEntry=zyxelAclV2ClassifierEntry, zyAclV2ClassifierIpDestinationIpMaskBits=zyAclV2ClassifierIpDestinationIpMaskBits, zyxelAclV2Notifications=zyxelAclV2Notifications, zyxelAclV2PolicyTable=zyxelAclV2PolicyTable, zyxelAclV2ClassifierMatchOrder=zyxelAclV2ClassifierMatchOrder, zyAclV2ClassifierIpDSCP=zyAclV2ClassifierIpDSCP, zyAclV2ClassifierWeight=zyAclV2ClassifierWeight, zyAclV2ClassifierMatchCount=zyAclV2ClassifierMatchCount, zyAclV2PolicyPriorityAction=zyAclV2PolicyPriorityAction, zyAclV2TrapClassifierLogMatchCount=zyAclV2TrapClassifierLogMatchCount, 
zyxelAclV2ClassifierEthernetEntry=zyxelAclV2ClassifierEthernetEntry, zyAclV2ClassifierIpPacketLenRangeStart=zyAclV2ClassifierIpPacketLenRangeStart, zyAclV2ClassifierEthernetSourceMACMask=zyAclV2ClassifierEthernetSourceMACMask, zyAclV2ClassifierEthernetDestinationMACMask=zyAclV2ClassifierEthernetDestinationMACMask, zyAclV2ClassifierVlanMap3k=zyAclV2ClassifierVlanMap3k, zyAclV2ClassifierTimeRange=zyAclV2ClassifierTimeRange, zyxelAclV2ClassifierIpv6Entry=zyxelAclV2ClassifierIpv6Entry, zyAclV2ClassifierIPv6EstablishOnly=zyAclV2ClassifierIPv6EstablishOnly, zyAclV2ClassifierIPv6DestinationIpPrefixLength=zyAclV2ClassifierIPv6DestinationIpPrefixLength, zyxelAclV2ClassifierIpEntry=zyxelAclV2ClassifierIpEntry, zyAclV2ClassifierIpToS=zyAclV2ClassifierIpToS, zyAclV2ClassifierEthernetSourcePorts=zyAclV2ClassifierEthernetSourcePorts, zyAclV2PolicyQueueAction=zyAclV2PolicyQueueAction, zyAclV2ClassifierIPv6NextHeader=zyAclV2ClassifierIPv6NextHeader, zyAclV2ClassifierVlanMap4k=zyAclV2ClassifierVlanMap4k, zyAclV2ClassifierEthernet8021pPriority=zyAclV2ClassifierEthernet8021pPriority, zyxelAclV2TrapInfoObjects=zyxelAclV2TrapInfoObjects, zyxelAclV2ClassifierIpTable=zyxelAclV2ClassifierIpTable, zyAclV2ClassifierIPv6SourceIpAddress=zyAclV2ClassifierIPv6SourceIpAddress, zyxelAclV2ClassifierLoggingState=zyxelAclV2ClassifierLoggingState, zyxelAclV2=zyxelAclV2, zyxelAclV2ClassifierIpv6Table=zyxelAclV2ClassifierIpv6Table, zyAclV2PolicyDiffServAction=zyAclV2PolicyDiffServAction, zyAclV2ClassifierIpDestinationSocketRangeStart=zyAclV2ClassifierIpDestinationSocketRangeStart, zyAclV2ClassifierVlanMap1k=zyAclV2ClassifierVlanMap1k, zyAclV2PolicyDSCP=zyAclV2PolicyDSCP, zyxelAclV2ClassifierEthernetTable=zyxelAclV2ClassifierEthernetTable, zyAclV2ClassifierLogState=zyAclV2ClassifierLogState, zyAclV2ClassifierInnerVlanMap3k=zyAclV2ClassifierInnerVlanMap3k, zyAclV2ClassifierIPv6SourceIpPrefixLength=zyAclV2ClassifierIPv6SourceIpPrefixLength, zyAclV2PolicyBandwidth=zyAclV2PolicyBandwidth, zyxelAclV2ClassifierLoggingInterval=zyxelAclV2ClassifierLoggingInterval, zyAclV2Policy8021pPriority=zyAclV2Policy8021pPriority, zyAclV2PolicyForwardingAction=zyAclV2PolicyForwardingAction, zyAclV2PolicyName=zyAclV2PolicyName, PYSNMP_MODULE_ID=zyxelAclV2, zyAclV2ClassifierName=zyAclV2ClassifierName, zyAclV2ClassifierIPv6DestinationIpAddress=zyAclV2ClassifierIPv6DestinationIpAddress, zyAclV2ClassifierState=zyAclV2ClassifierState, zyxelAclV2ClassifierVlanEntry=zyxelAclV2ClassifierVlanEntry, zyAclV2PolicyState=zyAclV2PolicyState, zyAclV2ClassifierIpSourceIpAddress=zyAclV2ClassifierIpSourceIpAddress, zyxelAclV2ClassifierTable=zyxelAclV2ClassifierTable, zyxelAclV2ClassifierStatus=zyxelAclV2ClassifierStatus, zyAclV2ClassifierIpSourceSocketRangeEnd=zyAclV2ClassifierIpSourceSocketRangeEnd, zyAclV2PolicyTOS=zyAclV2PolicyTOS, zyAclV2ClassifierIpPacketLenRangeEnd=zyAclV2ClassifierIpPacketLenRangeEnd, zyxelAclV2PolicyEntry=zyxelAclV2PolicyEntry, zyAclV2ClassifierIpProtocol=zyAclV2ClassifierIpProtocol, zyxelAclV2ClassifierVlanTable=zyxelAclV2ClassifierVlanTable, zyAclV2PolicyOutOfProfileAction=zyAclV2PolicyOutOfProfileAction, zyAclV2ClassifierIpSourceSocketRangeStart=zyAclV2ClassifierIpSourceSocketRangeStart, zyAclV2ClassifierCountState=zyAclV2ClassifierCountState)
144.324742
5,641
0.792314
0
0
0
0
0
0
0
0
3,564
0.12729
c76c70c2e310ab6dd7d23270c230a7b48cbff5cf
729
py
Python
temperature.py
rhwlr/TEST_PRELIM_SKILLS_EXAM
a776ab7631fac8bed1aea0470918e6250752ce8e
[ "MIT" ]
null
null
null
temperature.py
rhwlr/TEST_PRELIM_SKILLS_EXAM
a776ab7631fac8bed1aea0470918e6250752ce8e
[ "MIT" ]
null
null
null
temperature.py
rhwlr/TEST_PRELIM_SKILLS_EXAM
a776ab7631fac8bed1aea0470918e6250752ce8e
[ "MIT" ]
null
null
null
class Temperature:
    def __init__(self, kelvin=None, celsius=None, fahrenheit=None):
        values = [x for x in [kelvin, celsius, fahrenheit] if x]
        if len(values) < 1:
            raise ValueError('Need argument')
        if len(values) > 1:
            raise ValueError('Only one argument')

        if celsius is not None:
            self.kelvin = celsius + 273.15
        elif fahrenheit is not None:
            self.kelvin = (fahrenheit - 32) * 5 / 9 + 273.15
        else:
            self.kelvin = kelvin

        if self.kelvin < 0:
            raise ValueError('Temperature in Kelvin cannot be negative')

    def __str__(self):
        return f'Temperature = {self.kelvin} Kelvins'
29.16
72
0.562414
706
0.96845
0
0
0
0
0
0
114
0.156379
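A short usage sketch for the Temperature class in the record above. The constructor accepts exactly one of kelvin, celsius or fahrenheit and stores the value in kelvins; note that, as written, the argument check filters falsy values, so passing a literal 0 is treated as a missing argument.

# Sketch: exercising the Temperature class from the record above.
t1 = Temperature(celsius=25)        # 25 degC  -> 298.15 K
t2 = Temperature(fahrenheit=212)    # 212 degF -> 373.15 K
print(t1)                           # Temperature = 298.15 Kelvins
print(t2)                           # Temperature = 373.15 Kelvins

# No argument, two arguments, or a value below 0 K raises ValueError.
try:
    Temperature(kelvin=300, celsius=20)
except ValueError as err:
    print(err)                      # Only one argument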
c76ca1375282328ef3e6038f93b1edf1d46d7f49
1,728
py
Python
af/shovel/test_canning.py
mimi89999/pipeline
3e9eaf74c0966df907a230fbe89407c2bbc3d930
[ "BSD-3-Clause" ]
null
null
null
af/shovel/test_canning.py
mimi89999/pipeline
3e9eaf74c0966df907a230fbe89407c2bbc3d930
[ "BSD-3-Clause" ]
null
null
null
af/shovel/test_canning.py
mimi89999/pipeline
3e9eaf74c0966df907a230fbe89407c2bbc3d930
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python2.7

import unittest

import canning


class TestNop(unittest.TestCase):
    def test_nop(self):
        canning.NopTeeFd.write("asdf")


class TestSlice(unittest.TestCase):
    REPORT = "20130505T065614Z-VN-AS24173-dns_consistency-no_report_id-0.1.0-probe.yaml"

    @staticmethod
    def rpt(year):
        assert year < 10000
        return "{:04d}1231T065614Z-VN-AS24173-dns_consistency-no_report_id-0.1.0-probe.yaml".format(
            year
        )

    def test_empty(self):
        asis, tarfiles = canning.pack_bucket(tuple())
        self.assertFalse(asis)
        self.assertFalse(tarfiles)

    def test_badname(self):
        self.assertRaises(RuntimeError, canning.pack_bucket, [("foo", 42)])
        self.assertRaises(
            RuntimeError, canning.pack_bucket, [("2013-05-05/" + self.REPORT, 42)]
        )

    def test_single(self):
        for sz in [0, 1, 65 * 1048576]:
            asis, tarfiles = canning.pack_bucket([(self.REPORT, sz)])
            self.assertEqual(asis, [self.REPORT])
            self.assertFalse(tarfiles)

    def test_packing(self):
        asis, tarfiles = canning.pack_bucket(
            [(self.rpt(0), 42), (self.rpt(1), 64), (self.rpt(2), 64 * 1048576)]
        )
        self.assertEqual(asis, [self.rpt(2)])
        self.assertEqual(tarfiles, {"dns_consistency.0.tar": map(self.rpt, (0, 1))})

    def test_stupid(self):
        # FIXME: is it really good behaviour?...
        asis, tarfiles = canning.pack_bucket(
            [(self.rpt(0), 42), (self.rpt(1), 64 * 1048576 - 1), (self.rpt(2), 64)]
        )
        self.assertEqual(asis, map(self.rpt, (0, 1, 2)))
        self.assertEqual(tarfiles, {})


if __name__ == "__main__":
    unittest.main()
30.315789
100
0.609375
1,615
0.934606
0
0
188
0.108796
0
0
273
0.157986
c76d58f2d02929d5eb4690fddd86e4d2f3a6dc3d
244
py
Python
Exercicios/ex061.py
jlsmirandela/Curso_Python
2419b68d335a2a42beb3e98fb93552aca1264cae
[ "MIT" ]
null
null
null
Exercicios/ex061.py
jlsmirandela/Curso_Python
2419b68d335a2a42beb3e98fb93552aca1264cae
[ "MIT" ]
null
null
null
Exercicios/ex061.py
jlsmirandela/Curso_Python
2419b68d335a2a42beb3e98fb93552aca1264cae
[ "MIT" ]
null
null
null
print('-+-' *10)
print(' GERADOR DE PA')
print('+-+' * 10)
c = 1
ter = int(input('Insira o primeiro termo - '))
rz = int(input('Insira a razão - '))
while c <= 10:
    print(ter, ' → ', end=' ')
    ter += rz
    c += 1
print('FIM')
14.352941
46
0.487705
0
0
0
0
0
0
0
0
95
0.384615
c76e7fcaeb2193c977b2c4ee81febf00b7763cee
2,175
py
Python
gpytorch/models/approximate_gp.py
phumm/gpytorch
4e8042bcecda049956f8f9e823d82ba6340766d5
[ "MIT" ]
1
2019-09-30T06:51:03.000Z
2019-09-30T06:51:03.000Z
gpytorch/models/approximate_gp.py
phumm/gpytorch
4e8042bcecda049956f8f9e823d82ba6340766d5
[ "MIT" ]
null
null
null
gpytorch/models/approximate_gp.py
phumm/gpytorch
4e8042bcecda049956f8f9e823d82ba6340766d5
[ "MIT" ]
1
2020-09-16T16:35:27.000Z
2020-09-16T16:35:27.000Z
#!/usr/bin/env python3

from .gp import GP
from .pyro import _PyroMixin  # This will only contain functions if Pyro is installed


class ApproximateGP(GP, _PyroMixin):
    def __init__(self, variational_strategy):
        super().__init__()
        self.variational_strategy = variational_strategy

    def forward(self, x):
        """
        As in the exact GP setting, the user-defined forward method should return the GP prior mean and covariance
        evaluated at input locations x.
        """
        raise NotImplementedError

    def pyro_guide(self, input, beta=1.0, name_prefix=""):
        """
        (For Pyro integration only). The component of a `pyro.guide` that corresponds to drawing samples from
        the latent GP function.

        Args:
            :attr:`input` (:obj:`torch.Tensor`)
                The inputs :math:`\mathbf X`.
            :attr:`beta` (float, default=1.)
                How much to scale the :math:`\text{KL} [ q(\mathbf f) \Vert p(\mathbf f) ]` term by.
            :attr:`name_prefix` (str, default="")
                A name prefix to prepend to pyro sample sites.
        """
        return super().pyro_guide(input, beta=beta, name_prefix=name_prefix)

    def pyro_model(self, input, beta=1.0, name_prefix=""):
        r"""
        (For Pyro integration only). The component of a `pyro.model` that corresponds to drawing samples from
        the latent GP function.

        Args:
            :attr:`input` (:obj:`torch.Tensor`)
                The inputs :math:`\mathbf X`.
            :attr:`beta` (float, default=1.)
                How much to scale the :math:`\text{KL} [ q(\mathbf f) \Vert p(\mathbf f) ]` term by.
            :attr:`name_prefix` (str, default="")
                A name prefix to prepend to pyro sample sites.

        Returns:
            :obj:`torch.Tensor` samples from :math:`q(\mathbf f)`
        """
        return super().pyro_model(input, beta=beta, name_prefix=name_prefix)

    def __call__(self, inputs, prior=False, **kwargs):
        if inputs.dim() == 1:
            inputs = inputs.unsqueeze(-1)
        return self.variational_strategy(inputs, prior=prior)
38.157895
114
0.593563
2,043
0.93931
0
0
0
0
0
0
1,406
0.646437
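The ApproximateGP class in the record above is normally used by subclassing it and wiring in a variational strategy. The sketch below follows the common gpytorch pattern; the mean/kernel/variational module paths and constructor signatures are assumptions about the surrounding library rather than part of the record itself.

# Sketch of a typical subclass, assuming gpytorch's means/kernels/variational API.
import torch
import gpytorch
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy


class SVGPModel(ApproximateGP):
    def __init__(self, inducing_points):
        variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0))
        variational_strategy = VariationalStrategy(
            self, inducing_points, variational_distribution, learn_inducing_locations=True
        )
        super().__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        # Return the GP prior mean and covariance at x, as the base class's forward() requires.
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )


model = SVGPModel(inducing_points=torch.linspace(0, 1, 16).unsqueeze(-1))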
c76ec369645b0f101be129ffedbb1f290be5f94b
510
py
Python
tests/test_ping.py
d-wysocki/flask-resty
2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19
[ "MIT" ]
86
2015-11-25T07:09:10.000Z
2022-02-15T19:40:30.000Z
tests/test_ping.py
d-wysocki/flask-resty
2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19
[ "MIT" ]
180
2015-11-24T23:02:53.000Z
2022-03-31T04:05:38.000Z
tests/test_ping.py
d-wysocki/flask-resty
2a5e7d7ea7e2130dce44b8f50625df72ad0dcd19
[ "MIT" ]
17
2015-12-28T11:05:47.000Z
2022-03-15T12:10:02.000Z
import pytest

from flask_resty import Api
from flask_resty.testing import assert_response

# -----------------------------------------------------------------------------


@pytest.fixture(autouse=True)
def routes(app):
    api = Api(app, "/api")
    api.add_ping("/ping")


# -----------------------------------------------------------------------------


def test_ping(base_client):
    response = base_client.get("/ping")
    assert_response(response, 200)
    assert response.get_data(as_text=True) == ""
23.181818
79
0.490196
0
0
0
0
99
0.194118
0
0
180
0.352941
c76f8dffc967eba49049f65ff4df98887b137c0d
1,476
py
Python
tests/test_vetters.py
pllim/exovetter
75c6ca609331c04a55c0a6b4c858be71a4dfdfea
[ "MIT", "BSD-3-Clause" ]
null
null
null
tests/test_vetters.py
pllim/exovetter
75c6ca609331c04a55c0a6b4c858be71a4dfdfea
[ "MIT", "BSD-3-Clause" ]
null
null
null
tests/test_vetters.py
pllim/exovetter
75c6ca609331c04a55c0a6b4c858be71a4dfdfea
[ "MIT", "BSD-3-Clause" ]
null
null
null
from numpy.testing import assert_allclose
from astropy.io import ascii
from astropy import units as u
import lightkurve as lk

from exovetter import const as exo_const
from exovetter import vetters
from exovetter.tce import Tce
from astropy.utils.data import get_pkg_data_filename


def get_wasp18_tce():
    tce = Tce(period=0.94124 * u.day, epoch=58374.669883 * u.day,
              epoch_offset=-2400000.5 * u.day,
              depth=0.00990112 * exo_const.frac_amp,
              duration=0.08932 * u.day,
              event_name='WASP-18 b', target_name='WASP-18', snr=50)
    return tce


def get_wasp18_lightcurve():
    lc_file = get_pkg_data_filename("data/wasp18b_flat_lightcurve.csv")

    lc_table = ascii.read(lc_file, data_start=1)

    lc = lk.LightCurve(time=lc_table['col2'], flux=lc_table['col3'],
                       flux_err=lc_table['col4'], time_format="btjd")

    return lc


def test_vetters():
    tce = get_wasp18_tce()
    lc = get_wasp18_lightcurve()
    metrics = dict()
    vetter_list = [vetters.Lpp(),
                   vetters.OddEven(),
                   vetters.TransitPhaseCoverage()]

    for v in vetter_list:
        vetter = v
        _ = vetter.run(tce, lc)
        metrics.update(vetter.__dict__)

    assert_allclose(metrics['norm_lpp'], 7.93119, rtol=1e-3)
    assert_allclose(metrics['tp_cover'], 1.0, rtol=1e-5)
    assert_allclose(metrics['odd_depth'][0], 0.99, rtol=1e-1)
25.894737
71
0.638889
0
0
0
0
0
0
0
0
109
0.073848
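The test in the record above doubles as a usage example; run in isolation, a single vetter can be applied to the WASP-18 b TCE as sketched here, reusing only the helpers and names already present in the record.

# Sketch: running one vetter from the exovetter record above on its own.
from exovetter import vetters

tce = get_wasp18_tce()            # helper defined in the record above
lc = get_wasp18_lightcurve()      # helper defined in the record above

lpp = vetters.Lpp()
_ = lpp.run(tce, lc)
print(lpp.__dict__['norm_lpp'])   # expected to be close to 7.93 per the test's assertion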
c770f106a56c64793bd9f4e329f2b5bb1fbfddef
4,270
py
Python
pyqtgraph/dockarea/DockDrop.py
hishizuka/pyqtgraph
4820625d93ffb41f324431d0d29b395cf91f339e
[ "MIT" ]
2,762
2015-01-02T14:34:10.000Z
2022-03-30T14:06:07.000Z
pyqtgraph/dockarea/DockDrop.py
hishizuka/pyqtgraph
4820625d93ffb41f324431d0d29b395cf91f339e
[ "MIT" ]
1,901
2015-01-12T03:20:30.000Z
2022-03-31T16:33:36.000Z
pyqtgraph/dockarea/DockDrop.py
hishizuka/pyqtgraph
4820625d93ffb41f324431d0d29b395cf91f339e
[ "MIT" ]
1,038
2015-01-01T04:05:49.000Z
2022-03-31T11:57:51.000Z
# -*- coding: utf-8 -*-
from ..Qt import QtCore, QtGui


class DockDrop(object):
    """Provides dock-dropping methods"""

    def __init__(self, allowedAreas=None):
        object.__init__(self)
        if allowedAreas is None:
            allowedAreas = ['center', 'right', 'left', 'top', 'bottom']
        self.allowedAreas = set(allowedAreas)
        self.setAcceptDrops(True)
        self.dropArea = None
        self.overlay = DropAreaOverlay(self)
        self.overlay.raise_()

    def resizeOverlay(self, size):
        self.overlay.resize(size)

    def raiseOverlay(self):
        self.overlay.raise_()

    def dragEnterEvent(self, ev):
        src = ev.source()
        if hasattr(src, 'implements') and src.implements('dock'):
            #print "drag enter accept"
            ev.accept()
        else:
            #print "drag enter ignore"
            ev.ignore()

    def dragMoveEvent(self, ev):
        #print "drag move"
        # QDragMoveEvent inherits QDropEvent which provides posF()
        # PyQt6 provides only position()
        posF = ev.posF() if hasattr(ev, 'posF') else ev.position()
        ld = posF.x()
        rd = self.width() - ld
        td = posF.y()
        bd = self.height() - td

        mn = min(ld, rd, td, bd)
        if mn > 30:
            self.dropArea = "center"
        elif (ld == mn or td == mn) and mn > self.height()/3.:
            self.dropArea = "center"
        elif (rd == mn or ld == mn) and mn > self.width()/3.:
            self.dropArea = "center"
        elif rd == mn:
            self.dropArea = "right"
        elif ld == mn:
            self.dropArea = "left"
        elif td == mn:
            self.dropArea = "top"
        elif bd == mn:
            self.dropArea = "bottom"

        if ev.source() is self and self.dropArea == 'center':
            #print " no self-center"
            self.dropArea = None
            ev.ignore()
        elif self.dropArea not in self.allowedAreas:
            #print " not allowed"
            self.dropArea = None
            ev.ignore()
        else:
            #print " ok"
            ev.accept()
        self.overlay.setDropArea(self.dropArea)

    def dragLeaveEvent(self, ev):
        self.dropArea = None
        self.overlay.setDropArea(self.dropArea)

    def dropEvent(self, ev):
        area = self.dropArea
        if area is None:
            return
        if area == 'center':
            area = 'above'
        self.area.moveDock(ev.source(), area, self)
        self.dropArea = None
        self.overlay.setDropArea(self.dropArea)


class DropAreaOverlay(QtGui.QWidget):
    """Overlay widget that draws drop areas during a drag-drop operation"""

    def __init__(self, parent):
        QtGui.QWidget.__init__(self, parent)
        self.dropArea = None
        self.hide()
        self.setAttribute(QtCore.Qt.WidgetAttribute.WA_TransparentForMouseEvents)

    def setDropArea(self, area):
        self.dropArea = area
        if area is None:
            self.hide()
        else:
            ## Resize overlay to just the region where drop area should be displayed.
            ## This works around a Qt bug--can't display transparent widgets over QGLWidget
            prgn = self.parent().rect()
            rgn = QtCore.QRect(prgn)
            w = min(30, prgn.width()/3.)
            h = min(30, prgn.height()/3.)

            if self.dropArea == 'left':
                rgn.setWidth(w)
            elif self.dropArea == 'right':
                rgn.setLeft(rgn.left() + prgn.width() - w)
            elif self.dropArea == 'top':
                rgn.setHeight(h)
            elif self.dropArea == 'bottom':
                rgn.setTop(rgn.top() + prgn.height() - h)
            elif self.dropArea == 'center':
                rgn.adjust(w, h, -w, -h)
            self.setGeometry(rgn)
            self.show()

        self.update()

    def paintEvent(self, ev):
        if self.dropArea is None:
            return
        p = QtGui.QPainter(self)
        rgn = self.rect()

        p.setBrush(QtGui.QBrush(QtGui.QColor(100, 100, 255, 50)))
        p.setPen(QtGui.QPen(QtGui.QColor(50, 50, 150), 3))
        p.drawRect(rgn)
32.348485
91
0.525527
4,201
0.983841
0
0
0
0
0
0
667
0.156206
c7723eb15222900f00b69a2e3a6fb1a9708b8d3e
871
py
Python
data/download.py
pyaf/google-ai-open-images-object-detection-track
3dd19aeeca5eea07de341ade59d1513fda4597ee
[ "MIT" ]
null
null
null
data/download.py
pyaf/google-ai-open-images-object-detection-track
3dd19aeeca5eea07de341ade59d1513fda4597ee
[ "MIT" ]
null
null
null
data/download.py
pyaf/google-ai-open-images-object-detection-track
3dd19aeeca5eea07de341ade59d1513fda4597ee
[ "MIT" ]
null
null
null
import os
from subprocess import call

files = ['000002b66c9c498e.jpg', '000002b97e5471a0.jpg', '000002c707c9895e.jpg', '0000048549557964.jpg',
         '000004f4400f6ec5.jpg', '0000071d71a0a6f6.jpg', '000013ba71c12506.jpg', '000018acd19b4ad3.jpg',
         '00001bc2c4027449.jpg', '00001bcc92282a38.jpg', '0000201cd362f303.jpg', '000020780ccee28d.jpg',
         '000023aa04ab09ed.jpg', '0000253ea4ecbf19.jpg', '000025ea48cab6fc.jpg', '0000271195f2c007.jpg',
         '0000286a5c6a3eb5.jpg', '00002b368e91b947.jpg', '00002f4ff380c64c.jpg', '0000313e5dccf13b.jpg',
         '000032046c3f8371.jpg', '00003223e04e2e66.jpg', '0000333f08ced1cd.jpg']

for file in files:
    if not os.path.exists('train/' + file + '.jpg'):
        spath = "gs://open-images-dataset/train/%s " % file
        call(["gsutil", "cp", spath, 'train/'])
        print(file, 'done', 'count:')
    else:
        print(file, 'already downloaded')
67
560
0.712974
0
0
0
0
0
0
0
0
610
0.700344
c773836d5d08ecba5ffb7e86e3b25bdc07e2351a
3,927
py
Python
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMP_FRAMEWORK_MIB.py
bopopescu/ACI
dd717bc74739eeed4747b3ea9e36b239580df5e1
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMP_FRAMEWORK_MIB.py
bopopescu/ACI
dd717bc74739eeed4747b3ea9e36b239580df5e1
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMP_FRAMEWORK_MIB.py
bopopescu/ACI
dd717bc74739eeed4747b3ea9e36b239580df5e1
[ "ECL-2.0", "Apache-2.0" ]
1
2020-07-22T04:04:44.000Z
2020-07-22T04:04:44.000Z
""" SNMP_FRAMEWORK_MIB """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class SnmpSecurityLevel(Enum): """ SnmpSecurityLevel (Enum Class) .. data:: noAuthNoPriv = 1 .. data:: authNoPriv = 2 .. data:: authPriv = 3 """ noAuthNoPriv = Enum.YLeaf(1, "noAuthNoPriv") authNoPriv = Enum.YLeaf(2, "authNoPriv") authPriv = Enum.YLeaf(3, "authPriv") class SNMPFRAMEWORKMIB(Entity): """ .. attribute:: snmpengine **type**\: :py:class:`Snmpengine <ydk.models.cisco_ios_xr.SNMP_FRAMEWORK_MIB.SNMPFRAMEWORKMIB.Snmpengine>` """ _prefix = 'SNMP_FRAMEWORK_MIB' _revision = '2002-10-14' def __init__(self): super(SNMPFRAMEWORKMIB, self).__init__() self._top_entity = None self.yang_name = "SNMP-FRAMEWORK-MIB" self.yang_parent_name = "SNMP-FRAMEWORK-MIB" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_container_classes = OrderedDict([("snmpEngine", ("snmpengine", SNMPFRAMEWORKMIB.Snmpengine))]) self._child_list_classes = OrderedDict([]) self._leafs = OrderedDict() self.snmpengine = SNMPFRAMEWORKMIB.Snmpengine() self.snmpengine.parent = self self._children_name_map["snmpengine"] = "snmpEngine" self._children_yang_names.add("snmpEngine") self._segment_path = lambda: "SNMP-FRAMEWORK-MIB:SNMP-FRAMEWORK-MIB" class Snmpengine(Entity): """ .. attribute:: snmpengineid **type**\: str **pattern:** (([0\-9a\-fA\-F]){2}(\:([0\-9a\-fA\-F]){2})\*)? .. attribute:: snmpengineboots **type**\: int **range:** 1..2147483647 .. attribute:: snmpenginetime **type**\: int **range:** 0..2147483647 .. attribute:: snmpenginemaxmessagesize **type**\: int **range:** 484..2147483647 """ _prefix = 'SNMP_FRAMEWORK_MIB' _revision = '2002-10-14' def __init__(self): super(SNMPFRAMEWORKMIB.Snmpengine, self).__init__() self.yang_name = "snmpEngine" self.yang_parent_name = "SNMP-FRAMEWORK-MIB" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_container_classes = OrderedDict([]) self._child_list_classes = OrderedDict([]) self._leafs = OrderedDict([ ('snmpengineid', YLeaf(YType.str, 'snmpEngineID')), ('snmpengineboots', YLeaf(YType.int32, 'snmpEngineBoots')), ('snmpenginetime', YLeaf(YType.int32, 'snmpEngineTime')), ('snmpenginemaxmessagesize', YLeaf(YType.int32, 'snmpEngineMaxMessageSize')), ]) self.snmpengineid = None self.snmpengineboots = None self.snmpenginetime = None self.snmpenginemaxmessagesize = None self._segment_path = lambda: "snmpEngine" self._absolute_path = lambda: "SNMP-FRAMEWORK-MIB:SNMP-FRAMEWORK-MIB/%s" % self._segment_path() def __setattr__(self, name, value): self._perform_setattr(SNMPFRAMEWORKMIB.Snmpengine, ['snmpengineid', 'snmpengineboots', 'snmpenginetime', 'snmpenginemaxmessagesize'], name, value) def clone_ptr(self): self._top_entity = SNMPFRAMEWORKMIB() return self._top_entity
28.456522
158
0.595111
3,573
0.909855
0
0
0
0
0
0
1,518
0.386555
c773cb05d9fdb9aa7ea5543ac5440822be912b9e
2,941
py
Python
handlers/redirects.py
Bainky/Ventify
638486dc5f265a4907a5a193ea2a7c9b44e8e943
[ "MIT" ]
6
2021-03-11T11:43:17.000Z
2021-12-08T05:26:20.000Z
handlers/redirects.py
Bainky/Ventify
638486dc5f265a4907a5a193ea2a7c9b44e8e943
[ "MIT" ]
null
null
null
handlers/redirects.py
Bainky/Ventify
638486dc5f265a4907a5a193ea2a7c9b44e8e943
[ "MIT" ]
2
2021-03-24T05:31:12.000Z
2021-04-13T22:03:11.000Z
from aiogram.utils.markdown import hide_link
from aiogram.types import CallbackQuery

from loader import dp
from utils import (
    get_object,
    get_attributes_of_object
)
from keyboards import (
    anime_choose_safe_category,
    anime_sfw_categories,
    anime_nsfw_categories,
    animals_categories,
    menu_with_categories,
    control_buttons
)


@dp.callback_query_handler(text="menu")
async def call_menu_with_categories(call: CallbackQuery):
    """ Function for sending a menu, with a selection of safe content """
    await call.answer()

    # Editing the message
    await call.message.edit_text(
        text=(
            "<b>🔗 Select a category to get a picture.</b>"
        ),
        reply_markup=menu_with_categories()
    )


@dp.callback_query_handler(text="anime")
async def call_anime_categories(call: CallbackQuery):
    """ Redirect to select anime actions """
    await call.answer()

    # Editing the message
    await call.message.edit_text(
        text=(
            "<b>⚜️ Choose what content you want to see.</b>"
        ),
        reply_markup=anime_choose_safe_category()
    )


@dp.callback_query_handler(text=["sfw", "nsfw"])
async def call_nsfw_categories(call: CallbackQuery):
    """ Redirect to anime content """
    data = call.data.upper()
    message = call.message

    # Send answer
    await call.answer()

    if data == "SFW":
        kb = anime_sfw_categories()
    else:
        kb = anime_nsfw_categories()

    # Editing the message
    await message.edit_text(
        text=(
            f"<b>🍿 You are in the {data} category.</b>"
        ),
        reply_markup=kb
    )


@dp.callback_query_handler(text="animals")
async def call_anime_categories(call: CallbackQuery):
    """ Redirect to animals content """
    await call.answer()

    # Editing the message
    await call.message.edit_text(
        text=(
            "<b>🦄 You are in the category with animals.</b>"
        ),
        reply_markup=animals_categories()
    )


@dp.callback_query_handler()
async def call_get_photography(call: CallbackQuery):
    """ Function for sending photos """
    message = call.message
    data = call.data

    # Get json document
    api = get_attributes_of_object()

    if data == "generate_new":
        data = message.text.split("#")[1]

    obj = api[data]["object"]
    atr = api[data]["attribute"]
    mark = api[data]["entity"]

    if mark == "anime":
        mark = api[data]["safe"]
    if mark == "memes":
        mark = "menu"

    # We get a link to the preview photo
    link = await get_object(obj, atr)
    await call.answer()

    # Editing the message
    await message.edit_text(
        text=(
            f"{hide_link(link)} #{data}"
        ),
        reply_markup=control_buttons(mark)
    )
23.717742
61
0.598776
0
0
0
0
2,554
0.86459
2,347
0.794516
782
0.264726
c774024668ea75381f4aedf887a584aaa227cbf7
320
py
Python
1stRound/Medium/322-Coin Change/DP.py
ericchen12377/Leetcode-Algorithm-Python
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
[ "MIT" ]
2
2020-04-24T18:36:52.000Z
2020-04-25T00:15:57.000Z
1stRound/Medium/322-Coin Change/DP.py
ericchen12377/Leetcode-Algorithm-Python
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
[ "MIT" ]
null
null
null
1stRound/Medium/322-Coin Change/DP.py
ericchen12377/Leetcode-Algorithm-Python
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
[ "MIT" ]
null
null
null
from typing import List


class Solution:
    def coinChange(self, coins: List[int], amount: int) -> int:
        M = float('inf')
        # dynamic programming: dp[i] is the fewest coins needed to reach amount i
        dp = [0] + [M] * amount
        for i in range(1, amount + 1):
            dp[i] = 1 + min([dp[i - c] for c in coins if i >= c] or [M])
        return dp[-1] if dp[-1] < M else -1
32
70
0.496875
319
0.996875
0
0
0
0
0
0
26
0.08125
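A quick sanity check of the coinChange solution from the record above (the expected values follow directly from the algorithm; this harness is not part of the record):

if __name__ == "__main__":
    s = Solution()
    print(s.coinChange([1, 2, 5], 11))  # expected 3   (5 + 5 + 1)
    print(s.coinChange([2], 3))         # expected -1  (amount not reachable)
    print(s.coinChange([1], 0))         # expected 0   (no coins needed)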
c77456702d5939c9da605c3d65de2f70c1b95b26
8,695
py
Python
segmentation_test/Scripts/medpy_graphcut_voxel.py
rominashirazi/SpineSegmentation
fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab
[ "MIT" ]
null
null
null
segmentation_test/Scripts/medpy_graphcut_voxel.py
rominashirazi/SpineSegmentation
fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab
[ "MIT" ]
null
null
null
segmentation_test/Scripts/medpy_graphcut_voxel.py
rominashirazi/SpineSegmentation
fb08122ac6d9a598b60aecb4f1a1a2a31fba96ab
[ "MIT" ]
null
null
null
#!c:\users\hooma\documents\github\spinesegmentation\segmentation_test\scripts\python.exe """ Execute a graph cut on a voxel image based on some foreground and background markers. Copyright (C) 2013 Oskar Maier This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ # build-in modules from argparse import RawTextHelpFormatter import argparse import logging import os # third-party modules import scipy # path changes # own modules from medpy.core import ArgumentError, Logger from medpy.io import load, save, header from medpy import graphcut from medpy.graphcut.wrapper import split_marker # information __author__ = "Oskar Maier" __version__ = "r0.3.1, 2012-03-23" __email__ = "[email protected]" __status__ = "Release" __description__ = """ Perform a binary graph cut using Boykov's max-flow/min-cut algorithm. This implementation does only compute a boundary term and does not use any regional term. The desired boundary term can be selected via the --boundary argument. Depending on the selected term, an additional image has to be supplied as badditional. In the case of the difference of means, it is the original image. Furthermore the algorithm requires a binary image with foreground markers and a binary image with background markers. Additionally a filename for the created binary mask marking foreground and background has to be supplied. Note that the input images must be of the same dimensionality, otherwise an exception is thrown. Note to take into account the input images orientation. Note that the quality of the resulting segmentations depends also on the quality of the supplied markers. Copyright (C) 2013 Oskar Maier This program comes with ABSOLUTELY NO WARRANTY; This is free software, and you are welcome to redistribute it under certain conditions; see the LICENSE file or <http://www.gnu.org/licenses/> for details. """ # code def main(): # parse cmd arguments parser = getParser() parser.parse_args() args = getArguments(parser) # prepare logger logger = Logger.getInstance() if args.debug: logger.setLevel(logging.DEBUG) elif args.verbose: logger.setLevel(logging.INFO) # check if output image exists if not args.force: if os.path.exists(args.output): logger.warning('The output image {} already exists. 
Exiting.'.format(args.output)) exit(-1) # select boundary term ['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow'] if 'diff_linear' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_linear logger.info('Selected boundary term: linear difference of intensities') elif 'diff_exp' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_exponential logger.info('Selected boundary term: exponential difference of intensities') elif 'diff_div' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_division logger.info('Selected boundary term: divided difference of intensities') elif 'diff_pow' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_difference_power logger.info('Selected boundary term: power based / raised difference of intensities') elif 'max_linear' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_linear logger.info('Selected boundary term: linear maximum of intensities') elif 'max_exp' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_exponential logger.info('Selected boundary term: exponential maximum of intensities') elif 'max_div' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_division logger.info('Selected boundary term: divided maximum of intensities') elif 'max_pow' == args.boundary: boundary_term = graphcut.energy_voxel.boundary_maximum_power logger.info('Selected boundary term: power based / raised maximum of intensities') # load input images badditional_image_data, reference_header = load(args.badditional) markers_image_data, _ = load(args.markers) # split marker image into fg and bg images fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data) # check if all images dimensions are the same if not (badditional_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape): logger.critical('Not all of the supplied images are of the same shape.') raise ArgumentError('Not all of the supplied images are of the same shape.') # extract spacing if required if args.spacing: spacing = header.get_pixel_spacing(reference_header) logger.info('Taking spacing of {} into account.'.format(spacing)) else: spacing = False # generate graph logger.info('Preparing BK_MFMC C++ graph...') gcgraph = graphcut.graph_from_voxels(fgmarkers_image_data, bgmarkers_image_data, boundary_term = boundary_term, boundary_term_args = (badditional_image_data, args.sigma, spacing)) # execute min-cut logger.info('Executing min-cut...') maxflow = gcgraph.maxflow() logger.debug('Maxflow is {}'.format(maxflow)) # reshape results to form a valid mask logger.info('Applying results...') result_image_data = scipy.zeros(bgmarkers_image_data.size, dtype=scipy.bool_) for idx in range(len(result_image_data)): result_image_data[idx] = 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1 result_image_data = result_image_data.reshape(bgmarkers_image_data.shape) # save resulting mask save(result_image_data.astype(scipy.bool_), args.output, reference_header, args.force) logger.info('Successfully terminated.') def getArguments(parser): "Provides additional validation of the arguments collected by argparse." return parser.parse_args() def getParser(): "Creates and returns the argparse parser object." 
parser = argparse.ArgumentParser(description=__description__, formatter_class=RawTextHelpFormatter) parser.add_argument('sigma', type=float, help='The sigma required for the boundary terms.') parser.add_argument('badditional', help='The additional image required by the boundary term. See there for details.') parser.add_argument('markers', help='Image containing the foreground (=1) and background (=2) markers.') parser.add_argument('output', help='The output image containing the segmentation.') parser.add_argument('--boundary', default='diff_exp', help='The boundary term to use. Note that the ones prefixed with diff_ require the original image, while the ones prefixed with max_ require the gradient image.', choices=['diff_linear', 'diff_exp', 'diff_div', 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow']) parser.add_argument('-s', dest='spacing', action='store_true', help='Set this flag to take the pixel spacing of the image into account. The spacing data will be extracted from the baddtional image.') parser.add_argument('-f', dest='force', action='store_true', help='Set this flag to silently override files that exist.') parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.') parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.') return parser if __name__ == "__main__": main()
47.513661
328
0.692237
0
0
0
0
0
0
0
0
4,848
0.557562
c774862e87bf8aaea6f4bb5796d15dd56dc9ae0b
2,968
py
Python
_notes/book/conf.py
AstroMatt/astronaut-training-en
6250af8e10358016dcebee54bb9ad5bc40cfe4d1
[ "MIT" ]
1
2020-08-08T00:37:28.000Z
2020-08-08T00:37:28.000Z
_notes/book/conf.py
AstroMatt/astronaut-training-en
6250af8e10358016dcebee54bb9ad5bc40cfe4d1
[ "MIT" ]
null
null
null
_notes/book/conf.py
AstroMatt/astronaut-training-en
6250af8e10358016dcebee54bb9ad5bc40cfe4d1
[ "MIT" ]
null
null
null
author = 'Matt Harasymczuk' email = '[email protected]' project = 'Astronaut Training Program' description = 'Astronaut Training Program' extensions = [ 'sphinx.ext.todo', 'sphinx.ext.imgmath', ] todo_emit_warnings = False todo_include_todos = True exclude_patterns = [] # ----------------------------------------------------------------------------- # Standard book config # ----------------------------------------------------------------------------- import os import re import subprocess import sys from datetime import datetime needs_sphinx = '2.2' mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-MML-AM_CHTML' mathjax_config = { 'extensions': ['tex2jax.js'], 'jax': ['input/TeX', 'output/HTML-CSS'], } html_theme = 'sphinx_rtd_theme' exclude_patterns = exclude_patterns + [ '.*', 'venv*', 'virtualenv*', '_extensions', '_img', '_slides', '_static', '_themes', '_tmp', '*/_template.rst', '*/contrib/*', '*/solution/*', '*/solutions/*', '**.ipynb_checkpoints', 'README.rst', 'TODO.rst', ] numfig_format = { 'section': 'Sect. %s.', 'figure': 'Fig. %s.', 'table': 'Tab. %s.', 'code-block': 'Code Listing %s.', } language = 'en' source_directory = '.' master_doc = 'index' highlight_language = 'python3' pygments_style = 'borland' numfig = True templates_path = ['_templates'] source_suffix = ['.rst'] imgmath_image_format = 'svg' today_fmt = '%Y-%m-%d' project_slug = re.sub(r'[\W]+', '', project) sha1 = subprocess.Popen('git log -1 --format="%h"', stdout=subprocess.PIPE, shell=True).stdout.read().decode().replace('\n', '') now = datetime.now() year = now.year today = now.strftime('%Y-%m-%d') version = f'#{sha1}, {today}' release = f'#{sha1}, {today}' copyright = f'{year}, {author} <{email}>' extensions_dir = os.path.join(os.path.dirname(__file__), '', '_extensions') sys.path.append(extensions_dir) htmlhelp_basename = project html_theme_path = ['_themes'] html_static_path = ['_static'] html_favicon = '_static/favicon.png' html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']} html_show_sphinx = False html_context = { 'css_files': [ '_static/theme-overrides.css', ], } latex_documents = [(master_doc, f'{project_slug}.tex', project, author, 'manual')] latex_elements = { 'papersize': 'a4paper', 'pointsize': '10pt', 'figure_align': 'htbp', # Fix for: LaTeX Backend Fails with Citations In Figure Captions 'preamble': r""" \usepackage{etoolbox} \AtBeginEnvironment{figure}{\renewcommand{\phantomsection}{}} """ } epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright epub_exclude_files = ['search.html'] man_pages = [ (master_doc, project_slug, project, [author], 1) ] texinfo_documents = [ (master_doc, project_slug, project, author, project, '', 'Miscellaneous'), ]
24.130081
128
0.624326
0
0
0
0
0
0
0
0
1,379
0.464623
c774cc70f7362fd8daf5037ff3abf0db4ccef896
221
py
Python
tutorial_application/forms.py
yamasakih/django_rdkit_tutorial
9ac591963976da38cae962de2b98702bbb919cf4
[ "MIT" ]
2
2018-12-04T00:01:26.000Z
2021-03-25T08:28:06.000Z
tutorial_application/forms.py
yamasakih/django-rdkit-tutorial
9ac591963976da38cae962de2b98702bbb919cf4
[ "MIT" ]
null
null
null
tutorial_application/forms.py
yamasakih/django-rdkit-tutorial
9ac591963976da38cae962de2b98702bbb919cf4
[ "MIT" ]
null
null
null
from django_rdkit import models
from django.forms.models import ModelForm

from .models import Compound


class SubstructureSearchForm(ModelForm):
    class Meta:
        model = Compound
        fields = ('molecule', )
18.416667
41
0.723982
113
0.511312
0
0
0
0
0
0
10
0.045249
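A short sketch of how a ModelForm like SubstructureSearchForm is typically wired into a Django view. The view name, template name, and the query in the comment are illustrative assumptions, not part of the record above.

# Hypothetical view using the form; names are illustrative only.
from django.shortcuts import render

from .forms import SubstructureSearchForm


def substructure_search(request):
    form = SubstructureSearchForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        query_molecule = form.cleaned_data["molecule"]
        # e.g. Compound.objects.filter(molecule__hassubstruct=query_molecule)
        # would run the actual substructure query with django_rdkit.
    return render(request, "search.html", {"form": form})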
c775a30ea8b55f2cd0df98a3a7cc00417a074bda
18,286
py
Python
data_structures/trees/tree.py
onyonkaclifford/data-structures-and-algorithms
e0ca4bfa878273d06bf22c303e47762b8ec3870b
[ "MIT" ]
null
null
null
data_structures/trees/tree.py
onyonkaclifford/data-structures-and-algorithms
e0ca4bfa878273d06bf22c303e47762b8ec3870b
[ "MIT" ]
null
null
null
data_structures/trees/tree.py
onyonkaclifford/data-structures-and-algorithms
e0ca4bfa878273d06bf22c303e47762b8ec3870b
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod from typing import Any, Generator, Iterable, List, Union class Empty(Exception): pass class Tree(ABC): """A tree is a hierarchical collection of nodes containing items, with each node having a unique parent and zero, one or many children items. The topmost element in a non-empty tree, the root, has no parent. Tree vocabularies include, but are not limited to: 1. Root - the topmost element in a non-empty tree, it has no parent 2. Leaf - a node with zero children 3. Siblings - nodes that share a parent node 4. Edge - a pair of nodes such the one is the parent of the other 5. Path - a collection of nodes such that any pair of adjacent nodes have a parent/child relationship 6. Height - number of edges between a node and it's furthest leaf 7. Depth - number of edges between a node and the root 8. Level - number of nodes in the path between a node and the root, inclusive of both the node itself and the root 9. Ordered tree - a tree with a meaningful organisation among its nodes such that its nodes can be arranged in a linear manner from first to last """ class _Node: def __init__(self, key, value, parent=None, children: Union[List, None] = None): self.key = key self.value = value self.parent = parent self.children = children if children is not None else [] class _Position: """A representation of the position of a node within a tree""" def __init__(self, belongs_to, node): self.__variables = {"belongs_to": belongs_to} self.__node = node def is_owned_by(self, owner): """Check whether position belongs to the tree, owner. Time complexity: O(1). :param owner: object to check whether it's the owner of this position :returns: True of the position is owned by the object passed, else False """ return owner is self.__variables["belongs_to"] def manipulate_variables(self, owner, method: str, *params): """Manipulate member variables of this position. Methods of the owner list are the only ones that can call this method. Time complexity: O(1). :param owner: tree object that owns this position :param method: method name of tree object that will manipulate the member variables of this position :param params: extra optional parameters to pass to the method :returns: the return value of the tree method whose name is passed """ if not self.is_owned_by(owner): raise ValueError("Position doesn't belong to the passed owner") return getattr(owner, method)(self.__variables, *params) def manipulate_node(self, owner, method: str, *params): """Manipulate the node held by this position. Methods of the owner list are the only ones that can call this method. Time complexity: O(1). :param owner: tree object that owns this position :param method: method name of tree object that will manipulate the node contained in this position :param params: extra optional parameters to pass to the method :returns: the return value of the tree method whose name is passed """ if not self.is_owned_by(owner): raise ValueError("Position doesn't belong to the passed owner") return getattr(owner, method)(self.__node, *params) def get_data(self): """Return the data stored by the node held by this position. Time complexity: O(1). 
:returns: data stored in node contained in this position """ return self.__node.key, self.__node.value def __init__(self): self._root: Union[Tree._Node, None] = None self._length = 0 self.__generator: Union[Generator, None] = None def __len__(self) -> int: """Return total number of items in tree :return: count of items in tree """ return self._length def __repr__(self) -> str: """Return a string representation of the tree :return: the string representation of the tree """ def helper(current_position): children = self.get_children(current_position) num_of_children = len(children) last_child_idx = num_of_children - 1 data_dict["string_data"] += f"{current_position.get_data()[0]}" for i, j in enumerate(children): data_dict["string_data"] += "(" if i == 0 else ", " helper(j) data_dict["string_data"] += ")" if i == last_child_idx else "" if self.is_empty(): return "" data_dict = {"string_data": ""} helper(Tree._Position(self, self._root)) return data_dict["string_data"] def __iter__(self) -> Iterable: """Return a tree iterable :return: tree iterable """ return self def __next__(self) -> _Position: """Return next position of tree iterator, implemented based on level-order traversal :return: next position :raises StopIteration: when the cursor denoting the current position surpasses the last position of the tree """ if self.__generator is None: self.__generator = self.traverse_tree_level_order() try: next_position = next(self.__generator) except StopIteration as e: self.__generator = None raise e return next_position @staticmethod def _validate_node(node): """Helper function to check if the node passed is a tree node. Returns the node passed if the validation passes, else raises a TypeError. Time complexity: O(1). :param node: node to validate :returns: the node passed if it passes validation :raises TypeError: if the validation fails """ if not isinstance(node, Tree._Node): raise TypeError("Not a tree node") return node @staticmethod def _invalidate_position(variables): """Helper function to set the belongs_to key of a dictionary to None. Used to revoke the ownership of a position by this tree. Time complexity: O(1). :returns: the dictionary passed, with the belongs_to key set to None """ variables["belongs_to"] = None return variables def is_empty(self) -> bool: """Return True if tree is empty, else False. Time complexity: O(1). :returns: True if tree is empty, else False """ return self._root is None def is_root(self, position: _Position) -> bool: """Check if the passed position contains the root node. Time complexity: O(1). :returns: True if the passed position holds the root node, else False """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") return node.parent is None def is_leaf(self, position: _Position) -> bool: """Check if the passed position contains a leaf. Time complexity: O(1). :returns: True if the passed position holds a leaf node, else False """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") return len(self.get_children(position)) == 0 def get_root(self) -> Union[_Position, None]: """Return the root position. Time complexity: O(1). :returns: the root position """ if self.is_empty(): return None else: return Tree._Position(self, self._root) def get_parent(self, position: _Position) -> Union[_Position, None]: """Return the parent of the given position. Time complexity: O(1). 
:param position: position containing the node whose parent is being sought :returns: the position of parent of the node contained in the passed position. None if the position passed contains the root node. """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") if self.is_root(Tree._Position(self, node)): return None else: return Tree._Position(self, node.parent) def get_children(self, position: _Position) -> Union[List[_Position], None]: """Return the children of the given position. Time complexity: O(1). :param position: position containing the node whose children are being sought :returns: the positions of the children of the node contained in the passed position. None if the position has no children. """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") children = node.children if children is None: return None else: return [Tree._Position(self, i) for i in children if i is not None] def get_siblings(self, position: _Position) -> Union[List[_Position], None]: """Return the siblings of the given position. Time complexity: O(1). :param position: position containing the node whose children are being sought :returns: the positions of the siblings of the node contained in the passed position """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") node = position.manipulate_node(self, "_validate_node") parent = node.parent if parent is None: return [] return [Tree._Position(self, i) for i in parent.children if i is not node] def get_height_of_node(self, position: _Position) -> int: """Return the number of edges between a node and the farthest leaf among its descendants. Time complexity: O(n). :param position: position containing the node whose height is being sought :returns: the number of edges between a node and the farthest leaf among its descendants """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") if self.is_leaf(position): return 0 return 1 + max(self.get_height_of_node(p) for p in self.get_children(position)) def get_height_of_tree(self) -> int: """Return the number of edges between the root node and the farthest leaf. Time complexity: O(n). :returns: the number of edges between the root node and the farthest leaf """ if self.is_empty(): raise Empty("Tree is empty") return self.get_height_of_node(Tree._Position(self, self._root)) def get_depth_of_node(self, position: _Position) -> int: """Return the number of edges between a node and the root. Time complexity: O(n). :param position: position containing the node whose depth is being sought :returns: the number of edges between a node and the root """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") if self.is_root(position): return 0 return 1 + self.get_depth_of_node(self.get_parent(position)) def get_depth_of_tree(self) -> int: """Return the number of edges between the farthest leaf and the root. Time complexity: O(n). :returns: the number of edges between the farthest leaf and the root """ return self.get_height_of_tree() def get_level_of_node(self, position: _Position) -> int: """Return the number of nodes between a node and the root, inclusive of itself. Time complexity: O(n). 
:param position: position containing the node whose level is being sought :returns: the number of nodes between a node and the root, inclusive of itself """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") return 1 + self.get_depth_of_node(position) def traverse_subtree_pre_order(self, position: _Position) -> Generator: """Pre-order traverse subtree whose root is the passed position and return a generator of the positions it contains :param position: position containing the node that's the root of the subtree to be traversed :returns: a generator of the positions """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") yield position for i in self.get_children(position): for j in self.traverse_subtree_pre_order(i): yield j def traverse_tree_pre_order(self) -> Generator: """Pre-order traverse tree and return a generator of the positions it contains :returns: a generator of the positions """ position = self.get_root() if position is not None: for i in self.traverse_subtree_pre_order(position): yield i def traverse_subtree_post_order(self, position: _Position) -> Generator: """Post-order traverse subtree whose root is the passed position and return a generator of the positions it contains :param position: position containing the node that's the root of the subtree to be traversed :returns: a generator of the positions """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") for i in self.get_children(position): for j in self.traverse_subtree_post_order(i): yield j yield position def traverse_tree_post_order(self) -> Generator: """Post-order traverse tree and return a generator of the positions it contains :returns: a generator of the positions """ position = self.get_root() if position is not None: for i in self.traverse_subtree_post_order(position): yield i def traverse_subtree_level_order(self, position: _Position) -> Generator: """Level-by-level traverse subtree whose root is the passed position and return a generator of the positions it contains :param position: position containing the node that's the root of the subtree to be traversed :returns: a generator of the positions """ if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") def helper(root_node, level): if root_node is not None: if level == 1: yield Tree._Position(self, root_node) elif level > 1: for child in root_node.children: for k in helper(child, level - 1): yield k node = position.manipulate_node(self, "_validate_node") number_of_levels = self.get_height_of_node(position) + 1 for i in range(1, number_of_levels + 1): for j in helper(node, i): yield j def traverse_tree_level_order(self) -> Generator: """Level-by-level traverse tree and return a generator of the positions it contains :returns: a generator of the positions """ position = self.get_root() if position is not None: for i in self.traverse_subtree_level_order(position): yield i def delete(self, position: _Position) -> None: """Delete a value from the tree :param position: position containing the node to be removed from the tree """ self._length -= 1 if not position.is_owned_by(self): raise ValueError("Position doesn't belong to this tree") def insert_node(node_to_insert, is_node_left_child, parent_node): if node_to_insert is not None: node_to_insert.parent = parent_node if is_node_left_child is not None: if is_node_left_child: parent_node.children[0] = node_to_insert else: parent_node.children[1] = 
node_to_insert def delete_node(node_to_delete, is_root): parent = node_to_delete.parent left = node_to_delete.children[0] right = node_to_delete.children[1] is_left_child = None if parent is None else node_to_delete.key < parent.key if left is None: insert_node(right, is_left_child, parent) if is_root: self._root = right else: current_node = left right_child = current_node.children[1] if right_child is None: current_node.children[1] = right insert_node(current_node, is_left_child, parent) if is_root: self._root = current_node else: new_node = Tree._Node( right_child.key, right_child.value, children=[current_node, right], ) insert_node(new_node, is_left_child, parent) if is_root: self._root = new_node delete_node(right_child, False) node = position.manipulate_node(self, "_validate_node") is_root_node = self.is_root(position) _ = position.manipulate_variables(self, "_invalidate_position") delete_node(node, is_root_node) @abstractmethod def insert(self, key: Any, value: Any) -> None: """Insert a value into the tree :param key: unique identifier of the item to be added to the tree :param value: item to be added to the tree """ self._length += 1
39.240343
119
0.626107
18,187
0.994586
3,393
0.185552
1,137
0.062179
0
0
8,669
0.474079
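The Tree class in the record above is abstract (insert is an abstractmethod) and its delete method assumes a binary, key-ordered node layout (children[0] = left, children[1] = right). The following is a minimal sketch of a concrete subclass under that assumption; it is not part of the record.

class SimpleBinarySearchTree(Tree):
    def insert(self, key, value):
        super().insert(key, value)  # updates the length counter
        new_node = Tree._Node(key, value, children=[None, None])
        if self._root is None:
            self._root = new_node
            return
        current = self._root
        while True:
            side = 0 if key < current.key else 1
            if current.children[side] is None:
                current.children[side] = new_node
                new_node.parent = current
                return
            current = current.children[side]


if __name__ == "__main__":
    tree = SimpleBinarySearchTree()
    for k in (5, 3, 8, 1):
        tree.insert(k, str(k))
    print(repr(tree))                        # 5(3(1), 8)
    print([p.get_data()[0] for p in tree])   # level order: [5, 3, 8, 1]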
c775ae8fda6ca73f18c286d16c2c597ac2a87d30
6,857
py
Python
nodes/audio.py
sddhrthrt/COVFEFE
bc74ff0b5ee4d675482928110dda81443d4bec63
[ "Apache-2.0" ]
null
null
null
nodes/audio.py
sddhrthrt/COVFEFE
bc74ff0b5ee4d675482928110dda81443d4bec63
[ "Apache-2.0" ]
null
null
null
nodes/audio.py
sddhrthrt/COVFEFE
bc74ff0b5ee4d675482928110dda81443d4bec63
[ "Apache-2.0" ]
null
null
null
from abc import ABC, abstractmethod import os import logging from nodes.helper import FileOutputNode from utils import file_utils from utils import signal_processing as sp from utils.shell_run import shell_run from config import OPENSMILE_HOME class Mp3ToWav(FileOutputNode): def run(self, mp3_file): self.log(logging.INFO, "Starting %s" % (mp3_file)) if not mp3_file.endswith(".mp3"): self.log(logging.ERROR,"Failed %s. Not mp3 file" % (mp3_file)) return wav_file = self.derive_new_file_path(mp3_file, "wav") if file_utils.should_run(mp3_file, wav_file): res = shell_run(["lame", "--decode", mp3_file, wav_file]) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with lame error code %i" % (mp3_file, wav_file, res)) return self.log(logging.INFO, "Done %s -> %s" % (mp3_file, wav_file)) self.emit(wav_file) class ResampleWav(FileOutputNode): def setup(self, new_sr): self.new_sr = new_sr def run(self, wav_file): self.log(logging.INFO, "Starting %s" % (wav_file)) if not wav_file.endswith(".wav"): self.log(logging.ERROR,"Failed %s. Not wav file" % (wav_file)) return new_wav_file = self.derive_new_file_path(wav_file, "wav") if file_utils.should_run(wav_file, new_wav_file): res = shell_run(["sox", wav_file, "--rate", str(self.new_sr), new_wav_file]) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with lame error code %i" % (wav_file, new_wav_file, res)) return self.log(logging.INFO, "Done %s -> %s" % (wav_file, new_wav_file)) self.emit(new_wav_file) class ShellCommand(FileOutputNode): """ Take as input a format string representing a shell command that can accept an in_file and out_file. For example "someCommand -i {in_file} -o {out_file}" ext: Extension of the output file, ex. "wav", "csv" """ def setup(self, command, ext): self.command = command self.ext = ext def run(self, in_file): self.log(logging.INFO, "Starting %s" % (in_file)) out_file = self.derive_new_file_path(in_file, self.ext) if file_utils.should_run(in_file, out_file): cmd = self.command.format(in_file=in_file, out_file=out_file) res = shell_run(cmd.split(" ")) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with error code %i. cmd: %s" % (in_file, out_file, res, cmd)) return self.log(logging.INFO, "Done %s -> %s" % (in_file, out_file)) self.emit(out_file) class OpenSmileRunner(FileOutputNode): """ conf_file: Either absolute path to an opensmile conf file or the name of a config file in opensmile's config folder out_flag: Flag to use for the output file. extra_flags: A string of extra flags to pass to SMILExtract. out_ext: Extension of the output file """ def setup(self, conf_file, out_flag="-csvoutput", extra_flags="-nologfile -noconsoleoutput -appendcsv 0", out_ext="csv"): self.conf_file = file_utils.locate_file(conf_file, [os.path.join(OPENSMILE_HOME, "config")]) self.extra_flags = extra_flags.split(" ") self.out_flag = out_flag self.out_ext = out_ext self.opensmile_exec = file_utils.locate_file("SMILExtract", [OPENSMILE_HOME, os.path.join(OPENSMILE_HOME, "bin")], use_path=True) def run(self, in_file): self.log(logging.INFO, "Starting %s" % (in_file)) out_file = self.derive_new_file_path(in_file, self.out_ext) if file_utils.should_run(in_file, out_file): cmd = [self.opensmile_exec, "-C", self.conf_file, "-I", in_file, self.out_flag, out_file] + self.extra_flags res = shell_run(cmd) if res != 0: self.log(logging.ERROR,"Failed %s -> %s with SmileExtract error code %i. 
cmd: %s" % (in_file, out_file, res, " ".join(cmd))) return self.log(logging.INFO, "Done %s -> %s" % (in_file, out_file)) self.emit([out_file]) class IS10_Paraling(OpenSmileRunner): def get_conf_name(self): return "IS10_paraling.conf" def get_command(self, wav_file, out_file): return [self.os_exec, "-C", self.conf_file, "-I", wav_file, "-csvoutput", out_file, "-nologfile", "-noconsoleoutput", "-appendcsv", "0"] class IS10_Paraling_lld(OpenSmileRunner): def get_conf_name(self): return "IS10_paraling.conf" def get_command(self, wav_file, out_file): return [self.os_exec, "-C", self.conf_file, "-I", wav_file, "-lldcsvoutput", out_file, "-nologfile", "-noconsoleoutput", "-appendcsv", "0"] class SplitSegments(FileOutputNode): """ segment_mapping_fn is a pointer to a function that takes as input a file and sample rate and returns a list of all the segments in that file in the format [(start1, end1, segname1), (start2, end2, segname2), ...] where start and end are in given in samples. Each tuple in the list can also have a 4th item, which can be any string. This string will get saved in segname.txt This is useful for isolating events of interest in audio files. For example, if the segment mapping function returns a list of where all speech occurs in the input audio, this will isolate all occurrences of speech into individual files. The 4th item may contain the annotation of what was said in the segment. """ def setup(self, segment_mapping_fn): self.segment_mapping_fn = segment_mapping_fn def run(self, in_file): self.log(logging.INFO, "Starting %s" % (in_file)) if not in_file.endswith(".wav"): self.log(logging.ERROR, "Failed %s. Not wav file" % (in_file)) return sr, original_data = sp.read_wave(in_file, first_channel=True) segments = self.segment_mapping_fn(in_file, sr) for segment in segments: if len(segment) == 3: start, end, seg_name = segment extra_info = None elif len(segment) == 4: start, end, seg_name, extra_info = segment else: self.log(logging.ERROR, "Failed %s. Segment length must be 3 or 4" % (in_file)) return seg_path = os.path.join(self.out_dir, "%s.wav" % seg_name) sp.write_wav(seg_path, sr, original_data[start:end]) extra_path = None if extra_info: extra_path = os.path.join(self.out_dir, "%s.txt" % seg_name) with open(extra_path, "w") as f: f.write(extra_info) self.emit([seg_path, extra_path])
36.865591
147
0.624763
6,590
0.961062
0
0
0
0
0
0
2,068
0.30159
c776010ff719981072eef5b7305ecf5eee272758
12,914
py
Python
texar/torch/modules/pretrained/gpt2.py
VegB/VLN-Transformer
da1fa71e419d8d05c96749445230a77338edba09
[ "Apache-2.0" ]
19
2020-07-29T15:25:45.000Z
2022-01-19T17:49:42.000Z
texar/torch/modules/pretrained/gpt2.py
VegB/VLN-Transformer
da1fa71e419d8d05c96749445230a77338edba09
[ "Apache-2.0" ]
3
2021-02-16T10:26:23.000Z
2021-06-08T16:50:40.000Z
texar/torch/modules/pretrained/gpt2.py
VegB/VLN-Transformer
da1fa71e419d8d05c96749445230a77338edba09
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The Texar Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utils of GPT2 Modules. """ import json import os import warnings from abc import ABC from typing import Any, Dict import torch from texar.torch.modules.pretrained.pretrained_base import PretrainedMixin __all__ = [ "PretrainedGPT2Mixin", ] _GPT2_PATH = "https://storage.googleapis.com/gpt-2/models/" _CHECKPOINT_FILES = [ "checkpoint", "encoder.json", "hparams.json", "vocab.bpe", "model.ckpt.data-00000-of-00001", "model.ckpt.index", "model.ckpt.meta"] class PretrainedGPT2Mixin(PretrainedMixin, ABC): r"""A mixin class to support loading pre-trained checkpoints for modules that implement the GPT2 model. The GPT2 model was proposed in `Language Models are Unsupervised Multitask Learners`_ by `Radford et al.` from OpenAI. It is a unidirectional Transformer model pre-trained using the vanilla language modeling objective on a large corpus. The available GPT2 models are as follows: * ``gpt2-small``: Small version of GPT-2, 124M parameters. * ``gpt2-medium``: Medium version of GPT-2, 355M parameters. * ``gpt2-large``: Large version of GPT-2, 774M parameters. We provide the following GPT2 classes: * :class:`~texar.torch.modules.GPT2Encoder` for text encoding. * :class:`~texar.torch.modules.GPT2Decoder` for text generation and decoding. * :class:`~texar.torch.modules.GPT2Classifier` for text classification and sequence tagging. .. 
_`Language Models are Unsupervised Multitask Learners`: https://openai.com/blog/better-language-models/ """ _MODEL_NAME = "GPT2" _MODEL2URL = { 'gpt2-small': [_GPT2_PATH + f"124M/{file}" for file in _CHECKPOINT_FILES], 'gpt2-medium': [_GPT2_PATH + f"355M/{file}" for file in _CHECKPOINT_FILES], 'gpt2-large': [_GPT2_PATH + f"774M/{file}" for file in _CHECKPOINT_FILES], } _IS_DECODE = False # Raise warning for the deprecated pre-trained model names class MyDict(dict): def __contains__(self, key): if key == '117M': warnings.warn("Pre-trained model name '117M' is deprecated, " "use 'gpt2-small' instead.", UserWarning) return True elif key == '345M': warnings.warn("Pre-trained model name '345M' is deprecated, " "use 'gpt2-medium' instead.", UserWarning) return True else: return super().__contains__(key) _DEPRECATED_MODEL2URL = { '117M': [_GPT2_PATH + f"124M/{file}" for file in _CHECKPOINT_FILES], '345M': [_GPT2_PATH + f"355M/{file}" for file in _CHECKPOINT_FILES], } _MODEL2URL.update(_DEPRECATED_MODEL2URL) _MODEL2URL = MyDict(_MODEL2URL) # type: ignore def _transform_config(self, pretrained_model_name: str, # type: ignore cache_dir: str) -> Dict[str, Any]: info = list(os.walk(cache_dir)) root, _, files = info[0] config_path = None for file in files: if file.endswith('hparams.json'): config_path = os.path.join(root, file) if config_path is None: raise ValueError(f"Cannot find the config file in {cache_dir}") with open(config_path) as f: config_gpt = json.loads(f.read()) hidden_dim = config_gpt["n_embd"] configs = { "vocab_size": config_gpt["n_vocab"], "context_size": config_gpt["n_ctx"], "embedding_size": config_gpt["n_embd"], "embed": { "dim": hidden_dim, }, "position_size": config_gpt["n_ctx"], "position_embed": { "dim": hidden_dim } } module_name = 'decoder' if self._IS_DECODE else 'encoder' configs.update({module_name: { "dim": hidden_dim, "num_blocks": config_gpt["n_layer"], "embedding_dropout": 0, "residual_dropout": 0, "multihead_attention": { "use_bias": True, "num_units": hidden_dim, "num_heads": config_gpt["n_head"], "output_dim": hidden_dim, }, "initializer": { "type": "variance_scaling_initializer", "kwargs": { "factor": 1.0, "mode": "FAN_AVG", "uniform": True, }, }, "poswise_feedforward": { "layers": [ { "type": "Linear", "kwargs": { "in_features": hidden_dim, "out_features": hidden_dim * 4, "bias": True, } }, { "type": "GPTGELU", "kwargs": {} }, { "type": "Linear", "kwargs": { "in_features": hidden_dim * 4, "out_features": hidden_dim, "bias": True, } } ], "name": "ffn", }, }}) if self._IS_DECODE: configs[module_name].update({'use_gpt_config': True}) else: configs[module_name].update({'use_bert_config': False}) return configs def _init_from_checkpoint(self, pretrained_model_name: str, cache_dir: str, load_output_layer: bool = True, **kwargs): r"""Initialize model parameters from weights stored in the pre-trained checkpoint. Args: pretrained_model_name (str): Name of the pre-trained model. cache_dir (str): Path to the cache directory. load_output_layer (bool): If `False`, will not load weights of the output layer. Set this argument to `False` when loading weights into a GPT2 encoder. Defaults to `True`. """ try: import numpy as np import tensorflow as tf except ImportError: print("Loading TensorFlow models in PyTorch requires installing " "TensorFlow. 
Please see https://www.tensorflow.org/install/ " "for installation instructions.") raise module_name = 'decoder' if self._IS_DECODE else 'encoder' global_tensor_map = { "model/wte": "word_embedder.embedding", "model/wpe": "position_embedder.embedding", "model/ln_f/b": module_name + ".final_layer_norm.bias", "model/ln_f/g": module_name + ".final_layer_norm.weight", } layer_tensor_map = { "ln_1/b": module_name + ".self_attn_layer_norm.{}.bias", "ln_1/g": module_name + ".self_attn_layer_norm.{}.weight", "ln_2/b": module_name + ".poswise_layer_norm.{}.bias", "ln_2/g": module_name + ".poswise_layer_norm.{}.weight", "mlp/c_fc/b": module_name + ".poswise_networks.{}._layers.0.bias", "mlp/c_proj/b": module_name + ".poswise_networks.{}._layers.2.bias", "attn/c_proj/b": module_name + ".self_attns.{}.O_dense.bias", } layer_transpose_map = { "mlp/c_fc/w": module_name + ".poswise_networks.{}._layers.0.weight", "mlp/c_proj/w": module_name + ".poswise_networks.{}._layers.2." "weight", "attn/c_proj/w": module_name + ".self_attns.{}.O_dense.weight", } tf_path = os.path.abspath(os.path.join(cache_dir, 'model.ckpt')) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, _ in init_vars: array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) tensor_names = [] for name, _ in self.named_parameters(): tensor_names.append(name) for name, array in zip(names, arrays): if name in global_tensor_map: v_name = global_tensor_map[name] if name == "model/wte": pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) if load_output_layer: output_pointer = self._name_to_variable( "decoder._output_layer.weight") assert output_pointer.shape == array.shape output_pointer.data = torch.from_numpy(array) elif name == "model/wpe": pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) else: pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) else: name_tmp = name.split("/") layer_no = name_tmp[1][1:] name = "/".join(name_tmp[2:]) if name in layer_tensor_map: v_name = layer_tensor_map[name].format(layer_no) pointer = self._name_to_variable(v_name) assert pointer.shape == array.shape pointer.data = torch.from_numpy(array) elif name in layer_transpose_map: v_name = layer_transpose_map[name].format(layer_no) pointer = self._name_to_variable(v_name) array_t = np.transpose(array) assert pointer.shape == array_t.shape pointer.data = torch.from_numpy(array_t) elif name == "attn/c_attn/w": index_d = array.shape[-1] // 3 Q_w = np.transpose(array[:, :index_d]) K_w = np.transpose(array[:, index_d: 2 * index_d]) V_w = np.transpose(array[:, 2 * index_d:]) q_weight = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.Q_dense.weight") k_weight = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.K_dense.weight") v_weight = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.V_dense.weight") assert q_weight.shape == Q_w.shape assert k_weight.shape == K_w.shape assert v_weight.shape == V_w.shape q_weight.data = torch.from_numpy(Q_w) k_weight.data = torch.from_numpy(K_w) v_weight.data = torch.from_numpy(V_w) elif name == "attn/c_attn/b": d = array.shape[0] Q_b = array[: d // 3] K_b = array[d // 3: 2 * d // 3] V_b = array[2 * d // 3:] q_bias = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.Q_dense.bias") k_bias = 
self._name_to_variable( f"{module_name}.self_attns.{layer_no}.K_dense.bias") v_bias = self._name_to_variable( f"{module_name}.self_attns.{layer_no}.V_dense.bias") assert q_bias.shape == Q_b.shape assert k_bias.shape == K_b.shape assert v_bias.shape == V_b.shape q_bias.data = torch.from_numpy(Q_b) k_bias.data = torch.from_numpy(K_b) v_bias.data = torch.from_numpy(V_b) else: print("Name error", name) raise Exception
40.73817
80
0.533065
11,833
0.916292
0
0
0
0
0
0
4,553
0.352563
c77641557884ec300d6f17e14694ed49328569cf
4,930
py
Python
Image classifier/train.py
anirudha-bs/Farm_assist
f824b7594befdb1132da2a5c03500a1885c6f036
[ "MIT" ]
null
null
null
Image classifier/train.py
anirudha-bs/Farm_assist
f824b7594befdb1132da2a5c03500a1885c6f036
[ "MIT" ]
null
null
null
Image classifier/train.py
anirudha-bs/Farm_assist
f824b7594befdb1132da2a5c03500a1885c6f036
[ "MIT" ]
null
null
null
from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf from keras import regularizers from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator from keras.models import load_model import numpy as np from keras.preprocessing.image import img_to_array, load_img from keras.preprocessing import image import os import numpy as np import matplotlib.pyplot as plt # defining classes def soil(result): soil_type="" if result[0]==2: soil_type="Red soil" elif result[0]==1: soil_type="Black soil" else: soil_type="Alluvial soil" return soil_type # Adding dataset paths PATH = 'new_datasets' train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') test_dir = os.path.join(PATH, 'test') train_red_dir = os.path.join(train_dir, 'Red_soil') validation_red_dir = os.path.join(validation_dir, 'Red_soil') train_black_dir = os.path.join(train_dir, 'Black_soil') validation_black_dir = os.path.join(validation_dir, 'Black_soil') train_all_dir = os.path.join(train_dir, 'Alluvial_soil') validation_all_dir = os.path.join(validation_dir, 'Alluvial_soil') num_soil_tr = len(os.listdir(train_red_dir)) + len(os.listdir(train_black_dir)) +len(os.listdir(train_all_dir)) num_soil_val = len(os.listdir(validation_red_dir)) + len(os.listdir(validation_black_dir)) + len((os.listdir(validation_all_dir))) print("Total training images = ",num_soil_tr) print("Total validation images = ",num_soil_val) # hyperparameters batch_size = 100 epochs = 15 IMG_HEIGHT = 128 IMG_WIDTH = 128 classes_num=3 # data generators train_image_generator = ImageDataGenerator(rescale=1./255) validation_image_generator = ImageDataGenerator(rescale=1./255) train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='categorical') val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size, directory=validation_dir, target_size=(IMG_HEIGHT, IMG_WIDTH), shuffle=True, class_mode='categorical') # defining the model model = Sequential([ Conv2D(16, 5, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)), MaxPooling2D(pool_size=(3, 3)), Dropout(0.2), Conv2D(32, 5, activation='relu'), MaxPooling2D(pool_size=(3, 3)), Dropout(0.2), Conv2D(64, 5, activation='relu'), MaxPooling2D(pool_size=(3, 3)), Dropout(0.3), Flatten(), Dense(32, activation='relu'), Dense(classes_num, activation='softmax') ]) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.summary() history = model.fit_generator( train_data_gen, steps_per_epoch= num_soil_tr// batch_size, epochs=epochs, validation_data=val_data_gen, validation_steps=num_soil_val // batch_size ) acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) # training and validation graphs plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() 
model.save('new_soil_classify.h5') # for testing trained model with images differnent class image_path="red.jpg" img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH)) plt.imshow(img) img = np.expand_dims(img, axis=0) result=model.predict_classes(img) plt.title(result[0]) plt.show() image_path1="black.jpg" img1 = image.load_img(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH)) plt.imshow(img1) img1 = np.expand_dims(img1, axis=0) result=model.predict_classes(img1) plt.title(result[0]) plt.show() image_path="all.jpg" img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH)) plt.imshow(img) img = np.expand_dims(img, axis=0) result=model.predict_classes(img) plt.title(result[0]) plt.show()
29.878788
130
0.683773
0
0
0
0
0
0
0
0
737
0.149493
c776c16efce7e570422a5d1752b829a85d1dbe4b
686
py
Python
questions/q118_linked_list_loop_removal/code.py
aadhityasw/Competitive-Programs
901a48d35f024a3a87c32a45b7f4531e8004a203
[ "MIT" ]
null
null
null
questions/q118_linked_list_loop_removal/code.py
aadhityasw/Competitive-Programs
901a48d35f024a3a87c32a45b7f4531e8004a203
[ "MIT" ]
1
2021-05-15T07:56:51.000Z
2021-05-15T07:56:51.000Z
questions/q118_linked_list_loop_removal/code.py
aadhityasw/Competitive-Programs
901a48d35f024a3a87c32a45b7f4531e8004a203
[ "MIT" ]
null
null
null
def removeLoop(head):
    # Floyd's cycle detection: advance a slow pointer by one and a fast
    # pointer by two until they meet inside the loop (or the list ends).
    ptr = head
    ptr2 = head
    while True:
        if ptr is None or ptr2 is None or ptr2.next is None:
            return
        ptr = ptr.next
        ptr2 = ptr2.next.next
        if ptr is ptr2:
            loopNode = ptr
            break

    # Count the number of nodes in the loop.
    ptr = loopNode.next
    count = 1
    while ptr is not loopNode:
        ptr = ptr.next
        count += 1

    # Start ptr2 `count` nodes ahead of head (with ptr1 one node behind it)
    # and advance all three pointers; ptr and ptr2 meet at the loop start,
    # leaving ptr1 on the last node of the loop, whose link is then cut.
    ptr = head
    ptr1 = head
    ptr2 = head.next
    while count > 1:
        ptr2 = ptr2.next
        ptr1 = ptr1.next
        count -= 1
    while ptr is not ptr2:
        ptr = ptr.next
        ptr2 = ptr2.next
        ptr1 = ptr1.next
    ptr1.next = None
19.055556
61
0.478134
0
0
0
0
0
0
0
0
0
0
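A small harness for removeLoop from the record above. The Node class here is a stand-in for whatever node type the caller supplies and is not part of the record.

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


if __name__ == "__main__":
    # Build 1 -> 2 -> 3 -> 4 -> 5 and loop the tail back to node 3.
    nodes = [Node(i) for i in range(1, 6)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    nodes[-1].next = nodes[2]

    removeLoop(nodes[0])

    # After removal the list terminates normally.
    out, cur = [], nodes[0]
    while cur is not None:
        out.append(cur.data)
        cur = cur.next
    print(out)  # [1, 2, 3, 4, 5]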
c779118332635de2c8ae2f98f07d435f86ed8e76
2,361
py
Python
fastrunner/httprunner3/report/html/gen_report.py
Chankee/AutoTestRunner
5f329b0dfac91ccd3541aabf46cc997cc4f01da3
[ "MIT" ]
1
2020-04-30T08:41:19.000Z
2020-04-30T08:41:19.000Z
httprunner/report/html/gen_report.py
Barronliu/httprunner
463b8c68cbd413fd2bb66852752149bc1609e98d
[ "Apache-2.0" ]
null
null
null
httprunner/report/html/gen_report.py
Barronliu/httprunner
463b8c68cbd413fd2bb66852752149bc1609e98d
[ "Apache-2.0" ]
null
null
null
import io import os from datetime import datetime from jinja2 import Template from loguru import logger from httprunner.exceptions import SummaryEmpty def gen_html_report(summary, report_template=None, report_dir=None, report_file=None): """ render html report with specified report name and template Args: summary (dict): test result summary data report_template (str): specify html report template path, template should be in Jinja2 format. report_dir (str): specify html report save directory report_file (str): specify html report file path, this has higher priority than specifying report dir. """ if not summary["time"] or summary["stat"]["testcases"]["total"] == 0: logger.error(f"test result summary is empty ! {summary}") raise SummaryEmpty if not report_template: report_template = os.path.join( os.path.abspath(os.path.dirname(__file__)), "template.html" ) logger.debug("No html report template specified, use default.") else: logger.info(f"render with html report template: {report_template}") logger.info("Start to render Html report ...") start_at_timestamp = summary["time"]["start_at"] utc_time_iso_8601_str = datetime.utcfromtimestamp(start_at_timestamp).isoformat() summary["time"]["start_datetime"] = utc_time_iso_8601_str if report_file: report_dir = os.path.dirname(report_file) report_file_name = os.path.basename(report_file) else: report_dir = report_dir or os.path.join(os.getcwd(), "reports") # fix #826: Windows does not support file name include ":" report_file_name = "{}.html".format(utc_time_iso_8601_str.replace(":", "").replace("-", "")) if not os.path.isdir(report_dir): os.makedirs(report_dir) report_path = os.path.join(report_dir, report_file_name) with io.open(report_template, "r", encoding='utf-8') as fp_r: template_content = fp_r.read() with io.open(report_path, 'w', encoding='utf-8') as fp_w: rendered_content = Template( template_content, extensions=["jinja2.ext.loopcontrols"] ).render(summary) fp_w.write(rendered_content) logger.info(f"Generated Html report: {report_path}") return report_path
36.323077
110
0.674714
0
0
0
0
0
0
0
0
838
0.354934
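A rough sketch of calling gen_html_report from the record above. Only the summary keys that the function itself reads are shown; the real HttpRunner summary carries many more fields that the default Jinja2 template consumes, so this call is illustrative rather than guaranteed to render the stock template.

import time

summary = {
    "success": True,
    "time": {"start_at": time.time(), "duration": 1.23},
    "stat": {"testcases": {"total": 1, "success": 1, "fail": 0}},
    "details": [],
}

report_path = gen_html_report(summary, report_dir="reports")
print(report_path)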
c779400f9f454e7ffcd25d7cea5b32ebe4fe996a
730
py
Python
SD/lab1/client.py
matheuscr30/UFU
e947e5a4ccd5c025cb8ef6e00b42ea1160742712
[ "MIT" ]
null
null
null
SD/lab1/client.py
matheuscr30/UFU
e947e5a4ccd5c025cb8ef6e00b42ea1160742712
[ "MIT" ]
11
2020-01-28T22:59:24.000Z
2022-03-11T23:59:04.000Z
SD/lab1/client.py
matheuscr30/UFU
e947e5a4ccd5c025cb8ef6e00b42ea1160742712
[ "MIT" ]
null
null
null
#client.py
#!/usr/bin/python
# This is client.py file

import socket                 # Import socket module

s = socket.socket()           # Create a socket object
host = socket.gethostname()   # Get local machine name
port = 12352                  # Reserve a port for your service.

s.connect((host, port))

while True:
    message = input('Digite mensagem: ')
    s.send(bytes(message, encoding='utf8'))
    if message == 'SAIR':
        break
    print('Mensagem enviada.')
    print('Esperando resposta.')
    answer = s.recv(1024).decode('utf8')
    print('Resposta recebida: ' + answer)

print('Desconectando.')
s.close()
27.037037
82
0.536986
0
0
0
0
0
0
0
0
300
0.410959
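The client above expects a peer listening on port 12352. A minimal echoing counterpart might look like the sketch below; it is not part of the record and simply mirrors the client's protocol (stop when 'SAIR' arrives).

# server.py - hypothetical counterpart for the client above.
import socket

s = socket.socket()
host = socket.gethostname()
port = 12352

s.bind((host, port))
s.listen(1)
conn, addr = s.accept()
print('Connected by', addr)

while True:
    data = conn.recv(1024)
    if not data or data.decode('utf8') == 'SAIR':
        break
    conn.sendall(b'Echo: ' + data)   # send each message back to the client

conn.close()
s.close()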
c77943cb74b84356ac52ea818e7a35cca299778c
4,040
py
Python
tests/helpers.py
ws4/TopCTFd
3b1e25df1318e86ff163a0b546f6e9b7f8305a5a
[ "Apache-2.0" ]
1
2019-06-25T09:24:29.000Z
2019-06-25T09:24:29.000Z
tests/helpers.py
ws4/TopCTFd
3b1e25df1318e86ff163a0b546f6e9b7f8305a5a
[ "Apache-2.0" ]
null
null
null
tests/helpers.py
ws4/TopCTFd
3b1e25df1318e86ff163a0b546f6e9b7f8305a5a
[ "Apache-2.0" ]
null
null
null
from CTFd import create_app from CTFd.models import * from sqlalchemy_utils import database_exists, create_database, drop_database from sqlalchemy.engine.url import make_url import datetime import six if six.PY2: text_type = unicode binary_type = str else: text_type = str binary_type = bytes def create_ctfd(ctf_name="CTFd", name="admin", email="[email protected]", password="password", setup=True): app = create_app('CTFd.config.TestingConfig') if setup: with app.app_context(): with app.test_client() as client: data = {} r = client.get('/setup') # Populate session with nonce with client.session_transaction() as sess: data = { "ctf_name": ctf_name, "name": name, "email": email, "password": password, "nonce": sess.get('nonce') } client.post('/setup', data=data) return app def destroy_ctfd(app): drop_database(app.config['SQLALCHEMY_DATABASE_URI']) def register_user(app, name="user", email="[email protected]", password="password"): with app.app_context(): with app.test_client() as client: r = client.get('/register') with client.session_transaction() as sess: data = { "name": name, "email": email, "password": password, "nonce": sess.get('nonce') } client.post('/register', data=data) def login_as_user(app, name="user", password="password"): with app.app_context(): with app.test_client() as client: r = client.get('/login') with client.session_transaction() as sess: data = { "name": name, "password": password, "nonce": sess.get('nonce') } client.post('/login', data=data) return client def get_scores(user): scores = user.get('/scores') scores = json.loads(scores.get_data(as_text=True)) return scores['standings'] def gen_challenge(db, name='chal_name', description='chal_description', value=100, category='chal_category', type=0): chal = Challenges(name, description, value, category) db.session.add(chal) db.session.commit() return chal def gen_award(db, teamid, name="award_name", value=100): award = Awards(teamid, name, value) db.session.add(award) db.session.commit() return award def gen_tag(db, chal, tag='tag_tag'): tag = Tags(chal, tag) db.session.add(tag) db.session.commit() return tag def gen_file(): pass def gen_flag(db, chal, flag='flag', key_type=0): key = Keys(chal, flag, key_type) db.session.add(key) db.session.commit() return key def gen_team(db, name='name', email='[email protected]', password='password'): team = Teams(name, email, password) db.session.add(team) db.session.commit() return team def gen_hint(db, chal, hint="This is a hint", cost=0, type=0): hint = Hints(chal, hint, cost, type) db.session.add(hint) db.session.commit() return hint def gen_solve(db, teamid, chalid, ip='127.0.0.1', flag='rightkey'): solve = Solves(teamid, chalid, ip, flag) solve.date = datetime.datetime.utcnow() db.session.add(solve) db.session.commit() return solve def gen_wrongkey(db, teamid, chalid, ip='127.0.0.1', flag='wrongkey'): wrongkey = WrongKeys(teamid, chalid, ip, flag) wrongkey.date = datetime.datetime.utcnow() db.session.add(wrongkey) db.session.commit() return wrongkey def gen_tracking(db, ip, team): tracking = Tracking(ip, team) db.session.add(tracking) db.session.commit() return tracking def gen_page(db, route, html): page = Pages(route, html) db.session.add(page) db.session.commit() return page
27.297297
117
0.592574
0
0
0
0
0
0
0
0
512
0.126733
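The tests/helpers.py record above collects the fixtures used by CTFd's test suite. A minimal sketch of how those helpers are typically chained in a test, assuming a CTFd checkout where the module is importable as tests.helpers; the test name, the requested path and the asserted status code are illustrative rather than taken from the repository:

from tests.helpers import create_ctfd, destroy_ctfd, register_user, login_as_user


def test_register_and_login():
    app = create_ctfd()              # creates the app and runs /setup with the admin account
    try:
        register_user(app)           # registers the default "user" account
        client = login_as_user(app)  # returns a test client holding the session cookie
        r = client.get('/challenges')
        assert r.status_code == 200  # illustrative assertion
    finally:
        destroy_ctfd(app)            # drops the test database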
c77b3c34564c716c04ed2a2e2297c397f73e511f
1,741
py
Python
homeassistant/components/kaiterra/const.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/kaiterra/const.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
homeassistant/components/kaiterra/const.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Consts for Kaiterra integration.""" from datetime import timedelta from homeassistant.const import ( CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER, CONCENTRATION_PARTS_PER_BILLION, CONCENTRATION_PARTS_PER_MILLION, PERCENTAGE, Platform, ) DOMAIN = "kaiterra" DISPATCHER_KAITERRA = "kaiterra_update" AQI_SCALE = { "cn": [0, 50, 100, 150, 200, 300, 400, 500], "in": [0, 50, 100, 200, 300, 400, 500], "us": [0, 50, 100, 150, 200, 300, 500], } AQI_LEVEL = { "cn": [ "Good", "Satisfactory", "Moderate", "Unhealthy for sensitive groups", "Unhealthy", "Very unhealthy", "Hazardous", ], "in": [ "Good", "Satisfactory", "Moderately polluted", "Poor", "Very poor", "Severe", ], "us": [ "Good", "Moderate", "Unhealthy for sensitive groups", "Unhealthy", "Very unhealthy", "Hazardous", ], } ATTR_VOC = "volatile_organic_compounds" ATTR_AQI_LEVEL = "air_quality_index_level" ATTR_AQI_POLLUTANT = "air_quality_index_pollutant" AVAILABLE_AQI_STANDARDS = ["us", "cn", "in"] AVAILABLE_UNITS = [ "x", PERCENTAGE, "C", "F", CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER, CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, CONCENTRATION_PARTS_PER_MILLION, CONCENTRATION_PARTS_PER_BILLION, ] AVAILABLE_DEVICE_TYPES = ["laseregg", "sensedge"] CONF_AQI_STANDARD = "aqi_standard" CONF_PREFERRED_UNITS = "preferred_units" DEFAULT_AQI_STANDARD = "us" DEFAULT_PREFERRED_UNIT: list[str] = [] DEFAULT_SCAN_INTERVAL = timedelta(seconds=30) PLATFORMS = [Platform.SENSOR, Platform.AIR_QUALITY]
22.907895
51
0.649627
0
0
0
0
0
0
0
0
499
0.286617
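The kaiterra const.py record above pairs per-standard AQI breakpoints (AQI_SCALE) with level labels (AQI_LEVEL). A standalone illustration of how the two tables line up, trimmed to the US standard for brevity; the aqi_to_level helper is an assumption for the example and is not part of the integration:

from bisect import bisect_right

AQI_SCALE = {"us": [0, 50, 100, 150, 200, 300, 500]}
AQI_LEVEL = {
    "us": [
        "Good",
        "Moderate",
        "Unhealthy for sensitive groups",
        "Unhealthy",
        "Very unhealthy",
        "Hazardous",
    ],
}


def aqi_to_level(aqi, standard="us"):
    scale = AQI_SCALE[standard]
    levels = AQI_LEVEL[standard]
    # bisect_right finds the first breakpoint above the value; clamp to the label range
    index = min(bisect_right(scale, aqi) - 1, len(levels) - 1)
    return levels[max(index, 0)]


print(aqi_to_level(42))   # Good
print(aqi_to_level(180))  # Unhealthy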
c77bfcd69447b6d8753b518a3930aaea586d8856
440
py
Python
support/views.py
bhagirath1312/ich_bau
d37fe7aa3379f312a4d8b5f3d4715dd334b9adb0
[ "Apache-2.0" ]
1
2021-11-25T19:37:01.000Z
2021-11-25T19:37:01.000Z
support/views.py
bhagirath1312/ich_bau
d37fe7aa3379f312a4d8b5f3d4715dd334b9adb0
[ "Apache-2.0" ]
197
2017-09-06T22:54:20.000Z
2022-02-05T00:04:13.000Z
support/views.py
bhagirath1312/ich_bau
d37fe7aa3379f312a4d8b5f3d4715dd334b9adb0
[ "Apache-2.0" ]
2
2017-11-08T02:13:03.000Z
2020-09-30T19:48:12.000Z
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect

from .models import SupportProject

# Create your views here.


def index(request):
    sp = SupportProject.objects.all()
    if sp.count() == 1:
        return HttpResponseRedirect(sp.first().project.get_absolute_url())
    else:
        context_dict = {'sps': sp, }
        return render(request, 'support/index.html', context_dict)
27.5
76
0.688636
0
0
0
0
0
0
0
0
50
0.113636
c77bfffe662ca6c238ec477ceec482de486d7271
2,931
py
Python
timeline/models.py
KolibriSolutions/BepMarketplace
c47d252fd744cde6b927e37c34d7a103c6162be5
[ "BSD-3-Clause" ]
1
2019-06-29T15:24:24.000Z
2019-06-29T15:24:24.000Z
timeline/models.py
KolibriSolutions/BepMarketplace
c47d252fd744cde6b927e37c34d7a103c6162be5
[ "BSD-3-Clause" ]
2
2020-01-12T17:47:33.000Z
2020-01-12T17:47:45.000Z
timeline/models.py
KolibriSolutions/BepMarketplace
c47d252fd744cde6b927e37c34d7a103c6162be5
[ "BSD-3-Clause" ]
2
2019-06-29T15:24:26.000Z
2020-01-08T15:15:03.000Z
#  Bep Marketplace ELE
#  Copyright (c) 2016-2021 Kolibri Solutions
#  License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from datetime import datetime

from django.core.exceptions import ValidationError
from django.db import models


class TimeSlot(models.Model):
    """
    A timeslot is a year in which the current BEP runs. It consists of multiple timephases.
    """
    Name = models.CharField(max_length=250)
    Begin = models.DateField()
    End = models.DateField()

    def __str__(self):
        return self.Name

    def clean(self):
        if not self.Begin or not self.End:
            raise ValidationError('Please fill in all required fields.')
        if self.Begin > self.End:
            raise ValidationError("End date should be larger than begin date")

    class Meta:
        ordering = ["Begin"]

    def is_finished(self):
        return self.End < datetime.now().date()


class TimePhase(models.Model):
    """
    A time phase is a phase the system is in. Each phase has its own pages and permissions.
    """
    Types = (
        (1, "Generating project proposals"),
        (2, "Projects quality check"),
        (3, "Students choosing projects"),
        (4, "Distribution of projects"),
        (5, "Gather and process objections"),
        (6, "Execution of the projects"),
        (7, "Presentation of results"),
    )

    Description = models.IntegerField(choices=Types)
    Begin = models.DateField()
    End = models.DateField()
    CountdownEnd = models.DateField(null=True, blank=True,
                                    help_text='Fake end date, to set the homepage clock to an earlier date. '
                                              'A trick to motivate people.')
    TimeSlot = models.ForeignKey(TimeSlot, on_delete=models.PROTECT, related_name="timephases")

    def __str__(self):
        return self.Types[self.Description - 1][1] + " in " + str(self.TimeSlot)

    def clean(self):
        if not self.Begin or not self.End or not hasattr(self, 'TimeSlot'):
            raise ValidationError('Please fill in all required fields.')
        if self.Begin > self.End:
            raise ValidationError("End date should be larger than begin date")
        if not (self.TimeSlot.Begin <= self.Begin <= self.TimeSlot.End):
            raise ValidationError("Begin date should be in time slot {}".format(self.TimeSlot))
        if not (self.TimeSlot.Begin <= self.End <= self.TimeSlot.End):
            raise ValidationError("End date should be in time slot {}".format(self.TimeSlot))
        if self.TimeSlot.timephases.filter(Description=self.Description).exists():
            if self.TimeSlot.timephases.get(Description=self.Description) != self:
                raise ValidationError("Time slot {} already has time phase {}".format(self.TimeSlot, self.Description))

    class Meta:
        ordering = ['TimeSlot', 'Begin']
38.565789
119
0.643125
2,641
0.901058
0
0
0
0
0
0
984
0.335722
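The timeline/models.py record above keeps its date sanity checks in clean(). A minimal sketch of triggering the TimeSlot check, assuming a configured Django project with this timeline app installed; the dates are illustrative and the instance is never saved, so no database access is needed:

import datetime

from django.core.exceptions import ValidationError
from timeline.models import TimeSlot

slot = TimeSlot(Name="2021-2022",
                Begin=datetime.date(2022, 9, 1),  # deliberately after End
                End=datetime.date(2021, 8, 31))
try:
    slot.clean()
except ValidationError as e:
    print(e.messages)  # ['End date should be larger than begin date']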
c77d8ee927213d5c37d334a8dc0c0e3d7493a2cf
2,221
py
Python
app/api/user_routes.py
nappernick/envelope
af4f574c04c51293b90ee2e09d0f95d12ca36d2c
[ "MIT" ]
2
2021-01-13T22:52:16.000Z
2021-01-29T18:37:51.000Z
app/api/user_routes.py
nappernick/envelope
af4f574c04c51293b90ee2e09d0f95d12ca36d2c
[ "MIT" ]
32
2021-01-08T19:05:33.000Z
2021-04-07T22:01:54.000Z
app/api/user_routes.py
nappernick/envelope
af4f574c04c51293b90ee2e09d0f95d12ca36d2c
[ "MIT" ]
null
null
null
from datetime import datetime

from werkzeug.security import generate_password_hash
from flask import Blueprint, jsonify, request
from sqlalchemy.orm import joinedload
from flask_login import login_required

from app.models import db, User, Type
from app.forms import UpdateUserForm
from .auth_routes import authenticate, validation_errors_to_error_messages

user_routes = Blueprint('users', __name__)


@user_routes.route("/types")
def types():
    types = db.session.query(Type).all()
    return jsonify([type.name_to_id() for type in types])


@user_routes.route('/')
@login_required
def users():
    users = db.session.query(User).all()
    return jsonify([user.to_dict_full() for user in users])


@user_routes.route('/<int:id>')
@login_required
def user(id):
    user = User.query.get(id)
    return user.to_dict()


@user_routes.route('/<int:id>', methods=["DELETE"])
@login_required
def user_delete(id):
    user = User.query.get(id)
    db.session.delete(user)
    db.session.commit()
    return {id: "Successfully deleted"}


@user_routes.route('/<int:id>', methods=["POST"])
@login_required
def user_update(id):
    user = User.query.options(joinedload("type")).get(id)
    form = UpdateUserForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        print("_______ FORM DATA", form.data)
        # assign plain values to the model fields
        user.username = form.data['username']
        user.email = form.data['email']
        user.hashed_password = generate_password_hash(form.password.data)
        user.first_name = form.data['first_name']
        user.last_name = form.data['last_name']
        user.type_id = form.data['type_id']
        user.updated_at = datetime.now()
        db.session.commit()
        return user.to_dict_full()
    return {'errors': validation_errors_to_error_messages(form.errors)}


@user_routes.route("/<int:id>/clients")
@login_required
def admin_fetch_clients(id):
    authenticated = authenticate()
    clientUsers = db.session.query(User).filter_by(type_id=2).all()
    if authenticated["type_id"] != 1:
        return jsonify({"errors": ["Unauthorized"]})
    return jsonify([user.to_dict_full() for user in clientUsers])
30.013514
74
0.692031
0
0
0
0
1,804
0.812247
0
0
243
0.10941
c77e4ddc9f8fe255a8511d43e707cc1ce8c44d20
19,717
py
Python
timeflux/nodes/ml.py
OpenMindInnovation/timeflux
fd27ea6706df80fa52fb73ea3dba65e14ccd088c
[ "MIT" ]
null
null
null
timeflux/nodes/ml.py
OpenMindInnovation/timeflux
fd27ea6706df80fa52fb73ea3dba65e14ccd088c
[ "MIT" ]
null
null
null
timeflux/nodes/ml.py
OpenMindInnovation/timeflux
fd27ea6706df80fa52fb73ea3dba65e14ccd088c
[ "MIT" ]
null
null
null
"""Machine Learning""" import importlib import numpy as np import pandas as pd import json from jsonschema import validate from sklearn.pipeline import make_pipeline from timeflux.core.node import Node from timeflux.core.exceptions import ValidationError, WorkerInterrupt from timeflux.helpers.background import Task from timeflux.helpers.port import make_event, match_events, get_meta from timeflux.helpers.clock import now, min_time, max_time # Statuses IDLE = 0 ACCUMULATING = 1 FITTING = 2 READY = 3 class Pipeline(Node): """Fit, transform and predict. Training on continuous data is always unsupervised. Training on epoched data can either be supervised or unsupervised. If fit is `False`, input events are ignored, and initital training is not performed. Automatically set to False if mode is either 'fit_predict' or fit_transform'. Automatically set to True if mode is either 'predict', 'predict_proba' or 'predict_log_proba'. Attributes: i (Port): Continuous data input, expects DataFrame. i_* (Port): Epoched data input, expects DataFrame. i_training (Port): Continuous training data input, expects DataFrame. i_training_* (Port): Epoched training data input, expects DataFrame. i_events (Port): Event input, expects DataFrame. o (Port): Continuous data output, provides DataFrame. o_* (Port): Epoched data output, provides DataFrame. o_events (Port): Event output, provides DataFrame. Args: steps (dict): Pipeline steps and settings fit (bool): mode ('predict'|'predict_proba'|'predict_log_proba'|'transform'|'fit_predict'|'fit_transform'): meta_label (str|tuple|None): event_start_accumulation (str): event_stop_accumulation (str): event_start_training (str): event_reset (str): buffer_size (str): passthrough (bool): resample (bool): resample_direction ('right'|'left'|'both'): resample_rate (None|float): model: Load a pickle model - NOT IMPLEMENTED cv: Cross-validation - NOT IMPLEMENTED """ def __init__( self, steps, fit=True, mode="predict", meta_label=("epoch", "context", "target"), event_start_accumulation="accumulation_starts", event_stop_accumulation="accumulation_stops", event_start_training="training_starts", event_reset=None, buffer_size="5s", passthrough=False, resample=False, resample_direction="right", resample_rate=None, model=None, cv=None, use_task = True, ): # TODO: validation # TODO: model loading from file # TODO: cross-validation # TODO: provide more context for errors self.fit = fit self.mode = mode self.meta_label = meta_label self.event_start_accumulation = event_start_accumulation self.event_stop_accumulation = event_stop_accumulation self.event_start_training = event_start_training self.event_reset = event_reset self.passthrough = passthrough self.resample = resample self.resample_direction = resample_direction self.resample_rate = resample_rate self.use_task = use_task self._buffer_size = pd.Timedelta(buffer_size) self._make_pipeline(steps) self._reset() def update(self): # Let's get ready self._clear() # Reset if self.event_reset: matches = match_events(self.i_events, self.event_reset) if matches is not None: self.logger.debug("Reset") if self._task is not None: if self._status == FITTING: self._task.stop() self._reset() # Are we dealing with continuous data or epochs? 
if self._dimensions is None: port_name = "i_training" if self.fit else "i" if getattr(self, port_name).ready(): self._dimensions = 2 elif len(list(self.iterate(port_name + "_*"))) > 0: self._dimensions = 3 # Set the accumulation boundaries if self._accumulation_start is None: matches = match_events(self.i_events, self.event_start_accumulation) if matches is not None: self._accumulation_start = matches.index.values[0] self._status = ACCUMULATING self.logger.debug("Start accumulation") if self._accumulation_stop is None: matches = match_events(self.i_events, self.event_stop_accumulation) if matches is not None: self._accumulation_stop = matches.index.values[0] self.logger.debug("Stop accumulation") # Always buffer a few seconds, in case the start event is coming late if self._status == IDLE: start = (now() - self._buffer_size).to_datetime64() stop = max_time() self._accumulate(start, stop) # Accumulate between boundaries if self._status == ACCUMULATING: start = self._accumulation_start stop = self._accumulation_stop if self._accumulation_stop else max_time() self._accumulate(start, stop) # Should we start fitting the model? if self._status < FITTING: if match_events(self.i_events, self.event_start_training) is not None: self._status = FITTING self.logger.debug("Start training") if self.use_task: self._task = Task( self._pipeline, "fit", self._X_train, self._y_train ).start() else: try: self._pipeline = self._pipeline.fit(self._X_train, self._y_train) self._fitted_success = True except Exception as error: self._fitted_success = False # Is the model ready? if self._status == FITTING: ready_to_proceed = False if self.use_task: status = self._task.status() if status: ready_to_proceed = True else: ready_to_proceed = True if ready_to_proceed: if self.use_task: success = status["success"] else: success = self._fitted_success if success: if self.use_task: self._pipeline = status["instance"] self.logger.debug(f"Model fitted in {status['time']} seconds") else: self.logger.debug(f"Model fitted") self._status = READY # TODO: this can potentially be overwritten in _send() self.o_events.data = make_event("ready") else: if self.use_task: self.logger.error( f"An error occured while fitting: {status['exception'].args[0]}" ) self.logger.debug( "\nTraceback (most recent call last):\n" + "".join(status["traceback"]) ) else: self.logger.error( f"An error occured while fitting" ) raise WorkerInterrupt() # Run the pipeline if self._status == READY: self._receive() if self._X is not None: args = [self._X] if self.mode.startswith("fit"): args.append(self._y) # TODO: optionally loop through epochs instead of sending them all at once self._out = getattr(self._pipeline, self.mode)(*args) # Set output streams self._send() def terminate(self): # Kill the fit subprocess if self._task is not None: self._task.stop() def _reset(self): self._X_train = None self._y_train = None self._X_train_indices = np.array([], dtype=np.datetime64) self._accumulation_start = None self._accumulation_stop = None self._dimensions = None self._shape = () self._task = None if self.mode.startswith("fit"): self.fit = False elif self.mode.startswith("predict"): self.fit = True if self.fit: self._status = IDLE else: self._status = READY def _clear(self): self._X = None self._y = None self._X_indices = [] self._X_columns = [] self._X_meta = None self._out = None def _make_pipeline(self, steps): schema = { "type": "array", "minItems": 1, "items": { "type": "object", "properties": { "module": {"type": "string"}, "class": {"type": "string"}, "args": 
{"type": "object"}, }, "required": ["module", "class"], }, } try: validate(instance=steps, schema=schema) except Exception as error: raise ValidationError("steps", error.message) pipeline = [] for step in steps: try: args = step["args"] if "args" in step else {} m = importlib.import_module(step["module"]) c = getattr(m, step["class"]) i = c(**args) pipeline.append(i) except ImportError as error: raise ValidationError("steps", f"could not import '{step['module']}'") except AttributeError as error: raise ValidationError( "steps", f"could not find class '{step['class']}'" ) except TypeError as error: raise ValidationError( "steps", f"could not instantiate class '{step['class']}' with the given params", ) # TODO: memory and verbose args self._pipeline = make_pipeline(*pipeline, memory=None, verbose=False) def _accumulate(self, start, stop): # Do nothing if no fitting required if not self.fit: return # Set defaults indices = np.array([], dtype=np.datetime64) # Accumulate continuous data if self._dimensions == 2: if self.i_training.ready(): data = self.i_training.data mask = (data.index >= start) & (data.index < stop) data = data[mask] if not data.empty: if self._X_train is None: self._X_train = data.values self._shape = self._X_train.shape[1] indices = data.index.values else: if data.shape[1] == self._shape: self._X_train = np.vstack((self._X_train, data.values)) indices = data.index.values else: self.logger.warning("Invalid shape") # Accumulate epoched data if self._dimensions == 3: for _, _, port in self.iterate("i_training_*"): if port.ready(): index = port.data.index.values[0] if index >= start and index < stop: data = port.data.values label = get_meta(port, self.meta_label) if self._shape and (data.shape != self._shape): self.logger.warning("Invalid shape") continue if self.meta_label is not None and label is None: self.logger.warning("Invalid label") continue if self._X_train is None: self._X_train = np.array([data]) self._shape = self._X_train.shape[1:] else: self._X_train = np.vstack((self._X_train, [data])) indices = np.append(indices, index) if label is not None: if self._y_train is None: self._y_train = np.array([label]) else: self._y_train = np.append(self._y_train, [label]) # Store indices if indices.size != 0: self._X_train_indices = np.append(self._X_train_indices, indices) # Trim if self._X_train is not None: mask = (self._X_train_indices >= start) & (self._X_train_indices < stop) self._X_train = self._X_train[mask] self._X_train_indices = self._X_train_indices[mask] if self._y_train is not None: self._y_train = self._y_train[mask] def _receive(self): # Continuous data if self._dimensions == 2: if self.i.ready(): if not self._X_columns: self._X_columns = list(self.i.data.columns) if self._shape and (self.i.data.shape[1] != self._shape): self.logger.warning("Invalid shape") else: self._X = self.i.data.values self._X_indices = self.i.data.index.values self._X_meta = self.i.meta # Epochs if self._dimensions == 3: for name, _, port in self.iterate("i_*"): if port.ready() and "training" not in name and "events" not in name: data = port.data.values meta = port.meta indices = port.data.index.values label = get_meta(port, self.meta_label) if not self._X_columns: self._X_columns = list(port.data.columns) if self._shape and (data.shape != self._shape): self.logger.warning("Invalid shape") continue if not self.fit and self.meta_label is not None and label is None: self.logger.warning("Invalid label") continue if self._X is None: self._X = [] if self._y is None and label is not None: 
self._y = [] if self._X_meta is None: self._X_meta = [] self._X.append(data) self._X_indices.append(indices) self._X_meta.append(meta) if label is not None: self._y.append(label) def _send(self): # Passthrough if self._status < READY and self.passthrough: inputs = [] for _, suffix, port in self.iterate("i*"): if not suffix.startswith("_training") and not suffix.startswith( "_events" ): inputs.append((suffix, port)) for suffix, src_port in inputs: dst_port = getattr(self, "o" + suffix) dst_port.data = src_port.data dst_port.meta = src_port.meta # Model if self._out is not None: if "predict" in self.mode: # Send events if len(self._X_indices) == len(self._out): # TODO: skip JSON serialization? data = [ [self.mode, json.dumps({"result": self._np_to_native(result)})] for result in self._out ] times = ( self._X_indices if self._dimensions == 2 else np.asarray(self._X_indices)[:, 0] ) # Keep the first timestamp of each epoch names = ["label", "data"] meta = ( self._X_meta if self._dimensions == 2 else {"epochs": self._X_meta} ) # port.meta should always be an object self.o_events.set(data, times, names, meta) else: self.logger.warning( "Number of predictions inconsistent with input length" ) else: # Send data if self._dimensions == 2: try: self.o.data = self._reindex( self._out, self._X_indices, self._X_columns ) self.o.meta = self._X_meta except Exception as e: self.logger.warning(getattr(e, "message", repr(e))) if self._dimensions == 3: if len(self._X_indices) == len(self._out): for i, (data, times) in enumerate( zip(self._out, self._X_indices) ): try: getattr(self, "o_" + str(i)).data = self._reindex( data, times, self._X_columns ) getattr(self, "o_" + str(i)).meta = self._X_meta[i] except Exception as e: self.logger.warning(getattr(e, "message", repr(e))) else: self.logger.warning( "Number of transforms inconsistent with number of epochs" ) def _np_to_native(self, data): """Convert numpy scalars and objects to native types.""" return getattr(data, "tolist", lambda: data)() def _reindex(self, data, times, columns): if len(data) != len(times): if self.resample: # Resample at a specific frequency kwargs = {"periods": len(data)} if self.resample_rate is None: kwargs["freq"] = pd.infer_freq(times) kwargs["freq"] = pd.tseries.frequencies.to_offset(kwargs["freq"]) else: kwargs["freq"] = pd.DateOffset(seconds=1 / self.resample_rate) if self.resample_direction == "right": kwargs["start"] = times[0] elif self.resample_direction == "left": kwargs["end"] = times[-1] else: def middle(a): return int(np.ceil(len(a) / 2)) - 1 kwargs["start"] = times[middle(times)] - ( middle(data) * kwargs["freq"] ) times = pd.date_range(**kwargs) else: # Linearly arange between first and last times = pd.date_range(start=times[0], end=times[-1], periods=len(data)) return pd.DataFrame(data, times, columns)
39.121032
103
0.502054
19,208
0.974185
0
0
0
0
0
0
3,857
0.195618
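The timeflux ml.py record above validates its steps argument against a JSON schema in _make_pipeline: a non-empty list of objects carrying a module name, a class name and an optional args dict, which are then instantiated and chained with sklearn.pipeline.make_pipeline. A sketch of a conforming value; the scikit-learn estimators are an assumption for the example, any importable transformer or estimator would do:

steps = [
    {"module": "sklearn.preprocessing", "class": "StandardScaler"},
    {
        "module": "sklearn.linear_model",
        "class": "LogisticRegression",
        "args": {"C": 1.0},
    },
]

# node = Pipeline(steps=steps, mode="predict")  # in practice the node is constructed by the timeflux graph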
c780e591cbad3129663e73ce7d7f50fa3fb44f8f
3,675
py
Python
cms/migrations/0006_auto_20170122_1545.py
josemlp91/django-landingcms
9d9270204369e9663ff15eb0bd4c4093b3727c6c
[ "Apache-2.0" ]
null
null
null
cms/migrations/0006_auto_20170122_1545.py
josemlp91/django-landingcms
9d9270204369e9663ff15eb0bd4c4093b3727c6c
[ "Apache-2.0" ]
null
null
null
cms/migrations/0006_auto_20170122_1545.py
josemlp91/django-landingcms
9d9270204369e9663ff15eb0bd4c4093b3727c6c
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 15:45
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('content', '0002_auto_20170122_1509'),
        ('cms', '0005_auto_20170122_1534'),
    ]

    operations = [
        migrations.AddField(
            model_name='paginahome',
            name='posts1_imagen',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_imagen', to='content.ImageContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts1_texto',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_texto', to='content.TextContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts1_titulo',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_titulo', to='content.TitleContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts2_imagen',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_imagen', to='content.ImageContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts2_texto',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_texto', to='content.TextContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts2_titulo',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_titulo', to='content.TitleContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts3_imagen',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_imagen', to='content.ImageContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts3_texto',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_texto', to='content.TextContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts3_titulo',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_titulo', to='content.TitleContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts4_imagen',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_imagen', to='content.ImageContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts4_texto',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_texto', to='content.TextContent'),
        ),
        migrations.AddField(
            model_name='paginahome',
            name='posts4_titulo',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_titulo', to='content.TitleContent'),
        ),
    ]
47.115385
164
0.653605
3,484
0.948027
0
0
0
0
0
0
891
0.242449
c781463cac684dcc8d5bd7e224347018ce45563c
3,641
py
Python
1-lab-lambdaDynamoDB/source/cdk/app.py
donnieprakoso/workshop-buildingRESTAPIwithAWS
b3287d5749b65648710dde4e736ba55b73371c6b
[ "Apache-2.0" ]
23
2021-04-24T06:32:58.000Z
2022-03-27T11:04:57.000Z
1-lab-lambdaDynamoDB/source/cdk/app.py
ivandi1980/workshop-restAPI
b3287d5749b65648710dde4e736ba55b73371c6b
[ "Apache-2.0" ]
null
null
null
1-lab-lambdaDynamoDB/source/cdk/app.py
ivandi1980/workshop-restAPI
b3287d5749b65648710dde4e736ba55b73371c6b
[ "Apache-2.0" ]
5
2021-04-24T12:10:02.000Z
2021-11-18T13:34:33.000Z
#!/usr/bin/env python3

from aws_cdk import aws_iam as _iam
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_dynamodb as _ddb
from aws_cdk import core


class CdkStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, stack_prefix: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Model all required resources
        ddb_table = _ddb.Table(
            self,
            id='{}-data'.format(stack_prefix),
            table_name='{}-data'.format(stack_prefix),
            partition_key=_ddb.Attribute(name='ID', type=_ddb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY,  # THIS IS NOT RECOMMENDED FOR PRODUCTION USE
            read_capacity=1,
            write_capacity=1)

        ## IAM Roles
        lambda_role = _iam.Role(
            self,
            id='{}-lambda-role'.format(stack_prefix),
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))

        cw_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
        cw_policy_statement.add_actions("logs:CreateLogGroup")
        cw_policy_statement.add_actions("logs:CreateLogStream")
        cw_policy_statement.add_actions("logs:PutLogEvents")
        cw_policy_statement.add_actions("logs:DescribeLogStreams")
        cw_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cw_policy_statement)

        # Add role for DynamoDB
        dynamodb_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
        dynamodb_policy_statement.add_actions("dynamodb:PutItem")
        dynamodb_policy_statement.add_actions("dynamodb:GetItem")
        dynamodb_policy_statement.add_actions("dynamodb:Scan")
        dynamodb_policy_statement.add_actions("dynamodb:Query")
        dynamodb_policy_statement.add_actions("dynamodb:ConditionCheckItem")
        dynamodb_policy_statement.add_resources(ddb_table.table_arn)
        lambda_role.add_to_policy(dynamodb_policy_statement)

        ## AWS Lambda Functions
        fnLambda_storeData = _lambda.Function(
            self,
            "{}-function-storeData".format(stack_prefix),
            code=_lambda.AssetCode("../lambda-functions/store-data"),
            handler="app.handler",
            timeout=core.Duration.seconds(60),
            role=lambda_role,
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_storeData.add_environment("TABLE_NAME", ddb_table.table_name)

        fnLambda_listData = _lambda.Function(
            self,
            "{}-function-getData".format(stack_prefix),
            code=_lambda.AssetCode("../lambda-functions/list-data"),
            handler="app.handler",
            role=lambda_role,
            timeout=core.Duration.seconds(60),
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_listData.add_environment("TABLE_NAME", ddb_table.table_name)

        core.CfnOutput(self, "{}-output-dynamodbTable".format(stack_prefix), value=ddb_table.table_name, export_name="{}-ddbTable".format(stack_prefix))
        core.CfnOutput(self, "{}-output-lambdaStoreData".format(stack_prefix), value=fnLambda_storeData.function_name, export_name="{}-lambdaStoreDataName".format(stack_prefix))
        core.CfnOutput(self, "{}-output-lambdaListData".format(stack_prefix), value=fnLambda_listData.function_name, export_name="{}-lambdaListDataName".format(stack_prefix))


stack_prefix = 'restAPI-lab1-lambdaDynamoDB'
app = core.App()
stack = CdkStack(app, stack_prefix, stack_prefix=stack_prefix)
core.Tags.of(stack).add('Name', stack_prefix)
app.synth()
44.950617
177
0.682505
3,288
0.903049
0
0
0
0
0
0
730
0.200494
c7821ff30782af7bc27dc24920e0c07f5856c1a5
326
py
Python
module_6_lets_make_a_web_app/webapp/yield.py
JCarlos831/python_getting_started_-pluralsight-
5059a1019c46eb8174fc86989fab7cc0c4caffd4
[ "MIT" ]
null
null
null
module_6_lets_make_a_web_app/webapp/yield.py
JCarlos831/python_getting_started_-pluralsight-
5059a1019c46eb8174fc86989fab7cc0c4caffd4
[ "MIT" ]
null
null
null
module_6_lets_make_a_web_app/webapp/yield.py
JCarlos831/python_getting_started_-pluralsight-
5059a1019c46eb8174fc86989fab7cc0c4caffd4
[ "MIT" ]
null
null
null
students = []


def read_file():
    try:
        f = open("students.txt", "r")
        for student in read_students(f):
            students.append(student)
        f.close()
    except Exception:
        print("Could not read file")


def read_students(f):
    for line in f:
        yield line


read_file()
print(students)
16.3
40
0.57362
0
0
59
0.180982
0
0
0
0
38
0.116564
c782a4a5ddbb4061270df891d7584a13d55d2191
6,325
py
Python
paul_analysis/Python/labird/gamma.py
lzkelley/arepo-mbh-sims_analysis
f14519552cedd39a040b53e6d7cc538b5b8f38a3
[ "MIT" ]
null
null
null
paul_analysis/Python/labird/gamma.py
lzkelley/arepo-mbh-sims_analysis
f14519552cedd39a040b53e6d7cc538b5b8f38a3
[ "MIT" ]
null
null
null
paul_analysis/Python/labird/gamma.py
lzkelley/arepo-mbh-sims_analysis
f14519552cedd39a040b53e6d7cc538b5b8f38a3
[ "MIT" ]
null
null
null
"""Module for finding an effective equation of state for in the Lyman-alpha forest from a snapshot. Ported to python from Matteo Viel's IDL script.""" import h5py import math import numpy as np def read_gamma(num,base): """Reads in an HDF5 snapshot from the NE gadget version, fits a power law to the equation of state for low density, low temperature gas. Inputs: num - snapshot number base - Snapshot directory Outputs: (T_0, \gamma) - Effective equation of state parameters """ # Baryon density parameter omegab0 = 0.0449 singlefile=False #base="/home/spb41/data2/runs/bf2/" snap=str(num).rjust(3,'0') fname=base+"/snapdir_"+snap+"/snap_"+snap try: f=h5py.File(fname+".0.hdf5",'r') except IOError: fname=base+"/snap_"+snap f=h5py.File(fname+".hdf5",'r') singlefile=True print 'Reading file from:',fname head=f["Header"].attrs npart=head["NumPart_ThisFile"] redshift=head["Redshift"] print "z=",redshift atime=head["Time"] h100=head["HubbleParam"] if npart[0] == 0 : print "No gas particles!\n" return f.close() # Scaling factors and constants Xh = 0.76 # Hydrogen fraction G = 6.672e-11 # N m^2 kg^-2 kB = 1.3806e-23 # J K^-1 Mpc = 3.0856e22 # m kpc = 3.0856e19 # m Msun = 1.989e30 # kg mH = 1.672e-27 # kg H0 = 1.e5/Mpc # 100 km s^-1 Mpc^-1 in SI units gamma = 5.0/3.0 rscale = (kpc * atime)/h100 # convert length to m #vscale = atime**0.5 # convert velocity to km s^-1 mscale = (1e10 * Msun)/h100 # convert mass to kg dscale = mscale / (rscale**3.0) # convert density to kg m^-3 escale = 1e6 # convert energy/unit mass to J kg^-1 N = 0 sx = 0 sy = 0 sxx = 0 sxy = 0 met = 0 carb = 0 oxy = 0 totmass=0 totigmmass=0 totmet = 0 sxxm = 0 sxym = 0 sxm = 0 sym = 0 for i in np.arange(0,500) : ffname=fname+"."+str(i)+".hdf5" if singlefile: ffname=fname+".hdf5" if i > 0: break #print 'Reading file ',ffname try: f=h5py.File(ffname,'r') except IOError: break head=f["Header"].attrs npart=head["NumPart_ThisFile"] if npart[0] == 0 : print "No gas particles in file ",i,"!\n" break bar = f["PartType0"] u=np.array(bar['InternalEnergy'],dtype=np.float64) rho=np.array(bar['Density'],dtype=np.float64) nelec=np.array(bar['ElectronAbundance'],dtype=np.float64) metalic = np.array(bar['GFM_Metallicity'],dtype=np.float64) metals = np.array(bar['GFM_Metals'],dtype=np.float64) mass = np.array(bar['Masses'], dtype=np.float64) #nH0=np.array(bar['NeutralHydrogenAbundance']) f.close() # Convert to physical SI units. Only energy and density considered here. rho *= dscale # kg m^-3, ,physical u *= escale # J kg^-1 ## Mean molecular weight mu = 1.0 / ((Xh * (0.75 + nelec)) + 0.25) #temp = mu/kB * (gamma-1) * u * mH #templog = alog10(temp) templog=np.log10(mu/kB * (gamma-1) * u * mH) ##### Critical matter/energy density at z=0.0 rhoc = 3 * (H0*h100)**2 / (8. 
* math.pi * G) # kg m^-3 ##### Mean hydrogen density of the Universe nHc = rhoc /mH * omegab0 *Xh * (1.+redshift)**3.0 ##### Physical hydrogen number density #nH = rho * Xh / mH ### Hydrogen density as a fraction of the mean hydrogen density overden = np.log10(rho*Xh/mH / nHc) ### Calculates average/median temperature in a given overdensity range# #overden = rho/(rhoc *omegab) #ind = where(overden ge -0.01 and overden le 0.01) #avgT0 = mean(temp(ind)) #medT0 = median(temp(ind)) #loT0 = min(temp(ind)) #hiT0 = max(temp(ind)) # #avgnH1 = mean(nH0(ind)) #mednH1 = median(nH0(ind)) #lonH1 = min(nH0(ind)) #hinH1 = max(nH0(ind)) # #print,'' #print,'Temperature (K) at mean cosmic density' #print,'Average temperature [K,log]:',avgT0,alog10(avgT0) #print,'Median temperature [K,log]:',medT0,alog10(medT0) #print,'Maximum temperature [K,log]:',hiT0,alog10(hiT0) #print,'Minimum temperature [K,log]:',loT0,alog10(loT0) # #print #print,'nH1/nH at mean cosmic density' #print,'Mean log H1 abundance [nH1/nH,log]:',avgnH1,alog10(avgnH1) #print,'Median log H1 abundance [nH1/nH,log]:',mednH1,alog10(mednH1) #print,'Maximum log H1 abundance [nH1/nH,log]:',hinH1,alog10(hinH1) #print,'Minimum log H1 abundance [nH1/nH,log]:',lonH1,alog10(lonH1) #print # ind2 = np.where((overden > 0) * (overden < 1.5) ) tempfit = templog[ind2] overdenfit = overden[ind2] N += np.size(ind2) #print, "Number of fitting points for equation of state", N indm = np.where(metals < 1e-10) metals[indm] = 1e-10 sx += np.sum(overdenfit) sy += np.sum(tempfit) sxx += np.sum(overdenfit*overdenfit) sxy += np.sum(overdenfit*tempfit) met += np.sum(mass[ind2]*metalic[ind2]) carb += np.sum(mass[ind2]*metals[ind2,2]) oxy += np.sum(mass[ind2]*metals[ind2,4]) totmet += np.sum(mass*metalic) totmass += np.sum(mass) totigmmass += np.sum(mass[ind2]) sym += np.sum(np.log10(metals[ind2,2])) sxym += np.sum(overdenfit*np.log10(metals[ind2,2])) # log T = log(T_0) + (gamma-1) log(rho/rho_0) # and use least squares fit. delta = (N*sxx)-(sx*sx) a = ((sxx*sy) - (sx*sxy))/delta b = ((N*sxy) - (sx*sy))/delta amet = ((sxx*sym) - (sx*sxym))/delta bmet = ((N*sxym) - (sx*sym))/delta print num,": gamma", b+1.0," log(T0)", a," T0 (K)", (10.0)**a, "Metallicity: ", met/totigmmass,totmet/totmass, "[C/H,O/H]: ",carb/totigmmass, oxy/totigmmass,"(a_Z, b_Z): ",10**amet, bmet raise Exception return (redshift,10.0**a, b+1)
32.772021
192
0.552727
0
0
0
0
0
0
0
0
2,734
0.432253
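The gamma.py record above fits the effective equation of state log10(T) = log10(T0) + (gamma - 1) * log10(rho / rho_mean) with closed-form normal equations accumulated over the snapshot files. A standalone check of that estimator on synthetic data; the true parameters and noise level below are made up purely for the illustration:

import numpy as np

rng = np.random.default_rng(0)
overden = rng.uniform(0.0, 1.5, 2000)            # log10 overdensity, the fitted range
true_a, true_b = 4.0, 0.6                        # log10(T0) = 4, gamma - 1 = 0.6
templog = true_a + true_b * overden + rng.normal(0, 0.05, overden.size)

N = overden.size
sx, sy = overden.sum(), templog.sum()
sxx, sxy = (overden * overden).sum(), (overden * templog).sum()
delta = N * sxx - sx * sx
a = (sxx * sy - sx * sxy) / delta                # intercept: log10(T0)
b = (N * sxy - sx * sy) / delta                  # slope: gamma - 1

print("T0 ~ %.0f K, gamma ~ %.3f" % (10 ** a, b + 1))
# np.polyfit(overden, templog, 1) recovers the same slope and intercept.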
c782edc67d9a2546d01dc48945d663005d17b20d
10,490
py
Python
evogym/envs/change_shape.py
federico-camerota/evogym
fb3a792f93a61be15c9715a036da3721f99d2d42
[ "MIT" ]
78
2022-01-30T18:59:39.000Z
2022-03-31T00:26:41.000Z
evogym/envs/change_shape.py
Yuxing-Wang-THU/evogym
da3a0508fa6c5d3fcf589194778844b15a387ece
[ "MIT" ]
6
2022-01-31T02:37:49.000Z
2022-03-30T18:52:13.000Z
evogym/envs/change_shape.py
Yuxing-Wang-THU/evogym
da3a0508fa6c5d3fcf589194778844b15a387ece
[ "MIT" ]
6
2022-01-31T08:11:33.000Z
2022-02-22T01:49:50.000Z
import gym from gym import error, spaces from gym import utils from gym.utils import seeding from evogym import * from evogym.envs import BenchmarkBase import random from math import * import numpy as np import os class ShapeBase(BenchmarkBase): def __init__(self, world): super().__init__(world) def reset(self): super().reset() # observation obs = np.concatenate(( self.get_relative_pos_obs("robot"), )) return obs ### ---------------------------------------------------------------------- # This section of code is modified from the following author # from https://github.com/RodolfoFerro/ConvexHull # Author: Rodolfo Ferro # Mail: [email protected] # Script: Compute the Convex Hull of a set of points using the Graham Scan # Function to know if we have a CCW turn def CCW(self, p1, p2, p3): if (p3[1]-p1[1])*(p2[0]-p1[0]) >= (p2[1]-p1[1])*(p3[0]-p1[0]): return True return False # Main function: def jarvis_march(self, S): n = len(S) P = [None] * n l = np.where(S[:,0] == np.min(S[:,0])) pointOnHull = S[l[0][0]] i = 0 while True: P[i] = pointOnHull endpoint = S[0] for j in range(1,n): if (endpoint[0] == pointOnHull[0] and endpoint[1] == pointOnHull[1]) or not self.CCW(S[j],P[i],endpoint): endpoint = S[j] i = i + 1 pointOnHull = endpoint if endpoint[0] == P[0][0] and endpoint[1] == P[0][1]: break for i in range(n): if P[-1] is None: del P[-1] return np.array(P) ### ---------------------------------------------------------------------- def convex_poly_area(self, pts_cw): area = 0 for i in range(len(pts_cw)): i_1 = i + 1 if i_1 >= len(pts_cw): i_1 = 0 area += (pts_cw[i,0] * pts_cw[i_1,1] - pts_cw[i_1,0] * pts_cw[i,1]) return 0.5 * area class MaximizeShape(ShapeBase): def __init__(self, body, connections=None): # make world self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json')) self.world.add_from_array('robot', body, 7, 1, connections=connections) # init sim ShapeBase.__init__(self, self.world) # set action space and observation space num_actuators = self.get_actuator_indices('robot').size num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size self.action_space = spaces.Box(low= 0.6, high=1.6, shape=(num_actuators,), dtype=np.float) self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float) def step(self, action): # collect pre step information robot_pos_init = self.object_pos_at_time(self.get_time(), "robot") # step done = super().step({'robot': action}) # collect post step information robot_pos_final = self.object_pos_at_time(self.get_time(), "robot") # observation obs = np.concatenate(( self.get_relative_pos_obs("robot"), )) # compute reward reward = self.get_reward(robot_pos_init, robot_pos_final) # error check unstable simulation if done: print("SIMULATION UNSTABLE... 
TERMINATING") reward -= 3.0 # observation, reward, has simulation met termination conditions, debugging info return obs, reward, done, {} def get_reward(self, robot_pos_init, robot_pos_final): # find convex hull of initial state convex_hull_init = self.jarvis_march(np.transpose(robot_pos_init)) area_init = self.convex_poly_area(convex_hull_init) # find convex of final state convex_hull_final = self.jarvis_march(np.transpose(robot_pos_final)) area_final = self.convex_poly_area(convex_hull_final) reward = (area_final - area_init) * 10 return reward class MinimizeShape(ShapeBase): def __init__(self, body, connections=None): # make world self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json')) self.world.add_from_array('robot', body, 7, 1, connections=connections) # init sim ShapeBase.__init__(self, self.world) # set action space and observation space num_actuators = self.get_actuator_indices('robot').size num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size self.action_space = spaces.Box(low= 0.6, high=1.6, shape=(num_actuators,), dtype=np.float) self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float) def step(self, action): # collect pre step information robot_pos_init = self.object_pos_at_time(self.get_time(), "robot") # step done = super().step({'robot': action}) # collect post step information robot_pos_final = self.object_pos_at_time(self.get_time(), "robot") # observation obs = np.concatenate(( self.get_relative_pos_obs("robot"), )) # compute reward reward = self.get_reward(robot_pos_init, robot_pos_final) # error check unstable simulation if done: print("SIMULATION UNSTABLE... TERMINATING") reward -= 3.0 # observation, reward, has simulation met termination conditions, debugging info return obs, reward, done, {} def get_reward(self, robot_pos_init, robot_pos_final): # find convex hull of initial state convex_hull_init = self.jarvis_march(np.transpose(robot_pos_init)) area_init = self.convex_poly_area(convex_hull_init) # find convex of final state convex_hull_final = self.jarvis_march(np.transpose(robot_pos_final)) area_final = self.convex_poly_area(convex_hull_final) reward = (area_init - area_final) * 10 return reward class MaximizeXShape(ShapeBase): def __init__(self, body, connections=None): # make world self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json')) self.world.add_from_array('robot', body, 7, 1, connections=connections) # init sim ShapeBase.__init__(self, self.world) # set action space and observation space num_actuators = self.get_actuator_indices('robot').size num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size self.action_space = spaces.Box(low= 0.6, high=1.6, shape=(num_actuators,), dtype=np.float) self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float) def step(self, action): # collect pre step information robot_pos_init = self.object_pos_at_time(self.get_time(), "robot") # step done = super().step({'robot': action}) # collect post step information robot_pos_final = self.object_pos_at_time(self.get_time(), "robot") # observation obs = np.concatenate(( self.get_relative_pos_obs("robot"), )) # compute reward reward = self.get_reward(robot_pos_init, robot_pos_final) # error check unstable simulation if done: print("SIMULATION UNSTABLE... 
TERMINATING") reward -= 3.0 # observation, reward, has simulation met termination conditions, debugging info return obs, reward, done, {} def get_reward(self, robot_pos_init, robot_pos_final): robot_min_pos_init = np.min(robot_pos_init, axis=1) robot_max_pos_init = np.max(robot_pos_init, axis=1) robot_min_pos_final = np.min(robot_pos_final, axis=1) robot_max_pos_final = np.max(robot_pos_final, axis=1) span_final = (robot_max_pos_final[0] - robot_min_pos_final[0]) span_initial = (robot_max_pos_init[0] - robot_min_pos_init[0]) reward = (span_final - span_initial) return reward class MaximizeYShape(ShapeBase): def __init__(self, body, connections=None): # make world self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json')) self.world.add_from_array('robot', body, 7, 1, connections=connections) # init sim ShapeBase.__init__(self, self.world) # set action space and observation space num_actuators = self.get_actuator_indices('robot').size num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size self.action_space = spaces.Box(low= 0.6, high=1.6, shape=(num_actuators,), dtype=np.float) self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float) def step(self, action): # collect pre step information robot_pos_init = self.object_pos_at_time(self.get_time(), "robot") # step done = super().step({'robot': action}) # collect post step information robot_pos_final = self.object_pos_at_time(self.get_time(), "robot") # observation obs = np.concatenate(( self.get_relative_pos_obs("robot"), )) # compute reward reward = self.get_reward(robot_pos_init, robot_pos_final) # error check unstable simulation if done: print("SIMULATION UNSTABLE... TERMINATING") reward -= 3.0 # observation, reward, has simulation met termination conditions, debugging info return obs, reward, done, {} def get_reward(self, robot_pos_init, robot_pos_final): robot_min_pos_init = np.min(robot_pos_init, axis=1) robot_max_pos_init = np.max(robot_pos_init, axis=1) robot_min_pos_final = np.min(robot_pos_final, axis=1) robot_max_pos_final = np.max(robot_pos_final, axis=1) span_final = (robot_max_pos_final[1] - robot_min_pos_final[1]) span_initial = (robot_max_pos_init[1] - robot_min_pos_init[1]) reward = (span_final - span_initial) return reward
33.196203
121
0.6102
10,262
0.978265
0
0
0
0
0
0
2,078
0.198093
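The change_shape.py record above scores each shape-change task by the change in the robot's convex-hull area: jarvis_march builds the hull and convex_poly_area applies the shoelace formula, with MaximizeShape's reward being ten times the area gained between the initial and final poses. A standalone check of the shoelace step on a unit square (no EvoGym installation required):

import numpy as np

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])


def convex_poly_area(pts):
    # same signed-area accumulation as ShapeBase.convex_poly_area
    area = 0.0
    for i in range(len(pts)):
        j = (i + 1) % len(pts)  # wrap around to close the polygon
        area += pts[i, 0] * pts[j, 1] - pts[j, 0] * pts[i, 1]
    return 0.5 * area


print(convex_poly_area(square))        # 1.0 for this counter-clockwise ordering
print(convex_poly_area(square[::-1]))  # -1.0 when traversed the other way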
c7852c56539dc442622c1969bd3081ad523df76c
29,214
py
Python
pybind/slxos/v16r_1_00b/mpls_state/ldp/fec/ldp_fec_prefixes/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
null
null
null
pybind/slxos/v16r_1_00b/mpls_state/ldp/fec/ldp_fec_prefixes/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
null
null
null
pybind/slxos/v16r_1_00b/mpls_state/ldp/fec/ldp_fec_prefixes/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
1
2021-11-05T22:15:42.000Z
2021-11-05T22:15:42.000Z
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ import prefix import key class ldp_fec_prefixes(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-mpls-operational - based on the path /mpls-state/ldp/fec/ldp-fec-prefixes. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__tot_no_of_prefix_fec','__tot_no_of_prefix_fec_installed','__tot_no_of_prefix_fec_filtered','__tot_no_of_prefix_fec_lwd','__filtered','__prefix_filtered','__prefix','__key',) _yang_name = 'ldp-fec-prefixes' _rest_name = 'ldp-fec-prefixes' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__prefix_filtered = YANGDynClass(base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) self.__tot_no_of_prefix_fec_installed = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) self.__tot_no_of_prefix_fec = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) self.__prefix = YANGDynClass(base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", 
rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False) self.__tot_no_of_prefix_fec_lwd = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) self.__key = YANGDynClass(base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) self.__filtered = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False) self.__tot_no_of_prefix_fec_filtered = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'mpls-state', u'ldp', u'fec', u'ldp-fec-prefixes'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'mpls-state', u'ldp', u'fec', u'ldp-fec-prefixes'] def _get_tot_no_of_prefix_fec(self): """ Getter method for tot_no_of_prefix_fec, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec (uint32) YANG Description: tot_no_of_prefix_fec """ return self.__tot_no_of_prefix_fec def _set_tot_no_of_prefix_fec(self, v, load=False): """ Setter method for tot_no_of_prefix_fec, mapped from 
YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_tot_no_of_prefix_fec is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tot_no_of_prefix_fec() directly. YANG Description: tot_no_of_prefix_fec """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """tot_no_of_prefix_fec must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__tot_no_of_prefix_fec = t if hasattr(self, '_set'): self._set() def _unset_tot_no_of_prefix_fec(self): self.__tot_no_of_prefix_fec = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_tot_no_of_prefix_fec_installed(self): """ Getter method for tot_no_of_prefix_fec_installed, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_installed (uint32) YANG Description: tot_no_of_prefix_fec_installed """ return self.__tot_no_of_prefix_fec_installed def _set_tot_no_of_prefix_fec_installed(self, v, load=False): """ Setter method for tot_no_of_prefix_fec_installed, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_installed (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_tot_no_of_prefix_fec_installed is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tot_no_of_prefix_fec_installed() directly. 
YANG Description: tot_no_of_prefix_fec_installed """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """tot_no_of_prefix_fec_installed must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__tot_no_of_prefix_fec_installed = t if hasattr(self, '_set'): self._set() def _unset_tot_no_of_prefix_fec_installed(self): self.__tot_no_of_prefix_fec_installed = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_tot_no_of_prefix_fec_filtered(self): """ Getter method for tot_no_of_prefix_fec_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_filtered (uint32) YANG Description: tot_no_of_prefix_fec_filtered """ return self.__tot_no_of_prefix_fec_filtered def _set_tot_no_of_prefix_fec_filtered(self, v, load=False): """ Setter method for tot_no_of_prefix_fec_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_filtered (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_tot_no_of_prefix_fec_filtered is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tot_no_of_prefix_fec_filtered() directly. 
YANG Description: tot_no_of_prefix_fec_filtered """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """tot_no_of_prefix_fec_filtered must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__tot_no_of_prefix_fec_filtered = t if hasattr(self, '_set'): self._set() def _unset_tot_no_of_prefix_fec_filtered(self): self.__tot_no_of_prefix_fec_filtered = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_tot_no_of_prefix_fec_lwd(self): """ Getter method for tot_no_of_prefix_fec_lwd, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_lwd (uint32) YANG Description: tot_no_of_prefix_fec_lwd """ return self.__tot_no_of_prefix_fec_lwd def _set_tot_no_of_prefix_fec_lwd(self, v, load=False): """ Setter method for tot_no_of_prefix_fec_lwd, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_lwd (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_tot_no_of_prefix_fec_lwd is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tot_no_of_prefix_fec_lwd() directly. 
YANG Description: tot_no_of_prefix_fec_lwd """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """tot_no_of_prefix_fec_lwd must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__tot_no_of_prefix_fec_lwd = t if hasattr(self, '_set'): self._set() def _unset_tot_no_of_prefix_fec_lwd(self): self.__tot_no_of_prefix_fec_lwd = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_filtered(self): """ Getter method for filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/filtered (fec-filter-type) YANG Description: Filter Type """ return self.__filtered def _set_filtered(self, v, load=False): """ Setter method for filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/filtered (fec-filter-type) If this variable is read-only (config: false) in the source YANG file, then _set_filtered is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_filtered() directly. 
YANG Description: Filter Type """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """filtered must be of a type compatible with fec-filter-type""", 'defined-type': "brocade-mpls-operational:fec-filter-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False)""", }) self.__filtered = t if hasattr(self, '_set'): self._set() def _unset_filtered(self): self.__filtered = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False) def _get_prefix_filtered(self): """ Getter method for prefix_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix_filtered (string) YANG Description: filter name """ return self.__prefix_filtered def _set_prefix_filtered(self, v, load=False): """ Setter method for prefix_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix_filtered (string) If this variable is read-only (config: false) in the source YANG file, then _set_prefix_filtered is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prefix_filtered() directly. 
YANG Description: filter name """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """prefix_filtered must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""", }) self.__prefix_filtered = t if hasattr(self, '_set'): self._set() def _unset_prefix_filtered(self): self.__prefix_filtered = YANGDynClass(base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) def _get_prefix(self): """ Getter method for prefix, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix (list) """ return self.__prefix def _set_prefix(self, v, load=False): """ Setter method for prefix, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix (list) If this variable is read-only (config: false) in the source YANG file, then _set_prefix is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prefix() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """prefix must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""", }) self.__prefix = t if hasattr(self, '_set'): self._set() def _unset_prefix(self): self.__prefix = YANGDynClass(base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False) def _get_key(self): """ Getter method for key, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/key (container) """ return self.__key def _set_key(self, v, load=False): """ Setter method for key, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/key (container) If this variable is read-only (config: false) in the source YANG file, then _set_key is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_key() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """key must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", }) self.__key = t if hasattr(self, '_set'): self._set() def _unset_key(self): self.__key = YANGDynClass(base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) tot_no_of_prefix_fec = __builtin__.property(_get_tot_no_of_prefix_fec) tot_no_of_prefix_fec_installed = __builtin__.property(_get_tot_no_of_prefix_fec_installed) tot_no_of_prefix_fec_filtered = __builtin__.property(_get_tot_no_of_prefix_fec_filtered) tot_no_of_prefix_fec_lwd = __builtin__.property(_get_tot_no_of_prefix_fec_lwd) filtered = __builtin__.property(_get_filtered) prefix_filtered = __builtin__.property(_get_prefix_filtered) prefix = __builtin__.property(_get_prefix) key = __builtin__.property(_get_key) _pyangbind_elements = {'tot_no_of_prefix_fec': tot_no_of_prefix_fec, 'tot_no_of_prefix_fec_installed': tot_no_of_prefix_fec_installed, 'tot_no_of_prefix_fec_filtered': tot_no_of_prefix_fec_filtered, 'tot_no_of_prefix_fec_lwd': tot_no_of_prefix_fec_lwd, 'filtered': filtered, 'prefix_filtered': prefix_filtered, 'prefix': prefix, 'key': key, }
73.772727
744
0.742726
28,795
0.985658
0
0
0
0
0
0
15,018
0.514069
c78545f3c73bfddebce8e778857a5662b6cdc383
610
py
Python
pug/dj/miner/model_mixin.py
hobson/pug-dj
55678b08755a55366ce18e7d3b8ea8fa4491ab04
[ "MIT" ]
null
null
null
pug/dj/miner/model_mixin.py
hobson/pug-dj
55678b08755a55366ce18e7d3b8ea8fa4491ab04
[ "MIT" ]
5
2021-09-07T23:53:24.000Z
2022-03-11T23:22:04.000Z
pug/dj/miner/model_mixin.py
hobson/pug-dj
55678b08755a55366ce18e7d3b8ea8fa4491ab04
[ "MIT" ]
1
2015-04-23T14:45:04.000Z
2015-04-23T14:45:04.000Z
from pug.nlp.db import representation
from django.db import models


class RepresentationMixin(models.Model):
    """Produce a meaningful string representation of a model with `str(model.objects.all[0])`."""
    __unicode__ = representation

    class Meta:
        abstract = True


class DateMixin(models.Model):
    """Add updated and created fields that auto-populate to create a ORM-level transaction log for auditing (though not a full log, just 2 events)."""
    updated = models.DateTimeField(auto_now=True)
    created = models.DateTimeField(auto_now_add=True)

    class Meta:
        abstract = True
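Both mixins above are abstract, so they only take effect when a concrete model inherits them. A minimal sketch of how they might be combined inside a configured Django project (the Measurement model and its fields are illustrative, not part of the pug-dj package):

from django.db import models
# from pug.dj.miner.model_mixin import RepresentationMixin, DateMixin  # import path in pug-dj

class Measurement(RepresentationMixin, DateMixin):
    """Concrete model: gets __unicode__ from RepresentationMixin and
    created/updated audit timestamps from DateMixin."""
    name = models.CharField(max_length=64)
    value = models.FloatField()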
32.105263
150
0.727869
538
0.881967
0
0
0
0
0
0
239
0.391803
c785e70d66977d68cd692ad4e28b80dae9e1f5c0
4,255
py
Python
custom_components/kodi_media_sensors/config_flow.py
JurajNyiri/kodi-media-sensors
055065e52b34555df95a905fc556d3086626deee
[ "MIT" ]
5
2021-03-20T23:32:58.000Z
2022-03-12T02:01:39.000Z
custom_components/kodi_media_sensors/config_flow.py
JurajNyiri/kodi-media-sensors
055065e52b34555df95a905fc556d3086626deee
[ "MIT" ]
11
2021-02-09T16:40:34.000Z
2022-03-20T11:43:06.000Z
custom_components/kodi_media_sensors/config_flow.py
JurajNyiri/kodi-media-sensors
055065e52b34555df95a905fc556d3086626deee
[ "MIT" ]
3
2021-02-09T17:01:25.000Z
2022-02-23T22:21:16.000Z
import logging
from typing import Any, Dict, Optional

from homeassistant import config_entries
from homeassistant.components.kodi.const import DOMAIN as KODI_DOMAIN
from homeassistant.core import callback
import voluptuous as vol

from .const import (
    OPTION_HIDE_WATCHED,
    OPTION_USE_AUTH_URL,
    OPTION_SEARCH_LIMIT,
    OPTION_SEARCH_LIMIT_DEFAULT_VALUE,
    CONF_KODI_INSTANCE,
    DOMAIN,
    CONF_SENSOR_RECENTLY_ADDED_TVSHOW,
    CONF_SENSOR_RECENTLY_ADDED_MOVIE,
    CONF_SENSOR_PLAYLIST,
    CONF_SENSOR_SEARCH,
)

_LOGGER = logging.getLogger(__name__)


class KodiMediaSensorsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Kodi Media Sensors config flow."""

    async def async_step_user(self, user_input: Optional[Dict[str, Any]]):
        """Handle a flow initialized via the user interface."""
        # Find all configured kodi instances to allow the user to select one.
        kodi_instances: Dict[str, str] = {
            entry.entry_id: entry.title
            for entry in self.hass.config_entries.async_entries(KODI_DOMAIN)
            if entry.source != "ignore"
        }

        data_schema = vol.Schema(
            {
                vol.Required(CONF_KODI_INSTANCE): vol.In(list(kodi_instances.values())),
                vol.Optional(CONF_SENSOR_RECENTLY_ADDED_TVSHOW, default=False): bool,
                vol.Optional(CONF_SENSOR_RECENTLY_ADDED_MOVIE, default=False): bool,
                vol.Optional(CONF_SENSOR_PLAYLIST, default=False): bool,
                vol.Optional(CONF_SENSOR_SEARCH, default=False): bool,
            }
        )

        errors = {}
        if not kodi_instances:
            errors["base"] = "kodi_not_configured"

        if user_input is not None:
            config_entry_id: Optional[str] = None
            for entry_id, title in kodi_instances.items():
                if title == user_input[CONF_KODI_INSTANCE]:
                    config_entry_id = entry_id
                    break

            if config_entry_id is None:
                errors["base"] = "kodi_not_configured"

            if not errors:
                return self.async_create_entry(
                    title="Kodi Media Sensors",
                    data={
                        CONF_KODI_INSTANCE: config_entry_id,
                        CONF_SENSOR_RECENTLY_ADDED_TVSHOW: user_input[
                            CONF_SENSOR_RECENTLY_ADDED_TVSHOW
                        ],
                        CONF_SENSOR_RECENTLY_ADDED_MOVIE: user_input[
                            CONF_SENSOR_RECENTLY_ADDED_MOVIE
                        ],
                        CONF_SENSOR_PLAYLIST: user_input[CONF_SENSOR_PLAYLIST],
                        CONF_SENSOR_SEARCH: user_input[CONF_SENSOR_SEARCH],
                    },
                )

        return self.async_show_form(
            step_id="user",
            data_schema=data_schema,
            errors=errors,
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)


class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handles options flow for the component."""

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Manage the options."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        hide_watched = self.config_entry.options.get(OPTION_HIDE_WATCHED, False)
        use_auth_url = self.config_entry.options.get(OPTION_USE_AUTH_URL, False)
        search_limit = self.config_entry.options.get(
            OPTION_SEARCH_LIMIT, OPTION_SEARCH_LIMIT_DEFAULT_VALUE
        )

        options_schema = vol.Schema(
            {
                vol.Optional(OPTION_HIDE_WATCHED, default=hide_watched): bool,
                vol.Optional(OPTION_USE_AUTH_URL, default=use_auth_url): bool,
                vol.Optional(OPTION_SEARCH_LIMIT, default=search_limit): int,
            }
        )

        return self.async_show_form(
            step_id="init",
            data_schema=options_schema,
        )
36.681034
88
0.624207
3,678
0.864395
0
0
174
0.040893
3,150
0.740306
371
0.087192
c785fce89075a58bb84f43684cf4f43e70fff95c
3,561
py
Python
MySite/MainApp/views.py
tananyan/siteee
f90c4ed56122d1af2f3795a0f16c3f294b785ad3
[ "MIT" ]
1
2021-11-29T14:50:09.000Z
2021-11-29T14:50:09.000Z
MySite/MainApp/views.py
tananyan/siteee
f90c4ed56122d1af2f3795a0f16c3f294b785ad3
[ "MIT" ]
null
null
null
MySite/MainApp/views.py
tananyan/siteee
f90c4ed56122d1af2f3795a0f16c3f294b785ad3
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.views.generic.edit import FormView
from django.views.generic.edit import View
from . import forms
# Again, thanks to django for the ready-made authentication form.
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout
from django.http import HttpResponseRedirect
from django.contrib.auth import login


class index(FormView):
    form_class = AuthenticationForm
    # Same as for registration, only we use the authentication template.
    template_name = "MainApp/homepage.html"
    # On success, redirect to the home page.
    success_url = "/"

    def get(self, request):
        form1 = AuthenticationForm(request.POST)
        return render(request, 'MainApp/homepage.html', {'form': form1, 'user': request.user})

    def form_valid(self, form):
        # Get the user object based on the data entered in the form.
        self.user = form.get_user()
        # Authenticate the user.
        login(self.request, self.user)
        return super(index, self).form_valid(form)


class contact(FormView):
    form_class = AuthenticationForm
    # Same as for registration, only we use the authentication template.
    template_name = "MainApp/contact.html"
    # On success, redirect back to the contact page.
    success_url = "../contact/"

    def get(self, request):
        form1 = AuthenticationForm(request.POST)
        return render(request, 'MainApp/contact.html',
                      {'values': ['Звоните по телефону', '[email protected]', '8(977)335-77-77'],
                       'form': form1, 'user': request.user})

    def form_valid(self, form):
        # Get the user object based on the data entered in the form.
        self.user = form.get_user()
        # Authenticate the user.
        login(self.request, self.user)
        return super(contact, self).form_valid(form)


class registration(FormView):
    form_class = forms.UserCreationForm
    # URL the user is redirected to after a successful registration.
    # In this case it is the login page for registered users.
    success_url = "/login/"
    # Template used to render the view.
    template_name = "MainApp/registration_form.html"

    def form_valid(self, form):
        # Create the user if the form data was entered correctly.
        form.save()
        # Call the base class method.
        return super(registration, self).form_valid(form)


class LogoutView(View):
    def get(self, request):
        # Log out the user who requested this view.
        logout(request)
        # Then redirect the user to the home page.
        # return HttpResponseRedirect("/seeuagain")
        return render(request, 'MainApp/quitpage.html')


class LoginFormView(FormView):
    form_class = AuthenticationForm
    # Same as for registration, only we use the authentication template.
    template_name = "MainApp/login_form.html"
    # On success, redirect to the news page.
    success_url = "/news"

    def form_valid(self, form):
        # Get the user object based on the data entered in the form.
        self.user = form.get_user()
        # Authenticate the user.
        login(self.request, self.user)
        return super(LoginFormView, self).form_valid(form)
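These class-based views only become reachable once they are wired into URL patterns. A minimal sketch of a matching urls.py (route names are illustrative; on older Django versions url() from django.conf.urls would be used instead of path()):

from django.urls import path
from MainApp import views

urlpatterns = [
    path('', views.index.as_view(), name='home'),               # homepage with login form
    path('contact/', views.contact.as_view(), name='contact'),
    path('registration/', views.registration.as_view(), name='registration'),
    path('login/', views.LoginFormView.as_view(), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
]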
33.914286
134
0.686043
4,099
0.897919
0
0
0
0
0
0
2,532
0.554655
c787795efbca79aae84c0943ac98820495ba5ee9
4,057
py
Python
imagetagger/imagetagger/settings_base.py
jbargu/imagetagger
216ac5e73902abadc1880321e285e68c55bdfd3d
[ "MIT" ]
1
2019-12-26T09:14:59.000Z
2019-12-26T09:14:59.000Z
imagetagger/imagetagger/settings_base.py
jbargu/imagetagger
216ac5e73902abadc1880321e285e68c55bdfd3d
[ "MIT" ]
4
2021-03-19T15:46:34.000Z
2022-01-13T03:33:04.000Z
imagetagger/imagetagger/settings_base.py
jbargu/imagetagger
216ac5e73902abadc1880321e285e68c55bdfd3d
[ "MIT" ]
2
2020-09-03T09:22:18.000Z
2020-09-09T15:13:35.000Z
""" Django settings for imagetagger project. Generated by 'django-admin startproject' using Django 1.10.3. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os from django.contrib.messages import constants as messages # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'imagetagger.annotations', 'imagetagger.base', 'imagetagger.images', 'imagetagger.users', 'imagetagger.tools', 'imagetagger.administration', 'django.contrib.admin', 'imagetagger.tagger_messages', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'widget_tweaks', 'friendlytagloader', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.locale.LocaleMiddleware', ] ROOT_URLCONF = 'imagetagger.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'imagetagger.base.context_processors.base_data', ], }, }, ] WSGI_APPLICATION = 'imagetagger.wsgi.application' FILE_UPLOAD_HANDLERS = ( "django.core.files.uploadhandler.MemoryFileUploadHandler", "django.core.files.uploadhandler.TemporaryFileUploadHandler", ) # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] AUTH_USER_MODEL = 'users.User' # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Europe/Berlin' # Timezone of your server USE_I18N = True USE_L10N = True USE_TZ = True PROBLEMS_URL = 'https://github.com/bit-bots/imagetagger/issues' PROBLEMS_TEXT = '' LOGIN_URL = '/user/login/' LOGIN_REDIRECT_URL = '/images/' MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' MESSAGE_TAGS = { messages.INFO: 'info', messages.ERROR: 'danger', messages.WARNING: 'warning', messages.SUCCESS: 'success', } # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' EXPORT_SEPARATOR = '|' DATA_PATH = os.path.join(BASE_DIR, 'data') IMAGE_PATH = os.path.join(BASE_DIR, 'images') # the absolute path to the folder with the imagesets # filename extension of accepted imagefiles IMAGE_EXTENSION = { 'png', 'jpeg', } # Sets the default expire 
time for new messages in days DEFAULT_EXPIRE_TIME = 7 # Sets the default number of messages per page MESSAGES_PER_PAGE = 10
26.51634
99
0.709884
0
0
0
0
0
0
0
0
2,750
0.677841
c7879b591e4a17bc5cbafd6cd291d2d73183569a
23,794
py
Python
apps/project/views/issue.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
349
2020-08-04T10:21:01.000Z
2022-03-23T08:31:29.000Z
apps/project/views/issue.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
2
2021-01-07T06:17:05.000Z
2021-04-01T06:01:30.000Z
apps/project/views/issue.py
rainydaygit/testtcloudserver
8037603efe4502726a4d794fb1fc0a3f3cc80137
[ "MIT" ]
70
2020-08-24T06:46:14.000Z
2022-03-25T13:23:27.000Z
from flask import request from apps.auth.auth_require import required from apps.project.business.issue import IssueBusiness, IssueRecordBusiness, IssueDashBoardBusiness from apps.project.extentions import parse_json_form, validation, parse_list_args2 from library.api.render import json_detail_render, json_list_render2 from library.api.tBlueprint import tblueprint bpname = 'issue' view_permission = f'{bpname}_view' modify_permission = f'{bpname}_modify' issue = tblueprint(bpname, __name__) # 新增issue @issue.route('/', methods=['POST']) @required(modify_permission) @validation('POST:issue_create') def issue_add_handler(): """ @api {post} /v1/issue 新增 缺陷 @apiName CreateIssue @apiGroup 项目 @apiDescription 新增 缺陷 @apiParam {int} module_id 模块 ID @apiParam {int} handler 处理人 ID @apiParam {int} issue_type 类型 @apiParam {int} chance 出现几率 @apiParam {int} level 级别 @apiParam {int} priority 优先级 @apiParam {int} system 系统 @apiParam {string} title 标题 @apiParam {string} attach 福建 @apiParam {string} description 描述 @apiParam {int} detection_chance 用户识别度 @apiParam {int} project_id 项目 ID @apiParam {int} version 版本 @apiParam {int} creator 创建人 ID @apiParam {int} modifier 修改人 ID @apiParam {int} [requirement_id] 关联的 需求 ID @apiParam {string} [tag] 标签 @apiParamExample {json} Request-Example: { "module_id": 340, "handler": 93, "issue_type": 0, "chance": 0, "level": 0, "priority": 0, "system": 4, "title": "123", "attach": "{\"images\":[],\"files\":[],\"videos\":[]}", "description": "<p>test</p>", "detection_chance": 0, "project_id": 4, "version": 168, "creator": 93, "modifier": 93, "requirement_id": 123, "tag": 13,14 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ (system, version, project_id, module_id, creator, modifier, handler, issue_type, chance, level, priority, stage,title, attach, handle_status, description, comment, detection_chance, requirement_id, case_covered, tag) = parse_json_form('issue_create') ret = IssueBusiness.create(system, version, project_id, module_id, creator, modifier, handler, issue_type, chance, level, priority, stage, title, attach, handle_status, description, comment, detection_chance, requirement_id, case_covered, tag) return json_detail_render(ret) # 根据id修改,删除issue @issue.route('/<int:issue_id>', methods=['POST']) @required(modify_permission) @validation('POST:issue_modify') def issue_modify_handler(issue_id): """ @api {post} /v1/issue/{int:id} 修改 缺陷 @apiName ModifyIssue @apiGroup 项目 @apiDescription 修改 缺陷 @apiParam {int} module_id 模块 ID @apiParam {int} handler 处理人 ID @apiParam {int} issue_type 类型 @apiParam {int} chance 出现几率 @apiParam {int} level 级别 @apiParam {int} priority 优先级 @apiParam {int} system 系统 @apiParam {string} title 标题 @apiParam {string} attach 福建 @apiParam {string} description 描述 @apiParam {int} detection_chance 用户识别度 @apiParam {int} project_id 项目 ID @apiParam {int} version 版本 @apiParam {int} creator 创建人 ID @apiParam {int} modifier 修改人 ID @apiParam {int} [requirement_id] 关联的 需求 ID @apiParam {string} [tag] 标签 @apiParamExample {json} Request-Example: { "module_id": 340, "handler": 93, "issue_type": 0, "chance": 0, "level": 0, "priority": 0, "system": 4, "title": "123", "attach": "{\"images\":[],\"files\":[],\"videos\":[]}", "description": "<p>test</p>", "detection_chance": 0, "project_id": 4, "version": 168, "creator": 93, "modifier": 93, "requirement_id": 1, "tag": 13,14 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ (system, version, project_id, module_id, 
modifier, handler, issue_type, chance, level, priority, stage, title, attach, handle_status, description, comment, detection_chance, requirement_id, case_covered, tag) = parse_json_form('issue_modify') ret = IssueBusiness.modify(issue_id, system, version, project_id, module_id, modifier, handler, issue_type, chance, level, priority, stage, title, attach, handle_status, description, comment, detection_chance, requirement_id, case_covered, tag) return json_detail_render(ret) # 根据id修改,删除issue @issue.route('/<int:issue_id>', methods=['DELETE']) def issue_delete_handler(issue_id): """ @api {delete} /v1/issue/{int:id} 删除 缺陷 @apiName DeleteIssue @apiGroup 项目 @apiDescription 删除 缺陷 @apiParamExample {json} Request-Example: - @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ ret = IssueBusiness.delete(issue_id) return json_detail_render(ret) # 切换issue状态 @issue.route('/handlestatus/<int:issue_id>', methods=['POST']) @required(modify_permission) @validation('POST:handle_status') def issue_board_status_handler(issue_id): """ @api {post} /v1/issue/handlestatus/{int:id} 切换 缺陷状态 @apiName ModifyIssueStatus @apiGroup 项目 @apiDescription 切换 缺陷状态 @apiParamExample {json} Request-Example: { "handle_status": 1 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ handle_status = parse_json_form('handle_status')[0] ret = IssueBusiness.status_switch(issue_id, handle_status) return json_detail_render(ret) # 切换issue处理人 @issue.route('/handler/<int:issue_id>', methods=['POST']) @validation('POST:handler_switch') @required(modify_permission) def issue_handler_switch_handler(issue_id): """ @api {post} /v1/issue/handler/{int:id} 切换 缺陷处理人 @apiName ModifyIssueSwitch @apiGroup 项目 @apiDescription 切换 缺陷处理人 @apiParamExample {json} Request-Example: { "handler": 11 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ handler = parse_json_form('handler_switch') ret = IssueBusiness.handler_switch(issue_id, handler) return json_detail_render(ret) # 切换issue等级 @issue.route('/level/<int:issue_id>', methods=['POST']) @required(modify_permission) @validation('POST:level_switch') def issue_level_switch_handler(issue_id): """ @api {post} /v1/issue/level/{int:id} 切换 缺陷等级 @apiName ModifyIssueLevel @apiGroup 项目 @apiDescription 切换 缺陷等级 @apiParamExample {json} Request-Example: { "level": 3 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ level = parse_json_form('level_switch') ret = IssueBusiness.level_switch(issue_id, level) return json_detail_render(ret) # 切换issue优先级 @issue.route('/priority/<int:issue_id>', methods=['POST']) @required(modify_permission) @validation('POST:priority_switch') def issue_priority_switch_handler(issue_id): """ @api {post} /v1/issue/priority/{int:id} 切换 缺陷优先级 @apiName ModifyIssuePriority @apiGroup 项目 @apiDescription 切换 缺陷优先级 @apiParamExample {json} Request-Example: { "priority": 3 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ priority = parse_json_form('priority_switch') ret = IssueBusiness.priority_switch(issue_id, priority) return json_detail_render(ret) # 修改issue的comment @issue.route('/comment/<int:issue_id>', methods=['POST']) @validation('POST:add_comment') @required(modify_permission) def issue_add_comment_handler(issue_id): """ @api {post} /v1/issue/comment/{int:id} 切换 缺陷备注 @apiName ModifyIssueComment @apiGroup 项目 
@apiDescription 切换 缺陷备注 @apiParamExample {json} Request-Example: { "comment": 3 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ comment = parse_json_form('add_comment') ret = IssueBusiness.add_comment(issue_id, comment) return json_detail_render(ret) # 查询issue-projectid,versionid @issue.route('/', methods=['GET']) def issue_query_all_handler(): """ @api {get} /v1/issue/ 查询 issue 列表 @apiName SearchIssue @apiGroup 项目 @apiDescription 查询 issue 列表 @apiParam {int} [projectid] 项目 ID @apiParam {int} [versionid] 版本 ID @apiParam {string} [creator_id] 创建人 ID,使用 ',' 分割 @apiParam {string} [handler_id] 处理人 ID,使用 ',' 分割 @apiParam {int} [title] 标题 @apiParam {string} [handle_status] 处理状态 ID,使用 ',' 分割 @apiParam {string} [module_id] 模块 ID,使用 ',' 分割 @apiParam {string} [priority] 优先级 ID,使用 ',' 分割 @apiParam {int} [page_size] 分页 页面大小 @apiparam {int} [page_index] 分页 页数 @apiParamExample {json} Request-Example: { "projectid": 4, "versionid": 173, "creator_id": "1,2,3,4", "page_size": 1 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [ { "attach": "{"images":[],"files":[],"videos":[]}", "chance": 2, "comment": "", "creation_time": "2019-08-08 20:58:49", "creator": [ { "id": 96, "name": "张宇" } ], "description": "", "detection_chance": "", "handle_status": 2, "handler": [ { "id": 96, "name": "张宇" } ], "issue_number": "T398", "issue_type": 1, "issueid": 398, "level": 1, "modified_time": "2019-08-08 20:58:49", "modifier": [], "module": [ { "id": 329, "name": "用例二级2222" } ], "priority": 1, "project_id": 4, "rank": 12, "reopen": 0, "repair_time": "", "requirement_id": "", "requirement_title": "", "stage": "", "status": 0, "system": "", "test_time": "", "title": "1.2.7issuse55555", "version": [ { "id": 173, "name": "1.2.7" } ], "weight": "" } ], "message": "ok", "page_index": 1, "page_size": 1, "total": 8 } """ requirement_id = request.args.get('requirement_id') if requirement_id: page_size, page_index = parse_list_args2() data, count = IssueBusiness.paginate_data_by_rid(page_size, page_index, requirement_id) return json_list_render2(0, data, page_size, page_index, count) else: page_size, page_index = parse_list_args2() data, count = IssueBusiness.paginate_data(page_size, page_index) return json_list_render2(0, data, page_size, page_index, count) # 查询issue历史记录 @issue.route('/record', methods=['GET']) def issue_record_query_all_handler(): """ @api {get} /v1/issue/record 查询 缺陷历史记录列表 @apiName GetIssueRecordList @apiGroup 项目 @apiDescription 查询 缺陷历史记录列表 @apiParam {int} projectid 项目 ID @apiParam {int} versionid 版本 ID @apiParamExample {json} Request-Example: ?projectid=1 @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [ { "attach": "{"images":[],"files":[],"videos":[]}", "chance": 0, "comment": "", "creation_time": "2019-05-10 16:23:28", "creator": [ { "id": 12, "name": "刘焕焕" } ], "description": "<p>分享微信不成功.</p>", "detection_chance": 0, "handle_status": 1, "handler": [ { "id": 12, "name": "刘焕焕" } ], "issue_number": "T309", "issue_type": 0, "issueid": 309, "level": 1, "modified_time": "2019-05-13 13:02:45", "modifier": [], "module": [ { "id": 291, "name": "V2.4.9版本用例飞科" } ], "priority": 1, "project_id": 1, "rank": 20, "reopen": 0, "repair_time": "", "requirement_id": "", "requirement_title": "", "stage": "", "status": 0, "system": 1, "test_time": "", "title": "分享微信不成功", "version": [ { "id": 128, "name": "V2.4.9" } ], "weight": "" } ], "message": "ok" } """ data = IssueRecordBusiness.query_all_json() 
return json_detail_render(0, data) # 查询issue历史记录详情 @issue.route('/record/detail/<int:issue_id>', methods=['GET']) def issue_record_detail_handler(issue_id): """ @api {get} /v1/issue/record/detail/{int:issue_id} 查询 缺陷历史记录详情 @apiName GetIssueRecordDetailById @apiGroup 项目 @apiDescription 查询 缺陷历史记录详情 @apiParamExample {json} Request-Example: - @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [ { "modified_time": "2018-12-19 14:59:34", "modifier_id": 1, "modifier_name": "王金龙", "operation": "修改了处理状态 待办 为 处理中" }, { "modified_time": "2018-12-18 20:28:39", "modifier_id": 1, "modifier_name": "王金龙", "operation": "创建了BUG title" } ], "message": "ok" } """ data = IssueRecordBusiness.query_record_detail(issue_id) return json_detail_render(0, data) # 根据id查询issue @issue.route('/<int:issue_id>', methods=['GET']) def issue_query_handler(issue_id): """ @api {get} /v1/issue/{int:issue_id} 查询 缺陷详情 (id) @apiName GetIssueById @apiGroup 项目 @apiDescription 查询 缺陷详情 通过 ID @apiParamExample {json} Request-Example: - @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code":0, "data":[ { "attach":"attach", "chance":1, "comment":"", "creation_time":"2018-12-18 20:28:39", "creator":[ { "id":1, "name":"王金龙" } ], "description":"description", "handle_status":3, "handler":[ { "id":1, "name":"王金龙" } ], "issue_number":"T1", "issue_type":1, "issueid":1, "level":1, "modified_time":"2019-03-01 16:46:10", "modifier":[ { "id":1, "name":"王金龙" } ], "module":[ { "id":1, "name":"音频" } ], "priority":1, "project_id":1, "reopen":0, "repair_time":"0:00:05", "requirement_id":"", "requirement_title":"", "stage":1, "status":0, "system":0, "test_time":"2 days, 20:21:05", "title":"title", "version":[ { "id":1, "name":"str" } ], "weight":"" } ], "message":"ok" } """ data = IssueBusiness.query_by_id(issue_id) return json_detail_render(0, data) # issue关闭和打开的dashboard @issue.route('/dashboard', methods=['POST']) @required(view_permission) @validation('POST:issue_dashboard') def issue_dashboard_work_handler(): start_date, end_date = parse_json_form('issue_dashboard') data = IssueDashBoardBusiness.issue_dashboard(start_date, end_date) return json_detail_render(0, data) # 查询测试人员每天创建的issue个数 @issue.route('/dashboard/tester', methods=['POST']) @required(view_permission) @validation('POST:issue_dashboard') def tester_issue_work_handler(): start_date, end_date = parse_json_form('issue_dashboard') data = IssueDashBoardBusiness.issue_all_tester_dashboard(start_date, end_date) return json_detail_render(0, data) # issue的状态分布和优先级分布 @issue.route('/dashboard/project', methods=['POST']) @required(view_permission) @validation('POST:issue_dashboard') def issue_project_dashboard_handler(): """ @api {POST} /v1/issue/dashboard/project 查询 缺陷状态分布和优先级分布 @apiName GetIssueByStatusAndPriority @apiGroup 项目 @apiDescription 查询 缺陷状态分布和优先级分布 @apiParamExample {json} Request-Example: { "start_date": "2019-01-02 10:10:11", "end_date": "2019-01-03 10:10:12", } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [ { "modified_time": "2018-12-19 14:59:34", "modifier_id": 1, "modifier_name": "王金龙", "operation": "修改了处理状态 待办 为 处理中" }, { "modified_time": "2018-12-18 20:28:39", "modifier_id": 1, "modifier_name": "王金龙", "operation": "创建了BUG title" } ], "message": "ok" } """ start_date, end_date = parse_json_form('issue_dashboard') data = IssueDashBoardBusiness.issue_project_dashboard(start_date, end_date) return json_detail_render(0, data) # 看板根据pro_id查询issue各个状态的数量 @issue.route('/dashboard/project/<int:pro_id>', 
methods=['GET']) def issue_query_pro_handler(pro_id): """ @api {post} /v1/issue/dashboard/project/{int:project_id} 查询 看板缺陷 根据 project ID @apiName GetBoardIssueByProjectId @apiGroup 项目 @apiDescription 根据 project ID 查询 看板缺陷 @apiParamExample {json} Request-Example: - @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code":0, "data":[ { "info":[ { "detail":[ { "count":1, "handle_status":1 }, { "count":1, "handle_status":2 }, { "count":1, "handle_status":3 } ], "total":3, "version":1 }, { "detail":[ { "count":1, "handle_status":4 } ], "total":1, "version":2 }, { "detail":[ { "count":1, "handle_status":1 } ], "total":1, "version":3 }, { "detail":[ { "count":3, "handle_status":4 } ], "total":3, "version":4 }, { "detail":[ { "count":1, "handle_status":1 }, { "count":1, "handle_status":4 } ], "total":2, "version":128 } ], "project_id":1 } ], "message":"ok" } """ data = IssueDashBoardBusiness.issue_project_id_dashboard(pro_id) return json_detail_render(0, data) # 绑定 issue 到 requirement @issue.route('/bind/requirement', methods=['POST']) @required(modify_permission) @validation('POST:issue_bind_requirement') def issue_bind_requirement(): """ @api {post} /v1/issue/bind/requirement 绑定 缺陷 issue 到 需求 requirement @apiName IssueBindRequirement @apiGroup 项目 @apiDescription 绑定 缺陷到需求 @apiParam {int} issue_id 缺陷 ID @apiParam {int} requirement_id 需求 ID @apiParamExample {json} Request-Example: { "issue": 11, "requirement_id": 22 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": [], "message": "ok" } """ requirement_id, issue_id = parse_json_form('issue_bind_requirement') ret, msg = IssueBusiness.issue_bind_requirement(issue_id, requirement_id) return json_detail_render(ret, [], msg) # 导出 issue 列表 @issue.route('/export', methods=['GET']) def issue_export(): """ @api {get} /v1/issue/ 导出 issue 到 xls @apiName IssueExportToXls @apiGroup 项目 @apiDescription 导出 issue 到 xls @apiParam {int} [projectid] 项目 ID @apiParam {int} [versionid] 版本 ID @apiParam {int} [creator_id] 创建人 ID @apiParam {int} [title] 标题 @apiParam {int} [handle_status] 处理状态 ID @apiParam {int} [module_id] 模块 ID @apiParam {int} [priority] 优先级 ID @apiParam {int} [page_size] 分页 页面大小 @apiparam {int} [page_index] 分页 页数 @apiParamExample {json} Request-Example: { "projectid": 4, "versionid": 173, "page_size": 1 } @apiSuccessExample {json} Success-Response: HTTP/1.1 200 OK { "code": 0, "data": "http://tcloud-static.oss-cn-beijing.aliyuncs.com/issue_export/0/Issues-20190809.164431.xls", "message": "ok" } """ issue_url = IssueBusiness.export() return json_detail_render(code=0, data=issue_url)
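The apidoc blocks in this module describe the JSON contract for each route; a quick way to exercise one of them is a plain HTTP call. A hedged sketch using the documented paths (host, port, and auth are assumptions, and the server-side 'issue_create' schema may require additional fields such as stage, handle_status, comment, requirement_id, case_covered, and tag beyond the apidoc example):

import requests

BASE = 'http://localhost:5000'  # assumed host/port for the tcloud server

# Create an issue; the fields follow the apidoc example for POST /v1/issue.
payload = {
    "module_id": 340, "handler": 93, "issue_type": 0, "chance": 0,
    "level": 0, "priority": 0, "system": 4, "title": "123",
    "attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
    "description": "<p>test</p>", "detection_chance": 0,
    "project_id": 4, "version": 168, "creator": 93, "modifier": 93,
}
resp = requests.post(BASE + '/v1/issue/', json=payload)
print(resp.json())  # expected shape: {"code": 0, "data": [], "message": "ok"}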
29.159314
114
0.504329
0
0
0
0
23,967
0.956919
0
0
19,325
0.77158
c787d4b85054cce4a273d4cda061e7e65933333a
3,351
py
Python
PhysicsTools/PythonAnalysis/python/ParticleDecayDrawer.py
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
PhysicsTools/PythonAnalysis/python/ParticleDecayDrawer.py
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
PhysicsTools/PythonAnalysis/python/ParticleDecayDrawer.py
nistefan/cmssw
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
[ "Apache-2.0" ]
null
null
null
# Benedikt Hegner, DESY
# [email protected]
#
# this tool is based on Luca Lista's tree drawer module

class ParticleDecayDrawer(object):
    """Draws particle decay tree """

    def __init__(self):
        print "Init particleDecayDrawer"
        # booleans: printP4 printPtEtaPhi printVertex

    def _accept(self, candidate, skipList):
        if candidate in skipList: return False;
        return self._select(candidate)

    def _select(self, candidate):
        return candidate.status() == 3

    def _hasValidDaughters(self, candidate):
        nDaughters = candidate.numChildren()
        for i in xrange(nDaughters):
            if self._select(candidate.listChildren()[i]): return True
        return False

    def _printP4(self, candidate):
        return " "

    def _decay(self, candidate, skipList):
        out = str()
        if candidate in skipList:
            return ""
        skipList.append(candidate)

        id = candidate.pdg_id()
        # here the part about the names :-(
        out += str(id) + self._printP4(candidate)

        validDau = 0
        nOfDaughters = candidate.numChildren()
        for i in xrange(nOfDaughters):
            if self._accept(candidate.listChildren()[i], skipList): validDau += 1
        if validDau == 0: return out

        out += " ->"

        for i in xrange(nOfDaughters):
            d = candidate.listChildren()[i]
            if self._accept(d, skipList):
                decString = self._decay(d, skipList)
                if ("->" in decString):
                    out += " ( %s ) " % decString
                else:
                    out += " %s" % decString
        return out

    def draw(self, particles):
        """ draw decay tree from list(HepMC.GenParticles)"""
        skipList = []
        nodesList = []
        momsList = []
        for particle in particles:
            if particle.numParents() > 1:
                if self._select(particle):
                    skipList.append(particle)
                    nodesList.append(particle)
                    for j in xrange(particle.numParents()):
                        mom = particle.listParents()[j]
                        while (mom.mother()):  # != None )
                            mom = mom.mother()
                        if self._select(mom):
                            momsList.append(mom)

        print "-- decay --"

        if len(momsList) > 0:
            if len(momsList) > 1:
                for m in xrange(len(momsList)):
                    decString = self._decay(momsList[m], skipList)
                    if len(decString) > 0:
                        print "{ %s } " % decString
            else:
                print self._decay(momsList[0], skipList)

        if len(nodesList) > 0:
            print "-> "
            if len(nodesList) > 1:
                for node in nodesList:
                    skipList.remove(node)
                    decString = self._decay(node, skipList)
                    if len(decString) > 0:
                        if "->" in decString:
                            print " ( %s ) " % decString
                        else:
                            print " " + decString
            else:
                skipList.remove(nodesList[0])
                print self._decay(nodesList[0], skipList)
        print
33.848485
81
0.497165
3,235
0.965383
0
0
0
0
0
0
383
0.114294
c788076445fbf7d0da81cc5cf12ab9482e59b110
357
py
Python
translator.py
liuprestin/pyninjaTUT-translator
903642ff56f573ed9c58b6f7db4e6fbb4e722c8d
[ "MIT" ]
null
null
null
translator.py
liuprestin/pyninjaTUT-translator
903642ff56f573ed9c58b6f7db4e6fbb4e722c8d
[ "MIT" ]
null
null
null
translator.py
liuprestin/pyninjaTUT-translator
903642ff56f573ed9c58b6f7db4e6fbb4e722c8d
[ "MIT" ]
null
null
null
from translate import Translator

translator = Translator(to_lang="zh")

try:
    with open('./example.md', mode='r') as in_file:
        text = in_file.read()

    with open('./example-tranlated.md', mode='w') as trans_file:
        trans_file.write(translator.translate(text))
except FileNotFoundError as e:
    print('check your file path')
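The script hard-codes a single input/output pair. A slightly more reusable sketch of the same idea, wrapping the call in a function (the function name and defaults are illustrative, not part of the original script):

from translate import Translator

def translate_file(src_path, dst_path, to_lang="zh"):
    """Read src_path, translate its text, and write the result to dst_path."""
    translator = Translator(to_lang=to_lang)
    try:
        with open(src_path, mode='r') as in_file:
            text = in_file.read()
        with open(dst_path, mode='w') as out_file:
            out_file.write(translator.translate(text))
    except FileNotFoundError:
        print('check your file path')

# Example call mirroring the original script:
# translate_file('./example.md', './example-translated.md')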
27.461538
64
0.661064
0
0
0
0
0
0
0
0
70
0.196078
c788d246174ead31e98e8d4b7639bcc5eb1a1074
580
py
Python
reddit2telegram/channels/news/app.py
mainyordle/reddit2telegram
1163e15aed3b6ff0fba65b222d3d9798f644c386
[ "MIT" ]
187
2016-09-20T09:15:54.000Z
2022-03-29T12:22:33.000Z
reddit2telegram/channels/news/app.py
mainyordle/reddit2telegram
1163e15aed3b6ff0fba65b222d3d9798f644c386
[ "MIT" ]
84
2016-09-22T14:25:07.000Z
2022-03-19T01:26:17.000Z
reddit2telegram/channels/news/app.py
mainyordle/reddit2telegram
1163e15aed3b6ff0fba65b222d3d9798f644c386
[ "MIT" ]
172
2016-09-21T15:39:39.000Z
2022-03-16T15:15:58.000Z
#encoding:utf-8

from utils import weighted_random_subreddit

t_channel = '@news756'
subreddit = weighted_random_subreddit({
    'politics': 0.5,
    'news': 0.5
})


def send_post(submission, r2t):
    return r2t.send_simple(submission,
        text='{title}\n\n{self_text}\n\n/r/{subreddit_name}\n{short_link}',
        gif='{title}\n\n/r/{subreddit_name}\n{short_link}',
        img='{title}\n\n/r/{subreddit_name}\n{short_link}',
        album='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}',
        other='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}'
    )
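The channel module delegates both subreddit selection and message formatting to reddit2telegram's shared helpers. As an illustration of what weighted_random_subreddit presumably does with the weight dictionary above (a hedged sketch, not the project's actual implementation), a weighted pick can be written as:

import random

def weighted_random_choice(weights):
    """Pick one key from a {name: weight} dict with probability proportional to its weight."""
    names = list(weights.keys())
    return random.choices(names, weights=[weights[n] for n in names], k=1)[0]

# With {'politics': 0.5, 'news': 0.5} each subreddit is chosen about half the time.
# print(weighted_random_choice({'politics': 0.5, 'news': 0.5}))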
27.619048
75
0.653448
0
0
0
0
0
0
0
0
302
0.52069
c78915846f029ced4be55e06e50f81dcf24cc440
21,941
py
Python
xcbgen/xtypes.py
tizenorg/framework.uifw.xorg.xcb.xcb-proto
d5ce7205c9bdd3e28d96d162617e32de1c126f8b
[ "X11" ]
1
2022-03-21T15:39:01.000Z
2022-03-21T15:39:01.000Z
targetfs/XSGX/lib/python2.6/site-packages/xcbgen/xtypes.py
jhofstee/Graphics_SDK
805bd44f347ed40699a84979bc9f3e8eb085fd9e
[ "Fair", "Unlicense" ]
null
null
null
targetfs/XSGX/lib/python2.6/site-packages/xcbgen/xtypes.py
jhofstee/Graphics_SDK
805bd44f347ed40699a84979bc9f3e8eb085fd9e
[ "Fair", "Unlicense" ]
null
null
null
''' This module contains the classes which represent XCB data types. ''' from xcbgen.expr import Field, Expression import __main__ class Type(object): ''' Abstract base class for all XCB data types. Contains default fields, and some abstract methods. ''' def __init__(self, name): ''' Default structure initializer. Sets up default fields. Public fields: name is a tuple of strings specifying the full type name. size is the size of the datatype in bytes, or None if variable-sized. nmemb is 1 for non-list types, None for variable-sized lists, otherwise number of elts. booleans for identifying subclasses, because I can't figure out isinstance(). ''' self.name = name self.size = None self.nmemb = None self.resolved = False # Screw isinstance(). self.is_simple = False self.is_list = False self.is_expr = False self.is_container = False self.is_reply = False self.is_union = False self.is_pad = False self.is_switch = False self.is_bitcase = False def resolve(self, module): ''' Abstract method for resolving a type. This should make sure any referenced types are already declared. ''' raise Exception('abstract resolve method not overridden!') def out(self, name): ''' Abstract method for outputting code. These are declared in the language-specific modules, and there must be a dictionary containing them declared when this module is imported! ''' raise Exception('abstract out method not overridden!') def fixed_size(self): ''' Abstract method for determining if the data type is fixed-size. ''' raise Exception('abstract fixed_size method not overridden!') def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto): ''' Default method for making a data type a member of a structure. Extend this if the data type needs to add an additional length field or something. module is the global module object. complex_type is the structure object. see Field for the meaning of the other parameters. ''' new_field = Field(self, field_type, field_name, visible, wire, auto) # We dump the _placeholder_byte if any fields are added. for (idx, field) in enumerate(complex_type.fields): if field == _placeholder_byte: complex_type.fields[idx] = new_field return complex_type.fields.append(new_field) class SimpleType(Type): ''' Derived class which represents a cardinal type like CARD32 or char. Any type which is typedef'ed to cardinal will be one of these. Public fields added: none ''' def __init__(self, name, size): Type.__init__(self, name) self.is_simple = True self.size = size self.nmemb = 1 def resolve(self, module): self.resolved = True def fixed_size(self): return True out = __main__.output['simple'] # Cardinal datatype globals. See module __init__ method. tcard8 = SimpleType(('uint8_t',), 1) tcard16 = SimpleType(('uint16_t',), 2) tcard32 = SimpleType(('uint32_t',), 4) tint8 = SimpleType(('int8_t',), 1) tint16 = SimpleType(('int16_t',), 2) tint32 = SimpleType(('int32_t',), 4) tchar = SimpleType(('char',), 1) tfloat = SimpleType(('float',), 4) tdouble = SimpleType(('double',), 8) class Enum(SimpleType): ''' Derived class which represents an enum. Fixed-size. Public fields added: values contains a list of (name, value) tuples. value is empty, or a number. bits contains a list of (name, bitnum) tuples. items only appear if specified as a bit. bitnum is a number. 
''' def __init__(self, name, elt): SimpleType.__init__(self, name, 4) self.values = [] self.bits = [] for item in list(elt): # First check if we're using a default value if len(list(item)) == 0: self.values.append((item.get('name'), '')) continue # An explicit value or bit was specified. value = list(item)[0] if value.tag == 'value': self.values.append((item.get('name'), value.text)) elif value.tag == 'bit': self.values.append((item.get('name'), '%u' % (1 << int(value.text, 0)))) self.bits.append((item.get('name'), value.text)) def resolve(self, module): self.resolved = True def fixed_size(self): return True out = __main__.output['enum'] class ListType(Type): ''' Derived class which represents a list of some other datatype. Fixed- or variable-sized. Public fields added: member is the datatype of the list elements. parent is the structure type containing the list. expr is an Expression object containing the length information, for variable-sized lists. ''' def __init__(self, elt, member, *parent): Type.__init__(self, member.name) self.is_list = True self.member = member self.parents = list(parent) if elt.tag == 'list': elts = list(elt) self.expr = Expression(elts[0] if len(elts) else elt, self) elif elt.tag == 'valueparam': self.expr = Expression(elt, self) self.size = member.size if member.fixed_size() else None self.nmemb = self.expr.nmemb if self.expr.fixed_size() else None def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto): if not self.fixed_size(): # We need a length field. # Ask our Expression object for it's name, type, and whether it's on the wire. lenfid = self.expr.lenfield_type lenfield_name = self.expr.lenfield_name lenwire = self.expr.lenwire needlen = True # See if the length field is already in the structure. for parent in self.parents: for field in parent.fields: if field.field_name == lenfield_name: needlen = False # It isn't, so we need to add it to the structure ourself. if needlen: type = module.get_type(lenfid) lenfield_type = module.get_type_name(lenfid) type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False) # Add ourself to the structure by calling our original method. Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto) def resolve(self, module): if self.resolved: return self.member.resolve(module) self.expr.resolve(module, self.parents) # Find my length field again. We need the actual Field object in the expr. # This is needed because we might have added it ourself above. if not self.fixed_size(): for parent in self.parents: for field in parent.fields: if field.field_name == self.expr.lenfield_name and field.wire: self.expr.lenfield = field break self.resolved = True def fixed_size(self): return self.member.fixed_size() and self.expr.fixed_size() class ExprType(Type): ''' Derived class which represents an exprfield. Fixed size. Public fields added: expr is an Expression object containing the value of the field. ''' def __init__(self, elt, member, *parents): Type.__init__(self, member.name) self.is_expr = True self.member = member self.parents = parents self.expr = Expression(list(elt)[0], self) self.size = member.size self.nmemb = 1 def resolve(self, module): if self.resolved: return self.member.resolve(module) self.resolved = True def fixed_size(self): return True class PadType(Type): ''' Derived class which represents a padding field. 
''' def __init__(self, elt): Type.__init__(self, tcard8.name) self.is_pad = True self.size = 1 self.nmemb = 1 if (elt == None) else int(elt.get('bytes'), 0) def resolve(self, module): self.resolved = True def fixed_size(self): return True class ComplexType(Type): ''' Derived class which represents a structure. Base type for all structure types. Public fields added: fields is an array of Field objects describing the structure fields. ''' def __init__(self, name, elt): Type.__init__(self, name) self.is_container = True self.elt = elt self.fields = [] self.nmemb = 1 self.size = 0 self.lenfield_parent = [self] def resolve(self, module): if self.resolved: return pads = 0 # Resolve all of our field datatypes. for child in list(self.elt): if child.tag == 'pad': field_name = 'pad' + str(pads) fkey = 'CARD8' type = PadType(child) pads = pads + 1 visible = False elif child.tag == 'field': field_name = child.get('name') fkey = child.get('type') type = module.get_type(fkey) visible = True elif child.tag == 'exprfield': field_name = child.get('name') fkey = child.get('type') type = ExprType(child, module.get_type(fkey), *self.lenfield_parent) visible = False elif child.tag == 'list': field_name = child.get('name') fkey = child.get('type') type = ListType(child, module.get_type(fkey), *self.lenfield_parent) visible = True elif child.tag == 'valueparam': field_name = child.get('value-list-name') fkey = 'CARD32' type = ListType(child, module.get_type(fkey), *self.lenfield_parent) visible = True elif child.tag == 'switch': field_name = child.get('name') # construct the switch type name from the parent type and the field name field_type = self.name + (field_name,) type = SwitchType(field_type, child, *self.lenfield_parent) visible = True type.make_member_of(module, self, field_type, field_name, visible, True, False) type.resolve(module) continue else: # Hit this on Reply continue # Get the full type name for the field field_type = module.get_type_name(fkey) # Add the field to ourself type.make_member_of(module, self, field_type, field_name, visible, True, False) # Recursively resolve the type (could be another structure, list) type.resolve(module) self.calc_size() # Figure out how big we are self.resolved = True def calc_size(self): self.size = 0 for m in self.fields: if not m.wire: continue if m.type.fixed_size(): self.size = self.size + (m.type.size * m.type.nmemb) else: self.size = None break def fixed_size(self): for m in self.fields: if not m.type.fixed_size(): return False return True class SwitchType(ComplexType): ''' Derived class which represents a List of Items. Public fields added: bitcases is an array of Bitcase objects describing the list items ''' def __init__(self, name, elt, *parents): ComplexType.__init__(self, name, elt) self.parents = parents # FIXME: switch cannot store lenfields, so it should just delegate the parents self.lenfield_parent = list(parents) + [self] # self.fields contains all possible fields collected from the Bitcase objects, # whereas self.items contains the Bitcase objects themselves self.bitcases = [] self.is_switch = True elts = list(elt) self.expr = Expression(elts[0] if len(elts) else elt, self) def resolve(self, module): if self.resolved: return # pads = 0 parents = list(self.parents) + [self] # Resolve all of our field datatypes. 
for index, child in enumerate(list(self.elt)): if child.tag == 'bitcase': field_name = child.get('name') if field_name is None: field_type = self.name + ('bitcase%d' % index,) else: field_type = self.name + (field_name,) # use self.parent to indicate anchestor, # as switch does not contain named fields itself type = BitcaseType(index, field_type, child, *parents) # construct the switch type name from the parent type and the field name if field_name is None: type.has_name = False # Get the full type name for the field field_type = type.name visible = True # add the field to ourself type.make_member_of(module, self, field_type, field_name, visible, True, False) # recursively resolve the type (could be another structure, list) type.resolve(module) inserted = False for new_field in type.fields: # We dump the _placeholder_byte if any fields are added. for (idx, field) in enumerate(self.fields): if field == _placeholder_byte: self.fields[idx] = new_field inserted = True break if False == inserted: self.fields.append(new_field) self.calc_size() # Figure out how big we are self.resolved = True def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto): if not self.fixed_size(): # We need a length field. # Ask our Expression object for it's name, type, and whether it's on the wire. lenfid = self.expr.lenfield_type lenfield_name = self.expr.lenfield_name lenwire = self.expr.lenwire needlen = True # See if the length field is already in the structure. for parent in self.parents: for field in parent.fields: if field.field_name == lenfield_name: needlen = False # It isn't, so we need to add it to the structure ourself. if needlen: type = module.get_type(lenfid) lenfield_type = module.get_type_name(lenfid) type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False) # Add ourself to the structure by calling our original method. Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto) # size for switch can only be calculated at runtime def calc_size(self): pass # note: switch is _always_ of variable size, but we indicate here wether # it contains elements that are variable-sized themselves def fixed_size(self): return False # for m in self.fields: # if not m.type.fixed_size(): # return False # return True class Struct(ComplexType): ''' Derived class representing a struct data type. ''' out = __main__.output['struct'] class Union(ComplexType): ''' Derived class representing a union data type. ''' def __init__(self, name, elt): ComplexType.__init__(self, name, elt) self.is_union = True out = __main__.output['union'] class BitcaseType(ComplexType): ''' Derived class representing a struct data type. ''' def __init__(self, index, name, elt, *parent): elts = list(elt) self.expr = Expression(elts[0] if len(elts) else elt, self) ComplexType.__init__(self, name, elts[1:]) self.has_name = True self.index = 1 self.lenfield_parent = list(parent) + [self] self.parents = list(parent) self.is_bitcase = True def make_member_of(self, module, switch_type, field_type, field_name, visible, wire, auto): ''' register BitcaseType with the corresponding SwitchType module is the global module object. complex_type is the structure object. see Field for the meaning of the other parameters. ''' new_field = Field(self, field_type, field_name, visible, wire, auto) # We dump the _placeholder_byte if any bitcases are added. 
for (idx, field) in enumerate(switch_type.bitcases): if field == _placeholder_byte: switch_type.bitcases[idx] = new_field return switch_type.bitcases.append(new_field) def resolve(self, module): if self.resolved: return self.expr.resolve(module, self.parents+[self]) # Resolve the bitcase expression ComplexType.resolve(self, module) class Reply(ComplexType): ''' Derived class representing a reply. Only found as a field of Request. ''' def __init__(self, name, elt): ComplexType.__init__(self, name, elt) self.is_reply = True def resolve(self, module): if self.resolved: return # Add the automatic protocol fields self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True)) self.fields.append(_placeholder_byte) self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True)) self.fields.append(Field(tcard32, tcard32.name, 'length', False, True, True)) ComplexType.resolve(self, module) class Request(ComplexType): ''' Derived class representing a request. Public fields added: reply contains the reply datatype or None for void requests. opcode contains the request number. ''' def __init__(self, name, elt): ComplexType.__init__(self, name, elt) self.reply = None self.opcode = elt.get('opcode') for child in list(elt): if child.tag == 'reply': self.reply = Reply(name, child) def resolve(self, module): if self.resolved: return # Add the automatic protocol fields if module.namespace.is_ext: self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True)) self.fields.append(Field(tcard8, tcard8.name, 'minor_opcode', False, True, True)) self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True)) ComplexType.resolve(self, module) else: self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True)) self.fields.append(_placeholder_byte) self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True)) ComplexType.resolve(self, module) if self.reply: self.reply.resolve(module) out = __main__.output['request'] class Event(ComplexType): ''' Derived class representing an event data type. Public fields added: opcodes is a dictionary of name -> opcode number, for eventcopies. ''' def __init__(self, name, elt): ComplexType.__init__(self, name, elt) self.opcodes = {} tmp = elt.get('no-sequence-number') self.has_seq = (tmp == None or tmp.lower() == 'false' or tmp == '0') def add_opcode(self, opcode, name, main): self.opcodes[name] = opcode if main: self.name = name def resolve(self, module): if self.resolved: return # Add the automatic protocol fields self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True)) if self.has_seq: self.fields.append(_placeholder_byte) self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True)) ComplexType.resolve(self, module) out = __main__.output['event'] class Error(ComplexType): ''' Derived class representing an error data type. Public fields added: opcodes is a dictionary of name -> opcode number, for errorcopies. 
''' def __init__(self, name, elt): ComplexType.__init__(self, name, elt) self.opcodes = {} def add_opcode(self, opcode, name, main): self.opcodes[name] = opcode if main: self.name = name def resolve(self, module): if self.resolved: return # Add the automatic protocol fields self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True)) self.fields.append(Field(tcard8, tcard8.name, 'error_code', False, True, True)) self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True)) ComplexType.resolve(self, module) out = __main__.output['error'] _placeholder_byte = Field(PadType(None), tcard8.name, 'pad0', False, True, False)
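A minimal, hypothetical driver sketch for the type classes in the file above. It assumes the file is importable as xcbgen.xtypes (as in the upstream xcb/proto tree); the stub handler, its printed text, and the example type name are illustrative only. The point it demonstrates is the contract stated in Type.out(): a dictionary named output must already exist in __main__ when the module is imported, keyed by the names looked up in the class bodies ('simple', 'enum', 'struct', 'union', 'request', 'event', 'error').

# Hypothetical generator stub -- nothing below is part of xcbgen itself.
import __main__

def _noop(self, name):
    # A real backend (e.g. the C code generator) would emit source here.
    print('would emit %s for %s' % (type(self).__name__, name))

# The dictionary must exist in __main__ *before* xtypes is imported,
# because each class body evaluates __main__.output[...] at import time.
__main__.output = {key: _noop for key in
                   ('simple', 'enum', 'struct', 'union',
                    'request', 'event', 'error')}

from xcbgen import xtypes

card32 = xtypes.SimpleType(('uint32_t',), 4)
card32.resolve(None)                    # SimpleType.resolve ignores the module
print(card32.fixed_size())              # True: cardinal types are fixed-size
print((card32.size, card32.nmemb))      # (4, 1)
card32.out(('CARD32',))                 # dispatches to the stub 'simple' handler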
34.661927
112
0.593182
21,160
0.964405
0
0
0
0
0
0
6,809
0.310332
c78a85d9115e200586e2ed2d790dc6b616c4151d
3,769
py
Python
BioKlustering-Website/mlmodel/parser/kmeans.py
solislemuslab/mycovirus-website
bc8d3d5f9a9472c35e40334f19e90bbf782f7a1b
[ "MIT" ]
1
2021-11-23T15:07:58.000Z
2021-11-23T15:07:58.000Z
BioKlustering-Website/mlmodel/parser/kmeans.py
solislemuslab/mycovirus-website
bc8d3d5f9a9472c35e40334f19e90bbf782f7a1b
[ "MIT" ]
2
2020-10-23T15:40:49.000Z
2020-10-28T13:21:16.000Z
BioKlustering-Website/mlmodel/parser/kmeans.py
solislemuslab/bioklustering
bc8d3d5f9a9472c35e40334f19e90bbf782f7a1b
[ "MIT" ]
2
2021-11-04T20:01:36.000Z
2021-11-23T15:13:34.000Z
# Copyright 2020 by Luke Selberg, Solis-Lemus Lab, WID. # All rights reserved. # This file is part of the BioKlustering Website. import pandas as pd from Bio import SeqIO from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.cluster import MeanShift from sklearn import preprocessing import numpy as np import os from .helpers import plotly_dash_show_plot def parseFasta(data): d = {fasta.id : str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")} pd.DataFrame([d]) s = pd.Series(d, name='Sequence') s.index.name = 'ID' s.reset_index() return pd.DataFrame(s) def kmerXTable(s, a, b): tfid_vector = TfidfVectorizer(analyzer='char', ngram_range=(a,b)) s_hat = tfid_vector.fit_transform(s.Sequence) kmerNames = tfid_vector.get_feature_names() kmers = s_hat.toarray() return pd.DataFrame(kmers,columns=kmerNames, index = s.index) # credit to chunrong def read_fasta_sequences(sequence_paths): all_sequences = pd.DataFrame() for path in sequence_paths: path = os.path.join("media", path) sequence = parseFasta(path) all_sequences = pd.concat([all_sequences, sequence]) return all_sequences def kmeans(userId, fasta, klength_min, klength_max, rNum, cNum, method): inputData = read_fasta_sequences(fasta) inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", "")) kmerXTableInput = kmerXTable(inputData, klength_min, klength_max) km = KMeans(random_state = rNum, n_clusters = cNum) km.fit(kmerXTableInput) y_hat = km.predict(kmerXTableInput) plotly_kmertable = kmerXTableInput if method == "PCA": plotly_kmertable = preprocessing.normalize(kmerXTableInput) plot_div = plotly_dash_show_plot(userId, plotly_kmertable, y_hat, "Unsupervised Kmeans", method) inputData.insert(0, "Labels", y_hat) return [[inputData], [plot_div]] def kmeans_semiSupervised(userId, fasta, klength_min, klength_max, rNum, y_hat, method): inputData = read_fasta_sequences(fasta) inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", "")) kmerXTableInput = kmerXTable(inputData, klength_min, klength_max) PCAembedding = PCA(n_components=10) NkmerXTableInput = preprocessing.normalize(kmerXTableInput) PCAembedding_low = PCAembedding.fit_transform(NkmerXTableInput) ms = MeanShift() ms.fit(PCAembedding_low) cluster_centers = ms.cluster_centers_ import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") kmms = KMeans(init = cluster_centers, n_clusters = len(cluster_centers)) kmms_labels = kmms.fit_predict(PCAembedding_low) # convert all clusters into two clusters kmerXTableInput["pLabels"] = kmms_labels kmerXTableInput["aLabels"] = y_hat.tolist() newLabels_clusters_1 = kmerXTableInput[kmerXTableInput["aLabels"] == 1]["pLabels"].tolist() newLabels_clusters_0 = kmerXTableInput[kmerXTableInput["aLabels"] == 0]["pLabels"].tolist() newLabels = [] for label in kmms_labels: if newLabels_clusters_1.count(label) > newLabels_clusters_0.count(label): newLabels.append(1) else: newLabels.append(0) kmerTable = kmerXTableInput.drop(columns=["pLabels", "aLabels"]) plotly_kmertable = kmerTable plotly_labels = np.array(newLabels) if method == "PCA": plotly_kmertable = preprocessing.normalize(kmerTable) plotly_div = plotly_dash_show_plot(userId, plotly_kmertable, plotly_labels, "Semi-supervised Kmeans", method) inputData.insert(0, "Labels", newLabels) return [[inputData], [plotly_div]]
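A minimal usage sketch for the unsupervised entry point defined above. The user id, FASTA file name, and parameter values are assumptions chosen for illustration; read_fasta_sequences resolves each given path relative to a media/ directory, so the file would need to exist there.

# Illustrative call only -- argument values are made up.
predictions, plots = kmeans(
    userId=1,                      # passed through to plotly_dash_show_plot
    fasta=["sequences.fasta"],     # hypothetical file, read as media/sequences.fasta
    klength_min=2, klength_max=3,  # k-mer lengths for the TF-IDF k-mer table
    rNum=42,                       # random_state handed to sklearn KMeans
    cNum=2,                        # number of clusters
    method="PCA",                  # normalizes the k-mer table before plotting
)
labeled = predictions[0]   # parsed sequences with a new "Labels" column prepended
plot_html = plots[0]       # HTML div string produced by plotly_dash_show_plot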
37.316832
113
0.717166
0
0
0
0
0
0
0
0
421
0.111701