**Dataset schema** — one column per field; "length" and numeric entries give the min–max observed across the dataset, and "values"/"classes" count distinct values:

| Column | Type | Range / distinct values |
| --- | --- | --- |
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 values |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 values |
| src_encoding | string | 26 values |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 values |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
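Nothing in this dump names the dataset itself, so the repository id below is a placeholder; this is only a minimal sketch of how rows with this schema could be iterated with the `datasets` library:

```python
# Minimal sketch, assuming this schema belongs to a Hugging Face dataset.
# "user/dataset" is a hypothetical id; substitute the actual repository.
from datasets import load_dataset

ds = load_dataset("user/dataset", split="train", streaming=True)
for row in ds:
    # Each row carries provenance (repo_name, path, revision_id, license_type)
    # alongside the raw file text in `content`.
    print(row["repo_name"], row["path"], row["length_bytes"])
    if row["license_type"] == "permissive":
        text = row["content"]  # the source file itself
        break
```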
**Example 1 — `cao-xing/python` · `/python/练习/客户端.py`**

- blob_id `01097f42356c3084810e62f9534dfe02b930e580` · directory_id `44cbcfa0f861e92917e5617cf5fce5832c7fda12` · content_id `26938dfaa0223a9678c59db3fa8846e465803893`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `9581d1a3ff901d9cca81d368c3088f2ecae75611` · revision_id `d260a15b127763c9dc17b4cdb2bb514217373b5e` · branch_name `refs/heads/master`
- visit_date `2021-09-21T03:10:35.460798` · revision_date `2018-08-20T01:24:22` · committer_date `2018-08-20T01:24:22`
- github_id `103,017,659` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `628` · extension `py`

content:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/8/22 15:56
# @Author  : Aries
# @Site    :
# @File    : 客户端.py
# @Software: PyCharm
import socket
import struct

client = socket.socket()
client.connect(("127.0.0.1", 8080))

while True:
    msg = input("=>>")
    if not msg:
        continue
    client.send(msg.encode("utf-8"))
    header_struct = client.recv(4)
    # print(type(header_struct),len(header_struct))
    total_size = struct.unpack("i", header_struct)[0]
    # total_size=header_size[0]
    print("total_size", total_size)
    server_msg = client.recv(total_size)
    print(server_msg.decode('gbk'))

client.close()
```
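The client above expects every reply to be prefixed with a 4-byte `struct`-packed length, but this row does not include the matching server. The following is only a hypothetical sketch of a compatible echo server (host, port, and the echo behaviour are assumptions):

```python
# Hypothetical companion server for the length-prefixed protocol above
# (not part of the dataset row): it sends a 4-byte native-order length
# header packed with struct.pack("i", ...), then that many payload bytes.
# Note the client decodes replies as GBK, so ASCII-only replies are safest.
import socket
import struct

server = socket.socket()
server.bind(("127.0.0.1", 8080))
server.listen(1)
conn, _addr = server.accept()
while True:
    data = conn.recv(1024)
    if not data:
        break
    reply = data  # echo; the real server presumably ran the received command
    conn.send(struct.pack("i", len(reply)))  # header the client unpacks
    conn.send(reply)
conn.close()
server.close()
```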
**Example 2 — `davicosta12/python_work` · `/Part_02/Exercícios_alien_invasion/13_3_and_13_4_Gotes_de_chuva/test.py`**

- blob_id `70768a1e403b7f3091fda143abdaa8a45a458a16` · directory_id `1fddb12ae9b7db260b9161983a726f1528ece940` · content_id `b2cb711e049cd969d50595d0e68caac2b6850453`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `aecf642bbbf2a007fba159f305b4ab21ff00503f` · revision_id `5df62d3ced25b7e04cda71846fd07862dda39b4c` · branch_name `refs/heads/master`
- visit_date `2023-04-23T09:49:31.923211` · revision_date `2021-04-27T00:31:42` · committer_date `2021-04-27T00:31:42`
- github_id `null` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `562` · extension `py`

content:
```python
cria_linha_gota(screen, settings, gota, gotas)


def cria_linha_gota(screen, settings, gota, gotas):
    if gota.VerificaOndeAgotaEncosta():
        gota = Gota(screen, settings)
    avaliando_space_x = settings.screen_width - gota.rect.width
    number_gotas = int(avaliando_space_x / (2 * gota.rect.width))
    for number_gota in range(number_gotas):
        gota = Gota(screen, settings)
        position_x = gota.rect.width + (2 * gota.rect.width) * number_gota
        gota.rect.x = position_x
        gotas.add(gota)
```
**Example 3 — `asottile/scratch` · `/python/git-github-fork`**

- blob_id `9f13fd2ca6acccc03d1dd7c37d11b283e8d37c1d` · directory_id `d6cb1c056b441fb435b780aac190786b52d18862` · content_id `4c4d7dc4db2685a0e808047bb3ab7a9a2ddaade2`
- detected_licenses `["MIT"]` · license_type `permissive`
- snapshot_id `2e189b7c903b12a4ade35f1148bbb89673df73e3` · revision_id `616a6c972d5c3de8698eb41b8c4b74c393f7f397` · branch_name `refs/heads/main`
- visit_date `2023-08-16T23:51:21.919592` · revision_date `2023-08-15T14:18:03` · committer_date `2023-08-15T14:18:03`
- github_id `30,501,563` · star_events_count `125` · fork_events_count `13`
- gha_license_id `MIT` · gha_event_created_at `2023-09-05T13:05:32` · gha_created_at `2015-02-08T18:53:26` · gha_language `Python`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `1,633` · extension: none (the file name has no extension)

content:
```python
#!/usr/bin/env python3
from __future__ import annotations

import argparse
import json
import os
import subprocess
import sys
import urllib.request
from typing import NoReturn
from typing import Sequence


def get_slug() -> str:
    cmd = ('git', 'config', 'remote.origin.url')
    remote = subprocess.check_output(cmd).strip().decode()
    assert remote.startswith('git@github.com:'), remote
    _, _, slug = remote.partition(':')
    return slug


def load_config() -> dict[str, str]:
    filename = os.path.expanduser('~/.github-auth.json')
    mode = os.stat(filename).st_mode & 0o777
    if sys.platform != 'win32' and mode != 0o600:
        raise SystemExit(
            f'{filename} has too-permissive permissions, Expected 0o600, '
            f'got 0o{mode:o}',
        )
    with open(filename) as f:
        return json.load(f)


def fork(slug: str, token: str) -> None:
    url = f'https://api.github.com/repos/{slug}/forks'
    print(url)
    request = urllib.request.Request(url, data=b'')
    request.add_header('Authorization', f'token {token}')
    urllib.request.urlopen(request)


def add_remote(slug: str, username: str) -> NoReturn:
    _, _, repo_name = slug.partition('/')
    slug = f'{username}/{repo_name}'
    cmd = ('git', 'remote', 'add', username, f'git@github.com:{slug}')
    os.execvp(cmd[0], cmd)


def main(argv: Sequence[str] | None = None) -> NoReturn:
    parser = argparse.ArgumentParser()
    parser.parse_args(argv)

    slug = get_slug()
    auth = load_config()
    fork(slug, auth['token'])
    add_remote(slug, auth['username'])


if __name__ == '__main__':
    raise SystemExit(main())
```
**Example 4 — `antgratia/Memoire_code` · `/architecture_py/archi_v3_30.py`**

- blob_id `e955b2bc0135273d801ec2818256ff087b9a4f86` · directory_id `ecf6fe6aa87b2c3f041acc30fab11b0cafe3dd46` · content_id `e3453c90d78789666fb45300a164841e7b14572b`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `73c7806c4576c2e73e00d9a84b1063a2c8f6b559` · revision_id `2cdc1339ea24896a6628238f6467edff80f98166` · branch_name `refs/heads/main`
- visit_date `2023-06-20T16:19:07.041464` · revision_date `2021-07-13T11:53:48` · committer_date `2021-07-13T11:53:48`
- github_id `null` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `4,149` · extension `py`

content:
```python
import numpy as np
import os
from keras import backend as K
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Concatenate, Dropout
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import plot_model
import tensorflow as tf
import sys
import traceback
import csv
from time import time

type_archi = 'LENET'
epsilon = 0.001
dropout_rate = 0.5
axis = 3
compress_factor = 0.5

# load dataset
(train_x, train_y), (test_x, test_y) = keras.datasets.cifar10.load_data()

# normalize to range 0-1
train_x = train_x / 255.0
test_x = test_x / 255.0

val_x = train_x[:5000]
val_y = train_y[:5000]

# init training time
training_time = 0

# init result test/train
test_result_loss = ""
test_result_acc = ""
train_result_loss = ""
train_result_acc = ""

nb_layers = "not build"

try:
    def getModel():
        X_input = X = Input([32, 32, 3])
        X = Conv2D(18, kernel_size=6, strides=3, activation='selu', padding='valid')(X)
        X = Conv2D(36, kernel_size=4, strides=4, activation='tanh', padding='same')(X)
        X = Flatten()(X)
        X = Dense(234, activation='tanh')(X)
        X = Dense(10, activation='softmax')(X)
        model = Model(inputs=X_input, outputs=X)
        return model

    model = getModel()
    #plot_model(model, show_shapes=True, to_file="../architecture_img/archi_v3_30.png")
    model.compile(optimizer='adam', loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])

    start = time()
    es = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, restore_best_weights=True, patience=1)
    list_cb = [es]
    history = model.fit(train_x, train_y, epochs=50, batch_size=64, validation_split=0.3, callbacks=list_cb)
    training_time = time() - start
    print(model.evaluate(test_x, test_y))

    log_file = open("../architecture_log/archi_v3_30.log", "w")

    # save test result
    log_file.write('test result : ' + str(model.evaluate(test_x, test_y)))
    test_result_loss = model.evaluate(test_x, test_y)[0]
    test_result_acc = model.evaluate(test_x, test_y)[1]

    # save train result
    log_file.write('train result : ' + str(model.evaluate(test_x, test_y)))
    log_file.write('History train result : ' + str(history.history))
    train_result_loss = model.evaluate(train_x, train_y)[0]
    train_result_acc = model.evaluate(train_x, train_y)[1]

    print('OK: file ../architecture_log/archi_v3_30.log has been create')

    nb_layers = len(model.layers)
    log_file.close()

except:
    print('error: file ../architecture_log/archi_v3_30_error.log has been create')
    error_file = open("../architecture_log/archi_v3_30_error.log", "w")
    traceback.print_exc(file=error_file)
    result_loss = "Error"
    result_acc = "Error"
    error_file.close()

finally:
    file = open('../architecture_results_v3.csv', 'a', newline='')
    with file:
        # identifying header
        header = ['file_name', 'training_time(s)', 'test_result_loss', 'test_result_acc',
                  'train_result_acc', 'train_result_loss', 'nb_layers', 'epochs', 'type_archi']
        writer = csv.DictWriter(file, fieldnames=header)
        # writing data row-wise into the csv file
        # writer.writeheader()
        writer.writerow({'file_name': 'archi_v3_30',
                         'training_time(s)': training_time,
                         'test_result_loss': test_result_loss,
                         'test_result_acc': test_result_acc,
                         'train_result_acc': train_result_acc,
                         'train_result_loss': train_result_loss,
                         'nb_layers': nb_layers,
                         'epochs': len(history.history['loss']),
                         'type_archi': type_archi})
    print('add line into architecture_results_v3.csv')
    file.close()
```
**Example 5 — `UCLA-SEAL/QDiff` · `/benchmark/startQiskit_Class1786.py`**

- blob_id `0b9dbb64c50ea9248b0265712478b99b9d6c8300` · directory_id `09e57dd1374713f06b70d7b37a580130d9bbab0d` · content_id `610f8fde6dac66e86511229eaf7f8927e6e439db`
- detected_licenses `["BSD-3-Clause"]` · license_type `permissive`
- snapshot_id `ad53650034897abb5941e74539e3aee8edb600ab` · revision_id `d968cbc47fe926b7f88b4adf10490f1edd6f8819` · branch_name `refs/heads/main`
- visit_date `2023-08-05T04:52:24.961998` · revision_date `2021-09-19T02:56:16` · committer_date `2021-09-19T02:56:16`
- github_id `405,159,939` · star_events_count `2` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `4,887` · extension `py`

content:
```python
# qubit number=5
# total number=70

import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2, floor, sqrt, pi
import numpy as np
import networkx as nx


def build_oracle(n: int, f) -> QuantumCircuit:
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle


def make_circuit(n: int, f) -> QuantumCircuit:
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0])  # number=3
    prog.h(input_qubit[1])  # number=4
    prog.h(input_qubit[2])  # number=5
    prog.h(input_qubit[1])  # number=29
    prog.cz(input_qubit[3], input_qubit[1])  # number=30
    prog.h(input_qubit[1])  # number=31
    prog.h(input_qubit[3])  # number=6
    prog.h(input_qubit[4])  # number=21

    Zf = build_oracle(n, f)

    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0])  # number=1
        prog.h(input_qubit[1])  # number=2
        prog.h(input_qubit[2])  # number=7
        prog.h(input_qubit[3])  # number=8
        prog.h(input_qubit[0])  # number=38
        prog.cz(input_qubit[1], input_qubit[0])  # number=39
        prog.h(input_qubit[0])  # number=40
        prog.h(input_qubit[0])  # number=51
        prog.cz(input_qubit[1], input_qubit[0])  # number=52
        prog.h(input_qubit[0])  # number=53
        prog.h(input_qubit[0])  # number=64
        prog.cz(input_qubit[1], input_qubit[0])  # number=65
        prog.h(input_qubit[0])  # number=66
        prog.x(input_qubit[0])  # number=49
        prog.h(input_qubit[0])  # number=57
        prog.cz(input_qubit[1], input_qubit[0])  # number=58
        prog.h(input_qubit[0])  # number=59
        prog.h(input_qubit[0])  # number=54
        prog.cz(input_qubit[1], input_qubit[0])  # number=55
        prog.h(input_qubit[0])  # number=56
        prog.h(input_qubit[4])  # number=41
        prog.h(input_qubit[0])  # number=61
        prog.cz(input_qubit[1], input_qubit[0])  # number=62
        prog.h(input_qubit[0])  # number=63
        prog.cx(input_qubit[0], input_qubit[1])  # number=67
        prog.x(input_qubit[1])  # number=68
        prog.cx(input_qubit[0], input_qubit[1])  # number=69
        prog.h(input_qubit[2])  # number=25
        prog.cz(input_qubit[0], input_qubit[2])  # number=26
        prog.h(input_qubit[2])  # number=27
        prog.x(input_qubit[2])  # number=23
        prog.cx(input_qubit[0], input_qubit[2])  # number=24
        prog.cx(input_qubit[0], input_qubit[3])  # number=32
        prog.x(input_qubit[3])  # number=33
        prog.h(input_qubit[3])  # number=42
        prog.cz(input_qubit[0], input_qubit[3])  # number=43
        prog.h(input_qubit[3])  # number=44

        if n >= 2:
            prog.mcu1(pi, input_qubit[1:], input_qubit[0])

        prog.x(input_qubit[0])  # number=13
        prog.rx(0.6157521601035993, input_qubit[1])  # number=60
        prog.x(input_qubit[1])  # number=14
        prog.x(input_qubit[2])  # number=15
        prog.x(input_qubit[3])  # number=16
        prog.h(input_qubit[0])  # number=17
        prog.h(input_qubit[1])  # number=18
        prog.h(input_qubit[2])  # number=19
        prog.h(input_qubit[3])  # number=20
    # circuit end
    return prog


if __name__ == '__main__':
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5, f)
    backend = BasicAer.get_backend('statevector_simulator')

    sample_shot = 7924
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }

    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)

    writefile = open("../data/startQiskit_Class1786.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
```
**Example 6 — `antoinecarme/pdf_to_json_tests` · `/data/multilingual/Latn.SPA/Mono_16/pdf_to_json_test_Latn.SPA_Mono_16.py`**

- blob_id `ad88fe2e16a5adbea172806fa7d86ebdfa481342` · directory_id `673e829dda9583c8dd2ac8d958ba1dc304bffeaf` · content_id `0be4ae65d67247118673d5d58736e42717295f37`
- detected_licenses `["BSD-3-Clause"]` · license_type `permissive`
- snapshot_id `58bab9f6ba263531e69f793233ddc4d33b783b7e` · revision_id `d57a024fde862e698d916a1178f285883d7a3b2f` · branch_name `refs/heads/master`
- visit_date `2021-01-26T08:41:47.327804` · revision_date `2020-02-27T15:54:48` · committer_date `2020-02-27T15:54:48`
- github_id `243,359,934` · star_events_count `2` · fork_events_count `1`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `303` · extension `py`

content:
```python
import pdf_to_json as p2j
import json

url = "file:data/multilingual/Latn.SPA/Mono_16/udhr_Latn.SPA_Mono_16.pdf"

lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
```
**Example 7 — `syurskyi/Python_Topics` · `/125_algorithms/_examples/_algorithms_challenges/leetcode/LeetCode_with_solution/605 Can Place Flowers.py`**

- blob_id `e69bcc7516979ce09edabc7d718e8eca0a0d794f` · directory_id `e23a4f57ce5474d468258e5e63b9e23fb6011188` · content_id `658816e49cf0bf234c579ca59a7d4561b16531c3`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `52851ecce000cb751a3b986408efe32f0b4c0835` · revision_id `be331826b490b73f0a176e6abed86ef68ff2dd2b` · branch_name `refs/heads/master`
- visit_date `2023-06-08T19:29:16.214395` · revision_date `2023-05-29T17:09:11` · committer_date `2023-05-29T17:09:11`
- github_id `220,583,118` · star_events_count `3` · fork_events_count `2`
- gha_license_id `null` · gha_event_created_at `2023-02-16T03:08:10` · gha_created_at `2019-11-09T02:58:47` · gha_language `Python`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `1,311` · extension `py`

content:
```python
#!/usr/bin/python3
"""
Suppose you have a long flowerbed in which some of the plots are planted and
some are not. However, flowers cannot be planted in adjacent plots - they would
compete for water and both would die.

Given a flowerbed (represented as an array containing 0 and 1, where 0 means
empty and 1 means not empty), and a number n, return if n new flowers can be
planted in it without violating the no-adjacent-flowers rule.

Example 1:
Input: flowerbed = [1,0,0,0,1], n = 1
Output: True

Example 2:
Input: flowerbed = [1,0,0,0,1], n = 2
Output: False

Note:
The input array won't violate no-adjacent-flowers rule.
The input array size is in the range of [1, 20000].
n is a non-negative integer which won't exceed the input array size.
"""
from typing import List


class Solution:
    def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
        """
        greedy
        """
        if n == 0:
            return True

        for i in range(len(flowerbed)):
            if (
                flowerbed[i] != 1
                and (i + 1 >= len(flowerbed) or flowerbed[i + 1] != 1)
                and (i - 1 < 0 or flowerbed[i - 1] != 1)
            ):
                n -= 1
                flowerbed[i] = 1
                if n == 0:
                    return True

        return False
```
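Since the docstring spells out two concrete examples, a short hypothetical driver (not part of the original file) confirms the greedy implementation reproduces them:

```python
# Hypothetical driver reproducing the two docstring examples.
s = Solution()
assert s.canPlaceFlowers([1, 0, 0, 0, 1], 1) is True
assert s.canPlaceFlowers([1, 0, 0, 0, 1], 2) is False
print("both docstring examples pass")
```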
**Example 8 — `Makhanya/PythonMasterClass` · `/Decorators/double_return.py`**

- blob_id `855148317d46238f6f0a99be9e89688696668f5e` · directory_id `8845a9557ef7a93a4235bc5aff059110c7c0846c` · content_id `7ec2f76092ad954a1a96de46e9c96d749990493c`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `c127791337c862bf5c6c8780a1643642d6e99ab6` · revision_id `241f48396e59cd20f1a275f15fa6fec3e8676bb6` · branch_name `refs/heads/master`
- visit_date `2023-07-20T12:44:05.055259` · revision_date `2023-07-09T11:20:52` · committer_date `2023-07-09T11:20:52`
- github_id `86,587,952` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `568` · extension `py`

content:
```python
'''
@double_return
def add(x, y):
    return x + y

add(1, 2)  # [3, 3]

@double_return
def greet(name):
    return "Hi, I'm " + name

greet("Colt")  # ["Hi, I'm Colt", "Hi, I'm Colt"]
'''
from functools import wraps


def double_return(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        val = fn(*args, **kwargs)
        return [val, val]
    return wrapper


@double_return
def add(x, y):
    return x + y


print(add(1, 2))  # [3, 3]


@double_return
def greet(name):
    return "Hi, I'm " + name


print(greet("Colt"))  # ["Hi, I'm Colt", "Hi, I'm Colt"]
```
**Example 9 — `NickSto/indel-mix-denovo` · `/asm-unifier.py`**

- blob_id `7a59066ee89bb5ef496ad54402f4996626383876` · directory_id `fa8d2511217e3db4c3ec91ee0bacd3ee70b1af47` · content_id `ffa4ee865ab0f06fa07ee105f9652370bbfc156e`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `588d5435e65895c8e40663225ab5090cc598ee91` · revision_id `e5a8b42aae6578565ceb5caea26e24fdea9196db` · branch_name `refs/heads/master`
- visit_date `2020-12-24T08:54:57.857591` · revision_date `2019-09-19T21:07:26` · committer_date `2019-09-19T21:07:26`
- github_id `26,182,388` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `19,314` · extension `py`

content:
```python
#!/usr/bin/env python
from __future__ import division
import re
import os
import sys
import shutil
import string
import logging
import argparse
import subprocess
import lavintervals
import fastareader
import lavreader
import distutils.spawn

OPT_DEFAULTS = {'output_name': 'Cleaned', 'score_by': 'length', 'fasta_width': 70}
USAGE = "%(prog)s [options]"
DESCRIPTION = """This tool will simplify an assembly using a reference
sequence. It merges overlapping and adjacent contigs and discards redundant
ones."""
EPILOG = """Notes: This is currently under active development and should be
considered in alpha status. At the moment it requires contigs that all overlap
or are adjacent (along the reference), with no gaps between."""

TMP_DIR_BASE = 'cleaning'
SPADES_NAME_PATTERN = r'^(NODE_\d+)_length_\d+_cov_\d+\.?\d+_ID_\d+$'


def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
    parser.set_defaults(**OPT_DEFAULTS)

    parser.add_argument('ref', metavar='reference.fa',
        help='The FASTA file of the reference. N.B.: Must contain only one '
             'sequence. If the full reference contains multiple chromosomes, you will '
             'have to break it into multiple files and run this once per chromsome.')
    parser.add_argument('asm', metavar='assembly.fa',
        help='The FASTA file of the raw assembly.')
    parser.add_argument('-s', '--score-by', choices=['length', 'id', 'support'],
        help='The method used to determine which of two overlapping contigs to use '
             'in constructing the final sequence. Default: %(default)s.')
    parser.add_argument('-O', '--orient', action='store_true',
        help='Only orient the contigs, but do not alter them otherwise.')
    parser.add_argument('-o', '--output', metavar='assembly-unified.fa',
        help='Write the processed assembly FASTA to this file instead of stdout.')
    parser.add_argument('-n', '--output-name', metavar='SeqName',
        help='Use this name for the output sequence in the FASTA header line. '
             'Default: "%(default)s".')
    parser.add_argument('-l', '--log', metavar='logfile.txt',
        help='A log file to use for writing details of the process, if one is '
             'desired.')
    parser.add_argument('-v', '--verbosity', type=int, metavar='level',
        help='How verbose the log file printing should be. If -v is given but -l '
             'is not, log printing will be turned on, to stderr. Give a number from 0 '
             '(silent) to 3 (most verbose). Default: 2 when printing to a file, '
             '1 when printing to stderr ("-l -").')
    parser.add_argument('-W', '--fasta-width', metavar='characters', type=int,
        help='Line width of the output FASTA file. Default: %(default)s.')

    args = parser.parse_args()

    if args.score_by not in ('length'):
        raise NotImplementedError('--score-by option not implemented yet.')

    if args.output:
        outfile = open(args.output, 'w')
    else:
        outfile = sys.stdout

    if not distutils.spawn.find_executable('lastz'):
        fail('Error: "lastz" command not found in PATH.')

    # Set up logger
    # logging level
    if args.verbosity == 0:
        loglevel = logging.WARNING
    elif args.verbosity == 1:
        loglevel = logging.INFO
    elif args.verbosity == 2:
        loglevel = logging.DEBUG
    elif args.verbosity >= 3:
        loglevel = logging.NOTSET
    elif args.verbosity is None:
        # default loglevel when printing to screen
        if args.log == '-':
            loglevel = logging.INFO
        # default loglevel when printing to file
        else:
            loglevel = logging.DEBUG
    else:
        fail('Error: Invalid verbosity "'+str(args.verbosity)+'"')
    # open logger, set to correct output destination
    if args.log == '-' or args.verbosity is not None:
        logging.basicConfig(stream=sys.stderr, level=loglevel, format='%(message)s')
    elif args.log:
        logging.basicConfig(filename=args.log, filemode='w', level=loglevel,
                            format='%(message)s')
    else:
        logging.disable(logging.CRITICAL)

    # open LAV and FASTA files, make temporary directory
    tmpdir = get_tmp_path(TMP_DIR_BASE)
    try:
        os.makedirs(tmpdir)
    except OSError:
        fail('Error: temporary directory "'+tmpdir+'" exists.')

    # on any exception, first close the outfile and remove the temp directory
    def cleanup_excepthook(exceptype, value, traceback):
        cleanup(outfile, tmpdir)
        sys.__excepthook__(exceptype, value, traceback)
    sys.excepthook = cleanup_excepthook

    # Orient all contigs in forward direction
    pre_lav = align(args.ref, args.asm, tmpdir)  # LASTZ align asm to ref
    asm_fasta_path = orient(args.asm, pre_lav, tmpdir, args.fasta_width)
    if args.orient:
        with open(asm_fasta_path) as asm_fasta:
            for line in asm_fasta:
                sys.stdout.write(line)
        cleanup(outfile, tmpdir)
        sys.exit(0)
    asm_fasta = fastareader.FastaLineGenerator(asm_fasta_path)

    # Now align the oriented assembly to the reference
    lav = align(args.ref, asm_fasta_path, tmpdir)
    # convert alignments to a set of intervals and map each to its alignment
    #TODO 3: Way to avoid rare collisions of intervals with identical start/ends
    #        Maybe include an identifier of the origin alignment in the tuple.
    #        (A simple third field of "query name" helps but won't be sufficient.)
    interval_to_aln = lavintervals.alignments_to_intervals(lav)
    # extract the intervals into a simple list
    intervals = interval_to_aln.keys()

    # Construct a final, non-redundant sequence out of the original, by walking
    # along the reference, adding sequence from the assembly.
    # Algorithm:
    # While intervals left:
    #   Sort list of intervals by starting coordinate
    #   Pop the first interval off the list
    #   If the interval is unique (does not overlap any other interval):
    #     Take the corresponding sequence from the assembly & add it to the output
    #   Else (it overlaps):
    #     Break the interval in two: a unique region, followed by the rest
    #     Add the unique region (if any) to the list of intervals
    #     Decide between the two overlapping intervals:
    #       For each, align the overlapping region to the original assembly
    #       Use the one with the highest LASTZ score
    #       If the larger interval wins:
    #         Pop the smaller interval from the list
    #       Else (smaller interval wins):
    #         Split the larger interval and pop it from the list
    #         Add the non-overlapping segment to the list
    final_sequence = ''
    while len(intervals) > 0:
        # Pop the next interval, peek the one after it (if it exists)
        #TODO 3: Check how much of a performance bottleneck the sorting is.
        #        Adds at most a few seconds for 4,000 contigs.
        #        See: bisect module.
        intervals.sort(key=lambda interval: interval[0])  # sort by start coord
        interval = intervals[0]
        if len(intervals) > 1:
            next_interval = intervals[1]
        else:
            next_interval = None
        del(intervals[0])
        assert next_interval is None or interval[0] <= next_interval[0]
        # Is this interval unique (no overlap between this interval and the next)?
        if next_interval is None or interval[1] < next_interval[0]:
            # Add query sequence of the interval to the output sequence
            seq_name = interval_to_aln[interval].parent.query['name']
            interval_on_query = convert_with_alignment(interval, interval_to_aln[interval],
                                                       fail='tryharder')
            logging.debug('Using {} for {:>5} to {:>5}'.format(nickname(seq_name),
                                                               interval[0], interval[1]))
            final_sequence += asm_fasta.extract(*interval_on_query, chrom=seq_name)
            # Is there a gap between this interval and the next?
            if next_interval is not None and interval[1] - next_interval[0] > 1:
                #TODO 2: Fill gaps with N's
                raise NotImplementedError('There must be no gaps between contigs.')
        # ..or do the intervals overlap?
        else:
            # The overlapping region
            overlap = (next_interval[0], min(interval[1], next_interval[1]))
            # Portion of interval before the overlapping region
            unique = (interval[0], next_interval[0]-1)
            # All parts of interval after the unique region
            rest = (next_interval[0], interval[1])
            # Add the unique sequence before the overlap (if any) to the list
            if length(unique) > 0:
                intervals.append(unique)
                interval_to_aln[unique] = interval_to_aln[interval]
            # Determine which sequence to use in the overlap section
            alignment1 = interval_to_aln[interval]
            alignment2 = interval_to_aln[next_interval]
            if choose_sequence(alignment1, alignment2, overlap, tmpdir, lav, args):
                (winner, loser) = (rest, next_interval)
            else:
                (winner, loser) = (next_interval, rest)
            assert winner[0] == loser[0]
            # Make sure the list contains the winning interval and not the losing one.
            if loser in intervals:
                intervals.remove(loser)
            if winner not in intervals:
                intervals.append(winner)
            if winner == next_interval:
                interval_to_aln[winner] = interval_to_aln[next_interval]
            else:
                interval_to_aln[winner] = interval_to_aln[interval]
            # If the losing interval is the larger one, split it and keep the part
            # after the overlap.
            if length(loser) > length(winner):
                loser_rest = (winner[1]+1, loser[1])
                intervals.append(loser_rest)
                if loser == next_interval:
                    interval_to_aln[loser_rest] = interval_to_aln[next_interval]
                else:
                    interval_to_aln[loser_rest] = interval_to_aln[interval]

    outfile.write(fasta_format(final_sequence, args.output_name, args.fasta_width))
    cleanup(outfile, tmpdir)


def length(interval):
    """Get length of interval.
    1-based: length((10, 10)) == 1"""
    return interval[1] - interval[0] + 1


def align(ref_path, asm_path, tmpdir):
    """LASTZ align two FASTA files and return an LavReader of the result.
    Executes "$ lastz ref_path asm_path" and writes the output to an LAV file
    in "tmpdir"."""
    basename = os.path.splitext(os.path.split(asm_path)[1])[0]
    lav_path = os.path.join(tmpdir, basename+'.lav')
    with open(lav_path, 'w') as lavfile:
        logging.info("$ "+" ".join(['lastz', ref_path, asm_path]))
        subprocess.call(['lastz', ref_path, asm_path], stdout=lavfile)
    #TODO 3: Check exit code for success or failure
    return lavreader.LavReader(lav_path)


def orient(in_fasta_path, lav, tmpdir, fasta_width):
    basename = os.path.splitext(os.path.split(in_fasta_path)[1])[0]
    out_fasta_path = os.path.join(tmpdir, basename+'.oriented.fa')
    # Read the LAV alignment to determine the orientation of each sequence.
    # If there are hits to both orientations, decide based on the sum of the
    # alignment scores for each.
    orientations = {}
    scores = {}
    for hit in lav:
        name = hit.query['name']
        #TODO 2: Check that summing alignment scores is a reasonable way of
        #        scoring a hit.
        score = 0
        for alignment in hit:
            score += alignment.score
        if score > scores.get(name, -1):
            scores[name] = score
            orientations[name] = hit.query['revcomp']
    # Read through the input FASTA, printing to each sequence in the correct
    # orientation to the output FASTA.
    in_fasta = fastareader.FastaLineGenerator(in_fasta_path)
    name = None
    revcomp = None
    seqbuffer = ''
    with open(out_fasta_path, 'w') as out_fasta:
        for line in in_fasta:
            # Started a new sequence; finish up the last one and print header.
            if in_fasta.name != name:
                if revcomp:
                    revcomp_seq = get_revcomp(seqbuffer)
                    out_fasta.write(fasta_format(revcomp_seq, name, width=fasta_width,
                                                 header=False))
                out_fasta.write(">"+in_fasta.name+"\n")
                name = in_fasta.name
                revcomp = orientations.get(name, False)
                seqbuffer = ''
            # If it's reversed, save up the whole sequence so it can be revcomp'd as
            # a whole at the end.
            if revcomp:
                seqbuffer += line
            else:
                out_fasta.write(line+"\n")
        if revcomp:
            revcomp_seq = get_revcomp(seqbuffer)
            out_fasta.write(fasta_format(revcomp_seq, name, width=fasta_width,
                                         header=False))
    return out_fasta_path


def choose_sequence(alignment1, alignment2, overlap, tmpdir, lav, args):
    """Returns True if the first sequence is best, False otherwise.
    Decides based on the criteria specified in args.score_by:
    "length": Chooses the longer contig.
    "id": Chooses the contig with the highest % identity compared to the
          reference.
    "support": Chooses the contig with the highest coverage in supporting
               reads.
    """
    if args.score_by == 'length':
        return choose_sequence_length(alignment1, alignment2)
    elif args.score_by == 'id':
        return choose_sequence_id(alignment1, alignment2, lav)
    elif args.score_by == 'support':
        return choose_sequence_support(alignment1, alignment2, tmpdir, args)


def choose_sequence_length(alignment1, alignment2):
    id1 = nickname(alignment1.parent.query['id'])
    id2 = nickname(alignment2.parent.query['id'])
    logging.debug("choosing between {} and {}:".format(id1, id2))
    length1 = alignment1.parent.query['length']
    length2 = alignment2.parent.query['length']
    if length1 >= length2:
        logging.debug('  winner: {} ({} >= {})'.format(id1, length1, length2))
        return True
    else:
        logging.debug('  winner: {} ({} < {})'.format(id2, length1, length2))
        return False


def choose_sequence_id(alignment1, alignment2, lav):
    raise NotImplementedError


def choose_sequence_support(alignment1, alignment2, tmpdir, args):
    raise NotImplementedError


def choose_sequence_old(alignment1, alignment2, overlap, tmpdir, asm_raw_file):
    best_hits = []
    logging.debug("processing {}:".format(overlap))
    for (alignment, name) in zip((alignment1, alignment2), ('seq1', 'seq2')):
        fastapath = os.path.join(tmpdir, name+'.fa')
        overlap_on_query = convert_with_alignment(overlap, alignment, fail='tryharder')
        sequence = asm_merge.extract(*overlap_on_query)
        with open(fastapath, 'w') as fastafile:
            fastafile.write(fasta_format(sequence, name))
        # Perform LASTZ alignment
        lavpath = os.path.join(tmpdir, name+'.lav')
        with open(lavpath, 'w') as lavfile:
            # sys.stderr.write("$ "+" ".join(['lastz', fastapath, asm_raw_file])+"\n")
            subprocess.call(['lastz', fastapath, asm_raw_file], stdout=lavfile)
        lav = lavreader.LavReader(lavpath)
        # Get the top-scoring alignment
        logging.debug("  scores for {}:".format(overlap_on_query))
        (top_length, top_score) = lav_top_length(lav)
        best_hits.append({'length': top_length, 'score': top_score})
    # If both match the same contig the best, choose the highest alignment score
    # (if length is equal, use alignment score as a tiebreaker).
    if best_hits[0]['length'] == best_hits[1]['length']:
        return best_hits[0]['score'] > best_hits[1]['score']
    else:
        return best_hits[0]['length'] > best_hits[1]['length']


def lav_top_score(lav):
    """Score an LAV by the top score of all alignments."""
    top_score = 0
    for hit in lav:
        for alignment in hit:
            logging.debug("    {}".format(alignment.score))
            if alignment.score > top_score:
                top_score = alignment.score
    return top_score


def lav_top_id(lav):
    """Score an LAV by the top % identity of all alignments.
    % identity of an alignment is computed by an average of all the %
    identities of its blocks, weighted by block length."""
    top_id = 0
    for hit in lav:
        for alignment in hit:
            total_id = 0
            total_length = 0
            for block in alignment:
                length = block.query['length']
                total_length += length
                total_id += length * block.identity
            id_pct = total_id / total_length
            logging.debug("    {}".format(round(id_pct, 2)))
            if id_pct > top_id:
                top_id = id_pct
    return top_id


def lav_top_length(lav):
    """Score an LAV by the length of query sequence of the best hit.
    The best hit is determined by its top alignment score."""
    length_of_best = 0
    top_score = 0
    for hit in lav:
        log_msg = "    len: {}\tscores: ".format(hit.query['length'])
        for alignment in hit:
            log_msg += "{} ".format(alignment.score)
            if alignment.score > top_score:
                top_score = alignment.score
                length_of_best = hit.query['length']
        logging.debug(log_msg)
    logging.debug("    best: {}".format(length_of_best))
    return (length_of_best, top_score)


def fasta_format(sequence, name, width=70, header=True):
    """Turn a sequence and name into a FASTA-formatted string."""
    if header:
        output = '>'+name+'\n'
    else:
        output = ''
    for start in range(0, len(sequence), width):
        end = start + width
        if end > len(sequence):
            end = len(sequence)
        output += sequence[start:end]+'\n'
    return output


def get_revcomp(sequence):
    delete_chars = '\r\n '
    table = string.maketrans('acgtrymkbdhvACGTRYMKBDHV',
                             'tgcayrkmvhdbTGCAYRKMVHDB')
    return sequence.translate(table, delete_chars)[::-1]


def convert_with_alignment(interval, alignment, fail='throw'):
    """One-off interval conversion, using a pre-chosen alignment.
    Converts the interval's start/end coordinates from subject to query
    coordinates using the given alignment. Assumes the interval is actually
    contained in the alignment."""
    #TODO 1: What happens when a coordinate is in a small gap between blocks?
    #        Make sure the resulting sequence doesn't include 1bp duplications.
    table = lavintervals.alignments_to_conv_table([alignment], query_to_subject=False)
    try:
        begin = lavintervals.convert(table, interval[0], fail=fail)[0]
        end = lavintervals.convert(table, interval[1], fail=fail)[0]
    except Exception as e:
        if len(e.args) > 1 and e.args[1] == 'fail':
            raise AssertionError('Interval must be contained in alignment.')
        else:
            raise
    if begin > end:
        (begin, end) = (end, begin)
    return (begin, end)


def get_tmp_path(base, max_tries=20):
    """Return an unoccupied path based on the supplied one.
    The returned path will be the argument plus ".tmp", or if that's taken,
    with a number up to max_tries, like ".3.tmp". Once max_tries has been
    reached, it will throw an exception.
    N.B.: It will be a relative path if the input is."""
    attempts = 0
    candidate = base + '.tmp'
    while os.path.exists(candidate):
        candidate = "{}.{}.{}".format(base, attempts, 'tmp')
        attempts += 1
        if attempts > 20:
            raise Exception('Cannot find an unoccupied temp directory name.')
    return candidate


# Turn some types of verbose contig names into more concise ones
def nickname(raw_name):
    # SPAdes
    new_name = raw_name
    match = re.search(SPADES_NAME_PATTERN, raw_name)
    if match:
        new_name = match.group(1)
    return new_name


def cleanup(outfile, tmpdir):
    logging.shutdown()
    if outfile is not sys.stdout:
        outfile.close()
    shutil.rmtree(tmpdir)


def fail(message):
    sys.stderr.write(message+"\n")
    sys.exit(1)


if __name__ == '__main__':
    main()
```
**Example 10 — `Aasthaengg/IBMdataset` · `/Python_codes/p02270/s475630671.py`**

- blob_id `4b3ff2faf3fb9d4006638355b98ca3527feb4f92` · directory_id `ca7aa979e7059467e158830b76673f5b77a0f5a3` · content_id `a7913845a9653b909329e96d11e5910a53bdb908`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901` · revision_id `f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8` · branch_name `refs/heads/main`
- visit_date `2023-04-22T10:22:44.763102` · revision_date `2021-05-13T17:27:22` · committer_date `2021-05-13T17:27:22`
- github_id `367,112,348` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `541` · extension `py`

content:
```python
def check(P):
    global w, k
    t = 1
    wt = 0
    for wi in w:
        if wt + wi > P:
            wt = wi
            t += 1
        else:
            wt += wi
    if t > k:
        return False
    return True


def search(l, r):
    if r - l == 1:
        return r
    else:
        m = (l + r) // 2
        if not check(m):
            return search(m, r)
        else:
            return search(l, m)


n, k = map(int, input().split())
w = []
for i in range(n):
    w.append(int(input()))
S = sum(w)
M = max(w)
# print(S,M)
P0 = M - 1
# print(P0)
print(search(M - 1, S))
```
**Example 11 — `HausCloud/Holberton` · `/holbertonschool-higher_level_programming/0x01-python-if_else_loops_functions/8-uppercase.py`**

- blob_id `33949dd2321b96eef46329e73735f12ac62437d1` · directory_id `0dee7cc69ae44e30c5cb372eb17f2e469635056b` · content_id `f2a5b337421ce94b40170a794bc783b5c51ba660`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `00cd25b4a489041e041551ea8f87674d53f43713` · revision_id `b39c5978698e02b9e746121d6c55d791b73e6d9b` · branch_name `refs/heads/master`
- visit_date `2022-12-13T01:06:18.968047` · revision_date `2020-09-05T18:23:00` · committer_date `2020-09-05T18:23:00`
- github_id `293,129,232` · star_events_count `0` · fork_events_count `1`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `250` · extension `py`

content:
```python
#!/usr/bin/python3
def uppercase(str):
    for var in str:
        if ord(var) >= 97 and ord(var) <= 122:
            var2 = ord(var) - 32
        else:
            var2 = ord(var)
        print("{:c}".format(var2), end="")
    else:
        print()
```
**Example 12 — `glenn-edgar/cloud_scada` · `/neo4j_graph/farm_template.py`**

- blob_id `d3d5566076b929a309da59234c53596ff385821b` · directory_id `9c81628bc814841d09eef15ea89068436838836d` · content_id `fe6dddb1b37ce564a3e412716d0e4bee82585994`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `dae9ce58ba1a51a36a26dac578a9ed0461398289` · revision_id `d75006f47e67a17d0d07a107cc549322ae2c30d8` · branch_name `refs/heads/master`
- visit_date `2021-01-22T11:10:37.136364` · revision_date `2017-11-16T01:58:35` · committer_date `2017-11-16T01:58:35`
- github_id `49,163,841` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `8,118` · extension `py`

content:
```python
import json
from graph_functions import Build_Configuration


class Construct_Farm():

    def __init__(self, bc):
        self.bc = bc  # Build configuration in graph_functions

    def construct_system(self, name=None):
        self.bc.construct_node(True, "SYSTEM", "SYSTEM", name, {})

    def end_system(self):
        self.bc.pop_namespace()

    def construct_site(self, name=None, wired=True, address=None):
        self.bc.construct_node(push_namespace=True, relationship="SITE", label="SITE",
                               name=name, properties={"wired": wired, "address": address})

    def end_site(self):
        self.bc.pop_namespace()

    def construct_controller(self, name, web_queue, rpc_queue, local_ip, controller_type,
                             vhost, card_dict, redis_controller_key):
        card_dict_json = json.dumps(card_dict)
        self.bc.construct_node(push_namespace=True, relationship="CONTROLLER",
                               label="CONTROLLER", name=name,
                               properties={"web_queue": web_queue, "rpc_queue": rpc_queue,
                                           "local_ip": local_ip,
                                           "controller_type": controller_type,
                                           "vhost": vhost, "card_dict": card_dict_json,
                                           "irrigation_resets": 0, "system_resets": 0,
                                           "ping_loss": 0, "ping_counts": 0,
                                           "temperature": 0,
                                           "redis_key": redis_controller_key})

    def end_controller(self):
        self.bc.pop_namespace()

    def add_event_queue(self, name, events):
        self.bc.construct_node(push_namespace=False, relationship="EVENT_QUEUE",
                               label="EVENT_QUEUE", name=name,
                               properties={"timestamp": 0, "events": json.dumps(events)})

    def add_diagnostic_card_header(self, *args):
        self.bc.construct_node(push_namespace=True, relationship="DIAGNOSTIC_CARD_HEADER",
                               label="DIAGNOSTIC_CARD_HEADER", name="DIAGNOSTIC_CARD_HEADER",
                               properties={})

    def end_diagnostic_card_header(self, *args):
        self.bc.pop_namespace()

    def add_diagnostic_card(self, org_name, board_name, list_name, card_name, description=None):
        if description == None:
            description = card_name
        self.bc.construct_node(push_namespace=False, relationship="DIAGNOSTIC_CARD",
                               label="DIAGNOSTIC_CARD", name=card_name,
                               properties={"org_name": org_name, "board_name": board_name,
                                           "list_name": list_name, "description": description,
                                           "label": "green", "new_commit": []})

    def add_schedule_header(self):
        return self.bc.construct_node(push_namespace=True, relationship="Schedule_Header",
                                      label="Schedule_Header", name="Schedule_Header",
                                      properties={})

    def end_schedule_header(self):
        self.bc.pop_namespace()

    def add_schedule(self, name, number, flow_sensor_names, card_link):
        schedule_node = self.bc.construct_node(push_namespace=True,
                                               relationship="IRRIGATION_SCHEDULE",
                                               label="IRRIGATION_SCHEDULE", name=name,
                                               properties={"number": number})
        for i in range(0, number):
            self.bc.construct_node(push_namespace=True, relationship="STEP", label="STEP",
                                   name=str(i+1), properties={"card": card_link+str(i+1)})
            self.bc.construct_node(push_namespace=True, relationship="FLOW_SENSOR_HEADERS",
                                   label="FLOW_SENSOR_HEADERS", name="FLOW_SENSOR_HEADERS",
                                   properties={})
            for j in flow_sensor_names:
                self.bc.construct_node(push_namespace=True, relationship="FLOW_SENSOR_HEADER",
                                       label="FLOW_SENSOR_HEADER", name=j, properties={})
                self.bc.construct_node(push_namespace=False, relationship="FLOW_SENSOR_LIMIT",
                                       label="FLOW_SENSOR_LIMIT", name=j, properties={})
                self.bc.construct_node(push_namespace=False, relationship="FLOW_SENSOR_VALUE",
                                       label="FLOW_SENSOR_VALUE", name=j, properties={})
                self.bc.pop_namespace()
            self.bc.pop_namespace()
            self.bc.construct_node(push_namespace=False, relationship="COIL_CURRENT",
                                   label="COIL_CURRENT", name="COIL_CURRENT", properties={})
            self.bc.construct_node(push_namespace=False, relationship="COIL_CURRENT_LIMIT",
                                   label="COIL_CURRENT_LIMIT", name="COIL_CURRENT_LIMIT",
                                   properties={})
            for j in flow_sensor_names:
                self.bc.construct_node(push_namespace=False, relationship="FLOW_SENSOR_LIMIT",
                                       label="FLOW_SENSOR_LIMIT", name=j, properties={})
            self.bc.pop_namespace()
        self.bc.pop_namespace()

    def add_flow_sensor_header(self):
        return self.bc.construct_node(push_namespace=True, relationship="FLOW_SENSOR_HEADER",
                                      label="FLOW_SENSOR_HEADER", name="flow_sensor_header",
                                      properties={})

    def end_flow_sensor_header(self):
        self.bc.pop_namespace()

    def add_flow_sensor(self, name, controller, io, conversion_factor):
        return self.bc.construct_node(push_namespace=False, relationship="FLOW_SENSOR",
                                      label="FLOW_SENSOR", name=name,
                                      properties={"name": name, "controller": controller,
                                                  "io": io,
                                                  "conversion_factor": conversion_factor})

    def add_udp_io_sever(self, name, ip, remote_type, port, redis_key):
        return self.bc.construct_node(push_namespace=True, relationship="UDP_IO_SERVER",
                                      label="UDP_IO_SERVER", name=name,
                                      properties={"name": name, "ip": ip,
                                                  "remote_type": remote_type,
                                                  "port": port, "redis_key": redis_key})

    def end_udp_io_server(self):
        self.bc.pop_namespace()

    def add_rtu_interface(self, name, protocol, baud_rate):
        return self.bc.construct_node(push_namespace=True, relationship="RTU_INTERFACE",
                                      label="RTU_INTERFACE", name=name,
                                      properties={"name": name, "protocol": protocol,
                                                  "baud_rate": baud_rate})

    def add_remote(self, name, modbus_address, irrigation_station_number, card_dict):
        card_dict_json = json.dumps(card_dict)
        self.bc.construct_node(push_namespace=True, relationship="REMOTE", label="REMOTE",
                               name=name,
                               properties={"name": name, "modbus_address": modbus_address,
                                           "irrigation_station_number": irrigation_station_number,
                                           "card_dict": card_dict_json})
        self.bc.construct_node(push_namespace=True,
                               relationship="IRRIGATION_VALVE_CURRENT_HEADER",
                               label="IRRIGATION_VALVE_CURRENT_HEADER",
                               name="valve_current_header", properties={})
        for i in range(0, irrigation_station_number):
            self.bc.construct_node(push_namespace=False,
                                   relationship="IRRIGATION_VALVE_CURRENT",
                                   label="IRRIGATION_VALVE_CURRENT", name=str(i+1),
                                   properties={"active": False})
        self.bc.pop_namespace()
        self.bc.construct_node(push_namespace=True,
                               relationship="IRRIGATION_VALVE_CURRENT_HEADER",
                               label="IRRIGATION_VALVE_CURRENT_LIMIT_HEADER",
                               name="valve_current_limit_header", properties={})
        for i in range(0, irrigation_station_number):
            self.bc.construct_node(push_namespace=False,
                                   relationship="IRRIGATION_VALVE_CURRENT_LIMIT",
                                   label="IRRIGATION_VALVE_CURRENT_LIMIT", name=str(i+1),
                                   properties={"active": False})
        self.bc.pop_namespace()
        self.bc.pop_namespace()

    def end_rtu_interface(self):
        self.bc.pop_namespace()


class Namespace_Decoder():

    def __init__(self, key_list=[None, "system", "site", "controller"]):
        self.top_key_list = key_list

    def decode_name_space(self, app_key_list, name_space, node):
        key_list = copy.copy(self.top_key_list)
        key_list.extend(app_key_list)
        name_space_list = node.properties["namespace"].split("/")
        if len(key_list) != len(name_space_list):
            print "unequal name space lengths"
            raise
        for i in range(0, len(key_list)):
            if key_list[i] != None:
                node.properties[key_list[i]] = name_space_list[i]
```
**Example 13 — `anithamini/useful-info` · `/ADB Scripts/GMLAB Scripts/android_test/src/calculator/calculator_sanity.py`**

- blob_id `1055f2b9979f4f089b71c5f85e6a00f7a510e344` · directory_id `3032a58254a0d61403cc75476438bf60a119c2ea` · content_id `eec6d9f5a72faa191feac6c5b7f2e19d74a169fd`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `1e05528d61609ca4249920e41c88957ed1476fd7` · revision_id `a393db8d8e727d29d185d75f7920e21770a39e70` · branch_name `refs/heads/master`
- visit_date `2020-04-14T15:42:06.627213` · revision_date `2019-01-03T07:02:16` · committer_date `2019-01-03T07:02:16`
- github_id `163,935,084` · star_events_count `2` · fork_events_count `3`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `6,601` · extension `py`

content:
```python
'''
Created on Jul 24, 2018

@author: hnagella
'''
import os
from time import sleep
from common import constants

LaunchPassCnt = 0
LaunchFailCnt = 0
KillPassCnt = 0
KillFailCnt = 0
DegPassCnt = 0
DegFailCnt = 0
RadPassCnt = 0
RadFailCnt = 0
i = 0


def Validate(s1, s2):
    os.system('adb shell dumpsys ' + s2 + ' > calc.txt')  # To Collect the activity logs
    with open("calc.txt", "r") as fh:
        buff = fh.read()
    if (s1 in buff):
        return (True)
    else:
        return (False)


def KillCalculator():
    os.system("adb shell am force-stop com.google.android.calculator")  # To kill the calculator application
    sleep(2)
    global KillPassCnt
    global KillFailCnt
    global fileobj_logfile
    if (Validate("I=com.android.launcher3/com.android.searchlauncher.SearchLauncher", 'activity recents | find "Recent #0"')):
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_1" + "\t" + "TestKillCalculator" + "\t" + "PASS\n")
        KillPassCnt += 1
    else:
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_1" + "\t" + "TestKillCalculator" + "\t" + "FAIL\n")
        KillFailCnt += 1


def LaunchCalculator():
    global LaunchPassCnt
    global LaunchFailCnt
    global fileobj_logfile
    os.system("adb shell monkey -p com.google.android.calculator -c android.intent.category.LAUNCHER 1")  # To Launch the Calculator application
    sleep(2)
    if (Validate("A=com.google.android.calculator", 'activity recents | find "Recent #0"')):
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_2" + "\t" + "TestLaunchCalculator" + "\t" + "PASS\n")
        LaunchPassCnt += 1
    else:
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_2" + "\t" + "TestLaunchCalculator" + "\t" + "FAIL\n")
        LaunchFailCnt += 1


def Mathematic():
    for i in range(0, 2):
        os.system("adb shell input keyevent 8")   # For tapping number 1
        os.system("adb shell input keyevent 67")  # To delete the number
    os.system("adb shell input keyevent 81")      # For '+' operator
    os.system("adb shell input keyevent 10")      # For tapping number 3
    os.system("adb shell input keyevent 66")      # For tapping '=' or enter


def Trigonometric():
    os.system("adb shell input touchscreen swipe 1057 1343 170 1432")  # For dragging the screen to trigonometric operations table
    os.system("adb shell input tap 450 1154")  # For selecting the sin function
    os.system("adb shell input touchscreen swipe 170 1432 1057 1343")  # For dragging the screen to mathematical operations table
    n = 10
    for i in range(0, 3):
        os.system("adb shell input keyevent " + str(n))  # For tapping number 3 for iteration 0
        if (i == 0):                                     # For tapping number 0 for iteration 1
            n -= 3                                       # For '-' operator for iteration 2
        else:
            n += 59


def RadMode():
    global RadPassCnt
    global RadFailCnt
    global fileobj_logsummary
    os.system("adb shell input tap 140 210")  # Changing to Radian mode
    Mathematic()
    Trigonometric()
    if (Validate("BFGS change:active", "activity")):
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_2" + "\t" + "TestRadMode" + "\t" + "PASS\n")
        RadPassCnt += 1
    else:
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_2" + "\t" + "TestRadMode" + "\t" + "FAIL\n")
        RadFailCnt += 1


def DegMode():
    global DegPassCnt
    global DegFailCnt
    global fileobj_logsummary
    os.system("adb shell input tap 140 210")  # Changing to Degree mode
    Mathematic()
    Trigonometric()
    if (Validate("BFGS change:active", "activity")):
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_2" + "\t" + "TestDegMode" + "\t" + "PASS\n")
        DegPassCnt += 1
    else:
        fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
        fileobj_logfile.write("TC_2" + "\t" + "TestDegMode" + "\t" + "FAIL\n")
        DegFailCnt += 1


def TestCalculatorSanity_Setup():
    print(constants.logfile_absolutepath)
    global fileobj_logfile
    fileobj_logfile = open(constants.logfile_absolutepath, 'a+')
    fileobj_logfile.write("\nCalculator_sanity\n")
    fileobj_logfile.write("---------------------------------------------------\n")


def TestCalculatorSanity_testsuit():
    KillCalculator()
    LaunchCalculator()
    DegMode()
    RadMode()


def Calculator_sanity_summary_log():
    global LaunchPassCnt
    global LaunchFailCnt
    global KillPassCnt
    global KillFailCnt
    global DegPassCnt
    global DegFailCnt
    global RadPassCnt
    global RadFailCnt
    global fileobj_logsummary
    print("Launch Pass Count=", LaunchPassCnt, ",LaunchFail Count=", LaunchFailCnt)
    print("Kill Pass Count=", KillPassCnt, ",Kill Fail Count=", KillFailCnt)
    print("Degree mode Pass Count=", DegPassCnt, ",LaunchFail Count=", DegFailCnt)
    print("Radian mode Pass Count=", RadPassCnt, ",Kill Fail Count=", RadFailCnt)
    '''Below code is used to get summary report'''
    print(constants.logsummary_absolutepath)
    fileobj_logsummary = open(constants.logsummary_absolutepath, 'a+')
    fileobj_logsummary.write("\nCalculator_sanity\n")
    fileobj_logsummary.write("---------------------------------------------------\n")
    fileobj_logsummary.write(str(LaunchPassCnt + LaunchFailCnt) + "\t" + str(LaunchPassCnt) + "\t" + str(LaunchFailCnt) + "\n")
    fileobj_logsummary.write(str(KillPassCnt + KillFailCnt) + "\t" + str(KillPassCnt) + "\t" + str(KillFailCnt) + "\n")
    fileobj_logsummary.write(str(DegPassCnt + DegFailCnt) + "\t" + str(DegPassCnt) + "\t" + str(DegFailCnt) + "\n")
    fileobj_logsummary.write(str(RadPassCnt + RadFailCnt) + "\t" + str(RadPassCnt) + "\t" + str(RadFailCnt))
    fileobj_logsummary.write("\n---------------------------------------------------\n")
    fileobj_logsummary.close()
    LaunchPassCnt = 0
    LaunchFailCnt = 0
    KillPassCnt = 0
    KillFailCnt = 0
    DegPassCnt = 0
    DegFailCnt = 0
    RadPassCnt = 0
    RadFailCnt = 0


def TestCalculatorSanity_main():
    print("Entry of TestCalculatorSanity_main")
    TestCalculatorSanity_Setup()
    TestCalculatorSanity_testsuit()
    Calculator_sanity_summary_log()
```
**Example 14 — `ACENDER/LeetCode` · `/题库/1668.找出最长的超赞子字符串.py`**

- blob_id `36f37781f09c008b4ba4219c9d9699650fb13314` · directory_id `4be56098894a95da5964622fc4102b69e4530ab6` · content_id `b1669f495a20a33d517d87e391451a607da9a5f4`
- detected_licenses `[]` · license_type `no_license`
- snapshot_id `7c7c7ecc8d0cc52215272f47ec34638637fae7ac` · revision_id `3383b09ab1246651b1d7b56ab426a456f56a4ece` · branch_name `refs/heads/master`
- visit_date `2023-03-13T19:19:07.084141` · revision_date `2021-03-15T09:29:21` · committer_date `2021-03-15T09:29:21`
- github_id `299,332,864` · star_events_count `0` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `101` · extension `py`

content (the file contains only a header comment):
```python
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 1668.找出最长的超赞子字符串.py
```
**Example 15 — `chb/clinical-trials-app` · `/py/trialpatient.py`**

- blob_id `189f941fc040e86b8d500c00756f506cd9174b59` · directory_id `d77c62ce48e13f1033d0dc0207d31094e9b32a94` · content_id `1e68cd8e71c746c64693bfb29fb96fe3907980d0`
- detected_licenses `["Apache-2.0"]` · license_type `permissive`
- snapshot_id `bf7c19a202071159353aa81e57a31764fffc7098` · revision_id `21a69050788974018b2ea31fc3ef8a3d8e584376` · branch_name `refs/heads/master`
- visit_date `2021-03-27T10:42:22.532446` · revision_date `2016-01-13T19:58:49` · committer_date `2016-01-13T19:58:49`
- github_id `22,513,863` · star_events_count `4` · fork_events_count `0`
- gha_license_id / gha_event_created_at / gha_created_at / gha_language: `null`
- src_encoding `UTF-8` · language `Python` · is_vendor `false` · is_generated `false`
- length_bytes `10,193` · extension `py`

content:
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os.path import logging import datetime import markdown from dateutil.parser import * from dateutil.relativedelta import * import trialcondition import trialmedication import trialallergy import triallab import trialmutation import clinicaltrials.jsondocument.jsondocument as jsondocument import smartclient.fhirclient.models.condition as condition import smartclient.fhirclient.models.medicationprescription as medicationprescription import smartclient.fhirclient.models.allergyintolerance as allergyintolerance import smartclient.fhirclient.models.observation as observation class TrialPatient(jsondocument.JSONDocument): """ A representation for a patient. Properties: - full_name: string - gender: string, "female" or "male" - birthday: ISO-8601 date string - deathdate: ISO-8601 date string - age_years: int - age_string: string - city: string - region: string - country: string - location = city, region: string - conditions: [TrialCondition] - medications: [TrialMedication] - allergies: [TrialAllergy] - labs: [TrialLab] - trial_info: [TrialPatientInfo] (loaded from db on init) - cached: when the patient data was last cached """ def __init__(self, ident, json=None): super().__init__(ident, "patient", json) if self.gender is None: self.gender = "female" if self.country is None: self.country = "United States" if self.location is None: self.update_location() self.trial_info = TrialPatientInfo.find({'type': 'trial-patient-info', 'patient_id': ident}) def update_with(self, json): super().update_with(json) #print('===> ', json) if self.conditions is not None: cond = [] for c in self.conditions: if isinstance(c, trialcondition.TrialCondition): cond.append(c) else: cond.append(trialcondition.TrialCondition(c)) self.conditions = cond else: self.conditions = [] if self.medications is not None: meds = [] for m in self.medications: if isinstance(m, trialmedication.TrialMedication): meds.append(m) else: meds.append(trialmedication.TrialMedication(m)) self.medications = meds else: self.medications = [] if self.allergies is not None: allergs = [] for a in self.allergies: if isinstance(a, trialallergy.TrialAllergy): allergs.append(a) else: allergs.append(trialallergy.TrialAllergy(a)) self.allergies = allergs else: self.allergies = [] if self.labs is not None: lbs = [] for l in self.labs: if isinstance(l, triallab.TrialLab): lbs.append(l) else: lbs.append(triallab.TrialLab(l)) self.labs = lbs else: self.labs = [] def __setattr__(self, name, value): """ Overridden to perform some value generation after setting certain properties. """ super().__setattr__(name, value) if 'birthday' == name: self.update_age_years() if 'country' == name or 'city' == name or 'region' == name: self.update_location() def as_json(self): js_dict = super().as_json() if 'trial_info' in js_dict: del js_dict['trial_info'] if 'fhir' in js_dict: del js_dict['fhir'] return js_dict def for_api(self, stripped=False): js_dict = super().for_api() if stripped: #if 'conditions' in js_dict: # del js_dict['conditions'] if 'medications' in js_dict: del js_dict['medications'] if 'allergies' in js_dict: del js_dict['allergies'] if 'labs' in js_dict: del js_dict['labs'] if 'cached' in js_dict: del js_dict['cached'] if 'fhir' in js_dict: del js_dict['fhir'] return js_dict def process_observations(self, observations): """ Given a list of FHIR observations, determines which are mutations and which are lab values, filling the receiver's ivars accordingly. 
""" if observations is None: return if self.labs is None: self.labs = [] for obs in observations: if triallab.TrialLab.is_lab(obs): self.labs.append(triallab.TrialLab.from_fhir(obs)) elif trialmutation.TrialMutation.is_mutation(obs): mut = trialmutation.TrialMutation.from_fhir(obs) # this is a mutation, find corresponding observation if self.conditions is not None: found = False for cond in self.conditions: if mut.reference and cond.id == os.path.basename(mut.reference): if cond.mutations is None: cond.mutations = [] cond.mutations.append(mut) found = True break if not found: logging.warning("Found a mutation but not the matching condition in patient {}" .format(self.id)) else: logging.warning("Found a mutation but patient {} has no conditions" .format(self.id)) self.labs = self.labs if len(self.labs) > 0 else None @classmethod def load_from_fhir(cls, client): """ Instantiates a TrialPatient with data from a FHIR Patient resource, retrieved from a SMART client (fhirclient) instance. :param client: A handle to a `fhirclient` instance :returns: A TrialPatient instance, or None on error """ fpat = client.patient if client is not None else None if fpat is None: return None patient = cls(fpat.id) patient.fhir = fpat patient.full_name = client.human_name(fpat.name[0] if fpat.name and len(fpat.name) > 0 else None) patient.gender = fpat.gender patient.birthday = fpat.birthDate.isostring if fpat.address is not None and len(fpat.address) > 0: address = fpat.address[0] for addr in fpat.address: if 'home' == addr.use: address = addr break patient.city = address.city patient.region = address.state patient.country = address.country # retrieve problem list cond_search = condition.Condition.where(struct={'subject': fpat.id}) patient.conditions = [trialcondition.TrialCondition.from_fhir(c) for c in cond_search.perform_resources(fpat._server)] # retrieve observations: labs and mutations obs_search = observation.Observation.where(struct={'subject': fpat.id}) observations = obs_search.perform_resources(fpat._server) patient.process_observations(observations) # retrieve meds med_search = medicationprescription.MedicationPrescription.where(struct={'subject': fpat.id}) patient.medications = [trialmedication.TrialMedication.from_fhir(m) for m in med_search.perform_resources(fpat._server)] # retrieve allergies allerg_search = allergyintolerance.AllergyIntolerance.where(struct={'subject': fpat.id}) patient.allergies = [trialallergy.TrialAllergy.from_fhir(a) for a in allerg_search.perform_resources(fpat._server)] return patient # MARK: Trial Info def info_for_trial(self, trial_id): if self.trial_info is not None: for trialinfo in self.trial_info: if trialinfo.trial_id == trial_id: return trialinfo return None # MARK: Birthday & Age def age_delta(self): if self.birthday: try: birth = parse(self.birthday) except Exception as e: logging.error("Failed to parse birthday \"{}\": {}".format(self.birthday, e)) return None now = datetime.datetime.now() if self.deathdate: try: now = parse(self.deathdate) except Exception as e: logging.error("Failed to parse deathdate \"{}\": {}".format(self.deathdate, e)) return relativedelta(now, birth) return None @property def age_years(self): if self.__dict__.get('age_years') is None: self.update_age_years() return self.__dict__.get('age_years') @age_years.setter def age_years(self, years): self.__dict__['age_years'] = years def update_age_years(self): delta = self.age_delta() self.age_years = delta.years if delta is not None else None @property def age_string(self): delta = 
self.age_delta() if delta is not None: if 1 == delta.years: years = "{} year".format(delta.years) else: years = "{} years".format(delta.years) if delta.years < 3: if 1 == delta.months: return "{} {} month".format(years, delta.months) return "{} {} months".format(years, delta.months) return years return '' # MARK: Portrait def load_photo(self): """ Retrieves a FHIR Patient's first photo and returns a tuple with content-type and data. """ fpat = self.fhir if self.fhir is not None else None if fpat is None: logging.warning("Patient instance lost its handle to the FHIR Patient instance, cannot retrieve photo") return None, None if fpat.photo is not None: photo_data = None for photo in fpat.photo: if photo.url is not None: photo_data = fpat._server.request_data(photo.url) break elif photo.data is not None: logging.info("Base-64 encoded photo data is not yet supported") if photo_data is not None: return photo.contentType, photo_data return None, None # MARK: Location def update_location(self): parts = [] if self.city: parts.append(self.city) if self.region: parts.append(self.region) setattr(self, 'location', ', '.join(parts) if len(parts) > 0 else None) class TrialPatientInfo(jsondocument.JSONDocument): """ Information linking a patient and a trial, stored by app users. """ def __init__(self, trial_id=None, patient_id=None, json=None): if json is not None: if trial_id is None: trial_id = json.get('trial_id') if patient_id is None: patient_id = json.get('patient_id') if not trial_id or not patient_id: raise Exception("Need both a trial- and patient-id, have trial: {}, patient: {}" .format(trial_id, patient_id)) ident = '{}-{}'.format(trial_id, patient_id) super().__init__(ident, 'trial-patient-info', json) self.trial_id = trial_id self.patient_id = patient_id def for_api(self): js = { 'trial_id': self.trial_id, 'patient_id': self.patient_id, } if self.suggested: js['suggested'] = True if self.notes: js['notes'] = { 'raw': self.notes, 'html': markdown.markdown(self.notes), } return js def update_from_api(self, json): d = {} if 'suggested' in json: d['suggested'] = True if 'true' == json['suggested'] or 1 == int(json['suggested']) else False if 'notes' in json: d['notes'] = json['notes'] self.update_with(d) self.store()
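# Usage sketch for TrialPatient.load_from_fhir (added for illustration; the
# client setup follows the stock fhirclient pattern and the settings values
# are placeholders, not taken from this project):
#   from smartclient.fhirclient import client
#   smart = client.FHIRClient(settings={'app_id': 'demo',
#                                       'api_base': 'https://fhir.example.com'})
#   patient = TrialPatient.load_from_fhir(smart)
#   print(patient.age_string, patient.location)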
52b7c325b80ecf5a7a3c95b1d6d91142d29c6b21
3546dd5dbcffc8509440c820faa7cf28080c5df7
/python35/Lib/site-packages/win32com/demos/connect.py
992965ee1eaf18af7978dfda4348d9e446dab4e2
[ "Apache-2.0", "MIT", "BSD-3-Clause", "LGPL-2.1-only" ]
permissive
Matchoc/python_env
55ad609c8270cc6148eda22d37f36709d73b3652
859d84d1717a265a4085ad29706b12c19c62d36f
refs/heads/master
2022-02-13T11:05:51.825544
2020-06-05T02:42:08
2020-06-05T02:42:08
75,793,921
0
1
Apache-2.0
2018-12-14T07:30:28
2016-12-07T03:06:13
Python
UTF-8
Python
false
false
3,741
py
# Implements _both_ a connectable client, and a connectable server.
#
# Note that we cheat just a little - the Server in this demo is not created
# via Normal COM - this means we can avoid registering the server.
# However, the server _is_ accessed as a COM object - just the creation
# is cheated on - so this is still working as a fully-fledged server.

import pythoncom
import win32com.server.util
import win32com.server.connect
from win32com.server.exception import Exception
from pywin32_testutil import str2bytes

# This is the IID of the Events interface both Client and Server support.
IID_IConnectDemoEvents = pythoncom.MakeIID("{A4988850-49C3-11d0-AE5D-52342E000000}")

# The server which implements
# Create a connectable class, that has a single public method
# 'DoIt', which echos to a single sink 'DoneIt'

class ConnectableServer(win32com.server.connect.ConnectableServer):
    _public_methods_ = ["DoIt"] + win32com.server.connect.ConnectableServer._public_methods_
    _connect_interfaces_ = [IID_IConnectDemoEvents]

    # The single public method that the client can call on us
    # (ie, as a normal COM server, this exposes just this single method.
    def DoIt(self, arg):
        # Simply broadcast a notification.
        self._BroadcastNotify(self.NotifyDoneIt, (arg,))

    def NotifyDoneIt(self, interface, arg):
        interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)

# Here is the client side of the connection world.
# Define a COM object which implements the methods defined by the
# IConnectDemoEvents interface.
class ConnectableClient:
    # This is another cheat - I _know_ the server defines the "DoneIt" event
    # as DISPID==1000 - I also know from the implementation details of COM
    # that the first method in _public_methods_ gets 1000.
    # Normally some explicit DISPID->Method mapping is required.
    _public_methods_ = ["OnDoneIt"]

    def __init__(self):
        self.last_event_arg = None

    # A client must implement QI, and respond to a query for the Event interface.
    # In addition, it must provide a COM object (which server.util.wrap) does.
    def _query_interface_(self, iid):
        import win32com.server.util
        # Note that this seems like a necessary hack. I am responding to
        # IID_IConnectDemoEvents but only creating an IDispatch gateway object.
        if iid == IID_IConnectDemoEvents:
            return win32com.server.util.wrap(self)

    # And here is our event method which gets called.
    def OnDoneIt(self, arg):
        self.last_event_arg = arg

def CheckEvent(server, client, val, verbose):
    client.last_event_arg = None
    server.DoIt(val)
    if client.last_event_arg != val:
        raise RuntimeError("Sent %r, but got back %r" % (val, client.last_event_arg))
    if verbose:
        print("Sent and received %r" % val)

# A simple test script for all this.
# In the real world, it is likely that the code controlling the server
# will be in the same class as that getting the notifications.
def test(verbose=0):
    import win32com.client.dynamic, win32com.client.connect
    import win32com.server.policy
    server = win32com.client.dynamic.Dispatch(win32com.server.util.wrap(ConnectableServer()))
    connection = win32com.client.connect.SimpleConnection()
    client = ConnectableClient()
    connection.Connect(server, client, IID_IConnectDemoEvents)
    CheckEvent(server, client, "Hello", verbose)
    CheckEvent(server, client, str2bytes("Here is a null>\x00<"), verbose)
    CheckEvent(server, client, "Here is a null>\x00<", verbose)
    val = "test-\xe0\xf2"  # 2 extended characters.
    CheckEvent(server, client, val, verbose)
    if verbose:
        print("Everything seemed to work!")
    # Aggressive memory leak checking (ie, do nothing!) :-) All should cleanup OK???

if __name__ == '__main__':
    test(1)
ca04c2e7dbfd946dbcca42e690b220b9db48573d
50948d4cb10dcb1cc9bc0355918478fb2841322a
/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_py3.py
365b0c4a2811f97f28d15960c152de63d4aff27e
[ "MIT" ]
permissive
xiafu-msft/azure-sdk-for-python
de9cd680b39962702b629a8e94726bb4ab261594
4d9560cfd519ee60667f3cc2f5295a58c18625db
refs/heads/master
2023-08-12T20:36:24.284497
2019-05-22T00:55:16
2019-05-22T00:55:16
187,986,993
1
0
MIT
2020-10-02T01:17:02
2019-05-22T07:33:46
Python
UTF-8
Python
false
false
2,787
py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .entity_health_state_py3 import EntityHealthState class DeployedServicePackageHealthState(EntityHealthState): """Represents the health state of a deployed service package, containing the entity identifier and the aggregated health state. :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param node_name: Name of the node on which the service package is deployed. :type node_name: str :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str :param service_manifest_name: Name of the manifest describing the service package. :type service_manifest_name: str :param service_package_activation_id: The ActivationId of a deployed service package. If ServicePackageActivationMode specified at the time of creating the service is 'SharedProcess' (or if it is not specified, in which case it defaults to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str """ _attribute_map = { 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, 'node_name': {'key': 'NodeName', 'type': 'str'}, 'application_name': {'key': 'ApplicationName', 'type': 'str'}, 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: super(DeployedServicePackageHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) self.node_name = node_name self.application_name = application_name self.service_manifest_name = service_manifest_name self.service_package_activation_id = service_package_activation_id
13212c64b30f21f6d73d7d674933112d8939b4ad
394bde9dad62eb0b653b3b5b921dfd0425988298
/openstackclient/tests/identity/v2_0/test_role_assignment.py
ab48c2f418b49a80a8f72d157b4b7ee561ecb851
[ "Apache-2.0" ]
permissive
davisaaronj/python-openstackclient
13e02dfb0808a1fcabad9e7dcd6184454fc05bf0
5f6257206e1df085d1432778cfbc4489b87bc256
refs/heads/master
2020-05-29T12:28:29.226258
2016-07-27T19:50:21
2016-07-27T19:50:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,516
py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy import mock from openstackclient.common import exceptions from openstackclient.identity.v2_0 import role_assignment from openstackclient.tests import fakes from openstackclient.tests.identity.v2_0 import fakes as identity_fakes class TestRoleAssignment(identity_fakes.TestIdentityv2): def setUp(self): super(TestRoleAssignment, self).setUp() class TestRoleAssignmentList(TestRoleAssignment): columns = ( 'Role', 'User', 'Project', ) def setUp(self): super(TestRoleAssignment, self).setUp() # Get a shortcut to the UserManager Mock self.users_mock = self.app.client_manager.identity.users self.users_mock.reset_mock() # Get a shortcut to the ProjectManager Mock self.projects_mock = self.app.client_manager.identity.projects self.projects_mock.reset_mock() # Get a shortcut to the RoleManager Mock self.roles_mock = self.app.client_manager.identity.roles self.roles_mock.reset_mock() self.projects_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.PROJECT), loaded=True, ) self.users_mock.get.return_value = fakes.FakeResource( None, copy.deepcopy(identity_fakes.USER), loaded=True, ) self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, copy.deepcopy(identity_fakes.ROLE), loaded=True, ), ] # Get the command object to test self.cmd = role_assignment.ListRoleAssignment(self.app, None) def test_role_assignment_list_no_filters(self): arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # This argument combination should raise a CommandError self.assertRaises( exceptions.CommandError, self.cmd.take_action, parsed_args, ) def test_role_assignment_list_only_project_filter(self): arglist = [ '--project', identity_fakes.project_name, ] verifylist = [ ('project', identity_fakes.project_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # This argument combination should raise a CommandError self.assertRaises( exceptions.CommandError, self.cmd.take_action, parsed_args, ) def test_role_assignment_list_only_user_filter(self): arglist = [ '--user', identity_fakes.user_name, ] verifylist = [ ('user', identity_fakes.user_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # This argument combination should raise a CommandError self.assertRaises( exceptions.CommandError, self.cmd.take_action, parsed_args, ) def test_role_assignment_list_project_and_user(self): self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, copy.deepcopy( identity_fakes.ROLE), loaded=True, ), fakes.FakeResource( None, copy.deepcopy( identity_fakes.ROLE_2), loaded=True, ), ] arglist = [ '--project', identity_fakes.project_name, '--user', identity_fakes.user_name, ] verifylist = [ ('user', identity_fakes.user_name), ('project', identity_fakes.project_name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # 
containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) self.roles_mock.roles_for_user.assert_called_with( identity_fakes.user_id, identity_fakes.project_id, ) self.assertEqual(self.columns, columns) datalist = (( identity_fakes.role_id, identity_fakes.user_id, identity_fakes.project_id, ), (identity_fakes.ROLE_2['id'], identity_fakes.user_id, identity_fakes.project_id, ),) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_def_creds(self): auth_ref = self.app.client_manager.auth_ref = mock.MagicMock() auth_ref.project_id.return_value = identity_fakes.project_id auth_ref.user_id.return_value = identity_fakes.user_id self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, copy.deepcopy( identity_fakes.ROLE), loaded=True, ), fakes.FakeResource( None, copy.deepcopy( identity_fakes.ROLE_2), loaded=True, ), ] arglist = [ '--auth-user', '--auth-project', ] verifylist = [ ('authuser', True), ('authproject', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) self.roles_mock.roles_for_user.assert_called_with( identity_fakes.user_id, identity_fakes.project_id, ) self.assertEqual(self.columns, columns) datalist = (( identity_fakes.role_id, identity_fakes.user_id, identity_fakes.project_id, ), (identity_fakes.ROLE_2['id'], identity_fakes.user_id, identity_fakes.project_id, ),) self.assertEqual(datalist, tuple(data)) def test_role_assignment_list_by_name_project_and_user(self): self.roles_mock.roles_for_user.return_value = [ fakes.FakeResource( None, copy.deepcopy( identity_fakes.ROLE), loaded=True, ), fakes.FakeResource( None, copy.deepcopy( identity_fakes.ROLE_2), loaded=True, ), ] arglist = [ '--project', identity_fakes.project_name, '--user', identity_fakes.user_name, '--names' ] verifylist = [ ('user', identity_fakes.user_name), ('project', identity_fakes.project_name), ('names', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) # In base command class Lister in cliff, abstract method take_action() # returns a tuple containing the column names and an iterable # containing the data to be listed. columns, data = self.cmd.take_action(parsed_args) self.roles_mock.roles_for_user.assert_called_with( identity_fakes.user_id, identity_fakes.project_id, ) self.assertEqual(self.columns, columns) datalist = (( identity_fakes.role_name, identity_fakes.user_name, identity_fakes.project_name, ), (identity_fakes.ROLE_2['name'], identity_fakes.user_name, identity_fakes.project_name, ),) self.assertEqual(datalist, tuple(data))
84c661b811a5bddefd7c514ef2e30e4620d1de47
e8ea12b8114b5751c5a840c87da50773f8718039
/app/aldrovanda/urls.py
b45d5490ca2f5af49698cb183e287331f18ca424
[]
no_license
juanros13/aldrovanda
786964fb6896bc60216e0d297696fa1742d3e9f8
0fd363a91b4880da5f3d7978d62474eb81ff8307
refs/heads/master
2021-01-18T15:21:53.320987
2012-10-12T08:56:35
2012-10-12T08:56:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
652
py
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static

admin.autodiscover()

urlpatterns = patterns('aldrovanda.views',
    # Examples:
    # url(r'^$', 'app.views.home', name='home'),
    # url(r'^app/', include('app.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^(?P<product_id>\d+)', 'detail'),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
8fdbc71c124a3d8aa5382ae8e28eb00b798df2e8
bcb36baf1b3d3eceffba383f72c2b5335cc7048d
/leetcode/198_house_robber/house_robber.py
a4ab696950ba57ffa5c4ef74a78060c3b6c8c914
[]
no_license
paulghaddad/solve-it
0aa1400cefab783f4ea757921811668fb2c9477c
e0f72be0fca82bc0378def5499f7158bafff975b
refs/heads/master
2023-01-24T03:46:24.285793
2021-07-06T19:44:29
2021-07-06T19:44:29
200,406,482
2
0
null
2023-01-06T13:53:43
2019-08-03T18:07:53
Python
UTF-8
Python
false
false
744
py
# Time Cost: O(n)
# Space Cost: O(n)
def rob(nums):
    if not nums:
        return 0
    if len(nums) < 3:
        return max(nums)

    # max_profit_at_each_house[i] holds the best total when house i is the
    # last one robbed.
    max_profit_at_each_house = [0] * len(nums)
    max_profit_at_each_house[0:2] = nums[0:2]
    max_profit_at_each_house[2] = nums[0] + nums[2]
    current_max = max(max_profit_at_each_house)

    for i in range(3, len(nums)):
        profit_two_back = max_profit_at_each_house[i - 2]
        profit_three_back = max_profit_at_each_house[i - 3]
        # Robbing house i means the previously robbed house was i-2 or i-3.
        max_profit_at_each_house[i] = max(
            profit_two_back + nums[i],
            profit_three_back + nums[i]
        )
        if max_profit_at_each_house[i] > current_max:
            current_max = max_profit_at_each_house[i]

    return current_max
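# Illustrative check (added for clarity; not part of the original solution):
if __name__ == "__main__":
    # The dp table evolves to [2, 7, 11, 10, 12]; the best plan robs
    # the houses at indices 0, 2 and 4.
    assert rob([2, 7, 9, 3, 1]) == 12
    assert rob([1, 2, 3, 1]) == 4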
51478892c734352ed8fe2f42a076a778a0ee4a67
e70a6500f0ad67ca418c34daaebddba60746525b
/prototypes/regex/tokenizer.py
62af0f6537149c0c6d5da1dc1a5f9ee6d4be7947
[ "BSD-3-Clause" ]
permissive
DasIch/editor
e2a2d47c59fa3e12696f0f983c8223c7337d0120
d906fdae92e1b1856e9b06e51dcd3a092be7b13d
refs/heads/master
2016-09-01T17:43:15.494985
2012-10-17T01:00:33
2012-10-17T01:00:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,801
py
# coding: utf-8
"""
    regex.tokenizer
    ~~~~~~~~~~~~~~~

    :copyright: 2012 by Daniel Neuhäuser
    :license: BSD, see LICENSE.rst
"""
from regex.parser import parse
from regex.matcher import Span


class TokenizerError(Exception):
    def __init__(self, reason, position):
        Exception.__init__(self, reason, position)
        self.reason = reason
        self.position = position


class Token(object):
    def __init__(self, lexeme, span):
        self.lexeme = lexeme
        self.span = span

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.lexeme == other.lexeme and self.span == other.span
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.lexeme, self.span)


class Tokenizer(object):
    def __init__(self, definitions):
        self.definitions = []
        for regex, token_cls in definitions:
            self.definitions.append((parse(regex).compile(), token_cls))

    def __call__(self, string):
        start = 0
        while string:
            token = self.match_token(string, start)
            if token is None:
                raise TokenizerError(
                    "string cannot be further consumed at position %d" % start,
                    start
                )
            token, string, start = token
            yield token

    def match_token(self, string, start=0):
        for matcher, token_cls in self.definitions:
            end = matcher.match(string)
            if end is not None:
                return (
                    token_cls(string[:end], Span(start, start + end)),
                    string[end:],
                    start + end
                )
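# Usage sketch (added for illustration; assumes the sibling regex.parser and
# regex.matcher modules behave as used above, and the token classes are
# hypothetical subclasses of Token):
#
#   class Number(Token): pass
#   class Space(Token): pass
#
#   tokenize = Tokenizer([("[0-9][0-9]*", Number), (" ", Space)])
#   list(tokenize("1 2"))
#   # -> [Number('1', Span(0, 1)), Space(' ', Span(1, 2)), Number('2', Span(2, 3))]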
f543081460730c27cc2b22cadb1fbb32ce87b65e
f642c054451aa3c87bb18fa63037eea0e6358bda
/geektrust/loan_payments/loan_payments/common/utils.py
6a7587eee93092587afd23ed034337f7756f4108
[]
no_license
devendraprasad1984/python
30f3a539e92be13d893246ad28a42907457a38d5
0f1badabba07fbe7f5f792b7e543c0748eecd6c7
refs/heads/master
2023-07-21T08:22:45.193077
2021-08-27T15:09:28
2021-08-27T15:09:28
254,812,552
0
0
null
null
null
null
UTF-8
Python
false
false
3,862
py
import base64
import json
from uuid import uuid4

from django.core import serializers
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signing import Signer
from django.utils import crypto

from loan_manager import models
from ..common import lookup, field_names
from ..middleware import signer_check

failed = "failed"
success = "success"
GET = 'GET'
POST = 'POST'
header = 'HEADER'
signer_header_key = 'geek-signer'
x_csrf_key = 'X-CSRFToken'
app_code = 'g3eK_t7R_#278_s___T'
len_of_uid = 17
CONTENT_TYPE = "application/json"
not_allowed = 'operation not allowed or signer or jwt not verified or borrower mail not matched'
NO_OP_ALLOWED = json.dumps({field_names.msg: not_allowed, field_names.status: failed})
MISSING_FIELD_MSG = {field_names.msg: "some input values are missing or left blank. ", field_names.status: failed}

def getSum(object, field):
    sum = 0
    num_list = [float(x[field]) for x in object.values()]
    for val in num_list:
        sum += val.real
    return float(sum)

def getSumFromJsonConverted(object, field):
    return sum([float(x[field]) if x[field] != None else 0 for x in object])

def getList(ds):
    return list(ds.values())

def get_field_values_from_model_object(object, field):
    return getattr(object, field)

def getJsonSet(qset):
    data = json.loads(serializers.serialize('json', qset))
    rows = [f['fields'] for f in data]
    return rows

def jsonEncode(obj):
    return json.loads(json.dumps(obj, cls=DjangoJSONEncoder))

def getBodyFromReq(req):
    return json.loads(req.body.decode('utf-8'))

def getuuid():
    uuid = base64.urlsafe_b64encode(uuid4().bytes).rstrip(b'=').decode('ascii')
    return uuid[0:len_of_uid].__str__()

def getSecretAccessKey():
    return crypto.get_random_string(len_of_uid)

def getSignerObject():
    signer = Signer()
    object = {field_names.key: getSecretAccessKey(), field_names.app_code: app_code}
    signed_object = signer.sign_object(object)
    return signed_object, object

def getUnSignerObject(signObj):
    signer = Signer()
    matched = decoded = False
    key = not_allowed
    subscription = None
    try:
        unsignedObj = signer.unsign_object(signObj)
        decoded = True
    except Exception as ex:
        decoded = False
    if decoded == True:
        key = unsignedObj[field_names.key]
        subcription_object = lookup.check_subscriber(secret_key=key)
        subscription = subcription_object[field_names.object]
        matched = unsignedObj[field_names.app_code] == app_code
    return {field_names.key: key, field_names.matched: matched, field_names.subscription: subscription}

def getUnsigned(signkey):
    signer = Signer()
    return signer.unsign_object(signkey)

def get_uniq_bankid():
    uid = f'bnk{getuuid()}'
    return uid

def get_uniq_customerid():
    uid = f'cst{getuuid()}'
    return uid

def get_uniq_loanid():
    uid = f'ln{getuuid()}'
    return uid

def addlog(type, logObj):
    try:
        dblog = models.QUERY_LOG(
            type=type,
            log=json.dumps(logObj)
        )
        dblog.save()
        return True
    except Exception as ex:
        return str(ex)

def adderror(type, trace):
    addlog(type, {field_names.error: trace})

# returning middleware decorator function with parameter to deal with external
# api type or having CRUD access to do operations
def external_check_signer_middleware():
    return signer_check.check_signer_with_api_type(api_type=field_names.external)

def crud_check_signer_middleware():
    return signer_check.check_signer_with_api_type(api_type=field_names.crud)

def manager_check_signer_middleware():
    return signer_check.check_signer_with_api_type(api_type=field_names.manager)

def borrower_check_signer_middleware():
    return signer_check.check_signer_with_api_type(api_type=field_names.borrower)
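# Illustrative round trip for the signing helpers above (added; Signer derives
# its key from Django's SECRET_KEY, so this holds as long as that setting is
# unchanged between signing and unsigning):
#   signed, raw = getSignerObject()
#   getUnsigned(signed) == raw   # -> True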
1da209714527b7ebcdecc8cc1e560bd621f8a5e1
cef6d09715fb354c99987f927ecb961b8bc0e1f5
/app.py
d7170c77c4db2fcb8e6e231a1ae668fc774b242a
[]
no_license
markoknezevic/Store_app
745944917fd9f4a5e3f80de82a5cf6151f52856b
d5471fe7c65585e54a70bb3ecf183d558f3a9e52
refs/heads/master
2020-07-29T01:11:27.990991
2019-09-20T11:05:12
2019-09-20T11:05:12
209,610,913
0
0
null
null
null
null
UTF-8
Python
false
false
824
py
import os

from flask import Flask
from flask_restful import Api
from flask_jwt import JWT

from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item, ItemList
from resources.store import Store, StoreList

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.secret_key = "jose"
api = Api(app)

jwt = JWT(app, authenticate, identity)

api.add_resource(Item, "/item/<string:name>")
api.add_resource(ItemList, "/items")
api.add_resource(UserRegister, "/register")
api.add_resource(Store, "/store/<string:name>")
api.add_resource(StoreList, "/store")

if __name__ == "__main__":
    from db import db
    db.init_app(app)
    app.run(port=5000, debug=True)
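# Typical request flow (added for reference; /auth is Flask-JWT's default
# authentication endpoint):
#   POST /auth with {"username": "...", "password": "..."} returns an access
#   token, which is then sent as "Authorization: JWT <token>" on protected
#   calls such as GET /item/<name>.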
bc9bddb24ed72ad9acd6f71deb360923c4599a63
1f4505ed66f4fd68c6d1edf18ecff58362742fad
/algorithm/Tree/922_Sort_Array_By_Parity_II.py
e4ee8de225c664f3d08c609454cb4b414ed66a5a
[ "MIT" ]
permissive
nishitpatel01/Data-Science-Toolbox
0d9b63a365698cc4a423abd5881cde8f6bf672be
80dc1310d103c9481feff8792426c550ddcc0a36
refs/heads/master
2020-05-19T08:26:40.319321
2019-05-04T05:58:48
2019-05-04T05:58:48
184,921,541
1
1
MIT
2019-05-04T16:53:21
2019-05-04T16:53:20
null
UTF-8
Python
false
false
370
py
from typing import List


class Solution:
    def sortArrayByParityII(self, A: List[int]) -> List[int]:
        j = 1
        for i in range(0, len(A), 2):
            # If A[i] is odd, it needs to be swapped with an even value.
            if A[i] % 2:
                # Advance j over odd indices until it finds an even value.
                while A[j] % 2:
                    j += 2
                A[i], A[j] = A[j], A[i]
        return A
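# Illustrative trace (added; not part of the submitted solution):
#   Solution().sortArrayByParityII([4, 2, 5, 7])
#   i=0: A[0]=4 is even, nothing to do
#   i=2: A[2]=5 is odd; j=1 points at the even 2, so swap -> [4, 5, 2, 7]
#   Every even index now holds an even number, every odd index an odd one.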
0ea805e9b47a7af0d679f438dd1e1e660e4420cc
8c54df511f0d7e08cd63d08a18e38f67c60b43a9
/Classes/inheritance.py
ddf6e3a4b40fa038e2d459c18f094baccd20ee7a
[]
no_license
dsbrown1331/Python2
312dd040423daa99adf9cb3b420b276be0c6294d
930990a29ee09611ec8dd165acc451fd048265cb
refs/heads/master
2020-03-24T20:03:20.519261
2018-08-10T20:07:35
2018-08-10T20:07:35
142,957,210
0
0
null
null
null
null
UTF-8
Python
false
false
2,920
py
class Member:
    """a member of a school"""

    def __init__(self, name, email):
        print("Initializing member")
        self.name = name
        self.email = email

    def __str__(self):
        member_string = "{}\n{}".format(self.name, self.email)
        return member_string

class Teacher(Member):
    """teacher at school"""

    def __init__(self, name, email, faculty_id):
        print("Initializing teacher")
        super().__init__(name, email)
        self.faculty_id = faculty_id
        self.courses_teaching = []

    def __str__(self):
        member_str = super().__str__()
        member_str += "\n{}".format(self.faculty_id)
        if len(self.courses_teaching) > 0:
            member_str += "\nClasses Teaching: "
            for c in self.courses_teaching:
                member_str += c + ", "
        return member_str

    def add_class(self, class_name):
        self.courses_teaching.append(class_name)

class Student(Member):
    def __init__(self, name, email, student_id):
        super().__init__(name, email)
        self.student_id = student_id
        self.classes_enrolled = list()
        self.transcript = dict()  # maps from classname to letter grade

    def __str__(self):
        student_str = super().__str__()
        student_str += "\n{}".format(self.student_id)
        student_str += "\nClasses:\n"
        for c in self.classes_enrolled:
            student_str += "\t{}\n".format(c)
        student_str += "\nTranscript:"
        for k, v in self.transcript.items():
            student_str += "\n\t{} : {}".format(k, v)
        student_str += "\nGPA = {}".format(self.compute_gpa())
        return student_str

    def add_class(self, class_name):
        self.classes_enrolled.append(class_name)

    def add_class_grade(self, class_name, letter_grade):
        self.transcript[class_name] = letter_grade

    def letter_to_number(self, letter):
        if letter == "A":
            return 4.0
        elif letter == "B":
            return 3.0
        elif letter == "C":
            return 2.0
        elif letter == "D":
            return 1.0
        else:
            return 0.0

    def compute_gpa(self):
        gpa_sum = 0
        for c, g in self.transcript.items():
            gpa_sum += self.letter_to_number(g)
        return gpa_sum / len(self.transcript)

# code to test classes
def main():
    bob = Member("Bob Baggins", "[email protected]")
    print(bob)
    daniel = Teacher("Daniel B.", "[email protected]", 123)
    print(daniel)
    daniel.add_class("Math")
    daniel.add_class("Sleeping 101")
    print(daniel)
    print("-------")
    bill = Student("Bill Bunny", "[email protected]", 1345)
    bill.add_class("Chemistry")
    bill.add_class("Biology")
    bill.add_class_grade("English", "B")
    bill.add_class_grade("Mermaid 101", "A")
    print(bill)

if __name__ == "__main__":
    main()
8a2a26e1390e645131ceb530b85ca9ee1106ac52
0fdc732fcdad1c0d76d6ec80cb6e25b6ec17d6e1
/crud_sqlite/manage.py
da011eb5a543d9ae198637e45311b3c5a1fc13c1
[ "MIT" ]
permissive
markbirds/Django-Code-Repo
9b3c8bfba948dd8ea1be71e31cbfd2ef26bfa157
b55762d2dab00640acf2e8e00ddc66716d53c6b5
refs/heads/master
2023-01-05T22:44:16.405853
2020-11-03T07:17:50
2020-11-03T07:17:50
299,615,438
3
0
null
null
null
null
UTF-8
Python
false
false
667
py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys

def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crud_sqlite.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
5aab043341d3666b02ce0e83ee28d159970da1d9
790e797fecb4d57d7af37db512f44b47cd956483
/migrations/versions/2019_11_07_8f579339eb2d_introduce_broadcast_sent_flag.py
b2b1d373ea5100db49535e001cb7ac1dda39e1a6
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
RuslanBitcash/ultimate-poll-bot
fc4d5d9a3e8c5bb0200b7d935e2e049f34d9ee10
33bc71b56f79453359043bd0e778cd153d3a83a3
refs/heads/master
2021-02-10T07:46:39.453842
2020-02-21T11:51:59
2020-02-21T11:51:59
244,362,504
1
0
MIT
2020-03-02T12:12:34
2020-03-02T12:12:33
null
UTF-8
Python
false
false
705
py
"""Introduce broadcast_sent flag Revision ID: 8f579339eb2d Revises: c4feb636bb05 Create Date: 2019-11-07 15:48:23.587967 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8f579339eb2d' down_revision = 'c4feb636bb05' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('broadcast_sent', sa.Boolean(), server_default='false', nullable=False)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('user', 'broadcast_sent') # ### end Alembic commands ###
205799f30dbd8c4516c374573f461240e4d5045d
8d90e2eae476ecbe88d46ef2f03fe7ba92cc733b
/Programming Fundamentals with Python/3. List_basics/Ex_list_basics_ex5_faro_shuffle.py
7a2a16c2468ae995f27471c192a5c4a4a280203b
[]
no_license
KaterinaMutafova/SoftUni
c3f8bae3c2bf7bd4038da010ca03edc412672468
7aeef6f25c3479a8d677676cb1d66df20ca0d411
refs/heads/main
2023-03-08T10:53:49.748153
2021-02-19T15:55:13
2021-02-19T15:55:13
317,597,660
1
0
null
null
null
null
UTF-8
Python
false
false
538
py
cards = input().split(" ")
n_shuffles = int(input())

current_shuffle = cards
new_shuffle = []
counter = 0

for j in range(n_shuffles):
    for i in range(len(current_shuffle) // 2):
        second_deck_start_diff = len(current_shuffle) // 2
        new_shuffle.append(current_shuffle[i])
        new_shuffle.append(current_shuffle[i + second_deck_start_diff])
    current_shuffle = new_shuffle
    counter += 1
    if counter == n_shuffles:
        pass
    else:
        new_shuffle = []

print(new_shuffle)
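# Illustrative run (added; a faro shuffle interleaves the two halves of the deck):
#   input: "1 2 3 4 5 6 7 8" followed by 1
#   halves [1 2 3 4] and [5 6 7 8] interleave to ['1', '5', '2', '6', '3', '7', '4', '8']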
f8d5b9fd542aa6f7c1e83fc3627dceffcf010d27
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03268/s723211580.py
a4e8e5dc56b77ccae356070cd7fdcb06fe135f85
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
357
py
# Counts ordered triples (a, b, c) with 1 <= a, b, c <= n whose pairwise sums
# a+b, b+c and c+a are all divisible by k. That requires all three values to
# be congruent to 0 (mod k), or, when k is even, all three congruent to k/2
# (mod k), so the answer is zero**3 + rem**3 (or count**3 for odd k).
n, k = list(map(int, input().split()))

count = 0
i = 1
zero = 0
rem = 0

if k % 2 == 0:
    while i <= n:
        if i % k == 0:
            zero += 1
        elif i % int(k / 2) == 0:
            rem += 1
        i += 1
    print(zero ** 3 + rem ** 3)
else:
    while i <= n:
        if i % k == 0:
            count += 1
        i += 1
    print(count ** 3)
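# Worked check (added): n=3, k=2 -> zero counts {2}, rem counts {1, 3},
# so the answer is 1**3 + 2**3 = 9, covering triples such as (2, 2, 2)
# and the eight triples drawn from {1, 3}.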
8f9a3b4ca834f94600df59ab404aaa4eebca0837
3d19e1a316de4d6d96471c64332fff7acfaf1308
/Users/D/drkane/northern_ireland_charity_commission_-_list_of_char.py
09ef93417596863fff7ab8b79eb7a099eecfba42
[]
no_license
BerilBBJ/scraperwiki-scraper-vault
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
65ea6a943cc348a9caf3782b900b36446f7e137d
refs/heads/master
2021-12-02T23:55:58.481210
2013-09-30T17:02:59
2013-09-30T17:02:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,848
py
###############################################################################
# Scraper for Organisations Previously Known as Charities from the Northern Ireland Charity Commission
# http://www.charitycommissionni.org.uk/Library/ccni_files/List_of_Organisations.htm
###############################################################################
import scraperwiki
from BeautifulSoup import BeautifulSoup
import time
import re

# retrieve the Register of Mergers page
starting_url = 'http://www.charitycommissionni.org.uk/Library/ccni_files/List_of_Organisations.htm'
html = scraperwiki.scrape(starting_url)
soup = BeautifulSoup(html)

# ps = soup.findAll(style='font-size:8.0pt;font-family:Verdana')
ps = soup.findAll('p')
for p in ps:
    spans = p.findAll(style='font-size:8.0pt;font-family:Verdana')
    if len(spans) > 0:
        name = ''
        for span in spans:
            try:
                name = name + span.string
            except:
                continue
        name = name.replace('\r', ' ')
        name = name.replace('\n', '')
        name = name.replace('&amp;', '&')
        if name == '&nbsp;':
            continue
        elif name == ' ':
            continue
        elif name == '':
            continue
        elif name == None:
            continue
        else:
            record = {"name": name}
            scraperwiki.datastore.save(["name"], record)
            print record, "- saved"
a917f4358c749bf08b054a3886d45d1c55f1eb14
8192495d68eb7d1c1e4fe967e447422f8ad4f340
/tensorflow_datasets/image/inaturalist.py
10735264e3be107135ff69847b82702595fa9baf
[ "Apache-2.0" ]
permissive
thomwolf/datasets
a3fb69a8da680d2d1d1773e7cdd56c41e7505381
bc6c38a1bf079ddd030c7750047ee7e4355b813a
refs/heads/master
2021-04-22T00:40:02.747090
2020-03-24T21:44:03
2020-03-24T21:48:45
249,833,822
2
0
Apache-2.0
2020-03-24T22:40:55
2020-03-24T22:40:54
null
UTF-8
Python
false
false
6,047
py
# coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """INaturalist datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import six.moves.urllib as urllib import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds _DESCRIPTION = """\ This dataset contains a total of 5,089 categories, across 579,184 training images and 95,986 validation images. For the training set, the distribution of images per category follows the observation frequency of that category by the iNaturalist community. Although the original dataset contains some images with bounding boxes, currently, only image-level annotations are provided (single label/image). In addition, the organizers have not published the test labels, so we only provide the test images (label = -1). """ _CITATION = """\ @InProceedings{Horn_2018_CVPR, author = { Van Horn, Grant and Mac Aodha, Oisin and Song, Yang and Cui, Yin and Sun, Chen and Shepard, Alex and Adam, Hartwig and Perona, Pietro and Belongie, Serge}, title = {The INaturalist Species Classification and Detection Dataset}, booktitle = { The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2018} } """ _URL = "http://www.vision.caltech.edu/~gvanhorn/datasets/inaturalist/fgvc4_competition/" class INaturalist2017(tfds.core.GeneratorBasedBuilder): """Dataset from the INaturalist Competition 2017.""" VERSION = tfds.core.Version("0.1.0") def _info(self): """Define the dataset info.""" return tfds.core.DatasetInfo( builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({ "id": tfds.features.Text(), "image": tfds.features.Image(), "label": tfds.features.ClassLabel( names_file=tfds.core.get_tfds_path( os.path.join("image", "inaturalist_labels.txt"))), "supercategory": tfds.features.ClassLabel( names_file=tfds.core.get_tfds_path( os.path.join("image", "inaturalist_supercategories.txt"))), }), supervised_keys=("image", "label"), homepage="https://github.com/visipedia/inat_comp/tree/master/2017", citation=_CITATION) def _split_generators(self, dl_manager): output_files = dl_manager.download_and_extract({ "trainval_images": tfds.download.Resource( url=urllib.parse.urljoin(_URL, "train_val_images.tar.gz"), extract_method=tfds.download.ExtractMethod.NO_EXTRACT), "trainval_annos": urllib.parse.urljoin(_URL, "train_val2017.zip"), "test_images": tfds.download.Resource( url=urllib.parse.urljoin(_URL, "test2017.tar.gz"), extract_method=tfds.download.ExtractMethod.NO_EXTRACT), }) return [ tfds.core.SplitGenerator( name=tfds.Split.TRAIN, gen_kwargs=dict( images_archive=dl_manager.iter_archive( output_files["trainval_images"]), annon_file=os.path.join(output_files["trainval_annos"], "train2017.json"), ), ), tfds.core.SplitGenerator( name=tfds.Split.VALIDATION, gen_kwargs=dict( images_archive=dl_manager.iter_archive( output_files["trainval_images"]), 
annon_file=os.path.join(output_files["trainval_annos"], "val2017.json"), ), ), tfds.core.SplitGenerator( name=tfds.Split.TEST, gen_kwargs=dict( images_archive=dl_manager.iter_archive( output_files["test_images"]), annon_file=None, ), ), ] def _generate_examples(self, images_archive, annon_file): """Generate examples.""" if annon_file is not None: # Training and validation images. with tf.io.gfile.GFile(annon_file, "r") as f: data = json.load(f) # First read the annotations file, used to filter the contents of the # tar.gz file when yielding examples. key2data = {} for image, annotation in zip(data["images"], data["annotations"]): category_id = annotation["category_id"] category = data["categories"][category_id]["name"] supercategory = data["categories"][category_id]["supercategory"] key = os.path.basename(image["file_name"]).split(".")[0] key2data[key] = { "id": key, "label": category, "supercategory": supercategory, } # Read tar.gz file containing train & validation images and yield relevant # examples. for fpath, fobj in images_archive: key = os.path.basename(fpath).split(".")[0] if key in key2data: data = key2data[key].copy() data["image"] = fobj yield key, data else: # Read tar.gz file containing all test images and yield all examples. for fpath, fobj in images_archive: key = os.path.basename(fpath).split(".")[0] # Note: test labels are not annotated, so just return -1 as labels. yield key, { "id": key, "image": fobj, "label": -1, "supercategory": -1, }
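# Typical consumption (added; standard tensorflow_datasets pattern -- the
# registered name 'i_naturalist2017' is assumed from the builder class name):
#   import tensorflow_datasets as tfds
#   ds = tfds.load('i_naturalist2017', split='train', as_supervised=True)
#   for image, label in ds.take(1):
#       ...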
7980ff9b71c7abc0630352b3743f07c480b83c0c
d8a5fc2195165c970e2340eee87ae2ad5322da29
/{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/news/migrations/0005_auto__del_field_articleattachment_attachement__add_field_articleattach.py
ac6e714243b7731eb922fbb4cd67843300fe4680
[ "BSD-3-Clause" ]
permissive
lendlsmith/chrisdev-cookiecutter
b76e6194aa8369c2dbf1dac73e3282e025d2b146
e0ab2d16bd1a066800ce46bb1740b1254c259a70
refs/heads/master
2021-10-11T22:20:02.391847
2014-07-21T16:57:32
2014-07-21T16:57:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
13,089
py
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'ArticleAttachment.attachement' db.delete_column('news_articleattachment', 'attachement_id') # Adding field 'ArticleAttachment.attachment' db.add_column('news_articleattachment', 'attachment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'], null=True, blank=True), keep_default=False) def backwards(self, orm): # Adding field 'ArticleAttachment.attachement' db.add_column('news_articleattachment', 'attachement', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'], null=True, blank=True), keep_default=False) # Deleting field 'ArticleAttachment.attachment' db.delete_column('news_articleattachment', 'attachment_id') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], 
{'max_length': '100'}) }, 'filer.file': { 'Meta': {'object_name': 'File'}, '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}), 'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.folder': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'filer.image': { 'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']}, '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'default_caption': ('django.db.models.fields.CharField', [], 
{'max_length': '255', 'null': 'True', 'blank': 'True'}), 'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}), 'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'}) }, 'news.article': { 'Meta': {'ordering': "('-published',)", 'object_name': 'Article'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'content_html': ('django.db.models.fields.TextField', [], {}), 'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}), 'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'section': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['news.Section']"}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'summary_html': ('django.db.models.fields.TextField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'news.articleattachment': { 'Meta': {'object_name': 'ArticleAttachment'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attachments'", 'null': 'True', 'to': "orm['news.Article']"}), 'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'news.articleimage': { 'Meta': {'object_name': 'ArticleImage'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': "orm['news.Article']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}) }, 'news.revision': { 'Meta': {'object_name': 'Revision'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['news.Article']"}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'article_revisions'", 'to': "orm['auth.User']"}), 'content': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'summary': ('django.db.models.fields.TextField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}) }, 'news.section': { 'Meta': {'ordering': "('title',)", 'object_name': 'Section'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'taggit.tag': { 'Meta': {'object_name': 'Tag'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, 'taggit.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"}) } } complete_apps = ['news']
190b9124071676688dc962a736acac6ca92d2d14
f92496bfcd9c77525eb5f37cd205e711b9882f13
/manage.py
ca747c16f8be9959ad072444a95a9b61628c03c2
[]
no_license
crowdbotics-apps/flipit-18243
69f9d5b6d3ac7471219fa84ebe6c93f24cdebbd8
22309b917b3d100f96c025128899d5af7238c637
refs/heads/master
2022-11-05T09:13:24.850843
2020-06-19T03:59:20
2020-06-19T03:59:20
273,399,693
0
0
null
null
null
null
UTF-8
Python
false
false
632
py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys

def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flipit_18243.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
e11ffb8ac23069b6dc4034d4304ba5c6a542a9d5
5892ee857d8e3b747d57cdd63a17352e839a6320
/tiendita/views.py
8ef27776178a454af8efd1d582d434c0c8ecbdaa
[]
no_license
HugOoOguH/ComicsShop
aac1af3e558def3b14b628e737ad0c09910759b4
390fab60a5bf5701f843b9b36ea64f62577e8011
refs/heads/master
2020-12-06T21:32:26.379380
2016-09-02T19:15:56
2016-09-02T19:15:56
67,190,606
0
0
null
null
null
null
UTF-8
Python
false
false
731
py
from django.shortcuts import render, get_object_or_404, redirect, HttpResponse
from django.views.generic import View

from .models import Company, Comic

# Create your views here.

class CompaniesListView(View):
    def get(self, request):
        template_name = "tiendita/list_company.html"
        company = Company.objects.all()
        context = {
            'company': company,
        }
        return render(request, template_name, context)

class ComicsListView(View):
    def get(self, request, id):
        template_name = "tiendita/categories.html"
        company = get_object_or_404(Company, id=id)
        comics = Comic.objects.all().filter(company=company)
        context = {
            'comics': comics,
        }
        return render(request, template_name, context)

class ProductDetailView(View):
    pass
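# Hypothetical URLconf wiring for these views (added for illustration; the
# patterns and names are assumptions, not taken from this repository):
#   url(r'^$', CompaniesListView.as_view()),
#   url(r'^company/(?P<id>\d+)/$', ComicsListView.as_view()),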
4587705cf2c8a014bd92ccf961338ae09dcf1002
354189955ae91d7fad68e1ca82460f3baf73df1f
/scripts/check_lof_proportions_in_seziure_genes.py
a975361914d8a0b85eda0bad64a742766f1064da
[]
no_license
jeremymcrae/ddd_4k
db85a121836c2f38042e66fb16189db53046c294
244b11a041ef2f80320690cf9f39e95aba5f14ec
refs/heads/master
2021-09-09T04:35:36.150425
2018-03-13T22:14:54
2018-03-13T22:14:54
54,972,015
0
0
null
null
null
null
UTF-8
Python
false
false
4,577
py
""" Copyright (c) 2015 Genome Research Ltd. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import print_function, division import os import argparse from scipy.stats import fisher_exact import pandas from ddd_4k.load_files import open_phenotypes, open_families, open_de_novos from ddd_4k.constants import PHENOTYPES, FAMILIES, TRIOS, SANGER_IDS, DIAGNOSED, \ VALIDATIONS, DENOVO_PATH, SEIZURE_GENES from ddd_4k.rank_hpo import rank_terms from ddd_4k.hpo_matches import find_hpo_matches from hpo_similarity.ontology import Ontology def get_options(): """ parse the command line arguments """ parser = argparse.ArgumentParser(description="script to test for a" \ "difference in the proportion of probands with different functional" \ "categories of de novos. This compares the proportion in genes known" \ "to be associated with seizures, and looks for differences in" \ "probands who have been recorded as having seizures versus probands" \ "without any record of seizures. 
The hypothesis is that probands with" \ "a de novo in a seizure gene, but without any record of seizures" \ "might have a difference in how severe their mutations are compared" \ "to probands who do have a record of seizures.") parser.add_argument("--de-novos", default=DENOVO_PATH, \ help="Path to table of variants in novel genes.") parser.add_argument("--phenotypes", default=PHENOTYPES, \ help="Path to table of phenotypic data from probands.") parser.add_argument("--validations", default=VALIDATIONS, \ help="Path to table of results from de novo validations.") parser.add_argument("--families", default=FAMILIES, help="Path to table of DDD families.") parser.add_argument("--trios", default=TRIOS, help="Path to table of DDD trios.") parser.add_argument("--sanger-ids", default=SANGER_IDS, help="Path to table of mapping DDD IDs to decipher IDs.") args = parser.parse_args() return args def main(): args = get_options() variants = open_de_novos(args.de_novos, args.validations) # diagnosed = pandas.read_table(args.diagnosed, sep="\t") # variants = variants[~variants["person_stable_id"].isin(diagnosed["person_id"])] # open the phenotype data, and restrict it to the probands with complete trios pheno = open_phenotypes(args.phenotypes, args.sanger_ids) trios = pandas.read_table(args.trios, sep="\t") proband_ids = set(trios["proband_stable_id"]) pheno = pheno[pheno["person_stable_id"].isin(proband_ids)] # open the HPO ontology, so we can get the set of terms which are relevant # to each disorder hpo_ontology = Ontology(None) graph = hpo_ontology.get_graph() seizure_root = "HP:0001250" seizure_terms = graph.get_descendants(seizure_root) | set([seizure_root]) pheno["has_seizures"] = find_hpo_matches(pheno["child_hpo"], seizure_terms) variants = variants[variants["symbol"].isin(SEIZURE_GENES)] seizure_variants = variants[["person_stable_id", "sex", "chrom", "pos", "ref", "alt", "symbol", "category", "consequence"]].merge( pheno[["person_stable_id", "has_seizures"]], how="left", on=["person_stable_id"]) has = seizure_variants[seizure_variants["has_seizures"]]["category"].value_counts() hasnt = seizure_variants[~seizure_variants["has_seizures"]]["category"].value_counts() print(fisher_exact([has, hasnt])) if __name__ == '__main__': main()
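The final line hands two `value_counts()` Series straight to `scipy.stats.fisher_exact`, which expects a 2x2 contingency table with both rows in the same category order. A self-contained sketch of the explicitly aligned form, with invented counts:

# Toy 2x2 Fisher's exact test: rows are (has seizures, no seizure record),
# columns are two variant categories aligned explicitly by reindexing.
import pandas as pd
from scipy.stats import fisher_exact

has = pd.Series({'loss-of-function': 12, 'functional': 5})
hasnt = pd.Series({'functional': 9, 'loss-of-function': 4})

categories = ['loss-of-function', 'functional']
table = [has.reindex(categories, fill_value=0).tolist(),
         hasnt.reindex(categories, fill_value=0).tolist()]

odds_ratio, p_value = fisher_exact(table)
print(odds_ratio, p_value)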
ac960c5200912432220c41002eaa00ee8d86af63
54e2ff8f41b98e09bf3fcb71446c539ed9524c16
/03_REST_API/dog_api/test_dog_api.py
8e825b30549b4d11863972c53be6df31a5253161
[]
no_license
vamotest/qa_automation
8b941aeebe17b783655f8391069b54bbfd64607d
54467b2334b33ae48b5fa5a65cab47fdf03967be
refs/heads/master
2023-01-07T02:05:07.645726
2020-06-04T20:31:10
2020-06-04T20:31:10
203,171,963
4
4
null
2022-12-26T20:59:48
2019-08-19T12:50:08
Python
UTF-8
Python
false
false
2,869
py
import pytest


class TestDog:

    def test_response_status(self, response_status):
        """
        Checks the success text of the server response to a request
        for a randomly chosen dog image.
        """
        if response_status.lower() == 'success':
            assert f'Response status: {response_status}'
        elif response_status != 'success':
            assert False, f'Response status: {response_status}'
        else:
            assert False, 'Something wrong'

    def test_response_status_code(self, response_status_code):
        """
        Checks the status code of the server response to a request
        for a randomly chosen dog image.
        """
        if response_status_code == 200:
            assert f'Response status code: {response_status_code}'
        elif response_status_code != 200:
            assert False, f'Response status code: {response_status_code}'
        else:
            assert False, 'Something wrong'

    def test_breed_in_list(self, response_breeds_list):
        """
        Checks that the doberman breed is found in the list of all breeds.
        """
        if 'doberman' in response_breeds_list['message']:
            assert 'Doberman in response_breeds_list'
        elif 'doberman' not in response_breeds_list['message']:
            assert False, 'Doberman not in response_breeds_list'
        else:
            assert False, 'Something wrong'

    def test_response_status_breed(self, response_status_breed):
        """
        Checks the server response status for a request for a given dog breed.
        """
        if 'success' in response_status_breed:
            assert f'Response status breed: {response_status_breed}'
        elif 'success' not in response_status_breed:
            assert False, f'Response status breed: {response_status_breed}'
        else:
            assert False, 'Something wrong'

    @pytest.mark.parametrize('breed', ["akita", "eskimo", "vizsla", "whippet"])
    def test_given_breed_in_list(self, response_breeds_list, breed):
        """
        Checks that a specific dog breed is found in the list of all breeds.
        """
        if breed in response_breeds_list['message']:
            assert f'{breed} in response_breeds_list'
        elif breed not in response_breeds_list['message']:
            assert False, f'{breed} not in response_breeds_list'
        else:
            assert False, 'Something wrong'
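These tests consume fixtures (`response_status`, `response_status_code`, `response_breeds_list`, `response_status_breed`) that live outside this file. A minimal conftest.py sketch is below, assuming the public Dog CEO API; the fixtures in the original repository may be built differently.

# conftest.py -- hypothetical fixtures for the tests above (Dog CEO API assumed).
import pytest
import requests

BASE_URL = 'https://dog.ceo/api'


@pytest.fixture
def response_status():
    # 'status' field of a random-image response, e.g. 'success'.
    return requests.get(f'{BASE_URL}/breeds/image/random').json()['status']


@pytest.fixture
def response_status_code():
    return requests.get(f'{BASE_URL}/breeds/image/random').status_code


@pytest.fixture
def response_breeds_list():
    # Full payload: {'message': {breed: [sub-breeds]}, 'status': 'success'}.
    return requests.get(f'{BASE_URL}/breeds/list/all').json()


@pytest.fixture
def response_status_breed():
    return requests.get(f'{BASE_URL}/breed/doberman/images').json()['status']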
32830090980c8ffd8b67f41cc253baa1245fb01e
592961def9fe287a31e117649f1ac1e97b085a9b
/venv/lib/python2.7/site-packages/pip/commands/show.py
5b0445325b5b827950bfb37d05cf589317a32ce6
[]
no_license
Rushin95/The_Trip_Planner-Lyft_vs_Uber
62f03a1df8c6a0268089f50f4e80ec3d9b6b9870
4eeea4029eb4df047471b92065455a6828232293
refs/heads/master
2021-01-19T11:52:47.766019
2018-05-03T23:59:58
2018-05-03T23:59:58
82,268,914
1
0
null
null
null
null
UTF-8
Python
false
false
5,941
py
from __future__ import absolute_import

from email.parser import FeedParser
import logging
import os

from pip.basecommand import Command
from pip.status_codes import SUCCESS, ERROR
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name

logger = logging.getLogger(__name__)


class ShowCommand(Command):
    """Show information about one or more installed packages."""
    name = 'show'
    usage = """
      %prog [options] <package> ..."""
    summary = 'Show information about installed packages.'

    def __init__(self, *args, **kw):
        super(ShowCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-f', '--files',
            dest='files',
            action='store_true',
            default=False,
            help='Show the full list of installed files for each package.')

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        if not args:
            logger.warning('ERROR: Please provide a package name or names.')
            return ERROR
        query = args

        results = search_packages_info(query)
        if not print_results(
                results, list_files=options.files, verbose=options.verbose):
            return ERROR
        return SUCCESS


def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p

    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        metadata = None
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                paths = [l.split(',')[0] for l in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            # noinspection PyUnresolvedReferences
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary',
                    'home-page', 'author', 'author-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package


def print_results(distributions, list_files=False, verbose=False):
    """
    Print the informations from installed distributions found.
    """
    results_printed = False
    for i, dist in enumerate(distributions):
        results_printed = True
        if i > 0:
            logger.info("---")
        logger.info("Name: %s", dist.get('name', ''))
        logger.info("Version: %s", dist.get('version', ''))
        logger.info("Summary: %s", dist.get('summary', ''))
        logger.info("Home-page: %s", dist.get('home-page', ''))
        logger.info("Author: %s", dist.get('author', ''))
        logger.info("Author-email: %s", dist.get('author-email', ''))
        logger.info("License: %s", dist.get('license', ''))
        logger.info("Location: %s", dist.get('location', ''))
        logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
        if verbose:
            logger.info("Metadata-Version: %s",
                        dist.get('metadata-version', ''))
            logger.info("Installer: %s", dist.get('installer', ''))
            logger.info("Classifiers:")
            for classifier in dist.get('classifiers', []):
                logger.info("  %s", classifier)
            logger.info("Entry-points:")
            for entry in dist.get('entry_points', []):
                logger.info("  %s", entry.strip())
        if list_files:
            logger.info("Files:")
            for line in dist.get('files', []):
                logger.info("  %s", line.strip())
            if "files" not in dist:
                logger.info("Cannot locate installed-files.txt")
    return results_printed
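Since `search_packages_info` is a plain generator, it can be exercised outside the CLI. The sketch below is a hypothetical direct call; pip's internal layout is not a stable API, so the import path only matches the pip vintage vendored above.

# Hypothetical direct use; pip internals are not a stable public API, so this
# only works against the same pip vintage as the module above.
from pip.commands.show import search_packages_info, print_results

results = search_packages_info(['pip'])  # names are canonicalized internally
print_results(results, list_files=False, verbose=True)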
ea6a0f52b27ba8519529b66cee6fe8c2d89b917f
d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d
/test/test_block_name.py
de8f97ef2f193b97e8727c0ce33e30a4bb88cf00
[]
no_license
begum-akbay/Python
2075650e0ddbf1c51823ebd749742646bf221603
fe8b47e29aae609b7510af2d21e53b8a575857d8
refs/heads/master
2023-03-28T00:11:00.997194
2021-03-25T16:38:17
2021-03-25T16:38:17
351,499,957
0
0
null
2021-03-25T16:38:17
2021-03-25T16:15:16
Python
UTF-8
Python
false
false
1,048
py
# coding: utf-8

"""
    Payment Gateway API Specification.

    The documentation here is designed to provide all of the technical
    guidance required to consume and integrate with our APIs for payment
    processing. To learn more about our APIs please visit
    https://docs.firstdata.com/org/gateway.  # noqa: E501

    The version of the OpenAPI document: 21.1.0.20210122.001
    Generated by: https://openapi-generator.tech
"""

from __future__ import absolute_import

import unittest

import openapi_client
from openapi_client.models.block_name import BlockName  # noqa: E501
from openapi_client.rest import ApiException


class TestBlockName(unittest.TestCase):
    """BlockName unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testBlockName(self):
        """Test BlockName"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.block_name.BlockName()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
1a0d0d8a72a7a500a34f2ff5bb7c6bd7caa8fb72
ab621c65fc91f5194c4032d68e750efaa5f85682
/pabi_asset_management/models/account_invoice.py
44523958b5d29300884b805342d127aa926086e3
[]
no_license
pabi2/pb2_addons
a1ca010002849b125dd89bd3d60a54cd9b9cdeef
e8c21082c187f4639373b29a7a0905d069d770f2
refs/heads/master
2021-06-04T19:38:53.048882
2020-11-25T03:18:24
2020-11-25T03:18:24
95,765,121
6
15
null
2022-10-06T04:28:27
2017-06-29T10:08:49
Python
UTF-8
Python
false
false
1,553
py
# -*- coding: utf-8 -*-
from openerp import models, api, fields, _
from openerp.exceptions import ValidationError


class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    asset_adjust_id = fields.Many2one(
        'account.asset.adjust',
        string='Asset Adjustment',
        readonly=True,
        copy=False,
    )

    @api.multi
    def action_open_asset_adjust(self):
        self.ensure_one()
        action = self.env.ref('pabi_asset_management.'
                              'action_account_asset_adjust')
        if not action:
            raise ValidationError(_('No Action'))
        res = action.read([])[0]
        res['domain'] = [('id', '=', self.asset_adjust_id.id)]
        return res


class AccountInvoiceLine(models.Model):
    _inherit = 'account.invoice.line'

    @api.multi
    @api.constrains('asset_profile_id', 'asset_id')
    def _check_asset_id(self):
        for rec in self:
            if rec.asset_profile_id or rec.asset_id:
                raise ValidationError(
                    _('For PABI2, creating asset on invoice is not allowed.'))

    @api.multi
    def onchange_account_id(self, product_id, partner_id, inv_type,
                            fposition_id, account_id):
        """ For PABI2, never assign an asset profile in the invoice """
        res = super(AccountInvoiceLine, self).onchange_account_id(
            product_id, partner_id, inv_type, fposition_id, account_id)
        if 'value' in res:
            res['value'].update({'asset_profile_id': False})
        return res
581fe975ef4f6f1b56c548e29459448b6bc675e0
e0980f704a573894350e285f66f4cf390837238e
/.history/news/models_20201124145056.py
36e7fa7552b6dca3ae2bded184debbc336fd977d
[]
no_license
rucpata/WagtailWebsite
28008474ec779d12ef43bceb61827168274a8b61
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
refs/heads/main
2023-02-09T15:30:02.133415
2021-01-05T14:55:45
2021-01-05T14:55:45
303,961,094
0
0
null
null
null
null
UTF-8
Python
false
false
1,500
py
from django.db import models

from modelcluster.fields import ParentalKey

from wagtail.core.fields import RichTextField
from wagtail.contrib.forms.models import AbstractEmailForm, AbstractFormField
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel

# Create your models here.


class FormField(AbstractFormField):
    page = ParentalKey(
        'NewsPage',
        on_delete=models.CASCADE,
        related_name='form_fields',
    )


class NewsPage(AbstractEmailForm):
    template = 'news/news_page.html'
    landing_page_template = 'news/news_page_leading.html'
    subpage_types = []
    max_count = 1

    intro = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul'])
    thank_you_text = RichTextField(
        blank=True,
        features=['bold', 'italic', 'ol', 'ul'])
    map_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        help_text='The image will be cropped to 588px by 355px',
        related_name='+',
    )
    map_url = models.URLField(
        blank=True,
        help_text='Optional. If you provide a link here, the image will become a link.'
    )

    content_panels = AbstractEmailForm.content_panels + [
        FieldPanel('intro'),
        ImageChooserPanel('map_image'),
        FieldPanel('map_url'),
        InlinePanel('form_fields', label="Form Fields"),
        FieldPanel('thank_you_text'),
        FieldPanel('from_address'),
        FieldPanel('to_address'),
        FieldPanel('subject'),
    ]
47b2b4cd2c7ec2ed353cd1389f68fd0d291375dc
1d3f531435a5c60b7676c4321d360722d34c70a4
/ksplit/tests/test_kmers.py
004412b7574ffac321bda8c3ca41c9549002a293
[]
no_license
t-l-g/ksplit
24bb425cdce219b294a571da82799ebbc675a290
3b0467b255d6defd19f410918d162b7d713c5a51
refs/heads/master
2023-01-16T06:49:14.325536
2020-11-26T10:32:26
2020-11-26T10:32:26
306,830,063
0
0
null
2020-11-26T10:32:27
2020-10-24T07:30:26
Python
UTF-8
Python
false
false
3,892
py
from hypothesis import given, note, strategies as st

import pyximport
import numpy as np
pyximport.install(setup_args={
    'include_dirs': np.get_include()
    })

from ksplit import kmers


def test_kmers():
    testing = 'ATTTAACATGAGATAACATGCATGCATGCATTGCGGCTCAGCTAGTCAGCTAGCTAGCTAGCTACGATCGATCGTAGCATCGATCGATCGATCGATCGATCGATCGTACGTACGTAGCTACGATCGTAGCTAGCTAG'
    testing = testing.encode('ascii')
    print(kmers.kmers(testing))
    testing = 'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
    testing = testing.encode('ascii')
    print(kmers.kmers(testing))


def test_error_detection():
    testing = 'ATTTAACATGAGXTAACATGCATGCATGCAT'
    testing = testing.encode('ascii')
    assert kmers.kmers(testing) is None


def test_kmers1():
    testing = 'TTTCTTTTTTTTTTTTTTTTTTTTTTTTTTT'
    testing = testing.encode('ascii')
    ks = kmers.kmers(testing)
    assert len(testing) == 31
    assert len(ks) == 1


def rc(t):
    rcd = {
        'A': 'T',
        'T': 'A',
        'C': 'G',
        'G': 'C',
    }
    return ''.join([rcd[c] for c in t[::-1]])


def test_kmers_reverse():
    for t in [
            'TTATACATACTGTTGGTATGATAATAGTATA',
            'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT',
            'ATTTTTTTTTTTTTTTTTTTTTTTTTTTTTT',
            'AAAAAAAAAAATTTTTTTTTTTTTTTTTTTT',
    ]:
        assert np.all(kmers.kmers(t.encode('ascii')) == kmers.kmers(rc(t).encode('ascii')))


def test_kmers_reverse_embed():
    k = 'TTATACATACTGTTGGTATGATAATAGTATA'
    t0 = k + 'C'
    t1 = rc(k) + 'T'
    assert kmers.kmers(t0.encode('ascii'))[0] == kmers.kmers(t1.encode('ascii'))[0]


def test_max62():
    assert len('{:b}'.format(kmers.kmers('GACATAGCGACGCGGACCCCCTTTTTTTTTTGG'.encode('ascii')).max())) <= 62


def test_kmers_different():
    ks = [
        'TTATACATACTGTTGGTATGATAATAGTATA',
        'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT',
        'ATTTTTTTTTTTTTTTTTTTTTTTTTTTTTT',
        'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTA',
    ]
    ks = [kmers.kmers(k.encode('ascii'))[0] for k in ks]
    assert len(ks) == len(set(ks))


def test_regression_kmers():
    "regression on kmer computation"
    ks_c = kmers.kmers(b'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC')
    ks_g = kmers.kmers(b'GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG')
    assert len(set(ks_c)) == 1
    assert len(set(ks_g)) == 1
    assert np.all(ks_c == ks_g)


KMER_SIZE = 31


def encode_nt(nt):
    if type(nt) == str:
        nt = nt.encode('ascii')
    if nt == b'A':
        return 0
    if nt == b'C':
        return 1
    if nt == b'T':
        return 2
    if nt == b'G':
        return 3
    return -1


def rc1(nt):
    return {
        'A': 'T',
        'T': 'A',
        'C': 'G',
        'G': 'C'
    }[nt]


def rc(s):
    return ''.join(rc1(si) for si in s[::-1])


def encode_kmer(k):
    return int(''.join(['{:02b}'.format(encode_nt(ki)) for ki in k[::-1]]), 2)


def encode_kmer_rc(k):
    return encode_kmer(rc(k))


def encode_kmer_min(k):
    assert len(k) == KMER_SIZE
    return min(encode_kmer(k), encode_kmer_rc(k))


@given(seq=st.text(alphabet='ATGC', min_size=KMER_SIZE, max_size=65))
def test_naive(seq):
    import numpy as np
    n = np.array([encode_kmer_min(seq[i:i + KMER_SIZE]) for i in range(len(seq) - KMER_SIZE + 1)])
    fast = kmers.kmers(seq.encode('ascii'))
    assert len(n) == len(fast)
    assert np.all(n == fast)


@given(seq=st.text(alphabet='ATGC', min_size=KMER_SIZE, max_size=65))
def test_shift(seq):
    import numpy as np
    shifted = np.array([kmers.kmers(seq[i:i + KMER_SIZE].encode('ascii'))[0] for i in range(len(seq) - KMER_SIZE + 1)])
    fast = kmers.kmers(seq.encode('ascii'))
    assert len(shifted) == len(fast)
    assert np.all(shifted == fast)
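The naive reference encoder packs each base into two bits with the first base in the least-significant position, and takes the minimum of the forward and reverse-complement encodings as the canonical k-mer. A toy walk-through with k=3 (the module itself fixes k=31) makes the packing concrete; it is illustrative only:

# Toy illustration of the 2-bit packing used by the naive encoder above,
# with k=3 instead of the module's fixed KMER_SIZE of 31.
CODE = {'A': 0, 'C': 1, 'T': 2, 'G': 3}
RC = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}

def encode(kmer):
    # kmer[::-1] puts the first base into the least-significant two bits.
    return int(''.join('{:02b}'.format(CODE[b]) for b in kmer[::-1]), 2)

fwd = encode('ACG')                                # 0b110100 == 52
rev = encode(''.join(RC[b] for b in 'ACG'[::-1]))  # encode('CGT') == 0b101101 == 45
canonical = min(fwd, rev)                          # 45, what encode_kmer_min would give for k=3
print(fwd, rev, canonical)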
3f64959763032c01b3d7a56a62b787c7f1b10504
d719ca6b0006289ab43f95fd94460aaa5b879daa
/python/pyspark/linezone_peacebird_wh_1/edw_spark_sql/p_mod_sku_day_replenish.py
f592829da1ababf65f4bb2e1bac3a74e637c3a4e
[]
no_license
wugeer/little_code
0ca57823be2304ae985dacb9cbb85fc0218584e1
602b9de229c3d4e06c66d089ee0aea7afb32a029
refs/heads/master
2020-05-18T11:35:31.187734
2019-05-01T06:59:21
2019-05-01T06:59:21
184,380,006
0
0
null
null
null
null
UTF-8
Python
false
false
12,353
py
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Project: peacebird
# Filename: p_mod_sku_day_replenish
# Author: zsm
# Date: 2018/11/26 10:27
# ----------------------------------------------------------------------------

import sys
import time
import datetime
from datetime import timedelta
import os
from dateutil.parser import parse
from multiprocessing import cpu_count
from concurrent import futures

import pandas as pd
from os.path import abspath
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType


class SparkInit(object):
    def __init__(self, file_name):
        warehouse_location = abspath('hdfs://master1:9000/user/hive/warehouse')
        # application name
        app_name = "".join(["PySpark-", file_name])
        # config settings
        spark_conf = SparkConf()
        spark_conf.set("spark.sql.warehouse.dir", warehouse_location)
        spark_conf.set("hive.exec.dynamic.partition.mode", 'nonstrict')
        # works around the error: Detected cartesian product for INNER join
        # between logical plans
        spark_conf.set("spark.sql.crossJoin.enabled", 'true')
        # avoids producing too many small files per partition
        spark_conf.set("spark.sql.shuffle.partitions", '1')
        self.spark = SparkSession \
            .builder \
            .appName(app_name) \
            .config(conf=spark_conf) \
            .enableHiveSupport() \
            .getOrCreate()
        # fetch the parameters needed to run this script
        self.params_dict = self.get_params()

    @staticmethod
    def get_params():
        """
        Parse the script arguments and return them as a dict. By default
        returns the input date, the input date truncated to Monday, the
        full-price-rate field, and the best-seller ratio.
        :return: params_dict
        """
        param_list = sys.argv
        p_input_date = parse(param_list[1]).strftime('%Y-%m-%d')
        p_input_date_mon = date_trunc('week', p_input_date)
        p_input_date_add_one_day = (parse(p_input_date) + timedelta(days=1)).strftime('%Y-%m-%d')
        params_dict = {'p_input_date': p_input_date,
                       'p_input_date_add_one_day': p_input_date_add_one_day,
                       'p_input_date_mon': p_input_date_mon
                       }
        return params_dict

    def create_temp_table(self, sql, table_name):
        """
        Create a temporary table.
        :param sql: SQL statement
        :param table_name: name of the temporary table
        :return:
        """
        sql_temp = sql.format(**self.params_dict)
        temp_table = self.spark.sql(sql_temp).createOrReplaceTempView(table_name)
        return temp_table

    def drop_temp_table(self, table_name):
        """
        Drop a temporary table.
        :param table_name:
        :return:
        """
        self.spark.catalog.dropTempView(table_name)

    def execute_sql(self, sql):
        """
        Execute a SQL statement on the Spark engine.
        :param sql:
        :return:
        """
        sql_to_execute = sql.format(**self.params_dict)
        df = self.spark.sql(sql_to_execute)
        return df


def date_trunc(interval, date_str):
    """
    Truncate a date to the given precision and return the date string.
    :param interval: ['week', 'month', 'year']
    :param date_str:
    :return: after_trunc_date_str
    """
    date_obj = parse(date_str)
    # date_obj = datetime.datetime.strptime(date_str, '%Y-%m-%d')
    if interval == 'week':
        res = date_obj - timedelta(days=(date_obj.isocalendar()[2] - 1))
    elif interval == 'month':
        res = datetime.date(date_obj.year, date_obj.month, 1)
    elif interval == 'year':
        res = datetime.date(date_obj.year, 1, 1)
    else:
        raise Exception("interval must be ['week', 'month', 'year']")
    return res.strftime('%Y-%m-%d')


def main():
    """
    main sku    alternate sku    priority
    sku1        sku1             0
    sku1        a1               1
    sku1        a2               2
    ---------------------------
    """
    sql_tmp_main_alter_sku = '''
        select m_productalias_id as main_skuid
             , m_productalias_id as main_alter_skuid
             , '0' as priority
        from peacebird.st_sku_relate_view
        group by m_productalias_id
        union all
        select m_productalias_id as main_skuid
             , m_productalias2_id as main_alter_skuid
             , priority
        from peacebird.st_sku_relate_view
    '''
    spark.create_temp_table(sql_tmp_main_alter_sku, "tmp_main_alter_sku")

    # stock of main/alternate skus in the central warehouse CB37
    sql_main_alter_sku_stock = '''
        with tmp_warehouse_stock as
        (select org_id
              , product_id
              , color_id
              , size_id
              , stock_qty
         from edw.mid_day_end_stock
         where stock_date = '{p_input_date}'
           and org_id = '420867'
        )
        select tmp_main_alter_sku.main_skuid
             , tmp_main_alter_sku.main_alter_skuid
             , tmp_main_alter_sku.priority
             , stock.stock_qty
        from tmp_warehouse_stock as stock
        inner join edw.dim_product_sku as dim_sku
            on dim_sku.product_id = stock.product_id
            and dim_sku.color_id = stock.color_id
            and dim_sku.size_id = stock.size_id
        inner join tmp_main_alter_sku
            on tmp_main_alter_sku.main_alter_skuid = dim_sku.sku_id
    '''
    main_alter_sku_stock = spark.execute_sql(sql_main_alter_sku_stock)

    # original model table
    # model rows that need no processing
    sql_satis_model = """
        select sku.sku_id
             , a.send_org_id
             , a.receive_store_id
             , a.date_send
             , a.date_rec_pred
             , a.send_qty
             , a.dec_day_date
        from (
            select *
            from edw.mod_sku_day_replenish_model
            where dec_day_date = '{p_input_date_add_one_day}'
        ) as a
        inner join edw.dim_product_sku as sku
            on a.product_id = sku.product_id
            and a.color_id = sku.color_id
            and a.size_id = sku.size_id
        left join tmp_main_alter_sku as tmp_sku
            on sku.sku_id = tmp_sku.main_skuid
        where tmp_sku.main_skuid is null
    """
    satis_model = spark.execute_sql(sql_satis_model)
    print('Replenishment suggestions that need no splitting: {}'.format(satis_model.count()))

    # model rows that need processing
    sql_origin_model = """
        select sku.sku_id
             , a.send_org_id
             , a.receive_store_id
             , a.date_send
             , a.date_rec_pred
             , a.send_qty
             , a.dec_day_date
        from (
            select *
            from edw.mod_sku_day_replenish_model
            where dec_day_date = '{p_input_date_add_one_day}'
        ) as a
        inner join edw.dim_product_sku as sku
            on a.product_id = sku.product_id
            and a.color_id = sku.color_id
            and a.size_id = sku.size_id
        inner join (
            select distinct main_skuid
            from tmp_main_alter_sku
        ) as tmp_sku
            on sku.sku_id = tmp_sku.main_skuid
    """
    origin_model = spark.execute_sql(sql_origin_model)
    print('Replenishment suggestions that need splitting: {}'.format(origin_model.count()))

    unpack_model(main_alter_sku_stock, satis_model, origin_model)


def unpack_model(main_alter_sku_stock, satis_model, origin_model):
    main_alter_sku_stock = main_alter_sku_stock.toPandas()
    # if there are model rows to split, process them; otherwise write
    # directly to mod_sku_day_replenish
    if origin_model.count() != 0:
        origin_model = origin_model.toPandas()
        unpack_list = []
        # each model suggestion that needs splitting
        for i in range(origin_model.shape[0]):
            suggest_df = origin_model[i:i + 1]
            sku_id = suggest_df['sku_id'].values[0]
            send_qty_init = suggest_df['send_qty'].values[0]
            send_qty = send_qty_init
            stock_order_df = main_alter_sku_stock[main_alter_sku_stock['main_skuid'] == sku_id] \
                .sort_values(by='priority')
            for j in range(stock_order_df.shape[0]):
                alter_stock_df = stock_order_df[j:j + 1]
                alter_stock_qty = alter_stock_df['stock_qty'].values[0]
                alter_sku_id = alter_stock_df['main_alter_skuid'].values[0]
                index = main_alter_sku_stock[main_alter_sku_stock['main_alter_skuid'] == alter_sku_id]. \
                    index.tolist()[0]
                if alter_stock_qty >= send_qty:
                    # if j == 0:
                    #     unpack_list.append(suggest_df)
                    # else:
                    # append the newly generated row to the model df
                    extend_suggest_df = suggest_df.replace(sku_id, alter_sku_id, inplace=False) \
                        .replace(send_qty_init, send_qty)
                    unpack_list.append(extend_suggest_df)
                    # set the corresponding stock to alter_stock_qty - send_qty
                    main_alter_sku_stock[index: index + 1]['stock_qty'] = alter_stock_qty - send_qty
                    # print(alter_sku_id, alter_stock_qty)
                    break
                # if not enough, take everything this alternate has, subtract it
                # from the requested quantity, and carry the remainder into the
                # next iteration
                else:
                    if alter_stock_qty <= 0:
                        continue
                    else:
                        extend_suggest_df = suggest_df.replace(sku_id, alter_sku_id, inplace=False) \
                            .replace(send_qty_init, alter_stock_qty)
                        unpack_list.append(extend_suggest_df)
                        # set the corresponding alternate sku stock to 0
                        main_alter_sku_stock[index: index + 1]['stock_qty'] = 0
                        send_qty = send_qty - alter_stock_qty
        # concatenate all dataframes in unpack_list into one big df
        if len(unpack_list) != 0:
            unpack_model_df = pd.concat(unpack_list, axis=0)
            unpack_model_spark_df = spark.spark.createDataFrame(unpack_model_df)
            all_unpack_model_spark_df = unpack_model_spark_df.unionAll(satis_model)
        else:
            all_unpack_model_spark_df = satis_model
    else:
        all_unpack_model_spark_df = satis_model

    # insert the big df into the post-split model table
    all_unpack_model_spark_df.createOrReplaceTempView("unpack_model")
    insert_sql = """
        insert overwrite table edw.mod_sku_day_replenish
        select b.product_id
             , b.color_id
             , b.size_id
             , a.send_org_id
             , a.receive_store_id
             , a.date_send
             , a.date_rec_pred
             , a.send_qty
             , '{p_input_date_add_one_day}' as dec_day_date
             , current_timestamp as etl_time
        from unpack_model as a
        inner join edw.dim_product_sku as b
            on a.sku_id = b.sku_id
    """
    spark.execute_sql(insert_sql)

    # write the flag row
    sql_model_flag = """
        select 'replenish' as category
             , '{p_input_date_add_one_day}' as day_date
             , 1 as status
             , current_timestamp as etl_time
        union all
        select *
        from edw.model_flag
        where day_date != '{p_input_date_add_one_day}'
           or category != 'replenish'
    """
    model_flag_df = spark.execute_sql(sql_model_flag).toPandas()
    model_flag_spark_df = spark.spark.createDataFrame(model_flag_df)
    model_flag_spark_df.createOrReplaceTempView("tmp_model_flag")
    insert_sql = """
        insert overwrite table edw.model_flag
        select * from tmp_model_flag
    """
    spark.execute_sql(insert_sql)


if __name__ == '__main__':
    file_name = os.path.basename(__file__)
    spark = SparkInit(file_name)
    start_time = time.time()
    main()
    end_time = time.time()
    print(end_time - start_time)
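The heart of `unpack_model` is a greedy allocation: each suggested quantity for a main sku is drawn from its alternates in ascending priority order until the request is satisfied. A toy, pandas-free sketch of that allocation on invented data:

# Greedy split of one replenishment suggestion across alternates by priority.
stock = {'sku1': 3, 'a1': 4, 'a2': 10}   # alternate sku -> available qty (invented)
priority = ['sku1', 'a1', 'a2']          # priority 0, 1, 2 for main sku 'sku1'

def split(send_qty):
    lines = []
    for alt in priority:
        if send_qty == 0:
            break
        take = min(stock[alt], send_qty)
        if take > 0:
            lines.append((alt, take))    # one output row per alternate drawn from
            stock[alt] -= take
            send_qty -= take
    return lines

print(split(9))   # [('sku1', 3), ('a1', 4), ('a2', 2)]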
d04dda139b730895c0e6988185dfb7e479281c01
6b2a8dd202fdce77c971c412717e305e1caaac51
/solutions_5636311922769920_0/Python/aguy/fractiles.py
e02280bdf711f12cc673dc333fc9a4f39d8323b8
[]
no_license
alexandraback/datacollection
0bc67a9ace00abbc843f4912562f3a064992e0e9
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
refs/heads/master
2021-01-24T18:27:24.417992
2017-05-23T09:23:38
2017-05-23T09:23:38
84,313,442
2
4
null
null
null
null
UTF-8
Python
false
false
544
py
def f(k, c, tiles):
    ans = 0
    for i in tiles:
        ans += i
        ans *= k
    if len(tiles) == c:
        return ans // k + 1
    return ans * pow(k, c - len(tiles) - 1) + 1


def ans(k, c, s):
    if c * s < k:
        return "IMPOSSIBLE"
    current = []
    ans = []
    for i in range(k):
        current.append(i)
        if len(current) == c:
            ans.append(f(k, c, current))
            current = []
    if len(current):
        ans.append(f(k, c, current))
    return ' '.join(map(str, sorted(ans)))


n = int(input())
for i in range(n):
    x = ans(*map(int, input().split()))
    print("Case #{0}: {1}".format(i + 1, x))
66eab3b2572e597e0882942e6953d4d89cd3b923
99745d24f3adf3031c146d2a57039527f5a9b51c
/hropapi/models.py
29318fca028bdbc840b218528e409ada69887557
[]
no_license
Tartar-san/antihrop
f1076fdab11af473217ca861cd5e577a45aae81c
5a4b0d9204beb1fd2ce99b3d3f42c07060b2eb76
refs/heads/master
2020-12-08T23:48:59.363173
2016-08-22T16:53:11
2016-08-22T16:53:11
66,143,162
0
0
null
2016-08-20T23:17:25
2016-08-20T11:24:48
JavaScript
UTF-8
Python
false
false
2,064
py
from __future__ import unicode_literals

import time
import random

from random_words import RandomWords

# Create your models here.
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from django.db.models.signals import post_save
from django.dispatch import receiver
# from random_words import RandomWords
from rest_framework.authtoken.models import Token


class Hrop(models.Model):
    user = models.ForeignKey(to=User, related_name='hrop', db_index=True, blank=False)
    time = models.DateTimeField(verbose_name='Time hroping', blank=False, db_index=True)
    period = models.PositiveIntegerField(blank=False, default=0)
    response_time = models.PositiveIntegerField(blank=False, default=0)
    intensity = models.PositiveIntegerField(blank=False, default=0)
    track_name = models.CharField(max_length=150, blank=True, default=None)
    volume_track = models.PositiveIntegerField(blank=False, default=0)

    class Meta:
        verbose_name = 'User hrop'

    def __unicode__(self):
        return 'Time: {} Response time: '.format(self.time, self.response_time)


@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    if created:
        Token.objects.create(user=instance)
        for day in xrange(62):
            timestamp = time.time() - day * 86400
            hrop_iter = random.randint(10, 64)
            for i in xrange(hrop_iter):
                date = {
                    "user": instance,
                    "response_time": random.randint(10, 70),
                    "intensity": random.randint(20, 95),
                    "track_name": ' '.join(RandomWords().random_words(count=4)),
                    "volume_track": random.randint(10, 100)
                }
                date.update({"period": date['response_time'] + random.randint(5, 15),})
                timestamp -= date['period']
                date.update({"time": timezone.now().fromtimestamp(timestamp)})
                Hrop.objects.create(**date)
6313d711dc7d48738f6bc4acf1966ca2e94a348a
6929a33a7259dad9b45192ca088a492085ed2953
/solutions/0369-plus-one-linked-list/plus-one-linked-list.py
f2bdf2be627faddbb243dc48c20d4e041cae2605
[]
no_license
moqi112358/leetcode
70366d29c474d19c43180fd4c282cc02c890af03
fab9433ff7f66d00023e3af271cf309b2d481722
refs/heads/master
2022-12-10T01:46:14.799231
2021-01-14T05:00:09
2021-01-14T05:00:09
218,163,960
3
0
null
2022-07-06T20:26:38
2019-10-28T23:26:47
Python
UTF-8
Python
false
false
1,338
py
# Given a non-negative integer represented as a linked list of digits, plus one to the integer.
#
# The digits are stored such that the most significant digit is at the head of the list.
#
# Example 1:
# Input: head = [1,2,3]
# Output: [1,2,4]
#
# Example 2:
# Input: head = [0]
# Output: [1]
#
# Constraints:
#
# The number of nodes in the linked list is in the range [1, 100].
# 0 <= Node.val <= 9
# The number represented by the linked list does not contain leading zeros except for the zero itself.


# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def plusOne(self, head: ListNode) -> ListNode:
        rev_head = self.reverse(head)
        flag = 1
        cur = rev_head
        while cur:
            cur.val = cur.val + flag
            if cur.val >= 10:
                cur.val -= 10
                flag = 1
            else:
                flag = 0
            cur = cur.next
        res = self.reverse(rev_head)
        if flag == 1:
            res = ListNode(1, res)
        return res

    def reverse(self, head):
        res = None
        while head:
            tmp = head.next
            head.next = res
            res = head
            head = tmp
        return res
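A quick sanity check of the reverse / add-with-carry / reverse-back approach is to run it on a hand-built list. The driver below is hypothetical scaffolding meant to run alongside the solution above, not part of the original file:

# Hypothetical driver: builds [1, 2, 9], applies plusOne, prints [1, 3, 0].
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

head = ListNode(1, ListNode(2, ListNode(9)))
print(to_list(Solution().plusOne(head)))  # [1, 3, 0]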
eb94c351a5475c49b2a69d6f6a0188c182563a2c
295eb752d0ea8d6f9c3f0646f9d6b07a707da700
/Python/SpeechRecognition/asr_nlp_main_sr_eval/Evaluation_Data_III/evaluate_III.py
ce834454888bd170114f87fed6fab5cfc5e0f7ab
[]
no_license
GiulliMo/Master_Soft
f4f836c0f2a9e105d809e68f9064546ab08fbc0a
ae168a90bab75acb4753c87b9d24af3e0752d189
refs/heads/master
2022-07-27T11:03:00.356237
2020-10-20T14:39:12
2020-10-20T14:39:12
241,363,290
0
0
null
2022-07-06T20:53:41
2020-02-18T13:07:44
HTML
UTF-8
Python
false
false
6,072
py
## Author: Hannes Dittmann
## Version: 1.0
## Email: [email protected] / [email protected]
## Status: finished
## Comment: - program for evaluating hypotheses with a selected ASR system
##          - requires ground truth and hypothesis
##          - format: see file
##          - distinguishes between folders etc.
##          - one network architecture is tested at a time, so flags and
##            models have to be adjusted accordingly
##################################################
import os

from libraries.nlp import *
import tensorflow as tf
import numpy as np
from jiwer import wer, mer
from numpy import savetxt

globals()

nlp = nlp(rnn=True, embedded=True)

## Data I are test data that the network has not seen yet
## load the data
ground_truth1 = nlp.loadJsons("ground_truthDeepSpech_DataIII.json")
hypothesis1 = nlp.loadJsons("hypothesisDeepSpech_DataIII.json")
labelsTask1 = nlp.loadJsons("labelsTaskDeepSpech_DataIII.json")
processingtime1 = nlp.loadJsons("processingtimeDeepSpech_DataIII.json")

ground_truth2 = nlp.loadJsons("ground_truthEspnet_DataIII.json")
hypothesis2 = nlp.loadJsons("hypothesisEspnet_DataIII.json")
labelsTask2 = nlp.loadJsons("labelsTaskEspnet_DataIII.json")
processingtime2 = nlp.loadJsons("processingtimeEspnet_DataIII.json")

ground_truth3 = nlp.loadJsons("ground_truthPocket_DataIII.json")
hypothesis3 = nlp.loadJsons("hypothesisPocket_DataIII.json")
labelsTask3 = nlp.loadJsons("labelsTaskPocket_DataIII.json")
processingtime3 = nlp.loadJsons("processingtimePocket_DataIII.json")

gtall = [ground_truth1, ground_truth2, ground_truth3]
htall = [hypothesis1, hypothesis2, hypothesis3]
labelsall = [labelsTask1, labelsTask2, labelsTask3]
ptall = [processingtime1, processingtime2, processingtime3]
data = []
# scores = [score[0]["task"], score[0]["unknow"], score[0]["iterationen"]]
nlp.words = nlp.readWords("../models/words.txt")
print(len(nlp.words))
nlp.vocab_size = len(nlp.words)
nlp.modelTaskClassifier = tf.lite.Interpreter("../models/taskClassifierPhonWordEmbeddingRNN.tflite")  # set flags!!
nlp.modelTaskClassifier.allocate_tensors()

for i in range(len(gtall)):
    ground_truth = np.asarray(gtall[i])
    hypothesis = np.asarray(htall[i])
    labelsTask = labelsall[i]
    processingtime = ptall[i]
    confidence = []
    numclass1 = 0
    numclass2 = 0
    numclass3 = 0
    numclass4 = 0
    numclass5 = 0
    numclass6 = 0

    ## testing starts here
    for hypo in hypothesis:
        confidence.append(nlp.classifierTask(transcript=hypo))  # set flags
    cnf2 = np.asarray(confidence)

    ### evaluation ###
    ## mean confidence
    mean_conf_r = 0  # confidence on correct classifications
    mean_conf_w = 0  # how confident is the model when it is wrong?
    i_r = 0  # number of correct classifications
    i_w = 0  # number of wrong classifications
    wer_r = 0
    wer_w = 0
    wer_g = 0
    k = 0
    gt = []
    ht = []
    gt2 = []
    ht2 = []
    score_top = 0
    for cnf in enumerate(confidence):
        probs = list(cnf2[cnf[0]])
        max1 = max(probs)
        print(probs)
        idx1 = np.argmax(probs)
        probs[idx1] = 0
        idx2 = np.argmax(probs)
        print(idx1)
        print(idx2)
        classified = np.argmax(cnf[1])
        wer2 = 0
        if ground_truth[k] == " ":
            ground_truth[k] = "none"
        wer3 = wer(ground_truth[k], hypothesis[k])
        wer_g = wer_g + wer3
        print(labelsTask[cnf[0]])
        print(classified)
        if idx1 == labelsTask[cnf[0]] or idx2 == labelsTask[cnf[0]]:
            score_top = score_top + 1
        if classified == labelsTask[cnf[0]]:
            mean_conf_r = mean_conf_r + max(cnf[1])
            i_r = i_r + 1
            wer2 = wer(ground_truth[k], hypothesis[k])
            wer_r = wer_r + wer2
            gt2.append(ground_truth[k])
            ht2.append(hypothesis[k])
        elif classified != labelsTask[cnf[0]]:
            mean_conf_w = mean_conf_w + max(cnf[1])
            i_w = i_w + 1
            gt.append(ground_truth[k])
            ht.append(hypothesis[k])
            wer2 = wer(ground_truth[k], hypothesis[k])
            wer_w = wer_w + wer2
            if classified == 0:
                numclass1 = numclass1 + 1
            elif classified == 1:
                numclass2 = numclass2 + 1
            elif classified == 2:
                numclass3 = numclass3 + 1
            elif classified == 3:
                numclass4 = numclass4 + 1
            elif classified == 4:
                numclass5 = numclass5 + 1
            elif classified == 5:
                numclass6 = numclass6 + 1
        k = k + 1

    numclass = np.asarray([numclass1, numclass2, numclass3, numclass4, numclass5, numclass6])
    mean_conf_r = mean_conf_r / i_r
    mean_conf_w = mean_conf_w / i_w
    wer_rg = (wer_r + wer_w) / (i_r + i_w)
    print(wer_rg)
    wer_r = wer_r / i_r
    wer_w = wer_w / i_w
    print(score_top)
    print("Number correct: " + str(i_r))
    print("Number wrong: " + str(i_w))
    print("Accuracy: " + str(i_r / len(labelsTask)))
    print("Top 2 error: " + str(score_top / len(labelsTask)))
    print("Mean confidence on correct: " + str(mean_conf_r))
    print("Mean confidence on wrong: " + str(mean_conf_w))
    print("Mean WER over all: " + str(wer_rg))
    print("Mean WER on correct: " + str(wer_r))
    print("Mean WER on wrong: " + str(wer_w))
    print("Distribution of wrong classes: " + str(numclass / i_w))
    print("Transcription time: " + str(np.mean(np.asarray(processingtime))))
    print("WER check for wrong: " + str(wer(gt, ht)))
    print("WER check for correct: " + str(wer(gt2, ht2)))
    print("WER check overall: " + str(wer_g / (2328 + 456)))
    arr = list([wer_rg, wer_r, wer_w, i_r / len(labelsTask), mean_conf_r, mean_conf_w])
    arr = arr + list(numclass / i_w)
    arr.append(np.mean(np.asarray(processingtime)))
    arr = np.asarray(arr)
    print(arr)
    data.append(arr)

name = 'data.csv'
if os.path.exists(name):
    os.remove(name)
np.savetxt(name, (data[1], data[2], data[0]), fmt='%f')
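The script leans on `jiwer.wer` for all of its error rates; a two-line, self-contained illustration of the call shape, with invented strings:

from jiwer import wer

# WER = (substitutions + deletions + insertions) / number of reference words.
print(wer("turn on the light", "turn off the light"))  # 0.25: one substitution in four words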
d6f66c713de1d552092f12111e42da8300b3e978
f786ed57a3b3a689d87e81eef53a94b7da2c82e7
/medical_appointment/appointment/tests/selectors/test_appointment_assign_by_id.py
8751717f716147b1fa6f1b542a1da0b9dd778a93
[]
no_license
italoantonio0909/medical_appointment
4c2381285016f2e85dd48554551d99912b029cca
4eaadb8e7e8455ea1307c1a06e66cde0ef9b79d2
refs/heads/master
2023-04-22T07:24:00.613621
2021-05-14T06:17:18
2021-05-14T06:17:18
351,559,720
0
0
null
null
null
null
UTF-8
Python
false
false
2,004
py
import datetime
from datetime import timedelta
from unittest.mock import patch

from django.core.exceptions import ValidationError
from django.test import TestCase

from medical_appointment.appointment.models import AppointmentAssign, Specialty
from medical_appointment.appointment.selectors import appointment_assign_by_id
from medical_appointment.appointment.services import (
    appointment_assign_create, specialty_create)
from medical_appointment.common.test_utils import fake
from medical_appointment.users.services import user_create_superuser


class AppointmentAssignByIdTest(TestCase):
    def setUp(self):
        self.selector = appointment_assign_by_id

    @patch('medical_appointment.appointment.selectors.appointment_assign_by_id')
    def test_selector_return_nothing(self, appointment_assign_by_id_mock):
        with self.assertRaises(ValidationError):
            self.selector(id=fake.random_digit())

    @patch('medical_appointment.appointment.selectors.appointment_assign_by_id')
    def test_selector_return_appointment_assign(self, appointment_assign_by_id):
        # Create user
        user = user_create_superuser(
            first_name=fake.first_name(),
            last_name=fake.last_name(),
            password=fake.password(),
            email=fake.email()
        )

        # Create specialty
        specialty_create(title=fake.bothify(text='Specialty ????-###'))

        # Create appointment
        # Add day to date
        current_date = datetime.date.today()
        add_day = timedelta(days=1)
        date = current_date + add_day

        # Set hour -> range 8:00:00 - 16:00:00
        time = datetime.time(10, 30, 00)

        appointment_assign_create(
            user=user.id,
            date=date,
            time=time
        )

        appointment_assign_id = AppointmentAssign.objects.first().id
        result = self.selector(id=appointment_assign_id)

        # Match appointment with database
        self.assertNotEqual([], len([result]))
f2d1801896babaa067accf4c22c61a4788603700
5f0590f00e27822dde2fba0e57edbc783d763c64
/extract_csv_column.py
8e8bc4e9421310cd5ba856a466259abb3a93c4a5
[ "MIT" ]
permissive
YazzyYaz/ethereum-etl
188720869e66ca618fe54fcd8c245a5e390ea1c0
eb69307ddb7b09733de2c71f17a4f9f0191f9049
refs/heads/master
2023-04-07T09:14:26.865159
2018-11-14T06:10:12
2018-11-14T06:10:12
157,900,047
0
1
MIT
2023-03-28T18:07:32
2018-11-16T17:26:43
Python
UTF-8
Python
false
false
1,580
py
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import sys

from ethereumetl.cli.extract_csv_column import extract_csv_column

print('========================================================================================', file=sys.stderr)
print('THIS SCRIPT IS DEPRECATED AND WILL BE REMOVED ON 2019-01-01. Use ethereumetl.py instead.', file=sys.stderr)
print('========================================================================================', file=sys.stderr)

extract_csv_column()
19533f4c1dda80cc45133cbfd33ea2b59e8996d6
3d19e1a316de4d6d96471c64332fff7acfaf1308
/Users/P/pinakighosh/pincode2.py
1bba96d5e48206ba2d839677d738d653de81693d
[]
no_license
BerilBBJ/scraperwiki-scraper-vault
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
65ea6a943cc348a9caf3782b900b36446f7e137d
refs/heads/master
2021-12-02T23:55:58.481210
2013-09-30T17:02:59
2013-09-30T17:02:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,726
py
import scraperwiki
import mechanize
import re
import lxml.html
import sys
import requests


def get_start():
    url = "http://www.indiapost.gov.in/Pin/"
    br = mechanize.Browser()
    response = br.open(url)
    response = br.response()  # this is a copy of response
    headers = response.info()  # currently, this is a mimetools.Message
    headers["Content-type"] = "text/html; charset=utf-8"
    response.set_data(response.get_data().replace("<!---", "<!--"))
    br.set_response(response)
    html = scraperwiki.scrape(url)
    root = lxml.html.fromstring(html)
    br = mechanize.Browser()
    response = br.open(url)
    VAR1 = response.read()
    response.set_data(response.get_data()[717:])
    br.set_response(response)
    br.select_form(nr=0)
    br.set_all_readonly(False)
    response = br.submit()
    VAR2 = response.read()  # source code after submitting show all
    # print VAR2
    root = lxml.html.fromstring(VAR2)
    return VAR2


starturl = 'http://www.indiapost.gov.in/pin/'
s = requests.session()
r1 = s.get(starturl)
html = get_start()
root = lxml.html.fromstring(html)
l = []
count = 0
br = 0
sl_no = 1
for el in root.cssselect("table#gvw_offices tr"):
    for el2 in el.cssselect("tr td"):
        var = el2.text_content()
        # print el2.text_content()
        if count < 3:
            l.append(var)
            count += 1
        else:
            l.append(var)
            scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no": sl_no, "Office Name": l[1], "Pincode": l[2], "District": l[3]})
            sl_no += 1
            # print l[1:]
            count = 0
            l = []
            br += 1
        if br == 10:
            break
    if br == 10:
        break

start = 4543
i = 0
bias = 1
while i < 15537:
    # for i in range(15537):
    # for i in range(7767):
    if i == 0:
        i += 1
        continue
    print i
    # pick up the javascript values
    EVENTVALIDATION = root.xpath('//input[@name="__EVENTVALIDATION"]')[0].attrib['value']
    print "EVENTVALIDATION" + str(i)
    # print EVENTVALIDATION
    # find the __EVENTVALIDATION value
    VIEWSTATE = root.xpath('//input[@name="__VIEWSTATE"]')[0].attrib['value']
    # print VIEWSTATE
    # EVENTARGUMENT = 'Page$' + str(i)
    EVENTARGUMENT = 'Page$11'
    # payload = {}
    payload = {'__EVENTTARGET': 'gvw_offices', '__EVENTARGUMENT': EVENTARGUMENT, 'referer': 'http://www.indiapost.gov.in/pin/', '__EVENTVALIDATION': EVENTVALIDATION, '__VIEWSTATE': VIEWSTATE, '__VIEWSTATEENCRYPTED': ''}
    ret = s.post(starturl, data=payload)
    html = ret.text
    root = lxml.html.fromstring(html)
    if i < start:
        if i % 10 == 0:
            i += 1
        else:
            # i = bias * 10
            # bias += 1
            i += 5
        continue
    else:
        i += 1
    l = []
    count = 0
    br = 0
    for el in root.cssselect("table#gvw_offices tr"):
        for el2 in el.cssselect("tr td"):
            var = el2.text_content()
            # print el2.text_content()
            if count < 3:
                l.append(var)
                count += 1
            else:
                l.append(var)
                scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no": sl_no, "Office Name": l[1], "Pincode": l[2], "District": l[3]})
                sl_no += 1
                # print l[1:]
                count = 0
                l = []
                br += 1
            if br == 10:
                break
        if br == 10:
            break
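The pagination loop works by re-posting the ASP.NET `__VIEWSTATE` and `__EVENTVALIDATION` hidden fields on every page change, a pattern that generalizes beyond this site. A compact Python 3 sketch with a placeholder URL and form ids:

# Generic ASP.NET postback pagination: re-submit the hidden state fields
# extracted from each response. URL and control ids here are placeholders.
import requests
import lxml.html

def hidden(root, name):
    return root.xpath('//input[@name="%s"]/@value' % name)[0]

session = requests.Session()
url = 'http://example.com/grid.aspx'  # placeholder URL
root = lxml.html.fromstring(session.get(url).text)

for page in range(2, 5):
    payload = {
        '__EVENTTARGET': 'gvw_results',       # placeholder grid control id
        '__EVENTARGUMENT': 'Page$%d' % page,  # ASP.NET GridView pager argument
        '__VIEWSTATE': hidden(root, '__VIEWSTATE'),
        '__EVENTVALIDATION': hidden(root, '__EVENTVALIDATION'),
    }
    # each response carries fresh state fields for the next postback
    root = lxml.html.fromstring(session.post(url, data=payload).text)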
f89a5dab2d61601fb980032c20f0df33ed1bd820
1f63dde39fcc5f8be29f2acb947c41f1b6f1683e
/Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/kernel_tests/linalg_ops_test.py
2d31ac85b02d688ab260f840cb62e38435764f23
[ "MIT", "Apache-2.0" ]
permissive
koobonil/Boss2D
09ca948823e0df5a5a53b64a10033c4f3665483a
e5eb355b57228a701495f2660f137bd05628c202
refs/heads/master
2022-10-20T09:02:51.341143
2019-07-18T02:13:44
2019-07-18T02:13:44
105,999,368
7
2
MIT
2022-10-04T23:31:12
2017-10-06T11:57:07
C++
UTF-8
Python
false
false
8,265
py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.linalg_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


def _random_pd_matrix(n, rng):
  """Random positive definite matrix."""
  temp = rng.randn(n, n)
  return temp.dot(temp.T)


class CholeskySolveTest(test.TestCase):
  _use_gpu = False

  def setUp(self):
    self.rng = np.random.RandomState(0)

  def test_works_with_five_different_random_pos_def_matrices(self):
    with self.test_session():
      for n in range(1, 6):
        for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
          # Create 2 x n x n matrix
          array = np.array(
              [_random_pd_matrix(n, self.rng), _random_pd_matrix(n, self.rng)
              ]).astype(np_type)
          chol = linalg_ops.cholesky(array)
          for k in range(1, 3):
            rhs = self.rng.randn(2, n, k).astype(np_type)
            x = linalg_ops.cholesky_solve(chol, rhs)
            self.assertAllClose(
                rhs, math_ops.matmul(array, x).eval(), atol=atol)


class CholeskySolveGpuTest(CholeskySolveTest):
  _use_gpu = True


class EyeTest(test.TestCase):

  def test_non_batch_2x2(self):
    num_rows = 2
    dtype = np.float32
    np_eye = np.eye(num_rows).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, dtype=dtype)
      self.assertAllEqual((num_rows, num_rows), eye.get_shape())
      self.assertAllEqual(np_eye, eye.eval())

  def test_non_batch_2x3(self):
    num_rows = 2
    num_columns = 3
    dtype = np.float32
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
      self.assertAllEqual((num_rows, num_columns), eye.get_shape())
      self.assertAllEqual(np_eye, eye.eval())

  def test_1x3_batch_4x4(self):
    num_rows = 4
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
      self.assertAllEqual(batch_shape + [num_rows, num_rows], eye.get_shape())
      eye_v = eye.eval()
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])

  def test_1x3_batch_4x4_dynamic(self):
    num_rows = 4
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows).astype(dtype)
    with self.test_session():
      num_rows_ph = array_ops.placeholder(dtypes.int32)
      batch_shape_ph = array_ops.placeholder(dtypes.int32)
      eye = linalg_ops.eye(num_rows_ph, batch_shape=batch_shape_ph,
                           dtype=dtype)
      eye_v = eye.eval(
          feed_dict={num_rows_ph: num_rows, batch_shape_ph: batch_shape})
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])

  def test_1x3_batch_5x4(self):
    num_rows = 5
    num_columns = 4
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows,
                           num_columns=num_columns,
                           batch_shape=batch_shape,
                           dtype=dtype)
      self.assertAllEqual(batch_shape + [num_rows, num_columns],
                          eye.get_shape())
      eye_v = eye.eval()
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])

  def test_1x3_batch_5x4_dynamic(self):
    num_rows = 5
    num_columns = 4
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      num_rows_ph = array_ops.placeholder(dtypes.int32)
      num_columns_ph = array_ops.placeholder(dtypes.int32)
      batch_shape_ph = array_ops.placeholder(dtypes.int32)
      eye = linalg_ops.eye(num_rows_ph,
                           num_columns=num_columns_ph,
                           batch_shape=batch_shape_ph,
                           dtype=dtype)
      eye_v = eye.eval(feed_dict={
          num_rows_ph: num_rows,
          num_columns_ph: num_columns,
          batch_shape_ph: batch_shape
      })
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])

  def test_non_batch_0x0(self):
    num_rows = 0
    dtype = np.int64
    np_eye = np.eye(num_rows).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, dtype=dtype)
      self.assertAllEqual((num_rows, num_rows), eye.get_shape())
      self.assertAllEqual(np_eye, eye.eval())

  def test_non_batch_2x0(self):
    num_rows = 2
    num_columns = 0
    dtype = np.int64
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
      self.assertAllEqual((num_rows, num_columns), eye.get_shape())
      self.assertAllEqual(np_eye, eye.eval())

  def test_non_batch_0x2(self):
    num_rows = 0
    num_columns = 2
    dtype = np.int64
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, num_columns=num_columns, dtype=dtype)
      self.assertAllEqual((num_rows, num_columns), eye.get_shape())
      self.assertAllEqual(np_eye, eye.eval())

  def test_1x3_batch_0x0(self):
    num_rows = 0
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
      self.assertAllEqual((1, 3, 0, 0), eye.get_shape())
      eye_v = eye.eval()
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])

  def test_1x3_batch_2x0(self):
    num_rows = 2
    num_columns = 0
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows,
                           num_columns=num_columns,
                           batch_shape=batch_shape,
                           dtype=dtype)
      self.assertAllEqual(batch_shape + [num_rows, num_columns],
                          eye.get_shape())
      eye_v = eye.eval()
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])

  def test_1x3_batch_0x2(self):
    num_rows = 0
    num_columns = 2
    batch_shape = [1, 3]
    dtype = np.float32
    np_eye = np.eye(num_rows, num_columns).astype(dtype)
    with self.test_session():
      eye = linalg_ops.eye(num_rows,
                           num_columns=num_columns,
                           batch_shape=batch_shape,
                           dtype=dtype)
      self.assertAllEqual(batch_shape + [num_rows, num_columns],
                          eye.get_shape())
      eye_v = eye.eval()
      for i in range(batch_shape[0]):
        for j in range(batch_shape[1]):
          self.assertAllEqual(np_eye, eye_v[i, j, :, :])


if __name__ == '__main__':
  test.main()
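`CholeskySolveTest` verifies that solving A x = rhs through a Cholesky factor reproduces rhs when multiplied back. The same invariant in plain NumPy/SciPy, on a toy 2x2 positive-definite matrix:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

a = np.array([[4.0, 1.0],
              [1.0, 3.0]])         # symmetric positive definite
rhs = np.array([[1.0], [2.0]])

x = cho_solve(cho_factor(a), rhs)  # solve a @ x = rhs via the Cholesky factor
print(np.allclose(a @ x, rhs))     # True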
6a661f3fa6057b4829f61a7afe266be48fd767fb
180dc578d12fff056fce1ef8bd1ba5c227f82afc
/official/projects/s3d/modeling/inception_utils.py
29e6cbb2859c4ed93a07f237d0e04cf7d99c7522
[ "Apache-2.0" ]
permissive
jianzhnie/models
6cb96c873d7d251db17afac7144c4dbb84d4f1d6
d3507b550a3ade40cade60a79eb5b8978b56c7ae
refs/heads/master
2023-07-12T05:08:23.314636
2023-06-27T07:54:20
2023-06-27T07:54:20
281,858,258
2
0
Apache-2.0
2022-03-27T12:53:44
2020-07-23T05:22:33
Python
UTF-8
Python
false
false
20,360
py
# Copyright 2023 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains modules related to Inception networks.""" from typing import Callable, Dict, Optional, Sequence, Set, Text, Tuple, Type, Union import tensorflow as tf from official.modeling import tf_utils from official.projects.s3d.modeling import net_utils from official.vision.modeling.layers import nn_blocks_3d INCEPTION_V1_CONV_ENDPOINTS = [ 'Conv2d_1a_7x7', 'Conv2d_2c_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'Mixed_5b', 'Mixed_5c' ] # Mapping from endpoint to branch filters. The endpoint shapes below are # specific for input 64x224x224. INCEPTION_V1_ARCH_SKELETON = [ ('Mixed_3b', [[64], [96, 128], [16, 32], [32]]), # 32x28x28x256 ('Mixed_3c', [[128], [128, 192], [32, 96], [64]]), # 32x28x28x480 ('MaxPool_4a_3x3', [[3, 3, 3], [2, 2, 2]]), # 16x14x14x480 ('Mixed_4b', [[192], [96, 208], [16, 48], [64]]), # 16x14x14x512 ('Mixed_4c', [[160], [112, 224], [24, 64], [64]]), # 16x14x14x512 ('Mixed_4d', [[128], [128, 256], [24, 64], [64]]), # 16x14x14x512 ('Mixed_4e', [[112], [144, 288], [32, 64], [64]]), # 16x14x14x528 ('Mixed_4f', [[256], [160, 320], [32, 128], [128]]), # 16x14x14x832 ('MaxPool_5a_2x2', [[2, 2, 2], [2, 2, 2]]), # 8x7x7x832 ('Mixed_5b', [[256], [160, 320], [32, 128], [128]]), # 8x7x7x832 ('Mixed_5c', [[384], [192, 384], [48, 128], [128]]), # 8x7x7x1024 ] INCEPTION_V1_LOCAL_SKELETON = [ ('MaxPool_5a_2x2_local', [[2, 2, 2], [2, 2, 2]]), # 8x7x7x832 ('Mixed_5b_local', [[256], [160, 320], [32, 128], [128]]), # 8x7x7x832 ('Mixed_5c_local', [[384], [192, 384], [48, 128], [128]]), # 8x7x7x1024 ] initializers = tf.keras.initializers regularizers = tf.keras.regularizers def inception_v1_stem_cells( inputs: tf.Tensor, depth_multiplier: float, final_endpoint: Text, temporal_conv_endpoints: Optional[Set[Text]] = None, self_gating_endpoints: Optional[Set[Text]] = None, temporal_conv_type: Text = '3d', first_temporal_kernel_size: int = 7, use_sync_bn: bool = False, norm_momentum: float = 0.999, norm_epsilon: float = 0.001, temporal_conv_initializer: Union[ Text, initializers.Initializer] = initializers.TruncatedNormal( mean=0.0, stddev=0.01), kernel_initializer: Union[Text, initializers.Initializer] = 'truncated_normal', kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2', parameterized_conv_layer: Type[ net_utils.ParameterizedConvLayer] = net_utils.ParameterizedConvLayer, layer_naming_fn: Callable[[Text], Text] = lambda end_point: None, ) -> Tuple[tf.Tensor, Dict[Text, tf.Tensor]]: """Stem cells used in the original I3D/S3D model. Args: inputs: A 5-D float tensor of size [batch_size, num_frames, height, width, channels]. depth_multiplier: A float to reduce/increase number of channels. final_endpoint: Specifies the endpoint to construct the network up to. It can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3']. 
    temporal_conv_endpoints: Specifies the endpoints at which to perform
      temporal convolution.
    self_gating_endpoints: Specifies the endpoints at which to perform self
      gating.
    temporal_conv_type: '3d' for I3D model and '2+1d' for S3D model.
    first_temporal_kernel_size: temporal kernel size of the first convolution
      layer.
    use_sync_bn: If True, use synchronized batch normalization.
    norm_momentum: A `float` of normalization momentum for the moving average.
    norm_epsilon: A `float` added to variance to avoid dividing by zero.
    temporal_conv_initializer: Weight initializer for temporal convolution
      inside the cell. It only applies to 2+1d and 1+2d cases.
    kernel_initializer: Weight initializer for convolutional layers other than
      temporal convolution.
    kernel_regularizer: Weight regularizer for all convolutional layers.
    parameterized_conv_layer: Class for the parameterized conv layer.
    layer_naming_fn: Function to customize conv / pooling layer names given the
      endpoint name of the block. This is mainly used to create a model that is
      compatible with TF1 checkpoints.

  Returns:
    A tuple of the output tensor and a dictionary from components of the
    network to the corresponding activations.
  """
  if temporal_conv_endpoints is None:
    temporal_conv_endpoints = set()
  if self_gating_endpoints is None:
    self_gating_endpoints = set()
  if use_sync_bn:
    batch_norm = tf.keras.layers.experimental.SyncBatchNormalization
  else:
    batch_norm = tf.keras.layers.BatchNormalization
  if tf.keras.backend.image_data_format() == 'channels_last':
    bn_axis = -1
  else:
    bn_axis = 1

  end_points = {}
  # batch_size x 32 x 112 x 112 x 64
  end_point = 'Conv2d_1a_7x7'
  net = tf.keras.layers.Conv3D(
      filters=net_utils.apply_depth_multiplier(64, depth_multiplier),
      kernel_size=[first_temporal_kernel_size, 7, 7],
      strides=[2, 2, 2],
      padding='same',
      use_bias=False,
      kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
      kernel_regularizer=kernel_regularizer,
      name=layer_naming_fn(end_point))(
          inputs)
  net = batch_norm(
      axis=bn_axis,
      momentum=norm_momentum,
      epsilon=norm_epsilon,
      scale=False,
      gamma_initializer='ones',
      name=layer_naming_fn(end_point + '/BatchNorm'))(
          net)
  net = tf.nn.relu(net)
  end_points[end_point] = net
  if final_endpoint == end_point:
    return net, end_points

  # batch_size x 32 x 56 x 56 x 64
  end_point = 'MaxPool_2a_3x3'
  net = tf.keras.layers.MaxPool3D(
      pool_size=[1, 3, 3],
      strides=[1, 2, 2],
      padding='same',
      name=layer_naming_fn(end_point))(
          net)
  end_points[end_point] = net
  if final_endpoint == end_point:
    return net, end_points

  # batch_size x 32 x 56 x 56 x 64
  end_point = 'Conv2d_2b_1x1'
  net = tf.keras.layers.Conv3D(
      filters=net_utils.apply_depth_multiplier(64, depth_multiplier),
      strides=[1, 1, 1],
      kernel_size=[1, 1, 1],
      padding='same',
      use_bias=False,
      kernel_initializer=tf_utils.clone_initializer(kernel_initializer),
      kernel_regularizer=kernel_regularizer,
      name=layer_naming_fn(end_point))(
          net)
  net = batch_norm(
      axis=bn_axis,
      momentum=norm_momentum,
      epsilon=norm_epsilon,
      scale=False,
      gamma_initializer='ones',
      name=layer_naming_fn(end_point + '/BatchNorm'))(
          net)
  net = tf.nn.relu(net)
  end_points[end_point] = net
  if final_endpoint == end_point:
    return net, end_points

  # batch_size x 32 x 56 x 56 x 192
  end_point = 'Conv2d_2c_3x3'
  if end_point not in temporal_conv_endpoints:
    temporal_conv_type = '2d'
  net = parameterized_conv_layer(
      conv_type=temporal_conv_type,
      kernel_size=3,
      filters=net_utils.apply_depth_multiplier(192, depth_multiplier),
      strides=[1, 1, 1],
      rates=[1, 1, 1],
      use_sync_bn=use_sync_bn,
      norm_momentum=norm_momentum,
      norm_epsilon=norm_epsilon,
temporal_conv_initializer=temporal_conv_initializer, kernel_initializer=tf_utils.clone_initializer(kernel_initializer), kernel_regularizer=kernel_regularizer, name=layer_naming_fn(end_point))( net) if end_point in self_gating_endpoints: net = nn_blocks_3d.SelfGating( filters=net_utils.apply_depth_multiplier(192, depth_multiplier), name=layer_naming_fn(end_point + '/self_gating'))( net) end_points[end_point] = net if final_endpoint == end_point: return net, end_points # batch_size x 32 x 28 x 28 x 192 end_point = 'MaxPool_3a_3x3' net = tf.keras.layers.MaxPool3D( pool_size=[1, 3, 3], strides=[1, 2, 2], padding='same', name=layer_naming_fn(end_point))( net) end_points[end_point] = net return net, end_points def _construct_branch_3_layers( channels: int, swap_pool_and_1x1x1: bool, pool_type: Text, batch_norm_layer: tf.keras.layers.Layer, kernel_initializer: Union[Text, initializers.Initializer], kernel_regularizer: Union[Text, regularizers.Regularizer], ): """Helper function for Branch 3 inside Inception module.""" kernel_size = [1, 3, 3] if pool_type == '2d' else [3] * 3 conv = tf.keras.layers.Conv3D( filters=channels, kernel_size=[1, 1, 1], padding='same', use_bias=False, kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer) activation = tf.keras.layers.Activation('relu') pool = tf.keras.layers.MaxPool3D( pool_size=kernel_size, strides=[1, 1, 1], padding='same') if swap_pool_and_1x1x1: branch_3_layers = [conv, batch_norm_layer, activation, pool] else: branch_3_layers = [pool, conv, batch_norm_layer, activation] return branch_3_layers class InceptionV1CellLayer(tf.keras.layers.Layer): """A single Tensorflow 2 cell used in the original I3D/S3D model.""" def __init__( self, branch_filters: Sequence[Sequence[int]], conv_type: Text = '3d', temporal_dilation_rate: int = 1, swap_pool_and_1x1x1: bool = False, use_self_gating_on_branch: bool = False, use_self_gating_on_cell: bool = False, use_sync_bn: bool = False, norm_momentum: float = 0.999, norm_epsilon: float = 0.001, temporal_conv_initializer: Union[ Text, initializers.Initializer] = initializers.TruncatedNormal( mean=0.0, stddev=0.01), kernel_initializer: Union[Text, initializers.Initializer] = 'truncated_normal', kernel_regularizer: Union[Text, regularizers.Regularizer] = 'l2', parameterized_conv_layer: Type[ net_utils.ParameterizedConvLayer] = net_utils.ParameterizedConvLayer, **kwargs): """A cell structure inspired by Inception V1. Args: branch_filters: Specifies the number of filters in four branches (Branch_0, Branch_1, Branch_2, Branch_3). Single number for Branch_0 and Branch_3. For Branch_1 and Branch_2, each need to specify two numbers, one for 1x1x1 and one for 3x3x3. conv_type: The type of parameterized convolution. Currently, we support '2d', '3d', '2+1d', '1+2d'. temporal_dilation_rate: The dilation rate for temporal convolution. swap_pool_and_1x1x1: A boolean flag indicates that whether to swap the order of convolution and max pooling in Branch_3. use_self_gating_on_branch: Whether or not to apply self gating on each branch of the inception cell. use_self_gating_on_cell: Whether or not to apply self gating on each cell after the concatenation of all branches. use_sync_bn: If True, use synchronized batch normalization. norm_momentum: A `float` of normalization momentum for the moving average. norm_epsilon: A `float` added to variance to avoid dividing by zero. temporal_conv_initializer: Weight initializer for temporal convolution inside the cell. It only applies to 2+1d and 1+2d cases. 
kernel_initializer: Weight initializer for convolutional layers other than temporal convolution. kernel_regularizer: Weight regularizer for all convolutional layers. parameterized_conv_layer: class for parameterized conv layer. **kwargs: keyword arguments to be passed. Returns: out_tensor: A 5-D float tensor of size [batch_size, num_frames, height, width, channels]. """ super(InceptionV1CellLayer, self).__init__(**kwargs) self._branch_filters = branch_filters self._conv_type = conv_type self._temporal_dilation_rate = temporal_dilation_rate self._swap_pool_and_1x1x1 = swap_pool_and_1x1x1 self._use_self_gating_on_branch = use_self_gating_on_branch self._use_self_gating_on_cell = use_self_gating_on_cell self._use_sync_bn = use_sync_bn self._norm_momentum = norm_momentum self._norm_epsilon = norm_epsilon self._temporal_conv_initializer = temporal_conv_initializer self._kernel_initializer = kernel_initializer self._kernel_regularizer = kernel_regularizer self._parameterized_conv_layer = parameterized_conv_layer if use_sync_bn: self._norm = tf.keras.layers.experimental.SyncBatchNormalization else: self._norm = tf.keras.layers.BatchNormalization if tf.keras.backend.image_data_format() == 'channels_last': self._channel_axis = -1 else: self._channel_axis = 1 def _build_branch_params(self): branch_0_params = [ # Conv3D dict( filters=self._branch_filters[0][0], kernel_size=[1, 1, 1], padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer), # norm dict( axis=self._channel_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, scale=False, gamma_initializer='ones'), # relu dict(), ] branch_1_params = [ # Conv3D dict( filters=self._branch_filters[1][0], kernel_size=[1, 1, 1], padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer), # norm dict( axis=self._channel_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, scale=False, gamma_initializer='ones'), # relu dict(), # ParameterizedConvLayer dict( conv_type=self._conv_type, kernel_size=3, filters=self._branch_filters[1][1], strides=[1, 1, 1], rates=[self._temporal_dilation_rate, 1, 1], use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, temporal_conv_initializer=self._temporal_conv_initializer, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer), ] branch_2_params = [ # Conv3D dict( filters=self._branch_filters[2][0], kernel_size=[1, 1, 1], padding='same', use_bias=False, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer), # norm dict( axis=self._channel_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, scale=False, gamma_initializer='ones'), # relu dict(), # ParameterizedConvLayer dict( conv_type=self._conv_type, kernel_size=3, filters=self._branch_filters[2][1], strides=[1, 1, 1], rates=[self._temporal_dilation_rate, 1, 1], use_sync_bn=self._use_sync_bn, norm_momentum=self._norm_momentum, norm_epsilon=self._norm_epsilon, temporal_conv_initializer=self._temporal_conv_initializer, kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer) ] branch_3_params = [ # Conv3D dict( filters=self._branch_filters[3][0], kernel_size=[1, 1, 1], padding='same', use_bias=False, 
kernel_initializer=tf_utils.clone_initializer( self._kernel_initializer), kernel_regularizer=self._kernel_regularizer), # norm dict( axis=self._channel_axis, momentum=self._norm_momentum, epsilon=self._norm_epsilon, scale=False, gamma_initializer='ones'), # relu dict(), # pool dict( pool_size=([1, 3, 3] if self._conv_type == '2d' else [3] * 3), strides=[1, 1, 1], padding='same') ] if self._use_self_gating_on_branch: branch_0_params.append(dict(filters=self._branch_filters[0][0])) branch_1_params.append(dict(filters=self._branch_filters[1][1])) branch_2_params.append(dict(filters=self._branch_filters[2][1])) branch_3_params.append(dict(filters=self._branch_filters[3][0])) out_gating_params = [] if self._use_self_gating_on_cell: out_channels = ( self._branch_filters[0][0] + self._branch_filters[1][1] + self._branch_filters[2][1] + self._branch_filters[3][0]) out_gating_params.append(dict(filters=out_channels)) return [ branch_0_params, branch_1_params, branch_2_params, branch_3_params, out_gating_params ] def build(self, input_shape): branch_params = self._build_branch_params() self._branch_0_layers = [ tf.keras.layers.Conv3D(**branch_params[0][0]), self._norm(**branch_params[0][1]), tf.keras.layers.Activation('relu', **branch_params[0][2]), ] self._branch_1_layers = [ tf.keras.layers.Conv3D(**branch_params[1][0]), self._norm(**branch_params[1][1]), tf.keras.layers.Activation('relu', **branch_params[1][2]), self._parameterized_conv_layer(**branch_params[1][3]), ] self._branch_2_layers = [ tf.keras.layers.Conv3D(**branch_params[2][0]), self._norm(**branch_params[2][1]), tf.keras.layers.Activation('relu', **branch_params[2][2]), self._parameterized_conv_layer(**branch_params[2][3]) ] if self._swap_pool_and_1x1x1: self._branch_3_layers = [ tf.keras.layers.Conv3D(**branch_params[3][0]), self._norm(**branch_params[3][1]), tf.keras.layers.Activation('relu', **branch_params[3][2]), tf.keras.layers.MaxPool3D(**branch_params[3][3]), ] else: self._branch_3_layers = [ tf.keras.layers.MaxPool3D(**branch_params[3][3]), tf.keras.layers.Conv3D(**branch_params[3][0]), self._norm(**branch_params[3][1]), tf.keras.layers.Activation('relu', **branch_params[3][2]), ] if self._use_self_gating_on_branch: self._branch_0_layers.append( nn_blocks_3d.SelfGating(**branch_params[0][-1])) self._branch_1_layers.append( nn_blocks_3d.SelfGating(**branch_params[1][-1])) self._branch_2_layers.append( nn_blocks_3d.SelfGating(**branch_params[2][-1])) self._branch_3_layers.append( nn_blocks_3d.SelfGating(**branch_params[3][-1])) if self._use_self_gating_on_cell: self.cell_self_gating = nn_blocks_3d.SelfGating(**branch_params[4][0]) super(InceptionV1CellLayer, self).build(input_shape) def call(self, inputs): x = inputs for layer in self._branch_0_layers: x = layer(x) branch_0 = x x = inputs for layer in self._branch_1_layers: x = layer(x) branch_1 = x x = inputs for layer in self._branch_2_layers: x = layer(x) branch_2 = x x = inputs for layer in self._branch_3_layers: x = layer(x) branch_3 = x out_tensor = tf.concat([branch_0, branch_1, branch_2, branch_3], axis=self._channel_axis) if self._use_self_gating_on_cell: out_tensor = self.cell_self_gating(out_tensor) return out_tensor
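# A minimal usage sketch (assumes TensorFlow 2.x and the TF model garden
# `official` package on the path). The branch filters are the illustrative
# 'Mixed_3b' values from INCEPTION_V1_ARCH_SKELETON above, not a prescribed
# configuration.
if __name__ == '__main__':
  cell = InceptionV1CellLayer(
      branch_filters=[[64], [96, 128], [16, 32], [32]], conv_type='3d')
  x = tf.random.normal([2, 8, 28, 28, 192])  # [batch, frames, H, W, channels]
  y = cell(x)
  print(y.shape)  # (2, 8, 28, 28, 256): 64 + 128 + 32 + 32 concatenated channels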
15927dcf24025279907d2e741868b61cee115051
09e57dd1374713f06b70d7b37a580130d9bbab0d
/benchmark/startQiskit_QC113.py
3d630e9b6df273b3ce63a9772869e196fbbddb71
[ "BSD-3-Clause" ]
permissive
UCLA-SEAL/QDiff
ad53650034897abb5941e74539e3aee8edb600ab
d968cbc47fe926b7f88b4adf10490f1edd6f8819
refs/heads/main
2023-08-05T04:52:24.961998
2021-09-19T02:56:16
2021-09-19T02:56:16
405,159,939
2
0
null
null
null
null
UTF-8
Python
false
false
2,588
py
# qubit number=3 # total number=11 import numpy as np from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ import networkx as nx from qiskit.visualization import plot_histogram from typing import * from pprint import pprint from math import log2 from collections import Counter from qiskit.test.mock import FakeVigo, FakeYorktown kernel = 'circuit/bernstein' def make_circuit(n:int) -> QuantumCircuit: # circuit begin input_qubit = QuantumRegister(n,"qc") prog = QuantumCircuit(input_qubit) prog.h(input_qubit[0]) # number=1 prog.h(input_qubit[1]) # number=2 prog.h(input_qubit[2]) # number=3 prog.x(input_qubit[2]) # number=6 prog.h(input_qubit[3]) # number=4 prog.y(input_qubit[3]) # number=5 for edge in E: k = edge[0] l = edge[1] prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1]) prog.p(gamma, k) prog.p(gamma, l) prog.rx(2 * beta, range(len(V))) prog.swap(input_qubit[1],input_qubit[0]) # number=7 prog.swap(input_qubit[1],input_qubit[0]) # number=8 prog.x(input_qubit[2]) # number=9 prog.x(input_qubit[2]) # number=10 # circuit end return prog if __name__ == '__main__': n = 4 V = np.arange(0, n, 1) E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)] G = nx.Graph() G.add_nodes_from(V) G.add_weighted_edges_from(E) step_size = 0.1 a_gamma = np.arange(0, np.pi, step_size) a_beta = np.arange(0, np.pi, step_size) a_gamma, a_beta = np.meshgrid(a_gamma, a_beta) F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * ( 1 + np.cos(4 * a_gamma) ** 2) result = np.where(F1 == np.amax(F1)) a = list(zip(result[0], result[1]))[0] gamma = a[0] * step_size beta = a[1] * step_size prog = make_circuit(4) sample_shot =5600 writefile = open("../data/startQiskit_QC113.csv", "w") # prog.draw('mpl', filename=(kernel + '.png')) IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() backend = provider.get_backend("ibmq_5_yorktown") circuit1 = transpile(prog, FakeYorktown()) circuit1.measure_all() prog = circuit1 info = execute(prog,backend=backend, shots=sample_shot).result().get_counts() print(info, file=writefile) print("results end", file=writefile) print(circuit1.depth(), file=writefile) print(circuit1, file=writefile) writefile.close()
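# A hedged local alternative: the same measured circuit can be sampled on the
# Aer simulator already imported above, avoiding the IBMQ account and the
# ibmq_5_yorktown queue (shot count here is arbitrary; uncomment inside the
# __main__ block to run):
# backend_sim = Aer.get_backend('qasm_simulator')
# print(execute(prog, backend=backend_sim, shots=1024).result().get_counts())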
2d2395a4ac28c64b53583e2fe39ad243adc5160f
2db1a0038d26ccb6adc572b536cb5cd401fd7498
/tryTen/Lib/encodings/iso2022_jp_2004.py
3c1eaafdaa145af7e2fa8dab5dc18e48af4c3667
[]
no_license
syurk/labpin
e795c557e7d7bcd4ff449cb9a3de32959a8c4968
04070dd5ce6c0a32c9ed03765f4f2e39039db411
refs/heads/master
2022-12-12T02:23:54.975797
2018-11-29T16:03:26
2018-11-29T16:03:26
159,692,630
0
1
null
2022-11-19T12:15:55
2018-11-29T16:04:20
Python
UTF-8
Python
false
false
1,074
py
# # iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004 # # Written by Hye-Shik Chang <[email protected]> # import _codecs_iso2022, codecs import _multibytecodec as mbc codec = _codecs_iso2022.getcodec('iso2022_jp_2004') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='iso2022_jp_2004', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
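# A minimal usage sketch: once Python's codec registry resolves the name, the
# codec is driven through the ordinary str/bytes API (sample text is
# illustrative; ISO-2022 encodings are stateful, escape-sequence based):
#
# data = 'テスト'.encode('iso2022_jp_2004')
# assert data.decode('iso2022_jp_2004') == 'テスト'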
a9488eccd4dbce8dcc9a434b97e18a37d6b2374c
fe9b3bbffe50c7cc6c61f08176cc12ede5b6a90e
/solutions/2039. The Time When the Network Becomes Idle/2039.py
356df5e55b05a74b32c6ed1bbf5981746b22ee95
[]
no_license
freephys/LeetCode-6
56778d3d5d37b5c3370295c5ada30f4a45fa76f6
bbabe6ace73b0176a2210dc0a20cc2d8e5566122
refs/heads/main
2023-09-03T19:21:16.985774
2021-10-19T06:01:34
2021-10-19T06:01:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
783
py
from collections import deque
from math import inf
from typing import List


class Solution:
    def networkBecomesIdle(self, edges: List[List[int]], patience: List[int]) -> int:
        n = len(patience)
        ans = 0
        dist = [inf] * n  # dist[i] := dist(i, 0)
        graph = [[] for _ in range(n)]

        for u, v in edges:
            graph[u].append(v)
            graph[v].append(u)

        q = deque([0])
        dist[0] = 0

        while q:
            for _ in range(len(q)):
                node = q.popleft()
                for child in graph[node]:
                    if dist[child] == inf:
                        dist[child] = dist[node] + 1
                        q.append(child)

        for i in range(1, n):
            numResending = (dist[i] * 2 - 1) // patience[i]
            lastResendingTime = patience[i] * numResending
            lastArrivingTime = lastResendingTime + dist[i] * 2
            ans = max(ans, lastArrivingTime)

        return ans + 1
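# A small self-check (values are LeetCode 2039's published example 1; the
# expected answer is 8), runnable now that the imports above are in place.
if __name__ == '__main__':
    print(Solution().networkBecomesIdle([[0, 1], [1, 2]], [0, 2, 1]))  # -> 8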
a52aba24ee0c24c2f845c80c62c29ce3528659b8
91fe8f479fa921fa84111d19222a5c6aa6eff030
/basis/progr-py/Gui/Tour/grid2_same.py
6eab34889e18fd70042bc8b849e9ada203ed8582
[]
no_license
romanticair/python
2055c9cdaa46894c9788d5797643283786ed46dd
6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141
refs/heads/master
2022-11-03T17:17:17.608786
2019-07-05T07:07:29
2019-07-05T07:07:29
195,356,190
0
1
null
2022-10-14T20:51:14
2019-07-05T07:00:33
Python
UTF-8
Python
false
false
553
py
""" build pack and grid forms on different frames in same window; can't grid and pack in same parent container (e.g., root window) but can mix in same window if done in different parent frames; """ from tkinter import * from grid2 import gridbox, packbox root = Tk() Label(root, text='Grid:').pack() frm = Frame(root, bd=5, relief=RAISED) frm.pack(padx=5, pady=5) gridbox(frm) Label(root, text='Pack:').pack() frm = Frame(root, bd=5, relief=RAISED) frm.pack(padx=5, pady=5) packbox(frm) Button(root, text='Quit', command=root.quit).pack() mainloop()
a6f1e1eb27d29d840568bd9dee440e51a8b78d9f
9135a1c5bcd20f77971085496d9e966d892fb7e9
/reports/python/jobRunsReport/jobRunsReport.py
1c20786a601006a31a3fbc95bf8f14abc0b6b594
[ "Apache-2.0" ]
permissive
bseltz-cohesity/scripts
cc54b2b3534175562b0d9cfba85bd63aa5ca2346
53c4b057bb4f41ae079fc8236caacf13fd35c10e
refs/heads/master
2023-08-23T13:13:12.169724
2023-08-22T13:21:22
2023-08-22T13:21:22
142,414,700
83
44
Apache-2.0
2023-08-24T11:42:22
2018-07-26T08:50:47
PowerShell
UTF-8
Python
false
false
4,679
py
#!/usr/bin/env python
"""job runs report"""

# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime
import codecs

# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)        # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True)   # username
parser.add_argument('-d', '--domain', type=str, default='local')   # (optional) domain - defaults to local
parser.add_argument('-i', '--useApiKey', action='store_true')      # use API key authentication
parser.add_argument('-pwd', '--password', type=str, default=None)  # optional password
parser.add_argument('-j', '--jobname', type=str, required=True)
parser.add_argument('-n', '--numruns', type=int, default=100)
parser.add_argument('-y', '--days', type=int, default=7)
parser.add_argument('-units', '--units', type=str, choices=['MB', 'GB', 'mb', 'gb'], default='MB')
args = parser.parse_args()

vip = args.vip
username = args.username
domain = args.domain
password = args.password
useApiKey = args.useApiKey
jobname = args.jobname
numruns = args.numruns
days = args.days
units = args.units

multiplier = 1024 * 1024
if units.lower() == 'gb':
    multiplier = 1024 * 1024 * 1024

# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, noretry=True)

now = datetime.now()
nowUsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S"))
daysBackUsecs = timeAgo(days, 'days')

# outfile
cluster = api('get', 'cluster')
dateString = now.strftime("%Y-%m-%d")
outfile = 'jobRunsReport-%s-%s-%s.csv' % (cluster['name'], jobname, dateString)
f = codecs.open(outfile, 'w')

# headings
f.write('Run Date,Run Type,Duration Seconds,Status,Data Read (%s),Data Written (%s),Success,Error\n' % (units, units))

jobs = api('get', 'data-protect/protection-groups?isDeleted=false&isActive=true&includeTenants=true', v=2)
job = [j for j in jobs['protectionGroups'] if j['name'].lower() == jobname.lower()]
if len(job) == 0:
    print('job %s not found' % jobname)
    exit()
else:
    job = job[0]

finishedStates = ['kCanceled', 'kSuccess', 'kFailure', 'kWarning', 'kCanceling', '3', '4', '5', '6']

endUsecs = nowUsecs
while 1:
    runs = api('get', 'data-protect/protection-groups/%s/runs?numRuns=%s&endTimeUsecs=%s&includeTenants=true&includeObjectDetails=true' % (job['id'], numruns, endUsecs), v=2)
    for run in runs['runs']:
        try:
            if 'localBackupInfo' in run:
                info = run['localBackupInfo']
            else:
                info = run['archivalInfo']['archivalTargetResults'][0]
            endUsecs = info['startTimeUsecs'] - 1
            runtype = info['runType'][1:]
            if runtype == 'Regular':
                runtype = 'Incremental'
            startTimeUsecs = info['startTimeUsecs']
            if 'endTimeUsecs' in info:
                endTimeUsecs = info['endTimeUsecs']
            else:
                endTimeUsecs = nowUsecs
            durationSecs = round((endTimeUsecs - startTimeUsecs) / 1000000, 0)
            runStartTime = usecsToDate(info['startTimeUsecs'])
            if info['startTimeUsecs'] < daysBackUsecs:
                break
            if 'localSnapshotStats' in info:
                bytesread = round(info['localSnapshotStats']['bytesRead'] / multiplier, 2)
                byteswritten = round(info['localSnapshotStats']['bytesWritten'] / multiplier, 2)
                numsuccess = len([o for o in run['objects'] if o['localSnapshotInfo']['snapshotInfo']['status'] in ['kSuccessful', 'kWarning']])
                numfailed = len([o for o in run['objects'] if o['localSnapshotInfo']['snapshotInfo']['status'] == 'kFailed'])
            else:
                bytesread = ''
                byteswritten = ''
                if 'stats' in info and 'bytesRead' in info['stats']:
                    bytesread = round(info['stats']['bytesRead'] / multiplier, 2)
                if 'stats' in info and 'physicalBytesTransferred' in info['stats']:
                    byteswritten = round(info['stats']['physicalBytesTransferred'] / multiplier, 2)
                numsuccess = ''
                numfailed = ''
            status = info['status']
            print("    %s  %s" % (runStartTime, status))
            f.write('"%s","%s","%s","%s","%s","%s","%s","%s"\n' % (runStartTime, runtype, durationSecs, status, bytesread, byteswritten, numsuccess, numfailed))
        except Exception as e:
            print('exception: %s' % e)
    if len(runs['runs']) < numruns:
        break

f.close()
print('\nOutput saved to %s\n' % outfile)
2b6c535bcc9b8cbd7dad3ca4768e3cf8edf3e7b6
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03455/s099249586.py
ed7bae5ea3248483cf2e487ab2a7bf12c75b9a63
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
188
py
import sys input_list = sys.stdin for num in input_list: num_list = num.split(' ') a = int(num_list[0]) % 2 b = int(num_list[1]) % 2 ans = 'Even' if a * b == 1: ans = 'Odd' print(ans)
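# Usage sketch (hedged): the script reads "a b" pairs, one per line of stdin,
# until EOF and prints the parity of a*b for each line, e.g.
#   $ echo "3 4" | python s099249586.py
#   Even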
04876ea4060849c387eef743dca95dec82adb55c
0b8ae7a9bc185b76c64cb353808da7713c4b8ecd
/baekjoon/[Bruteforce]/연습/[Brute_Force]*스타트와 링크.py
12fc86f41061398ecda8721884d633c54fd7a0fa
[]
no_license
snowedev/baekjoon-code.plus
b5f468b99fa70b00521657122f46cef575cabb9b
2f983a9c559e803f6dcdeb549aa8d304ff5a664c
refs/heads/master
2023-04-05T22:22:01.652744
2021-04-14T16:11:57
2021-04-14T16:11:57
357,951,541
0
0
null
null
null
null
UTF-8
Python
false
false
3,158
py
# Start and Link
# B_14889
# N constraint (4 <= N <= 20)
# Number of ways to pick N/2 of the N people and split them into two teams: C(N, N/2) -> C(20, 10)
# Number of cases is under one million
"""
1. Solving with backtracking
(Backtracking: brute force using recursive calls.)
 * When the required answer can no longer be reached, skipping the unnecessary cases
   (by cutting off the recursion) makes the run dramatically faster
"""
def go(index, first, second):
    if index == n:
        if len(first) != n//2:
            return -1
        if len(second) != n//2:
            return -1
        t1 = 0
        t2 = 0
        for i in range(n//2):
            for j in range(n//2):
                if i == j:
                    continue
                t1 += s[first[i]][first[j]]
                t2 += s[second[i]][second[j]]
        diff = abs(t1-t2)
        return diff
    # Backtracking pruning condition: once a team grows past n/2, there is no need to go further
    if len(first) > n//2:
        return -1
    if len(second) > n//2:
        return -1
    ans = -1
    t1 = go(index+1, first+[index], second)
    if ans == -1 or (t1 != -1 and ans > t1):
        ans = t1
    t2 = go(index+1, first, second+[index])
    if ans == -1 or (t2 != -1 and ans > t2):
        ans = t2
    return ans

n = int(input())
s = [list(map(int,input().split())) for _ in range(n)]
print(go(0, [], []))

"""
2. Bitmask solution
n = int(input())
s = [list(map(int,input().split())) for _ in range(n)]
ans = -1
for i in range((1<<n)):
    cnt = 0
    for j in range(n):
        if (i&(1<<j)) > 0:
            cnt += 1
    if cnt != n//2:
        continue
    first = []
    second = []
    for j in range(n):
        if (i&(1<<j)) > 0:
            first += [j]
        else:
            second += [j]
    if len(first) != n//2:
        continue
    t1 = 0
    t2 = 0
    for l1 in range(n//2):
        for l2 in range(n//2):
            if l1 == l2:
                continue
            t1 += s[first[l1]][first[l2]]
            t2 += s[second[l1]][second[l2]]
    diff = abs(t1-t2)
    if ans == -1 or ans > diff:
        ans = diff
print(ans)
"""

"""
3. Brute-force solution
def next_permutation(a):
    i = len(a)-1
    while i > 0 and a[i-1] >= a[i]:
        i -= 1
    if i <= 0:
        return False
    j = len(a)-1
    while a[j] <= a[i-1]:
        j -= 1
    a[i-1],a[j] = a[j],a[i-1]
    j = len(a)-1
    while i < j:
        a[i],a[j] = a[j],a[i]
        i += 1
        j -= 1
    return True

n = int(input())
a = [list(map(int,input().split())) for _ in range(n)]
b = [0 if i < n/2 else 1 for i in range(n)]
ans = 100000000
while True:
    first = []
    second = []
    for i in range(n):
        if b[i] == 0:
            first.append(i)
        else:
            second.append(i)
    one = 0
    two = 0
    for i in range(n//2):
        for j in range(n//2):
            if i == j:
                continue
            one += a[first[i]][first[j]]
            two += a[second[i]][second[j]]
    diff = abs(one-two)
    if ans > diff:
        ans = diff
    if not next_permutation(b):
        break
print(ans)
"""
ca4654278aafa08267bc95a5e8282e44e8ff23d0
a44557a162d42edfce3c51d3cf27363342591f96
/server/client.py
cf5d50bfb0557df17486b2ccecb1ed8b91276448
[]
no_license
Jeffrey-Ede/simple-webserver
57d9370e630a5949106bd55d32120b898b848543
0da4de4567f6e46487ce70af1936e116b7c67339
refs/heads/master
2020-07-09T14:59:04.047471
2019-08-23T13:26:13
2019-08-23T13:26:13
204,003,153
0
0
null
null
null
null
UTF-8
Python
false
false
1,755
py
from socketIO_client import SocketIO
import zlib
import numpy as np
from scipy.misc import imread
import cv2

from helpers import *

model_dir = "//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/stem-random-walk-nin-20-54/"

addr = model_dir + "truth-975000.tif"
inputs = imread(addr, mode='F')
inputs = np.random.rand(512,512)

#inputs = cv2.resize(inputs, (103,103), interpolation=cv2.INTER_AREA)

#maximum = np.max(inputs)
#minimum = np.min(inputs)
#inputs = (128*norm_img(inputs)).clip(-127, 128)

###############################################################################

message = zlib.compress(inputs.astype(np.float16).tostring()).hex()

import time
t0 = time.time()

def handle_processed_data(stringarray):
    #img = np.fromstring(zlib.decompress(bytes.fromhex(stringarray["data"])), dtype=np.float16).reshape((512,512))
    # use wall-clock time consistently with the t0 set before each emit
    print("time", time.time()-t0)
    #disp(img)

#def handle_processed_data(stringarray):
#    #img = np.fromstring(zlib.decompress(bytes.fromhex(stringarray)), dtype=np.float16).reshape((512,512))
#    print("time", time.time()-t0)
#    #disp(img)

#Establish connection to server
socketIO = SocketIO('137.205.164.177', 8000, async_mode="gevent", engineio_logger=True)
#socketIO = SocketIO('137.205.164.200', 8000, async_mode="gevent", engineio_logger=True)
socketIO.wait(seconds=1)

socketIO.on("processed", handle_processed_data)

###############################################################################

for _ in range(5):
    t0 = time.time()
    #socketIO.emit('process', message)
    socketIO.emit('process', message)
    socketIO.wait(seconds=1)

#socketIO.wait(seconds=1)
bd0dfb49776e3f17cd52e506a9c1a66a185ca7dd
ab6cfc2aedad3de7a04efae4a6105dc893958b9e
/hivwholeseq/cross_sectional/clean_reference_alignment.py
0cba991807859d03aeaa484bd25f53dd8312ead1
[ "MIT" ]
permissive
neherlab/hivwholeseq
158c0ce590bc67d1d36042c71b8b0afa3e8d8abf
978ce4060362e4973f92b122ed5340a5314d7844
refs/heads/master
2021-01-15T16:48:15.769316
2015-09-04T08:33:52
2015-09-04T08:33:52
49,801,765
4
3
null
2016-01-17T03:43:46
2016-01-17T03:43:44
null
UTF-8
Python
false
false
2,847
py
# vim: fdm=marker
'''
author:     Fabio Zanini
date:       15/01/15
content:    Clean a reference alignment of LANL sequences from problematic seqs.
'''
# Modules
import os
import argparse
from collections import defaultdict
from Bio import AlignIO

from hivwholeseq.cross_sectional.filenames import (
    get_raw_LANL_sequences_filename,
    get_subtype_reference_alignment_filename)
from hivwholeseq.utils.sequence import align_codon_pairwise



# Globals



# Functions
def filter_sequence(seq, VERBOSE=0):
    '''Keep only sequences with at most two ambiguous characters'''
    seqstr = ''.join(seq).upper()
    n_amb = len(seqstr) - sum(map(seqstr.count, ('A', 'C', 'G', 'T', '-')))
    if n_amb > 2:
        return False

    return True


def clean_reference_alignment(region, refname, VERBOSE=0,
                              subtype='B',
                             ):
    '''Clean reference alignment'''
    from hivwholeseq.reference import load_custom_reference
    from Bio import SeqIO
    from Bio.Align import MultipleSeqAlignment
    from hivwholeseq.cross_sectional.filenames import (
        get_subtype_reference_alignment_filename)

    fn = get_subtype_reference_alignment_filename(region,
                                                  subtype=subtype,
                                                  refname=refname,
                                                  VERBOSE=VERBOSE)
    ali = AlignIO.read(fn, 'fasta')
    nseqs = len(ali)

    ali = MultipleSeqAlignment(filter(filter_sequence, ali))
    nseqsnew = len(ali)

    if VERBOSE >= 2:
        print refname, region, subtype+':', nseqsnew, 'of', nseqs, 'seqs kept'

    return ali



# Script
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Clean a reference alignment',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--region', required=True,
                        help='Region to align (e.g. V3)')
    parser.add_argument('--reference', default='HXB2',
                        help='Reference to use for alignment')
    parser.add_argument('--verbose', type=int, default=0,
                        help='Verbosity level [0-4]')
    parser.add_argument('--subtype', default='B',
                        help='Subtype to analyze')

    args = parser.parse_args()
    region = args.region
    refname = args.reference
    VERBOSE = args.verbose
    subtype = args.subtype

    ali = clean_reference_alignment(region, refname,
                                    subtype=subtype,
                                    VERBOSE=VERBOSE)

    fn = get_subtype_reference_alignment_filename(region,
                                                  subtype=subtype,
                                                  refname=refname,
                                                  VERBOSE=VERBOSE)
    AlignIO.write(ali, fn, 'fasta')
78ef491bb7f375603e96284d1a56004c0d3f2715
207bc9a3e7a9f035353963876757745ddbcfc384
/knet/tests/accounts/test_oauth.py
9692e639f90aae1d3d13c51afdf526b41f76f4e3
[]
no_license
oddbird/knet
e6322cbca0350dc78d2a4e824a84d81f42960878
30e41c37dd608cbc8f1bd794cb30c7d935cf6723
refs/heads/master
2021-01-25T07:18:53.337897
2013-07-27T20:19:33
2013-07-27T20:19:33
9,507,222
0
0
null
2013-07-27T20:43:07
2013-04-17T20:46:55
Python
UTF-8
Python
false
false
726
py
from django.core.urlresolvers import reverse from django.test.utils import override_settings from pretend import stub from knet.accounts.oauth import get_provider class FakeProvider(stub): pass @override_settings( BASE_URL = 'http://example.com', OAUTH_PROVIDER='knet.tests.accounts.test_oauth.FakeProvider', OAUTH_CLIENT_ID='client id', OAUTH_CLIENT_SECRET='client secret') def test_get_provider(): provider = get_provider(redirect_to='/foo/') assert isinstance(provider, FakeProvider) assert provider.redirect_uri == ( 'http://example.com' + reverse('oauth') + '?next=%2Ffoo%2F') assert provider.client_id == 'client id' assert provider.client_secret == 'client secret'
f750b7d131900809440e3ecca557b7f423d4ec30
8c8ed3e1046655beddc768672ca45e16a7a1c15e
/skbm/generate_dataset.py
25290c34ff04c022b713a1c9b9794723a79d5c10
[]
no_license
DSABenchmark/DSAB
e44759eb31d369d3a9a63505588ee14481c60dfe
7af890230add7ac3039c15061eb130f86b4de3b8
refs/heads/master
2020-04-03T11:23:04.675308
2018-12-05T12:26:15
2018-12-05T12:26:15
155,220,126
2
1
null
null
null
null
UTF-8
Python
false
false
2,977
py
import sys
import random
from scipy.stats import powerlaw
import math
import os
import string

'''
input:
    dir - directory of the dataset
    distributionName - dataset type(zipf/powerlaw/weibull)
    bytePerStr - bytes number per item
    totalNum - total number in dataset
    distinctNum - distinct number in dataset
    parameter1 - for zipf/powerlaw, skewness;
                 for weibull, a parameter in discrete Weibull distribution (0<parameter1<1)
                 (ref http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Discreteweibull.pdf)
    parameter2 - only for weibull, a positive shape parameter
output:
    .dat file - for zipf/powerlaw, 'dir+ distributionName+ parameter1.dat'
                for weibull, 'dir+ distributionName+ parameter1_parameter2.dat'
'''

# note: this local definition shadows scipy.stats.powerlaw imported above
def powerlaw(N, s):
    res = []
    base = 0.0
    for n in range(1, N + 1):
        t = 1 / (n ** s)
        base += t
        res.append(t)
    return [r / base for r in res]

def weibull(N, p, k):
    res = []
    for n in range(0, N):
        power1 = n ** k
        p1 = (1 - p) ** power1
        power2 = (n + 1) ** k
        p2 = (1 - p) ** power2
        res.append(p1 - p2)
    return res

def random_bytes(byts):
    st = ''
    for j in range(byts):
        st += random.choice(string.printable[:-5])
    b = bytes(st, encoding = 'utf-8')
    return b

def gen_random_strings(count, byts):
    strs = set()
    res = []
    for i in range(count):
        s = os.urandom(byts)
        while s in strs:
            s = os.urandom(byts)
        res.append(s)
        strs.add(s)
    return res

def gen(freqs, byts):
    data = []
    strs = gen_random_strings(len(freqs), byts)
    chs = [i for i in range(len(freqs))]
    while len(chs) != 0:
        p = random.randint(0, len(chs) - 1)
        pos = chs[p]
        data.append(strs[pos])
        freqs[pos] -= 1
        if freqs[pos] == 0:
            del chs[p]
    random.shuffle(data)
    return data

def dataset_write(fp, distriName, byts, tot, dis, pa1, pa2):
    if distriName == 'zipf' or distriName == 'powerlaw':
        props = powerlaw(dis, pa1)
    elif distriName == 'weibull':
        props = weibull(dis, pa1, pa2)
    # if not os.path.exists(fp):
    #     os.mkdir(fp)
    freq = [round(prop * tot)+1 for prop in props]
    dataset = gen(freq, byts)
    #print(len(dataset))
    if distriName == 'zipf' or distriName == 'powerlaw':
        # fpath = fp + distriName + str(pa1) + '.dat'
        fpath = fp
    elif distriName == 'weibull':
        # fpath = fp + distriName + str(pa1) + '_' + str(pa2) + '.dat'
        fpath = fp
    with open(fpath, 'wb') as f:
        for d in dataset:
            f.write(d)
    return fpath

if __name__ == "__main__":
    dir = 'dataset/'
    distributionName = 'weibull'
    bytePerStr = 4
    totalNum = 1000000
    distinctNum = 50000
    parameter1 = 0.3
    parameter2 = 2
    dataset_write(dir, distributionName, bytePerStr, totalNum, distinctNum, parameter1, parameter2)
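# A quick sanity check on the two frequency generators above (parameter values
# here are illustrative). powerlaw() normalizes exactly, and weibull()
# telescopes to 1 - (1 - p) ** (N ** k), so both sums sit at (or just under) 1:
#
#   >>> abs(sum(powerlaw(1000, 1.1)) - 1.0) < 1e-9
#   True
#   >>> 0.99 < sum(weibull(1000, 0.3, 2)) <= 1.0
#   True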
338db2c5671740e8cf914067ab343ce6f98b8c7e
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
/python/strongh_storm-drpc-client-py/storm-drpc-client-py-master/storm/drpc.py
f2e2a2c86713ccfb33f3c443049113a066b9410e
[]
no_license
LiuFang816/SALSTM_py_data
6db258e51858aeff14af38898fef715b46980ac1
d494b3041069d377d6a7a9c296a14334f2fa5acc
refs/heads/master
2022-12-25T06:39:52.222097
2019-12-12T08:49:07
2019-12-12T08:49:07
227,546,525
10
7
null
2022-12-19T02:53:01
2019-12-12T07:29:39
Python
UTF-8
Python
false
false
2,017
py
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from repoze.lru import lru_cache
from DistributedRPC import Client
from sasl_client_transport import SaslClientTransport
import json

STORM_SASL_SERVICE = 'storm_thrift_server'

class DRPCClient:
    def __init__(self, host, port=3772, timeout=None, reconnect=False, use_plain_sasl=False):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.reconnect = reconnect
        self.use_plain_sasl = use_plain_sasl
        if not reconnect:
            self.connect()

    def connect(self):
        self.socket = TSocket.TSocket(self.host, self.port)
        if self.timeout:
            self.socket.setTimeout(self.timeout)
        if self.use_plain_sasl:
            self.transport = SaslClientTransport(self.host, self.socket, service=STORM_SASL_SERVICE, mechanism='PLAIN')
        else:
            self.transport = TTransport.TFramedTransport(self.socket)
        self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
        self.transport.open()
        self.client = Client(self.protocol)

    def execute(self, func, args):
        if self.reconnect:
            self.connect()
        r = json.loads(self.client.execute(func, args))
        if self.reconnect:
            self.close()
        return r

    def executeJSON(self, func, **kwargs):
        return self.execute(func, json.dumps(kwargs))

    def close(self):
        self.transport.close()

class DRPCLRUClient(DRPCClient):
    def __init__(self, host, port=3772, timeout=None, cache_size=50, reconnect=False, use_plain_sasl=False):
        self.host = host
        self.port = port
        self.timeout = timeout
        # wrap execute() in an LRU cache of the caller-supplied size
        self.cache = lru_cache(maxsize=cache_size)
        self.execute = self.cache(self.execute)
        self.reconnect = reconnect
        self.use_plain_sasl = use_plain_sasl
        if not reconnect:
            self.connect()
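# A usage sketch under stated assumptions: the host, function name, and
# argument below are placeholders, not values from this file, and a reachable
# Storm DRPC server is required. execute() returns the server's JSON response
# already parsed.
#
# client = DRPCClient('drpc.example.com', port=3772, timeout=5000)
# result = client.execute('my_drpc_function', 'some-argument')
# client.close()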
8fc0f772e77af4b547b18176adc8fbf3a50a97c7
ce083128fa87ca86c65059893aa8882d088461f5
/python/flask-mail-labs/.venv/lib/python2.7/site-packages/alembic/ddl/oracle.py
554ac221ab786d06459486e4572bad48841e3978
[]
no_license
marcosptf/fedora
581a446e7f81d8ae9a260eafb92814bc486ee077
359db63ff1fa79696b7bc803bcfa0042bff8ab44
refs/heads/master
2023-04-06T14:53:40.378260
2023-03-26T00:47:52
2023-03-26T00:47:52
26,059,824
6
5
null
2022-12-08T00:43:21
2014-11-01T18:48:56
null
UTF-8
Python
false
false
2,773
py
from sqlalchemy.ext.compiler import compiles from .impl import DefaultImpl from .base import alter_table, AddColumn, ColumnName, \ format_column_name, ColumnNullable, \ format_server_default, ColumnDefault, format_type, ColumnType class OracleImpl(DefaultImpl): __dialect__ = 'oracle' transactional_ddl = False batch_separator = "/" command_terminator = "" def __init__(self, *arg, **kw): super(OracleImpl, self).__init__(*arg, **kw) self.batch_separator = self.context_opts.get( "oracle_batch_separator", self.batch_separator) def _exec(self, construct, *args, **kw): result = super(OracleImpl, self)._exec(construct, *args, **kw) if self.as_sql and self.batch_separator: self.static_output(self.batch_separator) return result def emit_begin(self): self._exec("SET TRANSACTION READ WRITE") def emit_commit(self): self._exec("COMMIT") @compiles(AddColumn, 'oracle') def visit_add_column(element, compiler, **kw): return "%s %s" % ( alter_table(compiler, element.table_name, element.schema), add_column(compiler, element.column, **kw), ) @compiles(ColumnNullable, 'oracle') def visit_column_nullable(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "NULL" if element.nullable else "NOT NULL" ) @compiles(ColumnType, 'oracle') def visit_column_type(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "%s" % format_type(compiler, element.type_) ) @compiles(ColumnName, 'oracle') def visit_column_name(element, compiler, **kw): return "%s RENAME COLUMN %s TO %s" % ( alter_table(compiler, element.table_name, element.schema), format_column_name(compiler, element.column_name), format_column_name(compiler, element.newname) ) @compiles(ColumnDefault, 'oracle') def visit_column_default(element, compiler, **kw): return "%s %s %s" % ( alter_table(compiler, element.table_name, element.schema), alter_column(compiler, element.column_name), "DEFAULT %s" % format_server_default(compiler, element.default) if element.default is not None else "DEFAULT NULL" ) def alter_column(compiler, name): return 'MODIFY %s' % format_column_name(compiler, name) def add_column(compiler, column, **kw): return "ADD %s" % compiler.get_column_specification(column, **kw)
0b7b856a5e1e08c1b8295026c552fa5de82b3e2f
7925e4ff3950cd94275779a51f80d9f2078fe132
/algorithm_python/로또_만들기(lotto).py
51ff56e714528d74cb1377fd968769821ffee639
[]
no_license
Covee/Algorithm_Prac_Python
6e7336fbb62cd1e2b3b0215d34618481540983c4
42ea338a96949ac8b420e83320610a435b2cee59
refs/heads/master
2020-03-23T07:35:33.693367
2018-08-11T17:27:37
2018-08-11T17:27:37
141,280,858
0
0
null
null
null
null
UTF-8
Python
false
false
3,294
py
# Baekjoon / 10947
# Acceptance rate: 3.83%
# Problem: make a lotto
# Print 6 numbers between 1 and 45 inclusive. The judge uses randomness to pick '6 numbers plus one bonus number'.
# If all 6 of your numbers match the judge's numbers, you get 100 points.
# If 5 numbers match and the mismatched one matches the bonus number, you get 80 points.
# If 5 numbers match you get 60 points. If 4 numbers match you get 40 points. If 3 numbers match you get 20 points.
# In any other case you get a Wrong Answer.

import random

def lotto(my_pick, bonus):
    lottolist = []
    lottobonus = []
    for i in range(0, 6):
        pick = random.randint(1, 45)
        while pick in lottolist:  # If the number already exists in the list, redraw until there is no duplicate, then move on.
            pick = random.randint(1, 45)
        lottolist.append(pick)

    pick = random.randint(1, 45)  # Draw one bonus number (again avoiding duplicates)
    while pick in lottobonus:
        pick = random.randint(1, 45)
    lottobonus.append(pick)

    lottolist.sort()
    my_pick.sort()

    # Sample values for checking the program works (because to verify the first-prize path works, you have to actually win first prize...)
    # lottolist = [1, 2, 3, 4, 5, 30]
    # lottobonus = [7]

    print("Drawn numbers: " + str(lottolist) + " / bonus number " + str(lottobonus))
    print("Entered numbers: " + str(my_pick) + " / bonus number " + str(bonus))

    if my_pick == lottolist:
        print("All 6 numbers match: 100 points.")
    else:
        count = 0
        for i in range(0, 6):
            for j in range(0, 6):
                if my_pick[i] == lottolist[j]:
                    count += 1
        if count == 5:
            if bonus == lottobonus:
                print("5 numbers match and the bonus number matches: 80 points.")
            else:
                print("5 numbers match: 60 points.")
        elif count == 4:
            print("4 numbers match: 40 points.")
        elif count == 3:
            print("3 numbers match: 30 points.")
        elif count == 2:
            print("2 numbers match: 20 points.")
        elif count <= 1:
            print("'If you gamble, you're doomed to go broke!'")

my_pick = list(map(int, input("Enter 6 lotto numbers between 1 and 45, separated by spaces (otherwise it will likely error out): ").split(" ")))
bonus = list(map(int, input("Enter the bonus number: ").split()))
lotto(my_pick, bonus)

# comment:
# I thought this would be simple, but it took more work than expected, and the idea of filtering out
# duplicates with a while loop and redrawing until there are none can be confusing to grasp at first.
# Also, I didn't code input validation (entering 7 or 5 numbers instead of 6, or numbers outside 1-45). Too tedious..
# I'll add it when I get the chance; it's just an if condition, plus raising an error when my_pick contains duplicates, so it shouldn't be hard.
# I've never played the lottery, so it was a bit confusing. That happens.
ee6653a72b02fd21634a17952ea141bfba570c33
b11e5545ae0de900a0bac981555d2584fbc719ce
/0x0A-python-inheritance/8-rectangle.py
8ae5581173d3061e4eaaea314725fd1364cbf5cf
[]
no_license
AndresCallejasG/holbertonschool-higher_level_programming
6fa844002dad56545bc500c839e88cd5c71033f5
062fe54721ef0fb3c02da1a7d9b2327496b0838a
refs/heads/master
2022-12-19T00:23:09.277883
2020-09-28T20:19:18
2020-09-28T20:19:18
259,443,426
1
0
null
null
null
null
UTF-8
Python
false
false
549
py
#!/usr/bin/python3
"""Inheritance project """
BaseGeometry = __import__("7-base_geometry").BaseGeometry


class Rectangle(BaseGeometry):
    """ Rectangle class, inherits from BaseGeometry """

    def __init__(self, width, height):
        """init method

        Arguments:
            width {[type]} -- Rectangle width
            height {[type]} -- Rectangle height
        """
        super().integer_validator("width", width)
        self.__width = width
        super().integer_validator("height", height)
        self.__height = height
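# A minimal usage sketch, assuming the 7-base_geometry module imported above is
# available and its integer_validator rejects non-positive values:
#
# r = Rectangle(3, 5)   # passes validation
# Rectangle(0, 5)       # raises ValueError("width must be greater than 0")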
af30ff3d5dbe26b5a0df0ded652657eec22ba988
8b606215d26314c046b24f779cad1d29679de73f
/GroundSegment/Telemetry/migrations/0016_auto_20190214_2113.py
b9d30f70f9242d23cc76484e7c700d502575f117
[]
no_license
unlamgidsa/unlam_gs_backend
b5a76660458fd43557602840eb24f838aabc4ce2
6284a5d55b8fe3b5b7c8f3a8def505409f7ea735
refs/heads/master
2023-07-20T10:30:30.618320
2022-12-29T21:55:51
2022-12-29T21:55:51
244,700,010
0
0
null
null
null
null
UTF-8
Python
false
false
930
py
# Generated by Django 2.0.7 on 2019-02-14 21:13 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('Telemetry', '0015_auto_20190214_1900'), ] operations = [ migrations.AddField( model_name='bltlmyvar', name='endTlmy', field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='endBLTmys', to='Telemetry.TlmyVar'), ), migrations.AddField( model_name='bltlmyvar', name='startTlmy', field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='startBLTlmys', to='Telemetry.TlmyVar'), ), migrations.AddField( model_name='tlmyvarlevel', name='lastDateTime', field=models.DateTimeField(null=True), ), ]
59fe96776dc25c7660fe1ba570f9db92e694dcf2
37c3b81ad127c9e3cc26fa9168fda82460ca9bda
/SW_expert/sw_1244_최대상금.py
c123a5bcc2adb0fe196f3bb4e52211faaabb1755
[]
no_license
potomatoo/TIL
5d85b69fdaed68966db7cfe2a565b7c64ed3e816
395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c
refs/heads/master
2021-07-08T16:19:40.410097
2021-04-19T02:33:40
2021-04-19T02:33:40
238,872,774
0
0
null
null
null
null
UTF-8
Python
false
false
774
py
import sys sys.stdin = open('./input/input_1244.txt','r') def getMostMony(k): global ans if k == n: ans = max(int(''.join(arr)), ans) return for i in range(N-1): for j in range(i+1, N): arr[i], arr[j] = arr[j] , arr[i] middle = int(''.join(arr)) if middle in visit[k+1]: continue visit[k+1].add(middle) getMostMony(k+1) arr[i], arr[j] = arr[j], arr[i] TC = int(input()) for tc in range(TC): number, n = map(str,input().split()) n = int(n) arr = [] ans = 0 for i in range(len(number)): arr.append(number[i]) N = len(arr) visit = [set() for _ in range(n+1)] getMostMony(0) print('#{} {}'.format(tc+1, ans))
efad72c2ee262764185c9fb6083bb3c7e7e5862d
ea00f60f8f690536ad27d4a75586ae5d74a23e13
/Book/Ch05/Ch5_3.py
bec54b7fac9e42a5701b704baabfa3f3b644c8e1
[]
no_license
ricky4235/Python
3603a085567d1554358a52c8ad47be70fdb463e9
fdeecadc7e156f517cb4ec2eff2bb28423dc1d79
refs/heads/master
2023-07-20T06:17:18.443342
2020-12-23T04:27:53
2020-12-23T04:27:53
221,387,521
2
2
null
2023-07-06T21:21:28
2019-11-13T06:22:25
Jupyter Notebook
UTF-8
Python
false
false
207
py
from bs4 import BeautifulSoup soup = BeautifulSoup("<b class='score'>Joe</b>", "lxml") tag = soup.b tag.name = "p" tag["class"] = "question" tag["id"] = "name" print(tag) del tag["class"] print(tag)
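# For reference, the two prints above are expected to produce the following
# (attribute order as emitted by bs4's formatter may vary by version):
#   <p class="question" id="name">Joe</p>
#   <p id="name">Joe</p>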
8f72f19372a8a6d00c1e1dde589bbaee09eace59
e53c9a5694b00153f8ac50e6d680fc9817798986
/case/migrations/0003_auto_20190111_1705.py
2824d646c80940d1ec4d7920a696742d8bd166d3
[]
no_license
peijipe/project-management
a40923a935d81be58029ea53258e042b5edeb498
4446313c382750fa57b855c4c64e9f57f2184831
refs/heads/master
2020-04-11T17:56:55.017519
2019-01-23T08:19:31
2019-01-23T08:19:31
161,980,473
0
0
null
null
null
null
UTF-8
Python
false
false
750
py
# Generated by Django 2.1.4 on 2019-01-11 17:05 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('case', '0002_auto_20190108_1659'), ] operations = [ migrations.RemoveField( model_name='case', name='low_skill', ), migrations.RemoveField( model_name='case', name='nationality', ), migrations.RemoveField( model_name='case', name='status', ), migrations.DeleteModel( name='LowSkill', ), migrations.DeleteModel( name='Nationality', ), migrations.DeleteModel( name='Status', ), ]
6606ed75976525c40589fa0b1fcf65119bffd351
1498f1ff7ed0e4318b4475c04d486b301328a4b5
/test/test_field_permission.py
15f71e6c80ad4db11cf7097dc545b8fd0cc662ce
[ "Apache-2.0" ]
permissive
greenpau/pycherwell
b66e27b3e0e07e914a07a092472575a56c48a8bf
2a25446d5cf86d69e6158067faf27ce250aba966
refs/heads/master
2021-07-25T01:58:57.682666
2020-07-19T01:40:26
2020-07-19T01:40:26
199,328,283
2
2
Apache-2.0
2020-07-16T21:31:33
2019-07-28T19:24:17
Python
UTF-8
Python
false
false
890
py
# coding: utf-8 """ Cherwell REST API Unofficial Python Cherwell REST API library. # noqa: E501 The version of the OpenAPI document: 9.3.2 Contact: See AUTHORS. Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import unittest import pycherwell from pycherwell.models.field_permission import FieldPermission # noqa: E501 from pycherwell.rest import ApiException class TestFieldPermission(unittest.TestCase): """FieldPermission unit test stubs""" def setUp(self): pass def tearDown(self): pass def testFieldPermission(self): """Test FieldPermission""" # FIXME: construct object with mandatory attributes with example values # model = pycherwell.models.field_permission.FieldPermission() # noqa: E501 pass if __name__ == '__main__': unittest.main()
5e9d59a96ffe4d5350f32346b070ded5a6294235
0353ecdc410b3b047d873ea9d5e30000d4d7b724
/contrib/devtools/check-doc.py
be4293146b1fe3d74652e2897016589e4689cf2c
[ "MIT" ]
permissive
MotoAcidic/BaseSource
d3c58b881997f5434b12d5c4f5e548b4a8a1d3b2
2b48377ddda416fff1c83dbfc8b96997613521ea
refs/heads/main
2023-06-10T19:31:39.127177
2021-05-21T00:20:55
2021-05-21T00:20:55
369,367,072
1
0
null
null
null
null
UTF-8
Python
false
false
1,909
py
#!/usr/bin/env python # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' This checks if all command line args are documented. Return value is 0 to indicate no error. Author: @MarcoFalke ''' from subprocess import check_output import re FOLDER_GREP = 'src' FOLDER_TEST = 'src/test/' CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST) CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR) REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"') REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")') # list unsupported, deprecated and duplicate args as they need no documentation SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizenwoamount']) def main(): used = check_output(CMD_GREP_ARGS, shell=True) docd = check_output(CMD_GREP_DOCS, shell=True) args_used = set(re.findall(REGEX_ARG,used)) args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL) args_need_doc = args_used.difference(args_docd) args_unknown = args_docd.difference(args_used) print "Args used : %s" % len(args_used) print "Args documented : %s" % len(args_docd) print "Args undocumented: %s" % len(args_need_doc) print args_need_doc print "Args unknown : %s" % len(args_unknown) print args_unknown exit(len(args_need_doc)) if __name__ == "__main__": main()
669b428f6be8288621b19bee3a8405ce64aa9813
d2df591c50458f88b3ebef4e5f9c65fa8ba42688
/Algorithm-Study/Backjoon/10989_countingSort.py
711d61749dae0fb5d6fe0143c610309119da9caf
[]
no_license
JuHyang/today-I-learned
16eba628b3e9e8c99cb3ca6bcdf1d49f9f496f94
6a262bc7a88612aa22fcb5119d4d182bacb22b49
refs/heads/main
2022-02-10T16:37:36.793196
2022-01-28T01:06:02
2022-01-28T01:06:02
222,265,241
0
0
null
null
null
null
UTF-8
Python
false
false
379
py
## counting sort import sys def countingSort (N, list_num) : for i in range (10000) : list_num.append(0) for i in range (N) : list_num[int (sys.stdin.readline()) - 1] += 1 for i in range (len(list_num)) : for j in range (list_num[i]) : print (i + 1) N = int (input ()) list_num = list () countingSort(N, list_num)
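# A hedged alternative sketch of the same counting sort (function name is
# illustrative): with N up to ten million lines, batching the output instead
# of printing one value at a time is usually what lets Python pass the time
# limit. Values are known to lie in 1..10000, matching the table above.
def counting_sort_fast(N):
    counts = [0] * 10001          # index v holds the multiplicity of value v
    for _ in range(N):
        counts[int(sys.stdin.readline())] += 1
    out = []
    for v in range(1, 10001):
        out.append((str(v) + '\n') * counts[v])  # emit each value count times
    sys.stdout.write(''.join(out))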
1e6601ecbaad109d19ffd77a2d1735d7d723e8b4
e246361dddc75a79c22e983c6af06e47ad76fb31
/JumpScale9RecordChain/servers/gedis/GedisCmds.py
f78d542b483991fd8da71eb613684e10689300d0
[]
no_license
Dinaamagdy/recordchain
2a3e97c1db9c3ccddade2e498777e35824d2ca18
a5b048d7b5d3ee3f41f5c7702731e65e28e13bd7
refs/heads/master
2020-03-22T05:28:23.195917
2018-07-03T09:09:38
2018-07-03T09:09:38
139,568,296
0
0
null
2018-07-03T10:39:35
2018-07-03T10:39:34
null
UTF-8
Python
false
false
3,428
py
from js9 import j import inspect # import imp import sys import os JSBASE = j.application.jsbase_get_class() class GedisCmds(JSBASE): def __init__(self, server, namespace, class_): JSBASE.__init__(self) self.server = server SCHEMA = """ @url = jumpscale.gedis.cmd @name = GedisCmd name = "" comment = "" code = "" schema_in = "" schema_out = "" @url = jumpscale.gedis.api @name = GedisServerSchema namespace = "" cmds = (LO) !jumpscale.gedis.cmd """ j.data.schema.schema_add(SCHEMA) # s1 = self.schema_from_url("jumpscale.gedis.cmd") self.schema = j.data.schema.schema_from_url("jumpscale.gedis.api") self.data = self.schema.new() self.name = namespace self.data.namespace = namespace for name, item in inspect.getmembers(class_): if name.startswith("_"): continue if (name, item) in inspect.getmembers(JSBASE): continue if inspect.isfunction(item): cmd = self.data.cmds.new() cmd.name = name code = inspect.getsource(item) cmd.code,cmd.comment,cmd.schema_in, cmd.schema_out= self.source_process(code) @property def code(self): code = self.server.code_server_template.render(obj=self) return code def source_process(self, txt): """ return code, comment, schema_in, schema_out """ txt = j.data.text.strip(txt) code = "" comment = "" schema_in = "" schema_out = "" state = "START" for line in txt.split("\n"): lstrip = line.strip() if state == "START" and lstrip.startswith("def"): state = "DEF" continue if lstrip.startswith("\"\"\""): if state == "DEF": state = "COMMENT" continue if state == "COMMENT": state = "CODE" continue raise RuntimeError() if lstrip.startswith("```") or lstrip.startswith("'''"): if state.startswith("SCHEMA"): # are already in schema go back to comment state = "COMMENT" continue if state == "COMMENT": # are in comment, now found the schema if lstrip.endswith("out"): state = "SCHEMAO" else: state = "SCHEMAI" continue raise RuntimeError() if state == "SCHEMAI": schema_in += "%s\n" % line continue if state == "SCHEMAO": schema_out += "%s\n" % line continue if state == "COMMENT": comment += "%s\n" % line continue if state == "CODE" or state == "DEF": code += "%s\n" % line continue raise RuntimeError() schema_in = j.data.schema.schema_from_text(j.data.text.strip(schema_in)) schema_out = j.data.schema.schema_from_text(j.data.text.strip(schema_out)) return j.data.text.strip(code), j.data.text.strip(comment), schema_in, schema_out
948e9f658720fceb019e2fc85c9764063c0f34ac
d99ac626d62c663704444a9cce7e7fc793a9e75e
/asm2vec/examples/non_crypto_code/PackedArray_functions.py
3dd0a419891408f18b4950cba1887d59b0e80456
[]
no_license
Experiment5X/CryptoFunctionDetection
3ab32d5573a249d24db1faf772721bc80b8d905d
dac700193e7e84963943593e36844b173211a8a1
refs/heads/master
2023-04-19T09:12:35.828268
2021-05-13T22:39:27
2021-05-13T22:39:27
355,299,557
1
0
null
null
null
null
UTF-8
Python
false
false
2,211
py
function_names = [ '___PackedArray_pack_1', '___PackedArray_unpack_1', '___PackedArray_pack_2', '___PackedArray_unpack_2', '___PackedArray_pack_3', '___PackedArray_unpack_3', '___PackedArray_pack_4', '___PackedArray_unpack_4', '___PackedArray_pack_5', '___PackedArray_unpack_5', '___PackedArray_pack_6', '___PackedArray_unpack_6', '___PackedArray_pack_7', '___PackedArray_unpack_7', '___PackedArray_pack_8', '___PackedArray_unpack_8', '___PackedArray_pack_9', '___PackedArray_unpack_9', '___PackedArray_pack_10', '___PackedArray_unpack_10', '___PackedArray_pack_11', '___PackedArray_unpack_11', '___PackedArray_pack_12', '___PackedArray_unpack_12', '___PackedArray_pack_13', '___PackedArray_unpack_13', '___PackedArray_pack_14', '___PackedArray_unpack_14', '___PackedArray_pack_15', '___PackedArray_unpack_15', '___PackedArray_pack_16', '___PackedArray_unpack_16', '___PackedArray_pack_17', '___PackedArray_unpack_17', '___PackedArray_pack_18', '___PackedArray_unpack_18', '___PackedArray_pack_19', '___PackedArray_unpack_19', '___PackedArray_pack_20', '___PackedArray_unpack_20', '___PackedArray_pack_21', '___PackedArray_unpack_21', '___PackedArray_pack_22', '___PackedArray_unpack_22', '___PackedArray_pack_23', '___PackedArray_unpack_23', '___PackedArray_pack_24', '___PackedArray_unpack_24', '___PackedArray_pack_25', '___PackedArray_unpack_25', '___PackedArray_pack_26', '___PackedArray_unpack_26', '___PackedArray_pack_27', '___PackedArray_unpack_27', '___PackedArray_pack_28', '___PackedArray_unpack_28', '___PackedArray_pack_29', '___PackedArray_unpack_29', '___PackedArray_pack_30', '___PackedArray_unpack_30', '___PackedArray_pack_31', '___PackedArray_unpack_31', '___PackedArray_pack_32', '___PackedArray_unpack_32', '_PackedArray_create', '_PackedArray_destroy', '_PackedArray_pack', '_PackedArray_unpack', '_PackedArray_set', '_PackedArray_get', '_PackedArray_bufferSize', '_PackedArray_computeBitsPerItem' ]
c95ff50362c7341f7c89fa4b3dd5edb04f69b817
9cf434b6ee59ab22496ee031fb4ab145bbaff1a2
/tranque_v1.8.4_source/backend/src/api/v1/views/template_views.py
7164ea45395b746ffd813d084a755a1961feb06e
[]
no_license
oliverhernandezmoreno/SourcesOH
f2ff1a5e3377f0ac1fb8b3153d99d0ee703700b7
5d9ca5ab1caceafd4d11207139c9e56210156ef8
refs/heads/master
2023-01-05T02:51:25.172103
2020-08-27T14:39:34
2020-08-27T14:39:34
64,422,812
0
1
null
2022-12-30T17:25:10
2016-07-28T19:33:44
JavaScript
UTF-8
Python
false
false
5,809
py
import tempfile

import coreapi
import coreschema
from django.http import FileResponse
from rest_framework import response, viewsets, serializers
from rest_framework.decorators import action
from rest_framework.utils.urls import replace_query_param

from base.pagination import PageNumberPagination
from base.permissions import StaffOnly
from base import schemas
from targets.graphs import template_graph
from targets.profiling.base import serialize_node


class CustomTemplateViewSchema(schemas.CustomSchema):

    @schemas.parameters.get('/template/')
    def default_list_filters(self):
        return [
            coreapi.Field(
                name='page',
                required=False,
                location='query',
                schema=coreschema.Integer(
                    description='A page number within the paginated result set.'
                ),
            ),
            coreapi.Field(
                name='page_size',
                required=False,
                location='query',
                schema=coreschema.Integer(
                    description='Number of results to return per page.'
                ),
            ),
            coreapi.Field(
                name='category',
                required=False,
                location='query',
                schema=coreschema.String(
                    description='The category to filter templates',
                ),
            ),
        ]

    @schemas.parameters.get('/graph/')
    def graph_parameters(self):
        return [
            coreapi.Field(
                name='direction',
                required=False,
                location='query',
                schema=coreschema.String(
                    description='The direction the graph should be traversed (up or down)',
                ),
            ),
        ]


def int_or_default(variant, default, minimum, maximum):
    """Parse *variant* as an int clamped to [minimum, maximum]; fall back
    to *default* when it is missing or not a number."""
    if not variant:
        return default
    try:
        parsed = int(variant, 10)
        if parsed < minimum:
            return minimum
        if parsed > maximum:
            return maximum
        return parsed
    except ValueError:
        return default


class TemplateView(viewsets.GenericViewSet):
    lookup_value_regex = '[^/]+'
    lookup_field = 'canonical_name'
    pagination_class = None
    schema = CustomTemplateViewSchema.as_schema()
    serializer_class = serializers.Serializer

    def get_paginated_response(self, data):
        page_data = sorted(data, key=lambda n: n.value.canonical_name)
        page_size = int_or_default(
            self.request.query_params.get("page_size"),
            PageNumberPagination.page_size,
            1,
            PageNumberPagination.max_page_size,
        )
        # the last page may be only partially filled
        max_page = len(page_data) // page_size + 1
        if len(page_data) == (max_page - 1) * page_size:
            max_page -= 1
        page = int_or_default(
            self.request.query_params.get("page"),
            1,
            1,
            len(page_data) // page_size + 1,
        )
        start = (page - 1) * page_size
        end = page * page_size
        return response.Response(data={
            "count": len(page_data),
            "next": (
                replace_query_param(
                    self.request.build_absolute_uri(),
                    "page",
                    page + 1,
                )
                if page < max_page
                else None
            ),
            "previous": (
                replace_query_param(
                    self.request.build_absolute_uri(),
                    "page",
                    page - 1,
                )
                if page > 1
                else None
            ),
            # materialize the map so the JSON renderer receives a plain list
            "results": list(map(serialize_node, page_data[start:end])),
        })

    def list(self, request, **kwargs):
        from targets.profiling import FOREST
        category = self.request.query_params.get("category")
        return self.get_paginated_response(
            node
            for node in FOREST.values()
            if category is None or node.value.category == category
        )

    def retrieve(self, request, canonical_name=None, **kwargs):
        from targets.profiling import FOREST
        node = next(
            (
                n
                for n in FOREST.values()
                if n.value.canonical_name == canonical_name
            ),
            None,
        )
        if node is None:
            return response.Response(status=404, data={"detail": "Not found."})
        return response.Response(data=serialize_node(node))

    @action(methods=["get"], detail=True, permission_classes=(StaffOnly,))
    def graph(self, request, canonical_name=None, **kwargs):
        direction = {"down": "inputs", "up": "derivations"}.get(
            request.query_params.get("direction", "down"),
            "inputs",
        )
        try:
            tmp = tempfile.TemporaryFile()
            template_graph(canonical_name, tmp, direction=direction)
            tmp.seek(0)
        except ValueError:
            return response.Response(status=404, data={"detail": "Not found."})
        return FileResponse(tmp, content_type="image/svg+xml")


class ManifestView(viewsets.GenericViewSet):
    lookup_value_regex = '[^/]+'
    lookup_field = 'name'
    pagination_class = None
    serializer_class = serializers.Serializer

    def list(self, request, **kwargs):
        from targets.profiling import MANIFESTS
        return response.Response(data={"results": list(MANIFESTS.values())})

    def retrieve(self, request, name=None, **kwargs):
        from targets.profiling import MANIFESTS
        manifest = MANIFESTS.get(name)
        if manifest is None:
            return response.Response(status=404, data={"detail": "Not found."})
        return response.Response(data=manifest)
7a440f8fa9a713d4169d492dec2d1fd25cfc15a4
7676f139bea57f37b7af399afdfb04016f17b30a
/test/test_lor.py
e309eb79ee6824b4e528837bfddeaf40b6ee2417
[]
no_license
Canisback/pantheon
32da2239dd20255a85f5a4d78edd888d0ed995df
3a9588a635035e2f89489fef35eec690f3069f96
refs/heads/master
2023-07-21T05:00:32.315615
2023-07-14T19:19:58
2023-07-14T19:19:58
129,768,571
57
17
null
2022-11-10T15:22:19
2018-04-16T15:51:04
Python
UTF-8
Python
false
false
643
py
from .config import *


def test_leaderboard():
    try:
        data = loop.run_until_complete(panth.get_lor_leaderboard())
    except Exception as e:
        print(e)
        raise  # re-raise so the test fails here instead of with a NameError below
    assert "players" in data
    assert type(data["players"]) == list


def test_match():
    try:
        data = loop.run_until_complete(panth.get_lor_match(lor_matchId))
    except Exception as e:
        print(e)
        raise
    assert "metadata" in data
    assert "info" in data


def test_matchlist():
    try:
        data = loop.run_until_complete(panth.get_lor_matchlist(lor_puuid))
    except Exception as e:
        print(e)
        raise
    assert type(data) == list
047903756bde3ab9de086df778b2e72bda0cbd03
82b728e805d887102c0b8c415731b353877690cd
/samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_vizier_service_lookup_study_sync.py
5d076d04aa6c105097f75f75cd4c7af7ec1a84e8
[ "Apache-2.0" ]
permissive
geraint0923/python-aiplatform
90c7742c9bdbde05b9688b117e8e59c0406d6f85
7ab05d5e127636d96365b7ea408974ccd6c2f0fe
refs/heads/main
2023-08-24T05:30:38.519239
2021-10-27T20:38:25
2021-10-27T20:38:25
370,803,114
0
0
Apache-2.0
2021-05-25T19:15:47
2021-05-25T19:15:46
null
UTF-8
Python
false
false
1,599
py
# -*- coding: utf-8 -*-

# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for LookupStudy
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-aiplatform


# [START aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_sync]
from google.cloud import aiplatform_v1beta1


def sample_lookup_study():
    """Snippet for lookup_study"""

    # Create a client
    client = aiplatform_v1beta1.VizierServiceClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.LookupStudyRequest(
        parent="projects/{project}/locations/{location}",
        display_name="display_name_value",
    )

    # Make the request
    response = client.lookup_study(request=request)

    # Handle response
    print(response)

# [END aiplatform_generated_aiplatform_v1beta1_VizierService_LookupStudy_sync]
fa5c4a3f82722a7f4ea1911d46cd805a4cc0a118
5a9798598f3e3575c46584f1b0185186066860ac
/tfx/experimental/templates/penguin/pipeline/pipeline.py
4dc773cb75bab4e60a0aab3810f136f36d2a9176
[ "Apache-2.0" ]
permissive
UsharaniPagadala/tfx
1cfbc8dbc5934cfceea8ceae4896d09404638369
4576b783e8c91e688337d3cd3b5dbdf93dbd278f
refs/heads/master
2023-08-07T17:47:29.629144
2021-09-21T08:03:06
2021-09-21T08:03:06
353,321,718
0
0
Apache-2.0
2021-09-21T08:03:07
2021-03-31T10:46:51
Python
UTF-8
Python
false
false
5,988
py
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX penguin template pipeline definition.

This file defines the TFX pipeline and various components in the pipeline.
"""

from typing import List, Optional, Text

import tensorflow_model_analysis as tfma
from tfx import v1 as tfx

from tfx.experimental.templates.penguin.models import features
from ml_metadata.proto import metadata_store_pb2


def create_pipeline(
    pipeline_name: Text,
    pipeline_root: Text,
    data_path: Text,
    preprocessing_fn: Text,
    run_fn: Text,
    train_args: tfx.proto.TrainArgs,
    eval_args: tfx.proto.EvalArgs,
    eval_accuracy_threshold: float,
    serving_model_dir: Text,
    metadata_connection_config: Optional[
        metadata_store_pb2.ConnectionConfig] = None,
    beam_pipeline_args: Optional[List[Text]] = None,
) -> tfx.dsl.Pipeline:
  """Implements the penguin pipeline with TFX."""

  components = []

  # Brings data into the pipeline or otherwise joins/converts training data.
  # TODO(step 2): Might use another ExampleGen class for your data.
  example_gen = tfx.components.CsvExampleGen(input_base=data_path)
  components.append(example_gen)

  # Computes statistics over data for visualization and example validation.
  statistics_gen = tfx.components.StatisticsGen(
      examples=example_gen.outputs['examples'])
  components.append(statistics_gen)

  # Generates schema based on statistics files.
  schema_gen = tfx.components.SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=True)
  components.append(schema_gen)

  # Performs anomaly detection based on statistics and data schema.
  example_validator = tfx.components.ExampleValidator(  # pylint: disable=unused-variable
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  components.append(example_validator)

  # Performs transformations and feature engineering in training and serving.
  transform = tfx.components.Transform(  # pylint: disable=unused-variable
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      preprocessing_fn=preprocessing_fn)
  # TODO(step 3): Uncomment here to add Transform to the pipeline.
  # components.append(transform)

  # Uses a user-provided Python function that implements a model using TensorFlow.
  trainer = tfx.components.Trainer(
      run_fn=run_fn,
      examples=example_gen.outputs['examples'],
      # Use outputs of Transform as training inputs if Transform is used.
      # examples=transform.outputs['transformed_examples'],
      # transform_graph=transform.outputs['transform_graph'],
      schema=schema_gen.outputs['schema'],
      train_args=train_args,
      eval_args=eval_args)
  # TODO(step 4): Uncomment here to add Trainer to the pipeline.
  # components.append(trainer)

  # Get the latest blessed model for model validation.
  model_resolver = tfx.dsl.Resolver(
      strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
      model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
      model_blessing=tfx.dsl.Channel(
          type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
              'latest_blessed_model_resolver')
  # TODO(step 5): Uncomment here to add Resolver to the pipeline.
  # components.append(model_resolver)

  # Uses TFMA to compute evaluation statistics over features of a model and
  # performs quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key=features.LABEL_KEY)],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(metrics=[
              tfma.MetricConfig(
                  class_name='SparseCategoricalAccuracy',
                  threshold=tfma.MetricThreshold(
                      value_threshold=tfma.GenericValueThreshold(
                          lower_bound={'value': eval_accuracy_threshold}),
                      change_threshold=tfma.GenericChangeThreshold(
                          direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                          absolute={'value': -1e-10})))
          ])
      ])
  evaluator = tfx.components.Evaluator(  # pylint: disable=unused-variable
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      # Change threshold will be ignored if there is no baseline (first run).
      eval_config=eval_config)
  # TODO(step 5): Uncomment here to add Evaluator to the pipeline.
  # components.append(evaluator)

  # Pushes the model to a file destination if check passed.
  pusher = tfx.components.Pusher(  # pylint: disable=unused-variable
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=tfx.proto.PushDestination(
          filesystem=tfx.proto.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  # TODO(step 5): Uncomment here to add Pusher to the pipeline.
  # components.append(pusher)

  return tfx.dsl.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=components,
      # Change this value to control caching of execution results. Default value
      # is `False`.
      # enable_cache=True,
      metadata_connection_config=metadata_connection_config,
      beam_pipeline_args=beam_pipeline_args,
  )
117876ae2a3f04ee3bbb6df29713c1dad3bcc9c0
d5d5b042bdc74130d562574dce62bf4d732411b7
/test/gdsctools/test_gdsc1000.py
b16c3cac1f32336f643dc4ba8d48bd3dee5eb8e8
[ "BSD-3-Clause", "BSD-2-Clause" ]
permissive
cokelaer/gdsctools
e8d932f29c4ab9d2f4fea8b64c701a13995f3535
5609aadf8b29c8672dc7e47a1d37fbef43728da2
refs/heads/master
2023-08-13T21:32:57.542738
2021-10-17T15:58:48
2021-10-17T15:58:48
417,879,371
1
0
null
null
null
null
UTF-8
Python
false
false
1,629
py
from gdsctools import GDSC1000


def test_download(tmpdir):

    p = tmpdir.mkdir("download")
    name = str(p) + "/"  # use the pytest tmpdir instead of a relative path in the CWD

    gg = GDSC1000(data_folder_name=name)
    gg.download_data()

    gg = GDSC1000(data_folder_name=name)
    gg.load_data(annotation=False)

    # now some filtering. Let us start with alteration_type
    assert gg.genomic_df.shape == (42231, 8)
    gg.filter_by_alteration_type(["DELETION"])
    assert gg.genomic_df.shape == (1614, 8)
    gg.reset_genomic_data()
    assert gg.genomic_df.shape == (42231, 8)

    # by gene
    gg.filter_by_gene(["A2ML1"])
    assert len(gg.genomic_df)
    gg.reset_genomic_data()

    # by core genes
    # numbers labelled (for sure) were found in Liz document
    gg = GDSC1000(data_folder_name=name)
    gg.load_data()  # Here, we include the annotations
    gg.filter_by_gene("Core Genes")
    assert len(gg.genomic_df['GENE'].unique()) == 310  # For sure
    assert len(gg.genomic_df)
    assert gg.get_genomic_info()["cosmic"].METHYLATION == 108  # for sure
    assert gg.get_methylation_info().iloc[0, 0] == 338  # for sure
    gg.get_cna_info()
    gg.reset_genomic_data()

    # by tissues
    gg.filter_by_tissue_type(["COAD/READ"])
    assert len(gg.genomic_df)
    gg.reset_genomic_data()

    # by cell line
    gg.filter_by_cell_line()
    gg.filter_by_cell_line(["SW1116"])
    assert len(gg.genomic_df)
    gg.reset_genomic_data()

    # by cosmic id
    gg.filter_by_cosmic_id()
    gg.filter_by_cosmic_id([909746])
    assert len(gg.genomic_df)
    gg.reset_genomic_data()

    gg.filter_by_recurrence()
    gg.filter_by_recurrence(3)

    gg.make_matrix()
fdcdc18790dfa2b5e639771b3495089885d4d850
ebf3e0fdb7ca2c19e04f893b49120cd4fdf10649
/Models_pro/population.py
e4121535f2c153a614db5986cb4b4c74d53eb44f
[]
no_license
Sem31/Django-Basics
19f8528f683c1bbc67a4d5d01988fa8d64d251c2
3219143d135a1d918e0994b61128375b01b35a5d
refs/heads/master
2022-12-12T00:01:04.518559
2019-09-29T18:16:57
2019-09-29T18:16:57
190,061,340
0
0
null
2022-12-08T05:22:25
2019-06-03T18:37:25
Python
UTF-8
Python
false
false
1,008
py
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Models_pro.settings')

import django
django.setup()

# fake population script
import random
from first_app.models import AccessRecord, Webpage, Topic
from faker import Faker

fakegen = Faker()
topics = ['Search', 'Social', 'Marketplace', 'News', 'Games']


def add_topic():
    t = Topic.objects.get_or_create(top_name=random.choice(topics))[0]
    t.save()
    return t


def population(N=5):
    for entry in range(N):
        # get a topic
        top = add_topic()

        # get fake data for that entry
        fake_url = fakegen.url()
        fake_date = fakegen.date()
        fake_name = fakegen.company()

        # create the webpage
        webpg = Webpage.objects.get_or_create(topic=top, url=fake_url, name=fake_name)[0]

        # create a fake access record for the webpage
        acc_rec = AccessRecord.objects.get_or_create(name=webpg, date=fake_date)


if __name__ == "__main__":
    print('Populating script')
    population(20)
    print('Populating complete')
58c50599c5bb0fed81a5e29f8f9ab92dad0b3ed3
b3528a3795ce373e27d52362128de3cff6f9969d
/python/delta_debugging/delta_debugging.py
775517fdbe826a7e4fff8b4f14483d909055a65e
[]
no_license
greenmonn/daily-coding
43e0f3775678c7d6116df7ba5034ea18489d87c9
ef6ecc88e6db61e18364eef3ea071c11e1385a99
refs/heads/master
2023-01-14T04:59:14.130309
2021-02-08T23:32:56
2021-02-08T23:32:56
157,735,438
1
1
null
2022-12-21T02:13:17
2018-11-15T15:47:37
Python
UTF-8
Python
false
false
2,663
py
import subprocess

prefix = """
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;

@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class MyTest {
    public static boolean debug = false;

    @Test
    public void test4() throws Throwable {
        Account account1 = new Account((int) 'a');
"""

lines = [
    "boolean boolean3 = account1.Borrow((int) '#');",
    "boolean boolean5 = account1.Repay((int) '#');",
    "boolean boolean7 = account1.Borrow((int) (short) 10);",
    "java.lang.Class<?> wildcardClass8 = account1.getClass();",
    "boolean boolean10 = account1.Borrow((int) '4');",
    "boolean boolean12 = account1.Send(0);",
    "java.lang.Class<?> wildcardClass13 = account1.getClass();",
    "boolean boolean15 = account1.Receive(0);",
    "boolean boolean17 = account1.Borrow((int) '4');",
    "boolean boolean19 = account1.Borrow(1);",
    "boolean boolean21 = account1.Borrow(100);",
    "boolean boolean23 = account1.Borrow((int) (short) 100);",
]

suffix = "\n}}"


def compile(source):
    filename = 'MyTest'
    with open(filename + '.java', 'w') as f:
        f.write(source)
    try:
        subprocess.check_call(
            'javac -cp junit-4.13-beta-2.jar:hamcrest-core-1.3.jar:.:./target '
            + filename + '.java',
            shell=True)
    except subprocess.CalledProcessError:
        return None
    return filename


def execute(target):
    result = subprocess.run(
        'java -cp junit-4.13-beta-2.jar:hamcrest-core-1.3.jar:.:./target '
        'org.junit.runner.JUnitCore ' + target,
        stdout=subprocess.PIPE, shell=True)
    output = result.stdout.decode()  # decode bytes instead of str() to drop the b'...' wrapper
    if 'FAILURES!!!' in output:
        return 'fail'
    elif 'OK (' in output:  # the JUnit text runner prints "OK (N tests)" on success
        return 'success'
    else:
        return 'unknown'


def check_failure(input_lines):
    source = prefix + '\n'.join(input_lines) + suffix
    target = compile(source)
    if target:
        test_result = execute(target)
        if test_result == 'fail':
            return True
    return False


def DD(p, lines):
    # two-way delta debugging: shrink `lines` while prefix `p` + the kept slice still fails
    length = len(lines)
    if length == 1:
        return lines
    l1 = lines[:length // 2]
    l2 = lines[length // 2:]
    p1 = p + l1
    p2 = p + l2
    if check_failure(p1):
        return DD(p, l1)
    elif check_failure(p2):
        return DD(p, l2)
    else:
        # neither half fails on its own: reduce each half using the other as context
        return DD(p2, l1) + DD(p1, l2)


def apply_delta_debugging(lines):
    return DD([], lines)


reduced_lines = apply_delta_debugging(lines)

source = prefix + '\n'.join(reduced_lines) + suffix
target = compile(source)
if target:
    test_result = execute(target)
    try:
        assert test_result == 'fail'
    except AssertionError:
        print('Failed to Generate Reduced Test Suite')
        exit(1)
    print('Reduced Test Suite Generated: ' + target + '.java')
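# Illustrative walk-through (hypothetical, not part of the original script):
# with four statements [a, b, c, d] where only {b} reproduces the failure,
# DD([], [a, b, c, d]) first checks the half [a, b]; it fails, so the search
# recurses into DD([], [a, b]), where [a] passes and [b] fails, and the single
# failing line [b] is returned once the slice cannot be halved any further.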
f45259da4d410c951bd7fab63394ea93796504a9
1c9a5513a573e43aab30b032529699e6767540ba
/flask_sqlalchemy_jsonapi/sort.py
eb62cf35b7021747096db589ea00a016e7a7174d
[ "MIT" ]
permissive
Raistlfiren/sqlalchemy-jsonapi-collections
bdefeb411316950c944d0ff53822467e8a6330ee
890d6377e9a3ee07e14a0b4028ba24a03a1420ef
refs/heads/master
2020-12-11T01:47:36.187863
2016-03-29T13:24:24
2016-03-29T13:24:24
55,098,906
0
0
null
2016-03-30T21:24:45
2016-03-30T21:24:45
null
UTF-8
Python
false
false
3,322
py
# -*- coding: utf-8 -*-
from sqlalchemy import desc

from flask_sqlalchemy_jsonapi.errors import FieldError


class SortValue(object):
    """Validate and sort a provided `marshmallow` schema field name."""

    attribute = None
    descending = False
    join = None

    def __init__(self, schema, value):
        """Set the `SQLAlchemy` column name from the provided attribute.

        Dot separated strings are understood to be the attributes of a
        related schema.  If the preceding name does not match a valid
        relationship field an error will be thrown.  If the following name
        does not match an attribute on the related schema an error will be
        thrown.

        :param schema: `marshmallow` schema object.
        :param value: String path to sorted schema attribute.
        """
        self.schema = schema
        self.value = value

        self.descending = value.startswith('-')
        if self.descending:
            value = value[1:]

        if "." in value:
            table, column = value.split('.', 1)
            relationship = self._get_field(table, schema)
            field = self._get_field(column, relationship.schema)
            self.attribute = field.attribute or column
            self.join = table
        else:
            field = self._get_field(value, schema)
            self.attribute = field.attribute or value

    @property
    def column(self):
        """A sorted `SQLAlchemy` column reference."""
        column = getattr(self.schema.Meta.model, self.attribute)
        if self.descending:
            return desc(column)
        return column

    def _get_field(self, field, schema):
        """Get the schema field associated with the specified name.

        :param field: String name of a declared attribute.
        :param schema: `marshmallow` schema object.
        """
        if field not in schema._declared_fields:
            raise FieldError(
                'Invalid field specified: {}.'.format(self.value))
        return schema._declared_fields[field]

    @classmethod
    def generate(cls, schema, values):
        """Parse a series of strings into `SortValue` instances.

        Dot notation can be used to sort by the attributes of a related
        schema. E.g. `relationship.attribute`.

        If a string cannot be converted, its error message is collected
        into the returned list of strings.

        :param schema: `marshmallow` schema reference.
        :param values: String list of attributes.
        """
        errors = []
        fields = []
        for value in values:
            try:
                fields.append(cls(schema, value))
            except FieldError as error:
                errors.append(error.message)
        return fields, errors

    @staticmethod
    def sort_by(query, values):
        """Apply a series of `SortValue` instances to a `SQLAlchemy` query.

        Dot separated sorts will have the appropriate tables joined prior
        to applying the sort.

        :param query: `SQLAlchemy` query object.
        :param values: List of `SortValue` instances.
        """
        sorts = []
        for value in values:
            if value.join is not None:
                query = query.join(value.join)
            sorts.append(value.column)
        return query.order_by(*sorts)
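# A minimal usage sketch (assumes a marshmallow `UserSchema` and a SQLAlchemy
# `query` exist; both names are hypothetical, not part of this module):
#
#     values, errors = SortValue.generate(UserSchema, ['-created_at', 'group.name'])
#     if not errors:
#         query = SortValue.sort_by(query, values)
#
# '-created_at' orders a local column descending; 'group.name' joins the
# `group` relationship first and then orders by its `name` attribute.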
5a395cb6dd196f8daa741e17a6699b05aabaf543
9b55ea0385c7508b0373d2e1059bbb4aa44a6f37
/oxigraph_admin/schemas/security.py
0182a145f8540c1753ad7df738d40d925e84f6a0
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
edmondchuc/oxigraph-admin
0b239ca34c3e5cece2dc18211f69f9a67e76549f
689d1ee8905b81075d0809487bbad26c2c935939
refs/heads/master
2023-03-29T20:06:34.560483
2021-03-28T05:41:34
2021-03-28T05:41:34
352,244,254
1
0
null
null
null
null
UTF-8
Python
false
false
86
py
from pydantic import BaseModel


class SecuritySettings(BaseModel):
    enabled: bool
e8d26fb2e48c2f7a7151e58560c9f695f093a8be
05dcaeb9cbb86d8139118fdc306b2b722739098c
/media_manager/decorators.py
0891df76694b9887e251caf1624f001895fbf0b2
[]
permissive
Harvard-ATG/media_management_lti
a6363276a920653dfc7d130c1fc1155e90b1f638
a4b89c3e1c5f7a8adc31a258413b512507916a94
refs/heads/main
2023-05-25T09:41:56.843354
2021-09-21T14:24:50
2021-09-21T14:24:50
47,143,864
5
5
BSD-3-Clause
2023-05-22T23:55:11
2015-11-30T20:24:09
JavaScript
UTF-8
Python
false
false
368
py
from media_manager.lti import LTILaunch
from functools import wraps


def require_permission(permission):
    def perm_decorator(func):
        @wraps(func)
        def func_wrapper(request, *args, **kwargs):
            LTILaunch(request).require_perm(permission)
            return func(request, *args, **kwargs)
        return func_wrapper
    return perm_decorator
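# A minimal usage sketch (the view and the permission name are hypothetical):
#
#     @require_permission('administrate_course')
#     def course_admin(request):
#         ...
#
# LTILaunch(request).require_perm(...) runs before the wrapped view, so the
# view body executes only when the LTI launch carries the required permission.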
8703fbf0dae4888bfc097f5cda483e79df4193bb
4a852785538f4589e6e5122c075437118f1ae31f
/src/richmond/webapp/api/gateways/e_scape/backend.py
ea552c7bb7be9c3ac04b96a957395b12b0ed6672
[]
no_license
euan/richmond
3ebc132cc103567ed6c93f25d735bce848128180
cd4b02998f5f5cc50f21aea6608e66449c0f32c9
refs/heads/master
2021-01-21T01:27:27.783146
2010-08-18T08:16:05
2010-08-18T08:16:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,120
py
from richmond.webapp.api.utils import callback

import xml.etree.ElementTree as ET


class E_ScapeException(Exception):
    pass


class E_ScapeResponse(object):
    recipient = None
    sender = None
    status = None
    smsc = None
    text = None


class E_Scape(object):

    def __init__(self, api_id):
        """
        * api_id is assigned to you by E-scape
        * if smsc is provided then this is used as the default for all
            messages sent unless it's specified again when calling send_sms
        """
        self.api_id = api_id
        self.gateway_url = 'http://site.demoru.com/api.php'

    def send_sms(self, smsc, sender, recipients, text):
        """
        We mimic a FORM post to the given URL and get back an HTML table.

        Got this from http://site.demoru.com/?page_id=27

        HTTP 200 OK
        Date Thu: 03 Jun 2010 18:32:15 GMT
        Connection Close
        Transfer-Encoding chunked
        Content-Type text/html; charset=UTF-8

        <table class="widefat" border="0">
            <thead>
                <tr>
                    <th>from</th>
                    <th>to</th>
                    <th>smsc</th>
                    <th>status</th>
                    <th>text</th>
                </tr>
            </thead>
            <tbody>
                <tr>
                    <td>+35566</td>
                    <td>+44778962937</td>
                    <td>ESC-P1Celtel</td>
                    <td>0: Accepted for delivery</td>
                    <td> http://www.mobi-fee.com/link/g.lnk?ID=135</td>
                </tr>
            </tbody>
            <tfoot>
                <tr>
                    <th>from</th>
                    <th>to</th>
                    <th>smsc</th>
                    <th>status</th>
                    <th>text</th>
                </tr>
            </tfoot>
        </table>
        """
        if not all([msisdn.startswith('+') for msisdn in recipients]):
            raise E_ScapeException('All msisdns should start with a +')
        kwargs = {
            's': sender,
            'r': ','.join(recipients),
            'text': text,
            'smsc': smsc,
            'api_id': self.api_id,
            'send': 'go'  # apparently the form submission key
        }
        # parse_response is a method, so it must be called via self
        return self.parse_response(callback(self.gateway_url, kwargs.items()))

    def parse_response(self, response):
        tree = ET.fromstring(response)
        rows = tree.findall('tbody/tr')
        if not rows:
            raise E_ScapeException('Unparsable response: %s' % response)
        responses = []
        for row in rows:
            sender, recipient, smsc, status, text = [td.text for td in
                                                     row.findall('td')]
            resp = E_ScapeResponse()
            resp.sender = sender
            resp.recipient = recipient
            resp.smsc = smsc
            resp.status = [s.strip() for s in status.split(':')]
            resp.text = text
            responses.append(resp)
        return responses
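# A minimal usage sketch (api_id, smsc and the phone numbers are placeholders):
#
#     gateway = E_Scape(api_id='your-api-id')
#     responses = gateway.send_sms('ESC-P1Celtel', '+35566',
#                                  ['+44778962937'], 'hello world')
#     for r in responses:
#         print r.recipient, r.status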
b9fbf432be4d10e425c65617e63fa9e902bcfb71
cf0ab8503d4d704045070deea1e2125375711e86
/apps/admins/migrations/0016_email_trgm_index.py
37fb89311f3da4b1e63edf8e88407ba4ae5ec7d9
[]
no_license
faierbol/syncano-platform
c3c6468600115752fd9fa5e46a0ad59f75f6bc9c
879111874d1ef70418b4890cf970720b0a2be4d8
refs/heads/master
2023-07-20T10:13:40.066127
2021-02-08T15:01:13
2021-02-08T15:01:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
377
py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-04 13:14
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('admins', '0015_auto_20151116_1124'),
    ]

    operations = [
        migrations.RunSQL(
            """
            CREATE INDEX admins_admin_trgm_email
            ON admins_admin
            USING GIN (email gin_trgm_ops);
            """
        )
    ]
8b19f88bf31d2955f25c21644e2842d7b82b3fb4
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_155/3211.py
c858ddd5d9f486b0a798b83f0bd5067a8acacfd8
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
612
py
#!/usr/bin/env python
from __future__ import print_function


def main():
    f = open('myfile', 'w')
    with open("sample.txt") as fp:
        line_count = int(next(fp).strip())
        for inst in range(line_count):
            line = next(fp).strip()
            shyMax, audi = line.split()
            total = 0   # people standing so far (renamed from `sum` to avoid shadowing the builtin)
            pplcnt = 0  # friends that must be added
            for i in range(int(shyMax) + 1):
                if i > total:
                    pplcnt += 1
                    total += 1
                total += int(audi[i])
            print("Case #{}: {}".format(inst + 1, pplcnt), file=f)
    f.close()


if __name__ == "__main__":
    main()
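# Worked example (hypothetical input): with shyMax=4 and audi="11111", one
# person sits at each shyness level 0..4; level 0 stands (total=1), level 1
# stands (total=2), and so on, so pplcnt stays 0. With audi="01111" nobody
# has shyness 0, so one friend is added (pplcnt=1) before level 1 will stand.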
88d169aa75a10b91bbdce47120e6b90e421aba8c
8a6dde82fa2bb14a92ee9c45013c15424572c076
/flask_app/adoption_app/app.py
64805087af9022502b90179d089ec251ad63dee6
[]
no_license
irekpi/flask
4c9d6f65d0b54bcb4bf5e2e59fdca3e426793c7e
4bc9caddebcb903a7da0e2fd9eef507112d358ed
refs/heads/master
2022-04-11T03:33:16.630625
2020-03-26T19:30:57
2020-03-26T19:30:57
250,352,874
0
0
null
null
null
null
UTF-8
Python
false
false
182
py
from adoption import app
from flask import render_template


@app.route('/')
def index():
    return render_template('home.html')


if __name__ == '__main__':
    app.run(debug=True)
5ae87aa4a615766ce9e138470e20a278f275cc6f
7deda84f7a280f5a0ee69b98c6a6e7a2225dab24
/Receptionist/migrations/0003_auto_20200101_1048.py
faf01ddd9b6e58c1f60a74740ab12f7d4d570747
[]
no_license
Cornex-Inc/Coffee
476e30f29412373fb847b2d518331e6c6b9fdbbf
fcd86f20152e2b0905f223ff0e40b1881db634cf
refs/heads/master
2023-01-13T01:56:52.755527
2020-06-08T02:59:18
2020-06-08T02:59:18
240,187,025
0
0
null
2023-01-05T23:58:52
2020-02-13T05:47:41
Python
UTF-8
Python
false
false
386
py
# Generated by Django 2.1.15 on 2020-01-01 10:48

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('Receptionist', '0002_auto_20200101_1047'),
    ]

    operations = [
        migrations.RenameField(
            model_name='payment',
            old_name='discounted_percent',
            new_name='discounted',
        ),
    ]
effe1cf035f81d7f598fa870362c76740cf17bdd
65c31008f79a1227e8eda04f507e2ef26413bd3a
/maximum.py
ff9df3ccf61cda2c3e302ef62ece83362625c5bf
[]
no_license
qwangzone/leetcode_pro
da2b98770d12e3d3e57b585f24727cdd600adb96
0e008fa293f54cc97c79e86648fadf67c0507e7a
refs/heads/master
2020-03-06T22:22:47.434221
2018-04-28T09:00:53
2018-04-28T09:00:53
127,101,654
0
0
null
null
null
null
UTF-8
Python
false
false
385
py
class Solution:
    # @param A, a list of integers
    # @return an integer
    def maxSubArray(self, A):
        # Kadane's algorithm: curSum is the best sum of a subarray ending at
        # the current element, maxSum the best sum seen anywhere so far.
        if not A:
            return 0

        curSum = maxSum = A[0]
        for num in A[1:]:
            curSum = max(num, curSum + num)
            maxSum = max(maxSum, curSum)
        return maxSum


a = Solution()
print(a.maxSubArray([1, 2, 3, 4, 5, -1, -3]))
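# Worked example: for [1, 2, 3, 4, 5, -1, -3] curSum grows 1, 3, 6, 10, 15,
# then drops to 14 and 11, so maxSum ends at 15: the positive prefix is the
# maximum subarray.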
efae14ea5b64bab7cfc0a336e7bea98939e7bd96
5bc9cd49ea8852852fc7caf38bd40c261c178985
/blogs/urls.py
f38a877ec835a48ff07207693b6918c5b28df95c
[ "Apache-2.0" ]
permissive
Tooskich/python_core
90f2ae97c1cde6b7eca0bdd84da1237d54db9b53
ae2d9e201a903fc5fabcddf66b3e96aff1176007
refs/heads/master
2021-01-19T01:42:23.866215
2015-12-29T12:20:20
2015-12-29T12:20:20
16,333,738
0
1
null
2016-06-28T06:30:19
2014-01-29T02:55:21
Python
UTF-8
Python
false
false
903
py
from django.conf.urls import patterns, url

from blogs import views

urlpatterns = patterns(
    'blogs.views',
    # url(
    #     regex=r'^$',
    #     view='bloggers',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^bloggers/$',
    #     view='bloggers',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^posts/$',
    #     view='posts',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^posts/blog/$',
    #     view='blogId',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^bloggers/save/$',
    #     view='saveBlogger',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^posts/save/$',
    #     view='savePost',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^bloggers/delete/$',
    #     view='deleteBlogger',
    #     name='basic_urls',
    # ),
    # url(
    #     regex=r'^posts/delete/$',
    #     view='deletePost',
    #     name='basic_urls',
    # ),
)
9334d740d8de1a87835894fd708ca2daeea156d5
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
/sdk/network/azure-mgmt-network/generated_samples/inbound_nat_rule_list.py
5b4a92fc37a9078fe6a20e541fbfbbf2e19b8d7a
[ "MIT", "LGPL-2.1-or-later", "LicenseRef-scancode-generic-cla" ]
permissive
openapi-env-test/azure-sdk-for-python
b334a2b65eeabcf9b7673879a621abb9be43b0f6
f61090e96094cfd4f43650be1a53425736bd8985
refs/heads/main
2023-08-30T14:22:14.300080
2023-06-08T02:53:04
2023-06-08T02:53:04
222,384,897
1
0
MIT
2023-09-08T08:38:48
2019-11-18T07:09:24
Python
UTF-8
Python
false
false
1,547
py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-network
# USAGE
    python inbound_nat_rule_list.py

    Before run the sample, please set the values of the client ID, tenant ID and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    response = client.inbound_nat_rules.list(
        resource_group_name="testrg",
        load_balancer_name="lb1",
    )
    for item in response:
        print(item)


# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/InboundNatRuleList.json
if __name__ == "__main__":
    main()
570c820b05b72513c06662d62245d9fabbae0603
d635f439116674f6ed5d8c7b3bbc6991026925eb
/scheduled_bots/disease_ontology/disease_ontology/deprecated_code/mesh_changes.py
147b6a5cc89e21fa9f3b6a274a15637436151428
[ "MIT" ]
permissive
SuLab/scheduled-bots
141a73949e2b2e97738944ecb1a0fcd214af5d77
bc83f5e013fd18247805efc4bf1f8f948aef859e
refs/heads/main
2023-02-24T16:19:24.926230
2023-02-17T10:48:03
2023-02-17T10:48:03
74,090,171
7
13
MIT
2022-12-08T05:07:36
2016-11-18T03:04:06
Jupyter Notebook
UTF-8
Python
false
false
9,994
py
""" Instead of the change detector looking at each revision for an item what i want here, is to compare the current state of an item's key/value pairs that I define, with another set of data (a reference dataset, from an owl/obographs json file) Steps: - Does a sparql query against wikidata to get all mesh IDs on all items with a DOID. Looks for a mapping relation type (P4390) if available. If no mapping rel type is specified, default to oboInOwl:hasDbXref - Sparql query against the latest doid.owl release file looking for mesh terms using the relations: {skos:closeMatch skos:narrowMatch skos:broadMatch skos:relatedMatch skos:exactMatch oboInOwl:hasDbXref} - Compare the mesh IDs on wd vs whats in DO. Returns a table listing all of the differences """ import subprocess from collections import defaultdict import pandas as pd import requests from rdflib import Graph from rdflib import URIRef, Literal from tqdm import tqdm from wikidataintegrator.wdi_core import WDItemEngine from wikidataintegrator.wdi_helpers import id_mapper BIOPORTAL_KEY = "a1ac23bb-23cb-44cf-bf5e-bcdd7446ef37" DOID_QID = id_mapper("P699") DO_OWL_PATH = "doid.owl" QID_MAP_REL_TYPE_CURIE = {'Q39893184': 'skos:closeMatch', 'Q39893967': 'skos:narrowMatch', 'Q39894595': 'skos:broadMatch', 'Q39894604': 'skos:relatedMatch', 'Q39893449': 'skos:exactMatch'} QID_MAP_REL_TYPE_CURIE = defaultdict(lambda: "oboInOwl:hasDbXref", QID_MAP_REL_TYPE_CURIE) """ MAP_REL_TYPE_QID = {'http://www.w3.org/2004/02/skos/core#broadMatch': 'Q39894595', 'http://www.w3.org/2004/02/skos/core#closeMatch': 'Q39893184', 'http://www.w3.org/2004/02/skos/core#exactMatch': 'Q39893449', 'http://www.w3.org/2004/02/skos/core#narrowMatch': 'Q39893967', 'http://www.w3.org/2004/02/skos/core#relatedMatch': 'Q39894604'} """ PREFIX_TO_CURIE = { 'http://www.w3.org/2004/02/skos/core#': 'skos', 'http://www.geneontology.org/formats/oboInOwl#': 'oboInOwl' } purl_to_curie = lambda s: s.replace("http://purl.obolibrary.org/obo/", "").replace("_", ":") curie_to_purl = lambda s: "http://purl.obolibrary.org/obo/" + s.replace(":", "_") def get_wikidata_do_mesh(): # get mesh xrefs, and including mapping relation type # {'DOID:0050856': {'skos:broadMatch_D019958'}} query = """ select ?item ?doid ?mesh ?mesh_rt where { ?item wdt:P699 ?doid . ?item p:P486 ?mesh_s . ?mesh_s ps:P486 ?mesh . 
optional { ?mesh_s pq:P4390 ?mesh_rt } }""" results = WDItemEngine.execute_sparql_query(query)['results']['bindings'] results = [{k: v['value'].replace("http://www.wikidata.org/entity/", "") for k, v in item.items()} for item in results] df = pd.DataFrame(results) df['mesh_rt'] = df.apply(lambda row: QID_MAP_REL_TYPE_CURIE[row.mesh_rt] + "_MESH:" + row.mesh, axis=1) df['_item'] = df['item'] r = df.groupby("_item").aggregate(lambda x: set(y for y in x if not pd.isnull(y))).to_dict("records") wd = {list(x['doid'])[0]: x for x in r} wd = {k: v['mesh_rt'] for k, v in wd.items()} wd = {k: v for k, v in wd.items() if v} return wd def getConceptLabel(qid): return getConceptLabels((qid,))[qid] def getConceptLabels(qids): qids = "|".join({qid.replace("wd:", "") if qid.startswith("wd:") else qid for qid in qids}) params = {'action': 'wbgetentities', 'ids': qids, 'languages': 'en', 'format': 'json', 'props': 'labels'} r = requests.get("https://www.wikidata.org/w/api.php", params=params) print(r.url) r.raise_for_status() wd = r.json()['entities'] return {k: v['labels']['en']['value'] for k, v in wd.items()} def get_do_metadata(): # from the do owl file, get do labels, descriptions g = Graph() g.parse(DO_OWL_PATH) disease_ontology = Literal('disease_ontology', datatype=URIRef('http://www.w3.org/2001/XMLSchema#string')) query = """ SELECT * WHERE { ?id oboInOwl:hasOBONamespace ?disease_ontology . ?id rdfs:label ?label . OPTIONAL {?id obo:IAO_0000115 ?descr} FILTER NOT EXISTS {?id owl:deprecated ?dep} } """ rows = g.query(query, initBindings={'disease_ontology': disease_ontology}) res = [{str(k): str(v) for k, v in binding.items()} for binding in rows.bindings] df = pd.DataFrame(res) df.drop_duplicates(subset=['id'], inplace=True) df.fillna("", inplace=True) do = df.to_dict("records") do = {purl_to_curie(x['id']): x for x in do} return do def parse_do_owl(): """ Parse xrefs and skos matches from owl file. Returns dict. key: doid curie, value: set of xrefs in the format: relation type + "_" + xref. (ex: oboInOwl:hasDbXref_MESH:D007690) :return: """ g = Graph() g.parse(DO_OWL_PATH) disease_ontology = Literal('disease_ontology', datatype=URIRef('http://www.w3.org/2001/XMLSchema#string')) true = Literal('true', datatype=URIRef('http://www.w3.org/2001/XMLSchema#boolean')) query = """ PREFIX skos: <http://www.w3.org/2004/02/skos/core#> SELECT ?id ?rel_type ?xref WHERE { ?id oboInOwl:hasOBONamespace ?disease_ontology . OPTIONAL { values ?rel_type {skos:closeMatch skos:narrowMatch skos:broadMatch skos:relatedMatch skos:exactMatch oboInOwl:hasDbXref} ?id ?rel_type ?xref . } FILTER NOT EXISTS {?id owl:deprecated ?true} } """ rows = g.query(query, initBindings={'disease_ontology': disease_ontology, 'true': true}) res = [{str(k): str(v) for k, v in binding.items()} for binding in rows.bindings] df = pd.DataFrame(res) df["doid"] = df["id"] df.dropna(subset=['xref'], inplace=True) df.rel_type = df.rel_type.apply( lambda x: x.replace(x.split("#")[0] + "#", PREFIX_TO_CURIE[x.split("#")[0] + "#"] + ":")) df.xref = df.apply(lambda row: row.rel_type + "_" + row.xref, axis=1) r = df.groupby("id").aggregate(lambda x: set(y for y in x if not pd.isnull(y))).to_dict("records") do = {purl_to_curie(list(x['doid'])[0]): x for x in r} do = {k: v['xref'] for k, v in do.items()} # filter mesh xrefs only do = {k: set([x for x in v if "MESH:" in x]) for k, v in do.items()} do = {k: v for k, v in do.items() if v} # do['DOID:5570'] return do def compare(wd, do): # for each DO item, does wd have everything it should? 
What else does it have? wd = defaultdict(set, wd) do = defaultdict(set, do) leftover_in_wd = dict() leftover_in_do = dict() doids = set(wd.keys()) | set(do.keys()) missing = [] for doid in doids: leftover_in_wd[doid] = set() leftover_in_do[doid] = set() if doid not in wd: missing.append(doid) continue leftover_in_wd[doid] = wd[doid] - do[doid] leftover_in_do[doid] = do[doid] - wd[doid] leftover_in_wd = {k: v for k, v in leftover_in_wd.items() if v} leftover_in_do = {k: v for k, v in leftover_in_do.items() if v} print("Items missing in wikidata: {}".format(missing)) return leftover_in_wd, leftover_in_do def get_changes(): wd = get_wikidata_do_mesh() do = parse_do_owl() leftover_in_wd, leftover_in_do = compare(wd, do) return leftover_in_wd, leftover_in_do def get_mesh_info(mesh_id): url = "http://data.bioontology.org/ontologies/MESH/classes/http%3A%2F%2Fpurl.bioontology.org%2Fontology%2FMESH%2F{}" d = requests.get(url.format(mesh_id), params={'apikey': BIOPORTAL_KEY}).json() if "errors" in d: return {'mesh_label': '', 'mesh_descr': ''} d = {'mesh_label': d['prefLabel'], 'mesh_descr': d['definition'], 'mesh_synonyms': ";".join(d['synonym'])} d['mesh_descr'] = d['mesh_descr'][0] if d['mesh_descr'] else '' return d def get_mesh_changes(leftover_in_wd): # from the things added to wikidata, make a table with the metadata about the change # starting with things added to wd mesh_info = [] mesh_url = "https://meshb.nlm.nih.gov/record/ui?ui={}" do_metadata = get_do_metadata() for doid, meshs in tqdm(leftover_in_wd.items()): for mesh in meshs: relation, mesh = mesh.split("_") mesh = mesh.split(":")[1] qid = DOID_QID[doid] do_node = do_metadata.get(doid, dict()) x = {'qid': qid, 'wd_label': getConceptLabel(qid), 'doid': doid, 'do_label': do_node.get("label"), 'doid_url': curie_to_purl(doid), 'do_def': do_node.get("descr"), 'mesh': mesh, 'mesh_url': mesh_url.format(mesh), 'relation': relation} x.update(get_mesh_info(mesh)) mesh_info.append(x) df = pd.DataFrame(mesh_info) df = df[['doid', 'do_label', 'do_def', 'doid_url', 'mesh', 'mesh_label', 'mesh_descr', 'mesh_synonyms', 'mesh_url', 'qid', 'wd_label', 'relation']] print(df.head(2)) remove_me = df[df.mesh_label.isnull()] if not remove_me.empty: print("you should remove these") print(remove_me) # make a formatted df df_fmt = df.copy() df_fmt.doid = df_fmt.apply(lambda x: "[" + x.doid + "](" + x.doid_url + ")", 1) del df_fmt['doid_url'] df_fmt.mesh = df_fmt.apply(lambda x: "[" + x.mesh + "](" + x.mesh_url + ")", 1) del df_fmt['mesh_url'] df_fmt.qid = df_fmt.qid.apply(lambda x: "[" + x + "](https://www.wikidata.org/wiki/" + x + ")") return df, df_fmt def download_do_owl(release): url = "https://github.com/DiseaseOntology/HumanDiseaseOntology/raw/master/src/ontology/releases/{}/doid.owl" subprocess.check_call(["wget", "-N", url.format(release)]) def main(release): # release = "2017-11-28" download_do_owl(release) leftover_in_wd, leftover_in_do = get_changes() df, df_fmt = get_mesh_changes(leftover_in_wd) return df, df_fmt
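# Illustrative shape of compare()'s output (values are hypothetical):
#   wd = {'DOID:1': {'skos:exactMatch_MESH:D001'}}
#   do = {'DOID:1': {'skos:exactMatch_MESH:D001', 'oboInOwl:hasDbXref_MESH:D002'}}
#   leftover_in_wd == {}                                             # nothing extra on Wikidata
#   leftover_in_do == {'DOID:1': {'oboInOwl:hasDbXref_MESH:D002'}}   # missing from Wikidata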
16ee3ce5d7916d66726b2fa0302489056100e2b9
5a4b7c41ffe95a1b0422a8eb1cb89bd9a75fd68c
/tests/models/mappa/test_response_models.py
7c3405ea6f91df8fbd0e20e83d48b3bfe62ef812
[ "MIT" ]
permissive
escoteirando/escoteiros-mappa-old
3c7ca4f6b4c4e9dbee1abb5430cc773c9ee4d903
4917da4cae49ded521db248fdea39ca79b7ef8f7
refs/heads/develop
2022-12-11T06:26:10.239310
2020-04-20T00:52:03
2020-04-20T00:52:03
253,770,625
1
1
MIT
2022-12-08T09:39:59
2020-04-07T11:16:18
Python
UTF-8
Python
false
false
2,188
py
import unittest
from datetime import datetime

from dateutil.tz import tzutc

from mappa.models.mappa.associado import AssociadoModel
from mappa.models.mappa.escotista import EscotistaModel
from mappa.models.mappa.login import LoginModel


class TestResponseModels(unittest.TestCase):

    def test_login_mappa(self):
        data = '''{
        "id": "904QVxCGR0mLG6uDqWt7EOZLZZyfbaBRatKnoMefohwfkpPjc5jeuyUNAWED5t7H",
        "ttl": 1209600,
        "created": "2019-10-26T02:19:09.146Z",
        "userId": 50442
        }'''
        login = LoginModel(data)

        self.assertEqual(
            login.id,
            "904QVxCGR0mLG6uDqWt7EOZLZZyfbaBRatKnoMefohwfkpPjc5jeuyUNAWED5t7H")
        self.assertEqual(login.ttl, 1209600)
        self.assertEqual(login.created,
                         datetime(2019, 10, 26, 2, 19, 9, 146000, tzutc()))
        self.assertEqual(login.userId, 50442)

    def test_associado_mappa(self):
        data = '''{
        "codigo":850829,
        "nome":"GUIONARDO FURLAN",
        "codigoFoto":null,
        "codigoEquipe":null,
        "username":1247937,
        "numeroDigito":3,
        "dataNascimento":"Sat Feb 05 1977 00:00:00 GMT+0000 (UTC)",
        "dataValidade":"2019-01-01T00:00:00.000Z",
        "nomeAbreviado":"",
        "sexo":"M",
        "codigoRamo":2,
        "codigoCategoria":5,
        "codigoSegundaCategoria":0,
        "codigoTerceiraCategoria":0,
        "linhaFormacao":"Escotista",
        "codigoRamoAdulto":2,
        "dataAcompanhamento":null
        }'''
        associado = AssociadoModel(data)
        self.assertEqual(associado.codigo, 850829)
        self.assertEqual(associado.nome, 'GUIONARDO FURLAN')
        self.assertEqual(associado.codigoFoto, None)
        self.assertEqual(associado.dataAcompanhamento, None)

    def test_escotista_mappa(self):
        data = '''{
        "codigo": 50442,
        "codigoAssociado": 850829,
        "username": "Guionardo",
        "nomeCompleto": "GuionardoFurlan",
        "ativo": "S",
        "codigoGrupo": 32,
        "codigoRegiao": "SC",
        "codigoFoto": null
        }'''
        escotista = EscotistaModel(data)
        self.assertEqual(escotista.codigo, 50442)
        self.assertEqual(escotista.codigoAssociado, 850829)
        self.assertEqual(escotista.ativo, 'S')
da4219c9fa4f5b076e85b7cc131e88c1acf86898
353def93fa77384ee3a5e3de98cfed318c480634
/.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200628000400.py
d60bfbff6cf07f0498fc2d12a1ed68303c1eecd8
[]
no_license
ydbB/Python001-class01
d680abc3ea1ccaeb610751e3488421417d381156
ad80037ccfc68d39125fa94d2747ab7394ac1be8
refs/heads/master
2022-11-25T11:27:45.077139
2020-07-19T12:35:12
2020-07-19T12:35:12
272,783,233
0
0
null
2020-06-16T18:28:15
2020-06-16T18:28:15
null
UTF-8
Python
false
false
2,441
py
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup as bs

from maoyanspiders.items import MaoyanspidersItem


class MoviesSpider(scrapy.Spider):
    name = 'movies'
    allowed_domains = ['maoyan.com']
    start_urls = ['http://maoyan.com/board/4']

    def start_requests(self):
        header = {
            'Content-Type': 'text/plain; charset=UTF-8',
            'Cookie': '__mta=251934006.1593072991075.1593267136578.1593267143630.29; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593140975947.1593145813576.21; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593267144; _lxsdk_s=172f6570f57-262-d57-29%7C%7C1',
            'Origin': 'https://maoyan.com',
            'Referer': 'https://maoyan.com/board/4',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
        }
        url = 'https://maoyan.com/board/4'
        yield scrapy.Request(url=url, headers=header, callback=self.parse)

    def parse(self, response):
        soup = bs(response.text, 'html.parser')
        for i in soup.find_all('div', attrs={'class': 'movie-item-info'}):
            item = MaoyanspidersItem()
            title = i.find('p', attrs={'class': 'name'}).find('a')
            name = title.get('title')
            link = 'https://maoyan.com/' + title.get('href')
            time = i.find('p', attrs={'class': 'releasetime'}).text
            item['films_name'] = name
            item['release_time'] = time
            yield scrapy.Request(url=link, meta={'item': item}, callback=self.parse1)

    def parse1(self, response):
        item = response.meta['item']
        soup = bs(response.text, 'html.parser')
        # keep the text of the first <li> of the brief container, not the tag object
        type_tag = soup.find('div', attrs={'class': 'movie-brief-container'}).find_all('li')[0]
        item['films_type'] = type_tag.text
        yield item
dff0faa0489696fca7561a52f1eed242eed66498
a838d4bed14d5df5314000b41f8318c4ebe0974e
/sdk/mixedreality/azure-mixedreality-authentication/tests/test_static_access_token_credential_async.py
f9b7171460fd4f5c33b8e663c249d8255f338ad3
[ "MIT", "LicenseRef-scancode-generic-cla", "LGPL-2.1-or-later" ]
permissive
scbedd/azure-sdk-for-python
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
refs/heads/master
2023-09-01T08:38:56.188954
2021-06-17T22:52:28
2021-06-17T22:52:28
159,568,218
2
0
MIT
2019-08-11T21:16:01
2018-11-28T21:34:49
Python
UTF-8
Python
false
false
933
py
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

from azure.core.credentials import AccessToken

from devtools_testutils import AzureTestCase

from azure.mixedreality.authentication._shared.aio.static_access_token_credential import StaticAccessTokenCredential


class TestAsyncStaticAccessTokenCredential:
    @AzureTestCase.await_prepared_test
    async def test_get_token(self):
        token = "My access token"
        expiration = 0
        access_token = AccessToken(token=token, expires_on=expiration)

        staticAccessToken = StaticAccessTokenCredential(access_token)

        actual = await staticAccessToken.get_token()

        assert access_token == actual
7944dbc113b7f3dbd221b524d325116a297c83f5
f640fcb49bf99ebec5f34603748121fbbe9171dc
/lib_openshift/models/v1_image_change_trigger.py
b8b6b0fc4058951c8cd4fd60243032141171ce3b
[]
no_license
tbielawa/lib_openshift
bea8a11c4904a7d6c815abdd2b206de5a4cc7a93
34ca0f6a0c5388624a040223f29552dc4c0f8c49
refs/heads/master
2023-06-16T22:41:15.894021
2016-07-11T21:26:59
2016-07-11T21:26:59
63,156,531
0
0
null
2016-07-12T12:35:29
2016-07-12T12:35:29
null
UTF-8
Python
false
false
4,909
py
# coding: utf-8

"""
Copyright 2016 SmartBear Software

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

    Ref: https://github.com/swagger-api/swagger-codegen
"""

from pprint import pformat
from six import iteritems


class V1ImageChangeTrigger(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        V1ImageChangeTrigger - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'last_triggered_image_id': 'str',
            '_from': 'V1ObjectReference'
        }

        self.attribute_map = {
            'last_triggered_image_id': 'lastTriggeredImageID',
            '_from': 'from'
        }

        self._last_triggered_image_id = None
        self.__from = None

    @property
    def last_triggered_image_id(self):
        """
        Gets the last_triggered_image_id of this V1ImageChangeTrigger.
        LastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build

        :return: The last_triggered_image_id of this V1ImageChangeTrigger.
        :rtype: str
        """
        return self._last_triggered_image_id

    @last_triggered_image_id.setter
    def last_triggered_image_id(self, last_triggered_image_id):
        """
        Sets the last_triggered_image_id of this V1ImageChangeTrigger.
        LastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build

        :param last_triggered_image_id: The last_triggered_image_id of this V1ImageChangeTrigger.
        :type: str
        """
        self._last_triggered_image_id = last_triggered_image_id

    @property
    def _from(self):
        """
        Gets the _from of this V1ImageChangeTrigger.
        From is a reference to an ImageStreamTag that will trigger a build when updated. It is optional.
        If no From is specified, the From image from the build strategy will be used. Only one
        ImageChangeTrigger with an empty From reference is allowed in a build configuration.

        :return: The _from of this V1ImageChangeTrigger.
        :rtype: V1ObjectReference
        """
        return self.__from

    @_from.setter
    def _from(self, _from):
        """
        Sets the _from of this V1ImageChangeTrigger.
        From is a reference to an ImageStreamTag that will trigger a build when updated. It is optional.
        If no From is specified, the From image from the build strategy will be used. Only one
        ImageChangeTrigger with an empty From reference is allowed in a build configuration.

        :param _from: The _from of this V1ImageChangeTrigger.
        :type: V1ObjectReference
        """
        self.__from = _from

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
7065790d2280e02944b4499fb05d3b4af79dbde3
ce18877752c43eb66f03bdc169e3ef45a1720d15
/src/apps/shop/__init__.py
e929914882f6d6647c0575acdfc548c0d37f700c
[]
no_license
ajlexgit/robin
26e8682ae09795acf0f3fc1297d20044285b83df
25ac1c3455838fc26656cfa16d05b2943d0cbba6
refs/heads/master
2021-07-13T22:49:09.177207
2017-10-13T07:44:42
2017-10-13T07:44:42
103,655,240
1
0
null
null
null
null
UTF-8
Python
false
false
1,396
py
""" Интернет-магазин с корзиной в localStorage и сессии. Установка: settings.py: INSTALLED_APPS = ( ... 'mptt', 'shop', ... ) SUIT_CONFIG = { ... { 'app': 'shop', 'icon': 'icon-shopping-cart', 'models': ( 'ShopOrder', 'ShopProduct', 'ShopCategory', 'ShopConfig', ) }, ... } MIDDLEWARE_CLASSES = ( ... 'libs.js_storage.middleware.JSStorageMiddleware', ... 'shop.middleware.CartMiddleware', ... ) urls.py: ... url(r'^shop/', include('shop.urls', namespace='shop')), ... При конкретной реализации, нужно вызывать Django-сигналы для подтверждения, оплаты и отмены заказа: from .signals import order_confirmed ... order_confirmed.send(sender=ShopOrder, order=order, request=request) """ default_app_config = 'shop.apps.Config'
8e8a7aaabbfac8bdccff57b2331e7edeaf82e5b9
9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d
/python/DistrictUserUploadTask/main.py
21fca594a2a3b2985d5fe605991b29340f8573b3
[]
no_license
apollowesley/Demo
f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8
471c4af95d3a7222d6933afc571a8e52e8fe4aee
refs/heads/master
2021-02-15T04:01:51.590697
2018-01-29T01:44:29
2018-01-29T01:44:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,274
py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
import logging.config
import ConfigParser
import os
import sys
import re
import getpass
from task import UploadTask

# logging
logging.config.fileConfig("./log.conf")
logger = logging.getLogger("example01")


# load the configuration
def load_config():
    mysql = {}
    try:
        cf = ConfigParser.ConfigParser()
        cf.read("config.conf")
        # iterate over all sections, keeping only the mysql one
        secs = cf.sections()
        for sec in secs:
            if sec == "mysql":
                for item in cf.items(sec):
                    mysql[item[0]] = item[1]
    except Exception as e:
        logger.error("Exception while loading the config file: {}".format(e))
    return mysql


def prn_obj(obj):
    print '\n'.join(['%s:%s' % item for item in obj.__dict__.items()])


def main():
    reload(sys)
    sys.setdefaultencoding('utf-8')
    logger.info("Preparing to run the tool")

    # load the configuration file
    mysql = load_config()
    logger.info("MySQL configuration: %s" % str(mysql))

    # list the files under ./upload and check that their names match the
    # expected pattern; exit if any does not
    upload_files = os.listdir('./upload')
    if len(upload_files) == 0:
        print("The upload directory is empty; put the upload files there first")
        return
    for filename in upload_files:
        print(filename)
        if not re.match(r".*area_(\d+)_.*\..*", filename):
            print("File name does not match the required pattern. %s" % filename)
            sys.exit(1)  # was os.exit(1), which does not exist

    # prompt for the database password
    password = getpass.getpass("Enter the password for MySQL user %s: " % mysql["username"])
    mysql["password"] = password

    # verify every Excel record before uploading anything
    logger.info("Checking that the Excel records are valid")
    for filename in upload_files:
        task = UploadTask(filename, mysql, logger)
        if not task.verify_records():
            print("The upload file contains badly formatted records; fix them as prompted and run again")
            sys.exit(1)  # was os.exit(1)

    # perform the upload
    logger.info("Writing the Excel records to MySQL")
    for filename in upload_files:
        task = UploadTask(filename, mysql, logger)
        task.upload()

    logger.info("Upload finished")


if __name__ == "__main__":
    main()