Columns: id — string (2 to 8 characters); text — string (16 to 264k characters); dataset_id — string (1 distinct value).
337880
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

""" Automatically generate Ising Hamiltonians from general models of optimization problems.

This program converts general models of optimization problems into an Ising Hamiltonian.
To write models of optimization problems, DOcplex (a Python library for optimization problems)
is used in the program.
(https://cdn.rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)

It currently supports models that consist of the following elements.

- Binary variables.
- Linear or quadratic objective function.
- Equality constraints.
  * Comparison operators in constraints have to be equality (==).
    Inequality constraints (e.g. x+y <= 5) are not allowed.

The following is an example of use.

---
# Create an instance of a model and variables with DOcplex.
mdl = Model(name='tsp')
x = {(i,p): mdl.binary_var(name='x_{0}_{1}'.format(i,p)) for i in range(num_node) for p in range(num_node)}

# Objective function
tsp_func = mdl.sum(ins.w[i,j] * x[(i,p)] * x[(j,(p+1)%num_node)] for i in range(num_node) for j in range(num_node) for p in range(num_node))
mdl.minimize(tsp_func)

# Constraints
for i in range(num_node):
    mdl.add_constraint(mdl.sum(x[(i,p)] for p in range(num_node)) == 1)
for p in range(num_node):
    mdl.add_constraint(mdl.sum(x[(i,p)] for i in range(num_node)) == 1)

# Call the method to convert the model into an Ising Hamiltonian.
qubitOp, offset = get_qubitops(mdl)

# Calculate with the generated Ising Hamiltonian.
ee = ExactEigensolver(qubitOp, k=1)
result = ee.run()

print('get_qubitops')
print('tsp objective:', result['energy'] + offset)
---
"""

import logging
from collections import OrderedDict
from math import fsum

import numpy as np
from docplex.mp.constants import ComparisonType
from docplex.mp.model import Model

from qiskit.quantum_info import Pauli
from qiskit.aqua import Operator, AquaError

logger = logging.getLogger(__name__)


def get_qubitops(mdl, auto_penalty=True, default_penalty=1e5):
    """ Generate an Ising Hamiltonian from a model of DOcplex.

    Args:
        mdl (docplex.mp.model.Model): A model of DOcplex for an optimization problem.
        auto_penalty (bool): If true, the penalty coefficient is automatically defined
                             by "_auto_define_penalty()".
        default_penalty (float): The default value of the penalty coefficient for the
                                 constraints. This value is used if "auto_penalty" is False.

    Returns:
        operator.Operator, float: operator for the Hamiltonian and a constant shift
                                  for the objective function.
    """

    _validate_input_model(mdl)

    # set the penalty coefficient by _auto_define_penalty() or manually.
    if auto_penalty:
        penalty = _auto_define_penalty(mdl, default_penalty)
    else:
        penalty = default_penalty

    # set a sign corresponding to a maximized or minimized problem.
    # sign == 1 is for a minimized problem. sign == -1 is for a maximized problem.
    sign = 1
    if mdl.is_maximized():
        sign = -1

    # assign variables of the model to qubits.
    qd = {}
    index = 0
    for i in mdl.iter_variables():
        if i in qd:
            continue
        qd[i] = index
        index += 1

    # initialize Hamiltonian.
    num_nodes = len(qd)
    pauli_list = []
    shift = 0
    zero = np.zeros(num_nodes, dtype=np.bool)

    # convert the constant part of the objective function into the Hamiltonian.
    shift += mdl.get_objective_expr().get_constant() * sign

    # convert linear parts of the objective function into the Hamiltonian.
    l_itr = mdl.get_objective_expr().iter_terms()
    for j in l_itr:
        zp = np.zeros(num_nodes, dtype=np.bool)
        index = qd[j[0]]
        weight = j[1] * sign / 2
        zp[index] = True

        pauli_list.append([-weight, Pauli(zp, zero)])
        shift += weight

    # convert quadratic parts of the objective function into the Hamiltonian.
    q_itr = mdl.get_objective_expr().iter_quads()
    for i in q_itr:
        index1 = qd[i[0][0]]
        index2 = qd[i[0][1]]
        weight = i[1] * sign / 4

        if index1 == index2:
            shift += weight
        else:
            zp = np.zeros(num_nodes, dtype=np.bool)
            zp[index1] = True
            zp[index2] = True
            pauli_list.append([weight, Pauli(zp, zero)])

        zp = np.zeros(num_nodes, dtype=np.bool)
        zp[index1] = True
        pauli_list.append([-weight, Pauli(zp, zero)])

        zp = np.zeros(num_nodes, dtype=np.bool)
        zp[index2] = True
        pauli_list.append([-weight, Pauli(zp, zero)])

        shift += weight

    # convert constraints into penalty terms.
    for constraint in mdl.iter_constraints():
        constant = constraint.right_expr.get_constant()

        # constant parts of penalty*(Constant-func)**2: penalty*(Constant**2)
        shift += penalty * constant ** 2

        # linear parts of penalty*(Constant-func)**2: penalty*(-2*Constant*func)
        for l in constraint.left_expr.iter_terms():
            zp = np.zeros(num_nodes, dtype=np.bool)
            index = qd[l[0]]
            weight = l[1]
            zp[index] = True

            pauli_list.append([penalty * constant * weight, Pauli(zp, zero)])
            shift += -penalty * constant * weight

        # quadratic parts of penalty*(Constant-func)**2: penalty*(func**2)
        for l in constraint.left_expr.iter_terms():
            for l2 in constraint.left_expr.iter_terms():
                index1 = qd[l[0]]
                index2 = qd[l2[0]]
                weight1 = l[1]
                weight2 = l2[1]
                penalty_weight1_weight2 = penalty * weight1 * weight2 / 4

                if index1 == index2:
                    shift += penalty_weight1_weight2
                else:
                    zp = np.zeros(num_nodes, dtype=np.bool)
                    zp[index1] = True
                    zp[index2] = True
                    pauli_list.append([penalty_weight1_weight2, Pauli(zp, zero)])

                zp = np.zeros(num_nodes, dtype=np.bool)
                zp[index1] = True
                pauli_list.append([-penalty_weight1_weight2, Pauli(zp, zero)])

                zp = np.zeros(num_nodes, dtype=np.bool)
                zp[index2] = True
                pauli_list.append([-penalty_weight1_weight2, Pauli(zp, zero)])

                shift += penalty_weight1_weight2

    # Remove paulis whose coefficients are zeros.
    qubitOp = Operator(paulis=pauli_list)
    qubitOp.zeros_coeff_elimination()

    return qubitOp, shift


def _validate_input_model(mdl):
    """ Check whether an input model is valid. If not, raise an AquaError.

    Args:
        mdl (docplex.mp.model.Model): A model of DOcplex for an optimization problem.
    """
    valid = True

    # validate the object type of the input.
    if not isinstance(mdl, Model):
        raise AquaError('An input model must be docplex.mp.model.Model.')

    # raise an error if the type of the variable is not a binary type.
    for var in mdl.iter_variables():
        if not var.is_binary():
            logger.warning(
                'The type of Variable {} is {}. It must be a binary variable. '.format(var, var.vartype.short_name))
            valid = False

    # raise an error if the constraint type is not an equality constraint.
    for constraint in mdl.iter_constraints():
        if not constraint.sense == ComparisonType.EQ:
            logger.warning('Constraint {} is not an equality constraint.'.format(constraint))
            valid = False

    if not valid:
        raise AquaError('The input model has unsupported elements.')


def _auto_define_penalty(mdl, default_penalty=1e5):
    """ Automatically define the penalty coefficient.

    This returns the objective function's (upper bound - lower bound + 1).

    Args:
        mdl (docplex.mp.model.Model): A model of DOcplex for an optimization problem.
        default_penalty (float): The default value of the penalty coefficient for the constraints.

    Returns:
        float: The penalty coefficient for the Hamiltonian.
    """

    # if a constraint has a float coefficient, return 1e5 for the penalty coefficient.
    terms = []
    for constraint in mdl.iter_constraints():
        terms.append(constraint.right_expr.get_constant())
        terms.extend(term[1] for term in constraint.left_expr.iter_terms())
    if any(isinstance(term, float) and not term.is_integer() for term in terms):
        logger.warning('Using %f for the penalty coefficient because a float coefficient exists in constraints. \n'
                       'The value could be too small. If so, set the penalty coefficient manually.',
                       default_penalty)
        return default_penalty

    # (upper bound - lower bound) can be calculated as the sum of absolute values of coefficients.
    # Firstly, add 1 to guarantee that infeasible answers will be greater than the upper bound.
    penalties = [1]
    # add linear terms of the objective function.
    penalties.extend(abs(i[1]) for i in mdl.get_objective_expr().iter_terms())
    # add quadratic terms of the objective function.
    penalties.extend(abs(i[1]) for i in mdl.get_objective_expr().iter_quads())

    return fsum(penalties)


def sample_most_likely(state_vector):
    """Compute the most likely binary string from a state vector.

    Args:
        state_vector (numpy.ndarray or dict): state vector or counts.

    Returns:
        numpy.ndarray: binary string as a numpy.ndarray of ints.
    """
    if isinstance(state_vector, dict) or isinstance(state_vector, OrderedDict):
        # get the binary string with the largest count
        binary_string = sorted(state_vector.items(), key=lambda kv: kv[1])[-1][0]
        x = np.asarray([int(y) for y in reversed(list(binary_string))])
        return x
    else:
        n = int(np.log2(state_vector.shape[0]))
        k = np.argmax(np.abs(state_vector))
        x = np.zeros(n)
        for i in range(n):
            x[i] = k % 2
            k >>= 1
        return x
StarcoderdataPython
1798493
<filename>app/starling/routes.py
import base64
import hashlib
import json

from flask import request
from dateutil import parser

from app import db
from app.helpers import json_response
from app.starling import bp
from app.starling.models import StarlingTransaction
from app.users.models import User


@bp.route('/webhook/<string:uuid>', methods=['POST'])
def webhook(uuid):
    user = User.query.filter_by(uuid=uuid).first()

    if user is None:
        return json_response(404, {
            'success': False,
            'message': 'User does not exist'
        })

    body = request.get_data(as_text=True)
    signature = str(request.headers.get('X-Hook-Signature'))

    hash = hashlib.sha512(str(user.starling_webhook_secret + body).encode('utf-8'))
    encoded = base64.b64encode(hash.digest()).decode("utf-8")

    print('--- THEIR SIGNATURE ---')
    print(signature)
    print('--- OUR SIGNATURE ---')
    print(encoded)
    print('---------------')

    # TODO: test this with actual request
    if False and signature != encoded:
        return json_response(403, {
            'success': False,
            'message': 'Invalid signature'
        })

    json_data = json.loads(body)
    trans_data = {
        'user_id': user.id,
        'transaction_uid': json_data['content']['transactionUid'],
        'amount': json_data['content']['amount'],
        'transaction_type': json_data['content']['type'],
        'payee': json_data['content']['counterParty'],
        'transaction_date': parser.parse(json_data['timestamp']),
    }

    trans = StarlingTransaction.query.filter_by(
        transaction_uid=trans_data['transaction_uid']
    ).first()

    status = 200
    if trans is None:
        trans = StarlingTransaction(**trans_data)
        db.session.add(trans)
        status = 201
    else:
        trans.update(trans_data)
        db.session.merge(trans)

    db.session.commit()

    return json_response(status, {'success': True})
StarcoderdataPython
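For local testing of the webhook route above, the signature header can be reproduced the same way the handler computes it (SHA-512 over secret + body, base64-encoded). A minimal sketch; the secret, transaction values and UUID below are made up, and the payload only carries the fields the handler reads:

import base64
import hashlib
import json

secret = "example-webhook-secret"  # stands in for user.starling_webhook_secret
body = json.dumps({
    "timestamp": "2021-05-01T12:00:00.000Z",
    "content": {
        "transactionUid": "11111111-2222-3333-4444-555555555555",
        "amount": -3.50,
        "type": "TRANSACTION_CARD",
        "counterParty": "Coffee Shop",
    },
})

# Same computation as the route: sha512(secret + body), then base64.
digest = hashlib.sha512((secret + body).encode("utf-8")).digest()
signature = base64.b64encode(digest).decode("utf-8")

# POST `body` to /webhook/<user uuid> with header {"X-Hook-Signature": signature}.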
11301862
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ____________developed by <NAME>____________________
# ________in collaboration with <NAME> _________

from colorama import Cursor, init, Fore, Back, Style
import re

# init()
STYLE = re.compile(r"\[[F,B,S][A-Z]\]")
print(Style.RESET_ALL)

color = {"[FR]": Fore.RED,
         "[FY]": Fore.YELLOW,
         "[FB]": Fore.BLUE,
         "[FG]": Fore.GREEN,
         "[FM]": Fore.MAGENTA,
         "[FC]": Fore.CYAN,
         "[FW]": Fore.WHITE,
         "[FN]": Fore.BLACK,
         "[FS]": Fore.RESET,
         "[BB]": Back.BLUE,
         "[BR]": Back.RED,
         "[BG]": Back.GREEN,
         "[BY]": Back.YELLOW,
         "[BM]": Back.MAGENTA,
         "[BC]": Back.CYAN,
         "[BW]": Back.WHITE,
         "[BS]": Back.RESET,
         "[SD]": Style.DIM,
         "[SN]": Style.NORMAL,
         "[SB]": Style.BRIGHT,
         "[SR]": Style.RESET_ALL
         }


def pos(x, y):
    return Cursor.POS(x, y)


def up(n):
    return Cursor.UP(n)


def down(n):
    return Cursor.DOWN(n)


def forward(n):
    return Cursor.FORWARD(n)


def back(n):
    return Cursor.BACK(n)


def log_color(mesaje):
    # Replace the recognized style tags with their ANSI escape codes.
    colors = [s for s in STYLE.findall(mesaje) if s in color]
    for s in colors:
        mesaje = mesaje.replace(s, color[s])
    return mesaje + Style.RESET_ALL


def rawlog_color(mesaje):
    # Strip the style tags, leaving plain text.
    colors = [s for s in STYLE.findall(mesaje) if s in color]
    for s in colors:
        mesaje = mesaje.replace(s, "")
    return mesaje
StarcoderdataPython
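A small usage sketch for the tag-based color helper above, assuming the module is importable as colorlog (the module name is an assumption):

from colorlog import log_color, rawlog_color  # module name is hypothetical

# [SB] bright style, [FR] red foreground, [BY] yellow background, [FN] black foreground, [SR] reset.
msg = "[SB][FR]ERROR:[SR] disk usage at [BY][FN] 93% [SR]"
print(log_color(msg))     # tags replaced by ANSI codes, with a reset appended at the end
print(rawlog_color(msg))  # tags stripped, plain text suitable for log files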
269580
<reponame>becarefullee/django-fitbit
# Your Fitbit access credentials, which must be requested from Fitbit.
# You must provide these in your project's settings.
FITAPP_CONSUMER_KEY = None
FITAPP_CONSUMER_SECRET = None

# The verification code for verifying subscriber endpoints
FITAPP_VERIFICATION_CODE = None

# Where to redirect to after Fitbit authentication is successfully completed.
FITAPP_LOGIN_REDIRECT = '/'

# Where to redirect to after Fitbit authentication credentials have been
# removed.
FITAPP_LOGOUT_REDIRECT = '/'

# By default, don't subscribe to user data. Set this to true to subscribe.
FITAPP_SUBSCRIBE = False

# Only retrieve data for resources in FITAPP_SUBSCRIPTIONS. The default value
# of none results in all subscriptions being retrieved. Override it to be an
# OrderedDict of just the items you want retrieved, in the order you want them
# retrieved, eg:
# from collections import OrderedDict
# FITAPP_SUBSCRIPTIONS = OrderedDict([
#     ('foods', ['log/caloriesIn', 'log/water']),
# ])
# The default ordering is ['category', 'resource'] when a subscriptions dict is
# not specified.
FITAPP_SUBSCRIPTIONS = None

# The initial delay (in seconds) when doing the historical data import
FITAPP_HISTORICAL_INIT_DELAY = 10

# The delay (in seconds) between items when doing requests
FITAPP_BETWEEN_DELAY = 5

# By default, don't try to get intraday time series data. See
# https://dev.fitbit.com/docs/activity/#get-activity-intraday-time-series for
# more info.
FITAPP_GET_INTRADAY = False

# The verification code used by Fitbit to verify subscription endpoints. Only
# needed temporarily. See:
# https://dev.fitbit.com/docs/subscriptions/#verify-a-subscriber
FITAPP_VERIFICATION_CODE = None

# The template to use when an unavoidable error occurs during Fitbit
# integration.
FITAPP_ERROR_TEMPLATE = 'fitapp/error.html'

# The default message used by the fitbit_integration_warning decorator to
# inform the user about Fitbit integration. If a callable is given, it is
# called with the request as the only parameter to get the final value for the
# message.
FITAPP_DECORATOR_MESSAGE = 'This page requires Fitbit integration.'

# Whether or not a user must be authenticated in order to hit the login,
# logout, error, and complete views.
FITAPP_LOGIN_REQUIRED = True

# Whether or not intraday data points with step values of 0 are saved
# to the database.
FITAPP_SAVE_INTRADAY_ZERO_VALUES = False

# The default amount of data we pull for each user registered with this app
FITAPP_DEFAULT_PERIOD = 'max'

# The collection we want to receive subscription updates for
# (e.g. 'activities'). None defaults to all collections.
FITAPP_SUBSCRIPTION_COLLECTION = None

# The default fitbit scope, None defaults to all scopes, otherwise take
# a list of scopes (eg. ["activity", "profile", "settings"])
FITAPP_SCOPE = None
StarcoderdataPython
1659695
import os

import numpy as np
import scipy
import tensorflow as tf
from scipy.misc import imread
from tensorpack import DataFlow


class DatasetMetadata(object):
    """Helper class which loads and stores dataset metadata."""

    def __init__(self, filename):
        import csv
        """Initializes instance of DatasetMetadata."""
        self._true_labels = {}
        with open(filename) as f:
            reader = csv.reader(f)
            header_row = next(reader)
            try:
                row_idx_image_id = header_row.index('name')
                row_idx_true_label = header_row.index('label')
            except ValueError:
                raise IOError('Invalid format of dataset metadata.')
            for row in reader:
                if len(row) < len(header_row):
                    # skip partial or empty lines
                    continue
                try:
                    image_id = row[row_idx_image_id]
                    self._true_labels[image_id] = int(row[row_idx_true_label])
                except (IndexError, ValueError):
                    raise IOError('Invalid format of dataset metadata')

    def get_true_label(self, image_ids):
        """Returns true label for image with given ID."""
        return [self._true_labels[image_id] for image_id in image_ids]


class PNGDataFlow(DataFlow):

    def __init__(self, imagedir, imagelistfile, gtfile, img_num=-1):
        self.imagedir = imagedir
        with open(imagelistfile, 'r') as f:
            self.imagename = f.readlines()
        self.imagename = [x.strip() for x in self.imagename]
        self.gt_dict = DatasetMetadata(gtfile)._true_labels
        self.img_num = img_num if img_num > -1 else len(self.imagename)

    def __iter__(self):
        for imgname in self.imagename[:self.img_num]:
            with tf.gfile.Open(os.path.join(self.imagedir, imgname + ".png"), 'rb') as f:
                image = imread(f, mode='RGB').astype(np.float) / 255.0
                # image = np.load(os.path.join(self.imagedir, imgname + ".npy")) / 255.0
            yield [image, self.gt_dict[imgname], imgname]


def save_images(images, savenames, savedir):
    for image, savename in zip(images, savenames):
        # np.save(os.path.join(savedir, savename + ".npy"), image * 255)
        scipy.misc.toimage(image * 255, cmin=0, cmax=255).save(os.path.join(savedir, savename + ".png"))
StarcoderdataPython
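Roughly how the DataFlow above is meant to be consumed; the paths below are placeholders, and the ground-truth CSV is assumed to have 'name' and 'label' columns as DatasetMetadata expects:

df = PNGDataFlow(
    imagedir="images",               # directory with <name>.png files
    imagelistfile="image_list.txt",  # one image name per line, without extension
    gtfile="labels.csv",             # CSV with 'name' and 'label' columns
    img_num=16,
)

for image, true_label, name in df:
    # image: float array in [0, 1] with shape (H, W, 3); true_label: int from the CSV
    print(name, true_label, image.shape)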
12801885
<gh_stars>1-10
__author__ = 'plasmashadow'

from .answer import *
from .questions import *
from .search import *
StarcoderdataPython
314280
import bson import pytz import os.path import tarfile import datetime import cStringIO from .web import base from .web.request import AccessType from . import config from . import util from . import validators import os from .dao.containerutil import pluralize log = config.log BYTES_IN_MEGABYTE = float(1<<20) def _filter_check(property_filter, property_values): minus = set(property_filter.get('-', []) + property_filter.get('minus', [])) plus = set(property_filter.get('+', []) + property_filter.get('plus', [])) if "null" in plus and not property_values: return True if "null" in minus and property_values: return False elif not minus.isdisjoint(property_values): return False if plus and plus.isdisjoint(property_values): return False return True class Download(base.RequestHandler): def _append_targets(self, targets, cont_name, container, prefix, total_size, total_cnt, data_path, filters): inputs = [('input', f) for f in container.get('inputs', [])] outputs = [('output', f) for f in container.get('files', [])] for file_group, f in inputs + outputs: if filters: filtered = True for filter_ in filters: type_as_list = [f['type']] if f.get('type') else [] if ( _filter_check(filter_.get('tags', {}), f.get('tags', [])) and _filter_check(filter_.get('types', {}), type_as_list) ): filtered = False break if filtered: continue filepath = os.path.join(data_path, util.path_from_hash(f['hash'])) if os.path.exists(filepath): # silently skip missing files if cont_name == 'analyses': targets.append((filepath, '{}/{}/{}'.format(prefix, file_group, f['name']), cont_name, str(container.get('_id')), f['size'])) else: targets.append((filepath, '{}/{}'.format(prefix, f['name']), cont_name, str(container.get('_id')), f['size'])) total_size += f['size'] total_cnt += 1 else: log.warn("Expected {} to exist but it is missing. File will be skipped in download.".format(filepath)) return total_size, total_cnt def _bulk_preflight_archivestream(self, file_refs): data_path = config.get_item('persistent', 'data_path') arc_prefix = self.get_param('prefix', 'scitran') file_cnt = 0 total_size = 0 targets = [] for fref in file_refs: cont_id = fref.get('container_id', '') filename = fref.get('filename', '') cont_name = fref.get('container_name','') if cont_name not in ['project', 'session', 'acquisition', 'analysis']: self.abort(400, 'Bulk download only supports files in projects, sessions, analyses and acquisitions') cont_name = pluralize(fref.get('container_name','')) file_obj = None try: # Try to find the file reference in the database (filtering on user permissions) bid = bson.ObjectId(cont_id) query = {'_id': bid} if not self.superuser_request: query['permissions._id'] = self.uid file_obj = config.db[cont_name].find_one( query, {'files': { '$elemMatch': { 'name': filename }} })['files'][0] except Exception: # pylint: disable=broad-except # self.abort(404, 'File {} on Container {} {} not found'.format(filename, cont_name, cont_id)) # silently skip missing files/files user does not have access to log.warn("Expected file {} on Container {} {} to exist but it is missing. 
File will be skipped in download.".format(filename, cont_name, cont_id)) continue filepath = os.path.join(data_path, util.path_from_hash(file_obj['hash'])) if os.path.exists(filepath): # silently skip missing files targets.append((filepath, cont_name+'/'+cont_id+'/'+file_obj['name'], cont_name, cont_id, file_obj['size'])) total_size += file_obj['size'] file_cnt += 1 if len(targets) > 0: filename = arc_prefix + '_ '+datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S') + '.tar' ticket = util.download_ticket(self.request.client_addr, self.origin, 'batch', targets, filename, total_size) config.db.downloads.insert_one(ticket) return {'ticket': ticket['_id'], 'file_cnt': file_cnt, 'size': total_size} else: self.abort(404, 'No files requested could be found') def _preflight_archivestream(self, req_spec, collection=None): data_path = config.get_item('persistent', 'data_path') arc_prefix = self.get_param('prefix', 'scitran') file_cnt = 0 total_size = 0 targets = [] filename = None ids_of_paths = {} base_query = {'deleted': {'$exists': False}} if not self.superuser_request: base_query['permissions._id'] = self.uid for item in req_spec['nodes']: item_id = bson.ObjectId(item['_id']) base_query['_id'] = item_id if item['level'] == 'project': project = config.db.projects.find_one(base_query, ['group', 'label', 'files']) if not project: # silently(while logging it) skip missing objects/objects user does not have access to log.warn("Expected project {} to exist but it is missing. Node will be skipped".format(item_id)) continue prefix = '/'.join([arc_prefix, project['group'], project['label']]) total_size, file_cnt = self._append_targets(targets, 'projects', project, prefix, total_size, file_cnt, data_path, req_spec.get('filters')) sessions = config.db.sessions.find({'project': item_id, 'deleted': {'$exists': False}}, ['label', 'files', 'uid', 'timestamp', 'timezone', 'subject']) session_dict = {session['_id']: session for session in sessions} acquisitions = config.db.acquisitions.find({'session': {'$in': session_dict.keys()}, 'deleted': {'$exists': False}}, ['label', 'files', 'session', 'uid', 'timestamp', 'timezone']) session_prefixes = {} subject_dict = {} subject_prefixes = {} for session in session_dict.itervalues(): if session.get('subject'): subject = session.get('subject', {'code': 'unknown_subject'}) code = subject.get('code') if code is None: code = 'unknown_subject' subject['code'] = code subject_dict[code] = subject for code, subject in subject_dict.iteritems(): subject_prefix = self._path_from_container(prefix, subject, ids_of_paths, code) subject_prefixes[code] = subject_prefix total_size, file_cnt = self._append_targets(targets, 'subjects', subject, subject_prefix, total_size, file_cnt, data_path, req_spec.get('filters')) for session in session_dict.itervalues(): subject_code = session['subject'].get('code', 'unknown_subject') subject = subject_dict[subject_code] session_prefix = self._path_from_container(subject_prefixes[subject_code], session, ids_of_paths, session["_id"]) session_prefixes[session['_id']] = session_prefix total_size, file_cnt = self._append_targets(targets, 'sessions', session, session_prefix, total_size, file_cnt, data_path, req_spec.get('filters')) for acq in acquisitions: session = session_dict[acq['session']] acq_prefix = self._path_from_container(session_prefixes[session['_id']], acq, ids_of_paths, acq['_id']) total_size, file_cnt = self._append_targets(targets, 'acquisitions', acq, acq_prefix, total_size, file_cnt, data_path, req_spec.get('filters')) elif 
item['level'] == 'session': session = config.db.sessions.find_one(base_query, ['project', 'label', 'files', 'uid', 'timestamp', 'timezone', 'subject']) if not session: # silently(while logging it) skip missing objects/objects user does not have access to log.warn("Expected session {} to exist but it is missing. Node will be skipped".format(item_id)) continue project = config.db.projects.find_one({'_id': session['project']}, ['group', 'label']) subject = session.get('subject', {'code': 'unknown_subject'}) if not subject.get('code'): subject['code'] = 'unknown_subject' prefix = self._path_from_container(self._path_from_container(project['group'] + '/' + project['label'], subject, ids_of_paths, subject["code"]), session, ids_of_paths, session['_id']) total_size, file_cnt = self._append_targets(targets, 'sessions', session, prefix, total_size, file_cnt, data_path, req_spec.get('filters')) # If the param `collection` holding a collection id is not None, filter out acquisitions that are not in the collection a_query = {'session': item_id, 'deleted': {'$exists': False}} if collection: a_query['collections'] = bson.ObjectId(collection) acquisitions = config.db.acquisitions.find(a_query, ['label', 'files', 'uid', 'timestamp', 'timezone']) for acq in acquisitions: acq_prefix = self._path_from_container(prefix, acq, ids_of_paths, acq['_id']) total_size, file_cnt = self._append_targets(targets, 'acquisitions', acq, acq_prefix, total_size, file_cnt, data_path, req_spec.get('filters')) elif item['level'] == 'acquisition': acq = config.db.acquisitions.find_one(base_query, ['session', 'label', 'files', 'uid', 'timestamp', 'timezone']) if not acq: # silently(while logging it) skip missing objects/objects user does not have access to log.warn("Expected acquisition {} to exist but it is missing. Node will be skipped".format(item_id)) continue session = config.db.sessions.find_one({'_id': acq['session']}, ['project', 'label', 'uid', 'timestamp', 'timezone', 'subject']) subject = session.get('subject', {'code': 'unknown_subject'}) if not subject.get('code'): subject['code'] = 'unknown_subject' project = config.db.projects.find_one({'_id': session['project']}, ['group', 'label']) prefix = self._path_from_container(self._path_from_container(self._path_from_container(project['group'] + '/' + project['label'], subject, ids_of_paths, subject['code']), session, ids_of_paths, session["_id"]), acq, ids_of_paths, acq['_id']) total_size, file_cnt = self._append_targets(targets, 'acquisitions', acq, prefix, total_size, file_cnt, data_path, req_spec.get('filters')) elif item['level'] == 'analysis': analysis = config.db.analyses.find_one(base_query, ['parent', 'label', 'inputs', 'files', 'uid', 'timestamp']) if not analysis: # silently(while logging it) skip missing objects/objects user does not have access to log.warn("Expected anaylysis {} to exist but it is missing. 
Node will be skipped".format(item_id)) continue prefix = self._path_from_container("", analysis, ids_of_paths, util.sanitize_string_to_filename(analysis['label'])) filename = 'analysis_' + util.sanitize_string_to_filename(analysis['label']) + '.tar' total_size, file_cnt = self._append_targets(targets, 'analyses', analysis, prefix, total_size, file_cnt, data_path, req_spec.get('filters')) if len(targets) > 0: if not filename: filename = arc_prefix + '_' + datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S') + '.tar' ticket = util.download_ticket(self.request.client_addr, self.origin, 'batch', targets, filename, total_size) config.db.downloads.insert_one(ticket) return {'ticket': ticket['_id'], 'file_cnt': file_cnt, 'size': total_size, 'filename': filename} else: self.abort(404, 'No requested containers could be found') def _path_from_container(self, prefix, container, ids_of_paths, _id): """ Returns the full path of a container instead of just a subpath, it must be provided with a prefix though """ def _find_new_path(path, ids_of_paths, _id): """ Checks to see if the full path is used """ if _id in ids_of_paths.keys(): # If the id is already associated with a path, use that instead of modifying it return ids_of_paths[_id] used_paths = [ids_of_paths[id_] for id_ in ids_of_paths if id_ != _id] i = 0 modified_path = path while modified_path in used_paths: modified_path = path + '_' + str(i) i += 1 return modified_path path = '' if not path and container.get('label'): path = container['label'] if not path and container.get('timestamp'): timezone = container.get('timezone') if timezone: path = pytz.timezone('UTC').localize(container['timestamp']).astimezone(pytz.timezone(timezone)).strftime('%Y%m%d_%H%M') else: path = container['timestamp'].strftime('%Y%m%d_%H%M') if not path and container.get('uid'): path = container['uid'] if not path and container.get('code'): path = container['code'] path = path.encode('ascii', errors='ignore') if not path: path = 'untitled' path = prefix + '/' + path path = _find_new_path(path, ids_of_paths, _id) ids_of_paths[_id] = path return path def archivestream(self, ticket): BLOCKSIZE = 512 CHUNKSIZE = 2**20 # stream files in 1MB chunks stream = cStringIO.StringIO() with tarfile.open(mode='w|', fileobj=stream) as archive: for filepath, arcpath, cont_name, cont_id, _ in ticket['target']: yield archive.gettarinfo(filepath, arcpath).tobuf() with open(filepath, 'rb') as fd: chunk = '' for chunk in iter(lambda: fd.read(CHUNKSIZE), ''): # pylint: disable=cell-var-from-loop yield chunk if len(chunk) % BLOCKSIZE != 0: yield (BLOCKSIZE - (len(chunk) % BLOCKSIZE)) * b'\0' self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=cont_id, filename=os.path.basename(arcpath), multifile=True, origin_override=ticket['origin']) # log download yield stream.getvalue() # get tar stream trailer stream.close() def symlinkarchivestream(self, ticket, data_path): for filepath, arcpath, cont_name, cont_id, _ in ticket['target']: t = tarfile.TarInfo(name=arcpath) t.type = tarfile.SYMTYPE t.linkname = os.path.relpath(filepath, data_path) yield t.tobuf() self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=cont_id, filename=os.path.basename(arcpath), multifile=True, origin_override=ticket['origin']) # log download stream = cStringIO.StringIO() with tarfile.open(mode='w|', fileobj=stream) as _: pass yield stream.getvalue() # get tar stream trailer stream.close() def download(self): """Download files or create a download ticket""" ticket_id = 
self.get_param('ticket') if ticket_id: ticket = config.db.downloads.find_one({'_id': ticket_id}) if not ticket: self.abort(404, 'no such ticket') if ticket['ip'] != self.request.client_addr: self.abort(400, 'ticket not for this source IP') if self.get_param('symlinks'): self.response.app_iter = self.symlinkarchivestream(ticket, config.get_item('persistent', 'data_path')) else: self.response.app_iter = self.archivestream(ticket) self.response.headers['Content-Type'] = 'application/octet-stream' self.response.headers['Content-Disposition'] = 'attachment; filename=' + ticket['filename'].encode('ascii', errors='ignore') else: req_spec = self.request.json_body if self.is_true('bulk'): return self._bulk_preflight_archivestream(req_spec.get('files', [])) else: payload_schema_uri = validators.schema_uri('input', 'download.json') validator = validators.from_schema_path(payload_schema_uri) validator(req_spec, 'POST') return self._preflight_archivestream(req_spec, collection=self.get_param('collection')) def summary(self): """Return a summary of what has been/will be downloaded based on a given query""" res = {} req = self.request.json_body cont_query = { 'projects': {'_id': {'$in':[]}}, 'sessions': {'_id': {'$in':[]}}, 'acquisitions': {'_id': {'$in':[]}}, 'analyses' : {'_id': {'$in':[]}} } for node in req: node['_id'] = bson.ObjectId(node['_id']) level = node['level'] containers = {'projects':0, 'sessions':0, 'acquisitions':0, 'analyses':0} if level == 'project': # Grab sessions and their ids sessions = config.db.sessions.find({'project': node['_id'], 'deleted': {'$exists': False}}, {'_id': 1}) session_ids = [s['_id'] for s in sessions] acquisitions = config.db.acquisitions.find({'session': {'$in': session_ids}, 'deleted': {'$exists': False}}, {'_id': 1}) acquisition_ids = [a['_id'] for a in acquisitions] containers['projects']=1 containers['sessions']=1 containers['acquisitions']=1 # for each type of container below it will have a slightly modified match query cont_query.get('projects',{}).get('_id',{}).get('$in').append(node['_id']) cont_query['sessions']['_id']['$in'] = cont_query['sessions']['_id']['$in'] + session_ids cont_query['acquisitions']['_id']['$in'] = cont_query['acquisitions']['_id']['$in'] + acquisition_ids elif level == 'session': acquisitions = config.db.acquisitions.find({'session': node['_id'], 'deleted': {'$exists': False}}, {'_id': 1}) acquisition_ids = [a['_id'] for a in acquisitions] # for each type of container below it will have a slightly modified match query cont_query.get('sessions',{}).get('_id',{}).get('$in').append(node['_id']) cont_query['acquisitions']['_id']['$in'] = cont_query['acquisitions']['_id']['$in'] + acquisition_ids containers['sessions']=1 containers['acquisitions']=1 elif level == 'acquisition': cont_query.get('acquisitions',{}).get('_id',{}).get('$in').append(node['_id']) containers['acquisitions']=1 elif level == 'analysis': cont_query.get('analyses',{}).get('_id',{}).get('$in').append(node['_id']) containers['analyses'] = 1 else: self.abort(400, "{} not a recognized level".format(level)) containers = [cont for cont in containers if containers[cont] == 1] for cont_name in containers: # Aggregate file types pipeline = [ {'$match': cont_query[cont_name]}, {'$unwind': '$files'}, {'$project': {'_id': '$_id', 'type': '$files.type','mbs': {'$divide': ['$files.size', BYTES_IN_MEGABYTE]}}}, {'$group': { '_id': '$type', 'count': {'$sum' : 1}, 'mb_total': {'$sum':'$mbs'} }} ] try: result = config.db.command('aggregate', cont_name, pipeline=pipeline) except 
Exception as e: # pylint: disable=broad-except log.warning(e) self.abort(500, "Failure to load summary") if result.get("ok"): for doc in result.get("result"): type_ = doc['_id'] if res.get(type_): res[type_]['count'] += doc.get('count',0) res[type_]['mb_total'] += doc.get('mb_total',0) else: res[type_] = doc return res
StarcoderdataPython
11241844
<reponame>dannyb2018/fastavro<filename>tests/test_utils.py
import random
from io import BytesIO

from fastavro import schemaless_writer
from fastavro.utils import generate_one, generate_many, anonymize_schema


def test_generate():
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [
            {"name": "null", "type": "null"},
            {"name": "boolean", "type": "boolean"},
            {"name": "string", "type": "string"},
            {"name": "bytes", "type": "bytes"},
            {"name": "int", "type": "int"},
            {"name": "long", "type": "long"},
            {"name": "float", "type": "float"},
            {"name": "double", "type": "double"},
            {
                "name": "fixed",
                "type": {"type": "fixed", "name": "fixed_field", "size": 5},
            },
            {
                "name": "union",
                "type": [
                    "null",
                    "int",
                    {
                        "type": "record",
                        "name": "union_record",
                        "fields": [{"name": "union_record_field", "type": "string"}],
                    },
                ],
            },
            {
                "name": "enum",
                "type": {
                    "type": "enum",
                    "name": "enum_field",
                    "symbols": ["FOO", "BAR"],
                },
            },
            {"name": "array", "type": {"type": "array", "items": "string"}},
            {"name": "map", "type": {"type": "map", "values": "int"}},
            {
                "name": "record",
                "type": {
                    "type": "record",
                    "name": "subrecord",
                    "fields": [{"name": "sub_int", "type": "int"}],
                },
            },
            {"name": "named_type", "type": "subrecord"},
        ],
    }

    count = 10

    # Use list() to exhaust the generator
    assert len(list(generate_many(schema, count))) == count


def test_anonymize():
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "doc": "this is a record",
        "fields": [
            {"name": "null", "type": "null"},
            {"name": "boolean", "type": "boolean"},
            {"name": "string", "type": "string", "default": "foo"},
            {"name": "bytes", "type": "bytes", "aliases": ["alias_field"]},
            {"name": "int", "type": "int", "doc": "doc"},
            {"name": "long", "type": "long"},
            {"name": "float", "type": "float"},
            {"name": "double", "type": "double"},
            {
                "name": "fixed",
                "type": {"type": "fixed", "name": "fixed_field", "size": 5},
            },
            {
                "name": "union",
                "type": [
                    "null",
                    "int",
                    {
                        "type": "record",
                        "name": "union_record",
                        "fields": [{"name": "union_record_field", "type": "string"}],
                    },
                ],
            },
            {"name": "array", "type": {"type": "array", "items": "string"}},
            {"name": "map", "type": {"type": "map", "values": "int"}},
            {
                "name": "record",
                "type": {
                    "type": "record",
                    "name": "subrecord",
                    "fields": [{"name": "sub_int", "type": "int"}],
                },
            },
            {"name": "named_type", "type": "subrecord"},
            {"name": "other_int", "type": {"type": "int"}},
        ],
    }

    anonymous_schema = anonymize_schema(schema)

    # Maintain random state so that other tests continue to be based off the
    # main starting seed
    seed_state = random.getstate()

    random.seed(1)
    record = generate_one(schema)
    random.seed(1)
    anonymous_record = generate_one(anonymous_schema)

    random.setstate(seed_state)

    bio1 = BytesIO()
    schemaless_writer(bio1, schema, record)

    bio2 = BytesIO()
    schemaless_writer(bio2, anonymous_schema, anonymous_record)

    assert bio1.getvalue() == bio2.getvalue()


def test_enum_symbols_get_anonymized():
    schema = {
        "type": "enum",
        "name": "enum_field",
        "symbols": ["FOO", "BAR"],
    }

    anonymous_schema = anonymize_schema(schema)
    assert anonymous_schema["symbols"] != schema["symbols"]
StarcoderdataPython
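A smaller, self-contained illustration of the fastavro helpers exercised by the tests above:

from fastavro.utils import generate_one, anonymize_schema

schema = {
    "type": "record",
    "name": "Example",
    "fields": [
        {"name": "id", "type": "long"},
        {"name": "name", "type": "string"},
    ],
}

record = generate_one(schema)    # e.g. {'id': ..., 'name': ...} with random values
anon = anonymize_schema(schema)  # same structure, with names and docs anonymized
print(record, anon["name"])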
5101551
import numpy as np from skimage.transform import resize from tqdm import tqdm from dianna import utils def normalize(saliency, n_masks, p_keep): return saliency / n_masks / p_keep def _upscale(grid_i, up_size): return resize(grid_i, up_size, order=1, mode='reflect', anti_aliasing=False) class RISE: """ RISE implementation based on https://github.com/eclique/RISE/blob/master/Easy_start.ipynb """ # axis labels required to be present in input image data required_labels = ('batch', 'channels') def __init__(self, n_masks=1000, feature_res=8, p_keep=0.5, # pylint: disable=too-many-arguments axes_labels=None, preprocess_function=None): """RISE initializer. Args: n_masks (int): Number of masks to generate. feature_res (int): Resolution of features in masks. p_keep (float): Fraction of image to keep in each mask axes_labels (dict/list, optional): If a dict, key,value pairs of axis index, name. If a list, the name of each axis where the index in the list is the axis index preprocess_function (callable, optional): Function to preprocess input data with """ self.n_masks = n_masks self.feature_res = feature_res self.p_keep = p_keep self.preprocess_function = preprocess_function self.masks = None self.predictions = None self.axes_labels = axes_labels if axes_labels is not None else [] def explain_text(self, model_or_function, input_text, labels=(0,), batch_size=100): runner = utils.get_function(model_or_function, preprocess_function=self.preprocess_function) input_tokens = np.asarray(model_or_function.tokenizer(input_text)) text_length = len(input_tokens) p_keep = self._determine_p_keep_for_text(input_tokens, runner) if self.p_keep is None else self.p_keep input_shape = (text_length,) self.masks = self._generate_masks_for_text(input_shape, p_keep, self.n_masks) # Expose masks for to make user inspection possible sentences = self._create_masked_sentences(input_tokens, self.masks) saliencies = self._get_saliencies(runner, sentences, text_length, batch_size, p_keep) return self._reshape_result(input_tokens, labels, saliencies) def _determine_p_keep_for_text(self, input_data, runner, n_masks=100): """ See n_mask default value https://github.com/dianna-ai/dianna/issues/24#issuecomment-1000152233 """ p_keeps = np.arange(0.1, 1.0, 0.1) stds = [] for p_keep in p_keeps: std = self._calculate_mean_class_std_for_text(p_keep, runner, input_data, n_masks=n_masks) stds += [std] best_i = np.argmax(stds) best_p_keep = p_keeps[best_i] print(f'Rise parameter p_keep was automatically determined at {best_p_keep}') return best_p_keep def _calculate_mean_class_std_for_text(self, p_keep, runner, input_data, n_masks): batch_size = 50 masks = self._generate_masks_for_text(input_data.shape, p_keep, n_masks) masked = self._create_masked_sentences(input_data, masks) predictions = [] for i in range(0, n_masks, batch_size): current_input = masked[i:i + batch_size] current_predictions = runner(current_input) predictions.append(current_predictions.max(axis=1)) predictions = np.concatenate(predictions) std_per_class = predictions.std() return np.mean(std_per_class) def _generate_masks_for_text(self, input_shape, p_keep, n_masks): masks = np.random.choice(a=(True, False), size=(n_masks,) + input_shape, p=(p_keep, 1 - p_keep)) return masks def _get_saliencies(self, runner, sentences, text_length, batch_size, p_keep): # pylint: disable=too-many-arguments self.predictions = self._get_predictions(sentences, runner, batch_size) unnormalized_saliency = self.predictions.T.dot(self.masks.reshape(self.n_masks, -1)).reshape(-1, text_length) 
return normalize(unnormalized_saliency, self.n_masks, p_keep) @staticmethod def _reshape_result(input_tokens, labels, saliencies): word_lengths = [len(t) for t in input_tokens] word_indices = [sum(word_lengths[:i]) + i for i in range(len(input_tokens))] return [list(zip(input_tokens, word_indices, saliencies[label])) for label in labels] def _get_predictions(self, sentences, runner, batch_size): predictions = [] for i in tqdm(range(0, self.n_masks, batch_size), desc='Explaining'): predictions.append(runner(sentences[i:i + batch_size])) predictions = np.concatenate(predictions) return predictions def _create_masked_sentences(self, tokens, masks): tokens_masked = [] for mask in masks: tokens_masked.append(tokens[mask]) sentences = [" ".join(t) for t in tokens_masked] return sentences def explain_image(self, model_or_function, input_data, batch_size=100): """Run the RISE explainer. The model will be called with masked images, with a shape defined by `batch_size` and the shape of `input_data` Args: model_or_function (callable or str): The function that runs the model to be explained _or_ the path to a ONNX model on disk. input_data (np.ndarray): Image to be explained batch_size (int): Batch size to use for running the model. Returns: Explanation heatmap for each class (np.ndarray). """ # convert data to xarray input_data = utils.to_xarray(input_data, self.axes_labels, RISE.required_labels) # batch axis should always be first input_data = utils.move_axis(input_data, 'batch', 0) input_data, full_preprocess_function = self._prepare_image_data(input_data) runner = utils.get_function(model_or_function, preprocess_function=full_preprocess_function) p_keep = self._determine_p_keep_for_images(input_data, runner) if self.p_keep is None else self.p_keep # data shape without batch axis and channel axis img_shape = input_data.shape[1:3] # Expose masks for to make user inspection possible self.masks = self.generate_masks_for_images(img_shape, p_keep, self.n_masks) # Make sure multiplication is being done for correct axes masked = input_data * self.masks batch_predictions = [] for i in tqdm(range(0, self.n_masks, batch_size), desc='Explaining'): batch_predictions.append(runner(masked[i:i + batch_size])) self.predictions = np.concatenate(batch_predictions) saliency = self.predictions.T.dot(self.masks.reshape(self.n_masks, -1)).reshape(-1, *img_shape) return normalize(saliency, self.n_masks, p_keep) def _determine_p_keep_for_images(self, input_data, runner, n_masks=100): """ See n_mask default value https://github.com/dianna-ai/dianna/issues/24#issuecomment-1000152233 """ p_keeps = np.arange(0.1, 1.0, 0.1) stds = [] for p_keep in p_keeps: std = self._calculate_mean_class_std_for_images(p_keep, runner, input_data, n_masks=n_masks) stds += [std] best_i = np.argmax(stds) best_p_keep = p_keeps[best_i] print(f'Rise parameter p_keep was automatically determined at {best_p_keep}') return best_p_keep def _calculate_mean_class_std_for_images(self, p_keep, runner, input_data, n_masks): batch_size = 50 img_shape = input_data.shape[1:3] masks = self.generate_masks_for_images(img_shape, p_keep, n_masks) masked = input_data * masks predictions = [] for i in range(0, n_masks, batch_size): current_input = masked[i:i + batch_size] current_predictions = runner(current_input) predictions.append(current_predictions.max(axis=1)) predictions = np.concatenate(predictions) std_per_class = predictions.std() return np.mean(std_per_class) def generate_masks_for_images(self, input_size, p_keep, n_masks): """Generate a set of random 
masks to mask the input data Args: input_size (int): Size of a single sample of input data, for images without the channel axis. Returns: The generated masks (np.ndarray) """ cell_size = np.ceil(np.array(input_size) / self.feature_res) up_size = (self.feature_res + 1) * cell_size grid = np.random.choice(a=(True, False), size=(n_masks, self.feature_res, self.feature_res), p=(p_keep, 1 - p_keep)) grid = grid.astype('float32') masks = np.empty((n_masks, *input_size), dtype=np.float32) for i in range(n_masks): y = np.random.randint(0, cell_size[0]) x = np.random.randint(0, cell_size[1]) # Linear upsampling and cropping masks[i, :, :] = _upscale(grid[i], up_size)[y:y + input_size[0], x:x + input_size[1]] masks = masks.reshape(-1, *input_size, 1) return masks def _prepare_image_data(self, input_data): """ Transforms the data to be of the shape and type RISE expects Args: input_data (xarray): Data to be explained Returns: transformed input data, preprocessing function to use with utils.get_function() """ # ensure channels axis is last and keep track of where it was so we can move it back channels_axis_index = input_data.dims.index('channels') input_data = utils.move_axis(input_data, 'channels', -1) # create preprocessing function that puts model input generated by RISE into the right shape and dtype, # followed by running the user's preprocessing function full_preprocess_function = self._get_full_preprocess_function(channels_axis_index, input_data.dtype) return input_data, full_preprocess_function def _get_full_preprocess_function(self, channel_axis_index, dtype): """ Create a preprocessing function that incorporates both the (optional) user's preprocessing function, as well as any needed dtype and shape conversions Args: channel_axis_index (int): Axis index of the channels in the input data dtype (type): Data type of the input data (e.g. np.float32) Returns: Function that first ensures the data has the same shape and type as the input data, then runs the users' preprocessing function """ def moveaxis_function(data): return utils.move_axis(data, 'channels', channel_axis_index).astype(dtype).values if self.preprocess_function is None: return moveaxis_function return lambda data: self.preprocess_function(moveaxis_function(data))
StarcoderdataPython
3289781
<gh_stars>1-10 """Author: <NAME>, Copyright 2019""" import multiprocessing import tensorflow as tf import numpy as np from mineral.core.savers.saver import Saver from mineral.core.trainers.local_trainer import LocalTrainer from mineral.core.monitors.local_monitor import LocalMonitor from mineral.networks import Dense from mineral.distributions.gaussians.tanh_gaussian import TanhGaussian from mineral.algorithms.actors.soft_actor_critic import SoftActorCritic from mineral.algorithms.critics.soft_q_network import SoftQNetwork from mineral.algorithms.tuners.entropy_tuner import EntropyTuner from mineral.algorithms.multi_algorithm import MultiAlgorithm from mineral.core.envs.normalized_env import NormalizedEnv from mineral.core.envs.ant.ant_maze_env import AntMazeEnv from mineral.core.buffers.path_buffer import PathBuffer from mineral.relabelers import GoalConditionedRelabeler from mineral.relabelers import HIRORelabeler from mineral.core.samplers.path_sampler import PathSampler def run_experiment(variant): ######### # SETUP # ######### for gpu in tf.config.experimental.list_physical_devices('GPU'): tf.config.experimental.set_memory_growth(gpu, True) experiment_id = variant["experiment_id"] logging_dir = "./ant_maze/hiro/sac/{}".format( experiment_id) max_path_length = variant["max_path_length"] max_size = variant["max_size"] num_warm_up_paths = variant["num_warm_up_paths"] num_exploration_paths = variant["num_exploration_paths"] num_evaluation_paths = variant["num_evaluation_paths"] num_trains_per_step = variant["num_trains_per_step"] update_tuner_every = variant["update_tuner_every"] update_actor_every = variant["update_actor_every"] batch_size = variant["batch_size"] num_steps = variant["num_steps"] monitor = LocalMonitor(logging_dir) env = NormalizedEnv( AntMazeEnv(**variant["env_kwargs"]), reward_scale=(1 / max_path_length)) ################## # LOWER POLICIES # ################## lower_policy = Dense( [256, 256, 4], optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs=dict(lr=0.0001), distribution_class=TanhGaussian, distribution_kwargs=dict(std=None)) lower_target_policy = Dense( [256, 256, 4], tau=1e-1, optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs=dict(lr=0.0001), distribution_class=TanhGaussian, distribution_kwargs=dict(std=None)) ######################### # LOWER VALUE FUNCTIONS # ######################### lower_qf = Dense( [256, 256, 1], optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs={"lr": 0.0001}) lower_target_qf = Dense( [256, 256, 1], tau=1e-1, optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs={"lr": 0.0001}) ################## # UPPER POLICIES # ################## upper_policy = Dense( [256, 256, 4], optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs=dict(lr=0.0001), distribution_class=TanhGaussian, distribution_kwargs=dict(std=None)) upper_target_policy = Dense( [256, 256, 4], tau=1e-1, optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs=dict(lr=0.0001), distribution_class=TanhGaussian, distribution_kwargs=dict(std=None)) ######################### # UPPER VALUE FUNCTIONS # ######################### upper_qf = Dense( [256, 256, 1], optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs={"lr": 0.0001}) upper_target_qf = Dense( [256, 256, 1], tau=1e-1, optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs={"lr": 0.0001}) #################################### # OBSERVATION DICTIONARY SELECTORS # #################################### observation_selector = ( lambda x: x["proprio_observation"]) goal_selector = ( lambda x: 
x["goal"]) both_selector = ( lambda x: np.concatenate([observation_selector(x), goal_selector(x)], -1)) hierarchy_selector = ( lambda i, x: observation_selector(x) if i == 1 else both_selector(x)) ################## # REPLAY BUFFERS # ################## lower_buffer = GoalConditionedRelabeler( PathBuffer( max_size=max_size, max_path_length=max_path_length, monitor=monitor), observation_selector=observation_selector, goal_selector=goal_selector) upper_buffer = HIRORelabeler( lower_policy, PathBuffer( max_size=max_size, max_path_length=max_path_length, monitor=monitor), observation_selector=observation_selector, num_samples=8) ############ # SAMPLERS # ############ sampler = PathSampler( env, lower_policy, lower_buffer, upper_policy, upper_buffer, time_skips=(1, 5), max_path_length=max_path_length, num_warm_up_paths=num_warm_up_paths, num_exploration_paths=num_exploration_paths, num_evaluation_paths=num_evaluation_paths, selector=hierarchy_selector, monitor=monitor) ############################# # LOWER TRAINING ALGORITHMS # ############################# lower_tuner = EntropyTuner( lower_policy, optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs=dict(lr=0.0001), target=(-2.0), update_every=update_tuner_every, batch_size=batch_size, selector=both_selector, monitor=monitor, logging_prefix="lower_") lower_critic = SoftQNetwork( lower_target_policy, lower_qf, lower_target_qf, gamma=0.99, clip_radius=0.2, std=0.1, log_alpha=lower_tuner.get_tuning_variable(), batch_size=batch_size, selector=both_selector, monitor=monitor, logging_prefix="lower_") lower_actor = SoftActorCritic( lower_policy, lower_target_policy, lower_critic, log_alpha=lower_tuner.get_tuning_variable(), update_every=update_actor_every, batch_size=batch_size, selector=both_selector, monitor=monitor, logging_prefix="lower_") lower_algorithm = MultiAlgorithm(lower_actor, lower_critic, lower_tuner) ############################# # UPPER TRAINING ALGORITHMS # ############################# upper_tuner = EntropyTuner( upper_policy, optimizer_class=tf.keras.optimizers.Adam, optimizer_kwargs=dict(lr=0.0001), target=(-2.0), update_every=update_tuner_every, batch_size=batch_size, selector=observation_selector, monitor=monitor, logging_prefix="upper_") upper_critic = SoftQNetwork( upper_target_policy, upper_qf, upper_target_qf, gamma=0.99, clip_radius=0.2, std=0.1, log_alpha=upper_tuner.get_tuning_variable(), batch_size=batch_size, selector=observation_selector, monitor=monitor, logging_prefix="upper_") upper_actor = SoftActorCritic( upper_policy, upper_target_policy, upper_critic, log_alpha=upper_tuner.get_tuning_variable(), update_every=update_actor_every, batch_size=batch_size, selector=observation_selector, monitor=monitor, logging_prefix="upper_") upper_algorithm = MultiAlgorithm(upper_actor, upper_critic, upper_tuner) ################## # START TRAINING # ################## saver = Saver( logging_dir, lower_policy=lower_policy, lower_target_policy=lower_target_policy, lower_qf=lower_qf, lower_target_qf=lower_target_qf, upper_policy=upper_policy, upper_target_policy=upper_target_policy, upper_qf=upper_qf, upper_target_qf=upper_target_qf) trainer = LocalTrainer( sampler, lower_buffer, lower_algorithm, upper_buffer, upper_algorithm, num_steps=num_steps, num_trains_per_step=num_trains_per_step, save_function=saver, monitor=monitor) trainer.train() if __name__ == "__main__": ############### # ENTRY POINT # ############### num_seeds = 5 for experiment_id in range(num_seeds): variant = dict( env_kwargs=dict(maze_id="Maze"), 
experiment_id=experiment_id, max_path_length=10, max_size=1000000, num_warm_up_paths=100, num_exploration_paths=1, num_evaluation_paths=100, num_trains_per_step=100, update_tuner_every=100, update_actor_every=100, batch_size=100, num_steps=10000) ##################### # LAUNCH MANY SEEDS # ##################### multiprocessing.Process( target=run_experiment, args=(variant,)).start()
StarcoderdataPython
9780155
# 1. find optimal binary values
# 2. make an order-index (by 1 0 difference desc)
# 3. increase a number 1 to K and apply order-index here
# 4. xor to optimal binary values
# 5. check if exists in impossible binaries
from audioop import reverse

T = int(input())
for tc in range(1, T+1):
    # num of lines, num of impossible, len of binary
    N, M, P = map(int, input().split())
    mat = []
    for i in range(N):
        mat.append(list(map(int, list(input()))))

    base = []
    base_bin = []
    diff = []
    diff_bin = []
    for i in range(P):
        cnt = 0
        for j in range(N):
            cnt += mat[j][i]
        base.append(cnt)
        if cnt >= (N+1)//2:
            base_bin.append(1)
        else:
            base_bin.append(0)
        diff_bin.append([cnt, N-cnt])
        diff.append(N - cnt)

    diff_idx = [(i, a) for i, a in enumerate(diff)]
    diff_idx.sort(key=lambda x: x[1], reverse=True)
    diff_idx = [i[0] for i in diff_idx]

    impossibles = []
    for i in range(M):
        impossibles.append(list(map(int, list(input()))))

    result = 0
    for i in range(0, M+1):
        tmp = list(map(int, list(bin(i)[2:].zfill(P))))
        tmp2 = [tmp[i] for i in diff_idx]
        new_bin = [0]*P
        result = 0
        for j in range(P):
            new_bin[j] = tmp2[j] ^ base_bin[j]
            result += diff_bin[j][new_bin[j]]
        # print(result)
        if new_bin in impossibles:
            continue
        else:
            break

    print(f"Case #{tc}: {result}")
StarcoderdataPython
9745517
#!/usr/bin/env python

# simple.py - bind a javascript function to python function

import STPyV8

with STPyV8.JSContext() as ctxt:
    upcase = ctxt.eval("""
        ( (lowerString) => {
            return lowerString.toUpperCase();
        })
    """)
    print(upcase("hello world!"))
StarcoderdataPython
1612587
import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

long_description = open(os.path.join(os.path.dirname(__file__), "README.rst")).read()

setup(
    name="iso8601",
    version="0.1.12",
    description=long_description.split("\n")[0],
    long_description=long_description,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://bitbucket.org/micktwomey/pyiso8601",
    packages=["iso8601"],
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
    ],
)
StarcoderdataPython
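For context, the library this setup.py packages exposes iso8601.parse_date as its main entry point; a minimal example:

import iso8601

# Returns a timezone-aware datetime.
dt = iso8601.parse_date("2022-01-25T12:00:00Z")
print(dt)  # 2022-01-25 12:00:00+00:00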
9773257
i = 0
listaNumeros = []

while i == 0:
    num = int(input("Enter a number for the list: "))
    listaNumeros.append(num)
    i = int(input("Enter 0 to continue, any other value to quit: "))

print("--------")

num = int(input("Enter a number to search for: "))


def index(lista, numero):
    i = 0
    while i < len(lista):
        if lista[i] == numero:
            return i
        i += 1
    return -1


qtdElementos = index(listaNumeros, num)

if qtdElementos != -1:
    print("index: " + str(qtdElementos))
else:
    print('Number not found')
StarcoderdataPython
8102807
t = int(input())
while t:
    N = int(input())
    S = list(input().split())
    f = 0
    for j in range(N):
        if (j+1) < N and S[j] == "cookie" and S[j+1] != "milk":
            f = 1
            break
        elif S[N-1] == "cookie":
            f = 1
            break
    if f == 1:
        print("NO")
    else:
        print("YES")
    t = t-1
StarcoderdataPython
353365
<reponame>orionlee/PH_TESS_I_LightCurveViewer<gh_stars>1-10
#
# Helpers to download TESS-specific non-lightcurve data: TOIs, TCEs, etc.
#

import os
from pathlib import Path
import re
import shutil
import time
from types import SimpleNamespace
import warnings

from memoization import cached
import requests

import numpy as np
import pandas as pd
from pandas.io.formats.style import Styler

# for accessing / parsing TCEs from MAST
from astroquery.exceptions import NoResultsWarning
from astroquery.mast import Observations
import xmltodict

#
# Misc constants
#

R_earth = 6371000  # radius of the Earth [m]
R_jup = 69911000  # radius of Jupiter [m]

BTJD_REF = 2457000

#
# Generic file download, and CSV helper
#


def _policy_always_use(url, filename):
    return True


def _policy_always_reject(url, filename):
    return False


def _create_policy_ttl_in_seconds(ttl_in_seconds):
    def _policy_ttl(url, filename):
        try:
            time_since_last_modified = time.time() - os.path.getmtime(filename)
            if time_since_last_modified <= ttl_in_seconds:
                return True
            else:
                return False
        except Exception as e:
            warnings.warn(
                f"Unexpected error in determining if local file should be used. Local file is thus not used. Error: {e}",
            )
            return False

    return _policy_ttl


def _create_policy_ttl_in_days(ttl_in_days):
    return _create_policy_ttl_in_seconds(ttl_in_days * 86400)


LocalFileUsePolicy = SimpleNamespace(
    ALWAYS_USE=_policy_always_use,
    ALWAYS_REJECT=_policy_always_reject,
    TTL_IN_SECONDS=_create_policy_ttl_in_seconds,
    TTL_IN_DAYS=_create_policy_ttl_in_days,
)


def _create_local_filename(url, filename, download_dir):
    if filename is not None:
        local_filename = filename
    else:
        local_filename = url.split("/")[-1]
        local_filename = re.sub(r"\?.*$", "", local_filename)
    return os.path.join(download_dir, local_filename)


def _download_file(url, filename=None, download_dir=None):
    if download_dir is None:
        download_dir = ""

    local_filename = _create_local_filename(url, filename, download_dir)

    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        # write to a temporary file. If successful, make it the real local file
        # it is to prevent an interrupted download leaving a partial file
        local_filename_temp = f"{local_filename}.download"
        with open(local_filename_temp, "wb") as out_file:
            shutil.copyfileobj(response.raw, out_file)
        os.replace(local_filename_temp, local_filename)
    return local_filename


def _download_file_if_needed(url, filename=None, download_dir=None, use_localfile_func=None):
    if download_dir is None:
        download_dir = ""

    local_filename = _create_local_filename(url, filename, download_dir)
    if os.path.isfile(local_filename):
        if use_localfile_func is None or use_localfile_func(url, local_filename):
            return local_filename
    return _download_file(url, filename, download_dir)


def _get_csv(url, filename, download_dir, use_localfile_func, **kwargs):
    local_filename = _download_file_if_needed(
        url, filename=filename, download_dir=download_dir, use_localfile_func=use_localfile_func
    )
    return pd.read_csv(local_filename, **kwargs)


def _single_row(df):
    if len(df) > 0:
        return df.iloc[0]
    else:
        return None


#
# TOIs / CTOIs
#


class TOIAccessor:
    Headers = SimpleNamespace(
        TIC="TIC ID",
        TOI="TOI",
        MASTER_PRIORITY="Master",
        EPOCH_BJD="Epoch (BJD)",
        EPOCH_BTJD="Epoch (BTJD)",  # derived
        PERIOD="Period (days)",
        DURATION_HR="Duration (hours)",
        DEPTH_PPM="Depth (ppm)",
        DEPTH_PCT="Depth (percent)",  # derived
        PLANET_RADIUS_E="Planet Radius (R_Earth)",
        PLANET_RADIUS_J="Planet Radius (R_Jupiter)",  # derived
        TESS_DISPOSITION="TESS Disposition",
        TFOPWG_DISPOSITION="TFOPWG Disposition",
        COMMENTS="Comments",
    )

    # TODO: in-memory cache (with @cached) needs to be redone to properly support use_localfile_func
    @classmethod
    def get_all_tois(cls, download_dir=None, use_localfile_func=None):
        url = "https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv"
        filename = "tess_tois.csv"
        res = _get_csv(url, filename, download_dir, use_localfile_func=use_localfile_func, dtype={cls.Headers.TOI: str})

        # add derived columns
        res[cls.Headers.EPOCH_BTJD] = res[cls.Headers.EPOCH_BJD] - BTJD_REF
        res[cls.Headers.PLANET_RADIUS_J] = res[cls.Headers.PLANET_RADIUS_E] * R_earth / R_jup
        res[cls.Headers.DEPTH_PCT] = res[cls.Headers.DEPTH_PPM] / 10000

        return res

    def __init__(self, download_dir=None, use_localfile_func=None):
        self._all = self.get_all_tois(download_dir=download_dir, use_localfile_func=use_localfile_func)

    def all(self):
        return self._all

    def of_toi(self, toi):
        tois = self._all[self._all[self.Headers.TOI] == str(toi)]
        return _single_row(tois)

    def of_tic(self, tic):
        return self._all[self._all[self.Headers.TIC] == int(tic)]


class CTOIAccessor:
    Headers = SimpleNamespace(
        TIC="TIC ID",
        CTOI="CTOI",
        TOI="Promoted to TOI",
        EPOCH_BJD="Transit Epoch (BJD)",
        EPOCH_BTJD="Transit Epoch (BTJD)",  # derived
        PERIOD="Period (days)",
        DURATION_HR="Duration (hrs)",
        DEPTH_PPM="Depth ppm",
        DEPTH_PCT="Depth percent",  # derived
        PLANET_RADIUS_E="Radius (R_Earth)",
        PLANET_RADIUS_J="Radius (R_Jupiter)",  # derived
        COMMENTS="Notes",
    )

    @classmethod
    def get_all_ctois(cls, download_dir=None, use_localfile_func=None):
        url = "https://exofop.ipac.caltech.edu/tess/download_ctoi.php?sort=ctoi&output=csv"
        filename = "tess_ctois.csv"
        res = _get_csv(
            url,
            filename,
            download_dir,
            use_localfile_func=use_localfile_func,
            dtype={cls.Headers.CTOI: str, cls.Headers.TOI: str},
        )

        # add derived columns
        res[cls.Headers.EPOCH_BTJD] = res[cls.Headers.EPOCH_BJD] - BTJD_REF
        res[cls.Headers.PLANET_RADIUS_J] = res[cls.Headers.PLANET_RADIUS_E] * R_earth / R_jup
        res[cls.Headers.DEPTH_PCT] = res[cls.Headers.DEPTH_PPM] / 10000

        return res

    def __init__(self, download_dir=None, use_localfile_func=None):
        self._all = self.get_all_ctois(download_dir=download_dir, use_localfile_func=use_localfile_func)

    def all(self):
        return self._all

    def of_ctoi(self, ctoi):
        ctois = self._all[self._all[self.Headers.CTOI] == str(ctoi)]
        return _single_row(ctois)

    def of_tic(self, tic):
        return self._all[self._all[self.Headers.TIC] == int(tic)]


#
# Download and parse TCEs
#


def parse_dvs_filename(filename):
    # e.g.: tess2020267090513-s0030-s0030-0000000142087638-01-00394_dvs.pdf
    match = re.match(r"^tess\d+-(s\d+-s\d+)-(\d+)-(\d+)-.+_dvs[.]pdf", filename)
    if not match:
        return {}
    sector_range, tic_id_padded, tce_num_padded = (
        match.group(1),
        match.group(2),
        match.group(3),
    )
    tic_id = re.sub(r"^0+", "", tic_id_padded)
    tce_num = re.sub(r"^0+", "", tce_num_padded)
    # sufficient to identify one for a given TIC, less visually busy
    tce_id_short = f"{sector_range}:TCE{tce_num}"

    # tce_id is the format used on ExoMAT, e.g, TIC142087638S0030S0030TCE1
    tce_id = f"""TIC{tic_id}{re.sub("-", "", sector_range.upper())}TCE{tce_num}"""

    return dict(
        tce_id=tce_id,
        tce_id_short=tce_id_short,
        sector_range=sector_range,
        tic_id=tic_id,
        tce_num=tce_num,
    )


def parse_dvr_filename(filename):
    match = re.match(r"^tess\d+-(s\d+-s\d+)-(\d+)-.+_dvr[.](pdf|xml)", filename)
    if not match:
        return {}
    sector_range, tic_id_padded, file_type = (
        match.group(1),
        match.group(2),
        match.group(3),
    )
    tic_id = re.sub(r"^0+", "", tic_id_padded)

    return dict(sector_range=sector_range, tic_id=tic_id, file_type=file_type)


def get_dv_products_of_tic(tic_id, productSubGroupDescription, download_dir=None):
    # Based on:
    # - https://outerspace.stsci.edu/display/TESS/7.0+-+Tips+and+Tricks+to+Getting+TESS+Data+At+MAST
    # https://github.com/spacetelescope/notebooks/blob/master/notebooks/MAST/TESS/beginner_astroquery_dv/beginner_astroquery_dv.ipynb

    # Note: for TESS, tic_id (the number without TIC) is what an exact match works
    # Kepler / K2 ids will need some additional processing for exact match to work.
    exact_target_name = tic_id
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=NoResultsWarning, message=".*No products to download.*")
        obs_wanted = Observations.query_criteria(
            target_name=exact_target_name,
            dataproduct_type="timeseries",
            obs_collection="TESS",
        )
        data_products = Observations.get_product_list(obs_wanted)
        return Observations.filter_products(data_products, productSubGroupDescription=productSubGroupDescription)


def parse_dvr_xml(file_path):
    def as_list(data):
        """Wrap an item as a list, if it's not one.
        Useful for handling dict from XML where elements might be one or multiple elements"""
        if type(data) is list:
            return data
        else:
            return [data]

    def param_value(model_params_dict, param_name):
        param_dict = model_params_dict.get(param_name)
        if param_dict is None:
            return None
        val_str = param_dict.get("@value")
        if val_str is None:
            return None
        return float(val_str)

    # the body
    with open(file_path, "r") as f:
        dvr_xml_str = f.read()
    parsed = xmltodict.parse(dvr_xml_str)

    planets_dict = {}

    e_pr_list = as_list(parsed["dv:dvTargetResults"]["dv:planetResults"])
    for e_pr in e_pr_list:
        e_afit = e_pr["dv:allTransitsFit"]
        planet_num = e_afit["@planetNumber"]

        params_dict = {}  # a temporary structure to access params internally
        for mp in e_afit["dv:modelParameters"]["dv:modelParameter"]:
            params_dict[mp["@name"]] = mp

        # TODO: add other DV fitting parameters, odd/even test, centroid, etc.
# use the underlying xml attribute names, even thought it breaks the convention a_planet_dict = dict( planetNumber=planet_num, transitEpochBtjd=param_value(params_dict, "transitEpochBtjd"), planetRadiusEarthRadii=param_value(params_dict, "planetRadiusEarthRadii"), transitDurationHours=param_value(params_dict, "transitDurationHours"), orbitalPeriodDays=param_value(params_dict, "orbitalPeriodDays"), transitDepthPpm=param_value(params_dict, "transitDepthPpm"), minImpactParameter=param_value(params_dict, "minImpactParameter"), ) planets_dict[planet_num] = a_planet_dict return planets_dict @cached def get_tce_infos_of_tic(tic_id, download_dir=None): def filter_by_dataURI_suffix(products, suffix): # Helper to filter products into summary, full report, full report xml using suffix. # It replaces the logic to filter by "description" column, as description is sometimes unreliable # E.g., for the TCE for TIC 43843023 sector 5, the dvr xml has incorrect description # so that the entry is treated as a dvr pdf return products[np.char.endswith(products["dataURI"], suffix)] products_wanted = get_dv_products_of_tic(tic_id, ["DVS", "DVR"], download_dir=download_dir) res = [] # basic info for p in filter_by_dataURI_suffix(products_wanted, "_dvs.pdf"): tce_info = parse_dvs_filename(p["productFilename"]) entry = dict( obsID=p["obsID"], tic_id=tce_info.get("tic_id"), sector_range=tce_info.get("sector_range"), tce_num=tce_info.get("tce_num"), tce_id=tce_info.get("tce_id"), tce_id_short=tce_info.get("tce_id_short"), dvs_dataURI=p["dataURI"], ) res.append(entry) # DVR pdf link for p in filter_by_dataURI_suffix(products_wanted, "_dvr.pdf"): # find TCEs for the same observation (sometimes there are multiple TCEs for the same observation) for entry in [e for e in res if e["obsID"] == p["obsID"]]: entry["dvr_dataURI"] = p["dataURI"] products_dvr_xml = filter_by_dataURI_suffix(products_wanted, "_dvr.xml") manifest = Observations.download_products(products_dvr_xml, download_dir=download_dir) if manifest is None: return res for m in manifest: dvr_xml_local_path = m["Local Path"] dvr_info = parse_dvr_filename(Path(dvr_xml_local_path).name) for entry in [e for e in res if e["tic_id"] == dvr_info["tic_id"] and e["sector_range"] == dvr_info["sector_range"]]: entry["dvr_xml_local_path"] = dvr_xml_local_path planets_dict = parse_dvr_xml(dvr_xml_local_path) for a_planet_dict in planets_dict.values(): for entry in [ e for e in res if e["tic_id"] == dvr_info["tic_id"] and e["sector_range"] == dvr_info["sector_range"] and e["tce_num"] == a_planet_dict["planetNumber"] ]: entry["planet"] = a_planet_dict return res # # Top-level report logic, which provides # - stellar metadata (in LightCurve file) # - TCEs # - TOIs # def _tce_info_to_html(tce_info_list): if len(tce_info_list) < 1: return "No TCEs." 
def link(link_text, url): return f"""<a href="{url}" target="_blank">{link_text}</a>""" def row(*args): return "<tr>" + "".join(f"<td>{v}</td>" for v in args) + "</tr>" html = "" header = [ ("TCE", ""), ("Reports", ""), ("R<sub>p</sub>", "R<sub>j</sub>"), ("Epoch", "BTJD"), ("Duration", "hr"), ("Period", "day"), ("Depth", "%"), ("Impact P.", "<i>b</i>"), ("Codes", ""), ] html += """<table> <thead>""" html += "<tr>" html += " ".join([f"<th>{h[0]}</th>" for h in header]) html += "</tr>\n" html += "<tr>" html += " ".join([f"<th>{h[1]}</th>" for h in header]) html += "</tr>\n" html += """ </thead> <tbody> """ R_EARTH_TO_R_JUPITER = 6378.1 / 71492 for info in tce_info_list: exomast_url = f'https://exo.mast.stsci.edu/exomast_planet.html?planet={info.get("tce_id")}' dvs_url = f'https://exo.mast.stsci.edu/api/v0.1/Download/file?uri={info.get("dvs_dataURI")}' dvr_url = f'https://exo.mast.stsci.edu/api/v0.1/Download/file?uri={info.get("dvr_dataURI")}' p_i = info.get("planet", {}) html += row( link(info.get("tce_id_short"), exomast_url), f"""{link("dvs", dvs_url)},&emsp;{link("full", dvr_url)}""", f'{p_i.get("planetRadiusEarthRadii", 0) * R_EARTH_TO_R_JUPITER:.3f}', f'{p_i.get("transitEpochBtjd", 0):.4f}', f'{p_i.get("transitDurationHours", 0):.4f}', f'{p_i.get("orbitalPeriodDays", 0):.6f}', f'{p_i.get("transitDepthPpm", 0) / 10000:.4f}', f'{p_i.get("minImpactParameter", 0):.2f}', # code fragments to so that users can easily use a TCE as an entry in transit_specs f"""\ <input type="text" style="margin-left: 3ch; font-size: 90%; color: #666; width: 10ch;" onclick="this.select();" readonly value='epoch={p_i.get("transitEpochBtjd", 0):.4f}, duration_hr={p_i.get("transitDurationHours", 0):.4f}, \ period={p_i.get("orbitalPeriodDays", 0):.6f}, label="{info.get("tce_id_short")}",'>""", ) html += "\n" html += "</tbody></table>\n" return html def _get_tces_in_html(tic, download_dir=None): # For TCEs, query MAST download / parse results (the _dvr.xml), then show: # - basic planet parameters and orbital info # - TODO: red flags in vetting report # see: https://archive.stsci.edu/missions-and-data/tess/data-products tce_info_list = get_tce_infos_of_tic(tic, download_dir=download_dir) return _tce_info_to_html(tce_info_list) def add_transit_as_codes_column_to_df(df, headers, label_value_func): h = headers # string interpolation does not work. 
So use old-school concatenation df["Codes"] = ( "epoch=" + df[h.EPOCH_BTJD].map("{:.4f}".format) + ", duration_hr=" + df[h.DURATION_HR].map("{:.4f}".format) + ", period=" + df[h.PERIOD].map("{:.6f}".format) + ', label="' + label_value_func(df) + '",' ) return df def _get_tois_in_html(tic, download_dir=None): h = TOIAccessor.Headers # Consider cache TOIAccessor in some module global (keyed by download_dir) to avoid # repeated loading/parsing the underlying TOI csv tois = TOIAccessor(download_dir=download_dir).of_tic(tic) if len(tois) < 1: return "<p>No TOIs.</p>" add_transit_as_codes_column_to_df(tois, h, label_value_func=lambda df: "TOI " + df[h.TOI]) report_view = tois[ [ h.TOI, h.MASTER_PRIORITY, h.TFOPWG_DISPOSITION, h.PLANET_RADIUS_J, h.EPOCH_BTJD, h.DURATION_HR, h.PERIOD, h.DEPTH_PCT, h.COMMENTS, "Codes", ] ] # tweak output styling styler = Styler(report_view, cell_ids=False) # avoid unnecessary long cell ids styler.hide_index() styler.format( formatter={ (h.PLANET_RADIUS_J): "{:.3f}", (h.EPOCH_BTJD, h.DURATION_HR): "{:.4f}", (h.PERIOD): "{:.6f}", (h.DEPTH_PCT): "{:.4f}", } ) styler.set_table_styles( [ # make the TOI table align (roughly) with the TCE table {"selector": "td.col0", "props": [("padding-left", "10px")]}, ] ) html = styler._repr_html_() # make the headers to make them more compact html = html.replace(h.MASTER_PRIORITY, "Master<br>priority", 1) html = html.replace(h.TFOPWG_DISPOSITION, "TFOPWG<br>Dispo.", 1) html = html.replace(h.PLANET_RADIUS_J, "R<sub>p</sub><br>R<sub>j</sub>", 1) html = html.replace(h.EPOCH_BTJD, "Epoch<br>BTJD", 1) html = html.replace(h.DURATION_HR, "Duration<br>hr", 1) html = html.replace(h.PERIOD, "Period<br>day", 1) html = html.replace(h.DEPTH_PCT, "Depth<br>%", 1) # render nan as -- (as nan is really no value in our case) # - styler.format()'s na_rep option seems to fix some but not all, so we do it ourselves # - replace the pattern of <td class="..." >nan</td> html = html.replace(">nan</td>", ">--</td>") # turn Codes column into html input element (easier to be selected) html = re.sub( r"<td([^>]+)>(epoch=.+,)</td>", r"""<td\1><input type="text" style="margin-left: 3ch; font-size: 90%; color: #666; width: 10ch;" onclick="this.select();" readonly="" value='\2'></td>""", html, ) return html def _get_ctois_in_html(tic, download_dir=None): # TODO: lots of codes similar to _get_tois_in_html(). 
factor them out h = CTOIAccessor.Headers # Consider cache TOIAccessor in some module global (keyed by download_dir) to avoid # repeated loading/parsing the underlying TOI csv ctois = CTOIAccessor(download_dir=download_dir).of_tic(tic) if len(ctois) < 1: return "<p>No CTOIs.</p>" add_transit_as_codes_column_to_df(ctois, h, label_value_func=lambda df: "CTOI " + df[h.CTOI]) report_view = ctois[ [ h.CTOI, h.TOI, h.PLANET_RADIUS_J, h.EPOCH_BTJD, h.DURATION_HR, h.PERIOD, h.DEPTH_PCT, h.COMMENTS, "Codes", ] ] # tweak output styling styler = Styler(report_view, cell_ids=False) # avoid unnecessary long cell ids styler.hide_index() styler.format( formatter={ (h.PLANET_RADIUS_J): "{:.3f}", (h.EPOCH_BTJD, h.DURATION_HR): "{:.4f}", (h.PERIOD): "{:.6f}", (h.DEPTH_PCT): "{:.4f}", } ) styler.set_table_styles( [ # make the CTOI table align (roughly) with the TCE table {"selector": "td.col0", "props": [("padding-left", "20px")]}, # min-width to ensure TOI column, often no value, are wide enough to hold typical TOI value # so as to make alignment more consistent {"selector": "td.col1", "props": [("min-width", "80px")]}, ] ) html = styler._repr_html_() # make the headers to make them more compact html = html.replace(h.TOI, "TOI?", 1) html = html.replace(h.PLANET_RADIUS_J, "R<sub>p</sub><br>R<sub>j</sub>", 1) html = html.replace(h.EPOCH_BTJD, "Epoch<br>BTJD", 1) html = html.replace(h.DURATION_HR, "Duration<br>hr", 1) html = html.replace(h.PERIOD, "Period<br>day", 1) html = html.replace(h.DEPTH_PCT, "Depth<br>%", 1) # render nan as -- (as nan is really no value in our case) # - styler.format()'s na_rep option seems to fix some but not all, so we do it ourselves # - replace the pattern of <td class="..." >nan</td> html = html.replace(">nan</td>", ">--</td>") # turn Codes column into html input element (easier to be selected) html = re.sub( r"<td([^>]+)>(epoch=.+,)</td>", r"""<td\1><input type="text" style="margin-left: 3ch; font-size: 90%; color: #666; width: 10ch;" onclick="this.select();" readonly="" value='\2'></td>""", html, ) return html def get_tic_meta_in_html(lc, a_subject_id=None, download_dir=None): # This function does not do the actual display, # so that the caller can call it in background # and display it whereever it's needed def link(link_text, url): return f"""<a href="{url}" target="_blank">{link_text}</a>""" def prop(prop_name, prop_value): return f""" <tr><td>{prop_name}</td><td>{prop_value}</td></tr>\n""" # main logic m = lc.meta tic_id = str(m.get("TICID")) def safe_m_get(key, default_val): # in some meta, the key exists but the value is None # this helper handles it res = m.get(key, default_val) return res if res is not None else default_val html = f""" <div id="tic_metadata_ctr"> <div id="tic_metadata_ctl"> <span id="float_expand_toggle" title="Toggle whether the metadata is shown or not"></span> <span id="float_fixed_toggle" title="Toggle whether the metadata is shown in a floating box or a regular cell"></span> </div> <div id="tic_metadata_body"> <h3>TIC {tic_id}</h3> """ html += "&emsp;" + link("ExoFOP", f"https://exofop.ipac.caltech.edu/tess/target.php?id={tic_id}") html += "\n&emsp;|&emsp;" html += link( "PHT Talk", f"https://www.zooniverse.org/projects/nora-dot-eisner/planet-hunters-tess/talk/search?query={tic_id}", ) if a_subject_id is not None: # note, a TIC can have multiple subjects, here is just one of them. 
html += "\n , a subject: " html += link( a_subject_id, f"https://www.zooniverse.org/projects/nora-dot-eisner/planet-hunters-tess/talk/subjects/{a_subject_id}", ) # show the sector number (here we assume a_subject_id does correspond the the sector) # the sector is useful to be included so that users can easily locate the TCE matching the sector. html += f' (sector {safe_m_get("SECTOR", "")})' html += "<br>\n" # stellar parameters html += "<table>\n" html += prop("R<sub>S</sub> (in R<sub>☉</sub>)", f'{safe_m_get("RADIUS", 0):.3f}') html += prop("Magnitude (TESS)", f'{safe_m_get("TESSMAG", 0):.2f}') html += prop("T_eff (in K)", safe_m_get("TEFF", 0)) html += "</table>\n" html += "<p>TCEs:</p>" html += _get_tces_in_html(tic_id, download_dir=download_dir) # TOIs/CTOIs html += "<p>TOIs / CTOIs:</p>" html += _get_tois_in_html(tic_id, download_dir=download_dir) html += _get_ctois_in_html(tic_id, download_dir=download_dir) html += """ </div> <!-- id="tic_metadata_body" --> </div> <!-- id="tic_metadata_ctr" --> <style id="tic_metadata_out_style"> #tic_metadata_ctr.float { position: fixed; bottom: 12px; right: 36px; z-index: 999; background-color: rgba(255, 255, 0, 0.3); padding: 6px; max-height: 75vh; /* ensure for TIC with large number of TCEs, the floating box won't fill up too much space */ overflow-y: scroll; } #tic_metadata_ctl { margin-left: 12em; /* make it roughly to the left of TIC heading */ } #tic_metadata_ctr.float #tic_metadata_ctl { margin-left: 0; float: right; } #float_fixed_toggle { cursor: pointer; padding: 6px; font-size: 16px; font-weight: normal; } #float_fixed_toggle:before { content: "[To float >]"; } #tic_metadata_ctr.float #float_fixed_toggle:before { content: "[X]"; } #float_expand_toggle { cursor: pointer; padding: 6px; font-size: 16px; font-weight: normal; margin-left: 10px; } #tic_metadata_ctr.float #float_expand_toggle:before { content: "<<"; } #tic_metadata_ctr.float.expand #float_expand_toggle:before { content: ">>"; } #tic_metadata_ctr.float #tic_metadata_body { display: none; } #tic_metadata_ctr.float.expand #tic_metadata_body { display: block; } </style> <script> document.getElementById("float_fixed_toggle").onclick = function(evt) { const ctr = document.getElementById("tic_metadata_ctr"); ctr.classList.toggle("float"); if (ctr.classList.contains("float")) { ctr.classList.add("expand"); } }; document.getElementById("float_expand_toggle").onclick = function(evt) { document.getElementById("tic_metadata_ctr").classList.toggle("expand"); }; </script> """ return html
from common.utils import read_file

from .adapters import get_num_arrangements, get_differences


def main() -> None:
    adapters = read_file('d10/data/input.txt', int)

    result = get_differences(adapters)
    print(f"Result 1: {result}")

    choices = get_num_arrangements(adapters)
    print(f"Result 2: {choices} ")


if __name__ == "__main__":
    main()
<reponame>flucto-gmbh/SAAFOWE #!/usr/bin/env python import argparse from datetime import datetime, timezone, timedelta from glob import glob import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap import numpy as np from os import environ, path, makedirs import sys import pandas as pd import time def parse_arguments() -> dict: arg_parser = argparse.ArgumentParser() arg_parser.add_argument( "--verbose", action="store_true", help="for debugging purposes" ) arg_parser.add_argument( "--data-dir", help="directory to consistently store recorded data. Defaults to $HOME/msb_data", default=path.join(environ["HOME"], "msb_processed"), type=str, ) arg_parser.add_argument( "--results-dir", help="directory to consistently store recorded data. Defaults to $HOME/msb_data", default=path.join(environ["HOME"], path.join("msb_results", "24h-report")), type=str, ) arg_parser.add_argument( "--report-time-window", help="duration of report in hours", default=24, type=int, ) return arg_parser.parse_args().__dict__ def find_time_files( file_dir: str, file_pattern: str = "*.csv", begin: datetime = datetime.fromisoformat("1970-01-01T00:00:00+00:00"), end: datetime = datetime.fromtimestamp(time.time(), timezone.utc), verbose=False, ) -> list: files = list() for file in sorted(glob(path.join(file_dir, file_pattern))): timestamp = datetime.fromisoformat(file.split("_")[-1].split(".")[0]) if verbose: print(f"timestamp: {timestamp}") if begin <= timestamp <= end: if verbose: print(f"matching file: {file}") files.append(file) continue if verbose: print(f"skipping: {file}") return files def sanitize_data(data: pd.DataFrame, verbose: bool = False): if data.empty: raise Exception("empty dataframe") if verbose: print("cleaning NaNs") # data.fillna(method="ffill", inplace=True) data.dropna(inplace=True) if verbose: print("dropping duplicate indices") data = data.loc[~data.index.duplicated(keep="first")] def read_data(files: list, verbose=False) -> pd.DataFrame: data = list() if verbose: print(f'processing {len(files)} data files') for file in files: tmp = pd.read_csv(file) try: set_index(tmp) except Exception as e: print(f"failed to set index: {e} skipping") continue try: sanitize_data(tmp) except Exception as e: print(f"failed to sanitize data: {e} skipping") continue data.append(tmp) return pd.concat(data) def set_index(data: pd.DataFrame, verbose: bool = False): if data.empty: raise Exception("empty DataFrame") data.epoch = pd.to_datetime(data.epoch, unit="s", utc=True) data.set_index("epoch", inplace=True) if verbose: print(f"{data.info()}") def plot_empty_track(figsize=(16,9), save_fig=None, verbose=False): fig = plt.figure(figsize=tuple(i/2.54 for i in figsize)) plt.text( x = 0.5, y = 0.5, s = "No GPS Track available", fontsize=36, horizontalalignment='center', ) fig.tight_layout() if save_fig: plt.savefig(save_fig, dpi=150) else: plt.show() def plot_track( track: pd.DataFrame, margin=2, figsize=(16, 9), save_fig=None, verbose=False, transparent=True, ): # create new figure, axes instances. 
fig = plt.figure(figsize=tuple((i/2.54 for i in figsize))) if transparent: fig.patch.set_alpha(0) min_lat = track.lat.min() - margin max_lat = track.lat.max() + margin min_lon = track.lon.min() - margin max_lon = track.lon.max() + margin if verbose: print( f"min_lat: {min_lat} min_lon: {min_lon} max_lat: {max_lat} max_lon: {max_lon}" ) m = Basemap( llcrnrlon=min_lon, llcrnrlat=min_lat, urcrnrlon=max_lon, urcrnrlat=max_lat, resolution="h", projection="merc", lat_0=(max_lat - min_lat) / 2, lon_0=(max_lon - min_lon) / 2, ) m.drawcoastlines() m.fillcontinents() m.drawcountries() m.drawstates() m.etopo() m.drawmapboundary(fill_color="#46bcec") m.fillcontinents(color="white", lake_color="#46bcec") # draw parallels m.drawparallels(np.arange(-90, 90, 2), labels=[1, 1, 1, 1]) # draw meridians m.drawmeridians(np.arange(-180, 180, 2), labels=[1, 1, 1, 1]) lons, lats = m(track.lon, track.lat) m.scatter(lons, lats, marker="o", color="tab:red", zorder=5, s=5) fig.tight_layout() if save_fig: plt.savefig(save_fig, dpi=150) else: plt.show() def get_msb_dataset( data_dir: str, begin: datetime, end: datetime, data_type: str = "imu", verbose=False, ) -> pd.DataFrame: files = find_time_files( file_dir=path.join(data_dir, data_type), begin=begin, end=end, verbose=verbose, ) if not files: if verbose: print('did not find any matching files!') return pd.DataFrame() if verbose: print(f'found {len(files)} data files') data = read_data(files, verbose=verbose) data.sort_index(inplace=True) return data def calc_abs_acc(data: pd.DataFrame, verbose=False): if verbose: print('calculating absolute acceleration') data.insert( loc=3, column="acc_abs", value=( np.sqrt( np.power(data.acc_x, 2) + np.power(data.acc_y, 2) + np.power(data.acc_z, 2) ) ), ) def calc_block_maxima( data: pd.DataFrame, resample_interval: str = "10min", verbose=False, ) -> pd.DataFrame: t_i = list() max_acc_abs = list() max_acc_abs_i = list() std_acc_abs = list() for t, d in data.acc_abs.resample(resample_interval): if d.empty: continue max_acc_abs.append(d.max()) max_acc_abs_i.append(d.idxmax()) std_acc_abs = d.std() t_i.append(t) if verbose: print(f'calculated block maxima') return pd.DataFrame( { "max_acc_block_i": max_acc_abs_i, "max_acc_block": max_acc_abs, "std_acc_block": std_acc_abs, }, index=t_i, ) def plot_block_maxima( data: pd.DataFrame, acc_max_block: pd.DataFrame, save_fig=None, transparent=False, figsize=(18, 9), verbose=False, ): fig = plt.figure(figsize=tuple((i/2.54 for i in figsize))) data.acc_abs.plot(label="acc_abs") # block_max_acc_abs.scatter(label='10 min maxima') plt.scatter( acc_max_block.max_acc_block_i, acc_max_block.max_acc_block, marker="x", color="tab:orange", ) fig.tight_layout() if save_fig: plt.savefig(save_fig, dpi=150) def main(): config = parse_arguments() now = datetime.fromtimestamp(time.time(), timezone.utc) now_string = now.strftime('%Y%m%dT%H%M%S%z') # now_string = now.__str__().replace(':', '').replace('-','') begin = now - timedelta(hours=config["report_time_window"]) if config["verbose"]: print(f"postprocess measurements in interval: {begin} -> {now}") if not path.isdir(config["data_dir"]): print(f'not a directory: {config["data_dir"]}') sys.exit(-1) if not path.isdir(config["results_dir"]): try: makedirs(config["results_dir"], exist_ok=True) except Exception as e: print(f"failed to create directory: {e}") sys.exit(-1) output_dir = path.join(config['results_dir'], now_string) if config['verbose']: print(f'output dir: {output_dir}') try: makedirs(output_dir, exist_ok=True) except Exception as e: print(f'failed 
to create directory: {output_dir}') sys.exit(-1) # iterate over available data directories for msb in glob(path.join(config["data_dir"], "MSB-????-A")): msb_name = path.basename(msb) if config["verbose"]: print(f"processing imu {msb}") imu_data = get_msb_dataset( data_dir=msb, begin=begin, end=now, data_type="imu", verbose=config["verbose"], ) if imu_data.empty: if config["verbose"]: print(f"did not find any measurements, skipping") continue if config["verbose"]: print(f"{imu_data.info()}") calc_abs_acc(imu_data, verbose=config["verbose"]) block_maxima_acc = calc_block_maxima(imu_data, verbose=config["verbose"]) # create specifc output dir for the current msb msb_output_dir = path.join(output_dir, msb_name) if config['verbose']: print(msb_output_dir) try: makedirs(msb_output_dir, exist_ok=False) except Exception as e: print(f'failed to create directory: {msb_output_dir}: {e}') sys.exit(-1) plot_block_maxima( imu_data, block_maxima_acc, save_fig=path.join( msb_output_dir, f"{msb_name}_acc-max-block_{now_string}.jpg" ), verbose=config["verbose"], ) block_maxima_acc.to_csv( path.join( msb_output_dir, f"{msb_name}_acc-max-block_{now_string}.csv" ), date_format = "%s", ) for msb in glob(path.join(config["data_dir"], "MSB-????-A")): msb_name = path.basename(msb) if config["verbose"]: print(f"processing gps {msb}") gps_data = get_msb_dataset(data_dir=msb, begin=begin, end=now, data_type="gps") # if there is no fix, then lat or lon are 0 if config["verbose"]: print("dropping rows with no lat/lon data") gps_data = gps_data[gps_data.lat != 0] if config['verbose']: print(f'{gps_data.info()}') # create specifc output dir for the current msb msb_output_dir = path.join(output_dir, msb_name) try: makedirs(msb_output_dir, exist_ok=True) except Exception as e: print(f'failed to create directory: {msb_output_dir}: {e}') sys.exit(-1) if gps_data.empty: if config['verbose']: print('no gps tracks available') plot_empty_track( save_fig=path.join(msb_output_dir, f"{msb_name}_gps_{now_string}.jpg") ) continue # build gps maps plot_track( track=gps_data, save_fig=path.join(msb_output_dir, f"{msb_name}_gps_{now_string}.jpg"), ) gps_data.to_csv( path.join( msb_output_dir, f"{msb_name}_gps_{now_string}.csv" ), date_format = "%s", ) if __name__ == "__main__": main()
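# A sketch of reusing the helpers above outside the CLI entry point. The module
# name and data path below are hypothetical; adjust both to the actual file
# layout before running.
from datetime import datetime, timedelta, timezone

from msb_24h_report import (  # hypothetical module name for this script
    calc_abs_acc,
    calc_block_maxima,
    find_time_files,
    read_data,
)

end = datetime.now(timezone.utc)
begin = end - timedelta(hours=24)

# Load the last 24 hours of IMU CSVs for one (hypothetical) motion sensor box.
files = find_time_files("/home/pi/msb_processed/MSB-0001-A/imu", begin=begin, end=end)
imu = read_data(files)
imu.sort_index(inplace=True)
calc_abs_acc(imu)
print(calc_block_maxima(imu, resample_interval="10min").head())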
<filename>array_api_tests/test_special_cases.py # We use __future__ for forward reference type hints - this will work for even py3.8.0 # See https://stackoverflow.com/a/33533514/5193926 from __future__ import annotations import inspect import math import operator import re from dataclasses import dataclass, field from decimal import ROUND_HALF_EVEN, Decimal from enum import Enum, auto from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple from warnings import warn import pytest from hypothesis import assume, given, note from hypothesis import strategies as st from array_api_tests.typing import Array, DataType from . import dtype_helpers as dh from . import hypothesis_helpers as hh from . import pytest_helpers as ph from . import shape_helpers as sh from . import xps from ._array_module import mod as xp from .stubs import category_to_funcs from .test_operators_and_elementwise_functions import ( oneway_broadcastable_shapes, oneway_promotable_dtypes, ) pytestmark = pytest.mark.ci # The special case test casess are built on runtime via the parametrized # test_unary and test_binary functions. Most of this file consists of utility # classes and functions, all bought together to create the test cases (pytest # params), to finally be run through the general test logic of either test_unary # or test_binary. UnaryCheck = Callable[[float], bool] BinaryCheck = Callable[[float, float], bool] def make_strict_eq(v: float) -> UnaryCheck: if math.isnan(v): return math.isnan if v == 0: if ph.is_pos_zero(v): return ph.is_pos_zero else: return ph.is_neg_zero def strict_eq(i: float) -> bool: return i == v return strict_eq def make_strict_neq(v: float) -> UnaryCheck: strict_eq = make_strict_eq(v) def strict_neq(i: float) -> bool: return not strict_eq(i) return strict_neq def make_rough_eq(v: float) -> UnaryCheck: assert math.isfinite(v) # sanity check def rough_eq(i: float) -> bool: return math.isclose(i, v, abs_tol=0.01) return rough_eq def make_gt(v: float) -> UnaryCheck: assert not math.isnan(v) # sanity check def gt(i: float) -> bool: return i > v return gt def make_lt(v: float) -> UnaryCheck: assert not math.isnan(v) # sanity check def lt(i: float) -> bool: return i < v return lt def make_or(cond1: UnaryCheck, cond2: UnaryCheck) -> UnaryCheck: def or_(i: float) -> bool: return cond1(i) or cond2(i) return or_ def make_and(cond1: UnaryCheck, cond2: UnaryCheck) -> UnaryCheck: def and_(i: float) -> bool: return cond1(i) or cond2(i) return and_ def make_not_cond(cond: UnaryCheck) -> UnaryCheck: def not_cond(i: float) -> bool: return not cond(i) return not_cond def absify_cond(cond: UnaryCheck) -> UnaryCheck: def abs_cond(i: float) -> bool: return cond(abs(i)) return abs_cond repr_to_value = { "NaN": float("nan"), "infinity": float("inf"), "0": 0.0, "1": 1.0, } r_value = re.compile(r"([+-]?)(.+)") r_pi = re.compile(r"(\d?)π(?:/(\d))?") @dataclass class ParseError(ValueError): value: str def parse_value(value_str: str) -> float: """ Parses a value string to return a float, e.g. >>> parse_value('1') 1. 
>>> parse_value('-infinity') -float('inf') >>> parse_value('3π/4') 2.356194490192345 """ m = r_value.match(value_str) if m is None: raise ParseError(value_str) if pi_m := r_pi.match(m.group(2)): value = math.pi if numerator := pi_m.group(1): value *= int(numerator) if denominator := pi_m.group(2): value /= int(denominator) else: value = repr_to_value[m.group(2)] if sign := m.group(1): if sign == "-": value *= -1 return value r_code = re.compile(r"``([^\s]+)``") r_approx_value = re.compile( rf"an implementation-dependent approximation to {r_code.pattern}" ) def parse_inline_code(inline_code: str) -> float: """ Parses a Sphinx code string to return a float, e.g. >>> parse_value('``0``') 0. >>> parse_value('``NaN``') float('nan') """ if m := r_code.match(inline_code): return parse_value(m.group(1)) else: raise ParseError(inline_code) r_not = re.compile("not (.+)") r_equal_to = re.compile(f"equal to {r_code.pattern}") r_array_element = re.compile(r"``([+-]?)x([12])_i``") r_either_code = re.compile(f"either {r_code.pattern} or {r_code.pattern}") r_gt = re.compile(f"greater than {r_code.pattern}") r_lt = re.compile(f"less than {r_code.pattern}") class FromDtypeFunc(Protocol): """ Type hint for functions that return an elements strategy for arrays of the given dtype, e.g. xps.from_dtype(). """ def __call__(self, dtype: DataType, **kw) -> st.SearchStrategy[float]: ... @dataclass class BoundFromDtype(FromDtypeFunc): """ A xps.from_dtype()-like callable with bounded kwargs, filters and base function. We can bound: 1. Keyword arguments that xps.from_dtype() can use, e.g. >>> from_dtype = BoundFromDtype(kwargs={'min_value': 0, 'allow_infinity': False}) >>> strategy = from_dtype(xp.float64) is equivalent to >>> strategy = xps.from_dtype(xp.float64, min_value=0, allow_infinity=False) i.e. a strategy that generates finite floats above 0 2. Functions that filter the elements strategy that xps.from_dtype() returns, e.g. >>> from_dtype = BoundFromDtype(filter=lambda i: i != 0) >>> strategy = from_dtype(xp.float64) is equivalent to >>> strategy = xps.from_dtype(xp.float64).filter(lambda i: i != 0) i.e. a strategy that generates any float except +0 and -0 3. The underlying function that returns an elements strategy from a dtype, e.g. >>> from_dtype = BoundFromDtype( ... from_dtype=lambda d: st.integers( ... math.ceil(xp.finfo(d).min), math.floor(xp.finfo(d).max) ... ) ... ) >>> strategy = from_dtype(xp.float64) is equivalent to >>> strategy = st.integers( ... math.ceil(xp.finfo(xp.float64).min), math.floor(xp.finfo(xp.float64).max) ... ) i.e. a strategy that generates integers (within the dtype's range) This is useful to avoid translating special case conditions into either a dict, filter or "base func", and instead allows us to generalise these three components into a callable equivalent of xps.from_dtype(). Additionally, BoundFromDtype instances can be added together. This allows us to keep parsing each condition individually - so we don't need to duplicate complicated parsing code - as ultimately we can represent (and subsequently test for) special cases which have more than one condition per array, e.g. "If x1_i is greater than 0 and x1_i is not 42, ..." 
could be translated as >>> gt_0_from_dtype = BoundFromDtype(kwargs={'min_value': 0}) >>> not_42_from_dtype = BoundFromDtype(filter=lambda i: i != 42) >>> gt_0_from_dtype + not_42_from_dtype BoundFromDtype(kwargs={'min_value': 0}, filter=<lambda>(i)) """ kwargs: Dict[str, Any] = field(default_factory=dict) filter_: Optional[Callable[[Array], bool]] = None base_func: Optional[FromDtypeFunc] = None def __call__(self, dtype: DataType, **kw) -> st.SearchStrategy[float]: assert len(kw) == 0 # sanity check from_dtype = self.base_func or xps.from_dtype strat = from_dtype(dtype, **self.kwargs) if self.filter_ is not None: strat = strat.filter(self.filter_) return strat def __add__(self, other: BoundFromDtype) -> BoundFromDtype: for k in self.kwargs.keys(): if k in other.kwargs.keys(): assert self.kwargs[k] == other.kwargs[k] # sanity check kwargs = {**self.kwargs, **other.kwargs} if self.filter_ is not None and other.filter_ is not None: filter_ = lambda i: self.filter_(i) and other.filter_(i) else: if self.filter_ is not None: filter_ = self.filter_ elif other.filter_ is not None: filter_ = other.filter_ else: filter_ = None # sanity check assert not (self.base_func is not None and other.base_func is not None) if self.base_func is not None: base_func = self.base_func elif other.base_func is not None: base_func = other.base_func else: base_func = None return BoundFromDtype(kwargs, filter_, base_func) def wrap_strat_as_from_dtype(strat: st.SearchStrategy[float]) -> FromDtypeFunc: """ Wraps an elements strategy as a xps.from_dtype()-like function """ def from_dtype(dtype: DataType, **kw) -> st.SearchStrategy[float]: assert len(kw) == 0 # sanity check return strat return from_dtype def parse_cond(cond_str: str) -> Tuple[UnaryCheck, str, BoundFromDtype]: """ Parses a Sphinx-formatted condition string to return: 1. A function which takes an input and returns True if it meets the condition, otherwise False. 2. A string template for expressing the condition. 3. A xps.from_dtype()-like function which returns a strategy that generates elements that meet the condition. e.g. >>> cond, expr_template, from_dtype = parse_cond('greater than ``0``') >>> cond(42) True >>> cond(-123) False >>> expr_template.replace('{}', 'x_i') 'x_i > 0' >>> strategy = from_dtype(xp.float64) >>> for _ in range(5): ... print(strategy.example()) 1. 0.1 1.7976931348623155e+179 inf 124.978 """ # We first identify whether the condition starts with "not". If so, we note # this but parse the condition as if it was not negated. if m := r_not.match(cond_str): cond_str = m.group(1) not_cond = True else: not_cond = False # We parse the condition to identify the condition function, expression # template, and xps.from_dtype()-like condition strategy. 
kwargs = {} filter_ = None from_dtype = None # type: ignore if m := r_code.match(cond_str): value = parse_value(m.group(1)) cond = make_strict_eq(value) expr_template = "{} is " + m.group(1) from_dtype = wrap_strat_as_from_dtype(st.just(value)) elif m := r_either_code.match(cond_str): v1 = parse_value(m.group(1)) v2 = parse_value(m.group(2)) cond = make_or(make_strict_eq(v1), make_strict_eq(v2)) expr_template = "({} is " + m.group(1) + " or {} == " + m.group(2) + ")" from_dtype = wrap_strat_as_from_dtype(st.sampled_from([v1, v2])) elif m := r_equal_to.match(cond_str): value = parse_value(m.group(1)) if math.isnan(value): raise ParseError(cond_str) cond = lambda i: i == value expr_template = "{} == " + m.group(1) elif m := r_gt.match(cond_str): value = parse_value(m.group(1)) cond = make_gt(value) expr_template = "{} > " + m.group(1) kwargs = {"min_value": value, "exclude_min": True} elif m := r_lt.match(cond_str): value = parse_value(m.group(1)) cond = make_lt(value) expr_template = "{} < " + m.group(1) kwargs = {"max_value": value, "exclude_max": True} elif cond_str in ["finite", "a finite number"]: cond = math.isfinite expr_template = "isfinite({})" kwargs = {"allow_nan": False, "allow_infinity": False} elif cond_str in "a positive (i.e., greater than ``0``) finite number": cond = lambda i: math.isfinite(i) and i > 0 expr_template = "isfinite({}) and {} > 0" kwargs = { "allow_nan": False, "allow_infinity": False, "min_value": 0, "exclude_min": True, } elif cond_str == "a negative (i.e., less than ``0``) finite number": cond = lambda i: math.isfinite(i) and i < 0 expr_template = "isfinite({}) and {} < 0" kwargs = { "allow_nan": False, "allow_infinity": False, "max_value": 0, "exclude_max": True, } elif cond_str == "positive": cond = lambda i: math.copysign(1, i) == 1 expr_template = "copysign(1, {}) == 1" # We assume (positive) zero is special cased seperately kwargs = {"min_value": 0, "exclude_min": True} elif cond_str == "negative": cond = lambda i: math.copysign(1, i) == -1 expr_template = "copysign(1, {}) == -1" # We assume (negative) zero is special cased seperately kwargs = {"max_value": 0, "exclude_max": True} elif "nonzero finite" in cond_str: cond = lambda i: math.isfinite(i) and i != 0 expr_template = "isfinite({}) and {} != 0" kwargs = {"allow_nan": False, "allow_infinity": False} filter_ = lambda n: n != 0 elif cond_str == "an integer value": cond = lambda i: i.is_integer() expr_template = "{}.is_integer()" from_dtype = integers_from_dtype # type: ignore elif cond_str == "an odd integer value": cond = lambda i: i.is_integer() and i % 2 == 1 expr_template = "{}.is_integer() and {} % 2 == 1" if not_cond: expr_template = f"({expr_template})" def from_dtype(dtype: DataType, **kw) -> st.SearchStrategy[float]: return integers_from_dtype(dtype, **kw).filter(lambda n: n % 2 == 1) else: raise ParseError(cond_str) if not_cond: # We handle negated conitions by simply negating the condition function # and using it as a filter for xps.from_dtype() (or an equivalent). cond = make_not_cond(cond) expr_template = f"not {expr_template}" filter_ = cond return cond, expr_template, BoundFromDtype(filter_=filter_) else: return cond, expr_template, BoundFromDtype(kwargs, filter_, from_dtype) def parse_result(result_str: str) -> Tuple[UnaryCheck, str]: """ Parses a Sphinx-formatted result string to return: 1. A function which takes an input and returns True if it is the expected result (or meets the condition of the expected result), otherwise False. 2. A string that expresses the result. e.g. 
>>> check_result, expr = parse_result('``42``') >>> check_result(7) False >>> check_result(42) True >>> expr '42' """ if m := r_code.match(result_str): value = parse_value(m.group(1)) check_result = make_strict_eq(value) # type: ignore expr = m.group(1) elif m := r_approx_value.match(result_str): value = parse_value(m.group(1)) check_result = make_rough_eq(value) # type: ignore repr_ = m.group(1).replace("π", "pi") # for pytest param names expr = f"roughly {repr_}" elif "positive" in result_str: def check_result(result: float) -> bool: if math.isnan(result): # The sign of NaN is out-of-scope return True return math.copysign(1, result) == 1 expr = "positive sign" elif "negative" in result_str: def check_result(result: float) -> bool: if math.isnan(result): # The sign of NaN is out-of-scope return True return math.copysign(1, result) == -1 expr = "negative sign" else: raise ParseError(result_str) return check_result, expr class Case(Protocol): cond_expr: str result_expr: str def cond(self, *args) -> bool: ... def check_result(self, *args) -> bool: ... def __str__(self) -> str: return f"{self.cond_expr} -> {self.result_expr}" def __repr__(self) -> str: return f"{self.__class__.__name__}(<{self}>)" class UnaryCond(Protocol): def __call__(self, i: float) -> bool: ... class UnaryResultCheck(Protocol): def __call__(self, i: float, result: float) -> bool: ... @dataclass(repr=False) class UnaryCase(Case): cond_expr: str result_expr: str cond_from_dtype: FromDtypeFunc cond: UnaryCheck check_result: UnaryResultCheck r_unary_case = re.compile("If ``x_i`` is (.+), the result is (.+)") r_even_round_halves_case = re.compile( "If two integers are equally close to ``x_i``, " "the result is the even integer closest to ``x_i``" ) def trailing_halves_from_dtype(dtype: DataType) -> st.SearchStrategy[float]: """ Returns a strategy that generates floats that end with .5 and are within the bounds of dtype. """ # We bound our base integers strategy to a range of values which should be # able to represent a decimal 5 when .5 is added or subtracted. if dtype == xp.float32: abs_max = 10**4 else: abs_max = 10**16 return st.sampled_from([0.5, -0.5]).flatmap( lambda half: st.integers(-abs_max, abs_max).map(lambda n: n + half) ) even_round_halves_case = UnaryCase( cond_expr="modf(i)[0] == 0.5", cond=lambda i: math.modf(i)[0] == 0.5, cond_from_dtype=trailing_halves_from_dtype, result_expr="Decimal(i).to_integral_exact(ROUND_HALF_EVEN)", check_result=lambda i, result: ( result == float(Decimal(i).to_integral_exact(ROUND_HALF_EVEN)) ), ) def make_unary_check_result(check_just_result: UnaryCheck) -> UnaryResultCheck: def check_result(i: float, result: float) -> bool: return check_just_result(result) return check_result def parse_unary_docstring(docstring: str) -> List[UnaryCase]: """ Parses a Sphinx-formatted docstring of a unary function to return a list of codified unary cases, e.g. >>> def sqrt(x): ... ''' ... Calculates the square root ... ... **Special Cases** ... ... For floating-point operands, ... ... - If ``x_i`` is less than ``0``, the result is ``NaN``. ... - If ``x_i`` is ``NaN``, the result is ``NaN``. ... - If ``x_i`` is ``+0``, the result is ``+0``. ... - If ``x_i`` is ``-0``, the result is ``-0``. ... - If ``x_i`` is ``+infinity``, the result is ``+infinity``. ... ... Parameters ... ---------- ... x: array ... input array ... ... Returns ... ------- ... out: array ... an array containing the square root of each element in ``x`` ... ''' ... 
>>> unary_cases = parse_unary_docstring(sqrt.__doc__) >>> for case in unary_cases: ... print(repr(case)) UnaryCase(<x_i < 0 -> NaN>) UnaryCase(<x_i == NaN -> NaN>) UnaryCase(<x_i == +0 -> +0>) UnaryCase(<x_i == -0 -> -0>) UnaryCase(<x_i == +infinity -> +infinity>) >>> lt_0_case = unary_cases[0] >>> lt_0_case.cond(-123) True >>> lt_0_case.check_result(-123, float('nan')) True """ match = r_special_cases.search(docstring) if match is None: return [] lines = match.group(1).split("\n")[:-1] cases = [] for line in lines: if m := r_case.match(line): case = m.group(1) else: warn(f"line not machine-readable: '{line}'") continue if m := r_unary_case.search(case): try: cond, cond_expr_template, cond_from_dtype = parse_cond(m.group(1)) _check_result, result_expr = parse_result(m.group(2)) except ParseError as e: warn(f"not machine-readable: '{e.value}'") continue cond_expr = cond_expr_template.replace("{}", "x_i") # Do not define check_result in this function's body - see # parse_binary_case comment. check_result = make_unary_check_result(_check_result) case = UnaryCase( cond_expr=cond_expr, cond=cond, cond_from_dtype=cond_from_dtype, result_expr=result_expr, check_result=check_result, ) cases.append(case) elif m := r_even_round_halves_case.search(case): cases.append(even_round_halves_case) else: if not r_remaining_case.search(case): warn(f"case not machine-readable: '{case}'") return cases class BinaryCond(Protocol): def __call__(self, i1: float, i2: float) -> bool: ... class BinaryResultCheck(Protocol): def __call__(self, i1: float, i2: float, result: float) -> bool: ... @dataclass(repr=False) class BinaryCase(Case): cond_expr: str result_expr: str x1_cond_from_dtype: FromDtypeFunc x2_cond_from_dtype: FromDtypeFunc cond: BinaryCond check_result: BinaryResultCheck r_special_cases = re.compile( r"\*\*Special [Cc]ases\*\*(?:\n.*)+" r"For floating-point operands,\n+" r"((?:\s*-\s*.*\n)+)" ) r_case = re.compile(r"\s+-\s*(.*)\.\n?") r_binary_case = re.compile("If (.+), the result (.+)") r_remaining_case = re.compile("In the remaining cases.+") r_cond_sep = re.compile(r"(?<!``x1_i``),? and |(?<!i\.e\.), ") r_cond = re.compile("(.+) (?:is|have) (.+)") r_input_is_array_element = re.compile( f"{r_array_element.pattern} is {r_array_element.pattern}" ) r_both_inputs_are_value = re.compile("are both (.+)") r_element = re.compile("x([12])_i") r_input = re.compile(rf"``{r_element.pattern}``") r_abs_input = re.compile(rf"``abs\({r_element.pattern}\)``") r_and_input = re.compile(f"{r_input.pattern} and {r_input.pattern}") r_or_input = re.compile(f"either {r_input.pattern} or {r_input.pattern}") r_result = re.compile(r"(?:is|has a) (.+)") class BinaryCondArg(Enum): FIRST = auto() SECOND = auto() BOTH = auto() EITHER = auto() @classmethod def from_x_no(cls, string): if string == "1": return cls.FIRST elif string == "2": return cls.SECOND else: raise ValueError(f"{string=} not '1' or '2'") def noop(n: float) -> float: return n def make_binary_cond( cond_arg: BinaryCondArg, unary_cond: UnaryCheck, *, input_wrapper: Optional[Callable[[float], float]] = None, ) -> BinaryCond: """ Wraps a unary condition as a binary condition, e.g. 
>>> unary_cond = lambda i: i == 42 >>> binary_cond_first = make_binary_cond(BinaryCondArg.FIRST, unary_cond) >>> binary_cond_first(42, 0) True >>> binary_cond_second = make_binary_cond(BinaryCondArg.SECOND, unary_cond) >>> binary_cond_second(42, 0) False >>> binary_cond_second(0, 42) True >>> binary_cond_both = make_binary_cond(BinaryCondArg.BOTH, unary_cond) >>> binary_cond_both(42, 0) False >>> binary_cond_both(42, 42) True >>> binary_cond_either = make_binary_cond(BinaryCondArg.EITHER, unary_cond) >>> binary_cond_either(0, 0) False >>> binary_cond_either(42, 0) True >>> binary_cond_either(0, 42) True >>> binary_cond_either(42, 42) True """ if input_wrapper is None: input_wrapper = noop if cond_arg == BinaryCondArg.FIRST: def partial_cond(i1: float, i2: float) -> bool: return unary_cond(input_wrapper(i1)) elif cond_arg == BinaryCondArg.SECOND: def partial_cond(i1: float, i2: float) -> bool: return unary_cond(input_wrapper(i2)) elif cond_arg == BinaryCondArg.BOTH: def partial_cond(i1: float, i2: float) -> bool: return unary_cond(input_wrapper(i1)) and unary_cond(input_wrapper(i2)) else: def partial_cond(i1: float, i2: float) -> bool: return unary_cond(input_wrapper(i1)) or unary_cond(input_wrapper(i2)) return partial_cond def make_eq_input_check_result( eq_to: BinaryCondArg, *, eq_neg: bool = False ) -> BinaryResultCheck: """ Returns a result checker for cases where the result equals an array element >>> check_result_first = make_eq_input_check_result(BinaryCondArg.FIRST) >>> check_result(42, 0, 42) True >>> check_result_second = make_eq_input_check_result(BinaryCondArg.SECOND) >>> check_result(42, 0, 42) False >>> check_result(0, 42, 42) True >>> check_result_neg_first = make_eq_input_check_result(BinaryCondArg.FIRST, eq_neg=True) >>> check_result_neg_first(42, 0, 42) False >>> check_result_neg_first(42, 0, -42) True """ if eq_neg: input_wrapper = lambda i: -i else: input_wrapper = noop if eq_to == BinaryCondArg.FIRST: def check_result(i1: float, i2: float, result: float) -> bool: eq = make_strict_eq(input_wrapper(i1)) return eq(result) elif eq_to == BinaryCondArg.SECOND: def check_result(i1: float, i2: float, result: float) -> bool: eq = make_strict_eq(input_wrapper(i2)) return eq(result) else: raise ValueError(f"{eq_to=} must be FIRST or SECOND") return check_result def make_binary_check_result(check_just_result: UnaryCheck) -> BinaryResultCheck: def check_result(i1: float, i2: float, result: float) -> bool: return check_just_result(result) return check_result def integers_from_dtype(dtype: DataType, **kw) -> st.SearchStrategy[float]: """ Returns a strategy that generates float-casted integers within the bounds of dtype. """ for k in kw.keys(): # sanity check assert k in ["min_value", "max_value", "exclude_min", "exclude_max"] m, M = dh.dtype_ranges[dtype] if "min_value" in kw.keys(): m = kw["min_value"] if "exclude_min" in kw.keys(): m += 1 if "max_value" in kw.keys(): M = kw["max_value"] if "exclude_max" in kw.keys(): M -= 1 return st.integers(math.ceil(m), math.floor(M)).map(float) def parse_binary_case(case_str: str) -> BinaryCase: """ Parses a Sphinx-formatted binary case string to return codified binary cases, e.g. >>> case_str = ( ... "If ``x1_i`` is greater than ``0``, ``x1_i`` is a finite number, " ... "and ``x2_i`` is ``+infinity``, the result is ``NaN``." ... 
) >>> case = parse_binary_case(case_str) >>> case BinaryCase(<x1_i > 0 and isfinite(x1_i) and x2_i == +infinity -> NaN>) >>> case.cond(42, float('inf')) True >>> case.check_result(42, float('inf'), float('nan')) True """ case_m = r_binary_case.match(case_str) if case_m is None: raise ParseError(case_str) cond_strs = r_cond_sep.split(case_m.group(1)) partial_conds = [] partial_exprs = [] x1_cond_from_dtypes = [] x2_cond_from_dtypes = [] for cond_str in cond_strs: if m := r_input_is_array_element.match(cond_str): in_sign, in_no, other_sign, other_no = m.groups() if in_sign != "" or other_no == in_no: raise ParseError(cond_str) partial_expr = f"{in_sign}x{in_no}_i == {other_sign}x{other_no}_i" # For these scenarios, we want to make sure both array elements # generate respective to one another by using a shared strategy. shared_from_dtype = lambda d, **kw: st.shared( xps.from_dtype(d, **kw), key=cond_str ) input_wrapper = lambda i: -i if other_sign == "-" else noop if other_no == "1": def partial_cond(i1: float, i2: float) -> bool: eq = make_strict_eq(input_wrapper(i1)) return eq(i2) _x2_cond_from_dtype = shared_from_dtype # type: ignore def _x1_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]: return shared_from_dtype(dtype, **kw).map(input_wrapper) elif other_no == "2": def partial_cond(i1: float, i2: float) -> bool: eq = make_strict_eq(input_wrapper(i2)) return eq(i1) _x1_cond_from_dtype = shared_from_dtype # type: ignore def _x2_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]: return shared_from_dtype(dtype, **kw).map(input_wrapper) else: raise ParseError(cond_str) x1_cond_from_dtypes.append(BoundFromDtype(base_func=_x1_cond_from_dtype)) x2_cond_from_dtypes.append(BoundFromDtype(base_func=_x2_cond_from_dtype)) elif m := r_both_inputs_are_value.match(cond_str): unary_cond, expr_template, cond_from_dtype = parse_cond(m.group(1)) left_expr = expr_template.replace("{}", "x1_i") right_expr = expr_template.replace("{}", "x2_i") partial_expr = f"{left_expr} and {right_expr}" partial_cond = make_binary_cond( # type: ignore BinaryCondArg.BOTH, unary_cond ) x1_cond_from_dtypes.append(cond_from_dtype) x2_cond_from_dtypes.append(cond_from_dtype) else: cond_m = r_cond.match(cond_str) if cond_m is None: raise ParseError(cond_str) input_str, value_str = cond_m.groups() if value_str == "the same mathematical sign": partial_expr = "copysign(1, x1_i) == copysign(1, x2_i)" def partial_cond(i1: float, i2: float) -> bool: return math.copysign(1, i1) == math.copysign(1, i2) elif value_str == "different mathematical signs": partial_expr = "copysign(1, x1_i) != copysign(1, x2_i)" def partial_cond(i1: float, i2: float) -> bool: return math.copysign(1, i1) != math.copysign(1, i2) else: unary_cond, expr_template, cond_from_dtype = parse_cond(value_str) # Do not define partial_cond via the def keyword or lambda # expressions, as one partial_cond definition can mess up # previous definitions in the partial_conds list. This is a # hard-limitation of using local functions with the same name # and that use the same outer variables (i.e. unary_cond). Use # def in a called function avoids this problem. 
input_wrapper = None if m := r_input.match(input_str): x_no = m.group(1) partial_expr = expr_template.replace("{}", f"x{x_no}_i") cond_arg = BinaryCondArg.from_x_no(x_no) elif m := r_abs_input.match(input_str): x_no = m.group(1) partial_expr = expr_template.replace("{}", f"abs(x{x_no}_i)") cond_arg = BinaryCondArg.from_x_no(x_no) input_wrapper = abs elif r_and_input.match(input_str): left_expr = expr_template.replace("{}", "x1_i") right_expr = expr_template.replace("{}", "x2_i") partial_expr = f"{left_expr} and {right_expr}" cond_arg = BinaryCondArg.BOTH elif r_or_input.match(input_str): left_expr = expr_template.replace("{}", "x1_i") right_expr = expr_template.replace("{}", "x2_i") partial_expr = f"{left_expr} or {right_expr}" if len(cond_strs) != 1: partial_expr = f"({partial_expr})" cond_arg = BinaryCondArg.EITHER else: raise ParseError(input_str) partial_cond = make_binary_cond( # type: ignore cond_arg, unary_cond, input_wrapper=input_wrapper ) if cond_arg == BinaryCondArg.FIRST: x1_cond_from_dtypes.append(cond_from_dtype) elif cond_arg == BinaryCondArg.SECOND: x2_cond_from_dtypes.append(cond_from_dtype) elif cond_arg == BinaryCondArg.BOTH: x1_cond_from_dtypes.append(cond_from_dtype) x2_cond_from_dtypes.append(cond_from_dtype) else: # For "either x1_i or x2_i is <condition>" cases, we want to # test three scenarios: # # 1. x1_i is <condition> # 2. x2_i is <condition> # 3. x1_i AND x2_i is <condition> # # This is achieved by a shared base strategy that picks one # of these scenarios to determine whether each array will # use either cond_from_dtype() (i.e. meet the condition), or # simply xps.from_dtype() (i.e. be any value). use_x1_or_x2_strat = st.shared( st.sampled_from([(True, False), (False, True), (True, True)]) ) def _x1_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]: assert len(kw) == 0 # sanity check return use_x1_or_x2_strat.flatmap( lambda t: cond_from_dtype(dtype) if t[0] else xps.from_dtype(dtype) ) def _x2_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]: assert len(kw) == 0 # sanity check return use_x1_or_x2_strat.flatmap( lambda t: cond_from_dtype(dtype) if t[1] else xps.from_dtype(dtype) ) x1_cond_from_dtypes.append( BoundFromDtype(base_func=_x1_cond_from_dtype) ) x2_cond_from_dtypes.append( BoundFromDtype(base_func=_x2_cond_from_dtype) ) partial_conds.append(partial_cond) partial_exprs.append(partial_expr) result_m = r_result.match(case_m.group(2)) if result_m is None: raise ParseError(case_m.group(2)) result_str = result_m.group(1) # Like with partial_cond, do not define check_result in this function's body. 
if m := r_array_element.match(result_str): sign, x_no = m.groups() result_expr = f"{sign}x{x_no}_i" check_result = make_eq_input_check_result( # type: ignore BinaryCondArg.from_x_no(x_no), eq_neg=sign == "-" ) else: _check_result, result_expr = parse_result(result_m.group(1)) check_result = make_binary_check_result(_check_result) cond_expr = " and ".join(partial_exprs) def cond(i1: float, i2: float) -> bool: return all(pc(i1, i2) for pc in partial_conds) x1_cond_from_dtype = sum(x1_cond_from_dtypes, start=BoundFromDtype()) x2_cond_from_dtype = sum(x2_cond_from_dtypes, start=BoundFromDtype()) return BinaryCase( cond_expr=cond_expr, cond=cond, x1_cond_from_dtype=x1_cond_from_dtype, x2_cond_from_dtype=x2_cond_from_dtype, result_expr=result_expr, check_result=check_result, ) r_redundant_case = re.compile("result.+determined by the rule already stated above") def parse_binary_docstring(docstring: str) -> List[BinaryCase]: """ Parses a Sphinx-formatted docstring of a binary function to return a list of codified binary cases, e.g. >>> def logaddexp(x1, x2): ... ''' ... Calculates the logarithm of the sum of exponentiations ... ... **Special Cases** ... ... For floating-point operands, ... ... - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. ... - If ``x1_i`` is ``+infinity`` and ``x2_i`` is not ``NaN``, the result is ``+infinity``. ... - If ``x1_i`` is not ``NaN`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``. ... ... Parameters ... ---------- ... x1: array ... first input array ... x2: array ... second input array ... ... Returns ... ------- ... out: array ... an array containing the results ... ''' ... >>> binary_cases = parse_binary_docstring(logaddexp.__doc__) >>> for case in binary_cases: ... print(repr(case)) BinaryCase(<x1_i == NaN or x2_i == NaN -> NaN>) BinaryCase(<x1_i == +infinity and not x2_i == NaN -> +infinity>) BinaryCase(<not x1_i == NaN and x2_i == +infinity -> +infinity>) """ match = r_special_cases.search(docstring) if match is None: return [] lines = match.group(1).split("\n")[:-1] cases = [] for line in lines: if m := r_case.match(line): case_str = m.group(1) else: warn(f"line not machine-readable: '{line}'") continue if r_redundant_case.search(case_str): continue if m := r_binary_case.match(case_str): try: case = parse_binary_case(case_str) cases.append(case) except ParseError as e: warn(f"not machine-readable: '{e.value}'") else: if not r_remaining_case.match(case_str): warn(f"case not machine-readable: '{case_str}'") return cases unary_params = [] binary_params = [] iop_params = [] func_to_op: Dict[str, str] = {v: k for k, v in dh.op_to_func.items()} for stub in category_to_funcs["elementwise"]: if stub.__doc__ is None: warn(f"{stub.__name__}() stub has no docstring") continue marks = [] try: func = getattr(xp, stub.__name__) except AttributeError: marks.append( pytest.mark.skip(reason=f"{stub.__name__} not found in array module") ) func = None sig = inspect.signature(stub) param_names = list(sig.parameters.keys()) if len(sig.parameters) == 0: warn(f"{func=} has no parameters") continue if param_names[0] == "x": if cases := parse_unary_docstring(stub.__doc__): func_name_to_func = {stub.__name__: func} if stub.__name__ in func_to_op.keys(): op_name = func_to_op[stub.__name__] op = getattr(operator, op_name) func_name_to_func[op_name] = op for func_name, func in func_name_to_func.items(): for case in cases: id_ = f"{func_name}({case.cond_expr}) -> {case.result_expr}" p = pytest.param(func_name, func, case, id=id_) unary_params.append(p) 
continue if len(sig.parameters) == 1: warn(f"{func=} has one parameter '{param_names[0]}' which is not named 'x'") continue if param_names[0] == "x1" and param_names[1] == "x2": if cases := parse_binary_docstring(stub.__doc__): func_name_to_func = {stub.__name__: func} if stub.__name__ in func_to_op.keys(): op_name = func_to_op[stub.__name__] op = getattr(operator, op_name) func_name_to_func[op_name] = op # We collect inplaceoperator test cases seperately iop_name = "__i" + op_name[2:] iop = getattr(operator, iop_name) for case in cases: id_ = f"{iop_name}({case.cond_expr}) -> {case.result_expr}" p = pytest.param(iop_name, iop, case, id=id_) iop_params.append(p) for func_name, func in func_name_to_func.items(): for case in cases: id_ = f"{func_name}({case.cond_expr}) -> {case.result_expr}" p = pytest.param(func_name, func, case, id=id_) binary_params.append(p) continue else: warn( f"{func=} starts with two parameters '{param_names[0]}' and " f"'{param_names[1]}', which are not named 'x1' and 'x2'" ) # test_unary and test_binary naively generate arrays, i.e. arrays that might not # meet the condition that is being test. We then forcibly make the array meet # the condition by picking a random index to insert an acceptable element. # # good_example is a flag that tells us whether Hypothesis generated an array # with at least on element that is special-cased. We reject the example when # its False - Hypothesis will complain if we reject too many examples, thus # indicating we've done something wrong. @pytest.mark.parametrize("func_name, func, case", unary_params) @given( x=xps.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes(min_side=1)), data=st.data(), ) def test_unary(func_name, func, case, x, data): set_idx = data.draw( xps.indices(x.shape, max_dims=0, allow_ellipsis=False), label="set idx" ) set_value = data.draw(case.cond_from_dtype(x.dtype), label="set value") x[set_idx] = set_value note(f"{x=}") res = func(x) good_example = False for idx in sh.ndindex(res.shape): in_ = float(x[idx]) if case.cond(in_): good_example = True out = float(res[idx]) f_in = f"{sh.fmt_idx('x', idx)}={in_}" f_out = f"{sh.fmt_idx('out', idx)}={out}" assert case.check_result(in_, out), ( f"{f_out}, but should be {case.result_expr} [{func_name}()]\n" f"condition: {case.cond_expr}\n" f"{f_in}" ) break assume(good_example) x1_strat, x2_strat = hh.two_mutual_arrays( dtypes=dh.float_dtypes, two_shapes=hh.mutually_broadcastable_shapes(2, min_side=1), ) @pytest.mark.parametrize("func_name, func, case", binary_params) @given(x1=x1_strat, x2=x2_strat, data=st.data()) def test_binary(func_name, func, case, x1, x2, data): result_shape = sh.broadcast_shapes(x1.shape, x2.shape) all_indices = list(sh.iter_indices(x1.shape, x2.shape, result_shape)) indices_strat = st.shared(st.sampled_from(all_indices)) set_x1_idx = data.draw(indices_strat.map(lambda t: t[0]), label="set x1 idx") set_x1_value = data.draw(case.x1_cond_from_dtype(x1.dtype), label="set x1 value") x1[set_x1_idx] = set_x1_value note(f"{x1=}") set_x2_idx = data.draw(indices_strat.map(lambda t: t[1]), label="set x2 idx") set_x2_value = data.draw(case.x2_cond_from_dtype(x2.dtype), label="set x2 value") x2[set_x2_idx] = set_x2_value note(f"{x2=}") res = func(x1, x2) # sanity check ph.assert_result_shape(func_name, [x1.shape, x2.shape], res.shape, result_shape) good_example = False for l_idx, r_idx, o_idx in all_indices: l = float(x1[l_idx]) r = float(x2[r_idx]) if case.cond(l, r): good_example = True o = float(res[o_idx]) f_left = f"{sh.fmt_idx('x1', l_idx)}={l}" 
f_right = f"{sh.fmt_idx('x2', r_idx)}={r}" f_out = f"{sh.fmt_idx('out', o_idx)}={o}" assert case.check_result(l, r, o), ( f"{f_out}, but should be {case.result_expr} [{func_name}()]\n" f"condition: {case}\n" f"{f_left}, {f_right}" ) break assume(good_example) @pytest.mark.parametrize("iop_name, iop, case", iop_params) @given( oneway_dtypes=oneway_promotable_dtypes(dh.float_dtypes), oneway_shapes=oneway_broadcastable_shapes(), data=st.data(), ) def test_iop(iop_name, iop, case, oneway_dtypes, oneway_shapes, data): x1 = data.draw( xps.arrays(dtype=oneway_dtypes.result_dtype, shape=oneway_shapes.result_shape), label="x1", ) x2 = data.draw( xps.arrays(dtype=oneway_dtypes.input_dtype, shape=oneway_shapes.input_shape), label="x2", ) all_indices = list(sh.iter_indices(x1.shape, x2.shape, x1.shape)) indices_strat = st.shared(st.sampled_from(all_indices)) set_x1_idx = data.draw(indices_strat.map(lambda t: t[0]), label="set x1 idx") set_x1_value = data.draw(case.x1_cond_from_dtype(x1.dtype), label="set x1 value") x1[set_x1_idx] = set_x1_value note(f"{x1=}") set_x2_idx = data.draw(indices_strat.map(lambda t: t[1]), label="set x2 idx") set_x2_value = data.draw(case.x2_cond_from_dtype(x2.dtype), label="set x2 value") x2[set_x2_idx] = set_x2_value note(f"{x2=}") res = xp.asarray(x1, copy=True) iop(res, x2) # sanity check ph.assert_result_shape(iop_name, [x1.shape, x2.shape], res.shape) good_example = False for l_idx, r_idx, o_idx in all_indices: l = float(x1[l_idx]) r = float(x2[r_idx]) if case.cond(l, r): good_example = True o = float(res[o_idx]) f_left = f"{sh.fmt_idx('x1', l_idx)}={l}" f_right = f"{sh.fmt_idx('x2', r_idx)}={r}" f_out = f"{sh.fmt_idx('out', o_idx)}={o}" assert case.check_result(l, r, o), ( f"{f_out}, but should be {case.result_expr} [{iop_name}()]\n" f"condition: {case}\n" f"{f_left}, {f_right}" ) break assume(good_example)
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for forward-mode automatic differentiation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading from tensorflow.python import pywrap_tfe from tensorflow.python.eager import backprop from tensorflow.python.eager import backprop_util from tensorflow.python.eager import def_function from tensorflow.python.eager import execute from tensorflow.python.eager import forwardprop_util from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # Dictionary mapping from op names to special-cased jvp functions. Otherwise # backward functions are transposed on the tape. _SPECIAL_CASES = {} def _identity_jvp(attr_tuple, inputs, outputs, tangents): # Special-cased mostly for resource handles, where creating ones Tensors from # handle data for transposing the backward function on the tape is error-prone # (even if we get good handle data, partially defined shapes are an issue). del attr_tuple, inputs, outputs return [array_ops.identity(t) for t in tangents] _SPECIAL_CASES["Identity"] = _identity_jvp def _read_variable_jvp(attr_tuple, inputs, outputs, tangents): # Like for Identity, this special case means we don't need to create # variable-shaped Tensors from resource handles. del attr_tuple, inputs, outputs return [array_ops.identity(t) for t in tangents] _SPECIAL_CASES["ReadVariableOp"] = _read_variable_jvp _TRACE_COUNT_CONSISTENCY_LOCK = threading.Lock() # Map from op names to number of traces of _jvp_helper. Used to cap the number # of traces due to shape differences while still specializing where possible. _TRACE_COUNT = {} def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents): """Computes a Jacobian-vector product for an op. Note that this function would be wasteful if executed eagerly. It runs the backward gradient function and throws away the result just to record its operations on a GradientTape. These unused ops are pruned away when this function is traced. Args: op_name: A string, the type of operation being executed. attr_tuple: Attributes of the operation. inputs: A flat list of input Tensors to the operation. outputs: A flat list of output Tensors from the operation. tangents: A flat list of Tensors, same shape as `inputs`. Returns: A flat list of tangents corresponding to `outputs`. """ with _TRACE_COUNT_CONSISTENCY_LOCK: # Just make sure writes don't clobber each other's increments; reads in # _jvp_dispatch do not lock. 
_TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1 special_case = _SPECIAL_CASES.get(op_name, None) if special_case is not None: return special_case(attr_tuple, inputs, outputs, tangents) if not outputs: # tape.gradients([], inputs) doesn't make much sense return [] # Generally inner GradientTapes won't function while outer accumulators are # recording. We temporarily reset forwardprop state to allow GradientTapes to # function here. with forwardprop_util.push_forwardprop_state(): trainable_inputs = [] trainable_indices = [] nontrivial_tangents = [] for input_index, tensor in enumerate(inputs): if backprop_util.IsTrainable(tensor): trainable_inputs.append(tensor) trainable_indices.append(input_index) nontrivial_tangents.append(tangents[input_index]) with backprop.GradientTape() as transpose_tape: with backprop.GradientTape() as backfunc_tape: backfunc_tape.watch(trainable_inputs) execute.record_gradient(op_name, inputs, attr_tuple, outputs) forwardprop_aids = [] trainable_outputs = [] nontrivial_output_indices = [] for output_index, output in enumerate(outputs): if backprop_util.IsTrainable(output): forwardprop_aids.append( array_ops.ones_like(output, name="unused_forwardprop_aid")) trainable_outputs.append(output) nontrivial_output_indices.append(output_index) transpose_tape.watch(forwardprop_aids) grads = backfunc_tape.gradient( trainable_outputs, trainable_inputs, forwardprop_aids, unconnected_gradients=UnconnectedGradients.ZERO) nontrivial_output_tangents = transpose_tape.gradient( grads, forwardprop_aids, output_gradients=nontrivial_tangents) output_tangents = [None] * len(outputs) for index, tangent in zip(nontrivial_output_indices, nontrivial_output_tangents): output_tangents[index] = tangent return output_tangents # TODO(allenl): experimental_relax_shapes for gradients which rely on static # shape information are underspecialized. We may want hand-written forward # implementations, or a more satisfying story about how we re-specialize # gradients which were traced with relaxed shapes (e.g. use conds instead of # trace-time Python logic). _jvp_relaxed_shapes = def_function.function( _jvp_helper, experimental_relax_shapes=True) _jvp_exact_shapes = def_function.function( _jvp_helper, experimental_relax_shapes=False) # The maximum number of exact-shape traces to perform for a single op before # switching to shape relaxation. _TRACE_COUNT_LIMIT = 32 def _jvp_dispatch(op_name, attr_tuple, inputs, outputs, tangents): """Determine which forwardprop function to call.""" # Note that this _TRACE_COUNT read races with writes. That's fine, it just # means we may trace a few more exact shapes before moving on to relaxation. if _TRACE_COUNT.get(op_name, 0) < _TRACE_COUNT_LIMIT: return _jvp_exact_shapes( op_name, attr_tuple, inputs, outputs, tangents) else: return _jvp_relaxed_shapes( op_name, attr_tuple, inputs, outputs, tangents) pywrap_tfe.TFE_Py_RegisterJVPFunction(_jvp_dispatch) @tf_export("autodiff.ForwardAccumulator", v1=[]) class ForwardAccumulator(object): """Computes Jacobian-vector products ("JVP"s) using forward-mode autodiff. Compare to `tf.GradientTape` which computes vector-Jacobian products ("VJP"s) using reverse-mode autodiff (backprop). Reverse mode is more attractive when computing gradients of a scalar-valued function with respect to many inputs (e.g. a neural network with many parameters and a scalar loss). Forward mode works best on functions with many outputs and few inputs. 
Since it does not hold on to intermediate activations, it is much more memory efficient than backprop where it is applicable. Consider a simple linear regression: >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) >>> dense = tf.keras.layers.Dense(1) >>> dense.build([None, 2]) >>> with tf.autodiff.ForwardAccumulator( ... primals=dense.kernel, ... tangents=tf.constant([[1.], [0.]])) as acc: ... loss = tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.) >>> acc.jvp(loss) <tf.Tensor: shape=(), dtype=float32, numpy=...> The example has two variables containing parameters, `dense.kernel` (2 parameters) and `dense.bias` (1 parameter). Considering the training data `x` as a constant, this means the Jacobian matrix for the function mapping from parameters to loss has one row and three columns. With forwardprop, we specify a length-three vector in advance which multiplies the Jacobian. The `primals` constructor argument is the parameter (a `tf.Tensor` or `tf.Variable`) we're specifying a vector for, and the `tangents` argument is the "vector" in Jacobian-vector product. If our goal is to compute the entire Jacobian matrix, forwardprop computes one column at a time while backprop computes one row at a time. Since the Jacobian in the linear regression example has only one row, backprop requires fewer invocations: >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) >>> dense = tf.keras.layers.Dense(1) >>> dense.build([None, 2]) >>> loss_fn = lambda: tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.) >>> kernel_fprop = [] >>> with tf.autodiff.ForwardAccumulator( ... dense.kernel, tf.constant([[1.], [0.]])) as acc: ... kernel_fprop.append(acc.jvp(loss_fn())) >>> with tf.autodiff.ForwardAccumulator( ... dense.kernel, tf.constant([[0.], [1.]])) as acc: ... kernel_fprop.append(acc.jvp(loss_fn())) >>> with tf.autodiff.ForwardAccumulator(dense.bias, tf.constant([1.])) as acc: ... bias_fprop = acc.jvp(loss_fn()) >>> with tf.GradientTape() as tape: ... loss = loss_fn() >>> kernel_grad, bias_grad = tape.gradient(loss, (dense.kernel, dense.bias)) >>> np.testing.assert_allclose( ... kernel_grad, tf.stack(kernel_fprop)[:, tf.newaxis]) >>> np.testing.assert_allclose(bias_grad, bias_fprop[tf.newaxis]) Implicit in the `tape.gradient` call is a length-one vector which left-multiplies the Jacobian, a vector-Jacobian product. `ForwardAccumulator` maintains JVPs corresponding primal tensors it is watching, derived from the original `primals` specified in the constructor. As soon as a primal tensor is deleted, `ForwardAccumulator` deletes the corresponding JVP. `acc.jvp(x)` retrieves `acc`'s JVP corresponding to the primal tensor `x`. It does not perform any computation. `acc.jvp` calls can be repeated as long as `acc` is accessible, whether the context manager is active or not. New JVPs are only computed while the context manager is active. Note that `ForwardAccumulator`s are always applied in the order their context managers were entered, so inner accumulators will not see JVP computation from outer accumulators. Take higher-order JVPs from outer accumulators: >>> primal = tf.constant(1.1) >>> with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as outer: ... with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as inner: ... 
primal_out = primal ** tf.constant(3.5) >>> inner_jvp = inner.jvp(primal_out) >>> inner_jvp # 3.5 * 1.1 ** 2.5 <tf.Tensor: shape=(), dtype=float32, numpy=4.4417057> >>> outer.jvp(inner_jvp) # 3.5 * 2.5 * 1.1 ** 1.5 <tf.Tensor: shape=(), dtype=float32, numpy=10.094786> Reversing the collection in the last line to instead retrieve `inner.jvp(outer.jvp(primal_out))` will not work. Strict nesting also applies to combinations of `ForwardAccumulator` and `tf.GradientTape`. More deeply nested `GradientTape` objects will ignore the products of outer `ForwardAccumulator` objects. This allows (for example) memory-efficient forward-over-backward computation of Hessian-vector products, where the inner `GradientTape` would otherwise hold on to all intermediate JVPs: >>> v = tf.Variable([1., 2.]) >>> with tf.autodiff.ForwardAccumulator( ... v, ... # The "vector" in Hessian-vector product. ... tf.constant([1., 0.])) as acc: ... with tf.GradientTape() as tape: ... y = tf.reduce_sum(v ** 3.) ... backward = tape.gradient(y, v) >>> backward # gradient from backprop <tf.Tensor: shape=(2,), dtype=float32, numpy=array([ 3., 12.], dtype=float32)> >>> acc.jvp(backward) # forward-over-backward Hessian-vector product <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 0.], dtype=float32)> """ def __init__(self, primals, tangents): """Specify tensors to watch and their Jacobian-vector products. Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while this accumulator is active. Since JVPs are computed in forward mode as the computation happens, this vector must be supplied in advance. Listing a single tensor multiple times in `primals` raises an exception. Excluding a tensor from `primals` is equivalent to watching it with a tangent tensor of zeros. Args: primals: A tensor or nested structure of tensors to watch. tangents: A tensor or nested structure of tensors, with the same nesting structure as `primals`, with each element being a vector with the same size as the corresponding primal element. Raises: ValueError: If the same tensor or variable is specified multiple times in `primals`. """ self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew() self._recording = False primal_ids = set() for primal in nest.flatten(primals): if id(primal) in primal_ids: raise ValueError( "Tensor {} was specified as a primal multiple times. This may " "indicate an error. If it was intended, please sum the " "corresponding tangents.") primal_ids.add(id(primal)) self._watch(primals, tangents) def __enter__(self): self._push_accumulator() return self def __exit__(self, typ, value, traceback): if self._recording: self._pop_accumulator() def _push_accumulator(self): if self._recording: raise ValueError("Accumulator is already recording.") pywrap_tfe.TFE_Py_ForwardAccumulatorSetAdd(self._accumulator) self._recording = True def _pop_accumulator(self): if not self._recording: raise ValueError("Accumulator is not recording.") pywrap_tfe.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator) self._recording = False def _watch(self, primals, tangents): """Ensures that `primals` are being traced by this accumulator. Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while this accumulator is active. Since JVPs are computed in forward mode as the computation happens, this vector must be supplied in advance. Watching a single tensor multiple times sums each of its `tangents`. 
Any un-watched tensor has zeros for its tangent vector. Args: primals: A Tensor or list of Tensors. tangents: A Tensor or list of Tensors matching `primals`. """ nest.assert_same_structure(primals, tangents) for t, g in zip(nest.flatten(primals), nest.flatten(tangents)): if not t.dtype.is_floating: logging.log_first_n( logging.WARN, "The dtype of the watched primal must be " "floating (e.g. tf.float32), got %r", 5, t.dtype) g = ops.convert_to_tensor(g, dtype=t.dtype) if hasattr(t, "handle"): # Run convert_to_tensor to get the captured handle from whichever # function we're running if necessary. t = ops.convert_to_tensor(t.handle) pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, t, g) def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE): """Fetches the Jacobian-vector product computed for `primals`. Note that this method performs no computation, and simply looks up a JVP that was already computed (unlike backprop using a `tf.GradientTape`, where the computation happens on the call to `tape.gradient`). Args: primals: A watched Tensor or structure of Tensors to fetch the JVPs for. unconnected_gradients: A value which can either hold 'none' or 'zero' and alters the value which will be returned if no JVP was computed for `primals`. The possible values and effects are detailed in 'tf.UnconnectedGradients' and it defaults to 'none'. Returns: Tensors with the same shapes and dtypes as `primals`, or None if no JVP is available. """ unconnected_gradients = UnconnectedGradients(unconnected_gradients) if self._accumulator is None: raise ValueError("Called jvp() without first tracing anything.") def _fetch_jvp(tensor): if hasattr(tensor, "handle"): tensor = ops.convert_to_tensor(tensor.handle) result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator, tensor) if result is None and unconnected_gradients == UnconnectedGradients.ZERO: return array_ops.zeros_like(tensor) return result return nest.map_structure(_fetch_jvp, primals)
from python_tsp.exact import solve_tsp_dynamic_programming


def alle_kanten(graph, knoten):
    """Collect every edge between the given nodes (all unordered pairs)."""
    kanten = []  # renamed from `alle_kanten` so the list does not shadow the function name
    for index1 in range(len(knoten)):
        for index2 in range(index1 + 1, len(knoten)):
            knoten1 = knoten[index1]
            knoten2 = knoten[index2]
            kanten.append(graph[knoten1][knoten2])
    return kanten


def tsp_lower_bound(graph, knoten, depot):
    """Lower bound for a tour through `knoten` and the depot: the sum of the
    cheapest len(knoten)+1 edge distances. Note: appends the depot to the
    `knoten` list passed in."""
    if len(knoten) == 1:
        return 2 * graph[knoten[0]][depot].distanz
    knoten.append(depot)
    alle_kanten_kosten = [kante.distanz for kante in alle_kanten(graph, knoten)]
    return sum(sorted(alle_kanten_kosten)[:len(knoten)])


def tsp_upper_bound(graph, knoten, depot):
    """Upper bound: cost of visiting the nodes in list order and returning to
    the depot. Note: appends the depot to the `knoten` list passed in."""
    knoten.append(depot)
    result = 0
    for index in range(len(knoten)):
        result += graph[knoten[index - 1]][knoten[index]].distanz
    return result
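# --- Illustrative usage sketch (not part of the original file) ---
# The helpers above only require that graph[a][b] exposes a `distanz` attribute.
# The Kante namedtuple and the toy graph below are assumptions made for this
# example; the real project presumably supplies its own edge objects.
from collections import namedtuple

Kante = namedtuple('Kante', 'distanz')

_toy_graph = {
    0: {1: Kante(2.0), 2: Kante(4.0), 'D': Kante(1.0)},
    1: {0: Kante(2.0), 2: Kante(3.0), 'D': Kante(2.5)},
    2: {0: Kante(4.0), 1: Kante(3.0), 'D': Kante(1.5)},
    'D': {0: Kante(1.0), 1: Kante(2.5), 2: Kante(1.5)},
}

# Fresh lists are passed each time because both bounds append the depot to them.
print(tsp_lower_bound(_toy_graph, [0, 1, 2], 'D'))  # sum of the cheapest edges
print(tsp_upper_bound(_toy_graph, [0, 1, 2], 'D'))  # cost of the tour 0-1-2-D-0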
from os.path import join, basename, splitext import os, glob, random import numpy import scipy.io import mne import pandas from autoreject import AutoReject from eegprep.bids.naming import filename2tuple from eegprep.guess import guess_montage from eegprep.util import ( resample_events_on_resampled_epochs, plot_rejectlog, save_rejectlog ) from eegprep.configuration import Configuration from eegprep.defaults import defaults def run_preproc(datadir='/data'): print('data directory: {}'.format(datadir)) conf_file_path = join(datadir, 'eegprep.conf') config = Configuration() config.setDefaults(defaults) if os.path.isfile(conf_file_path): with open(conf_file_path) as fh: conf_string = fh.read() config.updateFromString(conf_string) print('configuration:') print(config) bidsdir = join(datadir, 'BIDS') eegprepdir = join(bidsdir, 'derivatives', 'eegprep') subjectdirs = sorted(glob.glob(join(bidsdir, 'sub-*'))) for subjectdir in subjectdirs: assert os.path.isdir(subjectdir) sub = basename(subjectdir)[4:] # prepare derivatives directory derivdir = join(eegprepdir, 'sub-' + sub) os.makedirs(derivdir, exist_ok=True) reportsdir = join(eegprepdir, 'reports', 'sub-' + sub) os.makedirs(reportsdir, exist_ok=True) subject_epochs = {} rawtypes = {'.set': mne.io.read_raw_eeglab, '.bdf': mne.io.read_raw_edf} for fname in sorted(glob.glob(join(subjectdir, 'eeg', '*'))): _, ext = splitext(fname) if ext not in rawtypes.keys(): continue sub, ses, task, run = filename2tuple(basename(fname)) print('\nProcessing raw file: ' + basename(fname)) # read data raw = rawtypes[ext](fname, preload=True, verbose=False) events = mne.find_events(raw) #raw, consecutive=False, min_duration=0.005) # Set channel types and select reference channels channelFile = fname.replace('eeg' + ext, 'channels.tsv') channels = pandas.read_csv(channelFile, index_col='name', sep='\t') bids2mne = { 'MISC': 'misc', 'EEG': 'eeg', 'VEOG': 'eog', 'TRIG': 'stim', 'REF': 'eeg', } channels['mne'] = channels.type.replace(bids2mne) # the below fails if the specified channels are not in the data raw.set_channel_types(channels.mne.to_dict()) # set bad channels raw.info['bads'] = channels[channels.status=='bad'].index.tolist() # pick channels to use for epoching epoching_picks = mne.pick_types(raw.info, eeg=True, eog=False, stim=False, exclude='bads') # Filtering #raw.filter(l_freq=0.05, h_freq=40, fir_design='firwin') montage = mne.channels.read_montage(guess_montage(raw.ch_names)) print(montage) raw.set_montage(montage) # plot raw data nchans = len(raw.ch_names) pick_channels = numpy.arange(0, nchans, numpy.floor(nchans/20)).astype(int) start = numpy.round(raw.times.max()/2) fig = raw.plot(start=start, order=pick_channels) fname_plot = 'sub-{}_ses-{}_task-{}_run-{}_raw.png'.format(sub, ses, task, run) fig.savefig(join(reportsdir, fname_plot)) # Set reference refChannels = channels[channels.type=='REF'].index.tolist() raw.set_eeg_reference(ref_channels=refChannels) ## epoching epochs_params = dict( events=events, tmin=-0.1, tmax=0.8, reject=None, # dict(eeg=250e-6, eog=150e-6) picks=epoching_picks, detrend=0, ) file_epochs = mne.Epochs(raw, preload=True, **epochs_params) file_epochs.drop_channels(refChannels) # autoreject (under development) ar = AutoReject(n_jobs=4) clean_epochs = ar.fit_transform(file_epochs) rejectlog = ar.get_reject_log(clean_epochs) fname_log = 'sub-{}_ses-{}_task-{}_run-{}_reject-log.npz'.format(sub, ses, task, run) save_rejectlog(join(reportsdir, fname_log), rejectlog) fig = plot_rejectlog(rejectlog) fname_plot = 
'sub-{}_ses-{}_task-{}_run-{}_bad-epochs.png'.format(sub, ses, task, run) fig.savefig(join(reportsdir, fname_plot)) # store for now subject_epochs[(ses, task, run)] = clean_epochs # create evoked plots conds = clean_epochs.event_id.keys() selected_conds = random.sample(conds, min(len(conds), 6)) picks = mne.pick_types(clean_epochs.info, eeg=True) for cond in selected_conds: evoked = clean_epochs[cond].average() fname_plot = 'sub-{}_ses-{}_task-{}_run-{}_evoked-{}.png'.format(sub, ses, task, run, cond) fig = evoked.plot_joint(picks=picks) fig.savefig(join(reportsdir, fname_plot)) sessSeg = 0 sessions = sorted(list(set([k[sessSeg] for k in subject_epochs.keys()]))) for session in sessions: taskSeg = 1 tasks = list(set([k[taskSeg] for k in subject_epochs.keys() if k[sessSeg]==session])) for task in tasks: print('\nGathering epochs for session {} task {}'.format(session, task)) epochs_selection = [v for (k, v) in subject_epochs.items() if k[:2]==(session, task)] task_epochs = mne.epochs.concatenate_epochs(epochs_selection) # downsample if configured to do so # important to do this after concatenation because # downsampling may cause rejection for 'TOOSHORT' if config['downsample'] < task_epochs.info['sfreq']: task_epochs = task_epochs.copy().resample(config['downsample'], npad='auto') ext = config['out_file_format'] fname = join(derivdir, 'sub-{}_ses-{}_task-{}_epo.{}'.format(sub, session, task, ext)) variables = { 'epochs': task_epochs.get_data(), 'events': task_epochs.events, 'timepoints': task_epochs.times } if ext == 'fif': task_epochs.save(fname) elif ext == 'mat': scipy.io.savemat(fname, mdict=variables) elif ext == 'npy': numpy.savez(fname, **variables)
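# Hypothetical invocation of the pipeline above (not in the original script):
# it assumes `datadir` contains a BIDS dataset under BIDS/ and, optionally, an
# eegprep.conf file, exactly as read by run_preproc().
if __name__ == '__main__':
    run_preproc(datadir='/data')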
import math import random from Instrucciones.TablaSimbolos.Instruccion import Instruccion from Instrucciones.TablaSimbolos import Instruccion3D as c3d from Optimizador.C3D import Valor as ClassValor from Optimizador.C3D import OP_ARITMETICO as ClassOP_ARITMETICO from Optimizador.C3D import Identificador as ClassIdentificador class SetSeed(Instruccion): def __init__(self, valor, tipo, strGram, linea, columna): Instruccion.__init__(self,tipo,linea,columna, strGram) self.valor = valor def ejecutar(self, tabla, arbol): super().ejecutar(tabla,arbol) #print(random.seed(self.valor)) arbol.consola.append('Función en proceso...') def generar3D(self, tabla, arbol): super().generar3D(tabla,arbol) code = [] t0 = c3d.getLastTemporal() t1 = c3d.getTemporal() code.append(c3d.operacion(t1, ClassIdentificador(t0), ClassValor("\"SETSEED(" + str(self.valor.generar3D(tabla, arbol)) + ")\"", "STRING"), ClassOP_ARITMETICO.SUMA)) return code
import config
import time
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json
from datetime import datetime

from aiohttp import web
from jinja2 import Environment, FileSystemLoader

from config import configs
import orm
# NOTE: this assumes the project's coroweb module also exports the `get` route
# decorator used below; if it lives elsewhere, adjust the import accordingly.
from coroweb import add_routes, add_static, add_vue_static, get
from api_handlers import cookie2user, COOKIE_NAME

print(time.time())


@get("/")
async def parse_data(request):
    # Resolve the logged-in user from the session cookie, if any.
    cookie_str = request.cookies.get(COOKIE_NAME)
    if cookie_str:
        user = await cookie2user(cookie_str)
        if user:
            logging.info('set current user: %s' % user.email)
            request.__user__ = user
            print(user)


# parse_data() is a request handler and needs an aiohttp request object; in the
# full application it is registered on the web app (e.g. via add_routes) rather
# than being awaited directly as below.
loop = asyncio.get_event_loop()
loop.run_until_complete(parse_data())
loop.run_forever()
from django.urls import path, include
from . import views

urlpatterns = (
    path('admins/', views.DashboardView.as_view(), name='admins_dashboard'),

    # urls for Product
    path('admins/product/', views.ProductListView.as_view(), name='admins_product_list'),
    path('admins/product/create/', views.ProductCreateView.as_view(), name='admins_product_create'),
    path('admins/product/detail/<int:pk>/', views.ProductDetailView.as_view(), name='admins_product_detail'),
    path('admins/product/update/<int:pk>/', views.ProductUpdateView.as_view(), name='admins_product_update'),
)

urlpatterns += (
    # urls for Order
    path('admins/order/', views.OrderListView.as_view(), name='admins_order_list'),
    path('admins/order/create/', views.OrderCreateView.as_view(), name='admins_order_create'),
    path('admins/order/detail/<int:pk>/', views.OrderDetailView.as_view(), name='admins_order_detail'),
    path('admins/order/update/<int:pk>/', views.OrderUpdateView.as_view(), name='admins_order_update'),
    path('admins/order/invoice/<int:pk>/', views.InvoiceView.as_view(), name='admins_order_invoice'),
)

urlpatterns += (
    # urls for Cart
    path('admins/cart/', views.CartListView.as_view(), name='admins_cart_list'),
    path('admins/cart/create/', views.CartCreateView.as_view(), name='admins_cart_create'),
    path('admins/cart/detail/<int:pk>/', views.CartDetailView.as_view(), name='admins_cart_detail'),
    path('admins/cart/update/<int:pk>/', views.CartUpdateView.as_view(), name='admins_cart_update'),
)

urlpatterns += (
    path('api/product/', views.ProductListAPI.as_view(), name='product_list'),
    path('api/order/', views.ProcessOrderAPI.as_view(), name='process_order'),
    path('api/order/<int:pk>/delete/', views.DeleteOrderAPI.as_view(), name='delete_order'),
    path('api/return/<int:pk>/', views.ReturnAPI.as_view(), name='return_order'),
)
""" Re-Space: Oh, no! You have accidentally removed all spaces, punctuation, and capitalization in a lengthy document. A sentence like "I reset the computer. It still didn`t boot!" became"iresetthecomputeritstilldidntboot': You'll deal with the punctuation and capi- talization later; right now you need to re-insert the spaces. Most of the words are in a dictionary but a few are not. Given a dictionary (a list of strings) and the document (a string), design an algorithm to unconcatenate the document in a way that minimizes the number of unrecognized characters. In - document: str, dictionary: List[str] string: iresetthecomputeritstilldidntboot dictionary: reset it still boot thec computer i reset thec [omputer] it still didnt boot or i reset [the] computer it still didnt boot receive a string and test the different positions that a space can fit loop i in string if substring not in words: invalid = len(substring) min_ = min(min_, invalid + solve(string[i+2:])) Uses memoization to keep the time complexity at O(n^3) where n is the size of the document """ import collections import math from typing import List, Set, Union, Dict SplitResult = collections.namedtuple('SplitResult', 'words invalid') Cache = Dict[str, SplitResult] def re_space(document: str, dictionary: List[str]) -> str: cache: Cache = {} split: SplitResult = split_words(document, set(dictionary), cache) return ' '.join(split.words) def split_words(document: str, dictionary: Set[str], cache: Cache) -> SplitResult: if not document: return SplitResult([], 0) if document in cache: return cache[document] min_invalids: Union[int, float] = math.inf words: List[str] = [] for i in range(len(document)): curr_word: str = document[0:i+1] curr_splits: SplitResult = split_words(document[i+1:], dictionary, cache) curr_invalids: int = curr_splits.invalid if curr_word not in dictionary: curr_invalids += len(curr_word) if curr_invalids <= min_invalids: min_invalids = curr_invalids words = [curr_word] + curr_splits.words cache[document] = SplitResult(words, min_invalids) return cache[document] print(re_space('ires', ['r', 're'])) print(re_space('iresetthecomputeritstilldidntboot', ['reset', 'it', 'still', 'thec', 'computer', 'boot']))
from django.contrib import admin

# Register your models here.
from .models import Event,FAQ

admin.site.register(Event)
admin.site.register(FAQ)
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class AlipayOpenAppQrcodeCreateModel(object): def __init__(self): self._color = None self._describe = None self._query_param = None self._size = None self._url_param = None @property def color(self): return self._color @color.setter def color(self, value): self._color = value @property def describe(self): return self._describe @describe.setter def describe(self, value): self._describe = value @property def query_param(self): return self._query_param @query_param.setter def query_param(self, value): self._query_param = value @property def size(self): return self._size @size.setter def size(self, value): self._size = value @property def url_param(self): return self._url_param @url_param.setter def url_param(self, value): self._url_param = value def to_alipay_dict(self): params = dict() if self.color: if hasattr(self.color, 'to_alipay_dict'): params['color'] = self.color.to_alipay_dict() else: params['color'] = self.color if self.describe: if hasattr(self.describe, 'to_alipay_dict'): params['describe'] = self.describe.to_alipay_dict() else: params['describe'] = self.describe if self.query_param: if hasattr(self.query_param, 'to_alipay_dict'): params['query_param'] = self.query_param.to_alipay_dict() else: params['query_param'] = self.query_param if self.size: if hasattr(self.size, 'to_alipay_dict'): params['size'] = self.size.to_alipay_dict() else: params['size'] = self.size if self.url_param: if hasattr(self.url_param, 'to_alipay_dict'): params['url_param'] = self.url_param.to_alipay_dict() else: params['url_param'] = self.url_param return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayOpenAppQrcodeCreateModel() if 'color' in d: o.color = d['color'] if 'describe' in d: o.describe = d['describe'] if 'query_param' in d: o.query_param = d['query_param'] if 'size' in d: o.size = d['size'] if 'url_param' in d: o.url_param = d['url_param'] return o
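# Illustrative round trip (not part of the generated SDK file); the parameter
# values below are placeholders.
if __name__ == '__main__':
    model = AlipayOpenAppQrcodeCreateModel()
    model.url_param = 'page/index/index'
    model.query_param = 'x=1'
    model.describe = 'demo qrcode'
    model.size = 's'
    payload = model.to_alipay_dict()
    restored = AlipayOpenAppQrcodeCreateModel.from_alipay_dict(payload)
    print(payload, restored.describe)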
<filename>demo/cict_demo/cict_train_GAN.py import os import time import json import glob import torch import torch.optim as optim import torchvision.transforms as transforms from easydict import EasyDict from PIL import Image from torch.utils.data import DataLoader, WeightedRandomSampler from torchvision.transforms.transforms import Grayscale from core.data.cict_dataset import CictDataset from demo.cict_demo.cict_model import GeneratorUNet, Discriminator from core.utils.learner_utils.loss_utils import Loss #from core.utils.learner_utils.optim_utils import adjust_learning_rate_auto from core.utils.others.checkpoint_helper import is_ready_to_save, get_latest_saved_checkpoint from core.utils.others.general_helper import create_log_folder, create_exp_path, erase_logs train_config = dict( NUMBER_OF_LOADING_WORKERS=4, IMG_HEIGHT=128, IMG_WIDTH=256, SENSORS=dict(rgb=[3, 360, 640]), DEST=0, # choose bird-view destination (0) or camera-view destination (1) START_EPISODE=0, # set which episodes for training END_EPISODE=37, BATCH_SIZE=32, COMMON=dict(folder='sample', exp='cict_GAN', dataset_path='datasets'), GPU='0', SAVE_INTERVAL=1000, MAX_CKPT_SAVE_NUM=40, N_EPOCHS=60, SPEED_FACTOR=25.0, TRAIN_DATASET_NAME='cict_datasets_train', MODEL_TYPE='cict_GAN', PREFIX='_preloads', UNPAIRED=False, GAN=False, MODEL_CONFIGURATION=dict( generator=dict( down_channels=[6, 64, 128, 256, 512, 512, 512, 512], up_channels=[0, 512, 512, 512, 256, 128, 64], kernel_size=4, stride=2, padding=1, down_norm=[False, True, True, True, True, True, False], up_norm=[True, True, True, True, True, True], down_dropout=[0, 0, 0, 0.5, 0.5, 0.5, 0.5], up_dropout=[0.5, 0.5, 0.5, 0, 0, 0], final_channels=1, num_branches=1, ), discriminator=dict( channels=[7, 64, 128, 256, 512], kernel_size=4, stride=2, padding=1, norm=[False, True, True, True], dropout=[0, 0, 0, 0] ) ), PRE_TRAINED=False, LEARNING_RATE=0.0003, BETA1=0.5, BETA2=0.999, GAN_LOSS_FUNCTION='MSE', PIXEL_LOSS_FUNCTION='L1', PIXEL_LOSS_WEIGHT=2, PRELOAD_MODEL_ALIAS=None, PRELOAD_MODEL_BATCH=None, PRELOAD_MODEL_CHECKPOINT=None, REMOVE=None, ) def write_params(log_path, config): with open(os.path.join(log_path, 'params.json'), 'w+') as f: json.dump('# Params', f) json.dump(config, f) def remove_old_ckpt(ckpt_path, cfg): # get infos of all saved checkpoints ckpt_list = glob.glob(os.path.join(ckpt_path, '*.pth')) # sort checkpoints by saving time ckpt_list.sort(key=os.path.getmtime) # remove surplus ckpt file if the number is larger than max_ckpt_save_num if len(ckpt_list) >= cfg.MAX_CKPT_SAVE_NUM: for cur_file_idx in range(0, len(ckpt_list) - cfg.MAX_CKPT_SAVE_NUM + 1): os.remove(ckpt_list[cur_file_idx]) def execute(cfg): gpu = cfg.GPU exp_batch = cfg.COMMON.folder exp_alias = cfg.COMMON.exp os.environ["CUDA_VISIBLE_DEVICES"] = gpu if cfg.PRELOAD_MODEL_ALIAS is not None: checkpoint = torch.load( os.path.join( '_logs', cfg.PRELOAD_MODEL_BATCH, cfg.PRELOAD_MODEL_ALIAS, 'checkpoints', str(cfg.PRELOAD_MODEL_CHECKPOINT) + '.pth' ) ) checkpoint_file = get_latest_saved_checkpoint(exp_batch, exp_alias) if checkpoint_file is not None: checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias, 'checkpoints', checkpoint_file)) iteration = checkpoint['iteration'] best_loss_G = checkpoint['best_loss_G'] best_loss_iter_G = checkpoint['best_loss_iter_G'] best_loss_D = checkpoint['best_loss_D'] best_loss_iter_D = checkpoint['best_loss_iter_D'] else: if not os.path.exists(os.path.join('_logs', exp_batch, exp_alias, 'checkpoints')): os.mkdir(os.path.join('_logs', exp_batch, exp_alias, 
'checkpoints')) iteration = 0 best_loss_G = 10000.0 best_loss_iter_G = 0 best_loss_D = 10000.0 best_loss_iter_D = 0 write_params(os.path.join('_logs', exp_batch, exp_alias), train_config) full_dataset = os.path.join(cfg.COMMON.dataset_path, cfg.TRAIN_DATASET_NAME) pm_transforms = [ transforms.Resize((cfg.IMG_HEIGHT, cfg.IMG_WIDTH), Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, )) ] img_transforms = [ transforms.Resize((cfg.IMG_HEIGHT, cfg.IMG_WIDTH), Image.BICUBIC), transforms.ColorJitter(brightness=0.2, contrast=0.2, hue=0.2), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] dest_transforms = [ transforms.Resize((cfg.IMG_HEIGHT, cfg.IMG_WIDTH), Image.BICUBIC), transforms.RandomRotation(15, resample=Image.BICUBIC, expand=False), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ] dataset = CictDataset( full_dataset, cfg, img_transform=img_transforms, dest_transform=dest_transforms, pm_transform=pm_transforms ) print("Loaded dataset") sample_weights = dataset.sample_weights() print(len(sample_weights), len(dataset)) sampler = WeightedRandomSampler(sample_weights, len(dataset)) data_loader = DataLoader( dataset, batch_size=cfg.BATCH_SIZE, sampler=sampler, shuffle=False, num_workers=cfg.NUMBER_OF_LOADING_WORKERS ) generator = GeneratorUNet(cfg.MODEL_CONFIGURATION['generator']) discriminator = Discriminator(cfg.MODEL_CONFIGURATION['discriminator']) generator.cuda() discriminator.cuda() optimizer_G = optim.Adam(generator.parameters(), lr=cfg.LEARNING_RATE, betas=(cfg.BETA1, cfg.BETA2)) optimizer_D = optim.Adam(discriminator.parameters(), lr=cfg.LEARNING_RATE, betas=(cfg.BETA1, cfg.BETA2)) if checkpoint_file is not None or cfg.PRELOAD_MODEL_ALIAS is not None: generator.load_state_dict(checkpoint['state_dict_G']) optimizer_G.load_state_dict(checkpoint['optimizer_G']) discriminator.load_state_dict(checkpoint['state_dict_D']) optimizer_D.load_state_dict(checkpoint['optimizer_D']) accumulated_time = checkpoint['total_time'] loss_window = checkpoint['loss_window'] else: # We accumulate iteration time and keep the average speed accumulated_time = 0 loss_window = [] print("Before the loss") criterion_GAN = torch.nn.MSELoss() criterion_pixel = torch.nn.L1Loss() print('Start to train ...') iteration = 0 for epoch in range(cfg.N_EPOCHS): for data in data_loader: iteration += 1 #if iteration % 1000 == 0: # adjust_learning_rate_auto( # optimizer, loss_window, cfg.LEARNING_RATE, cfg.LEARNING_RATE_THRESHOLD, # cfg.LEARNING_RATE_DECAY_LEVEL # ) capture_time = time.time() img = data['rgb'] dest = data['dest'] pm = data['pm'] command = data['command'] valid = torch.ones(img.size(0), 1, cfg.IMG_HEIGHT // 16, cfg.IMG_WIDTH // 16).cuda() fake = torch.zeros(img.size(0), 1, cfg.IMG_HEIGHT // 16, cfg.IMG_WIDTH // 16).cuda() input = torch.cat([img, dest], dim=1).cuda() pm = pm.cuda() generator.zero_grad() pm_fake = generator(input, command) #print(input.shape, pm_fake.shape) pred_fake = discriminator(pm_fake, input) loss_GAN = criterion_GAN(pred_fake, valid) loss_pixel = criterion_pixel(pm_fake, pm) if cfg.UNPAIRED: fake_dest = data['fake_dest'] fake_input = torch.cat([img, fake_dest], dim=1).cuda() pm_fake2 = generator(fake_input, command) pred_fake2 = discriminator(pm_fake2, fake_input) loss_GAN2 = criterion_GAN(pred_fake2, valid) loss_G = 0.5 * (loss_GAN + loss_GAN2) + cfg.PIXEL_LOSS_WEIGHT * loss_pixel else: if not cfg.GAN: loss_G = cfg.PIXEL_LOSS_WEIGHT * loss_pixel else: cfg.PIXEL_LOSS_WEIGHT * loss_pixel + loss_GAN 
loss_G.backward() torch.nn.utils.clip_grad_value_(generator.parameters(), clip_value=20) optimizer_G.step() if cfg.GAN: discriminator.zero_grad() pred_real = discriminator(pm, input) loss_real = criterion_GAN(pred_real, valid) pred_fake = discriminator(pm_fake.detach(), input) loss_fake = criterion_GAN(pred_fake, fake) if cfg.UNPAIRED: pred_fake2 = discriminator(pm_fake2.detach(), fake_input) loss_fake2 = criterion_GAN(pred_fake2, fake) loss_D = 0.5 * (loss_real + 0.5 * (loss_fake + loss_fake2)) else: loss_D = 0.5 * (loss_real + loss_fake) loss_D.backward() torch.nn.utils.clip_grad_value_(discriminator.parameters(), clip_value=20) optimizer_D.step() else: loss_D = torch.FloatTensor([0]).cuda() if iteration % cfg.SAVE_INTERVAL == 0: remove_old_ckpt(os.path.join('_logs', exp_batch, exp_alias, 'checkpoints'), cfg) state = { 'iteration': iteration, 'state_dict_G': generator.state_dict(), 'state_dict_D': discriminator.state_dict(), 'best_loss_G': best_loss_G, 'best_loss_D': best_loss_D, 'total_time': accumulated_time, 'optimizer_G': optimizer_G.state_dict(), 'optimizer_D': optimizer_D.state_dict(), 'best_loss_iter_G': best_loss_iter_G, 'best_loss_iter_D': best_loss_iter_D, 'loss_window': loss_window } torch.save(state, os.path.join('_logs', exp_batch, exp_alias, 'checkpoints', str(iteration) + '.pth')) if loss_G.data < best_loss_G: best_loss_G = loss_G.data.tolist() best_loss_iter_G = iteration if loss_D.data < best_loss_D: best_loss_D = loss_D.data best_loss_iter_D = iteration accumulated_time += time.time() - capture_time loss_dict = { 'loss_G': loss_G.data.tolist(), 'loss_D': loss_D.data.tolist(), 'loss_GAN': loss_GAN.data.tolist(), 'loss_pixel': loss_pixel.data.tolist(), } loss_window.append(loss_dict) print( "Iteration: %d Loss_pixel: %f Loss_GAN: %f Loss_G: %f Loss_D: %f" % (iteration, loss_pixel.data, loss_GAN.data, loss_G.data, loss_D.data) ) if __name__ == '__main__': cfg = EasyDict(train_config) create_log_folder(cfg.COMMON.folder) erase_logs(cfg.COMMON.folder) create_exp_path(cfg.COMMON.folder, cfg.COMMON.exp) execute(cfg)
from peewee import *
from playhouse.migrate import *

db = SqliteDatabase('register.db')
migrator = SqliteMigrator(db)


class BaseModel(Model):
    """Base class that specifies the database."""
    class Meta:
        database = db


class User(BaseModel):
    """Users model."""
    username = CharField()
    password = CharField()


class Student(BaseModel):
    """Students model."""
    student_name = CharField()
    checked_in = BooleanField()


class Class_(BaseModel):
    """Class model."""
    class_name = CharField()
    session = BooleanField()
    start_time = DateTimeField(default=0)
    end_time = DateTimeField(default=0)


class Checkout_Log(BaseModel):
    """Checkout model."""
    student_name = CharField()
    student_id = IntegerField()
    reason = TextField()


class Checkin(BaseModel):
    """Checkin model."""
    student = ForeignKeyField(Student, related_name='students')
    class_ = ForeignKeyField(Class_, related_name='classes')
    no_of_checkins = IntegerField(default=0)
    status = BooleanField()
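# Illustrative setup sketch (not in the original models.py): connect and create
# the tables once, e.g. at application start-up.
if __name__ == '__main__':
    db.connect()
    db.create_tables([User, Student, Class_, Checkout_Log, Checkin])
    db.close()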
<filename>hydrocode/scripts/dump_replayer.py #!/usr/bin/env python3 import socket import sys import time import numpy as np sys.path.insert(0, '../modules') import common.const import comms.const import pinger.const def getSize(file): file.seek(0, 2) size = file.tell() return size # check whether dump filename was specified try: dump_filename = sys.argv[1] except IndexError: raise Exception('Dump filename not specified') # load binary file specified from terminal dump_file = open(dump_filename, 'rb') # initialize UDP networking sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) pkt_size = common.const.SAMPLE_PKT_DTYPE.itemsize print('Replaying ' + dump_filename + '...') # send packets for pkt_num in range(getSize(dump_file) // pkt_size): # seek to the correct place in the binary file and load packet dump_file.seek(pkt_num * pkt_size) pkt_bytes = dump_file.read(pkt_size) pkt = np.frombuffer(pkt_bytes, dtype=common.const.SAMPLE_PKT_DTYPE)[0] # send packet to the correct port depending on its type if pkt['pkt_type'] == 0: sock.sendto(pkt_bytes, ('127.0.0.1', pinger.const.RECV_PORT)) elif pkt['pkt_type'] == 1: sock.sendto(pkt_bytes, ('127.0.0.1', comms.const.RECV_PORT)) else: raise ValueError('Valid packet types are 0 (pinger) and 1 (comms)') # wait for the amount of time the hydrophones board would take # to send another packet time.sleep(common.const.L_PKT / common.const.SAMPLE_RATE)
# coding=utf-8 import sys import time from config import * def save_to_log_file(message): global LOG_FILE_HANDLE LOG_FILE_HANDLE.write(message) def console(message): sys.stdout.write(message + "\n") def write(message): is_msg_data = False for msg in MSG_DATA: if msg == message: is_msg_data = True break if is_msg_data: message = MSG_DATA[message] + "\n" sys.stdout.write(message) return message = time.strftime('%Y-%m-%d %H:%M:%S') + ": " + message + "\n" save_to_log_file(message) def generate_log_file_name(): file_name = "network-diagnostic-" date_str = time.strftime('%Y-%m-%d-%H_%M_%S') return file_name + date_str + ".log" def open_file(file_name): global LOG_FILE_HANDLE LOG_FILE_HANDLE = open(file_name, "a+") def close_file(): global LOG_FILE_HANDLE LOG_FILE_HANDLE.close() LOG_FILE_HANDLE = None
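# Hypothetical usage of the helpers above (not in the original module). It
# assumes config defines LOG_FILE_HANDLE and MSG_DATA, as the star import implies.
if __name__ == '__main__':
    open_file(generate_log_file_name())
    write("starting network diagnostics")   # timestamped and appended to the log file (unless it is a MSG_DATA key)
    console("diagnostics running")          # printed to stdout only
    close_file()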
import yaml
import json
import argparse
from types import SimpleNamespace


def dict_to_sns(d):
    """json object_hook: turn each decoded JSON object into a SimpleNamespace."""
    return SimpleNamespace(**d)


class obj(object):
    """Recursively wrap a dict (and nested dicts/lists) for attribute access."""
    def __init__(self, d):
        for a, b in d.items():
            if isinstance(b, (list, tuple)):
                setattr(self, a, [obj(x) if isinstance(x, dict) else x for x in b])
            else:
                setattr(self, a, obj(b) if isinstance(b, dict) else b)


class Config:
    @staticmethod
    def get_config_from_yaml(path):
        with open(path, 'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
        config = obj(cfg)
        return config

    @staticmethod
    def get_config_from_json(path):
        with open(path) as fd:
            config = json.load(fd, object_hook=dict_to_sns)
        return config
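# Hypothetical usage sketch: assumes a YAML file like
#   model:
#     lr: 0.001
# exists at the given (placeholder) path; both loaders return attribute-style access.
if __name__ == '__main__':
    cfg = Config.get_config_from_yaml('config.yml')
    print(cfg.model.lr)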
# download music

import requests
import re
import execjs
import json


class Down(object):
    def __init__(self):
        pass

    # Get the ids (and titles) of the music files on the artist page.
    def getids(self):
        _headers = {'Referer': 'https://music.163.com/',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'}
        # Fetch the page source via this URL.
        html = requests.get('https://music.163.com/artist?id=3685', headers=_headers).text
        # Return all (id, title) pairs obtained by regex matching.
        return re.findall('<li><a.href=.*?song.*?id=(.*?)">(.*?)</a></li>', html)

    # Compute the encrypted form of the ids (by loading a JS file and evaluating it).
    def countids(self, ids):
        # The request payload: a string containing this music file's ids,
        # required when asking the server for the playback URL.
        ddd = '{"ids":"[' + ids + ']","level":"standard","encodeType":"aac","csrf_token":""}'
        # Load the JS file.
        f = open('countdis.js', 'r', encoding='utf-8')
        line = f.readline()
        htmlstr = ''
        while line:
            htmlstr = htmlstr + line
            line = f.readline()
        ctx = execjs.compile(htmlstr)
        f.close()
        # Run the JS function d with the argument ddd (the full string built above) and return its result.
        return ctx.call('d', ddd)

    # Resolve the real URL of each track and download it.
    def geturl(self):
        # The page lists several tracks, each producing its own encrypted text, so iterate over them.
        for i in self.getids():
            # countids returns the two encrypted values (encSecKey and params), so index them from the result.
            enc = self.countids(i[0])
            encSecKey = enc[0]
            params = enc[1]
            _headers = {'Referer': 'https://music.163.com/',
                        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
                        }
            _data = {'encSecKey': encSecKey, 'params': params}
            # POST the two parameters to the server to obtain the playback URL.
            urltext = requests.post('https://music.163.com/weapi/song/enhance/player/url/v1?csrf_token=', headers=_headers, data=_data).text
            _json = json.loads(urltext)
            url = _json['data'][0]['url']
            # Once we have the URL, download the music file locally with a GET request
            # (headers must be passed as a keyword argument, not positionally).
            data = requests.get(url, headers=_headers, stream=True)
            with open(i[1] + '.mp3', 'wb') as f:
                for j in data.iter_content(chunk_size=512):
                    f.write(j)
                print(i[1] + '.mp3 written!')


# Run
if __name__ == '__main__':
    bb = Down()
    bb.geturl()
from . import exceptions


class AssetsClient(object):

    ASSESTS_BASE_URI = '/api/search.json'

    def __init__(self, client):
        self.client = client

    def list(self):
        assets = []
        uri = '/api.json'
        assets = self.client.get(uri)
        return assets

    def get(self, object_name):
        if object_name not in self.list():
            raise exceptions.CloudHealthError(
                'Object {0} does not exist'.format(object_name))

        url = self.client.get_asset(uri=self.ASSESTS_BASE_URI,
                                    asset=object_name)
        return url
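# Minimal runnable sketch with a stub client (purely illustrative): the real
# `client` is this package's HTTP wrapper, which must expose get() and
# get_asset() as used above; 'AwsAccount' is just an example asset name.
class _StubClient:
    def get(self, uri):
        return ['AwsAccount', 'AwsInstance']

    def get_asset(self, uri, asset):
        return '{}?name={}'.format(uri, asset)


if __name__ == '__main__':
    assets_api = AssetsClient(_StubClient())
    print(assets_api.list())
    print(assets_api.get('AwsAccount'))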
# Generated by Django 3.0.6 on 2020-05-25 01:42

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sme_management', '0008_auto_20200525_0113'),
    ]

    operations = [
        migrations.AlterField(
            model_name='smeproject',
            name='documents',
            field=models.FileField(blank=True, upload_to='projects/'),
        ),
    ]
# ----------------------------------------------------------------------------- # Copyright (c) 2005-2016, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. # ----------------------------------------------------------------------------- """ warning for 'import queue' in 2.7 from the future Problem appears to be that pyinstaller cannot have two modules of the same name that differ only by lower/upper case. The from the future 'queue' simply imports all of the 'Queue' module. So by my reading, since 'queue' and 'Queue' can not coexist in a frozen app, and since 'queue' requires 'Queue', there is no way to use 'queue' in a frozen 2.7 app. """ from PyInstaller.compat import is_py2 from PyInstaller.utils.hooks import logger def pre_find_module_path(api): if not is_py2: return # maybe the 'import queue' was not really needed, so just make sure it # is not found, otherwise it will crowd out the potential future # import of 'Queue' api.search_dirs = [] logger.warning("import queue (lowercase), not supported")
"""
Find the angle α at which the range of a jump from the swing (pendulum) is
maximal. The golden-section search method is to be used.
<NAME>
Index: 216708
"""
import matplotlib.pyplot as plt
import numpy as np


class Zloty_podzial:
    def __init__(self, h, line, a0):
        tau = (np.sqrt(5) - 1) / 2
        a = 0.2
        b = 0.7
        dok = 1e-9
        self.xw = self.st_rad(np.linspace(1, int(a0)-1, int(a0)-1))
        self.line = float(line)
        self.h = float(h)
        self.a0 = float(self.st_rad(int(a0)))
        n = int(np.ceil((np.log(2 * dok) - np.log(b - a)) / np.log(tau)))
        wynik, blad = self.steps(n, tau, a, b)
        print("accuracy: {} steps: {}\nThe jump range is maximal for an angle of: {} +/- {} degrees.".format(dok, n, '{0:.9f}'.format(self.rad_st(wynik)), "{0:.9f}".format(self.rad_st(blad))))

    def steps(self, n, tau, a, b):
        for i in range(n):
            t1 = a + (1 - tau) * (b - a)
            t2 = b - (1 - tau) * (b - a)
            f1 = self.f(t1, self.a0, self.h, self.line)
            f2 = self.f(t2, self.a0, self.h, self.line)
            if f1 > f2:
                b = t2
                plt.text(b, self.f(b, self.a0, self.h, self.line), i, color="blue", fontsize=10)
                ax.plot(b, self.f(b, self.a0, self.h, self.line), 'ob', markersize=2)
            else:
                a = t1
                plt.text(a, self.f(a, self.a0, self.h, self.line), i, color="red", fontsize=10)
                ax.plot(a, self.f(a, self.a0, self.h, self.line), 'or', markersize=2)
        return (a+b)/2, (b-a)/2

    def st_rad(self, a):
        # degrees -> radians
        return a * (np.pi / 180)

    def rad_st(self, a):
        # radians -> degrees
        return a * (180 / np.pi)

    def f(self, a, a0, h, line):
        return line * np.sin(a) + 2 * line * (np.cos(a) - np.cos(a0)) * np.cos(a) * (np.sin(a) + np.sqrt(np.sin(a) ** 2 + ((h / line - np.cos(a)) / (np.cos(a) - np.cos(a0)))))


while True:
    h0 = input("enter the height: ")
    line0 = input("enter the rope length: ")
    a00 = input("enter the swing amplitude in degrees: ")
    if int(line0) > int(h0):
        print("Error: the height is smaller than the rope length!!!")
    else:
        break

fig = plt.figure()
ax = fig.add_subplot(111)
a = Zloty_podzial(h0, line0, a00)
ax.plot(a.xw, a.f(a.xw, a.a0, a.h, a.line), "-b")
plt.show()
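# --- Illustrative sketch (not part of the assignment above) ---
# A generic golden-section maximizer for a single-variable function f on [a, b],
# showing the same interval-shrinking idea used in Zloty_podzial.steps.
import math

def golden_section_max(f, a, b, tol=1e-9):
    tau = (math.sqrt(5) - 1) / 2
    while (b - a) / 2 > tol:
        t1 = a + (1 - tau) * (b - a)
        t2 = b - (1 - tau) * (b - a)
        if f(t1) > f(t2):
            b = t2  # the maximum lies in [a, t2]
        else:
            a = t1  # the maximum lies in [t1, b]
    return (a + b) / 2

# Example: the maximum of -(x - 1)**2 on [0, 2] is at x = 1.
# golden_section_max(lambda x: -(x - 1) ** 2, 0.0, 2.0)  -> approximately 1.0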
StarcoderdataPython
8198413
from typing import Any, Generator, TypeVar from pydantic import BaseModel from pydantic.typing import AnyCallable __all__ = [ "NormalizableModel", ] T = TypeVar("T", bound="NormalizableModel") CallableGenerator = Generator[AnyCallable, None, None] class NormalizableModel(BaseModel): """A model that normalizes input before validation.""" @classmethod def normalize_input(cls, value: Any) -> Any: return value # @overrides BaseModel @classmethod def _enforce_dict_if_root(cls, obj: Any) -> Any: return super()._enforce_dict_if_root(cls.normalize_input(obj))
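

if __name__ == "__main__":
    # A usage sketch, not part of the original module. `User` and its fields are
    # made up for illustration; the behaviour relies on the pydantic v1
    # `parse_obj` path that the `_enforce_dict_if_root` override above hooks into.
    class User(NormalizableModel):
        name: str
        age: int

        @classmethod
        def normalize_input(cls, value: Any) -> Any:
            # Accept a raw "name:age" string as well as a regular dict.
            if isinstance(value, str):
                name, age = value.split(":", 1)
                return {"name": name, "age": int(age)}
            return value

    # Both input forms validate to the same data.
    assert User.parse_obj("ada:36").dict() == {"name": "ada", "age": 36}
    assert User.parse_obj({"name": "ada", "age": 36}).dict() == {"name": "ada", "age": 36}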
StarcoderdataPython
3512793
<gh_stars>1-10 """Dataloader for pivot-based-entity-linking. Encodes the knowledge base and pivoting language links using a trained entity similarity model. Author: <NAME> (<EMAIL>) Last update: 2019-04-15 """ import codecs from traindataloader import TrainDataLoader from max_margin_encoder import MaxMarginEncoder import numpy as np import sys import logging from utils.constants import ID_IDX,SOURCE_IDX,TARGET_IDX,DEFAULT_ENCODE_BATCH_SIZE,DELIM logging.basicConfig(format='%(asctime)s: %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S') class PivotDataLoader(object): def __init__(self, kb_filename, links_filename=None, kb_encoding_path="kb.encode", links_encoding_path="links.encode", training_data_loader=None, encoder_model=None, load_encodings=False): self.kb, kb_entries = self.load_kb(kb_filename) if links_filename: self.links, links_entries = self.load_links(links_filename) else: self.links = None if encoder_model and not load_encodings: self.kb_encodings = self.batch_encode(kb_entries, encode_func=encoder_model.encode_source, convert_func=training_data_loader.convert_source, encoding_path=kb_encoding_path) np.savez_compressed(kb_encoding_path, arr=self.kb_encodings) if self.links: self.links_encodings = self.batch_encode(links_entries, encode_func=encoder_model.encode_target, convert_func=training_data_loader.convert_target, encoding_path=links_encoding_path) np.savez_compressed(links_encoding_path, arr=self.links_encodings) else: try: self.kb_encodings = np.load(kb_encoding_path + '.npz')['arr'] except IOError: sys.stderr.write("KB encodings not found!\n") if self.links: try: self.links_encodings = np.load(links_encoding_path + '.npz')['arr'] except IOError: sys.stderr.write("Links encodings not found!\n") def load_kb(self, filename): db = [] entries = [] with codecs.open(filename, 'r', 'utf8') as f: for line in f: spl = line.strip().split(DELIM) if len(spl) != 3: continue db.append(int(spl[ID_IDX])) entries.append(spl[SOURCE_IDX]) return db, entries def load_links(self, filename): links = [] entries = [] with codecs.open(filename, 'r', 'utf8') as f: for line in f: spl = line.strip().split(DELIM) links.append(int(spl[ID_IDX])) entries.append(spl[TARGET_IDX]) return links, entries def batch_encode(self, entries, encode_func, convert_func, encoding_path): encoded = [] for i in range(0, len(entries), DEFAULT_ENCODE_BATCH_SIZE): logging.info("Read %s entries" %i) cur_size = min(DEFAULT_ENCODE_BATCH_SIZE, len(entries) - i) encoded += encode_func([convert_func(entry) for entry in entries[i:i+cur_size]]) return np.array(encoded)
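

if __name__ == "__main__":
    # A minimal usage sketch, not part of the original module: reuse encodings
    # that were cached to .npz by an earlier run that had a trained encoder
    # (load_encodings=False plus encoder_model/training_data_loader).
    # The file names below are hypothetical placeholders.
    loader = PivotDataLoader(
        kb_filename="kb.tsv",
        links_filename="links.tsv",
        kb_encoding_path="kb.encode",
        links_encoding_path="links.encode",
        load_encodings=True,
    )
    logging.info("Loaded %d KB entries, encodings shape %s",
                 len(loader.kb), loader.kb_encodings.shape)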
StarcoderdataPython
1885223
import logging import uuid from datetime import datetime from benchmarks.async_redis_repository import save_order as saveOrder from benchmarks.model import Order, OrderStatus, OrderResp, CreateOrderReq from zero import ZeroSubscriber async def hello_world(msg): logging.info(msg) async def save_order(msg): req = CreateOrderReq(**msg) saved_order = await saveOrder( Order( id=str(uuid.uuid4()), created_by=req.user_id, items=req.items, created_at=datetime.now().isoformat(), status=OrderStatus.INITIATED, ) ) resp = OrderResp(saved_order.id, saved_order.status, saved_order.items) return resp.__dict__ if __name__ == "__main__": app = ZeroSubscriber() app.register_listener("hello_world", hello_world) app.register_listener("save_order", save_order) app.run()
StarcoderdataPython
217031
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/) # Copyright (c) 2020-2021 <NAME> <<EMAIL>> # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. shenv = { "a":"17", "d":"13.3" } try: clipData except NameError: clipData = [] clipData += [ { 'section': 'operacje matematyczne \n i logiczne' }, { 'console': [ [0.0, eduMovie.runCommandString(r"echo 2 + 2", env=shenv)], ["math1", eduMovie.runCommandString(r"echo $((2 + 2))", env=shenv)], ["math2", eduMovie.runCommandString(r"a=17", env=shenv)], ["math2 + 1", eduMovie.runCommandString(r"echo $((2 + 2*$a ))", env=shenv)], ["math3 + 1", eduMovie.runCommandString(r"e=$(( 2 + 2*$a )) ; echo e wynosi $e", env=shenv)], ["math4", eduMovie.runCommandString(r"echo $(( 13/5 ))", env=shenv)], ["math5", eduMovie.runCommandString(r"echo $(( 13%5 ))", env=shenv)], ["let1", eduMovie.runCommandString(r"echo $a; let a++; echo $a", env=shenv)], ["float1", eduMovie.runCommandString(r"d=13.3", env=shenv)], ["float1 + 1", eduMovie.runCommandString(r"echo $(( $d * 2))", env=shenv)], ["float1 + 2", eduMovie.runCommandString(r"echo $(( 2.5 * 2))", env=shenv)], ], 'text' : [ 'Większość języków programowania gdy każemy im wypisać dwa plus dwa <m> to wypiszą 4, bash potraktuje coś takiego jak napis. <mark name="math1" />' 'Aby użyć operacji matematycznych należy skorzystać ze specjalnego <m> operatora postaci dolar dwa nawiasy okrągłe lewe <m> wyrażenie matematyczne i dwa nawiasy okrągłe prawe. <mark name="math2" />' 'W ramach wyrażenia w tym operatorze możemy odwoływać się do zmiennych <m> oraz (co w bashu jest rzadkością) swobodnie używać spacji. <mark name="math3" />' 'Wynik takiej operacji możemy przypisać do zmiennej w standardowy sposób <m> (pamiętajmy jednak że wokół znaku równości spacje są niedozwolone). <mark name="math4" />' 'Możemy także wykonać operację dzielenia, jednak będzie to <m> tylko i wyłącznie dzielenie całkowite, czyli dostaniemy część całkowitą wyniku. <mark name="math5" />' 'Możemy też poznać resztę z dzielenia używając operatora procent. <mark name="let1" />' 'Do operacji arytmetycznych może być też wykorzystywane polecenie let. <m>' 'Najczęściej jest stosowane do inkrementacji podanej zmiennej, <m> tak jak w przykładzie pokazanym na ekranie. <mark name="float1" />' 'Bash nie rozumie liczb zmiennoprzecinkowych <m> i pomimo że może je przechować w zmiennej nie potrafi na nich operować. 
<m>' ] }, { 'console': [ [0.0, ""], ["float2", eduMovie.runCommandString(r'e=`python -c "print($d * 2)"` ; echo $e', env=shenv)], ["float3", eduMovie.runCommandString(r'e=$(python -c "print($d * 2)") ; echo $e', env=shenv)], ["float4", eduMovie.runCommandString(r'e=$( x=$(python -c "print(13/3)"); python -c "print($d * $x)" ) ; echo $e', env=shenv)], ["callcat", eduMovie.runCommandString(r'e=$( cat /etc/resolv.conf ) ; echo $e', env=shenv)], ["callcat + 1", eduMovie.runCommandString(r'e=$( cat /etc/resolv.conf ) ; echo "$e"', env=shenv)], ], 'text' : [ 'Jednak silną stroną basha jest to co w innych językach <m> bywa dość złożone i nie często stosowane, <m>' 'czyli uruchamianie zewnętrznych poleceń i przekazywanie do nich jakiś danych <m> oraz odbieranie od nich wyników. <mark name="float2" />' 'Możemy zatem użyć innego programu <m> do wykonania operacji zmiennoprzecinkowych. <m>' 'Na ekranie widzimy użycie Pythona <m> do wykonania operacji zmiennoprzecinkowej na bashowej zmiennej d. <m>' 'Należy zwrócić uwagę na ujęcie kodu pythonowego w podwójne cudzysłowa, <m> co pozwala na podstawienie pod dolar d wartości zmiennej d. <m>' 'Oraz na użycie tzw. backticks (jest to znak umieszczony na standardowej <m> klawiaturze na lewo od jedynki i nie należy mylić go z apostrofem) <m>' 'do pobrania standardowego wyjścia uruchomionego polecenia, <m> na którym Python wypisał wynik. <mark name="float3" />' 'Zamiast backticks możemy użyć operatora dolar nawiasy okrągłe, <m> tak jak jest to teraz pokazane na ekranie. <m>' 'Zaletą tego podejścia jest możliwość zagnieżdżania takich wywołań, <mark name="float4" /> czyli wewnątrz kodu którego wyjście przechwytujemy <m> możemy też definiować zmienną przechwytującą jakieś wyjście. <m>' 'Tak jak to pokazano na ekranie w trochę bezsensownym przykładzie <m> (bo obie te operacje możemy wykonać w jednym wywołaniu Pythona). <mark name="callcat" />' 'Warto także zwrócić uwagę na znaczenie cudzysłowów <m> przy odwołaniu się do zmiennych zawierających wiele linii. <m>' 'Jeżeli wypiszemy taką zmienną bez użycia cudzysłowów <m> to nowe linie zostaną zastąpione przez echo spacjami (podobnie jak <m>' 'ciąg dowolnej ilości spacji zostanie zastąpiony pojedynczą spacją), <m> a jeżeli zastosujemy cudzysłowa zmienna zostanie wypisana w nie zmienionej formie. <m>' 'Przyczyną tego jest to iż w pierwszym wypadku echo każdy wyraz <m> potraktuje jako osobny swój argument i wypisze go oddzielając od innych argumentów spacją, <m>' 'a w drugim dostanie jeden argument ze zmienną <m> w oryginalnej postaci i go w takiej postaci wypisze. <m>' ] }, { 'console': [ [0.0, ""], ["kwadratowe - 1.5", eduMovie.runCommandString(r"[ 0 -lt 3 ]; echo $?", env=shenv)], ["kwadratowe + 1.5", eduMovie.runCommandString(r"""test "abc" = "def"; echo $?""", env=shenv)], ["test2", eduMovie.runCommandString(r"test 0 -eq 0 && echo 'zero równa się zero'", env=shenv)], ], 'text' : [ 'Można by używać operatora dolar i podwójne nawiasy okrągłe także <m> do obliczania wartości wyrażeń logicznych a nawet bitowych. <m>' 'Natomiast jest to bardzo rzadko spotykane i do obliczania wartości <m> wyrażeń logicznych typowo stosuje się nawiasy kwadratowe <mark name="kwadratowe" /> lub komendę test (zapisy te są równoważne). <m>' 'Wynika to zapewne z dwóch ich cech. <m>' 'Po pierwsze wynik testowanego wyrażenia logicznego zwracają jako <m> kod powrotu, co okazuje się bardzo wygodne do łączenia ich z innymi poleceniami. 
<m>' 'Po drugie oferują oprócz sprawdzania typowych nierówności i równości <m> także sprawdzanie istnienia / nieistnienia plików itp. <mark name="test2" />' 'Kod powrotu jest wyrażany w sposób typowy dla tej wartości <m> czyli zero oznacza sukces (spełnienie warunku), <m> natomiast coś nie zerowego porażkę (warunek nie spełniony). <m>' 'Mamy zatem do czynienia z logiką odwróconą. <m>' 'Pełen opis warunków które możemy sprawdzać <m> można znaleźć w man test, warto jednak od razu zauważyć <m>' 'że prawie wszystkie operacje (za wyjątkiem porównania napisów) <m> określane są jako opcja zaczynająca się od myślnika, <m> na przykład <-lt>[minus l t] oznacza mniejsze niż (less than). <m>' 'Należy też pamiętać że w przypadku nawiasów kwadratowych <m> spacje wokół nich są obowiązkowe – nie możemy nawiasu dokleić do <m> polecenia występującego przed nim, po nim lub do warunku. <m>' ] }, { # wieloliniowe w interaktywnym 'console': [ [0.071384, "o", eduMovie.prompt()], [0.920981, "o", "e"], [1.328904, "o", "="], [1.744868, "o", "'"], [2.912926, "o", "A"], [3.104875, "o", "l"], [3.328902, "o", "a"], [4.088861, "o", "\r\n"], [4.089228, "o", "> "], [4.848881, "o", "m"], [5.032797, "o", "a"], [5.736847, "o", "\r\n"], [5.737176, "o", "> "], [6.11281, "o", "k"], [6.30481, "o", "o"], [6.616787, "o", "t"], [6.872779, "o", "a"], [7.920865, "o", "\r\n"], [7.92118, "o", "> "], [9.088965, "o", "'"], [9.632915, "o", "\r\n"], [9.63395, "o", eduMovie.prompt()], [10.705002, "o", "e"], [10.96089, "o", "c"], [11.152874, "o", "h"], [11.432917, "o", "o"], [11.7849, "o", " "], [12.577041, "o", "$"], [13.344988, "o", "e"], [13.856846, "o", "\r\n"], [13.857231, "o", "Ala ma kota\r\n"], [13.857694, "o", eduMovie.prompt()], [14.641008, "o", "echo $e"], [15.209034, "o", "\""], [15.464932, "o", "\b"], [15.62497, "o", "\b"], [15.784907, "o", "\b"], [16.129074, "o", "\"$e\"\b\b\b"], [16.512932, "o", "\r\n"], [16.513324, "o", "Ala\r\nma\r\nkota\r\n\r\n"], [16.513926, "o", eduMovie.prompt()], ["blad + 0.072107", "o", eduMovie.prompt()], ["blad + 1.631328", "o", "e"], ["blad + 2.423255", "o", "="], ["blad + 2.903258", "o", "'"], ["blad + 3.631325", "o", "a"], ["blad + 3.911212", "o", "b"], ["blad + 4.29528", "o", "c"], ["blad + 5.367481", "o", "\""], ["blad + 5.751265", "o", "\r\n"], ["blad + 5.751644", "o", "> "], ["blad + 7.647443", "o", "^C"], ["blad + 7.648199", "o", eduMovie.prompt()], ], 'text' : [ 'Warto zauważyć że bash pozwala na wprowadzanie <m> w trybie interaktywnym poleceń, czy też zmiennych, wieloliniowych. <m>' 'Oczekiwanie na kontynuację wprowadzania rozpoczętego polecenia <m> bash sygnalizuje zmianą znaku zachęty na pojedynczy znak większości. <m>' 'Często wejście w ten tryb jest jednak wynikiem naszego błędu <mark name="blad" /> i nie chcemy kontynuować wprowadzania tego polecenia. <m>' 'Możemy wtedy użyć Control C <m> aby przerwać to wprowadzanie i poprawić popełniony błąd. <m>' 'Przerwane polecenie będzie dostępne w historii linii poleceń. <m>' ] }, ]
StarcoderdataPython
9620906
import os import sys from os import path current_dir = path.dirname(path.abspath(__file__)) while path.split(current_dir)[-1] != r'Heron': current_dir = path.dirname(current_dir) sys.path.insert(0, path.dirname(current_dir)) from Heron import general_utils as gu Exec = os.path.abspath(__file__) # <editor-fold desc="The following code is called from the GUI process as part of the generation of the node. # It is meant to create node specific elements (not part of a generic node). # This is where a new node's individual elements should be defined"> """ Properties of the generated Node """ BaseName = 'TL Projector Output' NodeAttributeNames = ['Parameters', 'Trigger Photodiode.', 'Angle of Pic'] NodeAttributeType = ['Static', 'Input', 'Input'] ParameterNames = ['Picture file name', 'Screen X', 'Screen Y', 'Picture X', 'Picture Y', 'Show Inner Pic'] ParameterTypes = ['str', 'int', 'int', 'int', 'int', 'bool'] ParametersDefaultValues = ['pic.png', 2560, 0, 535, 545, True] WorkerDefaultExecutable = os.path.join(os.path.dirname(Exec), 'tl_projector_output_worker.py') # </editor-fold> # <editor-fold desc="The following code is called as its own process when the editor starts the graph"> if __name__ == "__main__": tl_projector_output_com = gu.start_the_sink_communications_process() gu.register_exit_signals(tl_projector_output_com.on_kill) tl_projector_output_com.start_ioloop() # </editor-fold>
StarcoderdataPython
3465785
#!/usr/bin/env python from QUBEKit.decorators import for_all_methods, timer_logger from QUBEKit.helpers import append_to_log from tempfile import TemporaryDirectory from shutil import copy from os import getcwd, chdir, path from subprocess import run as sub_run from collections import OrderedDict from copy import deepcopy from xml.etree.ElementTree import parse as parse_tree from simtk.openmm import app, XmlSerializer from openeye import oechem from openforcefield.typing.engines.smirnoff import ForceField from openforcefield.utils import get_data_filename, generateTopologyFromOEMol # TODO Users should be able to just install ONE of the necessary parametrisation methods and not worry about needing the others too. # Is there a nice way of doing this other than try: import <module>; except ImportError: pass ? class Parametrisation: """ Class of methods which perform the initial parametrisation for the molecule. The Parameters will be stored into the molecule as dictionaries as this is easy to manipulate and convert to a parameter tree. Note all parameters gathered here are indexed from 0, whereas the ligand object indices start from 1 for all networkx related properties such as bonds! Parameters --------- molecule : QUBEKit molecule object input_file : an OpenMM style xml file associated with the molecule object fftype : the FF type the molecule will be parametrised with only needed in the case of gaff or gaff2 else will be assigned based on class used. Returns ------- AtomTypes : dictionary of the atom names, the associated OPLS type and class type stored under number. {0: [C00, OPLS_800, C800]} Residues : dictionary of residue names indexed by the order they appear. HarmonicBondForce: dictionary of equilibrium distances and force constants stored under the bond tuple. {(0, 1): [eqr=456, fc=984375]} HarmonicAngleForce: dictionary of equilibrium angles and force constant stored under the angle tuple. PeriodicTorsionForce : dictionary of periodicity, barrier and phase stored under the torsion tuple. NonbondedForce : dictionary of charge, sigma and epsilon stored under the original atom ordering. """ def __init__(self, molecule, input_file=None, fftype=None, mol2_file=None): self.molecule = molecule self.input_file = input_file self.fftype = fftype self.gaff_types = {} def __repr__(self): return f'{self.__class__.__name__}({self.__dict__!r})' def gather_parameters(self): """ This method parses the serialised xml file and collects the parameters ready to pass them to build tree. 
""" # Try to gather the AtomTypes first for i, atom in enumerate(self.molecule.atom_names): self.molecule.AtomTypes[i] = [atom, 'QUBE_' + str(800 + i), str(self.molecule.molecule['input'][i][0]) + str(800 + i), self.gaff_types[atom]] input_xml_file = 'serialised.xml' in_root = parse_tree(input_xml_file).getroot() # Extract all bond data for Bond in in_root.iter('Bond'): bond = (int(Bond.get('p1')), int(Bond.get('p2'))) self.molecule.HarmonicBondForce[bond] = [Bond.get('d'), Bond.get('k')] # Extract all angle data for Angle in in_root.iter('Angle'): angle = int(Angle.get('p1')), int(Angle.get('p2')), int(Angle.get('p3')) self.molecule.HarmonicAngleForce[angle] = [Angle.get('a'), Angle.get('k')] # Extract all non-bonded data i = 0 for Atom in in_root.iter('Particle'): if "eps" in Atom.attrib: self.molecule.NonbondedForce[i] = [Atom.get('q'), Atom.get('sig'), Atom.get('eps')] i += 1 # Extract all of the torsion data phases = ['0', '3.141592653589793', '0', '3.141592653589793'] for Torsion in in_root.iter('Torsion'): tor_string_forward = tuple(int(Torsion.get(f'p{i}')) for i in range(1, 5)) tor_string_back = tuple(reversed(tor_string_forward)) if tor_string_forward not in self.molecule.PeriodicTorsionForce.keys() and tor_string_back not in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[tor_string_forward] = [ [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]] elif tor_string_forward in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[tor_string_forward].append( [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]) elif tor_string_back in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[tor_string_back].append([Torsion.get('periodicity'), Torsion.get('k'), phases[ int(Torsion.get('periodicity')) - 1]]) # Now we have all of the torsions from the openMM system # we should check if any torsions we found in the molecule do not have parameters # if they don't give them the default 0 parameter this will not change the energy for tor_list in self.molecule.dihedrals.values(): for torsion in tor_list: # change the indexing to check if they match param = tuple(torsion[i] - 1 for i in range(4)) if param not in self.molecule.PeriodicTorsionForce.keys() and tuple(reversed(param)) not in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[param] = [['1', '0', '0'], ['2', '0', '3.141592653589793'], ['3', '0', '0'], ['4', '0', '3.141592653589793']] # Now we need to fill in all blank phases of the Torsions for key in self.molecule.PeriodicTorsionForce.keys(): vns = ['1', '2', '3', '4'] if len(self.molecule.PeriodicTorsionForce[key]) < 4: # now need to add the missing terms from the torsion force for force in self.molecule.PeriodicTorsionForce[key]: vns.remove(force[0]) for i in vns: self.molecule.PeriodicTorsionForce[key].append([i, '0', phases[int(i) - 1]]) # sort by periodicity using lambda function for key in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[key].sort(key=lambda x: x[0]) # now we need to tag the proper and improper torsions and reorder them so the first atom is the central improper_torsions = OrderedDict() for improper in self.molecule.improper_torsions: for key in self.molecule.PeriodicTorsionForce: # for each improper find the corresponding torsion parameters and save if sorted(key) == sorted(tuple([x - 1 for x in improper])): # if they match tag the dihedral 
self.molecule.PeriodicTorsionForce[key].append('Improper') # replace the key with the strict improper order first atom is center improper_torsions[tuple([x - 1 for x in improper])] = self.molecule.PeriodicTorsionForce[key] torsions = deepcopy(self.molecule.PeriodicTorsionForce) # Remake the torsion store in the ligand self.molecule.PeriodicTorsionForce = OrderedDict((v, k) for v, k in torsions.items() if k[-1] != 'Improper') # now we need to add the impropers at the end of the torsion object for key in improper_torsions.keys(): self.molecule.PeriodicTorsionForce[key] = improper_torsions[key] def get_gaff_types(self, fftype='gaff', file=None): """Convert the pdb file into a mol2 antechamber file and get the gaff atom types and gaff bonds if there were """ # call Antechamber to convert if we don't have the mol2 file if file is None: cwd = getcwd() # do this in a temp directory as it produces a lot of files pdb = path.abspath(self.molecule.filename) mol2 = path.abspath(f'{self.molecule.name}.mol2') file = mol2 with TemporaryDirectory() as temp: chdir(temp) copy(pdb, 'in.pdb') # call antechamber with open('Antechamber.log', 'w+') as log: sub_run(f'antechamber -i in.pdb -fi pdb -o out.mol2 -fo mol2 -s 2 -at ' f'{fftype} -c bcc', shell=True, stdout=log) # Ensure command worked if not path.exists('out.mol2'): raise FileNotFoundError('out.mol2 not found antechamber failed!') # now copy the file back from the folder copy('out.mol2', mol2) chdir(cwd) # Get the gaff atom types and bonds in case we don't have this info gaff_bonds = {} with open(file, 'r') as mol_in: atoms = False bonds = False for line in mol_in.readlines(): # TODO Surely this can be simplified?! if '@<TRIPOS>ATOM' in line: atoms = True continue elif '@<TRIPOS>BOND' in line: atoms = False bonds = True continue elif '@<TRIPOS>SUBSTRUCTURE' in line: bonds = False continue if atoms: self.gaff_types[self.molecule.atom_names[int(line.split()[0]) - 1]] = str(line.split()[5]) if bonds: try: gaff_bonds[int(line.split()[1])].append(int(line.split()[2])) except KeyError: gaff_bonds[int(line.split()[1])] = [int(line.split()[2])] append_to_log(f'GAFF types: {self.gaff_types}', msg_type='minor') # Check if the molecule already has bonds; if not apply these bonds if not list(self.molecule.topology.edges): # add the bonds to the molecule for key, value in gaff_bonds.items(): for node in value: self.molecule.topology.add_edge(key, node) self.molecule.update() # Warning this rewrites the pdb file and re # Write a new pdb with the connection information self.molecule.write_pdb(input_type='input', name=f'{self.molecule.name}_qube') self.molecule.filename = f'{self.molecule.name}_qube.pdb' print(f'Molecule connections updated new pdb file made and used: {self.molecule.name}_qube.pdb') # Update the input file name for the xml self.input_file = f'{self.molecule.name}.xml' @for_all_methods(timer_logger) class XML(Parametrisation): """Read in the parameters for a molecule from an XML file and store them into the molecule.""" def __init__(self, molecule, input_file=None, fftype='CM1A/OPLS', mol2_file=None): super().__init__(molecule, input_file, fftype, mol2_file) self.get_gaff_types(fftype='gaff', file=mol2_file) self.serialise_system() self.gather_parameters() self.molecule.parameter_engine = 'XML input ' + self.fftype def serialise_system(self): """Serialise the input XML system using openmm.""" pdb = app.PDBFile(self.molecule.filename) modeller = app.Modeller(pdb.topology, pdb.positions) if self.input_file: forcefield = app.ForceField(self.input_file) 
else: try: forcefield = app.ForceField(self.molecule.name + '.xml') except FileNotFoundError: raise FileNotFoundError('No .xml type file found.') system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None) xml = XmlSerializer.serializeSystem(system) with open('serialised.xml', 'w+') as out: out.write(xml) @for_all_methods(timer_logger) class XMLProtein(Parametrisation): """Read in the parameters for a protein from the QUBEKit_general XML file and store them into the protein.""" def __init__(self, protein, input_file='QUBE_general_pi.xml', fftype='CM1A/OPLS'): super().__init__(protein, input_file, fftype) self.serialise_system() self.gather_parameters() self.molecule.parameter_engine = 'XML input ' + self.fftype def serialise_system(self): """Serialise the input XML system using openmm.""" pdb = app.PDBFile(self.molecule.filename) modeller = app.Modeller(pdb.topology, pdb.positions) if self.input_file: forcefield = app.ForceField(self.input_file) else: try: forcefield = app.ForceField(self.molecule.name + '.xml') except FileNotFoundError: raise FileNotFoundError('No .xml type file found.') system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None) xml = XmlSerializer.serializeSystem(system) with open('serialised.xml', 'w+') as out: out.write(xml) def gather_parameters(self): """This method parses the serialised xml file and collects the parameters ready to pass them to build tree. """ # Try to gather the AtomTypes first for i, atom in enumerate(self.molecule.atom_names): self.molecule.AtomTypes[i] = [atom, 'QUBE_' + str(i), str(self.molecule.molecule['input'][i][0]) + str(i)] input_xml_file = 'serialised.xml' in_root = parse_tree(input_xml_file).getroot() # Extract all bond data for Bond in in_root.iter('Bond'): self.molecule.HarmonicBondForce[(int(Bond.get('p1')), int(Bond.get('p2')))] = [Bond.get('d'), Bond.get('k')] # before we continue update the protein class self.molecule.update() # Extract all angle data for Angle in in_root.iter('Angle'): self.molecule.HarmonicAngleForce[int(Angle.get('p1')), int(Angle.get('p2')), int(Angle.get('p3'))] = [ Angle.get('a'), Angle.get('k')] # Extract all non-bonded data i = 0 for Atom in in_root.iter('Particle'): if "eps" in Atom.attrib: self.molecule.NonbondedForce[i] = [Atom.get('q'), Atom.get('sig'), Atom.get('eps')] i += 1 # Extract all of the torsion data phases = ['0', '3.141592653589793', '0', '3.141592653589793'] for Torsion in in_root.iter('Torsion'): tor_string_forward = tuple(int(Torsion.get(f'p{i}')) for i in range(1, 5)) tor_string_back = tuple(reversed(tor_string_forward)) if tor_string_forward not in self.molecule.PeriodicTorsionForce.keys() and tor_string_back not in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[tor_string_forward] = [ [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]] elif tor_string_forward in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[tor_string_forward].append( [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]) elif tor_string_back in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[tor_string_back].append([Torsion.get('periodicity'), Torsion.get('k'), phases[ int(Torsion.get('periodicity')) - 1]]) # Now we have all of the torsions from the openMM system # we should check if any torsions we found in the molecule do not have parameters # if they don't give them 
the default 0 parameter this will not change the energy for tor_list in self.molecule.dihedrals.values(): for torsion in tor_list: # change the indexing to check if they match param = tuple(torsion[i] - 1 for i in range(4)) if param not in self.molecule.PeriodicTorsionForce.keys() and tuple( reversed(param)) not in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[param] = [['1', '0', '0'], ['2', '0', '3.141592653589793'], ['3', '0', '0'], ['4', '0', '3.141592653589793']] # Now we need to fill in all blank phases of the Torsions for key in self.molecule.PeriodicTorsionForce.keys(): vns = ['1', '2', '3', '4'] if len(self.molecule.PeriodicTorsionForce[key]) < 4: # now need to add the missing terms from the torsion force for force in self.molecule.PeriodicTorsionForce[key]: vns.remove(force[0]) for i in vns: self.molecule.PeriodicTorsionForce[key].append([i, '0', phases[int(i) - 1]]) # sort by periodicity using lambda function for key in self.molecule.PeriodicTorsionForce.keys(): self.molecule.PeriodicTorsionForce[key].sort(key=lambda x: x[0]) # now we need to tag the proper and improper torsions and reorder them so the first atom is the central improper_torsions = OrderedDict() for improper in self.molecule.improper_torsions: for key in self.molecule.PeriodicTorsionForce: # for each improper find the corresponding torsion parameters and save if sorted(key) == sorted(tuple([x - 1 for x in improper])): # if they match tag the dihedral self.molecule.PeriodicTorsionForce[key].append('Improper') # replace the key with the strict improper order first atom is center improper_torsions[tuple([x - 1 for x in improper])] = self.molecule.PeriodicTorsionForce[key] torsions = deepcopy(self.molecule.PeriodicTorsionForce) # now we should remake the torsion store in the ligand self.molecule.PeriodicTorsionForce = OrderedDict((v, k) for v, k in torsions.items() if k[-1] != 'Improper') # now we need to add the impropers at the end of the torsion object for key in improper_torsions.keys(): self.molecule.PeriodicTorsionForce[key] = improper_torsions[key] @for_all_methods(timer_logger) class AnteChamber(Parametrisation): """ Use AnteChamber to parametrise the Ligand first using gaff or gaff2 then build and export the xml tree object. 
""" def __init__(self, molecule, input_file=None, fftype='gaff', mol2_file=None): super().__init__(molecule, input_file, fftype, mol2_file) self.antechamber_cmd() self.serialise_system() self.gather_parameters() self.prmtop = None self.inpcrd = None self.molecule.parameter_engine = 'AnteChamber ' + self.fftype def serialise_system(self): """Serialise the amber style files into an openmm object.""" prmtop = app.AmberPrmtopFile(self.prmtop) system = prmtop.createSystem(nonbondedMethod=app.NoCutoff, constraints=None) with open('serialised.xml', 'w+') as out: out.write(XmlSerializer.serializeSystem(system)) def antechamber_cmd(self): """Method to run Antechamber, parmchk2 and tleap.""" # file paths when moving in and out of temp locations cwd = getcwd() input_file = path.abspath(self.molecule.filename) mol2 = path.abspath(f'{self.molecule.name}.mol2') frcmod_file = path.abspath(f'{self.molecule.name}.frcmod') prmtop_file = path.abspath(f'{self.molecule.name}.prmtop') inpcrd_file = path.abspath(f'{self.molecule.name}.inpcrd') ant_log = path.abspath('Antechamber.log') # Call Antechamber self.get_gaff_types(fftype=self.fftype) # Work in temp directory due to the amount of files made by antechamber with TemporaryDirectory() as temp: chdir(temp) copy(mol2, 'out.mol2') # Run parmchk with open('Antechamber.log', 'a') as log: sub_run(f"parmchk2 -i out.mol2 -f mol2 -o out.frcmod -s {self.fftype}", shell=True, stdout=log) # Ensure command worked if not path.exists('out.frcmod'): raise FileNotFoundError('out.frcmod not found parmchk2 failed!') # Now get the files back from the temp folder copy('out.mol2', mol2) copy('out.frcmod', frcmod_file) copy('Antechamber.log', ant_log) # Now we need to run tleap to get the prmtop and inpcrd files with TemporaryDirectory() as temp: chdir(temp) copy(mol2, 'in.mol2') copy(frcmod_file, 'in.frcmod') copy(ant_log, 'Antechamber.log') # make tleap command file with open('tleap_commands', 'w+') as tleap: tleap.write("""source oldff/leaprc.ff99SB source leaprc.gaff LIG = loadmol2 in.mol2 check LIG loadamberparams in.frcmod saveamberparm LIG out.prmtop out.inpcrd quit""") # Now run tleap with open('Antechamber.log', 'a') as log: sub_run('tleap -f tleap_commands', shell=True, stdout=log) # Check results present if not path.exists('out.prmtop') or not path.exists('out.inpcrd'): raise FileNotFoundError('Neither out.prmtop nor out.inpcrd found; tleap failed!') copy('Antechamber.log', ant_log) copy('out.prmtop', prmtop_file) copy('out.inpcrd', inpcrd_file) chdir(cwd) # Now give the file names to parametrisation method self.prmtop = f'{self.molecule.name}.prmtop' self.inpcrd = f'{self.molecule.name}.inpcrd' @for_all_methods(timer_logger) class OpenFF(Parametrisation): """ This class uses the openFF in openeye to parametrise the molecule using frost. A serialised XML is then stored in the parameter dictionaries. 
""" def __init__(self, molecule, input_file=None, fftype='frost', mol2_file=None): super().__init__(molecule, input_file, fftype, mol2_file) self.get_gaff_types(mol2_file) self.serialise_system() self.gather_parameters() self.molecule.parameter_engine = 'OpenFF ' + self.fftype def serialise_system(self): """Create the OpenMM system; parametrise using frost; serialise the system.""" # Load molecule using OpenEye tools mol = oechem.OEGraphMol() ifs = oechem.oemolistream(self.molecule.filename) flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield ifs.SetFlavor(oechem.OEFormat_MOL2, flavor) oechem.OEReadMolecule(ifs, mol) oechem.OETriposAtomNames(mol) # Load a SMIRNOFF small molecule forcefield for alkanes, ethers, and alcohols forcefield = ForceField(get_data_filename('forcefield/smirnoff99Frosst.offxml')) # Create the OpenMM system topology = generateTopologyFromOEMol(mol) system = forcefield.createSystem(topology, [mol]) # Serialise the OpenMM system into the xml file with open('serialised.xml', 'w+') as out: out.write(XmlSerializer.serializeSystem(system)) # get the gaff atom types self.get_gaff_types() @for_all_methods(timer_logger) class BOSS(Parametrisation): """ This class uses the BOSS software to parametrise a molecule using the CM1A/OPLS FF. The parameters are then stored in the parameter dictionaries. """ # TODO make sure order is consistent with PDB. def __init__(self, molecule, input_file=None, fftype='CM1A/OPLS'): super().__init__(molecule, input_file, fftype) self.BOSS_cmd() self.gather_parameters() self.molecule.parameter_engine = 'BOSS ' + self.fftype def BOSS_cmd(self): """ This method is used to call the required BOSS scripts. 1 The zmat file with CM1A charges is first generated for the molecule keeping the same pdb order. 2 A single point calculation is done. """ pass def gather_parameters(self): """ This method parses the BOSS out file and collects the parameters ready to pass them to build tree. """ pass
StarcoderdataPython
11380855
#coding:utf-8 # # id: bugs.core_4848 # title: MERGE ... WHEN NOT MATCHED ... RETURNING returns wrong (non-null) values when no insert is performed # decription: # tracker_id: CORE-4848 # min_versions: ['3.0'] # versions: 3.0 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 3.0 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(page_size=4096, sql_dialect=1, init=init_script_1) test_script_1 = """ set list on; recreate table t1 (n1 integer, n2 integer); -- Case 1: merge into t1 using ( select 1 x from rdb$database where 1 = 0 ) on 1 = 1 when not matched then insert values (1, 11) returning n1, n2; -- Case 2: merge into t1 using ( select 1 x from rdb$database where 1 = 1 ) on 1 = 0 when not matched and 1 = 0 then insert values (1, 11) returning n1, n2; """ act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ N1 <null> N2 <null> N1 <null> N2 <null> """ @pytest.mark.version('>=3.0') def test_1(act_1: Action): act_1.expected_stdout = expected_stdout_1 act_1.execute() assert act_1.clean_expected_stdout == act_1.clean_stdout
StarcoderdataPython
9650004
from django.test import TestCase

from django_token_auth.user import TokenAuthenticatedUser
from django_token_auth.user import UserHasNoData


class TokenAuthenticatedUserTestCase(TestCase):
    def test_user_class(self):
        user = TokenAuthenticatedUser('john', 'some_token')
        self.assertEqual(user.username, 'john')
        self.assertTrue(user.is_authenticated())
        self.assertRaises(UserHasNoData, getattr, user, 'email')
StarcoderdataPython
3550927
<gh_stars>0 """ A module containing unit tests for the `bitmask` modue. :Authors: <NAME> """ from __future__ import (absolute_import, division, unicode_literals, print_function) import warnings import numpy as np import pytest from stsci.tools import bitmask MAX_INT_TYPE = np.maximum_sctype(np.int) MAX_UINT_TYPE = np.maximum_sctype(np.uint) MAX_UINT_FLAG = np.left_shift( MAX_UINT_TYPE(1), MAX_UINT_TYPE(np.iinfo(MAX_UINT_TYPE).bits - 1) ) MAX_INT_FLAG = np.left_shift( MAX_INT_TYPE(1), MAX_INT_TYPE(np.iinfo(MAX_INT_TYPE).bits - 2) ) SUPER_LARGE_FLAG = 1 << np.iinfo(MAX_UINT_TYPE).bits EXTREME_TEST_DATA = np.array([ 0, 1, 1 + 1 << 2, MAX_INT_FLAG, ~0, MAX_INT_TYPE(MAX_UINT_FLAG), 1 + MAX_INT_TYPE(MAX_UINT_FLAG) ], dtype=MAX_INT_TYPE) @pytest.mark.parametrize('flag', [0, -1]) def test_nonpositive_not_a_bit_flag(flag): assert not bitmask.is_bit_flag(n=flag) @pytest.mark.parametrize('flag', [ 1, MAX_UINT_FLAG, int(MAX_UINT_FLAG), SUPER_LARGE_FLAG ]) def test_is_bit_flag(flag): assert bitmask.is_bit_flag(n=flag) @pytest.mark.parametrize('number', [0, 1, MAX_UINT_FLAG, SUPER_LARGE_FLAG]) def test_is_int(number): assert bitmask._is_int(number) @pytest.mark.parametrize('number', ['1', True, 1.0]) def test_nonint_is_not_an_int(number): assert not bitmask._is_int(number) @pytest.mark.parametrize('flag,flip,expected', [ (3, None, 3), (3, True, -4), (3, False, 3), ([1, 2], False, 3), ([1, 2], True, -4) ]) def test_interpret_valid_int_bit_flags(flag, flip, expected): assert( bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip) == expected ) @pytest.mark.parametrize('flag', [None, ' ', 'None', 'Indef']) def test_interpret_none_bit_flags_as_None(flag): assert bitmask.interpret_bit_flags(bit_flags=flag) is None @pytest.mark.parametrize('flag,expected', [ ('1', 1), ('~-1', ~(-1)), ('~1', ~1), ('1,2', 3), ('1+2', 3), ('(1,2)', 3), ('(1+2)', 3), ('~1,2', ~3), ('~1+2', ~3), ('~(1,2)', ~3), ('~(1+2)', ~3) ]) def test_interpret_valid_str_bit_flags(flag, expected): assert( bitmask.interpret_bit_flags(bit_flags=flag) == expected ) @pytest.mark.parametrize('flag,flip', [ (None, True), (' ', True), ('None', True), ('Indef', True), (None, False), (' ', False), ('None', False), ('Indef', False), ('1', True), ('1', False) ]) def test_interpret_None_or_str_and_flip_incompatibility(flag, flip): with pytest.raises(TypeError): bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip) @pytest.mark.parametrize('flag', [True, 1.0, [1.0], object]) def test_interpret_wrong_flag_type(flag): with pytest.raises(TypeError): bitmask.interpret_bit_flags(bit_flags=flag) @pytest.mark.parametrize('flag', ['SOMETHING', '1.0,2,3']) def test_interpret_wrong_string_int_format(flag): with pytest.raises(ValueError): bitmask.interpret_bit_flags(bit_flags=flag) def test_interpret_duplicate_flag_warning(): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") assert bitmask.interpret_bit_flags([2, 4, 4]) == 6 assert len(w) assert issubclass(w[-1].category, UserWarning) assert "Duplicate" in str(w[-1].message) @pytest.mark.parametrize('flag', [[1, 2, 3], '1, 2, 3']) def test_interpret_non_flag(flag): with pytest.raises(ValueError): bitmask.interpret_bit_flags(bit_flags=flag) def test_interpret_allow_single_value_str_nonflags(): assert bitmask.interpret_bit_flags(bit_flags=str(3)) == 3 @pytest.mark.parametrize('flag', [ '~', '( )', '(~1,2)', '~(1,2', '1,~2', '1,(2,4)', '1,2+4', '1+4,2' ]) def test_interpret_bad_str_syntax(flag): with pytest.raises(ValueError): bitmask.interpret_bit_flags(bit_flags=flag) def 
test_bitfield_must_be_integer_check(): with pytest.raises(TypeError): bitmask.bitfield_to_boolean_mask(1.0, 1) @pytest.mark.parametrize('data,flags,flip,goodval,dtype,ref', [ (EXTREME_TEST_DATA, None, None, True, np.bool_, EXTREME_TEST_DATA.size * [1]), (EXTREME_TEST_DATA, None, None, False, np.bool_, EXTREME_TEST_DATA.size * [0]), (EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, True, np.bool_, [1, 1, 0, 0, 0, 1, 1]), (EXTREME_TEST_DATA, None, None, True, np.bool_, EXTREME_TEST_DATA.size * [1]), (EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, False, np.bool_, [0, 0, 1, 1, 1, 0, 0]), (EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], True, True, np.int8, [1, 0, 1, 1, 0, 0, 0]) ]) def test_bitfield_to_boolean_mask(data, flags, flip, goodval, dtype, ref): mask = bitmask.bitfield_to_boolean_mask( bitfield=data, ignore_flags=flags, flip_bits=flip, good_mask_value=goodval, dtype=dtype ) assert(mask.dtype == dtype) assert np.all(mask == ref)
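

# An added, non-parametrized example (not from the original suite): one concrete
# call spelling out the keyword arguments exercised by the fixtures above.
def test_direct_usage_example():
    """Flag 4 is ignored, so only pixels with bit 1 set are marked as bad."""
    assert bitmask.interpret_bit_flags('~(1,2)') == ~3

    dq = np.array([0, 1, 4, 5], dtype=np.int16)
    good = bitmask.bitfield_to_boolean_mask(
        bitfield=dq,
        ignore_flags=[4],
        flip_bits=False,
        good_mask_value=True,
        dtype=np.bool_
    )
    assert good.tolist() == [True, False, True, False]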
StarcoderdataPython
3449837
# Lucas-Mc/physionet-build: physionet-django/project/migrations/0008_auto_20190314_1322.py
# Generated by Django 2.1.7 on 2019-03-14 17:22

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('project', '0007_auto_20190308_1445'),
    ]

    operations = [
        migrations.AlterField(
            model_name='activeproject',
            name='version',
            field=models.CharField(blank=True, default='', max_length=15),
        ),
        migrations.AlterField(
            model_name='archivedproject',
            name='version',
            field=models.CharField(blank=True, default='', max_length=15),
        ),
        migrations.AlterField(
            model_name='publishedproject',
            name='version',
            field=models.CharField(blank=True, default='', max_length=15),
        ),
    ]
StarcoderdataPython
9643428
<reponame>abb-iss/distributed-fuzzy-vault """ Chaff Points Generator to create randomized Minutia """ import random from Minutia import MinutiaNBIS import Constants class ChaffPointsGenerator: @staticmethod def generate_chaff_points_randomly(amount, genuine_minutiae, smallest_minutia_rep, minutia_converter): """ create the amount of chaff points (Minutia) desired Chaff points need to have at least a specified distance from all other genuine minutiae and chaff points :returns a list of Minutia randomly generated """ chaff_points_list = [] all_vault_points = genuine_minutiae.copy() for _ in range(amount): plausible_minutia = False while not plausible_minutia: x_random = random.randrange(MinutiaNBIS.X_MIN, MinutiaNBIS.X_MAX) y_random = random.randrange(MinutiaNBIS.Y_MIN, MinutiaNBIS.Y_MAX) theta_random = random.randrange(MinutiaNBIS.THETA_MIN, MinutiaNBIS.THETA_MAX) quality_random = random.randrange(MinutiaNBIS.QUALITY_MIN, MinutiaNBIS.QUALITY_MAX) chaff_point = MinutiaNBIS(x_random, y_random, theta_random, quality_random) if minutia_converter.get_uint_from_minutia(chaff_point) >= (smallest_minutia_rep // 2): too_close = False for minutia in all_vault_points: if chaff_point.distance_to(minutia) <= Constants.POINTS_DISTANCE: too_close = True break if not too_close: chaff_points_list.append(chaff_point) all_vault_points.append(chaff_point) plausible_minutia = True return chaff_points_list
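

if __name__ == "__main__":
    # A minimal usage sketch, not from the original project. The converter
    # import below and the genuine minutiae values are assumptions made for
    # illustration; in the real pipeline they come from an extracted template.
    from MinutiaConverter import MinutiaConverter  # hypothetical import path

    converter = MinutiaConverter()
    genuine = [
        MinutiaNBIS(100, 120, 90, 40),   # made-up x, y, theta, quality values
        MinutiaNBIS(200, 60, 10, 55),
    ]
    smallest_rep = min(converter.get_uint_from_minutia(m) for m in genuine)

    chaff = ChaffPointsGenerator.generate_chaff_points_randomly(
        amount=200,
        genuine_minutiae=genuine,
        smallest_minutia_rep=smallest_rep,
        minutia_converter=converter,
    )
    print(len(chaff), "chaff points generated")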
StarcoderdataPython
4920330
<reponame>sbl1996/pytorch-hrvvi-ext import torch.nn as nn from horch.nn import Flatten from horch.models.layers import Conv2d, Linear class LeNet5(nn.Module): def __init__(self, in_channels=1, num_classes=10, dropout=None): super().__init__() self.features = nn.Sequential( Conv2d(in_channels, 6, 5, stride=1, padding=0, act='def'), nn.MaxPool2d(2, 2), Conv2d(6, 16, 5, stride=1, padding=0, act='def'), nn.MaxPool2d(2, 2), ) classifier = [ Flatten(), Linear(400, 120, act='def'), Linear(120, 84, act='def'), Linear(84, num_classes) ] if dropout: classifier.insert(0, nn.Dropout(dropout)) classifier.insert(2, nn.Dropout(dropout)) classifier.insert(4, nn.Dropout(dropout)) self.classifier = nn.Sequential(*classifier) def forward(self, x): x = self.features(x) x = self.classifier(x) return x
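

if __name__ == "__main__":
    # Quick shape check, assuming the horch layers above behave like their
    # torch.nn counterparts: classic LeNet-5 takes 32x32 single-channel images,
    # which is exactly what makes the flattened feature size 16 * 5 * 5 = 400
    # in the classifier head.
    import torch

    model = LeNet5(in_channels=1, num_classes=10, dropout=0.5)
    x = torch.randn(4, 1, 32, 32)
    logits = model(x)
    assert logits.shape == (4, 10)
    print(logits.shape)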
StarcoderdataPython
1821547
import ast

while True:
    try:
        string = input('What numbers should I average? ')
        words = string.split()
        numbers = [ast.literal_eval(word) for word in words]
        total = sum(numbers)
        count = len(numbers)
        average = 1.0 * total / count
        print('The average is', average)
        input('Press enter to quit.')
        break
    except (ValueError, SyntaxError, ZeroDivisionError):
        # non-numeric input or an empty line
        print('Please only give me numbers.')
StarcoderdataPython
1981643
def main(): n = int(input()) a = 'I hate it' b = 'I hate that' c = 'I love it' d = 'I love that' for i in range(1,n): if i % 2 == 1: print(b,end=" ") else: print(d,end=" ") if n % 2 == 1: print(a,end=" ") if n % 2 == 0: print(c,end=" ") if __name__ == "__main__": main()
StarcoderdataPython
4943380
<reponame>capriciash/civicu_app import os import sys import json import django from django.conf import settings # noqa Django magic starts here import PIL.Image from PIL.ExifTags import TAGS as tag_num2name # `labeler_site` must be a python package installed in your environment (virtualenv) # OR "install" it manually before running this: `export PYTHONPATH=$PYTHONPATH:/path/to/labeler_site_basedir/` os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'civicu_app.settings') django.setup() if __name__ == '__main__': # if len(sys.args) > 1: # image_path = ' '.join(sys.args[1:]) # else: image_path = os.path.join(settings.BASE_DIR, 'user_uploads', 'images', 'baby_ghost.png') img = PIL.Image.open(image_path) exif_data = img._getexif() # pprint(exif_data) # {271: 'Icatch', # 272: 'WF121', # 282: (240, 1), # 283: (240, 1), # 296: 2, # 305: 'Adobe Photoshop Lightroom 4.4 (Windows)', # 306: '2016:07:31 14:16:14', # 33434: (1, 60), # 33437: (32, 10), # 34665: 200, # 34850: 2, # 34855: 100, # 36864: b'0230', # 36867: '2014:04:27 17:40:29', # 36868: '2014:04:27 17:40:29', # 37377: (5906891, 1000000), # 37378: (3356144, 1000000), # 37379: (-5000, 1000), # 37380: (0, 10), # 37381: (3, 2), # 37383: 4, # 37384: 0, # 37385: 1, # 37386: (82, 11), # 37396: (1536, 1152, 3072, 2304), # 41728: b'\x03', # 41729: b'\x01', # 41985: 0, # 41986: 0, # 41987: 0, # 41988: (0, 1), # 41990: 0, # 41991: 0, # 41992: 0, # 41993: 0, # 41994: 0, # 41996: 0} exif_data = dict( zip( map(tag_num2name.get, exif_data.keys()), exif_data.values() ) ) print(json.dumps(exif_data, indent=2))
StarcoderdataPython
3399968
<filename>src/centrol_bots/centrol_discord.py from configs.user_messages import ( CHART_NOT_AVAILABLE, ALERT_KEY_NOT_KNOWN, HOT_TIP_1, HOT_TIP_2, HOT_TIP_3, HOT_TIP_4, HOT_TIP_5, HOT_TIP_6, HOT_TIP_7, ) from discord_slash.utils.manage_commands import create_option, create_choice from centrol_utils.make_request import create_discord_meta from discord_slash import SlashCommand, SlashContext from configs import user_messages as user_msgs import logging, discord, os, asyncio, pyjokes from centrol.stocks import send_crypto_order from centrol.get_data import GetCentrolData from centrol import ALERT_KEY_TO_IEX_MAP from configs.config import CentrolConfig from centrol.alerts import create_alert from discord.channel import TextChannel from centrol.user import CentrolUser from discord.message import Message from datetime import datetime from typing import Tuple import random log = logging.getLogger(__name__) class DiscordClient: async def connect(self): token = os.getenv("DISCORD_TOKEN") try: await self.client.login(token) self.loop.create_task(self.client.connect()) finally: log.warning("stopping") async def send_broadcast(self): user = await self.client.fetch_user("id") await user.send("This is a test broadcast") return "" # Stock Quote Request async def get_stock_quote( self, message: discord.Message, sym: str = None, is_slash: bool = False ) -> discord.Embed: if sym is None: sym = "".join(message.content.split("!s")).strip().upper() data, result = self.centrol_data.get_latest_stock_price( sym, meta=create_discord_meta(message, is_slash=is_slash) ) embed = discord.Embed( color=0x0050C7, # TODO: add color here based on +/- on the regularMarketChangePercent. green if +, red if - ) if result: chart = self.centrol_data.get_stock_chart( sym, meta=create_discord_meta(message, is_slash=is_slash) ) if chart is None: msg = embed.add_field(name=f"**{sym.upper()} Quote**", value=data) else: msg = embed.set_image(url=chart) msg.add_field(name=f"**{sym.upper()} Quote**", value=data) else: msg = embed.add_field(name=f"**{sym.upper()} Quote**", value=data) hot_tip_list = [ HOT_TIP_1, HOT_TIP_2, HOT_TIP_3, HOT_TIP_5, HOT_TIP_6, HOT_TIP_7, ] embed.set_thumbnail( url=f"https://storage.googleapis.com/iexcloud-hl37opg/api/logos/{sym.upper()}.png" ) # TODO: move this to Centrol Core embed.add_field( name="▰▰▰《 **Centrol Tips** 》▰▰▰", value=random.choice(hot_tip_list), inline=False, ) embed.set_footer( text="Join our iOS/Android app waitlist with a quick survey,\n👉 https://share.centrol.io/e/survey", icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", ) return msg # Stock Chart Resquest async def get_stock_chart( self, message: discord.Message, sym: str = None, is_slash: bool = False ) -> discord.Embed: if sym is None: sym = "".join(message.content.split("!sc")).strip().lower() data, result = self.centrol_data.get_latest_stock_price( sym, meta=create_discord_meta(message, is_slash=is_slash) ) embed = discord.Embed( title=f"{sym.upper()} Chart", color=0x0050C7, # TODO: add color here based on +/- on the regularMarketChangePercent. 
green if +, red if - ) chart = self.centrol_data.get_stock_chart( sym, meta=create_discord_meta(message, is_slash=is_slash) ) if chart is None: msg = embed.add_field( name="Chart not available", value=CHART_NOT_AVAILABLE.format(sym=sym.upper()), ) else: msg = embed.set_image(url=chart) embed.set_footer( text="Join our iOS/Android app waitlist with a quick survey,\n👉 https://share.centrol.io/e/survey", icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", ) return msg # Crypto Quote Request async def get_crypto_quote( self, message: discord.Message, sym: str = None, is_slash: bool = False ) -> discord.Embed: if sym is None: sym = "".join(message.content.split("!c")).strip().lower() data, result = self.centrol_data.get_latest_crypto_price( sym, meta=create_discord_meta(message, is_slash=is_slash) ) embed = discord.Embed( color=0x0050C7, # TODO: add color here based on +/- on the regularMarketChangePercent. green if +, red if - ) if result: chart = self.centrol_data.get_crypto_chart( sym, meta=create_discord_meta(message, is_slash=is_slash) ) if chart is None: msg = embed.add_field(name=f"**{sym.upper()} Quote**", value=data) else: msg = embed.set_image(url=chart) msg.add_field(name=f"**{sym.upper()} Quote**", value=data) else: msg = embed.add_field(name=f"**{sym.upper()} Quote**", value=data) hot_tip_list = [ HOT_TIP_1, HOT_TIP_2, HOT_TIP_3, HOT_TIP_4, HOT_TIP_6, HOT_TIP_7, ] embed.add_field( name="▰▰▰《 **Centrol Tips** 》▰▰▰", value=random.choice(hot_tip_list), inline=False, ) embed.set_footer( text="Join our iOS/Android app waitlist with a quick survey,\n👉 https://share.centrol.io/e/survey", icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", ) return msg # Crypto Chart Request async def get_crypto_chart( self, message: discord.Message, sym: str = None, is_slash: bool = False ) -> discord.Embed: if sym is None: sym = "".join(message.content.split("!cc")).strip().lower() embed = discord.Embed( title=f"{sym.upper()} Chart", color=0x0050C7, ) chart = self.centrol_data.get_crypto_chart( sym, meta=create_discord_meta(message, is_slash=is_slash) ) if chart is None: msg = embed.add_field( name="Chart not available", value=CHART_NOT_AVAILABLE.format(sym=sym.upper()), ) else: msg = embed.set_image(url=chart) embed.set_footer( text="Join our iOS/Android app waitlist with a quick survey,\n👉 https://share.centrol.io/e/survey", icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", ) return msg # Create Stock Alert async def create_stock_alert( self, message: discord.Message, key: str = None, sym: str = None, val: str = None, is_slash: bool = False, ) -> discord.Embed: if not is_slash: params = "".join(message.content.split("!salert")).strip().upper() try: key, sym, val = params.split(" ") except ValueError: key, sym, val = "price", *params.split(" ") mapped_key = ALERT_KEY_TO_IEX_MAP.get(key.lower()) if mapped_key is None: opts = ", ".join(list(ALERT_KEY_TO_IEX_MAP.keys())) await message.channel.send(ALERT_KEY_NOT_KNOWN.format(op="/", opts=opts)) return response = create_alert( message.author.id, message.channel.id, message.message.id if is_slash else message.id, sym, mapped_key, float(val), "stock", "discord", meta=create_discord_meta(message, is_slash=is_slash), ) embed = discord.Embed( title=f"🚨 {sym.upper()} Alert Created", color=0x0050C7, ) msg = embed.add_field(name="▰▰▰▰▰▰▰▰▰", value=response) hot_tip_list = [ HOT_TIP_1, 
HOT_TIP_2, HOT_TIP_3, HOT_TIP_4, HOT_TIP_5, HOT_TIP_6, HOT_TIP_7, ] embed.set_thumbnail( url=f"https://storage.googleapis.com/iexcloud-hl37opg/api/logos/{sym.upper()}.png" ) embed.add_field( name="▰▰▰《 **Centrol Tips** 》▰▰▰", value=random.choice(hot_tip_list), inline=False, ) embed.set_footer( text="Join our iOS/Android app waitlist with a quick survey,\n👉 https://share.centrol.io/e/survey", icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", ) return msg # Create Crypto Alert async def create_crypto_alert( self, message: discord.Message, key: str = None, sym: str = None, val: str = None, is_slash: bool = False, ) -> discord.Embed: if not is_slash: params = "".join(message.content.split("!calert")).strip().upper() try: key, sym, val = params.split(" ") except ValueError: key, sym, val = "price", *params.split(" ") mapped_key = ALERT_KEY_TO_IEX_MAP.get(key.lower()) if mapped_key is None: opts = ", ".join(list(ALERT_KEY_TO_IEX_MAP.keys())) await message.channel.send(ALERT_KEY_NOT_KNOWN.format(op="/", opts=opts)) return response = create_alert( message.author.id, message.channel.id, message.message.id if is_slash else message.id, sym, mapped_key, float(val), "crypto", "discord", meta=create_discord_meta(message, is_slash=is_slash), ) embed = discord.Embed( title=f"🚨 {sym.upper()} Alert Created", color=0x0050C7, ) msg = embed.add_field(name="▰▰▰▰▰▰▰▰▰", value=response) hot_tip_list = [ HOT_TIP_1, HOT_TIP_2, HOT_TIP_3, HOT_TIP_4, HOT_TIP_5, HOT_TIP_6, HOT_TIP_7, ] embed.set_thumbnail( url=f"https://storage.googleapis.com/iexcloud-hl37opg/api/logos/{sym.upper()}.png" ) embed.add_field( name="▰▰▰《 **Centrol Tips** 》▰▰▰", value=random.choice(hot_tip_list), inline=False, ) embed.set_footer( text="Join our iOS/Android app waitlist with a quick survey,\n👉 https://share.centrol.io/e/survey", icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", ) return msg # TODO: need to get this to work with /alerts # List User's Alerts # async def list_alerts( # self, message: discord.Message, sym: str = None, is_slash: bool = False # ) -> discord.Embed: # all_alerts = self.centrol_data.get_alerts_for_user( # message.author.id, meta=create_discord_meta(message, is_slash=is_slash) # ) # embed = discord.Embed(title=f"{message.author.name}'s Alerts", color=0x0050C7) # msg = ( # self._add_fields(embed, "Stock Alerts:", all_alerts["stocks"]), # self._add_fields(embed, "Crypto Alerts:", all_alerts["crypto"]), # ) # embed.set_footer( # text="Join our iOS/Android app waitlist with quick survey,\n👉 https://share.centrol.io/e/survey", # icon_url="https://cdn.discordapp.com/icons/821482982601785374/442268dc155f03ce0992a14d6707bfd4.webp?size=240", # ) # return msg def __init__(self, config: CentrolConfig): log.info("Setting up discord client") self.user = CentrolUser() self.centrol_data = GetCentrolData(config) self.client = discord.Client(intents=discord.Intents.default()) self.slash = SlashCommand(self.client, sync_commands=True) self.loop = asyncio.get_event_loop() @self.client.event async def on_ready(): log.info("Logged in as") log.info(self.client.user.name) log.info(self.client.user.id) ###################################################################################### #### Slash Commands #### N.B. 
updates to slash commands can take upto an hour to reflect on the client # """ # Adding options and general slash command usage is documented here: # https://discord-py-slash-command.readthedocs.io/en/components/gettingstarted.html#giving-some-options-for-variety # """ # Stock Quote Request @self.slash.slash( name="s", description="Get latest stock quote for 1000's of symbols. E.g. /s AAPL", options=[ create_option( name="symbol", description="Enter the symbol you want to query, e.g. AAPL, TSLA, NOK", option_type=3, required=True, ) ], ) async def stock_quote(ctx: SlashContext, symbol: str): reply = await self.get_stock_quote(ctx, sym=symbol, is_slash=True) await ctx.send(embed=reply) # Stock Chart Request @self.slash.slash( name="sc", description="Get latest stock chart for 1000's of symbols. E.g. /sc AAPL", options=[ create_option( name="symbol", description="Enter the symbol you want to query, e.g. AAPL, TSLA, NOK", option_type=3, required=True, ) ], ) async def stock_chart(ctx: SlashContext, symbol: str): reply = await self.get_stock_chart(ctx, sym=symbol, is_slash=True) await ctx.send(embed=reply) # Crypto Quote Request @self.slash.slash( name="c", description="Get latest crypto quote for 1000's of tokens. E.g. /c BTC", options=[ create_option( name="token", description="Enter the token you want to query, e.g. BTC, BTC-EUR, ETH, BTC-ETH", option_type=3, required=True, ) ], ) async def crypto_quote(ctx: SlashContext, token: str): reply = await self.get_crypto_quote(ctx, sym=token, is_slash=True) await ctx.send(embed=reply) # Crypto Chart Request @self.slash.slash( name="cc", description="Get latest crypto chart for 1000's of tokens. E.g. /cc BTC", options=[ create_option( name="token", description="Enter the token you want to query, e.g. BTC, BTC-EUR, ETH, BTC-ETH", option_type=3, required=True, ) ], ) async def crypto_chart(ctx: SlashContext, token: str): reply = await self.get_crypto_chart(ctx, sym=token, is_slash=True) await ctx.send(embed=reply) # Create Stock Alert @self.slash.slash( name="salert", description="Create a stock alert for 1000's of symbols. E.g. /salert AAPL 150", options=[ create_option( name="symbol", description="Enter the symbol you want to alert on, e.g. AAPL, TSLA", option_type=3, required=True, ), create_option( name="value", description="Set the threshold value that you want to alert on, e.g. 150, 20, 25.75", option_type=3, required=True, ), create_option( name="parameter", description="Enter the parameter you want to alert on, e.g. price, mkt-cap, vol", choices=[ create_choice(name="Price", value="price"), create_choice(name="Market Cap", value="mkt-cap"), create_choice(name="Volume", value="vol"), ], option_type=3, required=False, ), ], ) async def stock_alert( ctx: SlashContext, symbol: str, value: str, parameter: str = None ): parameter = "price" if parameter is None else parameter # Create some message so we can reply back when the alert triggers await ctx.send( f"Centrol, create a stock alert on {symbol.upper()} if {parameter} @ {value}" ) reply = await self.create_stock_alert( ctx, key=parameter, sym=symbol.upper(), val=value, is_slash=True, ) await ctx.reply(embed=reply) # Create Crypto Alert @self.slash.slash( name="calert", description="Create a crypto alert for 1000's of tokens. E.g. /calert ETH 4500", options=[ create_option( name="token", description="Enter the token you want to alert on, e.g. BTC, ETH, DOGE", option_type=3, required=True, ), create_option( name="value", description="Set the threshold value that you want to alert on, e.g. 
85000, 4750, 0.5", option_type=3, required=True, ), create_option( name="parameter", description="Enter the parameter you want to alert on, e.g. price, mkt-cap, vol", choices=[ create_choice(name="Price", value="price"), create_choice(name="Market Cap", value="mkt-cap"), create_choice(name="Volume", value="vol"), ], option_type=3, required=False, ), ], ) async def crypto_alert( ctx: SlashContext, token: str, value: str, parameter: str = None ): parameter = "price" if parameter is None else parameter # Create some message so we can reply back when the alert triggers await ctx.send( f"Centrol, create a crypto alert on {token.upper()} if {parameter} @ {value}" ) reply = await self.create_crypto_alert( ctx, key=parameter, sym=token.upper(), val=value, is_slash=True, ) await ctx.reply(embed=reply) # List User alerts @self.slash.slash( name="alerts", description="Get a list of your active alerts", ) async def crypto_chart(ctx: SlashContext): reply = await self.list_alerts(ctx, is_slash=True) await ctx.send(embed=reply) ###################################################################################### async def handle_messages(message): if message.author == self.client.user: return if message.content.startswith("!hello"): async with message.channel.typing(): await message.channel.send("Hello!") if message.content.startswith("!help"): async with message.channel.typing(): await message.channel.send(user_msgs.HELP.format(op="!")) if message.content.startswith("!j"): async with message.channel.typing(): await message.channel.send(pyjokes.get_joke(category="neutral")) # !buy crypto btc 0.00001 if message.content.startswith("!buy crypto mkt"): data = "".join(message.content.split("!buy crypto mkt")).strip().upper() try: crypto_pair, amount = data.split(" ") except: return await message.author.send( user_msgs.EXAMPLE_BUY_ORDER.format(op="!") ) success, msg = await self.buy_crypto( message, crypto_pair, amount, "buy-mkt" ) return await message.author.send(msg) # !buy crypto btc 0.00001 if message.content.startswith("!sell crypto mkt"): data = ( "".join(message.content.split("!sell crypto mkt")).strip().upper() ) try: crypto_pair, amount = data.split(" ") except: return await message.author.send( user_msgs.EXAMPLE_SELL_ORDER.format(op="!") ) success, msg = await self.buy_crypto( message, crypto_pair, amount, "sell-mkt" ) return await message.author.send(msg) if message.content.startswith("!add-token coinbase"): data = "".join(message.content.split("!add-token coinbase")).strip() try: token, passphrase, secret = data.split(" ") except: return await message.author.send( user_msgs.FAILED_TOKEN_ADD.format(op="!") ) success = self.user.add_coinbase_token( str(message.author.id), token, passphrase, secret, "sandbox", meta=create_discord_meta(message), ) if success: return await message.author.send("Added token successully") else: return await message.author.send("Failed to add token") # Stock Quote Request if message.content.startswith("!s "): async with message.channel.typing(): reply = await self.get_stock_quote(message) return await message.channel.send(embed=reply) # Stock Chart Request if message.content.startswith("!sc "): async with message.channel.typing(): reply = await self.get_stock_chart(message) return await message.channel.send(embed=reply) # Crypto Quote Request if message.content.startswith("!c "): async with message.channel.typing(): reply = await self.get_crypto_quote(message) return await message.channel.send(embed=reply) # Crypto Chart Request if message.content.startswith("!cc "): async 
with message.channel.typing(): reply = await self.get_crypto_chart(message) return await message.channel.send(embed=reply) # Create Stock Alerts if message.content.startswith("!salert "): async with message.channel.typing(): reply = await self.create_stock_alert(message) await message.channel.send(embed=reply) # Create Stock Alerts if message.content.startswith("/salert "): async with message.channel.typing(): reply = await self.create_stock_alert(message) await message.channel.send(embed=reply) # Create Crypto Alerts if message.content.startswith("!calert "): async with message.channel.typing(): reply = await self.create_crypto_alert(message) await message.channel.send(embed=reply) # TODO: once the /alerts is up, this will need to be updated. # Listing out the alerts for the user request if message.content.startswith("!alerts"): async with message.channel.typing(): all_alerts = self.centrol_data.get_alerts_for_user( message.author.id, meta=create_discord_meta(message) ) embed = discord.Embed( title=f"**{message.author.name}'s alerts**", color=0x6E85CF ) self._add_fields(embed, "Stock Alerts:", all_alerts["stocks"]) self._add_fields(embed, "Crypto Alerts:", all_alerts["crypto"]) await message.channel.send(embed=embed) pass @self.client.event async def on_message(message: Message): try: await handle_messages(message) except Exception as e: typ = "guild" if isinstance(message.channel, TextChannel) else "private" guild = "" if typ == "private" else str(message.guild.id) log.error(e) if typ == "guild": log.warn( f"Send messages: {message.guild.me.guild_permissions.send_messages}" ) log.warn( f"Read messages: {message.guild.me.guild_permissions.read_messages}" ) log.warn( f"Slash: {message.guild.me.guild_permissions.use_slash_commands}" ) log.error( f"Failed to process message: Guild: {guild}, msg: {message.content}" ) def _add_fields(self, embed, name, stocks): operators = {"lte": "<=", "gte": ">=", "lt": "<", "gt": ">"} embed.add_field(name=name, value="\u200b", inline=False) for alert in stocks: time = datetime.fromisoformat(alert["time_created"]) embed.add_field( name="\u200b", value=f"""```ID: {alert["_id"]}\nTime Created: {time.strftime('%H:%M %m/%d/%Y')}\nSymbol: {alert["sym"]}\nWhere: {alert["key"]} {operators[alert["op"]]} {alert["val"]}\n```""", inline=False, ) async def buy_crypto(self, message, crypto_pair, price, typ) -> Tuple[bool, str]: if not self.user.check_user(message.author.id): self.user.create_user("", message.author.id, message.author.name, "discord") resp = send_crypto_order( message.author.id, crypto_pair, price, typ, "sandbox", "discord", meta=create_discord_meta(message), ) if resp["msg"] == "TOKEN_MISSING": return False, user_msgs.COINBASE_CONNECT.format(op="!") if resp["flag"] == True: return True, user_msgs.SUCCESSFUL_ORDER.format( url="https://public.sandbox.pro.coinbase.com/orders" ) return False, resp["msg"] async def send_notification( self, user_id: str, id: str, msg_id: str, msg: str, msg_typ: str ): # Reply back to user if possible if (msg_id is not None) and (msg_typ == "channel"): msg_tgt = await self.client.fetch_channel(int(id)) try: msg_tgt = await msg_tgt.fetch_message(int(msg_id)) await msg_tgt.reply(msg) except Exception as e: log.warn(f"Failed to fetch message/channel. {e}") await msg_tgt.send(f"<@{user_id}> " + msg) return elif msg_typ == "channel": msg_tgt = await self.client.fetch_channel(int(id)) elif msg_typ == "user": msg_tgt = await self.client.fetch_user(int(id)) else: raise NotImplementedError() await msg_tgt.send(msg) return ""
StarcoderdataPython
6591196
<reponame>mgfzemor/Computational-Biology
import hashlib

subsequences = {}


def read_file():
    # Read the FASTA file, skipping the header line and stripping newlines.
    sequence = ""
    with open("file/sequence.fasta", "r") as file:
        file.readline()
        for line in file.readlines():
            sequence += line.rstrip("\n")
    return sequence


def set_subsequence(sequence):
    # Key the counter on the hex digest: hashlib.md5() needs bytes, and the
    # digest string is a stable dictionary key (md5 objects are not).
    global subsequences
    key = hashlib.md5(sequence.encode()).hexdigest()
    if key in subsequences:
        subsequences[key] += 1
    else:
        subsequences[key] = 1


def count_subsequences(sequence, size):
    limit = len(sequence) - size + 1
    for i in range(0, limit):
        subsequence = sequence[i:i + size]
        set_subsequence(subsequence)


if __name__ == '__main__':
    sequence = read_file()
    count_subsequences(sequence, 37)
    for sub, count in subsequences.items():
        print("{} => {}".format(sub, count))
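# Added sanity-check sketch (not part of the original script): counting k-mers
# directly with collections.Counter should give the same multiset of counts as
# the md5-keyed dictionary above. The toy sequence and k value are assumptions
# chosen only for illustration.
if __name__ == '__main__':
    from collections import Counter
    toy = "ACGTACGTAC"
    k = 5
    direct = Counter(toy[i:i + k] for i in range(len(toy) - k + 1))
    print(sorted(direct.values()))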
StarcoderdataPython
11342326
from setuptools import setup


def readme():
    with open('README.md') as f:
        return f.read()


setup(name='negentropy',
      version='0.2',
      description='C64 disassembler',
      long_description=readme(),
      long_description_content_type='text/markdown',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Programming Language :: Python :: 3 :: Only',
          'Topic :: Games/Entertainment',
          'Topic :: Software Development :: Disassemblers',
          'Topic :: Utilities'
      ],
      keywords='C64 disassembler 6502 6510',
      url='https://github.com/shewitt-au/negentropy',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['negentropy'],
      install_requires=['jinja2', 'pillow', 'lark'],
      entry_points={
          'console_scripts': ['negentropy=negentropy.negentropy:main'],
      },
      include_package_data=True,
      zip_safe=False)
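# Added note (not in the original setup.py): with the entry_points section
# above, installing the package exposes a `negentropy` console command that
# dispatches to negentropy.negentropy:main, roughly:
#
#   pip install .
#   negentropy --help     # the exact CLI flags are defined by the package itself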
StarcoderdataPython
12829974
<reponame>matinraayai/ibex<filename>evaluation/classification.py import numpy as np from numba import jit from sklearn.metrics import auc, average_precision_score, precision_recall_curve, roc_curve @jit(nopython=True) def Prob2Pred(probabilities, threshold=0.5): nentries = probabilities.shape[0] predictions = np.zeros(nentries, dtype=np.uint8) for ie in range(nentries): if probabilities[ie] > threshold: predictions[ie] = True else: predictions[ie] = False return predictions def PrecisionAndRecallCurve(ground_truth, probabilities): precisions, recalls, _ = precision_recall_curve(ground_truth, probabilities) return precisions, recalls, average_precision_score(ground_truth, probabilities) def ReceiverOperatingCharacteristicCurve(ground_truth, probabilities): false_positive_rates, true_positive_rates, _ = roc_curve(ground_truth, probabilities) return false_positive_rates, true_positive_rates, auc(false_positive_rates, true_positive_rates) def PrecisionAndRecall(ground_truth, predictions, output_filename=None, binary=True): assert (ground_truth.shape == predictions.shape) # set all of the counters to zero (TP, FP, FN, TN) = (0, 0, 0, 0) # iterate through every entry for ie in range(predictions.size): # get the label and the prediction label = ground_truth[ie] prediction = predictions[ie] # some slots are used as throwaways if binary and not (label == 0 or label == 1): continue # increment the proper variables if label and prediction: TP += 1 elif not label and prediction: FP += 1 elif label and not prediction: FN += 1 else: TN += 1 # format the output string output_string = 'Positive Examples: {}\n'.format(TP + FN) output_string += 'Negative Examples: {}\n\n'.format(FP + TN) output_string += '+--------------+----------------+\n' output_string += '|{:14s}|{:3s}{:13s}|\n'.format('', '', 'Prediction') output_string += '+--------------+----------------+\n' output_string += '|{:14s}| {:7s}{:7s}|\n'.format('', 'Merge', 'Split') output_string += '|{:8s}{:5s} |{:7d}{:7d} |\n'.format('', 'Merge', TP, FN) output_string += '| {:13s}|{:7s}{:7s} |\n'.format('Truth', '', '') output_string += '|{:8s}{:5s} |{:7d}{:7d} |\n'.format('', 'Split', FP, TN) output_string += '+--------------+----------------+\n' if TP + FP == 0: output_string += 'Precision: NaN\n' else: output_string += 'Precision: {}\n'.format(float(TP) / float(TP + FP)) if TP + FN == 0: output_string += 'Recall: NaN\n' else: output_string += 'Recall: {}\n'.format(float(TP) / float(TP + FN)) output_string += 'Accuracy: {}'.format(float(TP + TN) / float(TP + FP + FN + TN)) # output the string to the output file and standard out print(output_string) if output_filename is not None: with open(output_filename, 'w') as fd: fd.write(output_string)
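# Added usage sketch (not part of the original module). It exercises the two
# helpers defined above on toy data and assumes numba and scikit-learn from the
# imports are installed; the probabilities and labels are illustrative only.
if __name__ == '__main__':
    probabilities = np.array([0.9, 0.2, 0.7, 0.4])
    ground_truth = np.array([1, 0, 1, 1], dtype=np.uint8)
    predictions = Prob2Pred(probabilities, threshold=0.5)
    PrecisionAndRecall(ground_truth, predictions)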
StarcoderdataPython
1677028
import pandas as pd import numpy as np import os import datetime # Helpers # Identify Win/Loss Streaks if any. def get_3game_ws(last_matches): if hasattr(last_matches, "__len__"): return 1 if len(last_matches) > 3 and last_matches[-3:] == 'WWW' else 0 return np.nan def get_5game_ws(last_matches): if hasattr(last_matches, "__len__"): return 1 if last_matches == 'WWWWW' else 0 return np.nan def get_3game_ls(last_matches): if hasattr(last_matches, "__len__"): return 1 if len(last_matches) > 3 and last_matches[-3:] == 'LLL' else 0 return np.nan def get_5game_ls(last_matches): if hasattr(last_matches, "__len__"): return 1 if last_matches == 'LLLLL' else 0 return np.nan def get_5win_rate(last_matches): if hasattr(last_matches, "__len__") and len(last_matches) == 5: win_count = last_matches.count('W') return win_count / len(last_matches) else: return np.nan def get_current_season(): now = datetime.datetime.now() # By July, fixture of the season should be available. new_season_start = datetime.datetime(now.year, 7, 1) return now.year if now > new_season_start else now.year - 1 # Calculate match played, current standing, goal for, goal against, goal difference, winning/losing streaks, etc. # Input is csv that is just cleaned from raw_data data # Output is csv modified with each row added match played, current standing, GF, GA, GD, winning/losing streaks, etc. def add_current_details(from_path, to_path, standings_path, year_available_from): team_detail, match_detail = {}, {} match_detail_columns = [ 'HT_match_played', 'HT_current_standing', 'HT_past_standing', 'HT_past_goal_diff', 'HT_past_win_rate', 'HT_goal_for', 'HT_goal_against', 'HT_goal_diff', 'HT_win_rate_season', 'AT_match_played', 'AT_current_standing', 'AT_past_standing', 'AT_past_goal_diff', 'AT_past_win_rate', 'AT_goal_for', 'AT_goal_against', 'AT_goal_diff', 'AT_win_rate_season', 'HT_last_5', 'HT_last_4', 'HT_last_3', 'HT_last_2', 'HT_last_1', 'AT_last_5', 'AT_last_4', 'AT_last_3', 'AT_last_2', 'AT_last_1' ] for item in match_detail_columns: match_detail[item] = [] df = pd.read_csv(from_path) previous_year = int(from_path[-13:-9]) - 1 standings = dict() # We only have data from 1993 to current. That means We don't have 'previous year' data at 1993. 
if previous_year > year_available_from: df_standings = pd.read_csv('{}/{}-{}.csv'.format(standings_path, previous_year, previous_year + 1)) for index, row in df_standings.iterrows(): standings[row['Team']] = dict() standings[row['Team']]['Points'] = row['Points'] standings[row['Team']]['Goal_Diff'] = row['Goal_Diff'] standings[row['Team']]['Win_Rate'] = row['Win_Rate'] for index, row in df.iterrows(): home_team = row['HomeTeam'] away_team = row['AwayTeam'] if home_team not in team_detail: team_detail[home_team] = { 'match_played': 0, 'win': 0, 'current_standing': 0, 'past_standing': standings[home_team]['Points'] if home_team in standings else -1, 'past_goal_diff': standings[home_team]['Goal_Diff'] if home_team in standings else -1, 'past_win_rate': standings[home_team]['Win_Rate'] if home_team in standings else 0, 'goal_for': 0, 'goal_against': 0, 'goal_difference': 0, 'last_5_matches': [""] * 5 } if away_team not in team_detail: team_detail[away_team] = { 'match_played': 0, 'win': 0, 'current_standing': 0, 'past_standing': standings[away_team]['Points'] if away_team in standings else -1, 'past_goal_diff': standings[away_team]['Goal_Diff'] if away_team in standings else -1, 'past_win_rate': standings[away_team]['Win_Rate'] if away_team in standings else 0, 'goal_for': 0, 'goal_against': 0, 'goal_difference': 0, 'last_5_matches': [""] * 5 } team_detail_home_team = team_detail[home_team] team_detail_away_team = team_detail[away_team] if len(team_detail_home_team['last_5_matches']) != 5 or len(team_detail_away_team['last_5_matches']) != 5: break match_detail['HT_match_played'].append(team_detail_home_team['match_played']) match_detail['HT_current_standing'].append(team_detail_home_team['current_standing']) match_detail['HT_past_standing'].append(team_detail_home_team['past_standing']) match_detail['HT_past_goal_diff'].append(team_detail_home_team['past_goal_diff']) match_detail['HT_past_win_rate'].append(team_detail_home_team['past_win_rate']) match_detail['HT_goal_for'].append(team_detail_home_team['goal_for']) match_detail['HT_goal_against'].append(team_detail_home_team['goal_against']) match_detail['HT_goal_diff'].append(team_detail_home_team['goal_difference']) match_detail['AT_match_played'].append(team_detail_away_team['match_played']) match_detail['AT_current_standing'].append(team_detail_away_team['current_standing']) match_detail['AT_past_standing'].append(team_detail_away_team['past_standing']) match_detail['AT_past_goal_diff'].append(team_detail_away_team['past_goal_diff']) match_detail['AT_past_win_rate'].append(team_detail_away_team['past_win_rate']) match_detail['AT_goal_for'].append(team_detail_away_team['goal_for']) match_detail['AT_goal_against'].append(team_detail_away_team['goal_against']) match_detail['AT_goal_diff'].append(team_detail_away_team['goal_difference']) match_detail['HT_win_rate_season'].append( team_detail_home_team['win'] / team_detail_home_team['match_played'] if team_detail_home_team['match_played'] > 0 else np.nan) match_detail['AT_win_rate_season'].append( team_detail_away_team['win'] / team_detail_away_team['match_played'] if team_detail_away_team['match_played'] > 0 else np.nan) match_detail['HT_last_5'].append(team_detail_home_team['last_5_matches'][0]) match_detail['AT_last_5'].append(team_detail_away_team['last_5_matches'][0]) match_detail['HT_last_4'].append(team_detail_home_team['last_5_matches'][1]) match_detail['AT_last_4'].append(team_detail_away_team['last_5_matches'][1]) 
match_detail['HT_last_3'].append(team_detail_home_team['last_5_matches'][2]) match_detail['AT_last_3'].append(team_detail_away_team['last_5_matches'][2]) match_detail['HT_last_2'].append(team_detail_home_team['last_5_matches'][3]) match_detail['AT_last_2'].append(team_detail_away_team['last_5_matches'][3]) match_detail['HT_last_1'].append(team_detail_home_team['last_5_matches'][4]) match_detail['AT_last_1'].append(team_detail_away_team['last_5_matches'][4]) team_detail_home_team['match_played'] += 1 team_detail_away_team['match_played'] += 1 team_detail_home_team['goal_for'] += row['FTHG'] team_detail_away_team['goal_for'] += row['FTAG'] team_detail_home_team['goal_against'] += row['FTAG'] team_detail_away_team['goal_against'] += row['FTHG'] gd = row['FTHG'] - row['FTAG'] team_detail_home_team['goal_difference'] += gd team_detail_away_team['goal_difference'] -= gd team_detail_home_team['last_5_matches'].pop(0) team_detail_away_team['last_5_matches'].pop(0) game_result = row['FTR'] if game_result == 'H': team_detail_home_team['current_standing'] += 3 team_detail_home_team['win'] += 1 team_detail_home_team['last_5_matches'].append('W') team_detail_away_team['last_5_matches'].append('L') elif game_result == 'A': team_detail_away_team['current_standing'] += 3 team_detail_away_team['win'] += 1 team_detail_home_team['last_5_matches'].append('L') team_detail_away_team['last_5_matches'].append('W') elif game_result == 'D': team_detail_home_team['current_standing'] += 1 team_detail_away_team['current_standing'] += 1 team_detail_home_team['last_5_matches'].append('D') team_detail_away_team['last_5_matches'].append('D') columnList = list(df) for key, match_results in match_detail.items(): df[key] = pd.Series(match_results) df = df[columnList + match_detail_columns] df['HT_last_matches'] = df['HT_last_5'] + df['HT_last_4'] + df['HT_last_3'] + df['HT_last_2'] + df['HT_last_1'] df['AT_last_matches'] = df['AT_last_5'] + df['AT_last_4'] + df['AT_last_3'] + df['AT_last_2'] + df['AT_last_1'] df['HT_3_win_streak'] = df['HT_last_matches'].apply(get_3game_ws) df['HT_5_win_streak'] = df['HT_last_matches'].apply(get_5game_ws) df['HT_3_lose_Streak'] = df['HT_last_matches'].apply(get_3game_ls) df['HT_5_lose_Streak'] = df['HT_last_matches'].apply(get_5game_ls) df['AT_3_win_streak'] = df['AT_last_matches'].apply(get_3game_ws) df['AT_5_win_streak'] = df['AT_last_matches'].apply(get_5game_ws) df['AT_3_lose_Streak'] = df['AT_last_matches'].apply(get_3game_ls) df['AT_5_lose_Streak'] = df['AT_last_matches'].apply(get_5game_ls) df['HT_5_win_rate'] = df['HT_last_matches'].apply(get_5win_rate) df['AT_5_win_rate'] = df['AT_last_matches'].apply(get_5win_rate) df['current_standing_diff'] = df['HT_current_standing'] - df['AT_current_standing'] df['past_standing_diff'] = df['HT_past_standing'] - df['AT_past_standing'] df['past_goal_diff_diff'] = df['HT_past_goal_diff'] - df['AT_past_goal_diff'] df['past_win_rate_diff'] = df['HT_past_win_rate'] - df['AT_past_win_rate'] df['past_standing_diff'] = df['HT_past_standing'] - df['AT_past_standing'] df['win_rate_season_diff'] = df['HT_win_rate_season'] - df['AT_win_rate_season'] df['goal_diff_diff'] = df['HT_goal_diff'] - df['AT_goal_diff'] dropLabels = ['HT_last_' + str(x + 1) for x in range(5)] + ['AT_last_' + str(x + 1) for x in range(5)] dropLabels += ['HT_last_matches', 'AT_last_matches'] df = df.drop(columns=dropLabels) df.to_csv(to_path, index=False) def add_current_details_all(from_folder_path, to_folder_path, standings_path, from_year, to_year, year_available_from): for year in 
range(from_year, to_year + 1): file = '{}-{}.csv'.format(year, year + 1) from_path = os.path.join(from_folder_path, file) to_path = os.path.join(to_folder_path, file) print("About to add 'current details' from {} to {}...".format(from_path, to_path)) add_current_details(from_path, to_path, standings_path, year_available_from)
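# Added usage sketch (not part of the original script). The folder names below
# are hypothetical; the functions expect season CSVs named like "2018-2019.csv"
# in the source folder and a standings folder using the same naming scheme,
# with 1993 as the first season for which data exists (per the comment above).
if __name__ == '__main__':
    add_current_details_all(
        from_folder_path='data/cleaned',       # hypothetical input folder
        to_folder_path='data/with_details',    # hypothetical output folder
        standings_path='data/standings',       # hypothetical standings folder
        from_year=2010,
        to_year=2018,
        year_available_from=1993,
    )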
StarcoderdataPython
3410090
""" Initialize the TCKDB backend app tests models module """
StarcoderdataPython
11203065
import _plotly_utils.basevalidators


class HoveronValidator(_plotly_utils.basevalidators.FlaglistValidator):
    def __init__(self, plotly_name='hoveron', parent_name='violin', **kwargs):
        super(HoveronValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='style',
            extras=['all'],
            flags=['violins', 'points', 'kde'],
            role='info',
            **kwargs
        )
StarcoderdataPython
3491499
<reponame>deeplow/nose2 import sys # This unused import is not very elegant, but it allows eggdiscovery to be found in # Travis (or when run with PYTHONPATH=.) import nose2.plugins.loader.eggdiscovery # noqa: F401 from nose2.tests._common import FunctionalTestCase, support_file try: import pkg_resources except ImportError: pkg_resources = None else: class EggDiscoveryFunctionalTest(FunctionalTestCase): def setUp(self): for m in [m for m in sys.modules if m.startswith("pkgegg")]: del sys.modules[m] self.egg_path = support_file( "scenario/tests_in_zipped_eggs/pkgegg-0.0.0-py2.7.egg" ) sys.path.append(self.egg_path) def tearDown(self): if self.egg_path in sys.path: sys.path.remove(self.egg_path) for m in [m for m in sys.modules if m.startswith("pkgegg")]: del sys.modules[m] def test_non_egg_discoverer_does_not_fail_when_looking_in_egg(self): proc = self.runIn("scenario/tests_in_zipped_eggs", "-v", "pkgegg") self.assertTestRunOutputMatches(proc, stderr="Ran 0 tests in") def test_can_discover_test_modules_in_zipped_eggs(self): proc = self.runIn( "scenario/tests_in_zipped_eggs", "-v", "--plugin=nose2.plugins.loader.eggdiscovery", "pkgegg", ) self.assertTestRunOutputMatches( proc, stderr=r"FAILED \(failures=5, errors=1, skipped=1\)" ) def test_eggdiscovery_failure_does_not_exist(self): proc = self.runIn( "scenario", "-v", "--plugin=nose2.plugins.loader.eggdiscovery", "--exclude-plugin=nose2.plugins.loader.discovery", "-s", "tests_in_zipped_eggs_BAD", ) self.assertTestRunOutputMatches( proc, stderr="tests_in_zipped_eggs_BAD does not exist" ) class UnzippedEggDiscoveryFunctionalTest(FunctionalTestCase): def setUp(self): for m in [m for m in sys.modules if m.startswith("pkgegg")]: del sys.modules[m] self.egg_path = support_file( "scenario/tests_in_unzipped_eggs/pkgunegg-0.0.0-py2.7.egg" ) sys.path.append(self.egg_path) def tearDown(self): if self.egg_path in sys.path: sys.path.remove(self.egg_path) for m in [m for m in sys.modules if m.startswith("pkgunegg")]: del sys.modules[m] def test_eggdiscovery_ignores_unzipped_eggs(self): proc = self.runIn( "scenario/tests_in_unzipped_eggs", "-v", "--plugin=nose2.plugins.loader.eggdiscovery", "pkgunegg", ) self.assertTestRunOutputMatches( proc, stderr=r"FAILED \(failures=5, errors=1, skipped=1\)" )
StarcoderdataPython
11356129
# coding:utf-8 """Application to print some info about model """ # MIT License # Copyright (c) 2020 sMedX import click from pathlib import Path import tensorflow.compat.v1 as tf from facenet import tfutils, config, nodes @click.command() @click.option('--config', default=config.default_model_path, type=Path, help='Path to directory with model.') def main(**options): cfg = config.load_config(__file__, options) cfg.model.path = Path(cfg.model.path).expanduser() input_node_name = nodes['input']['name'] + ':0' output_node_name = nodes['output']['name'] + ':0' with tf.Graph().as_default(): with tf.Session() as sess: fvars = cfg.model.path / 'variables.txt' tfutils.load_model(cfg.model.path) graph = tf.get_default_graph() print() print('length of list of graph operations', len(graph.get_operations())) print('length of list of global variables', len(tf.global_variables())) image_placeholder = graph.get_tensor_by_name(input_node_name) print('image :', image_placeholder) embedding = graph.get_tensor_by_name(output_node_name) print('output:', embedding) phase_train_placeholder = graph.get_tensor_by_name('phase_train:0') print('output:', phase_train_placeholder) print(f'output list of trainable variables {fvars}') with fvars.open('w') as f: f.write('-----------------------------\n') f.write(f'number of trainable variables {len(tf.trainable_variables())}\n') f.write('-----------------------------\n') for i, var in enumerate(tf.trainable_variables()): f.write(f'{i}) {var}\n') with tf.Graph().as_default(): with tf.Session() as sess: fops = cfg.model.path / 'operations.txt' tfutils.load_frozen_graph(cfg.model.path) graph = tf.get_default_graph() print(f'output list of operations from frozen graph {fops}') with fops.open('w') as f: for i, op in enumerate(graph.get_operations()): f.write(f'{i}) {op.name} {op.type}\n') f.write(f'--- inputs [{len(op.inputs)}] {op.inputs}\n') for input_tensor in op.inputs: f.write(f' {input_tensor}\n') f.write(f'--- outputs [{len(op.outputs)}] {op.outputs[0]}\n') for output in op.outputs[1:]: f.write(f' {output}\n') f.write(f'--- values {op.values}\n') if __name__ == '__main__': main()
StarcoderdataPython
106764
"""Version and details for pcraft""" __description__ = "Pcraft" __url__ = "https://www.github.com/devoinc/pcraft" __version__ = "0.1.4" __author__ = "<NAME>" __author_email__ = "<EMAIL>" __license__ = "MIT" __maintainer__ = __author__ __maintainer_email__ = __author_email__
StarcoderdataPython
9735649
# discretization.py # # This file is part of scqubits: a Python package for superconducting qubits, # arXiv:2107.08552 (2021). https://arxiv.org/abs/2107.08552 # # Copyright (c) 2019 and later, <NAME> and <NAME> # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. ############################################################################ from typing import Any, Dict, List, Tuple, Union import numpy as np from numpy import ndarray from scipy import sparse from scipy.sparse.dia import dia_matrix import scqubits.core.central_dispatch as dispatch import scqubits.core.descriptors as descriptors import scqubits.io_utils.fileio_serializers as serializers import scqubits.settings as settings import scqubits.utils.misc as utils FIRST_STENCIL_COEFFS: Dict[int, List[float]] = { 3: [-1 / 2, 0.0, 1 / 2], 5: [1 / 12, -2 / 3, 0.0, 2 / 3, -1 / 12], 7: [-1 / 60, 3 / 20, -3 / 4, 0.0, 3 / 4, -3 / 20, 1 / 60], 9: [1 / 280, -4 / 105, 1 / 5, -4 / 5, 0.0, 4 / 5, -1 / 5, 4 / 105, -1 / 280], } SECOND_STENCIL_COEFFS: Dict[int, List[float]] = { 3: [1, -2, 1], 5: [-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12], 7: [1 / 90, -3 / 20, 3 / 2, -49 / 18, 3 / 2, -3 / 20, 1 / 90], 9: [-1 / 560, 8 / 315, -1 / 5, 8 / 5, -205 / 72, 8 / 5, -1 / 5, 8 / 315, -1 / 560], } def band_matrix( band_coeffs: Union[List[float], List[complex], ndarray], band_offsets: Union[List[int], ndarray], dim: int, dtype: Any = None, has_corners: bool = False, ) -> dia_matrix: """ Returns a dim x dim sparse matrix with constant diagonals of values `band_coeffs[ 0]`, `band_coeffs[1]`, ... along the (off-)diagonals specified by the offsets `band_offsets[0]`, `band_offsets[1]`, ... The `has_corners` option allows generation of band matrices with corner elements, in which lower off-diagonals wrap into the top right corner and upper off-diagonals wrap into the bottom left corner. Parameters ---------- band_coeffs: each element of band_coeffs is a number to be assigned as a constant to the (off-)diagonals band_offsets: offsets specifying the positions of the (off-)diagonals dim: dimension of the matrix dim: (linear) dimension of the matrix dtype: if not specified, dtype is inferred from the dtype of `band_vecs` has_corners: if set to True, the off diagonals are wrapped into the opposing corners of the matrix """ ones_vector = np.ones(dim) vectors = [ones_vector * number for number in band_coeffs] matrix = sparse.dia_matrix((vectors, band_offsets), shape=(dim, dim), dtype=dtype) if not has_corners: return matrix for index, offset in enumerate(band_offsets): if offset < 0: corner_offset = dim + offset corner_band = vectors[index] corner_band = corner_band[offset:] elif offset > 0: corner_offset = -dim + offset corner_band = vectors[index][:-offset] corner_band = corner_band[-offset:] else: # when offset == 0 continue matrix.setdiag(corner_band, k=corner_offset) return matrix class Grid1d(dispatch.DispatchClient, serializers.Serializable): """Data structure and methods for setting up discretized 1d coordinate grid, generating corresponding derivative matrices. 
Parameters ---------- min_val: minimum value of the discretized variable max_val: maximum value of the discretized variable pt_count: number of grid points """ min_val = descriptors.WatchedProperty("GRID_UPDATE") max_val = descriptors.WatchedProperty("GRID_UPDATE") pt_count = descriptors.WatchedProperty("GRID_UPDATE") def __init__(self, min_val: float, max_val: float, pt_count: int) -> None: self.min_val = min_val self.max_val = max_val self.pt_count = pt_count def __repr__(self) -> str: init_dict = self.get_initdata() return type(self).__name__ + f"({init_dict!r})" def __str__(self) -> str: output = "Grid1d -----[ " for param_name, param_val in sorted( utils.drop_private_keys(self.__dict__).items() ): output += str(param_name) + ": " + str(param_val) + ", " output = output[:-3] + " ]" return output def __eq__(self, other: Any) -> bool: if not isinstance(other, type(self)): return False return self.__dict__ == other.__dict__ def __hash__(self): return super().__hash__() def get_initdata(self) -> Dict[str, Any]: """Returns dict appropriate for creating/initializing a new Grid1d object. Returns ------- dict """ return self.__dict__ def grid_spacing(self) -> float: """ Returns ------- spacing between neighboring grid points """ return (self.max_val - self.min_val) / (self.pt_count - 1) def make_linspace(self) -> ndarray: """Returns a numpy array of the grid points Returns ------- ndarray """ return np.linspace(self.min_val, self.max_val, self.pt_count) def first_derivative_matrix( self, prefactor: Union[float, complex] = 1.0, periodic: bool = False ) -> dia_matrix: """Generate sparse matrix for first derivative of the form :math:`\\partial_{x_i}`. Uses STENCIL setting to construct the matrix with a multi-point stencil. Parameters ---------- prefactor: prefactor of the derivative matrix (default value: 1.0) periodic: set to True if variable is a periodic variable Returns ------- sparse matrix in `dia` format """ if isinstance(prefactor, complex): dtp = np.complex_ else: dtp = np.float_ delta_x = self.grid_spacing() matrix_diagonals = [ coefficient * prefactor / delta_x for coefficient in FIRST_STENCIL_COEFFS[settings.STENCIL] ] offset = [i - (settings.STENCIL - 1) // 2 for i in range(settings.STENCIL)] derivative_matrix = band_matrix( matrix_diagonals, offset, self.pt_count, dtype=dtp, has_corners=periodic ) return derivative_matrix def second_derivative_matrix( self, prefactor: Union[float, complex] = 1.0, periodic: bool = False ) -> dia_matrix: """Generate sparse matrix for second derivative of the form :math:`\\partial^2_{x_i}`. Uses STENCIL setting to construct the matrix with a multi-point stencil. Parameters ---------- prefactor: optional prefactor of the derivative matrix (default value = 1.0) periodic: set to True if variable is a periodic variable (default value = False) Returns ------- sparse matrix in `dia` format """ if isinstance(prefactor, complex): dtp = np.complex_ else: dtp = np.float_ delta_x = self.grid_spacing() matrix_diagonals = [ coefficient * prefactor / delta_x ** 2 for coefficient in SECOND_STENCIL_COEFFS[settings.STENCIL] ] offset = [i - (settings.STENCIL - 1) // 2 for i in range(settings.STENCIL)] derivative_matrix = band_matrix( matrix_diagonals, offset, self.pt_count, dtype=dtp, has_corners=periodic ) return derivative_matrix class GridSpec(dispatch.DispatchClient, serializers.Serializable): """Class for specifying a general discretized coordinate grid (arbitrary dimensions). 
Parameters ---------- minmaxpts_array: array of with entries [minvalue, maxvalue, number of points] """ min_vals = descriptors.WatchedProperty("GRID_UPDATE") max_vals = descriptors.WatchedProperty("GRID_UPDATE") var_count = descriptors.WatchedProperty("GRID_UPDATE") pt_counts = descriptors.WatchedProperty("GRID_UPDATE") def __init__(self, minmaxpts_array: ndarray) -> None: self.min_vals = minmaxpts_array[:, 0] self.max_vals = minmaxpts_array[:, 1] self.var_count = len(self.min_vals) self.pt_counts = minmaxpts_array[:, 2].astype(int) # used as int indices def __str__(self) -> str: output = " GridSpec ......" for param_name, param_val in sorted(self.__dict__.items()): output += "\n" + str(param_name) + "\t: " + str(param_val) return output def unwrap(self) -> Tuple[ndarray, ndarray, Union[List[int], ndarray], int]: """Auxiliary routine that yields a tuple of the parameters specifying the grid.""" return self.min_vals, self.max_vals, self.pt_counts, self.var_count
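# Added usage sketch (not part of the original module): build a small
# tridiagonal band matrix and a discretized second-derivative operator from
# the classes and helpers defined above. It assumes scqubits and its
# dependencies are installed; the grid parameters are illustrative.
if __name__ == "__main__":
    # 5x5 discrete Laplacian stencil as a banded sparse matrix
    lap = band_matrix([1.0, -2.0, 1.0], [-1, 0, 1], 5)
    print(lap.toarray())

    # second-derivative matrix on a periodic 1d grid
    grid = Grid1d(min_val=-1.0, max_val=1.0, pt_count=5)
    d2 = grid.second_derivative_matrix(prefactor=1.0, periodic=True)
    print(d2.toarray())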
StarcoderdataPython
6597553
<filename>tests/conftest.py # -*- coding: utf-8 -*- # -------------------------------------------------------- # Licensed under the terms of the BSD 3-Clause License # (see LICENSE for details). # Copyright © 2018-2021, <NAME> # All rights reserved. # -------------------------------------------------------- import json from collections import namedtuple from pathlib import Path import pytest from smartpasslib.factories import GeneratorsFactory from smartpasslib.generators import UrandomGen, BasePassGen, HashGen, KeyGen, SmartPassGen, NormalPassGen from smartpasslib.manager import SmartPassword, SmartPassMan @pytest.fixture(name='context') def context(): key = ('login', 'secret', 'length', 'pub_key', 'normal_pass', 'smart_pass', 'file') Info = namedtuple('Info', key) login = 'login' secret = 'secret' length = 15 pub_key = '15795be051670afec910bc980189a6011f9f184dea4bbbe4e005e4ca89f3' \ '18bea963b1a362167b4de909a4f57e1895298f79346068487881c8c969dce4fe909f' normal_pass = '<PASSWORD>' smart_pass = '<PASSWORD>' file = Path(Path.home()).joinpath('.cases.json') kwargs = dict( login=login, secret=secret, length=length, pub_key=pub_key, normal_pass=normal_pass, smart_pass=smart_pass, file=file, ) return Info(**kwargs) @pytest.fixture(name='gen_factory') def gen_factories(): return GeneratorsFactory() @pytest.fixture(name='urandom') def urandom(): return UrandomGen() @pytest.fixture(name='default') def default(): return BasePassGen() @pytest.fixture(name='hash_gen') def hash_gen(): return HashGen() @pytest.fixture(name='key_gen') def key_gen(): return KeyGen() @pytest.fixture(name='smart') def smart(): return SmartPassGen() @pytest.fixture(name='normal') def normal(): return NormalPassGen() @pytest.fixture(name='smart_password') def smart_password(context): return SmartPassword(login=context.login, key=context.pub_key, length=context.length) @pytest.fixture(name='pass_man') def pass_man(): return SmartPassMan() @pytest.fixture(name='data') def get_data(context): return { context.login: { 'login': context.login, 'key': context.pub_key, 'length': context.length } } @pytest.fixture(name='file') def get_file(tmpdir, data): a_file = tmpdir.join('new_file') with open(a_file, 'w') as f: json.dump(data, f) yield a_file @pytest.fixture(name='bad_data') def get_bad_data(context): return { context.login: { 'login': context.login, 'key': context.pub_key, 'length': context.length }, } @pytest.fixture(name='bad_file') def get_bad_file(file, bad_data): file.write(bad_data) yield file @pytest.fixture(name='smart_password2') def smart_password2(context): return SmartPassword(login='login2', key=context.pub_key, length=context.length)
StarcoderdataPython
6684059
from pw2qmcpack import Pw2qmcpack, Pw2qmcpackInput, Pw2qmcpackAnalyzer
from wfconvert import Wfconvert, WfconvertInput, WfconvertAnalyzer
StarcoderdataPython
304134
# Ask the user for a string and print out whether this string is a palindrome or not.
# (A palindrome is a string that reads the same forwards and backwards.)
text = input("Let's have a string, shall we?")
palindrome = text[::-1] == text
if palindrome:
    print(text, "is a palindrome")
else:
    print(text, "is not a palindrome")
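# Added sketch (not in the original): the slice comparison above is case- and
# punctuation-sensitive. A common variant normalizes the string first.
def is_palindrome_normalized(s):
    cleaned = "".join(ch.lower() for ch in s if ch.isalnum())
    return cleaned == cleaned[::-1]

# print(is_palindrome_normalized("Never odd or even"))  # True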
StarcoderdataPython
4919931
#!/usr/bin/env python # -*- coding: utf-8 -*- from unittest import TestCase import mock import json import urllib2 from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from libdict.macmillan import models, query_site words = [ 'take-on', # multiple keys in last sense 'yours', # intro paragraph 'yours truly', # informal phrase 'take off', # informal 'air', # plural, singular, nested senses 'reference', # american nested, [only before noun] nested, cntable, uncntable, # formal phrase 'my', # intro_paragraph 'then', # multiple phrases one sense 'since when', # phrase finding in entry for 'since' # some crazy h2.VARIANT 'http://www.macmillandictionary.com/dictionary/british/yours_2', 'make a difference', # missing div.THES element ] class AttrDict(dict): def __getattr__(self, attr): return self[attr] def pprint(self): print(json.dumps(self, sort_keys=True, indent=2)) ''' Entry: original_key - 1 len(senses) - 1 style_level - 1 intro_paragraph - 1 pron - 2 part_of_speech - 2 links - 0 url - 2 Sense: original_key - 2 definition - 1 len(examples) - 1 style_level - 3 subject_area - 1 syntax_coding - 4 Example: original_key - 1 content - 2 Link: key - 1 part_of_speech - 1 TODO ''' class BaseTests(object): """Subclasses must implement result_hook.""" def test_phrase_take_on(self): res = self.result_hook(query_site("take on")) # res.pprint() senses = res.senses # Entry.original_key self.assertEqual(res.original_key, 'take on') # len(Entry.senses) self.assertEqual(len(senses), 5) # Sense.definition self.assertEqual(senses[0].definition, 'to start to employ someone') # Sense.original_key self.assertEqual(senses[4].original_key, 'take on | take upon') self.assertEqual(senses[1].original_key, 'take on something') # len(Sense.examples) self.assertEqual(len(senses[1].examples), 2) # Example.content self.assertEqual(senses[1].examples[0].content, 'Our website is taking on a new look.') self.assertEqual(senses[4].examples[0].content, 'My mother took it on herself to invite them.') # Example.original_key self.assertEqual(senses[4].examples[0].original_key, 'take it on/upon yourself (to do something)') def test_phrase_yours_truly(self): res = self.result_hook(query_site("yours truly")) # Entry.style_level self.assertEqual(res.style_level, 'informal') def test_phrase_yours(self): res = self.result_hook(query_site("yours")) # Entry.intro_paragraph self.assertTrue('Her eyes are darker than yours are.' 
in res.intro_paragraph) s = 'It can refer to a singular or plural noun, and it can be the subject,' \ ' object, or complement of a verb or the object of a preposition' self.assertTrue(s in res.intro_paragraph) # Entry.pron self.assertEqual(res.pron, u'/jɔː(r)z/') # Entry.part_of_speech self.assertEqual(res.part_of_speech, 'pronoun') # Entry.url self.assertEqual(res.url, 'http://www.macmillandictionary.com/dictionary/british/yours_1') def test_phrase_take_off(self): res = self.result_hook(query_site("take off")) senses = res.senses # Sense.style_level self.assertEqual(senses[4].style_level, 'informal') self.assertEqual(senses[5].style_level, 'informal') # Entry.url self.assertEqual(res.url, 'http://www.macmillandictionary.com/dictionary/british/take-off_1') def test_phrase_air(self): res = self.result_hook(query_site("air")) senses = res.senses # res.pprint() # Sense.style_level self.assertEqual(senses[3].style_level, 'old-fashioned') # Sense.subject_area self.assertEqual(senses[3].subject_area, 'music') # Sense.syntax_coding self.assertEqual(senses[3].syntax_coding, '[countable]') self.assertEqual(senses[2].syntax_coding, '[plural]') self.assertEqual(senses[1].syntax_coding, '[singular]') self.assertEqual(senses[0].syntax_coding, '[uncountable]') # Entry.pron self.assertEqual(res.pron, u'/eə(r)/') # Entry.part_of_speech self.assertEqual(res.part_of_speech, 'noun') def test_look_up(self): res = self.result_hook(query_site("look up")) # Link.key self.assertEqual(res.links[0].key, u'look up to') # Link.part_of_speech self.assertEqual(res.links[0].part_of_speech, u'phrasal verb') @mock.patch("libdict.macmillan.Entry.model_class", new=AttrDict) @mock.patch("libdict.macmillan.SubSense.model_class", new=AttrDict) @mock.patch("libdict.macmillan.Sense.model_class", new=AttrDict) @mock.patch("libdict.macmillan.Example.model_class", new=AttrDict) @mock.patch("libdict.macmillan.RelatedLink.model_class", new=AttrDict) @mock.patch("libdict.macmillan.PhraseLink.model_class", new=AttrDict) class MacmillanScrapeTests(TestCase): # class MacmillanScrapeTests(TestCase, BaseTests): def result_hook(self, res): return res class MacmillanDBTests(TestCase, BaseTests): @classmethod def setUpClass(cls): engine = create_engine('sqlite:///:memory:', echo=False) models.Base.metadata.create_all(engine) cls.engine = engine Session = sessionmaker(bind=engine) cls.session = Session() @classmethod def tearDownClass(cls): cls.session.commit() def result_hook(self, res): self.session.add(res) self.session.commit() id_ = res.id res = None return self.session.query(models.Entry).filter_by(id=id_).first()
StarcoderdataPython
3418744
<gh_stars>0
import drawBot as db
import vanilla
from vanilla.dialogs import putFile
from random import random


class MyInterface:
    def __init__(self):
        self.pdf = None
        self.w = vanilla.Window((600, 900), "My Interface")
        self.w.drawButton = vanilla.Button((10, 10, 100, 25), "Draw!", callback=self.draw)
        self.w.saveButton = vanilla.Button((120, 10, 100, 25), "Save!", callback=self.save)
        self.w.drawView = db.ui.drawView.DrawView((10, 50, -10, -10))
        self.w.open()

    def draw(self, sender):
        db.newDrawing()
        db.newPage(300, 300)
        # Use the module-qualified call; a bare fill() is only available inside the DrawBot app namespace.
        db.fill(random(), random(), random())
        db.rect(0, 0, 50, 50)
        self.pdf = db.pdfImage()
        self.w.drawView.setPDFDocument(self.pdf)

    def save(self, sender):
        if self.pdf:
            fileSavePath = putFile(title="Save Image", fileName="Image.pdf")
            if fileSavePath:
                db.saveImage(fileSavePath)


MyInterface()
StarcoderdataPython
4817087
<filename>tabcmd/commands/group/delete_group_command.py
import tableauserverclient as TSC

from tabcmd.commands.auth.session import Session
from tabcmd.commands.constants import Errors
from tabcmd.commands.server import Server
from tabcmd.execution.localize import _
from tabcmd.execution.logger_config import log


class DeleteGroupCommand(Server):
    """
    This command deletes the specified group from the server
    """

    name: str = "deletegroup"
    description: str = _("deletegroup.short_description")

    @staticmethod
    def define_args(delete_group_parser):
        delete_group_parser.add_argument("name")

    @staticmethod
    def run_command(args):
        logger = log(__class__.__name__, args.logging_level)
        logger.debug(_("tabcmd.launching"))
        session = Session()
        server = session.create_session(args)
        try:
            logger.info(_("tabcmd.find.group").format(args.name))
            group_id = Server.find_group_id(logger, server, args.name)
            logger.info(_("deletegroup.status").format(group_id))
            server.groups.delete(group_id)
            logger.info(_("tabcmd.result.succeeded"))
        except TSC.ServerResponseError as e:
            Errors.exit_with_error(logger, "tabcmd.result.failed.delete.group", e)
StarcoderdataPython
12810742
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    '''
    Traversal (iterative, using a list as a double-ended queue)
    '''
    def isSymmetric(self, root):
        if root is None:
            return True
        deQueue = [root.left, root.right]
        while len(deQueue):
            leftNode = deQueue[0]
            del deQueue[0]
            rightNode = deQueue.pop()
            if leftNode and rightNode:
                if leftNode.val != rightNode.val:
                    return False
                else:
                    deQueue.insert(0, leftNode.right)
                    deQueue.insert(0, leftNode.left)
                    deQueue.append(rightNode.left)
                    deQueue.append(rightNode.right)
            elif leftNode or rightNode:
                return False
        return True


if __name__ == "__main__":
    s = Solution()
    a1 = TreeNode(1)
    a2 = TreeNode(2)
    a3 = TreeNode(2)
    a4 = TreeNode(3)
    a5 = TreeNode(4)
    a6 = TreeNode(4)
    a7 = TreeNode(3)
    a1.left = a2
    a1.right = a3
    a2.left = a4
    a2.right = a5
    a3.left = a6
    a3.right = a7
    print(s.isSymmetric(a1))
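# Added sketch (not part of the original solution): the same symmetry check
# written recursively, comparing mirrored subtrees directly.
class RecursiveSolution:
    def isSymmetric(self, root):
        def mirror(a, b):
            if a is None and b is None:
                return True
            if a is None or b is None:
                return False
            return (a.val == b.val
                    and mirror(a.left, b.right)
                    and mirror(a.right, b.left))
        return root is None or mirror(root.left, root.right)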
StarcoderdataPython
6531637
spam = ['hey', 'howdy', 'hiya', 'hello']
print(spam)
print(spam.index('hiya'))
spam.append('hi')
spam.insert(0, 'privet')
print(spam)
spam.remove('hello')
spam.sort()
print(spam)
spam2 = spam.copy()
spam.sort(reverse=True, key=str.lower)
print(spam)
spam2 = spam.copy()
spam2[2] = 'test'
print(spam)
print(spam2)
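# Added sketch (not in the original snippet): list.sort() mutates in place and
# returns None, while sorted() returns a new list and leaves the original alone.
words = ['Hey', 'howdy', 'HIYA']
print(sorted(words, key=str.lower))  # new list, case-insensitive order
print(words)                         # original order unchanged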
StarcoderdataPython
11378359
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from ...dyn.chart2.axis import Axis as Axis from ...dyn.chart2.axis_orientation import AxisOrientation as AxisOrientation from ...dyn.chart2.axis_type import AxisType as AxisType from ...dyn.chart2.axis_type import AxisTypeEnum as AxisTypeEnum from ...dyn.chart2.candle_stick_chart_type import CandleStickChartType as CandleStickChartType from ...dyn.chart2.cartesian_coordinate_system2d import CartesianCoordinateSystem2d as CartesianCoordinateSystem2d from ...dyn.chart2.cartesian_coordinate_system3d import CartesianCoordinateSystem3d as CartesianCoordinateSystem3d from ...dyn.chart2.chart_document import ChartDocument as ChartDocument from ...dyn.chart2.chart_document_wrapper import ChartDocumentWrapper as ChartDocumentWrapper from ...dyn.chart2.chart_type import ChartType as ChartType from ...dyn.chart2.chart_type_manager import ChartTypeManager as ChartTypeManager from ...dyn.chart2.chart_type_template import ChartTypeTemplate as ChartTypeTemplate from ...dyn.chart2.coordinate_system import CoordinateSystem as CoordinateSystem from ...dyn.chart2.coordinate_system_type import CoordinateSystemType as CoordinateSystemType from ...dyn.chart2.coordinate_system_type_id import CoordinateSystemTypeID as CoordinateSystemTypeID from ...dyn.chart2.curve_style import CurveStyle as CurveStyle from ...dyn.chart2.data_point import DataPoint as DataPoint from ...dyn.chart2.data_point_custom_label_field import DataPointCustomLabelField as DataPointCustomLabelField from ...dyn.chart2.data_point_custom_label_field_type import DataPointCustomLabelFieldType as DataPointCustomLabelFieldType from ...dyn.chart2.data_point_geometry3_d import DataPointGeometry3D as DataPointGeometry3D from ...dyn.chart2.data_point_geometry3_d import DataPointGeometry3DEnum as DataPointGeometry3DEnum from ...dyn.chart2.data_point_label import DataPointLabel as DataPointLabel from ...dyn.chart2.data_point_properties import DataPointProperties as DataPointProperties from ...dyn.chart2.data_series import DataSeries as DataSeries from ...dyn.chart2.diagram import Diagram as Diagram from ...dyn.chart2.error_bar import ErrorBar as ErrorBar from ...dyn.chart2.exponential_regression_curve import ExponentialRegressionCurve as ExponentialRegressionCurve from ...dyn.chart2.exponential_scaling import ExponentialScaling as ExponentialScaling from ...dyn.chart2.fill_bitmap import FillBitmap as FillBitmap from ...dyn.chart2.formatted_string import FormattedString as FormattedString from ...dyn.chart2.grid_properties import GridProperties as GridProperties from ...dyn.chart2.increment_data import IncrementData as IncrementData from ...dyn.chart2.interpreted_data import InterpretedData as InterpretedData from ...dyn.chart2.legend import Legend as Legend from ...dyn.chart2.legend_position import LegendPosition as LegendPosition from ...dyn.chart2.light_source import LightSource as LightSource from ...dyn.chart2.linear_regression_curve import 
LinearRegressionCurve as LinearRegressionCurve from ...dyn.chart2.linear_scaling import LinearScaling as LinearScaling from ...dyn.chart2.logarithmic_regression_curve import LogarithmicRegressionCurve as LogarithmicRegressionCurve from ...dyn.chart2.logarithmic_scaling import LogarithmicScaling as LogarithmicScaling from ...dyn.chart2.logic_target_model import LogicTargetModel as LogicTargetModel from ...dyn.chart2.moving_average_regression_curve import MovingAverageRegressionCurve as MovingAverageRegressionCurve from ...dyn.chart2.moving_average_type import MovingAverageType as MovingAverageType from ...dyn.chart2.moving_average_type import MovingAverageTypeEnum as MovingAverageTypeEnum from ...dyn.chart2.pie_chart_offset_mode import PieChartOffsetMode as PieChartOffsetMode from ...dyn.chart2.polar_coordinate_system2d import PolarCoordinateSystem2d as PolarCoordinateSystem2d from ...dyn.chart2.polar_coordinate_system3d import PolarCoordinateSystem3d as PolarCoordinateSystem3d from ...dyn.chart2.polynomial_regression_curve import PolynomialRegressionCurve as PolynomialRegressionCurve from ...dyn.chart2.potential_regression_curve import PotentialRegressionCurve as PotentialRegressionCurve from ...dyn.chart2.power_scaling import PowerScaling as PowerScaling from ...dyn.chart2.property_pool import PropertyPool as PropertyPool from ...dyn.chart2.regression_curve import RegressionCurve as RegressionCurve from ...dyn.chart2.regression_curve_equation import RegressionCurveEquation as RegressionCurveEquation from ...dyn.chart2.regression_equation import RegressionEquation as RegressionEquation from ...dyn.chart2.relative_position import RelativePosition as RelativePosition from ...dyn.chart2.relative_size import RelativeSize as RelativeSize from ...dyn.chart2.scale_data import ScaleData as ScaleData from ...dyn.chart2.scaling import Scaling as Scaling from ...dyn.chart2.stacking_direction import StackingDirection as StackingDirection from ...dyn.chart2.standard_diagram_creation_parameters import StandardDiagramCreationParameters as StandardDiagramCreationParameters from ...dyn.chart2.sub_increment import SubIncrement as SubIncrement from ...dyn.chart2.symbol import Symbol as Symbol from ...dyn.chart2.symbol_style import SymbolStyle as SymbolStyle from ...dyn.chart2.tickmark_style import TickmarkStyle as TickmarkStyle from ...dyn.chart2.tickmark_style import TickmarkStyleEnum as TickmarkStyleEnum from ...dyn.chart2.title import Title as Title from ...dyn.chart2.transparency_style import TransparencyStyle as TransparencyStyle from ...dyn.chart2.x_any_description_access import XAnyDescriptionAccess as XAnyDescriptionAccess from ...dyn.chart2.x_axis import XAxis as XAxis from ...dyn.chart2.x_chart_document import XChartDocument as XChartDocument from ...dyn.chart2.x_chart_shape import XChartShape as XChartShape from ...dyn.chart2.x_chart_shape_container import XChartShapeContainer as XChartShapeContainer from ...dyn.chart2.x_chart_type import XChartType as XChartType from ...dyn.chart2.x_chart_type_container import XChartTypeContainer as XChartTypeContainer from ...dyn.chart2.x_chart_type_manager import XChartTypeManager as XChartTypeManager from ...dyn.chart2.x_chart_type_template import XChartTypeTemplate as XChartTypeTemplate from ...dyn.chart2.x_color_scheme import XColorScheme as XColorScheme from ...dyn.chart2.x_coordinate_system import XCoordinateSystem as XCoordinateSystem from ...dyn.chart2.x_coordinate_system_container import XCoordinateSystemContainer as XCoordinateSystemContainer from 
...dyn.chart2.x_data_interpreter import XDataInterpreter as XDataInterpreter from ...dyn.chart2.x_data_point_custom_label_field import XDataPointCustomLabelField as XDataPointCustomLabelField from ...dyn.chart2.x_data_provider_access import XDataProviderAccess as XDataProviderAccess from ...dyn.chart2.x_data_series import XDataSeries as XDataSeries from ...dyn.chart2.x_data_series_container import XDataSeriesContainer as XDataSeriesContainer from ...dyn.chart2.x_default_size_transmitter import XDefaultSizeTransmitter as XDefaultSizeTransmitter from ...dyn.chart2.x_diagram import XDiagram as XDiagram from ...dyn.chart2.x_diagram_provider import XDiagramProvider as XDiagramProvider from ...dyn.chart2.x_formatted_string import XFormattedString as XFormattedString from ...dyn.chart2.x_formatted_string2 import XFormattedString2 as XFormattedString2 from ...dyn.chart2.x_internal_data_provider import XInternalDataProvider as XInternalDataProvider from ...dyn.chart2.x_labeled import XLabeled as XLabeled from ...dyn.chart2.x_legend import XLegend as XLegend from ...dyn.chart2.x_regression_curve import XRegressionCurve as XRegressionCurve from ...dyn.chart2.x_regression_curve_calculator import XRegressionCurveCalculator as XRegressionCurveCalculator from ...dyn.chart2.x_regression_curve_container import XRegressionCurveContainer as XRegressionCurveContainer from ...dyn.chart2.x_scaling import XScaling as XScaling from ...dyn.chart2.x_target import XTarget as XTarget from ...dyn.chart2.x_time_based import XTimeBased as XTimeBased from ...dyn.chart2.x_title import XTitle as XTitle from ...dyn.chart2.x_titled import XTitled as XTitled from ...dyn.chart2.x_transformation import XTransformation as XTransformation
from lib import action


class NomadGetPoliciesAction(action.NomadBaseAction):
    def run(self):
        return self.nomad.acl.get_policies()
<gh_stars>1-10 """Tests for fast5seek package.""" import unittest import logging import io from contextlib import redirect_stdout from fast5seek import fast5seek logging.disable(logging.CRITICAL) class TestBamReadIdExtraction(unittest.TestCase): """Test the read id extracxtion functions for bam and sam""" def test_BamReadIdExtractionTBMapped_SixReadIds(self): """Test read id extracxtion from bam with 6 mapped reads""" mapped = True bam = 'tests/data/bam/tb.bam' result = fast5seek.get_sam_read_ids(bam, mapped) expected = { '57d4cd63-3189-4006-93ec-bf3c8bfb2ce1', 'bbd563e9-1bf8-4268-92d5-45ccb8e3da72', '8aecf428-af00-4791-b065-5d4abd798a29', 'd707ff64-6ade-477a-8b68-0b3c394ef3b1', 'c967d421-3da4-4e11-accf-0d3fb0155840', '28acaa47-1cec-4a91-9abf-c780d27e6cc4' } self.assertSetEqual(result, expected) def test_BamReadIdExtractionTBUnmapped_EightReadIds(self): """Test read id extracxtion from bam with 6 mapped reads""" mapped = False bam = 'tests/data/bam/tb.bam' result = fast5seek.get_sam_read_ids(bam, mapped) expected = { '57d4cd63-3189-4006-93ec-bf3c8bfb2ce1', 'bbd563e9-1bf8-4268-92d5-45ccb8e3da72', '8aecf428-af00-4791-b065-5d4abd798a29', 'd707ff64-6ade-477a-8b68-0b3c394ef3b1', 'c967d421-3da4-4e11-accf-0d3fb0155840', '28acaa47-1cec-4a91-9abf-c780d27e6cc4', '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) def test_SamReadIdExtractionTBMapped_SixReadIds(self): """Test read id extracxtion from bam with 6 mapped reads""" mapped = True sam = 'tests/data/sam/tb.sam' result = fast5seek.get_sam_read_ids(sam, mapped) expected = { '57d4cd63-3189-4006-93ec-bf3c8bfb2ce1', 'bbd563e9-1bf8-4268-92d5-45ccb8e3da72', '8aecf428-af00-4791-b065-5d4abd798a29', 'd707ff64-6ade-477a-8b68-0b3c394ef3b1', 'c967d421-3da4-4e11-accf-0d3fb0155840', '28acaa47-1cec-4a91-9abf-c780d27e6cc4' } self.assertSetEqual(result, expected) def test_SamReadIdExtractionEcoliMapped_TwoReadIds(self): """Test read id extracxtion from bam with 6 mapped reads""" mapped = True sam = 'tests/data/sam/ecoli.sam' result = fast5seek.get_sam_read_ids(sam, mapped) expected = { '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) class TestFastqReadIdExtraction(unittest.TestCase): """Test the read id extracxtion functions for fastq""" def test_FastqReadIdExtractionTB_SixReadIds(self): """Test read id extracxtion from gzipped fastq with 6 mapped reads""" fastq = 'tests/data/fastq/tb_mapped.fastq' result = fast5seek.get_fastq_read_ids(fastq) expected = { '57d4cd63-3189-4006-93ec-bf3c8bfb2ce1', 'bbd563e9-1bf8-4268-92d5-45ccb8e3da72', '8aecf428-af00-4791-b065-5d4abd798a29', 'd707ff64-6ade-477a-8b68-0b3c394ef3b1', 'c967d421-3da4-4e11-accf-0d3fb0155840', '28acaa47-1cec-4a91-9abf-c780d27e6cc4' } self.assertSetEqual(result, expected) def test_FastqGzipReadIdExtractionEcoli_TwoReadIds(self): """Test read id extracxtion from fastq with 2 mapped reads""" fastq = 'tests/data/fastq/ecoli_mapped.fastq.gz' result = fast5seek.get_fastq_read_ids(fastq) expected = { '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) def test_FastqGzipReadIdExtractionAll_EightReadIds(self): """Test read id extracxtion from fastq with 8 mapped reads""" fastq = 'tests/data/fastq/basecalled.fastq.gz' result = fast5seek.get_fastq_read_ids(fastq) expected = { '57d4cd63-3189-4006-93ec-bf3c8bfb2ce1', 'bbd563e9-1bf8-4268-92d5-45ccb8e3da72', '8aecf428-af00-4791-b065-5d4abd798a29', 
'd707ff64-6ade-477a-8b68-0b3c394ef3b1', 'c967d421-3da4-4e11-accf-0d3fb0155840', '28acaa47-1cec-4a91-9abf-c780d27e6cc4', '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) class TestReadIdExtraction(unittest.TestCase): """Test the read id extracxtion functions for fastq, bam and sam""" def test_ReadIdExtractionWithFast5_EmptySet(self): """Test read id extracxtion from gzipped fastq with 6 mapped reads""" mapped = False files = ['tests/data/fast5/tb1.fast5'] result = fast5seek.extract_read_ids(files, mapped) expected = set() self.assertSetEqual(result, expected) def test_ReadIdExtractionWithFast5Fastq_TwoIds(self): """Test read id extracxtion from gzipped fastq with 6 mapped reads""" mapped = False files = [ 'tests/data/fast5/tb1.fast5', 'tests/data/fastq/ecoli_mapped.fastq.gz' ] result = fast5seek.extract_read_ids(files, mapped) expected = { '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) def test_ReadIdExtractionWithFast5FastqBam_TwoIds(self): """Test read id extracxtion from gzipped fastq with 6 mapped reads""" mapped = True files = [ 'tests/data/fast5/tb1.fast5', 'tests/data/fastq/ecoli_mapped.fastq.gz', 'tests/data/bam/ecoli.bam' ] result = fast5seek.extract_read_ids(files, mapped) expected = { '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) def test_ReadIdExtractionWithFast5FastqBam_EightIds(self): """Test read id extracxtion from gzipped fastq with 6 mapped reads""" mapped = True files = [ 'tests/data/fast5/tb1.fast5', 'tests/data/fastq/ecoli_mapped.fastq.gz', 'tests/data/bam/tb.bam' ] result = fast5seek.extract_read_ids(files, mapped) expected = { '57d4cd63-3189-4006-93ec-bf3c8bfb2ce1', 'bbd563e9-1bf8-4268-92d5-45ccb8e3da72', '8aecf428-af00-4791-b065-5d4abd798a29', 'd707ff64-6ade-477a-8b68-0b3c394ef3b1', 'c967d421-3da4-4e11-accf-0d3fb0155840', '28acaa47-1cec-4a91-9abf-c780d27e6cc4', '6cf511b6-1724-46bd-b5a4-59c18bb57343', '6c26d9b5-d892-4fc6-b035-abe575895c88' } self.assertSetEqual(result, expected) class TestFastqRunIdExtraction(unittest.TestCase): """Test the read id extracxtion functions for fastq""" def test_FastqRundIdExtractionTB_EmptySet(self): """Test run id extracxtion from non-albacore fastq.""" fastq = 'tests/data/fastq/tb_mapped.fastq' result = fast5seek.get_fastq_run_ids([fastq]) expected = set() self.assertSetEqual(result, expected) def test_FastqRundIdExtractionAlbacore_TwoRunIds(self): """Test run id extracxtion from non-albacore fastq.""" fastq = 'tests/data/fastq/basecalled.fastq.gz' result = fast5seek.get_fastq_run_ids([fastq]) expected = { 'bfa81348704ecd62c348b404e974a37daf030951', 'dc6ee09815f8baff16d92e7189e3a46d855f02b4' } self.assertSetEqual(result, expected) class TestGetFast5ReadAndRunId(unittest.TestCase): """Test read id extraction from fast5 file. 
TODO: Find a fast5 with more than one read id to test handling """ def test_TBFast5File_OneReadAndRunID(self): filepath = 'tests/data/fast5/tb1.fast5' result = fast5seek.get_read_and_run_id(filepath) expected = ('d707ff64-6ade-477a-8b68-0b3c394ef3b1', 'dc6ee09815f8baff16d92e7189e3a46d855f02b4') self.assertTupleEqual(result, expected) def test_EcoliFast5File_OneReadAndRunID(self): filepath = 'tests/data/fast5/ecoli1.fast5' result = fast5seek.get_read_and_run_id(filepath) expected = ('6cf511b6-1724-46bd-b5a4-59c18bb57343', 'bfa81348704ecd62c348b404e974a37daf030951') self.assertTupleEqual(result, expected) def test_EmptyFast5File_EmptyTuple(self): filepath = 'tests/data/fast5/empty.fast5' result = fast5seek.get_read_and_run_id(filepath) expected = ('', '') self.assertTupleEqual(result, expected) class TestCollectAllFast5Filepaths(unittest.TestCase): """Test function that collects all the unique fast5 filepaths.""" def test_OneFast5Directory_EightFilepaths(self): fast5_dir = ['tests/data/fast5'] result = fast5seek.collect_all_fast5_filepaths(fast5_dir) expected = { 'tests/data/fast5/ecoli1.fast5', 'tests/data/fast5/ecoli2.fast5', 'tests/data/fast5/empty.fast5', 'tests/data/fast5/tb1.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', } self.assertSetEqual(result, expected) def test_TwoFast5Directory_EightFilepaths(self): fast5_dir = ['tests/data/fast5', 'tests/data'] result = fast5seek.collect_all_fast5_filepaths(fast5_dir) expected = { 'tests/data/fast5/ecoli1.fast5', 'tests/data/fast5/ecoli2.fast5', 'tests/data/fast5/empty.fast5', 'tests/data/fast5/tb1.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', } self.assertSetEqual(result, expected) class TestCollectPresentFast5Filepaths(unittest.TestCase): """Test function that collects all the present fast5 filepaths.""" def test_OneFast5FileOneReadIdOneRunId_Present(self): filepath = {'tests/data/fast5/tb1.fast5'} read_ids = {'d707ff64-6ade-477a-8b68-0b3c394ef3b1'} run_ids = {'dc6ee09815f8baff16d92e7189e3a46d855f02b4'} result = fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = ['tests/data/fast5/tb1.fast5'] self.assertListEqual(result, expected) def test_OneFast5FileOneReadIdNoRunId_Present(self): filepath = {'tests/data/fast5/tb1.fast5'} read_ids = {'d707ff64-6ade-477a-8b68-0b3c394ef3b1'} run_ids = set() result = fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = ['tests/data/fast5/tb1.fast5'] self.assertListEqual(result, expected) def test_OneFast5FileNoReadIdNoRunId_EmptyList(self): filepath = {'tests/data/fast5/tb1.fast5'} read_ids = set() run_ids = set() result = fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = [] self.assertListEqual(result, expected) def test_TwoFast5FileOneReadIdNoRunId_OneFast5Path(self): filepath = {'tests/data/fast5/tb1.fast5', 'tests/data/fast5/tb3.fast5'} read_ids = {'d707ff64-6ade-477a-8b68-0b3c394ef3b1'} run_ids = set() result = fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = ['tests/data/fast5/tb1.fast5'] self.assertListEqual(result, expected) def test_TwoFast5FileOneReadIdNoRunId_NoFast5Path(self): filepath = {'tests/data/fast5/tb1.fast5', 'tests/data/fast5/tb3.fast5'} read_ids = {'not a real read id'} run_ids = set() result = 
fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = [] self.assertListEqual(result, expected) def test_TwoFast5FileOneReadIdNoRunId_OneFast5PathNoError(self): filepath = {'tests/data/fast5/tb1.fast5', 'tests/data/fast5/empty.fast5'} read_ids = {'d707ff64-6ade-477a-8b68-0b3c394ef3b1'} run_ids = set() result = fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = ['tests/data/fast5/tb1.fast5'] self.assertListEqual(result, expected) def test_TwoFast5FileTwoReadIdOneRunId_TwoFast5Paths(self): filepath = {'tests/data/fast5/tb1.fast5', 'tests/data/fast5/ecoli1.fast5'} read_ids = {'d707ff64-6ade-477a-8b68-0b3c394ef3b1', '6cf511b6-1724-46bd-b5a4-59c18bb57343'} run_ids = set('bfa81348704ecd62c348b404e974a37daf030951') result = fast5seek.collect_present_fast5_filepaths(filepath, read_ids, run_ids, None) expected = ['tests/data/fast5/ecoli1.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected) class TestArgs(object): """Class to create args object like that from argparse.""" def __init__(self, fast5_dir, reference, mapped, output=None): self.output = output self.fast5_dir = fast5_dir self.reference = reference self.mapped = mapped self.log_level = 4 self.no_progress_bar = True class TestMain(unittest.TestCase): """Test the main function of the program.""" def test_TBTestFast5DataFastq_SixFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/fastq/tb_mapped.fastq.gz'] args = TestArgs(fast5_dir, reference, True) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected) def test_TBTestFast5DataBam_SixFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/bam/tb.bam'] args = TestArgs(fast5_dir, reference, True) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected) def test_TBTestFast5DataSam_SixFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/sam/tb.sam'] args = TestArgs(fast5_dir, reference, True) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected) def test_TBTestFast5DataSamAndFastq_SixFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/sam/tb.sam', 'tests/data/fastq/tb_mapped.fastq.gz'] args = TestArgs(fast5_dir, reference, True) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected) def 
test_EcoliTestFast5DataSam_TwoFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/sam/ecoli.sam'] args = TestArgs(fast5_dir, reference, True) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/ecoli1.fast5', 'tests/data/fast5/ecoli2.fast5'] self.assertCountEqual(result, expected) def test_TBTestFast5DataBamUnmapped_EightFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/bam/tb.bam'] args = TestArgs(fast5_dir, reference, False) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/ecoli1.fast5', 'tests/data/fast5/ecoli2.fast5', 'tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected) def test_TBTestFast5DataAlbacoreFastq_EightFilepaths(self): fast5_dir = ['tests/data/fast5'] reference = ['tests/data/fastq/basecalled.fastq.gz'] args = TestArgs(fast5_dir, reference, False) f = io.StringIO() with redirect_stdout(f): fast5seek.main(args) stdout = f.getvalue() result = [x for x in stdout.split('\n') if x] expected = ['tests/data/fast5/ecoli1.fast5', 'tests/data/fast5/ecoli2.fast5', 'tests/data/fast5/tb5.fast5', 'tests/data/fast5/tb6.fast5', 'tests/data/fast5/tb4.fast5', 'tests/data/fast5/tb3.fast5', 'tests/data/fast5/tb2.fast5', 'tests/data/fast5/tb1.fast5'] self.assertCountEqual(result, expected)
<gh_stars>10-100 from toolz.curried import * # noqa from misoc.interconnect import stream from migen_axi.interconnect import axi, Reader from .common import write_ack, wait_stb, file_tmp_folder from migen.sim import run_simulation def write_data(sink, val, eop=None): yield sink.data.eq(val) if eop: yield sink.eop.eq(1) yield from write_ack(sink) if eop: yield sink.eop.eq(0) def request_addr(sink, addr, eop=None): yield sink.addr.eq(addr) if eop: yield sink.eop.eq(1) yield from write_ack(sink) if eop: yield sink.eop.eq(0) def read_data(source): yield from wait_stb(source) return (yield source.data) def test_upscaler(): i = axi.Interface() dw = i.data_width dut = stream.Converter(8, dw) source, sink = dut.source, dut.sink write = partial(write_data, sink) read = partial(read_data, source) def testbench_upscaler(): def push(): yield from write(0x11) yield from write(0x22) yield from write(0x33) yield from write(0x44) yield from write(0x55) yield from write(0x66) yield from write(0x77) yield from write(0x88) yield from write(0x99, eop=1) def pull(): yield source.ack.eq(1) assert (yield from read()) == 0x44332211 yield assert (yield from read()) == 0x88776655 yield assert (yield from read()) & 0xff == 0x99 return [ push(), pull(), ] run_simulation(dut, testbench_upscaler()) def test_downscaler(): i = axi.Interface() dw = i.data_width dut = stream.Converter(dw, 8) source, sink = dut.source, dut.sink write = partial(write_data, sink) read = partial(read_data, source) def testbench_downscaler(): def push(): yield from write(0x44332211) yield from write(0x88776655, eop=True) def pull(): yield source.ack.eq(1) assert (yield from read()) == 0x11 yield assert (yield from read()) == 0x22 yield assert (yield from read()) == 0x33 yield assert (yield from read()) == 0x44 yield assert (yield from read()) == 0x55 yield assert (yield from read()) == 0x66 yield assert (yield from read()) == 0x77 yield assert (yield from read()) == 0x88 assert (yield source.eop) == 1 return [ push(), pull(), ] run_simulation( dut, testbench_downscaler(), vcd_name=file_tmp_folder("test_downscaler.vcd")) def test_reader(): i = axi.Interface() dw = i.data_width dut = Reader(i) dut.submodules.downscaler = stream.Converter(dw, 8) dut.comb += dut.source.connect(dut.downscaler.sink) source, sink = dut.downscaler.source, dut.sink request = partial(request_addr, sink) read = partial(read_data, source) def testbench_reader(): def push_addr(): yield from request(0x11223344, eop=True) def pull_data(): yield source.ack.eq(1) assert (yield from read()) == 0x04 yield assert (yield from read()) == 0x03 yield assert (yield from read()) == 0x02 yield assert (yield from read()) == 0x01 assert (yield source.eop) == 1 def ar_and_r_channel(): assert (yield from i.read_ar()).addr == 0x11223344 yield from i.write_r(0x55, 0x01020304, last=1) return [ push_addr(), pull_data(), ar_and_r_channel(), ] run_simulation( dut, testbench_reader(), vcd_name=file_tmp_folder("test_reader.vcd"))
#!/usr/bin/python
# repo: datadonK23/rot-weisse-wurzeln
"""
locate_control
Add Locate control to folium Map.
Based on leaflet plugin: https://github.com/domoritz/leaflet-locatecontrol

Taken from Folium PR#1116 and slightly modified.
Utility methods 'parse_options' and 'camelize' taken from Folium master branch.

Author: dbf [@fullonic](https://github.com/fullonic), Folium contributors
"""

from branca.element import CssLink, Figure, JavascriptLink, MacroElement  # type: ignore
from jinja2 import Template

# from folium.utilities import parse_options


class LocateControl(MacroElement):
    """Control plugin to geolocate the user.

    This plugin adds a button to the map; when it is clicked, it shows the
    current user device location.

    To work properly in production, the connection needs to be encrypted,
    otherwise the browser will not allow users to share their location.

    WARNING: When this plugin is used together with the Draw plugin, it must be
    added to your map before Draw. See example below.

    Parameters
    ----------
    **kwargs
        For possible options, see https://github.com/domoritz/leaflet-locatecontrol

    Examples
    --------
    >>> m = folium.Map()
    # With default settings
    >>> LocateControl().add_to(m)

    # With custom options and alongside Draw
    >>> LocateControl(
    ...     position="bottomright",
    ...     strings={"title": "See your current location",
    ...              "popup": "Your position"}).add_to(m)
    >>> Draw(export=True).add_to(m)

    For more info check:
    https://github.com/domoritz/leaflet-locatecontrol
    """

    _template = Template("""
        {% macro script(this, kwargs) %}
            var {{this.get_name()}} = L.control.locate(
                {{this.options | tojson}}
            ).addTo({{this._parent.get_name()}});
        {% endmacro %}
        """)

    def __init__(self, **kwargs):
        super(LocateControl, self).__init__()
        self._name = 'LocateControl'
        self.options = parse_options(**kwargs)

    def render(self, **kwargs):
        super(LocateControl, self).render(**kwargs)
        figure = self.get_root()
        assert isinstance(figure, Figure), ('You cannot render this Element '
                                            'if it is not in a Figure.')
        figure.header.add_child(
            CssLink(
                "https://cdnjs.cloudflare.com/ajax/libs/leaflet-locatecontrol/0.66.2/L.Control.Locate.min.css"))  # noqa
        figure.header.add_child(JavascriptLink(
            "https://cdnjs.cloudflare.com/ajax/libs/leaflet-locatecontrol/0.66.2/L.Control.Locate.min.js"))  # noqa


def parse_options(**kwargs):
    """Return a dict with lower-camelcase keys and non-None values."""
    return {camelize(key): value
            for key, value in kwargs.items()
            if value is not None}


def camelize(key):
    """Convert a python_style_variable_name to lowerCamelCase.

    Examples
    --------
    >>> camelize('variable_name')
    'variableName'
    >>> camelize('variableName')
    'variableName'
    """
    return ''.join(x.capitalize() if i > 0 else x
                   for i, x in enumerate(key.split('_')))
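
# --- Added usage sketch (not part of the original module) ---
# A minimal example of wiring LocateControl into a folium map, assuming folium is
# installed and this module is importable; the coordinates and output file name
# below are invented for illustration only.
if __name__ == "__main__":
    import folium

    demo_map = folium.Map(location=[50.45, 4.87], zoom_start=9)
    # Keyword options go through parse_options(), so python_style keys become camelCase.
    LocateControl(position="bottomright",
                  strings={"title": "Show my location"}).add_to(demo_map)
    demo_map.save("map_with_locate_control.html")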
<gh_stars>1-10 # -*- coding: utf-8 -*- """Test columns""" import unittest from pyrseas.testutils import InputMapToSqlTestCase, fix_indent CREATE_STMT1 = "CREATE TABLE t1 (c1 integer, c2 text)" CREATE_STMT2 = "CREATE TABLE t1 (c1 integer, c2 text, c3 date)" CREATE_STMT3 = "CREATE TABLE t1 (c1 integer, c2 text, c3 date, c4 text)" DROP_COL_STMT = "ALTER TABLE t1 DROP COLUMN c3" class ColumnToSqlTestCase(InputMapToSqlTestCase): """Test SQL generation of column-related statements from input schemas""" def test_set_column_not_null(self): "Change a nullable column to NOT NULL" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer', 'not_null': True}}, {'c2': {'type': 'text'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT1]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ALTER COLUMN c1 SET NOT NULL") def test_change_column_types(self): "Change the datatypes of two columns" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'bigint'}}, {'c2': {'type': 'varchar(25)'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT1]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ALTER COLUMN c1 TYPE bigint") self.assertEqual(fix_indent(sql[1]), "ALTER TABLE t1 ALTER COLUMN c2 TYPE varchar(25)") def test_add_column1(self): "Add new column to a table" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT2]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ADD COLUMN c4 text") def test_add_column2(self): "Add column to a table that has a dropped column" stmts = [CREATE_STMT2, "ALTER TABLE t1 DROP COLUMN c2"] inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}]}}) sql = self.to_sql(inmap, stmts) self.assertEqual(len(sql), 1) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ADD COLUMN c2 text") def test_add_column3(self): "No change on a table that has a dropped column" stmts = [CREATE_STMT3, DROP_COL_STMT] inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, stmts) self.assertEqual(len(sql), 0) def test_add_column4(self): "Add two columns to a table that has a dropped column" stmts = [CREATE_STMT2, DROP_COL_STMT] inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, stmts) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ADD COLUMN c3 date") self.assertEqual(fix_indent(sql[1]), "ALTER TABLE t1 ADD COLUMN c4 text") def test_drop_column1(self): "Drop a column from the end of a table" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT3]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 DROP COLUMN c4") def test_drop_column2(self): "Drop a column from the middle of a table" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT3]) 
self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 DROP COLUMN c3") def test_drop_column3(self): "Drop a column from the beginning of a table" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT3]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 DROP COLUMN c1") def test_rename_column(self): "Rename a table column" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c3': {'type': 'text', 'oldname': 'c2'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT1]) self.assertEqual(sql[0], "ALTER TABLE t1 RENAME COLUMN c2 TO c3") def test_drop_add_column1(self): "Drop and re-add table column from the end, almost like a RENAME" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}, {'c4': {'type': 'date'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT2]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ADD COLUMN c4 date") self.assertEqual(sql[1], "ALTER TABLE t1 DROP COLUMN c3") def test_drop_add_column2(self): "Drop and re-add table column from the beginning" inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, [CREATE_STMT2]) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ADD COLUMN c4 text") self.assertEqual(sql[1], "ALTER TABLE t1 DROP COLUMN c1") def test_drop_add_column3(self): "Drop and re-add table columns from table with dropped column" stmts = [CREATE_STMT2, DROP_COL_STMT] inmap = self.std_map() inmap['schema public'].update({'table t1': { 'columns': [{'c2': {'type': 'text'}}, {'c3': {'type': 'date'}}, {'c4': {'type': 'text'}}]}}) sql = self.to_sql(inmap, stmts) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE t1 ADD COLUMN c3 date") self.assertEqual(fix_indent(sql[1]), "ALTER TABLE t1 ADD COLUMN c4 text") self.assertEqual(sql[2], "ALTER TABLE t1 DROP COLUMN c1") def test_drop_column_in_schema(self): "Drop a column from a table in a non-public schema" stmts = ["CREATE SCHEMA s1", "CREATE TABLE s1.t1 (c1 integer, c2 text, c3 date)"] inmap = self.std_map() inmap.update({'schema s1': {'table t1': { 'columns': [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}}]}}}) sql = self.to_sql(inmap, stmts) self.assertEqual(fix_indent(sql[0]), "ALTER TABLE s1.t1 DROP COLUMN c3") def suite(): tests = unittest.TestLoader().loadTestsFromTestCase(ColumnToSqlTestCase) return tests if __name__ == '__main__': unittest.main(defaultTest='suite')
# -*- coding: utf-8 -*-
# @Time    : 2018/3/29 10:06 AM
# @Author  : Azrael.Bai
# @File    : reverse_string.py


class Solution(object):
    def reverseString(self, s):
        """
        :type s: str
        :rtype: str
        """
        return s[::-1]


if __name__ == '__main__':
    s = Solution()
    print(s.reverseString("abcdefg"))
#!/usr/bin/env python
# repo: dhermes/project-euler

from python.decorators import euler_timer
from python.functions import inverse_mod_n
from python.functions import sieve


def main(verbose=False):
    PRIMES = sieve(10 ** 6 + 3)  # 10**6 + 3 is the final value of p_2

    running_sum = 0
    for index in range(2, len(PRIMES) - 1):
        p_1 = PRIMES[index]
        p_2 = PRIMES[index + 1]
        ten_inverse = inverse_mod_n(10, p_2)
        digits = len(str(p_1))
        k = (ten_inverse ** digits) * (p_2 - p_1) % p_2
        running_sum += int('%s%s' % (k, p_1))
    return running_sum


if __name__ == '__main__':
    print euler_timer(134)(main)(verbose=True)
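
# --- Added illustration (not part of the original solution) ---
# Sanity check of the modular trick on one small prime pair, using only built-ins:
# for p_1 = 5, p_2 = 7 we have 10^-1 = 5 (mod 7), so
# k = 5**1 * (7 - 5) % 7 = 3, and the connected number is int("3" + "5") = 35,
# the smallest multiple of 7 whose decimal digits end in "5".
assert (5 * 10) % 7 == 1  # 5 really is the inverse of 10 modulo 7
assert 35 % 7 == 0 and str(35).endswith("5")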
from ._iea34_130rwt import IEA34_130_1WT_Surrogate, IEA34_130_2WT_Surrogate
SECRET = '<KEY>'
DATABASE = 'sqlite:///catalog.db'
CLIENT_GOOGLE_SECRET = 'client_secret_google.json'
from .spykingcircussortingextractor import SpykingCircusSortingExtractor
from functools import reduce
from operator import getitem
from inspect import getdoc

from werkzeug.exceptions import NotFound

from yggdrasil.record import Record

from . import Page


class Root(Page):
    def __init__(self, urlmap):
        self.urlmap = urlmap

    def render_rule(self, request, rule):
        result = Record()
        result.endpoint = rule.endpoint

        segments = rule.endpoint.split(".")
        try:
            endpoint = reduce(getitem, segments, self)
        except KeyError:
            endpoint = None

        if rule.methods is not None:
            result.methods = tuple(rule.methods)

        description = getdoc(endpoint)
        if description is not None:
            result.description = description

        return result

    def on_intro(self, request):
        """
        This page shows current routing table with endpoints and descriptions.
        """
        result = Record()
        result.rules = rules = Record()

        for rule in self.urlmap.iter_rules():
            rules[rule.rule] = self.render_rule(request, rule)

        return result


class RootRefs(Root):
    def render_rule(self, request, rule):
        result = super().render_rule(request, rule)

        try:
            result.ref = self.build_url(request, rule.endpoint)
        except:
            pass

        return result
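
# --- Added illustration (not from the original project) ---
# How the dotted-endpoint lookup in render_rule() works: reduce(getitem, ...) walks a
# nested mapping one key at a time, so "a.b" resolves to root["a"]["b"].
_lookup_demo = {"a": {"b": "endpoint object"}}
assert reduce(getitem, "a.b".split("."), _lookup_demo) == "endpoint object"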
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode

# code starts here
bank = pd.read_csv(path)

categorical_var = bank.select_dtypes(include='object')
print(categorical_var)

numerical_var = bank.select_dtypes(include='number')
print(numerical_var)

banks = bank.drop(columns=['Loan_ID'])
bank_mode = banks.mode()
banks = banks.fillna(bank_mode.iloc[0])
print(banks.isnull().sum())

avg_loan_amount = pd.pivot_table(banks,
                                 index=['Gender', 'Married', 'Self_Employed'],
                                 values='LoanAmount',
                                 aggfunc='mean')
print(avg_loan_amount)

loan_approved_se = banks[(banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == "Y")]
loan_approved_nse = banks[(banks['Self_Employed'] == "No") & (banks['Loan_Status'] == "Y")]

percentage_se = (len(loan_approved_se) / 614) * 100
percentage_nse = (len(loan_approved_nse) / 614) * 100

# loan amount term
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x) / 12)
big_loan_term = len(loan_term[loan_term >= 25])
print(big_loan_term)

columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby = banks.groupby(['Loan_Status'])[columns_to_show]

# Check the mean value
mean_values = loan_groupby.agg([np.mean])
print(mean_values)

# code ends here
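
# --- Added illustration (hypothetical data, not the graded dataset) ---
# The same pivot_table pattern on a tiny in-memory frame, to show the shape of the output.
_demo = pd.DataFrame({
    'Gender': ['Male', 'Female', 'Male'],
    'Married': ['Yes', 'No', 'Yes'],
    'Self_Employed': ['No', 'No', 'Yes'],
    'LoanAmount': [120.0, 80.0, 200.0],
})
print(pd.pivot_table(_demo, index=['Gender', 'Married', 'Self_Employed'],
                     values='LoanAmount', aggfunc='mean'))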
# repo: tcysin/portfolio
"""
This module contains functionality for finding duplicated and
near-duplicated images.

TODO better description
TODO which methods do we use here?
"""

from collections import defaultdict
from typing import Dict, Iterable, List

import numpy as np
from PIL import Image
from scipy import spatial


def dHash(image: Image.Image):
    """
    Return dHash array for given PIL Image.

    We compute standard 64-bit difference hash using `Image.BOX`
    interpolation. For detailed discussion of dHash algorithm see
    http://www.hackerfactor.com/blog/index.php?/archives/529-Kind-of-Like-That.html.

    This implementation is heavily inspired by `ImageHash` library at
    https://pypi.org/project/ImageHash/.

    Returns:
        boolean 1-D ndarray of size 64.
    """
    # convert the image to grayscale and resize the grayscale image,
    # adding a single column (width) so we can compute the horizontal
    # gradient
    gray = image.convert("L")
    resized = gray.resize((9, 8), resample=Image.BOX)
    pixels = np.asarray(resized)

    # compute the (relative) horizontal gradient between adjacent
    # column pixels
    diff = pixels[:, :-1] < pixels[:, 1:]

    return np.ravel(diff)


def distance_matrix(X, metric="euclidean"):
    """
    Return square distance matrix between rows of X.

    Args:
        X (array-like): M by N array of M original observations in an
            N-dimensional space.
        metric (str): the distance metric to use. See docs for
            `scipy.spatial.distance.pdist`.

    Returns:
        Y (ndarray): square distance matrix of shape (M, M).
    """
    condensed = spatial.distance.pdist(X, metric=metric)
    return spatial.distance.squareform(condensed)


def duplicate_matrix(X: np.ndarray, threshold: float):
    """
    Return matrix of duplicates from distance matrix X.

    We mark both vectors as duplicates when the distance between them is
    smaller than the threshold.

    Args:
        X: distance matrix of shape (M, M).
        threshold (float): cutoff threshold for duplicated vectors.

    Returns:
        Y (ndarray): boolean matrix of shape (M, M).
    """
    return X < threshold


def duplicates_dict(X: np.ndarray) -> Dict[int, List[int]]:
    """
    Return duplicates dictionary from a matrix of duplicates.

    Each key is an index of original vector, while value is a list of
    vector indices we consider duplicates.

    Args:
        X (ndarray): boolean duplicate matrix of shape (M, M).
    """
    # TODO is logic ok? is it what we want?
    dict_dups = defaultdict(list)
    dups = set()  # keep track of duplicates

    n = len(X)
    for row in range(n - 1):  # skip last row
        if row in dups:
            continue
        for col in range(row + 1, n):  # start one past diagonal
            if X[row, col] == True:  # noqa E712
                dict_dups[row].append(col)
                dups.add(col)  # mark j as a duplicate

    return dict(dict_dups)  # freeze final dict


def find_duplicates(
    images: Iterable[Image.Image], threshold: float = 0.16
) -> Dict[str, List[str]]:
    """
    Find duplicated or near-duplicated images.

    Resulting dictionary may be empty if there are no duplicated images
    or initial iterable is empty.

    We utilize dHash method to get 64-bit perceptive hashes of given
    images and compare them using Hamming distance metric.

    Args:
        images: iterable of PIL Images with non-empty `filename` attribute.
        threshold: cutoff threshold for duplicated hashes. A fraction of
            bits that differs between two hashes (scaled Hamming distance).
            Must be between 0 and 1.

    Returns:
        dict: dictionary with filenames for original and duplicated images.
            Each key is a filename of original image, while value is a list
            of filenames marked as duplicates.
    """
    filenames = []
    hashes = []
    for image in images:
        filenames.append(image.filename)
        hashes.append(dHash(image))

    # TODO is this a good design?
    if len(hashes) == 0:
        return {}

    hashes = np.vstack(hashes)  # convert to (M,64) bool ndarray
    M = distance_matrix(hashes, metric="hamming")  # (M,M) float in [0.0, 1.0]
    D = duplicate_matrix(M, threshold)
    dups_dict = duplicates_dict(D)

    result = {
        filenames[orig]: [filenames[i] for i in dups]
        for orig, dups in dups_dict.items()
    }

    return result
<gh_stars>0 import sys import os import browser import changelog args = sys.argv if len(args) == 2 and args[1] == "--version": print("Glopi Alpha 4") exit() if len(args) == 2 and (args[1] == "--help" or args[1] == "--usage"): print("Look at the readme(.md) for help!") exit() if len(args) > 2 and args[1] == "plain-changelog": id=args[2] path="/tmp/glopi" if not os.path.isdir("/tmp/glopi"): os.mkdir(path) #download cards browser.saveurl("https://app.gitkraken.com/api/glo/boards/" + id + "/cards?archived=undefined&fields=board_id,archived_date,board_id,due_date,name,column_id,created_by,created_date,members,labels,attachment_count,comment_count,total_task_count,completed_task_count,description,status,sync_provider_id,updated_date.js", path + "/cards.json") #download columns + further info browser.saveurl("https://app.gitkraken.com/api/glo/boards/" + id + "?fields=archived_date,invited_members,external_provider_members,members,id,name,columns,columns.name,labels,labels.sync_provider_id,sync_provider,sync_provider.type,sync_provider.options", path + "/columns.json") if len(args) == 4: file = args[3] else: file = "" #generate changelog changelog.make_plain(file) exit() if len(args) > 2 and args[1] == "fancy-changelog": id=args[2] path="/tmp/glopi" if not os.path.isdir("/tmp/glopi"): os.mkdir(path) #download cards browser.saveurl("https://app.gitkraken.com/api/glo/boards/" + id + "/cards?archived=undefined&fields=board_id,archived_date,board_id,due_date,name,column_id,created_by,created_date,members,labels,attachment_count,comment_count,total_task_count,completed_task_count,description,status,sync_provider_id,updated_date.js", path + "/cards.json") #download columns + further info browser.saveurl("https://app.gitkraken.com/api/glo/boards/" + id + "?fields=archived_date,invited_members,external_provider_members,members,id,name,columns,columns.name,labels,labels.sync_provider_id,sync_provider,sync_provider.type,sync_provider.options", path + "/columns.json") if len(args) == 4: file = args[3] else: file = "" #generate changelog changelog.make_fancy(file) exit() print("Unknown command. See readme.md for avaliable commands and info.") exit()
# repo: lietu/moat


class Message(object):
    def __init__(self, game, player_id, data):
        self.game = game
        self.player_id = player_id

        for key in data:
            setattr(self, key, data[key])

        self.validate()

    def validate(self):
        raise NotImplementedError("Message without validation")

    def process(self):
        raise NotImplementedError("Message without processing")


class ShootMessage(Message):
    def __init__(self, game, player_id, data):
        self.x = 0
        self.y = 0

        super(ShootMessage, self).__init__(game, player_id, data)

    def validate(self):
        # Bounds check: the target must lie within the 640x480 playing field.
        if not (0 <= self.x <= 640):
            raise ValueError("Invalid X position")
        if not (0 <= self.y <= 480):
            raise ValueError("Invalid Y position")

    def process(self):
        self.game.shoot(self.player_id, (self.x, self.y,))


class RequestRespawn(Message):
    def __init__(self, game, player_id, data):
        super(RequestRespawn, self).__init__(game, player_id, data)

    def validate(self):
        pass

    def process(self):
        self.game.request_respawn(self.player_id)


MESSAGE_TYPES = {
    "shoot": ShootMessage,
    "requestRespawn": RequestRespawn
}
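
# --- Added dispatch sketch (the game object is a stand-in, not the real engine) ---
# MESSAGE_TYPES maps the wire-format "type" field to a handler class; constructing the
# handler copies the payload onto the instance and validates it, and process() forwards
# the request to the game object.
class _StubGame(object):
    def shoot(self, player_id, position):
        print(player_id, "shoots at", position)

    def request_respawn(self, player_id):
        print(player_id, "asked to respawn")


def _dispatch(game, player_id, payload):
    handler_cls = MESSAGE_TYPES[payload.pop("type")]
    handler = handler_cls(game, player_id, payload)
    handler.process()


if __name__ == "__main__":
    _dispatch(_StubGame(), 1, {"type": "shoot", "x": 100, "y": 200})
    _dispatch(_StubGame(), 2, {"type": "requestRespawn"})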
<reponame>mish24/werk #! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file import random try: from queue import Queue except ImportError: from Queue import Queue from waflib import Utils,Task,Errors,Logs GAP=20 class Consumer(Utils.threading.Thread): def __init__(self,spawner,task): Utils.threading.Thread.__init__(self) self.task=task self.spawner=spawner self.setDaemon(1) self.start() def run(self): try: if not self.spawner.master.stop: self.task.process() finally: self.spawner.sem.release() self.spawner.master.out.put(self.task) self.task=None self.spawner=None class Spawner(Utils.threading.Thread): def __init__(self,master): Utils.threading.Thread.__init__(self) self.master=master self.sem=Utils.threading.Semaphore(master.numjobs) self.setDaemon(1) self.start() def run(self): try: self.loop() except Exception: pass def loop(self): master=self.master while 1: task=master.ready.get() self.sem.acquire() if not master.stop: task.log_display(task.generator.bld) Consumer(self,task) class Parallel(object): def __init__(self,bld,j=2): self.numjobs=j self.bld=bld self.outstanding=Utils.deque() self.frozen=Utils.deque() self.ready=Queue(0) self.out=Queue(0) self.count=0 self.processed=1 self.stop=False self.error=[] self.biter=None self.dirty=False self.spawner=Spawner(self) def get_next_task(self): if not self.outstanding: return None return self.outstanding.popleft() def postpone(self,tsk): if random.randint(0,1): self.frozen.appendleft(tsk) else: self.frozen.append(tsk) def refill_task_list(self): while self.count>self.numjobs*GAP: self.get_out() while not self.outstanding: if self.count: self.get_out() elif self.frozen: try: cond=self.deadlock==self.processed except AttributeError: pass else: if cond: msg='check the build order for the tasks' for tsk in self.frozen: if not tsk.run_after: msg='check the methods runnable_status' break lst=[] for tsk in self.frozen: lst.append('%s\t-> %r'%(repr(tsk),[id(x)for x in tsk.run_after])) raise Errors.WafError('Deadlock detected: %s%s'%(msg,''.join(lst))) self.deadlock=self.processed if self.frozen: self.outstanding.extend(self.frozen) self.frozen.clear() elif not self.count: self.outstanding.extend(self.biter.next()) self.total=self.bld.total() break def add_more_tasks(self,tsk): if getattr(tsk,'more_tasks',None): self.outstanding.extend(tsk.more_tasks) self.total+=len(tsk.more_tasks) def get_out(self): tsk=self.out.get() if not self.stop: self.add_more_tasks(tsk) self.count-=1 self.dirty=True return tsk def add_task(self,tsk): self.ready.put(tsk) def skip(self,tsk): tsk.hasrun=Task.SKIPPED def error_handler(self,tsk): if hasattr(tsk,'scan')and hasattr(tsk,'uid'): try: del self.bld.imp_sigs[tsk.uid()] except KeyError: pass if not self.bld.keep: self.stop=True self.error.append(tsk) def task_status(self,tsk): try: return tsk.runnable_status() except Exception: self.processed+=1 tsk.err_msg=Utils.ex_stack() if not self.stop and self.bld.keep: self.skip(tsk) if self.bld.keep==1: if Logs.verbose>1 or not self.error: self.error.append(tsk) self.stop=True else: if Logs.verbose>1: self.error.append(tsk) return Task.EXCEPTION tsk.hasrun=Task.EXCEPTION self.error_handler(tsk) return Task.EXCEPTION def start(self): self.total=self.bld.total() while not self.stop: self.refill_task_list() tsk=self.get_next_task() if not tsk: if self.count: continue else: break if tsk.hasrun: self.processed+=1 continue if self.stop: break st=self.task_status(tsk) if st==Task.RUN_ME: self.count+=1 self.processed+=1 
if self.numjobs==1: tsk.log_display(tsk.generator.bld) try: tsk.process() finally: self.out.put(tsk) else: self.add_task(tsk) if st==Task.ASK_LATER: self.postpone(tsk) elif st==Task.SKIP_ME: self.processed+=1 self.skip(tsk) self.add_more_tasks(tsk) while self.error and self.count: self.get_out() self.ready.put(None) assert(self.count==0 or self.stop)
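
# --- Added illustration (a much-simplified sketch, not waf code) ---
# The scheduler above combines a task queue, a semaphore that caps concurrency at
# `numjobs`, and an output queue used to count completed work. The same pattern in
# miniature, using only the standard library: the loop plays the role of Spawner,
# consume() mirrors Consumer.run(), and draining `done` mirrors Parallel.get_out().
import threading
import queue as _queue


def _run_bounded(callables, numjobs=2):
    sem = threading.Semaphore(numjobs)
    done = _queue.Queue()

    def consume(fn):
        try:
            fn()
        finally:
            sem.release()   # free a slot for the spawner
            done.put(fn)    # report completion

    for fn in callables:
        sem.acquire()       # block while `numjobs` tasks are already running
        threading.Thread(target=consume, args=(fn,), daemon=True).start()
    for _ in callables:
        done.get()          # wait for every task to finish


if __name__ == "__main__":
    _run_bounded([lambda i=i: print("task", i) for i in range(5)], numjobs=2)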
# repo: pduchesne/ckanext-digitalwallonia
'''plugin.py'''

import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckan.plugins as p
from ckanext.spatial.interfaces import ISpatialHarvester
from ckan.common import json


class ADNThemePlugin(plugins.SingletonPlugin):
    '''ADN theme plugin.'''

    # Declare that this class implements IConfigurer.
    plugins.implements(plugins.IConfigurer)

    def update_config(self, config):
        # Add this plugin's templates dir to CKAN's extra_template_paths, so
        # that CKAN will use this plugin's custom templates.
        # 'templates' is the path to the templates dir, relative to this
        # plugin.py file.
        toolkit.add_template_directory(config, 'templates')
        toolkit.add_public_directory(config, 'public')
        pass


class MetawalHarvester(p.SingletonPlugin):

    p.implements(ISpatialHarvester, inherit=True)

    def get_package_dict(self, context, data_dict):
        package_dict = data_dict['package_dict']

        json_config = data_dict['harvest_object'].source.config
        if json_config:
            config = json.loads(json_config)
            # check default license in harvest source config
            if ('license_id' not in package_dict and
                    'default_license_id' in config):
                package_dict['license_id'] = config['default_license_id']

        return package_dict
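
# --- Added illustration (values are invented) ---
# MetawalHarvester reads the harvest source's JSON config; a source configured like the
# snippet below would get 'cc-by' applied whenever a harvested record has no license_id.
EXAMPLE_HARVEST_SOURCE_CONFIG = json.dumps({
    "default_license_id": "cc-by",
})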
<filename>tests/checkpoint/conftest.py import os import shutil import pytest from great_expectations import DataContext from great_expectations.core import ExpectationConfiguration from great_expectations.data_context.util import file_relative_path @pytest.fixture def titanic_pandas_data_context_stats_enabled_and_expectation_suite_with_one_expectation( titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled # create expectation suite suite = context.create_expectation_suite("my_expectation_suite") expectation = ExpectationConfiguration( expectation_type="expect_column_values_to_be_between", kwargs={"column": "col1", "min_value": 1, "max_value": 2}, ) suite.add_expectation(expectation, send_usage_event=False) context.save_expectation_suite(suite) return context @pytest.fixture def titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled( tmp_path_factory, monkeypatch, spark_session, ): # Re-enable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path: str = str(tmp_path_factory.mktemp("titanic_data_context")) context_path: str = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path: str = os.path.join(context_path, "..", "data", "titanic") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path( __file__, os.path.join( "..", "test_fixtures", "great_expectations_v013_no_datasource_stats_enabled.yml", ), ), str(os.path.join(context_path, "great_expectations.yml")), ) shutil.copy( file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")), str( os.path.join( context_path, "..", "data", "titanic", "Titanic_19120414_1313.csv" ) ), ) shutil.copy( file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")), str( os.path.join(context_path, "..", "data", "titanic", "Titanic_19120414_1313") ), ) shutil.copy( file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1911.csv")), ) shutil.copy( file_relative_path(__file__, os.path.join("..", "test_sets", "Titanic.csv")), str(os.path.join(context_path, "..", "data", "titanic", "Titanic_1912.csv")), ) context: DataContext = DataContext(context_root_dir=context_path) assert context.root_directory == context_path datasource_config: str = f""" class_name: Datasource execution_engine: class_name: SparkDFExecutionEngine data_connectors: my_basic_data_connector: class_name: InferredAssetFilesystemDataConnector base_directory: {data_path} default_regex: pattern: (.*)\\.csv group_names: - data_asset_name my_special_data_connector: class_name: ConfiguredAssetFilesystemDataConnector base_directory: {data_path} glob_directive: "*.csv" default_regex: pattern: (.+)\\.csv group_names: - name assets: users: base_directory: {data_path} pattern: (.+)_(\\d+)_(\\d+)\\.csv group_names: - name - timestamp - size my_other_data_connector: class_name: ConfiguredAssetFilesystemDataConnector base_directory: {data_path} glob_directive: "*.csv" default_regex: pattern: (.+)\\.csv group_names: - name assets: users: {{}} my_runtime_data_connector: module_name: great_expectations.datasource.data_connector class_name: RuntimeDataConnector batch_identifiers: - pipeline_stage_name - airflow_run_id """ # noinspection PyUnusedLocal 
context.test_yaml_config( name="my_datasource", yaml_config=datasource_config, pretty_print=False ) # noinspection PyProtectedMember context._save_project_config() return context
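
# --- Added usage sketch (not part of the original conftest) ---
# A hypothetical test showing how the Spark fixture above might be consumed; the
# assertion relies on DataContext.list_datasources(), which is assumed here to report
# the datasource registered via test_yaml_config().
def test_spark_context_registers_my_datasource(
    titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    context = (
        titanic_spark_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    )
    assert "my_datasource" in [ds["name"] for ds in context.list_datasources()]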
print('----------first demo----------')
temp = input("Take a guess: which number is XXX thinking of right now? ")
guess = int(temp)
if guess == 8:
    print("Impressive! Are you reading XXX's mind?")
    print("Hmph, no prize for guessing right, though!")
else:
    print("Wrong! XXX is thinking of 8!")
print("Game over, that's it for now ^_^")
<reponame>AitanG/numpy-string-indexed<gh_stars>0 import copy import numpy as np from . import friendly_matrix as fm __all__ = [ 'moveaxis_A', 'moveaxis', 'swapaxes_A', 'swapaxes', 'transpose_A', 'transpose', 'concatenate_A', 'concatenate', 'stack_A', 'stack', 'vstack_A', 'vstack', 'hstack_A', 'hstack', 'flip_A', 'flip', 'fliplr_A', 'fliplr', 'flipud_A', 'flipud', 'mean_A', 'mean', 'std_A', 'std', 'var_A', 'var', 'sum_A', 'sum', 'prod_A', 'prod', 'min_A', 'min', 'argmin_A', 'argmin', 'max_A', 'max', 'argmax_A', 'argmax', 'all_A', 'all', 'any_A', 'any', 'cumsum_A', 'cumsum', 'cumprod_A', 'cumprod', 'squeeze_A', 'squeeze', ] ''' Transpose-like operations ''' def moveaxis_A(friendly, dim, new_dim): return friendly.moveaxis_A(dim, new_dim) def moveaxis(friendly, dim, new_dim): return friendly.moveaxis(dim, new_dim) def swapaxes_A(friendly, dim1, dim2): return friendly.swapaxes_A(dim1, dim2) def swapaxes(friendly, dim1, dim2): return friendly.swapaxes(dim1, dim2) def transpose_A(friendly): return friendly.transpose_A() def transpose(friendly): return friendly.transpose() ''' Joining arrays ''' def concatenate_A(friendlies, axis=0): ''' Same as `concatenate()`, except returns only the array. ''' dim_index = friendlies[0]._to_dim_index(axis) arrays = tuple(friendly.array for friendly in friendlies) return np.concatenate(arrays, dim_index) def concatenate(friendlies, axis=0): ''' Concatenates the provided `friendly_matrix.ndarray`s along the provided axis. Params: `friendlies`: the `friendly_matrix.ndarray` to concatenate `axis`: the axis to concatenate along Returns: `friendly_matrix.ndarray` ''' dim_index = friendlies[0]._to_dim_index(axis) arrays = tuple(friendly.array for friendly in friendlies) array = np.concatenate(arrays, dim_index) # Ensure that we're joining along the same dim name for each friendly dim_name_first = friendlies[0].dim_names[dim_index] for friendly in friendlies[1:]: dim_name_cur = friendly.dim_names[dim_index] if dim_name_cur != dim_name_first: raise ValueError(f'Different dim names for axis {dim_index}' f' across different ndarrays: {dim_name_first}' f' and {dim_name_cur}.') # Update array of values for the axis being extended extended_dim_array = [] for friendly in friendlies: extended_dim_array += friendly.dim_arrays[dim_index] dim_arrays = list(copy.copy(friendlies[0].dim_arrays)) dim_arrays[dim_index] = extended_dim_array return fm._new_ndarray(array, friendlies[0].dim_names, dim_arrays) def stack_A(friendlies, axis_name=None, axis_array=None, axis=None): ''' Same as `stack()`, except returns only the array. ''' if axis is None: if axis_name is not None: raise ValueError(f'Specified new axis name but no axis') axis = 0 dim_index = friendlies[0]._to_dim_index(axis) arrays = tuple(friendly.array for friendly in friendlies) return np.stack(arrays, dim_index) def stack(friendlies, axis_name, axis_array, axis=0): ''' Stacks the provided `friendly_matrix.ndarray`s along a new axis. 
Params: `friendlies`: the `friendly_matrix.ndarray`s to concatenate `axis_name`: the name of the newly created axis `axis_array`: the labels for the newly created axis `axis`: the location of the new axis Returns: `friendly_matrix.ndarray` ''' if len(axis_array) != len(friendlies): raise ValueError(f'Axis array must be the same length as the number' f' of ndarrays being stacked ({len(axis_array)} !=' f' {len(friendlies)})') dim_index = friendlies[0]._to_dim_index(axis) arrays = tuple(friendly.array for friendly in friendlies) array = np.stack(arrays, dim_index) dim_names_first = friendlies[0].dim_names for friendly in friendlies[1:]: dim_names_cur = friendly.dim_names if dim_names_cur != dim_names_first: raise ValueError('All ndarrays being stacked must have the' ' same dim names') dim_names = copy.copy(friendlies[0].dim_names) dim_arrays = list(copy.copy(friendlies[0].dim_arrays)) dim_names.insert(dim_index, axis_name) dim_arrays.insert(dim_index, axis_array) return fm._new_ndarray(array, dim_names, dim_arrays) def vstack_A(friendlies): ''' Same as `vstack()`, except returns only the array. ''' if len(friendlies[0].shape) == 1: # Vstacking 1-dimensional arrays requires creating a new dimension raise ValueError('Can\'t perform vstack() on one-dimensional' ' ndarrays. Use stack() instead') return concatenate_A(friendlies, axis=0) def vstack(friendlies): ''' Vertically stacks the provided `friendly_matrix.ndarray`s. Params: `friendlies`: the `friendly_matrix.ndarray`s to stack Returns: `friendly_matrix.ndarray` ''' if len(friendlies[0].shape) == 1: # Vstacking 1-dimensional arrays requires creating a new dimension raise ValueError('Can\'t perform vstack() on one-dimensional' ' ndarrays. Use stack() instead') return concatenate(friendlies, axis=0) def hstack_A(friendlies): ''' Same as `hstack()`, except returns only the array. ''' if len(friendlies[0].shape) == 1: return concatenate_A(friendlies, axis=0) return concatenate_A(friendlies, axis=1) def hstack(friendlies): ''' Horizontally stacks the provided `friendly_matrix.ndarray`s. Params: `friendlies`: the `friendly_matrix.ndarray`s to stack Returns: `friendly_matrix.ndarray` ''' if len(friendlies[0].shape) == 1: return concatenate(friendlies, axis=0) return concatenate(friendlies, axis=1) ''' Rearranging elements ''' def flip_A(friendly, axis=None): ''' Same as `flip()`, except returns only the array. ''' if axis is None: axes_to_flip = list(range(friendly.ndim)) else: dim_index = friendly._to_dim_index(axis) axes_to_flip = [dim_index] return np.flip(friendly.array, axis) def flip(friendly, axis=None): ''' Reverses the order of elements along the provided axes. Params: `friendly`: the `friendly_matrix.ndarray` to flip `axis`: the axis along which to flip Returns: `friendly_matrix.ndarray` ''' if axis is None: axes_to_flip = list(range(friendly.ndim)) else: dim_index = friendly._to_dim_index(axis) axes_to_flip = [dim_index] array = np.flip(friendly.array, axis) dim_arrays = list(copy.copy(friendly.dim_arrays)) for dim_index in axes_to_flip: dim_arrays[dim_index] = list(reversed(dim_arrays[dim_index])) return fm._new_ndarray(array, friendly.dim_names, dim_arrays) def fliplr_A(friendly): ''' Same as `fliplr()`, except returns only the array. ''' return flip_A(friendly, axis=0) def fliplr(friendly): ''' Reverses the order of elements along the first axis. 
Params: `friendly`: the `friendly_matrix.ndarray` to flip Returns: `friendly_matrix.ndarray` ''' return flip(friendly, axis=0) def flipud_A(friendly): ''' Same as `flipud()`, except returns only the array. ''' return flip_A(friendly, axis=1) def flipud(friendly): ''' Reverses the order of elements along the second axis. Params: `friendly`: the `friendly_matrix.ndarray` to flip Returns: `friendly_matrix.ndarray` ''' return flip(friendly, axis=1) ''' Aggregating across a dimension ''' def mean_A(friendly, axis=0): return friendly.mean_A(axis) def mean(friendly, axis=0): return friendly.mean(axis) def std_A(friendly, axis=0): return friendly.std_A(axis) def std(friendly, axis=0): return friendly.std(axis) def var_A(friendly, axis=0): return friendly.var_A(axis) def var(friendly, axis=0): return friendly.var(axis) def sum_A(friendly, axis=0): return friendly.sum_A(axis) def sum(friendly, axis=0): return friendly.sum(axis) def prod_A(friendly, axis=0): return friendly.prod_A(axis) def prod(friendly, axis=0): return friendly.prod(axis) def min_A(friendly, axis=0): return friendly.min_A(axis) def min(friendly, axis=0): return friendly.min(axis) def argmin_A(friendly, axis=0): return friendly.argmin_A(axis) def argmin(friendly, axis=0): return friendly.argmin(axis) def max_A(friendly, axis=0): return friendly.max_A(axis) def max(friendly, axis=0): return friendly.max(axis) def argmax_A(friendly, axis=0): return friendly.argmax_A(axis) def argmax(friendly, axis=0): return friendly.argmax(axis) def all_A(friendly, axis=0): return friendly.all_A(axis) def all(friendly, axis=0): return friendly.all(axis) def any_A(friendly, axis=0): return friendly.any_A(axis) def any(friendly, axis=0): return friendly.any(axis) def cumsum_A(friendly, axis=0): return friendly.cumsum_A(axis) def cumsum(friendly, axis=0): return friendly.cumsum(axis) def cumprod_A(friendly, axis=0): return friendly.cumprod_A(axis) def cumprod(friendly, axis=0): return friendly.cumprod(axis) def squeeze_A(friendly): return friendly.squeeze_A() def squeeze(friendly): return friendly.squeeze()
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from profiles_api import views

router = DefaultRouter()
# 1: the URL prefix we wish to create
# 2: the viewset we wish to register
# 3: the base name for our viewset
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
# We don't have to specify the base name here because 'UserProfileViewSet' defines a
# queryset: Django REST framework figures out the name from the model assigned to it.
# The base name must be given in two cases: 1) there is no queryset, or
# 2) you want to override the existing name.
router.register('profile', views.UserProfileViewSet)

urlpatterns = [
    path('hello-view/', views.HelloApiView.as_view()),
    path('login/', views.UserLoginApiView.as_view()),
    path('', include(router.urls)),
]
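
# --- Added illustration (for inspection only, not required by the app) ---
# DefaultRouter generates list and detail routes for each registered viewset; running
# the helper below from `python manage.py shell` prints the generated URL patterns
# (roughly: hello-viewset/, hello-viewset/<pk>/, profile/, profile/<pk>/).
def _print_router_urls():
    for url in router.urls:
        print(url)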
<reponame>Hugking/lin-cms-flask<gh_stars>0 import sys import os sys.path.append((os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))) from app.app import create_app app = create_app() def test_new_cate(): with app.test_client() as c: rv = c.post('/wx/shop/cate/', json={ 'name': '干花', 'keywords': '店铺推荐' }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_update_cate(): with app.test_client() as c: rv = c.put('/wx/shop/cate/2',json={ 'name': '鲜花', 'keywords': '店铺推荐', }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_new_attr_cate(): with app.test_client() as c: rv = c.post('/wx/shop/attr_cate/', json={ 'name': '夏季必备', }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_update_attr_cate(): with app.test_client() as c: rv = c.put('/wx/shop/attr_cate/3',json={ 'name': '春季必备' }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_new_attr(): with app.test_client() as c: rv = c.post('/wx/shop/attr/',json={ 'name': '规格', 'attribute_category_id':'3' }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_update_attr(): with app.test_client() as c: rv = c.put('/wx/shop/attr/5',json={ 'name': '尺码', 'attribute_category_id':'2' }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_new_keywords(): with app.test_client() as c: rv = c.post('/wx/shop/keywords/',json={ 'is_default': '1' }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_update_keywords(): with app.test_client() as c: rv = c.put('/wx/shop/keywords/3',json={ 'is_dafault': '0', }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_new_brand(): with app.test_client() as c: rv = c.post('/wx/shop/brand/',json={ 'name': 'NIICK制造商', "floor_price":'39.90' }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200 def test_update_brand(): with app.test_client() as c: rv = c.put('/wx/shop/brand/3',json={ 'name':'LV制造商', 'floor_price':'19.90', }) json_data = rv.get_json() print(json_data) assert rv.status_code == 200
students = []


class Student:
    # Python has no enforced access control: all methods are public.
    # Naming conventions are used instead to signal what is meant to be internal.

    # class attribute
    school_name = "SHICHENGZHOGNXUE"

    def __init__(self, name, stu_id=110):
        # instance attributes
        self.name = name
        self.stu_id = stu_id
        students.append(self)

    def get_name_capitalize(self):
        return self.name.capitalize()

    def __str__(self):
        return "Student name is {0}, id is {1}".format(self.name, self.stu_id)


class HighSchoolStudent(Student):
    def __str__(self):
        result_str = super().__str__()
        return "This is High School, {0}".format(result_str)
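# A short illustration of the naming conventions mentioned in the comment
# above (this demo class is an addition, not part of the original code): a
# single leading underscore marks an attribute as internal by convention, and
# a double leading underscore triggers name mangling, but neither is enforced
# access control -- everything remains reachable.
class _ConventionDemo:
    def __init__(self):
        self.public_value = 1
        self._internal_value = 2      # convention: treat as private
        self.__mangled_value = 3      # stored as _ConventionDemo__mangled_value


_demo_obj = _ConventionDemo()
print(_demo_obj.public_value)                      # 1
print(_demo_obj._internal_value)                   # 2 -- still accessible
print(_demo_obj._ConventionDemo__mangled_value)    # 3 -- mangled, still accessible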
# Source: Amanjakhetiya/Data_Structures_Algorithms_In_Python (Tree/Trie/Trie.py)
"""
Implementation of Trie data structure.
"""


class Node:
    def __init__(self, value=None, isComplete=False):
        self.isComplete = isComplete
        self.children = {}
        self.value = value
        self.isPrefixOf = 0


class Trie:
    def __init__(self):
        self.root = Node()

    def add_word(self, word):
        """
        Add the given word into the trie
        :param word: A String (word) to be added in the trie
        """
        chars = list(word)
        curr_node = self.root
        for ch in chars:
            # The substring up to this node now becomes a prefix of the newly added word
            curr_node.isPrefixOf += 1
            if ch in curr_node.children:
                curr_node = curr_node.children[ch]
            else:
                new_node = Node(value=ch)
                curr_node.children[ch] = new_node
                curr_node = new_node
        curr_node.isComplete = True

    def search(self, word):
        """
        Searches if the word is present in the Trie or not
        :param word: String (word) to be searched in the trie
        :return: last Node of the searched word if present, else None
        """
        chars = list(word)
        curr_node = self.root
        for ch in chars:
            if ch in curr_node.children:
                curr_node = curr_node.children[ch]
            else:
                return None
        if curr_node.isComplete is True:
            return curr_node
        return None

    def delete(self, word):
        """
        Deletes the given String (word) from the trie
        :param word: Word (String) to be deleted
        :return: True if deleted, False if word not present in the Trie
        """
        chars = list(word)
        n = len(chars)
        val = self._delete(self.root, word)
        return True if val == 1 or val == 0 else False

    def _delete(self, node, chars):
        """
        Recursive helper function to delete the word and decrement the isPrefixOf values
        :param node: current node looking at
        :param chars: array of characters to look for
        :return: 1 if the word was deleted but an ancestor still has to prune its branch,
                 0 if the word was deleted and no further pruning is needed,
                 -1 if the word is not present in the trie
        """
        # if the chars array is empty
        if len(chars) == 0:
            # check if the word is present in the trie
            if node.isComplete:
                node.isComplete = False
                # check if the word was a prefix of any other words in trie;
                # if so, decrement isPrefixOf and return 0, as no deletions are required
                if len(node.children.keys()) > 0:
                    node.isPrefixOf -= 1
                    return 0
                # if word was not a prefix then we need to go up in the trie
                # and find the lowest parent which forms a new word in trie
                return 1
            # if word is not present in the trie
            return -1
        # check if the character is present in current node's children
        if chars[0] in node.children:
            # recursive call for remaining characters in the respective child
            val = self._delete(node.children[chars[0]], chars[1:])
            # if word was found but lowest parent which forms new word is not found
            if val == 1:
                if node.isComplete or len(node.children.keys()) > 1:
                    del node.children[chars[0]]
                    node.isPrefixOf -= 1
                    val = 0
            # if word was found and lowest parent which forms new word was also found,
            # simply reduce the isPrefixOf value of the node
            elif val == 0:
                node.isPrefixOf -= 1
            return val
        return -1


trie = Trie()
trie.add_word("anubhav")
trie.add_word("anubshrimal")
trie.add_word("anubhavshrimal")
trie.add_word("data_structures")

if trie.search("anubhav") is not None:
    print("anubhav is present in the Trie")
else:
    print("anubhav is NOT present in the Trie")

trie.delete("anubhav")

if trie.search("anubhav") is not None:
    print("anubhav is present in the Trie")
else:
    print("anubhav is NOT present in the Trie")

print("Number of words in trie:", trie.root.isPrefixOf)
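# Because every node tracks how many longer words pass through it
# (`isPrefixOf`), counting the stored words that start with a given prefix
# only needs a walk down the matching path. The helper below is a sketch
# built on the classes above; `count_words_with_prefix` is an illustrative
# addition and is not part of the original implementation.
def count_words_with_prefix(trie_obj, prefix):
    curr_node = trie_obj.root
    for ch in prefix:
        if ch not in curr_node.children:
            return 0
        curr_node = curr_node.children[ch]
    # isPrefixOf counts words strictly longer than the prefix; add one more
    # if the prefix itself is stored as a complete word.
    return curr_node.isPrefixOf + (1 if curr_node.isComplete else 0)


# After the deletion above, two stored words still start with "anub".
print("Words starting with 'anub':", count_words_with_prefix(trie, "anub"))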
# Source: jumpscale7/jumpscale_core7
from JumpScale import j

from pymongo import MongoClient, MongoReplicaSetClient


class MongoDBClient:

    def get(self, host='localhost', port=27017):
        try:
            client = MongoClient(host, int(port))
        except Exception as e:
            raise RuntimeError('Could not connect to mongodb server on %s:%s\nerror:%s' % (host, port, e))
        else:
            return client

    def getByInstance(self, instancename):
        hrd = j.application.getAppInstanceHRD(name="mongodb_client", instance=instancename)
        if hrd is None:
            j.events.opserror_critical("Could not find mongodb_client for instance %s" % instancename)

        ipaddr = hrd.get("instance.param.addr")
        port = hrd.getInt("instance.param.port")

        ssl = False
        if hrd.exists('instance.param.ssl'):
            ssl = hrd.getBool('instance.param.ssl')

        replicaset = ""
        if hrd.exists('instance.param.replicaset'):
            replicaset = hrd.get('instance.param.replicaset')

        if replicaset == "":
            return MongoClient(host=ipaddr, port=port, ssl=ssl)
        else:
            return MongoReplicaSetClient(ipaddr, port=port, ssl=ssl, replicaSet=replicaset)
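# A hedged usage sketch for the factory above: `get()` is just a thin wrapper
# around pymongo's MongoClient, so the returned object is used like any other
# pymongo client. The host and database name below are illustrative
# assumptions, not values taken from this repository, and the function only
# runs when called against a reachable MongoDB server.
def _demo_mongodb_client():
    client = MongoDBClient().get(host='localhost', port=27017)
    print(client.server_info()['version'])     # round-trip to confirm the connection
    demo_db = client['demo_db']                # standard pymongo dict-style access
    print(demo_db.command('ping'))             # {'ok': 1.0} when the server is reachable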
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np


def int_parameter(level, maxval):
    return int(level * maxval / 10)


def float_parameter(level, maxval):
    return float(level) * maxval / 10.


def sample_level(n, fixed=False):
    if fixed:
        return n
    return np.random.uniform(low=0.1, high=n)
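# A small worked example of how the helpers above are typically combined to
# turn an augmentation "level" on a 0-10 scale into an operation magnitude.
# The maximum rotation of 30 degrees and the function name are illustrative
# assumptions, not values defined in this file.
def demo_rotation_magnitude(level, max_degrees=30.0):
    # Sample a severity at or below `level` (uniform in [0.1, level) unless
    # fixed), then scale it into the operation's own range: level 10 maps to
    # `max_degrees`, so float_parameter(5, 30.0) == 15.0 degrees.
    severity = sample_level(level)
    return float_parameter(severity, max_degrees)


print(demo_rotation_magnitude(5))   # a rotation magnitude somewhere below 15 degrees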
# Source: rykehg/produtosPyFlaskJWTTests
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager
from flask_restful import Api

from blacklist import BLACKLIST
from config import JwtBackList, JwtSecret, MySQL, SQLAlchemyMod
from models.sql_alchemy import db, initialize_db
from resources.routes import initialize_routes

app = Flask(__name__)

# To use a SQLite database, uncomment the line below and comment out the mysql_uri line.
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
mysql_uri = f"mysql://{MySQL['user']}:{MySQL['password']}@{MySQL['host']}/{MySQL['database']}"
app.config['SQLALCHEMY_DATABASE_URI'] = mysql_uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = SQLAlchemyMod
app.config['JWT_SECRET_KEY'] = JwtSecret["key"]
app.config['JWT_BLACKLIST_ENABLED'] = JwtBackList

api = Api(app)
jwt = JWTManager(app)
initialize_db(app)


@app.before_first_request
def create_database():
    db.create_all()


@jwt.token_in_blacklist_loader
def verifica_blacklist(token):
    return token['jti'] in BLACKLIST


@jwt.revoked_token_loader
def token_de_acesso_invalidado():
    return jsonify({'message': 'You have been logged out.'}), 401  # unauthorized


# register the app routes
initialize_routes(app)
initialize_routes(api)
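# The two JWT callbacks above only take effect if something actually puts
# revoked token ids into BLACKLIST. Below is a hedged sketch of a logout
# resource that does so, assuming flask-jwt-extended 3.x (the version whose
# `token_in_blacklist_loader` API this file uses) and assuming BLACKLIST is a
# set; the resource name and route are illustrative and not part of
# resources.routes.
from flask_restful import Resource
from flask_jwt_extended import jwt_required, get_raw_jwt


class UserLogoutSketch(Resource):
    @jwt_required
    def post(self):
        jti = get_raw_jwt()['jti']     # unique identifier of the current token
        BLACKLIST.add(jti)             # subsequent requests with this token are rejected
        return {'message': 'You have been logged out.'}, 200


api.add_resource(UserLogoutSketch, '/logout')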