Dataset schema: content — string (length 35 to 762k characters) · sha1 — string (40 characters) · id — int64 (0 to 3.66M)
import itertools
import random


def brutekeys(pinlength, keys="0123456789", randomorder=False):
    """
    Returns a list of all possibilities to try, based on the length of s and buttons given.
    Yeah, lots of slow list copying here, but who cares, it's dwarfed by the actual guessing.
    """
    allpossible = list(map(lambda x: "".join(x), itertools.product(keys, repeat=pinlength)))
    if randomorder:
        random.shuffle(allpossible)
    return allpossible
42f659e37468073c42117d1f4d6235f08aedde59
3,653,800
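A minimal usage sketch for the brutekeys helper above, assuming the function is in scope as shown:

# All 2-digit PINs in order, then a shuffled copy for randomized guessing.
ordered = brutekeys(2)
shuffled = brutekeys(2, randomorder=True)

print(len(ordered))   # 100 combinations for a 2-digit keypad
print(ordered[:3])    # ['00', '01', '02']
print(len(shuffled))  # same 100 guesses, in random order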
def return_figures():
    """Creates four plotly visualizations

    Args:
        None

    Returns:
        list (dict): list containing the four plotly visualizations
    """
    df = query_generation('DE', 14)

    graph_one = []
    x_val = df.index

    for energy_source in df.columns:
        y_val = df[energy_source].tolist()
        graph_one.append(
            go.Scatter(
                x=x_val,
                y=y_val,
                mode='lines',
                name=energy_source,
                stackgroup='one'
            )
        )

    layout_one = dict(
        title='Generation in Germany during the last 14 days',
        xaxis=dict(title='Date'),
        yaxis=dict(title='Net Generation (MW)'),
        colorway=['#008000', '#ffa500', '#ff0000', '#000080', '#008080',
                  '#808080', '#a52a2a', '#1e90ff', '#ffc40c'],
        plot_bgcolor='#E8E8E8',
        hovermode='closest',
        hoverdistance=-1,
        height=500
    )

    # append all charts to the figures list
    figures = []
    figures.append(dict(data=graph_one, layout=layout_one))

    return figures
c89fe79d12173bc0b167e43e81d3adf19e81eb7b
3,653,801
def create_central_storage_strategy():
    """Create a CentralStorageStrategy, using a GPU if it is available."""
    compute_devices = ['cpu:0', 'gpu:0'] if (
        tf.config.list_logical_devices('GPU')) else ['cpu:0']
    return tf.distribute.experimental.CentralStorageStrategy(
        compute_devices, parameter_device='cpu:0')
46cc64d6cb888f51513a2b7d5bb4e28af58b5a29
3,653,802
def ToolStep(step_class, os, **kwargs):
    """Modify build step arguments to run the command with our custom tools."""
    if os.startswith('win'):
        command = kwargs.get('command')
        env = kwargs.get('env')
        if isinstance(command, list):
            command = [WIN_BUILD_ENV_PATH] + command
        else:
            command = WIN_BUILD_ENV_PATH + ' ' + command
        if env:
            env = dict(env)  # Copy
        else:
            env = {}
        env['BOTTOOLS'] = WithProperties('%(workdir)s\\tools\\buildbot\\bot_tools')
        kwargs['command'] = command
        kwargs['env'] = env
    return step_class(**kwargs)
30bdf2a1f81135150230b5a894ee0fa3c7be4fa4
3,653,803
def get_security_groups():
    """ Gets all available AWS security group names and ids associated with
    an AWS role.

    Return:
        sg_names (list): list of security group id, name, and description
    """
    sg_groups = boto3.client('ec2', region_name='us-west-1').describe_security_groups()['SecurityGroups']
    sg_names = []
    for sg in sg_groups:
        sg_names.append(sg['GroupId'] + ': ' + sg['GroupName'] + ': ' + sg['Description'])
    return sg_names
48a30454a26ea0b093dff59c830c14d1572d3e11
3,653,804
def traverseTokens(tokens, lines, callback):
    """Traverses a list of tokens to identify functions. Then uses a callback to
    perform some work on the functions.

    Each function seen gets a new State object created from the given callback
    method; there is a single State for global code which is given None in the
    constructor. Then, each token seen is passed to the 'add' method of the
    State. This is used by the State to either calculate sizes, print tokens, or
    detect dependencies. The 'build' method is called at the end of the function
    to create a result object that is returned as an array at the end.

    Arguments:
      tokens - An array of Tokens.
      lines - An array of compiled code lines.
      callback - A constructor that returns a state object. It takes a start
          token or None if outside a function. It has two member functions:
          add - accepts the current token and the token's index.
          build - returns an object to be added to the results.

    Returns: an array of State objects in a format controlled by the callback.
    """
    ret = []
    state = callback(None, None)

    # Create a token iterator. This is used to read tokens from the array. We
    # cannot use a for loop because the iterator is passed to readFunction.
    tokenIter = enumerate(tokens)
    try:
        while True:
            index, token = next(tokenIter)
            if isFunction(token, lines):
                ret += readFunction(tokenIter, token, index, lines, callback)
            else:
                state.add(token, index)
    except StopIteration:
        pass

    temp = state.build()
    if temp:
        ret.append(temp)
    return ret
4fcdfc4505a0a3eb1ba10a884cb5fc2a2714d845
3,653,805
from typing import Any


def publications_by_country(papers: dict[str, Any]) -> dict[Location, int]:
    """returns number of published papers per country"""
    countries_publications = {}
    for paper in papers:
        participant_countries = {Location(city=None, state=None, country=location.country)
                                 for location in paper.locations}
        for country in participant_countries:
            try:
                countries_publications[country] += 1
            except KeyError:
                countries_publications[country] = 1
    return (dict(sorted(countries_publications.items(),
                        key=lambda x: x[1], reverse=True)))
7295fd9491d60956ca45995efc6818687c266446
3,653,806
def dequote(str):
    """Will remove single or double quotes from the start and end of a string
    and return the result."""
    quotechars = "'\""
    while len(str) and str[0] in quotechars:
        str = str[1:]
    while len(str) and str[-1] in quotechars:
        str = str[0:-1]
    return str
e6377f9992ef8119726b788c02af9df32c722c28
3,653,807
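A quick usage sketch for the dequote helper above (assuming it is in scope):

# Strips any leading/trailing quote characters, mixed or repeated.
print(dequote('"hello"'))        # hello
print(dequote("'world'"))        # world
print(dequote('"\'nested\'"'))   # nested
print(dequote('no quotes'))      # no quotes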
import numpy


def uccsd_singlet_paramsize(n_qubits, n_electrons):
    """Determine number of independent amplitudes for singlet UCCSD

    Args:
        n_qubits(int): Number of qubits/spin-orbitals in the system
        n_electrons(int): Number of electrons in the reference state

    Returns:
        Number of independent parameters for singlet UCCSD with a single
        reference.
    """
    n_occupied = int(numpy.ceil(n_electrons / 2.))
    n_virtual = n_qubits // 2 - n_occupied

    n_single_amplitudes = n_occupied * n_virtual
    n_double_amplitudes = n_single_amplitudes ** 2

    return (n_single_amplitudes + n_double_amplitudes)
408c9158c76fba5d118cc6603e08260db30cc3df
3,653,808
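A small worked check of the counting above, with hypothetical inputs (8 spin-orbitals, 4 electrons) and the function assumed to be importable:

# n_occupied = ceil(4 / 2) = 2 occupied spatial orbitals
# n_virtual  = 8 // 2 - 2  = 2 virtual spatial orbitals
# singles    = 2 * 2       = 4
# doubles    = 4 ** 2      = 16
assert uccsd_singlet_paramsize(n_qubits=8, n_electrons=4) == 20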
def variance(timeseries: SummarizerAxisTimeseries, param: dict):
    """
    Calculate the variance of the timeseries
    """
    v_mean = mean(timeseries)

    # Calculate variance
    v_variance = 0
    for ts, value in timeseries.values():
        v_variance += (value - v_mean) ** 2

    # Average
    n_values = len(timeseries.values())
    if n_values == 0:
        return 0
    return v_variance / n_values
9b8c0e6a1d1e313a3e3e4a82fe06845f4d996620
3,653,809
def setup_i2c_sensor(sensor_class, sensor_name, i2c_bus, errors):
    """ Initialise one of the I2C connected sensors, returning None on error."""
    if i2c_bus is None:
        # This sensor uses the multiplexer and there was an error initialising that.
        return None
    try:
        sensor = sensor_class(i2c_bus)
    except Exception as err:
        # Error initialising this sensor, try to continue without it.
        msg = "Error initialising {}:\n{}".format(sensor_name, err)
        print(msg)
        errors += (msg + "\n")
        return None
    else:
        print("{} initialised".format(sensor_name))
        return sensor
62633c09f6e78b43fca625df8fbd0d20d866735b
3,653,810
def argparse_textwrap_unwrap_first_paragraph(doc):
    """Join by single spaces all the leading lines up to the first empty line"""
    index = (doc + "\n\n").index("\n\n")
    lines = doc[:index].splitlines()
    chars = " ".join(_.strip() for _ in lines)
    alt_doc = chars + doc[index:]
    return alt_doc
f7068c4b463c63d100980b743f8ed2d69b149a97
3,653,811
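A short usage sketch for the unwrap helper above, assuming it is in scope; the sample usage string is illustrative:

usage = """prog one
       two
       three

Detailed description stays untouched."""

print(argparse_textwrap_unwrap_first_paragraph(usage))
# prog one two three
#
# Detailed description stays untouched.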
import ctypes

import numpy as np


def iterator(x, y, z, coeff, repeat, radius=0):
    """ compute an array of positions visited by recurrence relation """
    c_iterator.restype = ctypes.POINTER(ctypes.c_double * (3 * repeat))

    start = to_double_ctype(np.array([x, y, z]))
    coeff = to_double_ctype(coeff)
    out = to_double_ctype(np.zeros(3 * repeat))

    res = c_iterator(start, coeff, repeat, ctypes.c_double(radius), out).contents
    return np.array(res).reshape((repeat, 3)).T
82c32dddf2c8d0899ace56869679ccc8dbb36d22
3,653,812
import webbrowser


def open_pep(
    search: str, base_url: str = BASE_URL, pr: int | None = None, dry_run: bool = False
) -> str:
    """Open this PEP in the browser"""
    url = pep_url(search, base_url, pr)
    if not dry_run:
        webbrowser.open_new_tab(url)
    print(url)
    return url
2f23e16867ccb0e028798ff261c9c64eb1cdeb31
3,653,813
def random_sparse_matrix(n, n_add_elements_frac=None, n_add_elements=None,
                         elements=(-1, 1, -2, 2, 10), add_elements=(-1, 1)):
    """Get a random matrix where there are n_elements."""
    n_total_elements = n * n
    n_diag_elements = n
    frac_diag = 1. * n_diag_elements / n_total_elements

    if n_add_elements is not None and n_add_elements_frac is not None:
        raise ValueError("Should only set either n_add_elements or n_add_elements_frac")

    if n_add_elements_frac is not None:
        n_add_elements = int(round(n_add_elements_frac * n_total_elements))
        assert n_add_elements_frac >= 0, n_add_elements_frac
        assert n_add_elements_frac <= 1 - frac_diag, n_add_elements_frac

    assert n_add_elements >= 0
    assert n_add_elements <= n_total_elements - n_diag_elements

    A = np.zeros((n, n))
    remaining = set(range(n))

    # main elements
    for i in range(n):
        j = np.random.choice(list(remaining))
        remaining.remove(j)
        A[i, j] = np.random.choice(list(elements))

    # additional elements
    left_indices = np.array(list(zip(*np.where(A == 0.0))))
    # print(left_indices)
    # print(A)
    np.random.shuffle(left_indices)

    assert len(left_indices) >= n_add_elements

    for i_add in range(n_add_elements):
        i, j = left_indices[i_add]
        assert A[i, j] == 0.0
        A[i, j] = np.random.choice(list(add_elements))

    return A
41ea01c69bd757f11bbdb8a259ec3aa1baabadc2
3,653,814
import uuid


def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    :param val: Value to verify
    :type val: string
    :returns: bool

    .. versionchanged:: 1.1.1
       Support non-lowercase UUIDs.
    """
    try:
        return str(uuid.UUID(val)).replace("-", "") == _format_uuid_string(val)
    except (TypeError, ValueError, AttributeError):
        return False
fc0b9618ede3068fe5946948dfbe655e64b27ba8
3,653,815
import argparse


def is_positive_integer(value: str) -> int:
    """
    Helper function for argparse.
    Raise an exception if value is not a positive integer.
    """
    int_value = int(value)
    if int_value <= 0:
        raise argparse.ArgumentTypeError("{} is not a positive integer".format(value))
    return int_value
4f5e2fd4e95e92b69bb8073daafbf8989037657b
3,653,816
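A sketch of how a validator like this is typically wired into argparse; the parser, flag name, and default here are illustrative:

parser = argparse.ArgumentParser(description="demo")
parser.add_argument("--count", type=is_positive_integer, default=1)

args = parser.parse_args(["--count", "5"])
print(args.count)  # 5

# parser.parse_args(["--count", "-3"]) exits with:
# error: argument --count: -3 is not a positive integer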
from typing import List
from typing import Tuple


def merge_overlapped_spans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
    """
    Merge overlapped spans

    Parameters
    ----------
    spans: input list of spans

    Returns
    -------
    merged spans
    """
    span_sets = list()
    for span in spans:
        span_set = set(range(span[0], span[1]))
        if not span_sets:
            span_sets.append(span_set)
        elif span_sets[-1] & span_set:
            if span_set - span_sets[-1]:
                span_sets[-1] = span_sets[-1] | span_set
        else:
            span_sets.append(span_set)
    merged_spans = list()
    for span_set in span_sets:
        merged_spans.append((min(span_set), max(span_set) + 1))
    return merged_spans
0ea7f2a730274f7a98f25b8df22754ec79e8fce7
3,653,817
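A quick usage sketch for the span-merging helper above (assuming it is in scope); note it only compares each span with the most recently kept one, so it expects sorted input:

spans = [(0, 3), (2, 6), (8, 10)]
print(merge_overlapped_spans(spans))  # [(0, 6), (8, 10)]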
def network(dataframe, author_col_name, target_col_name, source_col_name=None):
    """
    This function runs a Network analysis on the dataset provided.

    :param dataframe: DataFrame containing the data on which to conduct the activity analysis.
       It must contain at least an *author*, a *target* and a *source* column.
    :type dataframe: pandas.DataFrame
    :param author_col_name: Name of the column containing the authors of the entries.
    :type author_col_name: str
    :param target_col_name: Name of the column containing the targets of the relationship that
       the network analysis is supposed to be exploring.
    :type target_col_name: str
    :param source_col_name: Name of the column containing the sources of the relationships that
       the network analysis is supposed to be exploring.
    :type source_col_name: str
    :return: Object of type network containing a *dataframe* field and a *graph* one.
    """
    graph = _network_from_dataframe(dataframe, author_col_name, target_col_name, source_col_name)
    no_edges = []
    for u, v, weight in graph.edges.data("weight"):
        if weight == 0:
            no_edges.append((u, v))

    graph.remove_edges_from(no_edges)

    degrees = nx.degree_centrality(graph)
    nodes = pd.DataFrame.from_records([degrees]).transpose()
    nodes.columns = ["centrality"]

    return Network(nodes, graph)
edb2942e1e92cad64609819994a4e10b1de85497
3,653,818
def get_cert_and_update_domain(
    zappa_instance,
    lambda_name,
    api_stage,
    domain=None,
    manual=False,
):
    """
    Main cert installer path.
    """
    try:
        create_domain_key()
        create_domain_csr(domain)
        get_cert(zappa_instance)
        create_chained_certificate()

        with open("{}/signed.crt".format(gettempdir())) as f:
            certificate_body = f.read()

        with open("{}/domain.key".format(gettempdir())) as f:
            certificate_private_key = f.read()

        with open("{}/intermediate.pem".format(gettempdir())) as f:
            certificate_chain = f.read()

        if not manual:
            if domain:
                if not zappa_instance.get_domain_name(domain):
                    zappa_instance.create_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
                    print(
                        "Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part."
                    )
                else:
                    zappa_instance.update_domain_name(
                        domain_name=domain,
                        certificate_name=domain + "-Zappa-LE-Cert",
                        certificate_body=certificate_body,
                        certificate_private_key=certificate_private_key,
                        certificate_chain=certificate_chain,
                        certificate_arn=None,
                        lambda_name=lambda_name,
                        stage=api_stage,
                    )
        else:
            print("Certificate body:\n")
            print(certificate_body)

            print("\nCertificate private key:\n")
            print(certificate_private_key)

            print("\nCertificate chain:\n")
            print(certificate_chain)

    except Exception as e:
        print(e)
        return False

    return True
8ce4d06af0d923165dbbe4c6cbb7617f8e20557f
3,653,819
def _ww3_ounp_contents(run_date, run_type):
    """
    :param str run_type:
    :param run_date: :py:class:`arrow.Arrow`

    :return: ww3_ounp.inp file contents
    :rtype: str
    """
    start_date = (
        run_date.format("YYYYMMDD")
        if run_type == "nowcast"
        else run_date.shift(days=+1).format("YYYYMMDD")
    )
    run_hours = {"nowcast": 24, "forecast": 36, "forecast2": 30}
    output_interval = 600  # seconds
    output_count = int(run_hours[run_type] * 60 * 60 / output_interval)
    contents = f"""$ WAVEWATCH III NETCDF Point output post-processing
$
$ First output time (YYYYMMDD HHmmss), output increment (s), number of output times
{start_date} 000000 {output_interval} {output_count}
$
$ All points defined in ww3_shel.inp
-1
$ File prefix
$ number of characters in date
$ netCDF4 output
$ one file, max number of points to process
$ tables of mean parameters
$ WW3 global attributes
$ time,station dimension order
$ WMO standard output
SoG_ww3_points_
8
4
T 100
2
0
T
6
"""
    return contents
fda73d25c39c5bd46d791e6745fa72a0285edcdc
3,653,820
import logging


def EMLP(rep_in, rep_out, group, ch=384, num_layers=3):
    """ Equivariant MultiLayer Perceptron.

        If the input ch argument is an int, uses the hands off uniform_rep heuristic.
        If the ch argument is a representation, uses this representation for the hidden layers.
        Individual layer representations can be set explicitly by using a list of ints or a list of
        representations, rather than use the same for each hidden layer.

        Args:
            rep_in (Rep): input representation
            rep_out (Rep): output representation
            group (Group): symmetry group
            ch (int or list[int] or Rep or list[Rep]): number of channels in the hidden layers
            num_layers (int): number of hidden layers

        Returns:
            Module: the EMLP objax module."""
    logging.info("Initing EMLP (Haiku)")
    rep_in = rep_in(group)
    rep_out = rep_out(group)
    # Parse ch as a single int, a sequence of ints, a single Rep, a sequence of Reps
    if isinstance(ch, int):
        middle_layers = num_layers * [uniform_rep(ch, group)]
    elif isinstance(ch, Rep):
        middle_layers = num_layers * [ch(group)]
    else:
        middle_layers = [(c(group) if isinstance(c, Rep) else uniform_rep(c, group)) for c in ch]
    # assert all((not rep.G is None) for rep in middle_layers[0].reps)
    reps = [rep_in] + middle_layers
    # logging.info(f"Reps: {reps}")
    network = Sequential(
        *[EMLPBlock(rin, rout) for rin, rout in zip(reps, reps[1:])],
        Linear(reps[-1], rep_out)
    )
    return network
aa4a1b1286ac1c96bedfe82813d9d24f36aabe96
3,653,821
from gzip import GzipFile
from io import BytesIO


def decompress(data):
    """
    Decompress data in one shot.
    """
    return GzipFile(fileobj=BytesIO(data), mode='rb').read()
db32cb2b9e2ddeb3a38901460d0882ceee9cab9e
3,653,822
import re


def str_to_rgb(arg):
    """Convert an rgb string 'rgb(x,y,z)' to a list of ints [x,y,z]."""
    return list(
        map(int, re.match(r'rgb\((\d+),\s*(\d+),\s*(\d+)\)', arg).groups())
    )
f8920373d5941fb231c1ae0d732fd04558615bc3
3,653,823
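A quick usage sketch for the str_to_rgb converter above (assuming it is in scope):

print(str_to_rgb("rgb(255, 128, 0)"))  # [255, 128, 0]
print(str_to_rgb("rgb(0,0,0)"))        # [0, 0, 0]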
def vshift(x, shifts=0):
    """shift batch of images vertically"""
    return paddle.roll(x, int(shifts * x.shape[2]), axis=2)
cb00948cb58d3c2c13628d44cc36e6cd2ab487ee
3,653,824
def index():
    """Shows book titles and descriptions"""
    tagid = request.query.tagid
    books = []
    if tagid:
        try:
            tag = Tag.get(tagid)
            books = tag.books.all()
        except Tag.DoesNotExist:
            pass
    if not books:
        books = Book.all().order_by("title")
    return dict(books=books)
ae1fb3502f75a09577da489fe2488cbe78f699f7
3,653,825
import os import yaml def _GetExternalDataConfig(file_path_or_simple_spec, use_avro_logical_types=False, parquet_enum_as_string=False, parquet_enable_list_inference=False): """Returns a ExternalDataConfiguration from the file or specification string. Determines if the input string is a file path or a string, then returns either the parsed file contents, or the parsed configuration from string. The file content is expected to be JSON representation of ExternalDataConfiguration. The specification is expected to be of the form schema@format=uri i.e. schema is separated from format and uri by '@'. If the uri itself contains '@' or '=' then the JSON file option should be used. "format=" can be omitted for CSV files. Raises: UsageError: when incorrect usage or invalid args are used. """ maybe_filepath = os.path.expanduser(file_path_or_simple_spec) if os.path.isfile(maybe_filepath): try: with open(maybe_filepath) as external_config_file: return yaml.safe_load(external_config_file) except yaml.error.YAMLError as e: raise app.UsageError( ('Error decoding YAML external table definition from ' 'file %s: %s') % (maybe_filepath, e)) else: source_format = 'CSV' schema = None connection_id = None error_msg = ('Error decoding external_table_definition. ' 'external_table_definition should either be the name of a ' 'JSON file or the text representation of an external table ' 'definition. Given:%s') % ( file_path_or_simple_spec) parts = file_path_or_simple_spec.split('@') if len(parts) == 1: # Schema and connection are not specified. format_and_uri = parts[0] elif len(parts) == 2: # when there are 2 components, it can be: # 1. format=uri@connection_id.e.g csv=gs://bucket/[email protected] # 2. schema@format=uri e.g.col1::INTEGER@csv=gs://bucket/file # if the first element is format=uri, then second element is connnection. # Else, the first is schema, second is format=uri. if parts[0].find('://') >= 0: # format=uri and connection specified. format_and_uri = parts[0] connection_id = parts[1] else: # Schema and format=uri are specified. schema = parts[0] format_and_uri = parts[1] elif len(parts) == 3: # Schema and connection both are specified schema = parts[0] format_and_uri = parts[1] connection_id = parts[2] else: raise app.UsageError(error_msg) separator_pos = format_and_uri.find('=') if separator_pos < 0: # Format is not specified uri = format_and_uri else: source_format = format_and_uri[0:separator_pos] uri = format_and_uri[separator_pos + 1:] if not uri: raise app.UsageError(error_msg) # When using short notation for external table definition # autodetect is always performed. return _CreateExternalTableDefinition( source_format, uri, schema, True, connection_id, use_avro_logical_types=use_avro_logical_types, parquet_enum_as_string=parquet_enum_as_string, parquet_enable_list_inference=parquet_enable_list_inference)
44b30b8af9a95ea23be6cac071641cb5d4792562
3,653,826
import errno


def _linux_iqn():
    """
    Return iSCSI IQN from a Linux host.
    """
    ret = []

    initiator = "/etc/iscsi/initiatorname.iscsi"
    try:
        with salt.utils.files.fopen(initiator, "r") as _iscsi:
            for line in _iscsi:
                line = line.strip()
                if line.startswith("InitiatorName="):
                    ret.append(line.split("=", 1)[1])
    except OSError as ex:
        if ex.errno != errno.ENOENT:
            log.debug("Error while accessing '%s': %s", initiator, ex)

    return ret
d7ae097a00c8df15208e91978d3acda1eaf8de62
3,653,827
def generate_file_storage_name(file_uri: str, suffix: str) -> str:
    """
    Generate a filename using the hash of the file contents and some provided suffix.

    Parameters
    ----------
    file_uri: str
        The URI to the file to hash.
    suffix: str
        The suffix to append to the hash as a part of the filename.

    Returns
    -------
    dst: str
        The name of the file as it should be on Google Cloud Storage.
    """
    hash = hash_file_contents(file_uri)
    return f"{hash}-{suffix}"
08087e86e1f70e0820cf9e3263c7a419de13ffcc
3,653,828
def mullerlyer_parameters(illusion_strength=0, difference=0, size_min=0.5, distance=1): """Compute Parameters for Müller-Lyer Illusion. Parameters ---------- illusion_strength : float The strength of the arrow shapes in biasing the perception of lines of unequal lengths. A positive sign represents the bottom arrows pointing outwards and upper arrows pointing inwards. A negative sign represents the bottom arrows pointing inwards and upper arrows pointing outwards. difference : float The objective length difference of the horizontal lines. Specifically, the real difference of upper horizontal line relative to the lower horizontal line. E.g., if ``difference=1``, the upper line will be 100% longer, i.e., 2 times longer than the lower line. A negative sign reflects the converse, where ``difference=-1`` will result in the lower line being 100% longer than the upper line. size_min : float Length of lower horizontal line. distance : float Distance between the upper and lower horizontal lines. Returns ------- dict Dictionary of parameters of the Müller-Lyer illusion. """ parameters = _ponzo_parameters_topbottom(difference=difference, size_min=size_min, distance=distance) length = size_min/2 if difference >= 0: angle = {"Top": -illusion_strength, "Bottom": illusion_strength} else: angle = {"Top": illusion_strength, "Bottom": -illusion_strength} for which in ["Top", "Bottom"]: for side in ["Left", "Right"]: if side == "Left": coord, _, _ = _coord_line(x1=parameters[which + "_x1"], y1=parameters[which + "_y1"], length=length, angle=angle[which]) else: coord, _, _ = _coord_line(x1=parameters[which + "_x2"], y1=parameters[which + "_y2"], length=length, angle=-angle[which]) x1, y1, x2, y2 = coord for c in ["1", "2"]: parameters["Distractor_" + which + side + c + "_x1"] = x1 parameters["Distractor_" + which + side + c + "_y1"] = y1 parameters["Distractor_" + which + side + c + "_x2"] = x2 if c == "1": parameters["Distractor_" + which + side + c + "_y2"] = y2 else: parameters["Distractor_" + which + side + c + "_y2"] = y2 - 2 * (y2 - y1) parameters.update({"Illusion": "MullerLyer", "Illusion_Strength": illusion_strength, "Illusion_Type": "Congruent" if illusion_strength > 0 else "Incongruent", "Distractor_Length": length}) return parameters
61631be407aa25608e1321f7e87e030bca9fa90d
3,653,829
def filter_for_corsi(pbp):
    """
    Filters given dataframe for goal, shot, miss, and block events

    :param pbp: a dataframe with column Event

    :return: pbp, filtered for corsi events
    """
    return filter_for_event_types(pbp, {'Goal', 'Shot', 'Missed Shot', 'Blocked Shot'})
9add922fe3aa4ded63b4032b8fe412bbc5611f3e
3,653,830
from typing import Dict
from typing import Tuple
import json
import hashlib


def upload(msg: Dict, public_key: bytes, ipns_keypair_name: str = '') -> Tuple[str, str]:
    """Upload encrypted string to IPFS.

    This can be manifest files, results, or anything that's been already encrypted.
    Optionally pins the file to IPNS. Pass in the IPNS key name
    To get IPNS key name, see create_new_ipns_link

    Args:
        msg (Dict): The message to upload and encrypt.
        public_key (bytes): The public_key to encrypt the file for.
        ipns_keypair_name (str): If left blank, then don't pin to IPNS

    Returns:
        Tuple[str, str]: returns [sha1 hash, ipfs hash]

    Raises:
        Exception: if adding bytes with IPFS fails.

    >>> credentials = {
    ...     "gas_payer": "0x1413862C2B7054CDbfdc181B83962CB0FC11fD92",
    ...     "gas_payer_priv": "28e516f1e2f99e96a48a23cea1f94ee5f073403a1c68e818263f0eb898f1c8e5"
    ... }
    >>> pub_key = b"2dbc2c2c86052702e7c219339514b2e8bd4687ba1236c478ad41b43330b08488c12c8c1797aa181f3a4596a1bd8a0c18344ea44d6655f61fa73e56e743f79e0d"
    >>> job = Job(credentials=credentials, escrow_manifest=manifest)
    >>> (hash_, manifest_url) = upload(job.serialized_manifest, pub_key)
    >>> manifest_dict = download(manifest_url, job.gas_payer_priv)
    >>> manifest_dict == job.serialized_manifest
    True
    """
    try:
        manifest_ = json.dumps(msg, sort_keys=True)
    except Exception as e:
        LOG.error("Can't extract the json from the dict")
        raise e

    hash_ = hashlib.sha1(manifest_.encode('utf-8')).hexdigest()
    try:
        ipfs_file_hash = IPFS_CLIENT.add_bytes(_encrypt(public_key, manifest_))
    except Exception as e:
        LOG.warning("Adding bytes with IPFS failed because of: {}".format(e))
        raise e

    if ipns_keypair_name != '':
        try:
            # publish ipns ... docs: https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/http_client_ref.html#ipfshttpclient.Client.name
            IPFS_CLIENT.name.publish(
                f'/ipfs/{ipfs_file_hash}',
                key=ipns_keypair_name.lower(),
                allow_offline=True)
        except Exception as e:
            LOG.warning("IPNS failed because of: {}".format(e))
            raise e

    return hash_, ipfs_file_hash
3dc1b12e57ce0054a1bf5b534f92ed7130187a53
3,653,831
def test_sakai_auth_url(oauth_mock):
    """
    Test auth url retrieval for Sakai.

    Test that we can retrieve a formatted Oauth1 URL for Sakai
    """
    def mock_fetch_token(mock_oauth_token, mock_oauth_token_secret):
        def mock_token_getter(mock_url):
            return {
                'oauth_token': mock_oauth_token,
                'oauth_token_secret': mock_oauth_token_secret,
            }
        return mock_token_getter

    mock_authorize_url = 'http://host/oauth-tool/authorize/'

    another_mock = MagicMock()
    another_mock.fetch_request_token.side_effect = mock_fetch_token(
        fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_TOKEN'],
        fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_SECRET'],
    )
    oauth_mock.return_value = another_mock

    data = {
        'request_token_url': 'http://host/oauth-tool/request_tokén',
        'authorize_url': mock_authorize_url,
        'callback_url': "http://this.doesnt.ma/tter",
    }
    headers = fixtures.get_mocked_headers('http://somebaseurl')
    del headers['HTTP_LMS_OAUTH_TOKEN']
    del headers['HTTP_LMS_OAUTH_SECRET']

    client = Client()
    resp = client.get(
        reverse('auth_url'),
        content_type='application/json',
        data=data,
        **headers,
    )

    expected_auth_url = (
        f'{mock_authorize_url}'
        f'?oauth_token={fixtures.oauth_creds_dict["HTTP_LMS_OAUTH_TOKEN"]}'
    )

    assert resp.status_code == status.HTTP_200_OK
    actual_resp_json = resp.json()
    expected_resp_json = {
        'auth_url': expected_auth_url,
        'redirect_key': 'redirect_uri',
        'oauth_token_secret': fixtures.oauth_creds_dict[
            'HTTP_LMS_OAUTH_SECRET'
        ],
    }
    assert actual_resp_json == expected_resp_json
fc9321d5b88379fb08d40b8dadece1c3fb31b26a
3,653,832
from typing import Tuple from typing import List from typing import Iterable def nodes_and_groups(expr: Expression) -> Tuple[List[Expression], Iterable[List[int]]]: """ Returns a list of all sub-expressions, and an iterable of lists of indices to sub-expressions that are equivalent. Example 1: (let (x 3) add ( (let (z 3) (add z (add x x))) (let (z 5) (add z (add x x))) ) ) Here, the two identical expressions '(add x x)' will be in one equivalence group (the closest binder for the free variable 'x' is the same). The four (single-node) sub-expressions 'x' will also be in one equivalence group. Example 2: In expression: (foo (let (x 3) (add x x)) # 1 (let (x 4) (add x x)) # 2 (let (y 3) (add y y)) # 3 ) - sub-expressions '(let (x 3) (add x x))' and '(let (y 3) (add y y))' are equivalent. - The sub-expressions `(add x x)` on line #1 and `(add y y)` on line #3 will not be in equivalence group, because they are in a different binding scope, even though they will evaluate to the same value. - '(let (x 3) (add x x))' and '(let (x 4) (add x x))' are not equivalent, because 'x' is assigned a different value. Also, for each 'add' expression, the pair of identical variables within it will, of course, be in an equivalence group. Args: expr: An expression Returns: A tuple of: * a list of subtrees (nodes) of the Expression; the same as expr.nodes, but returned to avoid an extra traversal (and more clearly corresponding to the second element as they are constructed by the same algorithm) * an iterable of lists of indices, where each list contains indices of nodes which are equivalent (compute the same value). Note that nodes that are not in """ nodes: List[Expression] = [] closest_binders: List[int] = [] def traverse(subexp: Expression, binder_stack: List[Tuple[str, int]]) -> None: idx = len(nodes) nodes.append(subexp) # Calculate the closest binder of a free-variable - intuitively, the scope of the subexp, # the highest point to which a let containing this subexp's value could be lifted. # (That is - this subexp cannot be the same as any other subexp unless their closest binder's are the same) closest_binder = -1 # Global for skip, (bv_name, binder_idx) in enumerate(reversed(binder_stack)): if bv_name in subexp.free_var_names: closest_binder = binder_idx if skip > 0 and len(subexp.children) > 0: binder_stack = binder_stack[:-skip] break closest_binders.append(closest_binder) if subexp.is_binder: bound_stack = binder_stack + [(subexp.bound_var.name, idx)] for i, c in enumerate(subexp.children): traverse( c, bound_stack if subexp.is_binder and subexp.binds_in_child(i) else binder_stack, ) traverse(expr, []) assert len(nodes) == expr.num_nodes assert len(closest_binders) == expr.num_nodes def equiv_groups() -> Iterable[List[int]]: # Group node indices by whether they have the same closest binder, same number of nodes, and are the same op. for g in utils.group_by( range(len(nodes)), lambda idx: (closest_binders[idx], nodes[idx].num_nodes, nodes[idx].op), ).values(): # Skip obviously-singleton groups if len(g) >= 2: yield from utils.group_by(g, lambda idx: nodes[idx]).values() return nodes, equiv_groups()
bf5087fa5c4dd36e614c5e9227fd3337960dc9c6
3,653,833
def masterxprv_from_electrummnemonic(mnemonic: Mnemonic, passphrase: str = "",
                                     network: str = 'mainnet') -> bytes:
    """Return BIP32 master extended private key from Electrum mnemonic.

    Note that for a 'standard' mnemonic the derivation path is "m",
    for a 'segwit' mnemonic it is "m/0h" instead.
    """
    version, seed = electrum._seed_from_mnemonic(mnemonic, passphrase)
    prefix = _NETWORKS.index(network)

    if version == 'standard':
        xversion = _XPRV_PREFIXES[prefix]
        return rootxprv_from_seed(seed, xversion)
    elif version == 'segwit':
        xversion = _P2WPKH_PRV_PREFIXES[prefix]
        rootxprv = rootxprv_from_seed(seed, xversion)
        return derive(rootxprv, 0x80000000)  # "m/0h"
    else:
        raise ValueError(f"Unmanaged electrum mnemonic version ({version})")
6642aba45eb72b5f366c52862ce07ddbf05d80f8
3,653,834
def release_(ctx, version, branch, master_branch, release_branch, changelog_base, force): """ Release a branch. Note that this differs from the create-release command: 1. Create a Github release with the version as its title. 2. Create a commit bumping the version of setup.py on top of the branch. 3. Generated and upload changelog of the head of the branch, relative to the latest release. 4. Update the master branch to point to the release commit. 4. Close any related issues with a comment specifying the release title. The version is calculated automatically according to the changelog. Note that the release tag will point to the above mentioned commit. The command is mainly intended to be executed automatically using CI systems (as described below), and implements certain heuristics in order to perform properly. Note, the release process will only take place if the following conditions hold: 1. The current build passes validation. (see validate-build) 2. The tip of the branch passes validation. (see validate-commit) 3. The release does not yet exist. If either of these conditions is not satisfied, the command will be silently ignored and complete successfully. This is useful so that your builds will not fail when running on commits that shouldn't be released. This command is idempotent, given that the tip of your branch hasn't changed between executions. You can safely run this command in parallel, this is important when running your CI process on multiple systems concurrently. """ ci_provider = ctx.obj.ci_provider gh = ctx.obj.github branch = branch or (ci_provider.branch if ci_provider else None) release_branch = release_branch or gh.default_branch_name sha = ci_provider.sha if ci_provider else branch if not force: try: ctx.invoke(ci.validate_build, release_branch=release_branch) ctx.invoke(validate_commit, sha=sha) except TerminationException as e: if isinstance(e.cause, exceptions.ReleaseValidationFailedException): log.sub() log.echo("Not releasing: {}".format(str(e))) return raise log.echo("Releasing branch '{}'".format(branch), add=True) changelog = _generate_changelog(gh=gh, sha=sha, base=changelog_base) next_version = version or changelog.next_version if not next_version: err = ShellException('None of the commits in the changelog references an issue ' 'labeled with a release label. Cannot determine what the ' 'version number should be.') err.cause = 'You probably only committed internal issues since the last release, ' \ 'or forgot to reference the issue.' err.possible_solutions = [ 'Amend the message of one of the commits to reference a release issue', 'Push another commit that references a release issue', 'Use --version to specify a version manually' ] raise err release = _create_release(ctx=ctx, changelog=changelog, branch=branch, master_branch=master_branch, version=next_version, sha=sha) log.echo('Closing issues', add=True) for issue in changelog.all_issues: ctx.invoke(close_issue, number=issue.impl.number, release=release.title) log.sub() log.sub() log.echo('Successfully released: {}'.format(release.url)) return release
e7a9de4c12f3eb3dfe3d6272ccb9254e351641b9
3,653,835
def get_namedtuple_from_paramnames(owner, parnames):
    """
    Returns the namedtuple classname for parameter names
    :param owner: Owner of the parameters, usually the spotpy setup
    :param parnames: Sequence of parameter names
    :return: Class
    """
    # Get name of owner class
    typename = type(owner).__name__
    parnames = ["p" + x if x.isdigit() else x for x in list(parnames)]
    return namedtuple('Par_' + typename,  # Type name created from the setup name
                      parnames)
4c0b2ca46e2d75d1e7a1281e58a3fa6402f42cf0
3,653,836
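A small usage sketch for the helper above; the DemoSetup class is a hypothetical stand-in for a spotpy setup, and the namedtuple import is assumed to be present in the real module:

from collections import namedtuple  # assumed import for the helper above


class DemoSetup:  # stand-in for a spotpy setup object
    pass


Par = get_namedtuple_from_paramnames(DemoSetup(), ["x", "0", "rate"])
print(Par._fields)               # ('x', 'p0', 'rate') -- digit-only names get a 'p' prefix
print(Par(x=1.0, p0=2.0, rate=0.3))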
def readNotificationGap(alarmName):
    """ Returns the notificationGap of the specified alarm from the database """
    cur = conn.cursor()
    cur.execute('Select notificationGap FROM Alarms WHERE name is "%s"' % alarmName)
    gapNotification = int(cur.fetchone()[0])
    conn.commit()
    return gapNotification
afa7bd0e510433e6a49ecd48937f2d743f8977e4
3,653,837
def vertical_line(p1, p2, p3):
    """
    The line through p3 that is perpendicular to the line through p1 and p2.
    Perpendicular lines have slopes that are negative reciprocals of each other.
    :param p1: [x,y]
    :param p2: [x,y]
    :param p3: [x,y]
    :return: coefficients [na, nb, nc] of the new line's equation
    """
    line = fit_line(p1, p2)
    a, b, c = line  # ax+by+c=0; b is generally -1
    # Get the coefficients na, nb, nc of the perpendicular line
    if a == 0.:  # original equation is y=c; new equation is x=-nc
        na = 1.
        nb = 0.
    elif b == 0.:  # original equation is x=-c; new equation is y=nc
        na = 0.
        nb = -1.
    else:
        # slopes are negative reciprocals of each other: a*na = -1
        na = -1. / a
        nb = -1.
    # Solve for coefficient nc from na*x + nb*y + nc = 0
    nc = -(na * p3[0] + nb * p3[1])
    return [na, nb, nc]
e1644edf7702996f170b6f53828e1fc864151759
3,653,838
def _get_value(key, entry):
    """
    :param key:
    :param entry:
    :return:
    """
    if key in entry:
        if entry[key] and str(entry[key]).lower() == "true":
            return True
        elif entry[key] and str(entry[key]).lower() == "false":
            return False
        return entry[key]
    return None
93820395e91323939c8fbee653b6eabb6fbfd8eb
3,653,839
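A quick usage sketch for the _get_value helper above (assuming it is in scope); the sample dict is illustrative:

entry = {"enabled": "True", "debug": "false", "retries": 3}
print(_get_value("enabled", entry))  # True (bool)
print(_get_value("debug", entry))    # False (bool)
print(_get_value("retries", entry))  # 3 (returned unchanged)
print(_get_value("missing", entry))  # None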
def calculate_bounded_area(x0, y0, x1, y1):
    """ Calculate the area bounded by two potentially-nonmonotonic 2D data sets

    This function is written to calculate the area between two arbitrary
    piecewise-linear curves. The method was inspired by the arbitrary polygon
    filling routines in vector software programs when the polygon self-intersects.

    Created: 2015 April 29, msswan
    """
    # We start by taking the start of the first data set (pts0) and loop over
    # each segment (starting with the closest) and check to see if the
    # second data (pts1) set intersects. If there is an intersection, it joins
    # all the points together to make a polygon (reversing pts1 so that the
    # polygon integration calculation goes around in a single direction) and
    # calculates the area from that. Now it removes the points that it used to
    # create the polygon and adds the intersection point to pts0 (which is the
    # new starting point) and starts the loop again.

    # Turn the data into lists of tuples (x,y) coordinates
    pts0 = list(zip(x0, y0))
    pts1 = list(zip(x1, y1))

    area = 0.0
    while len(pts0) + len(pts1) > 0:
        shouldbreak = False
        for idx in range(0, len(pts0)-1):
            for jdx in range(0, len(pts1)-1):
                doesintersect, int_pt = line_intersect(pts0[idx], pts0[idx+1],
                                                       pts1[jdx], pts1[jdx+1])
                if not doesintersect:
                    continue

                polygon = list(reversed(pts1[:jdx])) + pts0[:idx] + [int_pt,]
                area += get_area(polygon)

                # Trim the processed points off of the datasets
                pts0 = [int_pt,] + pts0[idx+1:]
                pts1 = pts1[jdx+1:]

                # Exit out of both for-loops
                shouldbreak = True
                break

            if shouldbreak:
                break
        else:
            # Make a polygon out of whatever points remain
            polygon = list(reversed(pts1)) + pts0
            area += get_area(polygon)

            # exit the while loop
            break

    return area
1cdf853a829e68f73254ac1073aadbc29abc4e2a
3,653,840
import requests


def login():
    """ Login to APIC-EM northbound APIs in shell.

    Returns:
        Client (NbClientManager) which is already logged in.
    """
    try:
        client = NbClientManager(
            server=APIC,
            username=APIC_USER,
            password=APIC_PASSWORD,
            connect=True)

        return client

    except requests.exceptions.HTTPError as exc_info:
        if exc_info.response.status_code == 401:
            print('Authentication Failed. Please provide valid username/password.')
        else:
            print('HTTP Status Code {code}. Reason: {reason}'.format(
                code=exc_info.response.status_code,
                reason=exc_info.response.reason))
        exit(1)

    except requests.exceptions.ConnectionError:
        print('Connection aborted. Please check if the host {host} is available.'.format(host=APIC))
        exit(1)
8a4fd0122769b868dc06aeba17c15d1a2e0055a2
3,653,841
import numpy def transfocator_compute_configuration(photon_energy_ev,s_target,\ symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\ nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \ sigmaz=6.46e-4, alpha = 0.55, \ tf_p=5960, tf_q=3800, verbose=1 ): """ Computes the optimum transfocator configuration for a given photon energy and target image size. All length units are cm :param photon_energy_ev: the photon energy in eV :param s_target: the target image size in cm. :param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"] :param density: the density of each type of lens. Default: density=[1.845,1.845,1.845] :param nlenses_max: the maximum allowed number of lenases for each type of lens. nlenses_max = [15,3,1] :param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4] :param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens, consider the smaller one. Default: lens_diameter=0.05 :param sigmaz: the sigma (standard deviation) of the source in cm :param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams) :param tf_p: the distance source-transfocator in cm :param tf_q: the distance transfocator-image in cm :param:verbose: set to 1 for verbose text output :return: a list with the number of lenses of each type. """ if s_target < 2.35*sigmaz*tf_q/tf_p: print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz)) print("Maximum Demagnifications is: %f um"%(tf_p/tf_q)) print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p)) print("Error: redefine size") return None deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \ for i in range(len(symbol))] focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \ lens_diameter=lens_diameter,method=2) focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p) div_q_target = alpha * lens_diameter / focal_q_target #corrections for extreme cases source_demagnified = 2.35*sigmaz*focal_q_target/tf_p if source_demagnified > lens_diameter: source_demagnified = lens_diameter s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2) nlenses_target = _transfocator_guess_configuration(focal_f_target,deltas=deltas,\ nlenses_max=nlenses_max,radii=nlenses_radii, ) if verbose: print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target)) print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target)) print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4)) print("transfocator_compute_configuration: nlenses_target: ",nlenses_target) return nlenses_target
3c25d701117df8857114038f92ebe4a5dee4097f
3,653,842
import logging import xml def flickrapi_fn(fn_name, fn_args, # format: () fn_kwargs, # format: dict() attempts=3, waittime=5, randtime=False, caughtcode='000'): """ flickrapi_fn Runs flickrapi fn_name function handing over **fn_kwargs. It retries attempts, waittime, randtime with @retry Checks results is_good and provides feedback accordingly. Captures flicrkapi or BasicException error situations. caughtcode to report on exception error. Returns: fn_success = True/False fn_result = Actual flickrapi function call result fn_errcode = error reported by flickrapi exception """ @rate_limited.retry(attempts=attempts, waittime=waittime, randtime=randtime) def retry_flickrapi_fn(kwargs): """ retry_flickrapi_fn Decorator to retry calling a function """ return fn_name(**kwargs) logging.info('fn:[%s] attempts:[%s] waittime:[%s] randtime:[%s]', fn_name.__name__, attempts, waittime, randtime) if logging.getLogger().getEffectiveLevel() <= logging.INFO: for i, arg in enumerate(fn_args): logging.info('fn:[%s] arg[%s]={%s}', fn_name.__name__, i, arg) for name, value in fn_kwargs.items(): logging.info('fn:[%s] kwarg[%s]=[%s]', fn_name.__name__, name, value) fn_success = False fn_result = None fn_errcode = 0 try: fn_result = retry_flickrapi_fn(fn_kwargs) except flickrapi.exceptions.FlickrError as flickr_ex: fn_errcode = flickr_ex.code NPR.niceerror(caught=True, caughtprefix='+++Api', caughtcode=caughtcode, caughtmsg='Flickrapi exception on [{!s}]' .format(fn_name.__name__), exceptuse=True, exceptcode=flickr_ex.code, exceptmsg=flickr_ex, useniceprint=True, exceptsysinfo=True) except (IOError, httplib.HTTPException): NPR.niceerror(caught=True, caughtprefix='+++Api', caughtcode=caughtcode, caughtmsg='Caught IO/HTTP Error on [{!s}]' .format(fn_name.__name__)) except Exception as exc: NPR.niceerror(caught=True, caughtprefix='+++Api', caughtcode=caughtcode, caughtmsg='Exception on [{!s}]'.format(fn_name.__name__), exceptuse=True, exceptmsg=exc, useniceprint=True, exceptsysinfo=True) except BaseException: NPR.niceerror(caught=True, caughtprefix='+++Api', caughtcode=caughtcode, caughtmsg='BaseException on [{!s}]' .format(fn_name.__name__), exceptsysinfo=True) finally: pass if is_good(fn_result): fn_success = True logging.info('fn:[%s] Output for fn_result:', fn_name.__name__) logging.info(xml.etree.ElementTree.tostring( fn_result, encoding='utf-8', method='xml')) else: logging.error('fn:[%s] is_good(fn_result):[%s]', fn_name.__name__, 'None' if fn_result is None else is_good(fn_result)) fn_result = None logging.info('fn:[%s] success:[%s] result:[%s] errcode:[%s]', fn_name.__name__, fn_success, fn_result, fn_errcode) return fn_success, fn_result, fn_errcode
fcdb050824aa53ef88d0b879729e3e5444d221a7
3,653,843
def load_data(CWD):
    """
    loads the data from a parquet file specified below

    input:
        CWD = current working directory path
    output:
        df_raw = raw data from parquet file as pandas dataframe
    """
    folderpath_processed_data = CWD + '/data_sample.parquet'
    df_raw = pd.read_parquet(folderpath_processed_data)
    return df_raw
8ba8d77b81e61f90651ca57b186faf965ec51c73
3,653,844
import subprocess


def run_command(cmd):
    """Run command, return output as string."""
    output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
    return output.decode("ascii")
0996d76ab1980c2fad262f8fd227ac50772849d2
3,653,845
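A quick usage sketch for the run_command helper above (assuming it is in scope and a POSIX shell is available; the commands are illustrative):

print(run_command("echo hello"))       # 'hello\n'
files = run_command("ls").splitlines()  # list of filenames in the current directory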
def http_body():
    """
    Returns random binary body data.
    """
    return strategies.binary(min_size=0, average_size=600, max_size=1500)
5789dfc882db32eefb6c543f6fd494fe621b1b8e
3,653,846
def run(data_s: str) -> tuple[int, int]:
    """Solve the puzzles."""
    results = [check(line) for line in data_s.splitlines()]
    part1 = sum(result.error_score for result in results)
    part2 = int(median(result.completion_score for result in results if result.ok))
    return part1, part2
e5870924769b23300b116ceacae3b8b73d4643f3
3,653,847
import os
import logging


def _getRotatingFileHandler(filename, mode='a', maxBytes=1000000, backupCount=0,
                            encoding='utf-8', uid=None, gid=None):
    """Get a :class:`logging.RotatingFileHandler` with a logfile which is
    readable+writable only by the given **uid** and **gid**.

    :param str filename: The full path to the log file.
    :param str mode: The mode to open **filename** with. (default: ``'a'``)
    :param int maxBytes: Rotate logfiles after they have grown to this size in
        bytes.
    :param int backupCount: The number of logfiles to keep in rotation.
    :param str encoding: The encoding for the logfile.
    :param int uid: The owner UID to set on the logfile.
    :param int gid: The GID to set on the logfile.
    :rtype: :class:`logging.handlers.RotatingFileHandler`
    :returns: A logfile handler which will rotate files and chown/chmod newly
        created files.
    """
    # Default to the current process owner's uid and gid:
    uid = os.getuid() if not uid else uid
    gid = os.getgid() if not gid else gid

    if not os.path.exists(filename):
        open(filename, 'a').close()
    os.chown(filename, uid, gid)
    try:
        os.chmod(filename, os.ST_WRITE | os.ST_APPEND)
    except AttributeError:  # pragma: no cover
        logging.error("""XXX FIXME: Travis chokes on `os.ST_WRITE` saying that
        the module doesn't have that attribute, for some reason:
        https://travis-ci.org/isislovecruft/bridgedb/builds/24145963#L1601""")
        os.chmod(filename, 384)

    fileHandler = partial(logging.handlers.RotatingFileHandler,
                          filename,
                          mode,
                          maxBytes=maxBytes,
                          backupCount=backupCount,
                          encoding=encoding)
    return fileHandler
d6415f8b4e86dad0978e96ea8e07fefa1fdb3427
3,653,848
import functools


def _inject(*args, **kwargs):
    """Inject variables into the arguments of a function or method.

    This is almost identical to decorating with functools.partial, except we
    also propagate the wrapped function's __name__.
    """

    def injector(f):
        assert callable(f)

        @functools.wraps(f)
        def wrapper(*w_args, **w_kwargs):
            return functools.partial(f, *args, **kwargs)(*w_args, **w_kwargs)

        wrapper.args = args
        wrapper.kwargs = kwargs
        wrapper.function = f

        return wrapper

    return injector
40ba8ecd01880ebff3997bc16feb775d6b45f711
3,653,849
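A short usage sketch for the _inject decorator above (assuming it is in scope); the combine function is illustrative:

@_inject(10, scale=2)
def combine(base, extra, scale=1):
    return (base + extra) * scale


print(combine(5))                     # (10 + 5) * 2 = 30
print(combine.__name__)               # 'combine' -- preserved by functools.wraps
print(combine.args, combine.kwargs)   # (10,) {'scale': 2}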
def frame_drop_correctors_ready():
    """
    Checks to see if the frame drop correctors 'seq_and_image_corr' topics are
    all being published. There should be a corrector topic for each camera.
    """
    camera_assignment = get_camera_assignment()
    number_of_cameras = len(camera_assignment)
    number_of_correctors = get_number_of_corrector_topics()
    if number_of_cameras == number_of_correctors:
        return True
    else:
        return False
85c991de9cecd87cd20f7578e1201340d1a7f23a
3,653,850
from typing import List
from typing import Dict
from typing import Any

import yaml


def loads(content: str) -> List[Dict[str, Any]]:
    """ Load the given YAML string """
    template = list(yaml.load_all(content, Loader=SafeLineLoader))

    # Convert an empty file to an empty dict
    if template is None:
        template = {}

    return template
a2c455b40a0b20c4e34af93e08e9e9ae1bb9ab7d
3,653,851
def get_ndim_horizontal_coords(easting, northing):
    """
    Return the number of dimensions of the horizontal coordinates arrays

    Also check if the two horizontal coordinates arrays have the same dimensions.

    Parameters
    ----------
    easting : nd-array
        Array for the easting coordinates
    northing : nd-array
        Array for the northing coordinates

    Returns
    -------
    ndim : int
        Number of dimensions of the ``easting`` and ``northing`` arrays.
    """
    ndim = np.ndim(easting)
    if ndim != np.ndim(northing):
        raise ValueError(
            "Horizontal coordinates dimensions mismatch. "
            + f"The easting coordinate array has {easting.ndim} dimensions "
            + f"while the northing has {northing.ndim}."
        )
    return ndim
a35bf0064aff583c221e8b0c28d8c50cea0826aa
3,653,852
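A small usage sketch for the coordinate check above; the numpy alias is assumed to be the same np the helper uses, and the sample arrays are illustrative:

import numpy as np  # assumed to be the np used by the helper above

easting = np.array([[1.0, 2.0], [3.0, 4.0]])
northing = np.array([[5.0, 6.0], [7.0, 8.0]])
print(get_ndim_horizontal_coords(easting, northing))  # 2

# Mismatched dimensions raise a ValueError:
# get_ndim_horizontal_coords(easting, np.array([5.0, 6.0]))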
async def info():
    """
    API information endpoint

    Returns:
        [json] -- [description] app version, environment running in (dev/prd),
        Doc/Redoc link, License information, and support information
    """
    if RELEASE_ENV.lower() == "dev":
        main_url = "http://localhost:5000"
    else:
        main_url = HOST_DOMAIN

    openapi_url = f"{main_url}/docs"
    redoc_url = f"{main_url}/redoc"
    result = {
        "App Version": APP_VERSION,
        "Environment": RELEASE_ENV,
        "Docs": {"OpenAPI": openapi_url, "ReDoc": redoc_url},
        "License": {"Type": LICENSE_TYPE, "License Link": LICENSE_LINK},
        "Application_Information": {"Owner": OWNER, "Support Site": WEBSITE},
    }
    return result
3404ac622711c369ae006bc0edba10f57e825f22
3,653,853
import json


async def info(request: FasttextRequest):
    """
    Returns info about the supervised model

    TODO - Add authentication

    :param request:
    :return:
    """
    app: FasttextServer = request.app
    model: SupervisedModel = app.get_supervised_model()

    model_info = {
        "dimensions": model.get_dimension(),
        "isQuantised": model.is_quantized()
    }

    return json(request, model_info, 200)
4c308839df53be7f074b1b3298d1535ff300df07
3,653,854
import torch


def hard_example_mining(dist_mat, labels, return_inds=False):
    """For each anchor, find the hardest positive and negative sample.

    Args:
      dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
      labels: pytorch LongTensor, with shape [N]
      return_inds: whether to return the indices. Save time if `False`(?)

    Returns:
      dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
      dist_an: pytorch Variable, distance(anchor, negative); shape [N]
      p_inds: pytorch LongTensor, with shape [N];
        indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
      n_inds: pytorch LongTensor, with shape [N];
        indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1

    NOTE: Only consider the case in which all labels have same num of samples,
      thus we can cope with all anchors in parallel.
    """

    assert len(dist_mat.size()) == 2
    assert dist_mat.size(0) == dist_mat.size(1)
    N = dist_mat.size(0)

    # shape [N, N]
    is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
    is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())

    # `dist_ap` means distance(anchor, positive)
    # both `dist_ap` and `relative_p_inds` with shape [N, 1]
    dist_ap, relative_p_inds = torch.max(
        dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
    # `dist_an` means distance(anchor, negative)
    # both `dist_an` and `relative_n_inds` with shape [N, 1]
    dist_an, relative_n_inds = torch.min(
        dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
    # shape [N]
    dist_ap = dist_ap.squeeze(1)
    dist_an = dist_an.squeeze(1)

    if return_inds:
        # shape [N, N]
        ind = (labels.new().resize_as_(labels)
               .copy_(torch.arange(0, N).long())
               .unsqueeze(0).expand(N, N))
        # shape [N, 1]
        p_inds = torch.gather(
            ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
        n_inds = torch.gather(
            ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
        # shape [N]
        p_inds = p_inds.squeeze(1)
        n_inds = n_inds.squeeze(1)
        return dist_ap, dist_an, p_inds, n_inds

    return dist_ap, dist_an
15fd533cf74e6cd98ac0fa2e8a83b2734861b9ca
3,653,855
import datetime


def trace(func):
    """Trace and capture provenance info inside a method /function."""
    setup_logging()

    @wraps(func)
    def wrapper(*args, **kwargs):
        activity = func.__name__
        activity_id = get_activity_id()

        # class_instance = args[0]
        class_instance = func
        class_instance.args = args
        class_instance.kwargs = kwargs

        # OSA specific
        # variables parsing
        global session_name, session_tag
        class_instance = parse_variables(class_instance)
        if class_instance.__name__ in REDUCTION_TASKS:
            session_tag = f"{activity}:{class_instance.ObservationRun}"
            session_name = f"{class_instance.ObservationRun}"
        else:
            session_tag = (
                f"{activity}:{class_instance.PedestalRun}-{class_instance.CalibrationRun}"
            )
            session_name = f"{class_instance.PedestalRun}-{class_instance.CalibrationRun}"
        # OSA specific
        # variables parsing

        # provenance capture before execution
        derivation_records = get_derivation_records(class_instance, activity)
        parameter_records = get_parameters_records(class_instance, activity, activity_id)
        usage_records = get_usage_records(class_instance, activity, activity_id)

        # activity execution
        start = datetime.datetime.now().isoformat()
        result = func(*args, **kwargs)
        end = datetime.datetime.now().isoformat()

        # no provenance logging
        if not log_is_active(class_instance, activity):
            return result

        # provenance logging only if activity ends properly
        session_id = log_session(class_instance, start)
        for log_record in derivation_records:
            log_prov_info(log_record)
        log_start_activity(activity, activity_id, session_id, start)
        for log_record in parameter_records:
            log_prov_info(log_record)
        for log_record in usage_records:
            log_prov_info(log_record)
        log_generation(class_instance, activity, activity_id)
        log_finish_activity(activity_id, end)

        return result

    return wrapper
8d624ef70ea4278141f8da9989b3d6787ec003c7
3,653,856
def broadcast_ms_tensors(network, ms_tensors, broadcast_ndim):
    """Broadcast TensorRT tensors to the specified dimension by pre-padding shape 1 dims"""
    broadcasted_ms_tensors = [None] * len(ms_tensors)

    for i, t in enumerate(ms_tensors):
        tensor = network.nodes[t]
        if len(tensor.shape) < broadcast_ndim:
            # append 1 size dims to front
            diff = broadcast_ndim - len(tensor.shape)
            shape = tuple([1] * diff + list(tensor.shape))
            # TODO, check print
            ms_cell = _MsExpand0()
            out = ms_cell(tensor)
            op_key = network.add_ops(ms_cell)
            ms_tensor = network.add_node(out)
            network.add_pre(op_key, t)
            network.add_out(op_key, [ms_tensor])
            # layer = network.add_shuffle(t)
            # layer.reshape_dims = shape
            # ms_tensor = layer.get_output(0)
        else:
            ms_tensor = t
        broadcasted_ms_tensors[i] = ms_tensor

    return broadcasted_ms_tensors
28e017aab2389faff11f2daecc696a64f2024360
3,653,857
def get_rbf_gamma_based_in_median_heuristic(X: np.array, standardize: bool = False) -> float:
    """
    Function implementing a heuristic to estimate the width of an RBF kernel
    (as defined in the Scikit-learn package) from data.

    :param X: array-like, shape = (n_samples, n_features), feature matrix

    :param standardize: boolean, indicating whether the data should be normalized
        (z-transformation) before the gamma is estimated.

    :return: scalar, gamma (of the sklearn RBF kernel) estimated from the data
    """
    # Z-transform the data if requested
    if standardize:
        X = StandardScaler(copy=True).fit_transform(X)

    # Compute all pairwise euclidean distances
    D = euclidean_distances(X)

    # Get the median of the distances
    sigma = np.median(D)

    # Convert sigma to gamma as defined in the sklearn package
    gamma = 1 / (2 * sigma**2)

    return gamma
0a9238b4ba2c3e3cc4ad1f01c7855954b9286294
3,653,858
def winter_storm(
    snd: xarray.DataArray, thresh: str = "25 cm", freq: str = "AS-JUL"
) -> xarray.DataArray:
    """Days with snowfall over threshold.

    Number of days with snowfall accumulation greater or equal to threshold.

    Parameters
    ----------
    snd : xarray.DataArray
      Surface snow depth.
    thresh : str
      Threshold on snowfall accumulation required to label an event a `winter storm`.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray
      Number of days per period identified as winter storms.

    Notes
    -----
    Snowfall accumulation is estimated by the change in snow depth.
    """
    thresh = convert_units_to(thresh, snd)

    # Compute daily accumulation
    acc = snd.diff(dim="time")

    # Winter storm condition
    out = threshold_count(acc, ">=", thresh, freq)

    out.attrs["units"] = to_agg_units(out, snd, "count")
    return out
cef1fa5cf56053f74e70542250c64b398752bd75
3,653,859
def _check_whitelist_members(rule_members=None, policy_members=None):
    """Whitelist: Check that policy members ARE in rule members.

    If a policy member is NOT found in the rule members, add it to
    the violating members.

    Args:
        rule_members (list): IamPolicyMembers allowed in the rule.
        policy_members (list): IamPolicyMembers in the policy.

    Return:
        list: Policy members NOT found in the whitelist (rule members).
    """
    violating_members = []
    for policy_member in policy_members:
        # check if policy_member is found in rule_members
        if not any(r.matches(policy_member) for r in rule_members):
            violating_members.append(policy_member)
    return violating_members
47f2d6b42f2e1d57a09a2ae6d6c69697e13d03a7
3,653,860
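A small illustrative sketch of the whitelist check above; the Member class here is a hypothetical stand-in for IamPolicyMembers and only provides the matches() method the function relies on.

class Member:
    """Hypothetical stand-in for an IAM policy member with a matches() method."""

    def __init__(self, name):
        self.name = name

    def matches(self, other):
        # Real rule members support wildcards; exact comparison is enough here.
        return self.name == other.name


rule_members = [Member('user:[email protected]'), Member('user:[email protected]')]
policy_members = [Member('user:[email protected]'), Member('user:[email protected]')]

violations = _check_whitelist_members(rule_members, policy_members)
print([m.name for m in violations])  # only the member missing from the whitelist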
import uuid


def get_mac():
    """Return the MAC address of the NIC of the PC as 12 hex digits without colons."""
    # uuid.getnode() returns the MAC as a 48-bit integer; format it as 12 lowercase hex digits.
    return '%012x' % uuid.getnode()
95ebb381c71741e26b6713638a7770e452d009f2
3,653,861
async def get_clusters(session, date): """ :param session: :return: """ url = "%s/file/clusters" % BASE_URL params = {'date': date} return await get(session, url, params)
8ef55ba14558a60096cc0a96b5b0bc2400f8dbff
3,653,862
def extract_attributes_from_entity(json_object):
    """
    returns the attributes from a json representation
    Args:
        @param json_object: JSON representation
    """
    if 'attributes' in json_object:
        items = json_object['attributes']
        attributes = recursive_for_attribute_v2(items)
        return attributes
    else:
        return None
d01886fac8d05e82fa8c0874bafc8860456ead0c
3,653,863
def get_config_with_api_token(tempdir, get_config, api_auth_token): """ Get a ``_Config`` object. :param TempDir tempdir: A temporary directory in which to create the Tahoe-LAFS node associated with the configuration. :param (bytes -> bytes -> _Config) get_config: A function which takes a node directory and a Foolscap "portnum" filename and returns the configuration object. :param bytes api_auth_token: The HTTP API authorization token to write to the node directory. """ FilePath(tempdir.join(b"tahoe", b"private")).makedirs() config = get_config(tempdir.join(b"tahoe"), b"tub.port") config.write_private_config(b"api_auth_token", api_auth_token) return config
682bd037944276c8a09bff46a96337571a605f0e
3,653,864
import datetime


def make_journal_title():
    """
    My journal is weekly. There's a config option 'journal_day' that lets me set the
    day of the week that my journal is based on. So, if I don't pass in a specific title,
    it will just create a new journal titled 'Journal-date-of-next-journal-day.md'.
    """
    #TODO: Make the generated journal title a configurable pattern
    daymap = {
        'monday': 0,
        'tuesday': 1,
        'wednesday': 2,
        'thursday': 3,
        'friday': 4,
        'saturday': 5,
        'sunday': 6
    }
    today = datetime.date.today()
    journal_day = today + datetime.timedelta(
        (daymap[settings.JOURNAL_DAY.lower()] - today.weekday()) % 7
    )
    return 'Journal {0}'.format(journal_day)
5bd855c0f3fe639127f48e4386cde59bca57c62f
3,653,865
def calc_base_matrix_1qutrit_y_01() -> np.ndarray: """Return the base matrix corresponding to the y-axis w.r.t. levels 0 and 1.""" l = [[0, -1j, 0], [1j, 0, 0], [0, 0, 0]] mat = np.array(l, dtype=np.complex128) return mat
7618021173464962c3e9366d6f159fad01674feb
3,653,866
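A quick sanity check for the base matrix above, assuming numpy is available: the matrix should be Hermitian and its 0-1 block should equal the Pauli-Y matrix.

import numpy as np

mat = calc_base_matrix_1qutrit_y_01()

# Hermitian: equal to its conjugate transpose.
assert np.allclose(mat, mat.conj().T)

# The upper-left 2x2 block is the Pauli-Y matrix.
pauli_y = np.array([[0, -1j], [1j, 0]])
assert np.allclose(mat[:2, :2], pauli_y)
print(mat)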
def get_feature_names_small(ionnumber): """ feature names for the fixed peptide length feature vectors """ names = [] names += ["pmz", "peplen"] for c in ["bas", "heli", "hydro", "pI"]: names.append("sum_" + c) for c in ["mz", "bas", "heli", "hydro", "pI"]: names.append("mean_" + c) names.append("mz_ion") names.append("mz_ion_other") names.append("mean_mz_ion") names.append("mean_mz_ion_other") for c in ["bas", "heli", "hydro", "pI"]: names.append("{}_ion".format(c)) names.append("{}_ion_other".format(c)) names.append("endK") names.append("endR") names.append("nextP") names.append("nextK") names.append("nextR") for c in ["bas", "heli", "hydro", "pI", "mz"]: for pos in ["i", "i-1", "i+1", "i+2"]: names.append("loc_" + pos + "_" + c) names.append("charge") for i in range(ionnumber): for c in ["bas", "heli", "hydro", "pI", "mz"]: names.append("P_%i_%s"%(i, c)) names.append("P_%i_P"%i) names.append("P_%i_K"%i) names.append("P_%i_R"%i) return names
fbffe98af0cffb05a6b11e06786c5a7076449146
3,653,867
def vectorproduct(a,b): """ Return vector cross product of input vectors a and b """ a1, a2, a3 = a b1, b2, b3 = b return [a2*b3 - a3*b2, a3*b1 - a1*b3, a1*b2 - a2*b1]
adb9e7c4b5150ab6231f2b852d6860cd0e5060a0
3,653,868
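A short usage sketch for the cross product above, checking it against numpy.cross; numpy is assumed to be installed.

import numpy as np

a = [1.0, 0.0, 0.0]
b = [0.0, 1.0, 0.0]

# Cross product of the x and y unit vectors is the z unit vector.
print(vectorproduct(a, b))                         # [0.0, 0.0, 1.0]
assert np.allclose(vectorproduct(a, b), np.cross(a, b))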
from typing import Optional import requests import json def get_tasks( id: Optional[str], name: Optional[str], completed: Optional[bool], comment: Optional[str], limit: Optional[str], ) -> str: """ :param id: This optional parameter accepts a string and response is filtered based on this value :param name: This optional parameter accepts a string and response is filtered based on this value :param completed: This optional parameter accepts a boolean and response is filtered based on this value :param comment: This optional parameter accepts a string and response is filtered based on this value :param limit: This optional parameter accepts a string and response is filtered based on this value :return: Returns a string which contains success or failure response based on the API response status """ response = requests.get(BASE_URL + BASE_PATH + "/todo/" + str(id)) if response.status_code == 200: print("Here is your task(s) list:") return json.dumps(response.json(), indent=4) else: return build_error_response(response.text)
aceacf01ea97440836b997ea631b83b2a3e9c198
3,653,869
def f_test_probability(N, p1, Chi2_1, p2, Chi2_2): """Return F-Test probability that the simpler model is correct. e.g. p1 = 5.; //number of PPM parameters e.g. p2 = p1 + 7.; // number of PPM + orbital parameters :param N: int Number of data points :param p1: int Number of parameters of the simpler model :param Chi2_1: float chi^2 corresponding to the simpler model :param p2: int Number of parameters of the model with more parameters p2 > p1 :param Chi2_2: float chi^2 corresponding to the model with more parameters :return: prob: float probability """ nu1 = p2 - p1 nu2 = N - p2 # degrees of freedom if (Chi2_1 < Chi2_2): raise RuntimeWarning('Solution better with less parameters') # F test F0 = nu2 / nu1 * (Chi2_1 - Chi2_2) / Chi2_2 # probability prob = betai(0.5 * nu2, 0.5 * nu1, nu2 / (nu2 + F0 * nu1)) return prob
21cf7c9eb455309131b6b4808c498927c3d6e485
3,653,870
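The function above calls an undefined betai helper, the regularized incomplete beta function from Numerical Recipes. Below is a hedged sketch of one way it could be supplied via scipy; betai must be visible in the same module/namespace as f_test_probability, and the chi-square numbers are invented purely for illustration.

from scipy.special import betainc

def betai(a, b, x):
    # Regularized incomplete beta function I_x(a, b), as used in Numerical Recipes.
    return betainc(a, b, x)

# Illustrative numbers: 100 epochs, 5-parameter PPM model vs 12-parameter PPM+orbit model.
N, p1, p2 = 100, 5, 12
Chi2_1, Chi2_2 = 150.0, 95.0

prob = f_test_probability(N, p1, Chi2_1, p2, Chi2_2)
print("probability the simpler model suffices:", prob)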
def validate_user(headers): """Validate the user and return the results.""" user_id = headers.get("User", "") token = headers.get("Authorization", "") registered = False if user_id: valid_user_id = user_id_or_guest(user_id) registered = valid_user_id > 1 else: valid_user_id = 1 is_token_invalid = invalid_token(user_id, token) return valid_user_id, registered, is_token_invalid
331608df719d03afd57079d9baba3408b54e0efe
3,653,871
def load_csv_to_journal(batch_info): """Take a dict of batch and csv info and load into journal table.""" # Create batch for testing filename = batch_info['filename'] journal_batch_name = batch_info['journal_batch_name'] journal_batch_description = batch_info['journal_batch_description'] journal_batch_entity = batch_info['journal_batch_entity'] journal_batch_currency = batch_info['journal_batch_currency'] gl_post_reference = batch_info['gl_post_reference'] gl_batch_status = batch_info['gl_batch_status'] insert_new_batch_name(journal_batch_name, journal_batch_description, str(journal_batch_entity), str(journal_batch_currency), gl_post_reference, str(gl_batch_status), ) # Set up csv file to use batch_row_id = get_journal_batch_row_id_by_name(journal_batch_name) batch_row_id = batch_row_id[0][0][0] # Load csv file to journal_loader load_file = batch_load_je_file(filename, str(batch_row_id)) status_ = [0, batch_row_id] # [load_file status, batch_row_id] if load_file == 'LOAD OKAY': status_[0] = 0 else: status_[0] = 99 raise Exception('Error posting csv file to Journal table') # Compare csv totals loaded into pandas dataframe to journal # table totals. # Load batch in journal_loader to journal if status_[0] == 0: load_status_journal = batch_load_insert(batch_row_id) print(f'load_status_journal: {load_status_journal}') return status_ else: print(f'Error loading to journal_loader: {status_}') raise Exception('Error posting csv file to journal_loader') return status_
285d1113cad16d2d0cc7216f59d089a8f94e908c
3,653,872
def validate_guid(guid: str) -> str:
    """Validate that a guid is exactly 32 lowercase hex characters and return it."""
    valid_chars = set('0123456789abcdef')
    count = 0
    for char in guid:
        count += 1
        if char not in valid_chars or count > 32:
            raise ValueError('Invalid GUID format.')
    if count != 32:
        raise ValueError('Invalid GUID format.')
    return guid
75fff17ee0ef2c1c080e2ef2ffb0272fd71d2921
3,653,873
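A usage sketch for the GUID check above, assuming it is in scope; uuid4().hex conveniently produces 32 lowercase hex characters.

import uuid

good = uuid.uuid4().hex          # 32 lowercase hex characters
print(validate_guid(good))       # returns the guid unchanged

try:
    validate_guid('not-a-guid')
except ValueError as exc:
    print('rejected:', exc)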
def load_key_string(string, callback=util.passphrase_callback): # type: (AnyStr, Callable) -> RSA """ Load an RSA key pair from a string. :param string: String containing RSA key pair in PEM format. :param callback: A Python callable object that is invoked to acquire a passphrase with which to unlock the key. The default is util.passphrase_callback. :return: M2Crypto.RSA.RSA object. """ bio = BIO.MemoryBuffer(string) return load_key_bio(bio, callback)
0ac6df63dd7ad42d8eaaa13df7e96caa311332d7
3,653,874
import os
import tarfile


def load_lbidd(n=5000, observe_counterfactuals=False, return_ites=False,
               return_ate=False, return_params_df=False, link='quadratic',
               degree_y=None, degree_t=None, n_shared_parents='median', i=0,
               dataroot=None, print_paths=True):
    """
    Load the LBIDD dataset that is specified

    :param n: size of dataset (1k, 2.5k, 5k, 10k, 25k, or 50k)
    :param observe_counterfactuals: if True, return double-sized dataset with both y0 (first half) and y1 (second half) observed
    :param return_ites: if True, return ITEs
    :param return_ate: if True, return ATE
    :param return_params_df: if True, return the DataFrame of dataset parameters that match
    :param link: link function (linear, quadratic, cubic, poly, log, or exp)
    :param degree_y: degree of function for Y (e.g. 1, 2, 3, etc.)
    :param degree_t: degree of function for T (e.g. 1, 2, 3, etc.)
    :param n_shared_parents: number covariates that T and Y share as causal parents
    :param i: index of parametrization to choose among the ones that match
    :return: dictionary of results
    """
    folder, scaling_zip, scaling_folder, covariates_path, params_path, counterfactuals_folder, factuals_folder = \
        get_paths(dataroot=dataroot)
    if print_paths:
        print(scaling_folder)
        print(covariates_path)

    # Check if files exist
    if not (os.path.isfile(scaling_zip) and os.path.isfile(covariates_path)):
        raise FileNotFoundError(
            'You must first download scaling.tar.gz and x.csv from '
            'https://www.synapse.org/#!Synapse:syn11738963 and put them in the '
            'datasets/lbidd/ folder. This requires creating an account on '
            'Synapse and accepting some terms and conditions.'
        )

    # Process dataset size (n)
    if n is not None:
        if not isinstance(n, str):
            n = str(n)
        if n.lower() not in VALID_N:
            raise ValueError('Invalid n: {} ... Valid n: {}'.format(n, list(VALID_N)))
        n = N_STR_TO_INT[n]

    # Unzip 'scaling.tar.gz' if not already unzipped
    if not os.path.exists(scaling_folder):
        print('Unzipping {} ...'.format(SCALING_TAR_ZIP), end=' ')
        tar = tarfile.open(scaling_zip, "r:gz")
        tar.extractall(folder)
        tar.close()
        print('DONE')

    # Load and filter the params DataFrame
    params_df = pd.read_csv(params_path)
    if n is not None:
        params_df = params_df[params_df['size'] == n]    # Select dataset size
    if link is not None:
        if link not in VALID_LINKS:
            raise ValueError('Invalid link function type: {} ... Valid links: {}'
                             .format(link, VALID_LINKS))
        if link == 'linear':
            link = 'poly'
            degree_y = 1
            degree_t = 1
        elif link == 'quadratic':
            link = 'poly'
            degree_y = 2
            degree_t = 2
        elif link == 'cubic':
            link = 'poly'
            degree_y = 3
            degree_t = 3
        params_df = params_df[params_df['link_type'] == link]   # Select link function
    if degree_y is not None:
        params_df = params_df[params_df['deg(y)'] == degree_y]  # Select degree Y
    if degree_t is not None:
        params_df = params_df[params_df['deg(z)'] == degree_t]  # Select degree T

    # Filter by number of parents that T and Y share
    valid_n_shared_parents = params_df['n_conf(yz)'].unique().tolist()
    if n_shared_parents in valid_n_shared_parents:
        params_df = params_df[params_df['n_conf(yz)'] == n_shared_parents]
    elif isinstance(n_shared_parents, str) and n_shared_parents.lower() == 'max':
        max_shared_parents = params_df['n_conf(yz)'].max()
        params_df = params_df[params_df['n_conf(yz)'] == max_shared_parents]
    elif isinstance(n_shared_parents, str) and n_shared_parents.lower() == 'median':
        median_i = len(params_df) // 2
        median_shared_parents = params_df['n_conf(yz)'].sort_values().iloc[median_i]
        params_df = params_df[params_df['n_conf(yz)'] == median_shared_parents]
    elif n_shared_parents is None:
        pass
    else:
        raise ValueError('Invalid n_shared_parents ... must be either None, "max", "median", or in {}'
                         .format(valid_n_shared_parents))

    if params_df.empty:
        raise ValueError('No datasets have that combination of parameters.')

    output = {}
    if return_params_df:
        output['params_df'] = params_df

    # Get ith dataset that has the right parameters
    if i < len(params_df):
        ufid = params_df['ufid'].iloc[i]
    else:
        raise ValueError('Invalid i: {} ... with that parameter combination, i must be an int such that 0 <= i < {}'
                         .format(i, len(params_df)))

    covariates_df = pd.read_csv(covariates_path, index_col=INDEX_COL_NAME)

    factuals_path = os.path.join(factuals_folder, ufid + FILE_EXT)
    factuals_df = pd.read_csv(factuals_path, index_col=INDEX_COL_NAME)
    joint_factuals_df = covariates_df.join(factuals_df, how='inner')

    output['t'] = joint_factuals_df['z'].to_numpy()
    output['y'] = joint_factuals_df['y'].to_numpy()
    output['w'] = joint_factuals_df.drop(['z', 'y'], axis='columns').to_numpy()

    if observe_counterfactuals or return_ites or return_ate:
        counterfactuals_path = os.path.join(counterfactuals_folder, ufid + COUNTERFACTUAL_FILE_SUFFIX + FILE_EXT)
        counterfactuals_df = pd.read_csv(counterfactuals_path, index_col=INDEX_COL_NAME)
        joint_counterfactuals_df = covariates_df.join(counterfactuals_df, how='inner')

    # Add t column and stack y0 potential outcomes and y1 potential outcomes in same df
    if observe_counterfactuals:
        joint_y0_df = joint_counterfactuals_df.drop(['y1'], axis='columns').rename(columns={'y0': 'y'})
        joint_y0_df['t'] = 0
        joint_y1_df = joint_counterfactuals_df.drop(['y0'], axis='columns').rename(columns={'y1': 'y'})
        joint_y1_df['t'] = 1
        stacked_y_counterfactuals_df = pd.concat([joint_y0_df, joint_y1_df])
        output['obs_counterfactual_t'] = stacked_y_counterfactuals_df['t'].to_numpy()
        output['obs_counterfactual_y'] = stacked_y_counterfactuals_df['y'].to_numpy()
        output['obs_counterfactual_w'] = stacked_y_counterfactuals_df.drop(['t', 'y'], axis='columns').to_numpy()

    if return_ites:
        ites = joint_counterfactuals_df['y1'] - joint_counterfactuals_df['y0']
        output['ites'] = ites.to_numpy()

    if return_ate:
        ites = joint_counterfactuals_df['y1'] - joint_counterfactuals_df['y0']
        output['ate'] = ites.to_numpy().mean()

    return output
7e0b1ec7fb9780150db17ef5a10e0be479b7c6e3
3,653,875
def generate_iface_status_html(iface=u'lo', status_txt="UNKNOWN"): """Generates the html for interface of given status. Status is UNKNOWN by default.""" status = "UNKNOWN" valid_status = html_generator.HTML_LABEL_ROLES[0] if status_txt is not None: if (str(" DOWN") in str(status_txt)): status = "DOWN" valid_status = html_generator.HTML_LABEL_STATUS[u'CRITICAL'] elif (str(" UP") in str(status_txt)): status = "UP" valid_status = html_generator.HTML_LABEL_STATUS[u'OK'] return generate_iface_status_html_raw(iface, status, valid_status)
c3d459720b5675c9a7d53fa77bb1d7bb6d3988f2
3,653,876
def is_a(file_name): """ Tests whether a given file_name corresponds to a CRSD file. Returns a reader instance, if so. Parameters ---------- file_name : str the file_name to check Returns ------- CRSDReader1_0|None Appropriate `CRSDReader` instance if CRSD file, `None` otherwise """ try: crsd_details = CRSDDetails(file_name) logger.info('File {} is determined to be a CRSD version {} file.'.format(file_name, crsd_details.crsd_version)) return CRSDReader(crsd_details) except SarpyIOError: # we don't want to catch parsing errors, for now? return None
e083a54becdbb86bbefdb7c6504d5cd1d7f81458
3,653,877
def Closure(molecules): """ Returns the set of the closure of a given list of molecules """ newmol=set(molecules) oldmol=set([]) while newmol: gen=ReactSets(newmol,newmol) gen|=ReactSets(newmol,oldmol) gen|=ReactSets(oldmol,newmol) oldmol|=newmol newmol=gen-oldmol return oldmol
7546a528a43465127c889a93d03fbe1eb83a7d63
3,653,878
import os def get_logs(repo_folder): """ Get the list of logs """ def get_status(path, depth, statuses): if depth == 3: for f in os.listdir(path): if f == STATUS_FILE_NAME: f = os.path.join(path,f) statuses.append(f) else: for d in os.listdir(path): d = os.path.join(path,d) if not os.path.isdir(d): continue get_status(d, depth + 1, statuses) statuses = [] get_status(repo_folder, 0, statuses) return statuses
b00910bc38264fcde2a6ccb37d538489d23d6a57
3,653,879
def get_celery_task(): """get celery task, which takes user id as its sole argument""" global _celery_app global _celery_task if _celery_task: return _celery_task load_all_fetcher() _celery_app = Celery('ukfetcher', broker=ukconfig.celery_broker) _celery_app.conf.update( CELERY_ACCEPT_CONTENT=['pickle', 'json', 'msgpack', 'yaml']) @_celery_app.task def on_user_activated(user_id): try: user_fetcher = get_db_set(user_id, 'fetcher') for i in user_fetcher: fetcher = register_fetcher.fetcher_map.get(i) if fetcher is None: uklogger.log_err( 'fetcher {} not exist, requested by user {}'.format( i, user_id)) else: uklogger.log_info('run fetcher {} for user {}'.format( i, user_id)) fetcher.run(user_id) except Exception as ex: uklogger.log_exc(ex) if is_in_unittest(): _celery_task = on_user_activated else: _celery_task = on_user_activated.delay return _celery_task
b1cf2aa6ccf462b8e391c8900ac9efaea0b62728
3,653,880
def plot_keras_activations(activations): """Plot keras activation functions. Args: activations (list): List of Keras activation functions Returns: [matplotlib figure] [matplotlib axis] """ fig, axs = plt.subplots(1,len(activations),figsize=(3*len(activations),5),sharex=True,sharey=True,dpi=150) x = tf.constant(tf.range(-3,3,0.1), dtype=tf.float32) for i, activation in enumerate(activations): axs[i].plot(x.numpy(), activation(x).numpy()) axs[i].set_title(activation.__name__) axs[i].set_xlabel(r'$x$') if i == 0: axs[i].set_ylabel(r'$\phi(x)$') despine(ax=axs[i]) fig.tight_layout() return fig, axs
3c10bd3a57531ef8a88b6b0d330c2ba7eaf0b35c
3,653,881
def hog_feature(image, pixel_per_cell=8):
    """
    Compute hog feature for a given image.

    Important: use the hog function provided by skimage to generate both the
    feature vector and the visualization image. **For block normalization, use L1.**

    Args:
        image: an image with object that we want to detect.
        pixel_per_cell: number of pixels in each cell, an argument for hog descriptor.

    Returns:
        score: a vector of hog representation.
        hogImage: an image representation of hog provided by skimage.
    """
    ### YOUR CODE HERE
    (hogFeature, hogImage) = feature.hog(
        image,
        pixels_per_cell=(pixel_per_cell, pixel_per_cell),
        block_norm='L1',
        visualize=True,
    )
    ### END YOUR CODE
    return (hogFeature, hogImage)
6509a46dd161f6bde448588314535cb5aeef5e8a
3,653,882
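A usage sketch, assuming scikit-image is installed and that skimage.feature is imported in the module that defines hog_feature; the built-in camera image means no external data is needed.

from skimage import data

image = data.camera()  # grayscale sample image shipped with scikit-image

hog_vec, hog_img = hog_feature(image, pixel_per_cell=8)
print("feature vector length:", hog_vec.shape[0])
print("visualization shape:", hog_img.shape)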
def create_parser() -> ArgumentParser: """ Helper function parsing the command line options. """ parser = ArgumentParser(description="torchx CLI") subparser = parser.add_subparsers( title="sub-commands", description=sub_parser_description, ) subcmds = { "describe": CmdDescribe(), "log": CmdLog(), "run": CmdRun(), "builtins": CmdBuiltins(), "runopts": CmdRunopts(), "status": CmdStatus(), } for subcmd_name, cmd in subcmds.items(): cmd_parser = subparser.add_parser(subcmd_name) cmd.add_arguments(cmd_parser) cmd_parser.set_defaults(func=cmd.run) return parser
515309ad03907f5e22d32e5d13744a5fd24bfd40
3,653,883
import re def process_word(word): """Remove all punctuation and stem words""" word = re.sub(regex_punc, '', word) return stemmer.stem(word)
bceb132e7afddaf0540b38c22e9cef7b63a27e8c
3,653,884
def no_autoflush(fn): """Wrap the decorated function in a no-autoflush block.""" @wraps(fn) def wrapper(*args, **kwargs): with db.session.no_autoflush: return fn(*args, **kwargs) return wrapper
c211b05ea68074bc22254c584765ad001ed38f67
3,653,885
import os def default_model(): """Get a path for a default value for the model. Start searching in the current directory.""" project_root = get_project_root() models_dir = os.path.join(project_root, "models") curr_dir = os.getcwd() if ( os.path.commonprefix([models_dir, curr_dir]) == models_dir and curr_dir != models_dir ): latest_model = curr_dir else: latest_model = get_latest_folder(models_dir) return latest_model
49220267018e422df3e72d3be942ae5e28022586
3,653,886
def int_to_ip(ip): """ Convert a 32-bit integer into IPv4 string format :param ip: 32-bit integer :return: IPv4 string equivalent to ip """ if type(ip) is str: return ip return '.'.join([str((ip >> i) & 0xff) for i in [24, 16, 8, 0]])
8ceb8b9912f10ba49b45510f4470b9cc34bf7a2f
3,653,887
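A couple of quick examples for the converter above, assuming it is in scope.

print(int_to_ip(3232235777))   # '192.168.1.1'
print(int_to_ip(0))            # '0.0.0.0'
print(int_to_ip('10.0.0.1'))   # strings are passed through unchanged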
def audit_work_timer_cancel(id): """ Cancel timer set. :param id: :return: """ work = Work.query.get(id) celery.control.revoke(work.task_id, terminate=True) work.task_id = None work.timer = None db.session.add(work) db.session.commit() return redirect(url_for('.audit_work_timer', id=id))
d05d76dbf31faa4e6b8349af7f698b7021fba50f
3,653,888
def team_pos_evolution(team_id):
    """
    returns the evolution of position for a team for the season
    """
    pos_evo = []
    for week in team_played_weeks(team_id):
        try:
            teams_pos = [x[0] for x in league_table_until_with_teamid(week)]
            pos = teams_pos.index(int(team_id)) + 1
            pos_evo.append(pos)
        except Exception:
            # skip weeks where the team does not appear in the table
            continue
    return pos_evo
2b1d5378663eadf1f6ca1abb569e72866a58b0aa
3,653,889
def ifft_method(x, y, interpolate=True):
    """
    Performs IFFT on data.

    Parameters
    ----------
    x: array-like
        the x-axis data
    y: array-like
        the y-axis data
    interpolate: bool
        if True perform a linear interpolation on dataset before transforming

    Returns
    -------
    xf: array-like
        the transformed x data
    yf: array-like
        transformed y data
    """
    if interpolate:
        x, y = _fourier_interpolate(x, y)
    # Use the (possibly interpolated) sample count so xf and yf have matching lengths.
    N = len(x)
    xf = np.fft.fftfreq(N, d=(x[1] - x[0]) / (2 * np.pi))
    yf = np.fft.ifft(y)
    return xf, yf
d13e1519cbcec635bbf2f17a0f0abdd44f41ae53
3,653,890
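A minimal sketch of the transform above with interpolate=False (so the undefined _fourier_interpolate helper is never touched); numpy is assumed to be importable in the defining module, and the input grid is already uniform.

import numpy as np

# A uniformly sampled cosine; no interpolation needed.
x = np.linspace(0, 2 * np.pi, 256, endpoint=False)
y = np.cos(5 * x)

xf, yf = ifft_method(x, y, interpolate=False)
print(xf.shape, yf.shape)    # matching lengths
print(np.abs(yf).max())      # dominant spectral amplitude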
import sys def getExecutable(): """ Returns the executable this session is running from. :rtype: str """ return sys.executable
87d842239f898554582900d879501b2a3457df8e
3,653,891
def run(namespace=None, action_prefix='action_', args=None): """Run the script. Participating actions are looked up in the caller's namespace if no namespace is given, otherwise in the dict provided. Only items that start with action_prefix are processed as actions. If you want to use all items in the namespace provided as actions set action_prefix to an empty string. :param namespace: An optional dict where the functions are looked up in. By default the local namespace of the caller is used. :param action_prefix: The prefix for the functions. Everything else is ignored. :param args: the arguments for the function. If not specified :data:`sys.argv` without the first argument is used. """ if namespace is None: namespace = sys._getframe(1).f_locals actions = find_actions(namespace, action_prefix) if args is None: args = sys.argv[1:] if not args or args[0] in ('-h', '--help'): return print_usage(actions) elif args[0] not in actions: fail('Unknown action \'%s\'' % args[0]) arguments = {} types = {} key_to_arg = {} long_options = [] formatstring = '' func, doc, arg_def = actions[args.pop(0)] for idx, (arg, shortcut, default, option_type) in enumerate(arg_def): real_arg = arg.replace('-', '_') if shortcut: formatstring += shortcut if not isinstance(default, bool): formatstring += ':' key_to_arg['-' + shortcut] = real_arg long_options.append(isinstance(default, bool) and arg or arg + '=') key_to_arg['--' + arg] = real_arg key_to_arg[idx] = real_arg types[real_arg] = option_type arguments[real_arg] = default try: optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options) except getopt.GetoptError, e: fail(str(e)) specified_arguments = set() for key, value in enumerate(posargs): try: arg = key_to_arg[key] except IndexError: fail('Too many parameters') specified_arguments.add(arg) try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for argument %s (%s): %s' % (key, arg, value)) for key, value in optlist: arg = key_to_arg[key] if arg in specified_arguments: fail('Argument \'%s\' is specified twice' % arg) if types[arg] == 'boolean': if arg.startswith('no_'): value = 'no' else: value = 'yes' try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for \'%s\': %s' % (key, value)) newargs = {} for k, v in arguments.iteritems(): newargs[k.startswith('no_') and k[3:] or k] = v arguments = newargs return func(**arguments)
83a575f633088dc44e1cfcce65efadfb6fda84cc
3,653,892
import os


def get_file_with_suffix(d, suffix):
    """
    Return the name (with the suffix stripped) of the first file in directory d
    that ends with the given suffix, or None if no such file exists.
    """
    items = os.listdir(d)
    for file in items:
        if file.endswith(suffix):
            return file.split(suffix)[0]
    return None
1191868a4fd9b925f6f8ce713aba16d9b66f1a9a
3,653,893
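A small usage sketch for the lookup above, built on a temporary directory so it is self-contained.

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, 'model.ckpt'), 'w').close()
    open(os.path.join(d, 'notes.txt'), 'w').close()

    print(get_file_with_suffix(d, '.ckpt'))   # 'model'
    print(get_file_with_suffix(d, '.bin'))    # None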
def PolyMod(f, g): """ return f (mod g) """ return f % g
53b47e993e35c09e59e209b68a8a7656edf6b4ce
3,653,894
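PolyMod simply defers to the % operator, so it works for any objects that implement it; a short sketch with plain integers, cross-checked against divmod.

f, g = 17, 5
q, r = divmod(f, g)
assert PolyMod(f, g) == r == f - q * g

print(PolyMod(f, g))    # 2
print(PolyMod(-3, 7))   # 4: Python's % follows the sign of the divisor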
def policy_improvement(nS, nA, P, full_state_to_index, g=.75,t=0.05): """Iteratively evaluates and improves a policy until an optimal policy is found or reaches threshold of iterations Parameters: nS: number of states nA: number of actions P: transitional tuples given state and action full_state_to_index: dictionary of state to index Values g: gamma which is discount factor t: theta or stopping condition Returns: tuple of policy and value of policy """ policy = np.ones([nS, nA]) / nA # random policy (equal chance all actions) i=0 while True: i+=1 if i%100==0: print(i) V = policy_eval(policy, nS, nA, P, full_state_to_index, gamma=g, theta=t) # eval current policy is_policy_stable = True # true is no changes false if we make changes for s in range(nS): chosen_a = np.random.choice(np.argwhere(policy[s] == np.amax(policy[s])).flatten().tolist()) action_values = value(s, V, full_state_to_index, nA, P, gamma=g, theta=t) best_a = np.random.choice(np.argwhere(action_values == np.amax(action_values)).flatten().tolist()) if chosen_a != best_a: # greedy update is_policy_stable = False policy[s] = np.eye(nA)[best_a] if is_policy_stable or i==10000: print(i, 'Iterations') return policy, V
84373843a179bb2afda20427e24795fbb524ae2c
3,653,895
import torch def get_train_val_test_data(args): """Load the data on rank zero and boradcast number of tokens to all GPUS.""" (train_data, val_data, test_data) = (None, None, None) # Data loader only on rank 0 of each model parallel group. if mpu.get_model_parallel_rank() == 0: data_config = configure_data() data_config.set_defaults(data_set_type='BERT', transpose=False) (train_data, val_data, test_data), tokenizer = data_config.apply(args) before = tokenizer.num_tokens after = before multiple = args.make_vocab_size_divisible_by * \ mpu.get_model_parallel_world_size() while (after % multiple) != 0: after += 1 print_rank_0('> padded vocab (size: {}) with {} dummy ' 'tokens (new size: {})'.format( before, after - before, after)) # Need to broadcast num_tokens and num_type_tokens. token_counts = torch.cuda.LongTensor([after, tokenizer.num_type_tokens, int(args.do_train), int(args.do_valid), int(args.do_test)]) else: token_counts = torch.cuda.LongTensor([0, 0, 0, 0, 0]) # Broadcast num tokens. torch.distributed.broadcast(token_counts, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) num_tokens = token_counts[0].item() num_type_tokens = token_counts[1].item() args.do_train = token_counts[2].item() args.do_valid = token_counts[3].item() args.do_test = token_counts[4].item() return train_data, val_data, test_data, num_tokens, num_type_tokens
c729262e71bb40c016c6b7a65deaba65f4db951e
3,653,896
def user_data_check(data_file): """ 1 - Check user data file, and if necessary coerce to correct format. 2 - Check for fold calculation errors, and if correct, return data frame for passing to later functions. 3 - If incorrect fold calculations detected, error message returned. :param data_file: user data table. :return orig_file_parsed: Dataframe (if error checks pass). :return error_message: Text string (error message). """ # Read user_data and assign to dataframe variable. orig_file = pd.read_table(data_file) # Subset source df by the first 7 columns. # Note: last index should be +1 bigger than number of fields. # AZ20.tsv file has 86 total columns, 80 of which are empty cells. # Necessary step to maintain indexing references at a later stage! orig_file_subset = orig_file.iloc[:, 0:7] # Coerce column 1 to object. orig_file_subset.iloc[:, 0] = orig_file_subset.iloc[:, 0].astype(object) # Coerce column 2-7 to float. orig_file_subset.iloc[:, 1:7] = orig_file_subset.iloc[:, 1:7].astype(float) # Subset data frame by checking if mean intensities in both columns, # are greater than zero. orig_file_subset = orig_file_subset[(orig_file_subset.iloc[:, 1] > 0) |\ (orig_file_subset.iloc[:, 2] > 0)] # A data file that has been edited such that columns have been deleted, # i.e. in excel, may introduce "phantom" columns in python environment. # Such columns are coerced to "un-named" fields with nan entries. # If cv columns present with values, original data frame unaffected. # Code drops columns that contain all nan in columns. orig_file_subset = orig_file_subset.dropna(axis=1, # Iterate by columns. how="all") # Drop if all na # in columns. # Determine number of columns. num_col = orig_file_subset.shape[1] # Check if number of cols = 5 and append new columns with all entries # = to 1 for cv calculations that are missing. # If number of columns adhere to correct format, data frame unaffected. if num_col == 5: orig_file_subset["control_cv"] = 1 orig_file_subset["condition_cv"] = 1 # Add fold calculation column to df. orig_file_subset["calc_fold_change"] = \ orig_file_subset.iloc[:, 2].divide(orig_file_subset.iloc[:,1]) # Define user and script calculated fold changes as series variables. user_fold_calc = orig_file_subset.iloc[:, 3] script_fold_calc = orig_file_subset.iloc[:, 7] # Determine if fold change calculations match by # an absolute tolerance of 3 signifcant figures. # Numpy "isclose()" function used to check closeness of match. # Boolean series returned to new column in data frame. orig_file_subset["check_fold_match"] = \ np.isclose(user_fold_calc, script_fold_calc, atol=10**3) # Determine number of true matches for fold change calculations. # Summing of boolean series carried out: True = 1, False = 0. sum_matches = sum(orig_file_subset.iloc[:, 8] == 1) # Define error message if fold calculation matching determines # existance of errors. error_message = \ ("Anomaly detected..PhosQuest will self-destruct in T minus 10 seconds"+ "...just kidding! Please check your fold change calculations, "+ "a discrepancy has been detected.") # If "sum_matches" equal to length of data frame, then return data frame. # If not, return error message. # Note: if first logical test passes, this indicates that fold change # calculations in original user data are correct (within tolerance), # and filtered dataframe returned for further analysis. if sum_matches == len(orig_file_subset): orig_file_parsed = orig_file_subset.iloc[:, 0:7] return orig_file_parsed elif sum_matches != len(orig_file_subset): return error_message
fc1b1d18a0e9a5a28674573cc2ab1c7cf9f08a03
3,653,897
import requests


def get_modules(request: HttpRequest) -> JsonResponse:
    """Gets a list of modules for the provided course from the Canvas API based on current user

    A course ID has to be provided in order to access the correct course's modules
    :param request: The current request as provided by django
    :return: A JSONResponse containing either an error or the data provided by Canvas
    """
    # Note: For functionality documentation, see get_courses, as much of it is the same
    error = expire_checker(request)
    url = request.user.canvas_oauth2_token.url
    if error[0] is not None:
        return error[0]
    client = error[1]
    header = {"Authorization": f"Bearer {request.user.canvas_oauth2_token.access_token}"}
    course_id = request.GET.get("course_id", "")
    if not course_id:
        return error_generator("There was no provided course ID!",
                               404)  # Return early when no course ID was supplied
    modules = requests.get(
        "{}/api/v1/courses/{}/modules?per_page=50".format(url, course_id),
        headers=header,
        verify=not client.dev)  # skip SSL verification only against a dev instance
    return content_helper(modules)
d583779b075419dd67514bd50e709374fd4964bf
3,653,898
def create_workflow(session, workflow_spec=dict(), result_schema=None): """Create a new workflow handle for a given workflow specification. Returns the workflow identifier. Parameters ---------- session: sqlalchemy.orm.session.Session Database session. workflow_spec: dict, default=dict() Optional workflow specification. result_schema: dict, default=None Optional result schema. Returns ------- string """ workflow_id = util.get_unique_identifier() workflow = WorkflowObject( workflow_id=workflow_id, name=workflow_id, workflow_spec=workflow_spec, result_schema=result_schema ) session.add(workflow) return workflow_id
1c3843a15d543fb10427b52c7d654abd877b3342
3,653,899