Dataset columns: content — string (lengths 39 to 14.9k); sha1 — string (length 40); id — int64 (0 to 710k).
def parseRegionSetName(regionSetName):
    """Get region and setName from regionSetName."""
    if '.' in regionSetName:
        region, setName = regionSetName.split('.')
    else:
        region, setName = 'Assembly', regionSetName
    return region, setName
ebda9b44833e42127c7bec1169b9660137c8daa3
697,333
def flatten_object(obj, result=None):
    """Convert a JSON object to a flattened dictionary.

    Example: {"db": {"user": "bar"}} becomes {"db.user": "bar"}
    """
    if not result:
        result = {}

    def _flatten(key_obj, name=''):
        if isinstance(key_obj, dict):
            for item in key_obj:
                arg = str(name) + str(item) + '.'
                _flatten(key_obj[item], arg)
        elif isinstance(key_obj, list):
            index = 0
            for item in key_obj:
                arg = str(name) + str(index) + '.'
                _flatten(item, arg)
                index += 1
        else:
            result[name[:-1]] = key_obj

    _flatten(obj)
    return result
3a86a9c3ed1ad4add6027207a074ba0c0ed1b3c8
697,334
def check_view_filter_and_group_by_criteria(filter_set, group_by_set):
    """Return a bool for whether a view can be used."""
    no_view_group_bys = {"project", "node"}
    # The dashboard does not show any data grouped by OpenShift cluster, node,
    # or project, so we do not have views for these group-bys.
    if group_by_set.intersection(no_view_group_bys) or filter_set.intersection(no_view_group_bys):
        return False
    return True
8baeeb827ba092a3b5262f076ba1fa77aaf55b5f
697,335
def ping(event):
    """Responds 'pong' to your 'ping'."""
    return 'pong'
48d07ca1e513b28212c56758c41e1757b5f75468
697,340
import functools
import operator

def _has_pattern_match(name: str, patterns) -> bool:
    """Check if name matches any of the patterns."""
    return functools.reduce(
        operator.or_,
        map(lambda r: r.search(name) is not None, patterns),
        False)
1dd6ad54ee35db20b6fcec0495e4c4ef61788aa0
697,341
import torch

def _get_zero_grad_tensor(device):
    """Return a zero tensor that requires grad."""
    loss = torch.as_tensor(0.0, device=device)
    loss = loss.requires_grad_(True)
    return loss
5bf67a7ab0ff50e041fcd07484d6f5468acf6d4e
697,343
def update_borders(bb: list, roi_size: list):
    """
    Update bounding box borders according to roi_size.
    Borders are updated from the center of the image.

    :param bb: original bounding box
    :param roi_size: output bounding box size
    :return: modified bounding box
    """
    mid_x = bb[0] + bb[2] / 2
    mid_y = bb[1] + bb[3] / 2
    new_x = int(mid_x - roi_size[1] / 2)
    new_y = int(mid_y - roi_size[0] / 2)
    return [new_x, new_y, roi_size[0], roi_size[1]]
15574e03cc3bea0b33e5f20014bc83ea60b47bfc
697,345
import random

def random_up(sentence: str) -> str:
    """
    Randomly makes some letters lower case and some upper case.

    Parameters:
        sentence (str): the sentence to mix case of the letters in

    Returns:
        str: modified sentence

    Examples:
        >>> random_up('abc')
        'Abc'
        >>> random_up('abcdef')
        'acDeBf'
        >>> random_up('i like python')
        'i lIKE pYThOn'
    """
    return ''.join(random.choice((str.upper, str.lower))(c) for c in sentence)
15d7300e258b54462ae51b8f70a1b8e8a59e0120
697,347
import json

def load_scenario(json_file_path):
    """Load scenario from JSON."""
    with open(json_file_path, 'r') as json_file:
        parsed_json = json.load(json_file)
    return parsed_json
005eb73753fcbbd873e5cb73fa54142990ebc97d
697,349
def to_deg(value, loc):
    """Convert decimal coordinates into a degrees, minutes and seconds tuple.

    Keyword arguments:
    value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]
    return: tuple like (25, 13, 48.343, 'N')
    """
    if value < 0:
        loc_value = loc[0]
    elif value > 0:
        loc_value = loc[1]
    else:
        loc_value = ""
    abs_value = abs(value)
    deg = int(abs_value)
    t1 = (abs_value - deg) * 60
    minutes = int(t1)  # renamed from `min` to avoid shadowing the built-in
    sec = round((t1 - minutes) * 60, 5)
    return (deg, minutes, sec, loc_value)
5c2eb955bc3e05f5f8378bc0df2ed16ea7f7cf3b
697,350
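A quick sanity check of the conversion above, using my own sample coordinates:

print(to_deg(48.8584, ["S", "N"]))   # (48, 51, 30.24, 'N')
print(to_deg(-2.2945, ["W", "E"]))   # (2, 17, 40.2, 'W')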
def max_sub_array(nums):
    """Returns the max subarray sum of the given list of numbers.
    Returns 0 if nums is None or an empty list.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    if nums is None:
        return 0
    if len(nums) == 0:
        return 0
    max_sum = nums[0]
    curr_sum = nums[0]
    for i in range(1, len(nums)):
        curr_sum = max(curr_sum + nums[i], nums[i])
        max_sum = max(curr_sum, max_sum)
    return max_sum
729f277b61b517fe7e434812576347fe6742401c
697,351
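max_sub_array is Kadane's algorithm; tracing the two running sums on my own example shows how the maximum is carried along:

# nums:     [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# curr_sum:  -2, 1, -2, 4,  3, 5, 6,  1, 5
# max_sum:   -2, 1,  1, 4,  4, 5, 6,  6, 6
print(max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from [4, -1, 2, 1]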
def add_camera_args(parser):
    """Add parser arguments for camera options."""
    parser.add_argument('--video', type=str, default=None,
                        help='video file name, e.g. traffic.mp4')
    parser.add_argument('--video_looping', action='store_true',
                        help='loop around the video file [False]')
    parser.add_argument('--onboard', type=int, default=None,
                        help='Jetson onboard camera [None]')
    parser.add_argument('--copy_frame', action='store_true',
                        help='copy video frame internally [False]')
    parser.add_argument('--do_resize', action='store_true',
                        help='resize image/video [False]')
    parser.add_argument('--width', type=int, default=640,
                        help='image width [640]')
    parser.add_argument('--height', type=int, default=480,
                        help='image height [480]')
    return parser
21f74ccd2a092ba9620f40bb1a0a9c5c6dbf72a5
697,356
def process_wildcard(fractions):
    """Processes elements with a wildcard ``?`` weight fraction and returns
    the composition balanced to 1.0.
    """
    wildcard_zs = set()
    total_fraction = 0.0
    for z, fraction in fractions.items():
        if fraction == "?":
            wildcard_zs.add(z)
        else:
            total_fraction += fraction
    if not wildcard_zs:
        return fractions
    balance_fraction = (1.0 - total_fraction) / len(wildcard_zs)
    for z in wildcard_zs:
        fractions[z] = balance_fraction
    return fractions
6358bfc7ee3b7f187b375b1df2cecd8ce104674a
697,357
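A small worked example with made-up mass fractions: the two wildcard entries split the remaining 0.6 equally:

print(process_wildcard({26: 0.4, 24: "?", 28: "?"}))
# {26: 0.4, 24: 0.3, 28: 0.3}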
def _convert_ketone_unit(raw_value):
    """Convert raw ketone value as read in the device to its value in mmol/L."""
    return int((raw_value + 1) / 2.) / 10.
804d34ecc9d901f3d958ebee34282885248c3499
697,361
import base64

def secret_to_bytes(secret):
    """Convert base32 encoded secret string to bytes."""
    return base64.b32decode(secret)
81720bc65fa4be6a18cf0ba461adaf7b9227a417
697,362
def find_constraints(mol, sites):
    """Find the NwChem constraints for a selection of sites on a molecule."""
    # Find the corresponding atom numbers in string format
    site_numbers = []
    for i in range(len(mol.sites)):
        if mol.sites[i] in sites:
            site_numbers.append(str(i + 1) + ' ')
    site_numbers = ''.join(site_numbers)
    # Set up the constraints on the atoms
    return {'fix atom': site_numbers}
978bef23a58ec670e4bf002b16ebca580acb5663
697,370
def decomp_proc(values):
    """
    Given the row of predictions for the decomposition mode test output,
    remove the pointless trailing zeroes from the time.

    :param values: A row of the decomposition predictions .csv, split by commas into a list
    :return: Same row with the pointless zeroes after the time removed
    """
    time = values[1]
    times = time.split(".")
    assert (len(times) == 2)
    assert (times[1] == "000000")
    values[1] = times[0]
    return values
6898dc6586ffed0b16dbde1159861eccc63206c5
697,371
def _non_adjacent_filter(self, cmd, qubit_graph, flip=False):
    """A ProjectQ filter to identify when swaps are needed on a graph.

    This flags any gates that act on two non-adjacent qubits with respect
    to the qubit_graph that has been given.

    Args:
        self (Dummy): Dummy parameter to meet function specification.
        cmd (projectq.command): Command to be checked for decomposition into
            additional swap gates.
        qubit_graph (Graph): Graph object specifying connectivity of qubits.
            The values of the nodes of this graph are unique qubit ids.
        flip (Bool): Flip for switching if identifying a gate is in this
            class by true or false. Designed to meet the specification of
            ProjectQ InstructionFilter and DecompositionRule with one
            function.

    Returns:
        bool: When flip is False, this returns True when a 2 qubit command
            acts on non-adjacent qubits or when it acts only on a single
            qubit. This is reversed when flip is used.
    """
    if qubit_graph is None:
        return True ^ flip

    total_qubits = (cmd.control_qubits +
                    [item for qureg in cmd.qubits for item in qureg])

    # Check for non-connected gate on 2 qubits
    if ((len(total_qubits) == 1) or
            (len(total_qubits) == 2 and
             qubit_graph.is_adjacent(
                 qubit_graph.find_index(total_qubits[0].id),
                 qubit_graph.find_index(total_qubits[1].id)))):
        return True ^ flip
    return False ^ flip
9d3a55341c2a1410c5c1864ce5fcd6ea177d4026
697,373
def str2bool(str):
    """Tells whether a string is a Yes (True) or a No (False)."""
    if str.lower() in ["y", "yes"]:
        return True
    elif str.lower() in ["n", "no"]:
        return False
    else:
        raise Exception("Please enter Yes or No")
ba3f8a2fdca089ae5cdc608a04091009a57a7662
697,374
def _make_value_divisible(value, factor, min_value=None):
    """
    Ensures that all layers have a channel number divisible by `factor`
    (commonly 8).

    :param value: value to process
    :param factor: divisor
    :param min_value: the new value is never smaller than min_value
    :return: new value
    """
    if min_value is None:
        min_value = factor
    new_value = max(int(value + factor / 2) // factor * factor, min_value)
    # Make sure the rounding does not go down by more than 10%.
    if new_value < value * 0.9:
        new_value += factor
    return new_value
89561ca1551b988030b3d0b4fdd5d17f5664607f
697,380
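This is the channel-rounding helper used in MobileNet-style model code; a few sample values of my own show both the rounding and the 10% guard:

print(_make_value_divisible(32, 8))  # 32
print(_make_value_divisible(33, 8))  # 32 (rounds down; still within 10% of 33)
print(_make_value_divisible(37, 8))  # 40
print(_make_value_divisible(7, 8))   # 8  (clamped to min_value)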
def contig_count(contig):
    """Return a count of contigs from a fasta file."""
    with open(contig, 'r') as f:  # context manager so the file is closed
        return sum(1 for line in f if line.startswith('>'))
dde6bbcf5799dbea197c2b0f391bf26e9ac960b6
697,381
from pathlib import Path

def contour(anchor):
    """contour(anchor) -> Path

    Returns a Path object representing the contour starting with the Dart
    anchor. (syntactic sugar for 'Path(anchor.phiOrbit())')
    """
    return Path(anchor.phiOrbit())
c2fee570e3feb753439c5af941b0658e2aa49bbc
697,382
def HPX_grid_size(Nside):
    """Return the size of the pixel grid (Nx, Ny) for a given Nside."""
    Nx = 8 * Nside
    Ny = 4 * Nside + 1
    return Nx, Ny
8e267c467ed52ef24b1d540cff9aeac84a1e1bb4
697,385
def _get_percent(text):
    """If text is formatted like '33.2%', remove the percent sign and convert
    to a float. Otherwise, just convert to a float.
    """
    if not text:
        return None
    if text.endswith('%'):
        text = text[:-1]
    return float(text.strip())
2975f0a603a113bf7991753250a83be4da363070
697,388
def contig_to_array(bw, chrm, res=None):
    """
    Convert single basepair bigwig information to a numpy array.

    Args:
        bw - a pyBigWig object
        chrm - name of chromosome you want
        res - resolution you want data at in bp

    Returns:
        outarray - numpy array at specified resolution
    """
    chrm_length = bw.chroms(chrm)
    # makes an array at 1 bp resolution
    out_array = bw.values(chrm, 0, chrm_length, numpy=True)
    if res:
        out_array = out_array[::res]
    return out_array
f09fb80ef5a073bf87f82b0d86c26153db404f4f
697,389
def clean_empty_keyvalues_from_dict(d):
    """
    Cleans all key value pairs from the object that have empty values,
    like [], {} and ''.

    Arguments:
        d {object} -- The object to be sent to metax. (might have empty values)

    Returns:
        object -- Object without the empty values.
    """
    if not isinstance(d, (dict, list)):
        return d
    if isinstance(d, list):
        return [v for v in (clean_empty_keyvalues_from_dict(v) for v in d) if v]
    return {k: v for k, v in ((k, clean_empty_keyvalues_from_dict(v)) for k, v in d.items()) if v}
8769e5ceda55588a136fdf131f19aea4c5f06f95
697,391
import random

def session_id_generator(size=8):
    """Generate a session id."""
    s = "0123456789ABCDEF"
    return "".join(random.sample(s, size))
b57ca24f6cc08cb465562f09495c99f25d72d296
697,392
def _resource_name_package(name):
    """
    pkg/typeName -> pkg, typeName -> None

    :param name: package resource name, e.g. 'std_msgs/String', ``str``
    :returns: package name of resource, ``str``
    """
    if '/' not in name:
        return None
    return name[:name.find('/')]
f434aa1fcfd18797625ca63c7f121dce020f8585
697,398
def hours2days(period):
    """
    Uses a tuple to return multiple values.

    Takes one argument, an integer, that is a time period in hours, and
    returns a tuple of how long that period is in days and hours, with hours
    being the remainder that can't be expressed in days. For example, 39
    hours is 1 day and 15 hours, so the function returns (1, 15).

    >>> hours2days(24)  # 24 hours is one day and zero hours
    (1, 0)
    >>> hours2days(25)  # 25 hours is one day and one hour
    (1, 1)
    >>> hours2days(10000)
    (416, 16)
    """
    hours_of_day = 24
    day = period // hours_of_day
    hours = period % hours_of_day
    return day, hours
2198538b30ae9e0b1b1c1ec6f9f7f76ac393cafb
697,403
def top_ranked_final_primers(filter_merged_df):
    """
    Drops duplicate sequence ids and keeps the first, which also corresponds
    to the highest ranking primer pair for each sample.

    Args:
        filter_merged_df (DataFrame): input from filter_merged, where primers
            are only equal to on-target primers from initial primer generation.

    Returns:
        top_ranked_df (DataFrame): outputs only the highest scoring primer
            pair at each position.
    """
    top_ranked_df = filter_merged_df.drop_duplicates('Sequence ID', keep='first')
    return top_ranked_df
03f682a03d6454c3b142231fe78d82208117cf09
697,407
def words_sent(message_container, start_date, end_date):
    """
    Return number of words sent between start and end date contained by
    message container (chat/member).
    """
    words_sent = 0
    for message in message_container.messages:
        if (start_date <= message.timestamp.date() <= end_date
                and message.type == 'text'):
            words_sent += len(message.words())
    return words_sent
c8e466a8d9c1049a6b5066fe0c9efccbbd3ae245
697,409
import math

def matrix_from_quaternion(quaternion):
    """Calculates a rotation matrix from quaternion coefficients.

    Parameters
    ----------
    quaternion : [float, float, float, float]
        Four numbers that represent the four coefficient values of a
        quaternion.

    Returns
    -------
    list[list[float]]
        The 4x4 transformation matrix representing a rotation.

    Raises
    ------
    ValueError
        If quaternion is invalid.

    Examples
    --------
    >>> q1 = [0.945, -0.021, -0.125, 0.303]
    >>> R = matrix_from_quaternion(q1)
    >>> q2 = quaternion_from_matrix(R)
    >>> allclose(q1, q2, tol=1e-03)
    True
    """
    sqrt = math.sqrt
    q = quaternion
    n = q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2  # dot product

    # perhaps this should not be hard-coded?
    eps = 1.0e-15

    if n < eps:
        raise ValueError("Invalid quaternion, dot product must be != 0.")

    q = [v * sqrt(2.0 / n) for v in q]
    q = [[q[i] * q[j] for i in range(4)] for j in range(4)]  # outer product
    rotation = [
        [1.0 - q[2][2] - q[3][3], q[1][2] - q[3][0], q[1][3] + q[2][0], 0.0],
        [q[1][2] + q[3][0], 1.0 - q[1][1] - q[3][3], q[2][3] - q[1][0], 0.0],
        [q[1][3] - q[2][0], q[2][3] + q[1][0], 1.0 - q[1][1] - q[2][2], 0.0],
        [0.0, 0.0, 0.0, 1.0]]
    return rotation
57a38e4214c94e54e2cfb2fd71e1bfa57d4f714b
697,413
from typing import Iterable

def create_item_name(item_type: str, item_names: Iterable[str]) -> str:
    """
    Translates an item with a type into a name. For instance, if there are
    two items of type 'POST', the first will be named 'POST0' and the second
    will be 'POST1'.

    Args:
        item_type: Type of item
        item_names: Names of current items

    Returns:
        Translated item name
    """
    name_index = len([name for name in item_names if name.startswith(item_type)])
    return f"{item_type}{name_index}"
8ec37473ee0a3dc880dc962b9f1c622c2dfd9af3
697,414
def get_installed_tool_shed_repository(app, id):
    """Get a tool shed repository record from the Galaxy database defined by the id."""
    return app.install_model.context.query(app.install_model.ToolShedRepository) \
        .get(app.security.decode_id(id))
d76c3af3f159f9a0882a6b27951a2e8060f4bc28
697,421
def _keytify_test_cases(test_cases):
    """Traverse the test cases list and return a dictionary which associates
    each test case name with its duration. This is used for fast access to
    test case durations.
    """
    res = {}
    for tc in test_cases:
        key = "%s/%s" % (tc.get('classname'), tc.get('name'))
        if tc.get('time') is None or float(tc.get('time')) == 0.0:
            continue
        res[key] = float(tc.get('time'))
    return res
3adb4f94e617bc03722cb860f0bcd2c388d6ec1a
697,422
def vv_vel(vel, dt, force, force_previous, mass):
    """Computes the new velocities using the velocity-Verlet integration scheme.

    Args:
        vel -- np.array(N,dim) containing the current particle velocities
        dt -- timestep
        force -- np.array(N,dim) containing the current net forces on each particle
        force_previous -- np.array(N,dim) containing forces at previous timestep
        mass -- value of particle mass

    Returns:
        vel -- np.array(N,dim) containing updated velocities

    Notes:
        -- the routine runs using Numba @njit decorator for faster run time
    """
    for dim in range(vel.shape[1]):
        for i in range(vel.shape[0]):
            vel[i, dim] += 0.5 * dt * (force[i, dim] + force_previous[i, dim]) / mass
    return vel
df56e09ad191b161d258f1decf2bdcaedf7bded9
697,423
import requests
import json

def getdetailedinfofromid(url, token, personid):
    """
    @param url: url for the WebEx Teams API calls
    @param token: WebEx Teams token to be used for the queries of the WebEx Teams cloud
    @param personid: personID of the end user that we would like to return the email for
    @return: detailed array of all identification data for the user specified by the personID field

    This function will take the person ID value and query the WebEx Teams API
    to return all data associated with the user. It includes:
        "avatar", "created", "displayName", "emails", "firstName", "id",
        "lastName", "nickName", "orgId", "type"
    """
    apistring = url + "/v1/people?id=" + personid
    # Set up the headers based upon the WebEx Teams API
    headers = {'Authorization': 'Bearer {}'.format(token),
               'content-type': 'application/json'}
    # Send the request to the WebEx Teams API using the payload and headers defined above
    try:
        resp = requests.get(apistring, headers=headers)
    except requests.exceptions.RequestException as e:
        print(e)
        return ''
    if resp.status_code == 200:
        message_dict = json.loads(resp.text)
        message_dict['statuscode'] = str(resp.status_code)
        return message_dict['items'][0]
    else:
        return ''
c6bd54182cb8c7cc109ad68505fca5887a6f9fad
697,425
def map_nlist(nlist, fn):
    """
    Maps `fn` to elements in a nested list of arbitrary depth.
    https://stackoverflow.com/a/26133679

    :param nlist: Input list
    :param fn: Function to be applied
    :return: Modified list
    """
    new_list = []
    for i in range(len(nlist)):
        if isinstance(nlist[i], list):
            new_list.append(map_nlist(nlist[i], fn))
        else:
            new_list.append(fn(nlist[i]))
    return new_list
5f808eff663eae9b38073804241295c6976237a3
697,427
def s3_upload_part_copy(s3_obj, bucketname, copy_source, object_key, part_number, upload_id):
    """
    Boto3 client based upload_part_copy operation.

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        copy_source (str): Name of the source bucket and key name. {bucket}/{key}
        object_key (str): Unique object identifier for the copied object
        part_number (int): Part number
        upload_id (str): Upload id

    Returns:
        dict: upload_part_copy response
    """
    return s3_obj.s3_client.upload_part_copy(
        Bucket=bucketname,
        CopySource=copy_source,
        Key=object_key,
        PartNumber=part_number,
        UploadId=upload_id,
    )
e89fdcf85616eeeab6c8da93dbbd290ad992820b
697,428
import math
import torch

def sdeint_euler(f, g, dif_g, t, h, x0, mid_state=None):
    """
    SDE integration from t0=0 to t1=t, assuming diagonal noise.

    Args:
        f: drift function of (t, X); t is time and X is a d-dimensional
            vector. Outputs a d-dimensional vector.
        g: diffusion function of (t, X); t is time and X is a d-dimensional
            vector. We assume G = g(t, X) is a diagonal matrix, so g(t, X)
            outputs the diagonal vector (d-dimensional).
        t: final time.
        h: step size of the discretization (the real step size might be
            slightly smaller).
        x0: initial value of X.

    Returns:
        y: d-dimensional vector, storing the integration result X(t).
    """
    N = int(t / h) + 1
    h_real = t / N
    root_h = math.sqrt(h_real)
    # for storing the noise
    tt = 0
    x = x0
    z = torch.randn_like(x0).to(x0)
    z.requires_grad = False
    for step in range(N):
        if mid_state is not None:
            mid_state.append(x.detach().clone())
        tmp = root_h * z.normal_()
        x = x + f(tt, x) * h_real + g(tt, x) * tmp
        tt += h_real
    return x
f9147add232a244cd94d91f95610ff37987c07c3
697,430
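sdeint_euler implements the Euler-Maruyama scheme; a minimal usage sketch with my own toy drift and diffusion (an Ornstein-Uhlenbeck-like process; dif_g is unused by this integrator, so None is passed):

f = lambda t, x: -x                         # drift: dX = -X dt + 0.1 dW
g = lambda t, x: 0.1 * torch.ones_like(x)   # diagonal diffusion
x0 = torch.ones(4)
xt = sdeint_euler(f, g, None, 1.0, 0.01, x0)
print(xt.shape)  # torch.Size([4])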
def max_element(l):
    """Returns the maximum element of a given list of numbers."""
    max_ = None
    for x in l:
        if max_ is None or x > max_:
            max_ = x
    return max_
1596f33f0bb91839fbcaf2613892bf90fafd6afd
697,432
def mask_pipe_mw(text: str) -> str:
    """
    Mask the pipe magic word ({{!}}).

    :param text: Text to mask
    :return: Masked text
    """
    return text.replace("{{!}}", "|***bot***=***param***|")
546d7c4b71ce3403da7a33883dac2cd3171224e6
697,433
def to_scalar(var):
    """Convert a tensor to a scalar number."""
    return var.view(-1).item()
56170067e38773ce452268827ef87d7b1bab877a
697,434
def check_input(def_list):
    """Check that all defect structures in the list are not None.

    Args:
        def_list (list): List of defect structures

    Returns:
        True if all defect structures are present.
    """
    for defect in def_list:
        if not defect:
            return False
    return True
0c1372688e4780dd44df575d470e0511366dbbc4
697,435
from time import strftime, localtime
from typing import Union

def format_time(time_tick: Union[int, float]) -> str:
    """
    Format a timestamp as a concrete date, e.g.

    >>> format_time(123456789)
    '1973-11-30 05:33:09'

    :param time_tick: timestamp
    :return: formatted date
    """
    return strftime('%Y-%m-%d %H:%M:%S', localtime(time_tick))
8f3da87066013ce267b485d4924617c9615b455e
697,440
def pack(word, pattern):
    """Return a packed word given a spaced seed pattern.

    >>> pack('actgac', [True, False, True, True, False, True])
    'atgc'
    """
    ret = []
    for i, char in enumerate(word):
        if pattern[i]:
            ret.append(char)
    return "".join(ret)
9697a4ee5b9bbc3d7f4d22040196bc8856fd2b6d
697,441
def get_shp_extent(gdf):
    """Fetch extent of input shapefile."""
    extent = tuple(gdf.bounds.values[0])
    return extent
d9343f94f349c1db5af033d78693d3b55407a3c8
697,448
def get_grid_data(df):
    """
    Prunes dataframe to rows whose longitudes are multiples of 60 degrees
    and whose years are multiples of 10. This includes only lat/lon grid
    locations for which we have USNO data for all eight types of twilight
    events.
    """
    bools = (df['Longitude'] % 60 == 0) & (df['Year'] % 10 == 0)
    return df[bools]
4788356a9c0b14759a34c436f2f8267ea07043be
697,449
def factorial(value: int) -> int:
    """
    Calculates the factorial of the given value.

    factorial
    =========
    The `factorial` function takes a positive integer value and calculates
    the factorial (n! = n * (n - 1) * (n - 2) * ... * 3 * 2 * 1).

    Parameters
    ----------
    value: int
        a positive integer value

    Returns
    -------
    factorial: int
        factorial of the value (value!)
    """
    if value <= 1:
        # Base case: 0! = 1! = 1; also stops the recursion.
        return 1
    # Recursive case: call factorial with value decreased by 1.
    return value * factorial(value - 1)
642700f9ec42c3ab47130d599626c44b2de4165c
697,450
import ast

def _build_table(src):
    """
    Builds an AST node table from the given source.

    Args:
        src (str): source code

    Returns:
        dict: table of ast.AST nodes in the tree, keyed by node class
    """
    table = {}
    tree = ast.parse(src)
    for node in ast.walk(tree):
        curr = table.get(node.__class__, [])
        table[node.__class__] = curr + [node]
    return table
955fcec38e57a9657ffad183a7486ddfc5847bfc
697,451
import torch

def make_bow_vector(sentence, word_to_ix):
    """Turn a sentence (list of words) into a vector using the dictionary
    (word_to_ix). The returned data is a FloatTensor of shape
    1 (batch size) by len(word_to_ix).
    """
    vec = torch.zeros(len(word_to_ix))
    for word in sentence:
        vec[word_to_ix[word]] += 1
    return vec.view(1, -1)
ae0df26878537ae232e70881d5e8a56fcea0c350
697,453
def matmul(a, b):
    """
    Multiply two matrices.

    :param a: list of list of floats
    :param b: list of list of floats
    :return: resulting matrix
    """
    res = []
    for r_idx, a_r in enumerate(a):
        res_row = [
            sum([a_v * b_r[b_idx] for a_v, b_r in zip(a_r, b)])
            for b_idx in range(len(b[0]))
        ]
        res.append(res_row)
    return res
504f53c1de68a28a2c23c479b53acc76f359f347
697,454
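A quick check of matmul with my own 2x2 inputs:

A = [[1.0, 2.0], [3.0, 4.0]]
B = [[5.0, 6.0], [7.0, 8.0]]
print(matmul(A, B))  # [[19.0, 22.0], [43.0, 50.0]]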
from typing import Dict
from typing import Any
from typing import List

def directory(module_locals: Dict[str, Any]) -> List[str]:
    """
    Return the module's public directory for the dir function.

    Parameters
    ----------
    module_locals
        The module's locals as generated by the locals() function.

    Returns
    -------
    List of public objects.
    """
    dir_ = []
    for key in module_locals:
        if key.startswith('_'):
            continue
        try:
            include_in_dir = module_locals[key].__dict__['_include_in_dir']
        except KeyError:
            include_in_dir = True
        except AttributeError:
            include_in_dir = False
        if include_in_dir:
            dir_.append(key)
    return dir_
37dc44ddc18a21c5aa6faba9887cae0aa6bcdd0b
697,456
def SplitDataset_x_y(df, IndependentColumnName):
    """
    This function splits a dataframe into a dataframe with independent
    variables and a dataframe with dependent variables. The
    IndependentColumnName defines which are the independent variables,
    based on the related column names.

    Arguments:
    ----------
        - df: pandas dataframe
            The dataframe containing both the independent and dependent variables
        - IndependentColumnName: list
            The column names of the independent variables - can be a list of
            one element or more.

    Return:
    ----------
        - x: pandas dataframe
            All columns contained in the initial df dataframe except the
            columns provided in the IndependentColumnName list.
        - y: pandas dataframe
            Only the columns provided in the IndependentColumnName
    """
    y = df[IndependentColumnName]
    x = df.drop(IndependentColumnName, axis=1)
    return x, y
a2e4a6244fc5bcee401981ee54f20d179c09dc89
697,459
from functools import reduce
import operator

def validate_nmea_checksum(sentence):
    """
    Validates an NMEA sentence using the checksum according to the standard.

    :param sentence: NMEA sentence including checksum
    :returns:
        - Boolean result (checksum correct)
        - raw NMEA data string, with prefix $Gx and checksum suffix removed
    """
    sentence = sentence.strip('\n').strip('\r')
    nmeadata, cksum = sentence.split('*', 1)
    nmeadata = nmeadata.replace('$', '')
    xcksum = str("%0.2x" % (reduce(operator.xor, (ord(c) for c in nmeadata), 0))).upper()
    return (cksum == xcksum), nmeadata[2:]
82f5a63943916e4323178063366a0f8c276ad64a
697,461
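The checksum is the XOR of every character between '$' and '*'; checking it against a widely published sample GGA sentence (assuming its printed checksum, 47, is correct):

ok, data = validate_nmea_checksum(
    "$GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47")
print(ok)    # True
print(data)  # 'GGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,'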
def get_sorter_by_args(model, args: list):
    """Get a list of SQLAlchemy order_by args from a list of arguments."""
    sorters = []
    for key in args:
        if key[0] == '-':
            sorters.append(getattr(model, key[1:]).desc())
        else:
            sorters.append(getattr(model, key))
    return sorters
d9572ad576d8980dd453835b80f36673f6ab4f16
697,462
def get_crash_arg() -> bytes:
    """
    This function returns the (pre-encoded) `password` argument to be sent
    to the `sudo` program. This data should cause the program to crash and
    generate a core dump. Make sure to return a `bytes` object and not a
    `str` object.

    WARNINGS:
    0. Don't delete this function or change its name/parameters - we are
       going to test it directly in our tests, without running the main()
       function below.

    Returns:
        The bytes of the password argument.
    """
    return (135 * b'A' + 4 * b'B')
f25bf1c66c5387076830e77077b97d9844244428
697,466
def ParseLine(line, new_target):
    """Parse one line of a GCC-generated deps file.

    Each line contains an optional target and then a list of space separated
    dependencies. Spaces within filenames are escaped with a backslash.
    """
    filenames = []
    if new_target and ':' in line:
        line = line.split(':', 1)[1]
    line = line.strip()
    line = line.rstrip('\\')
    while True:
        # Find the next non-escaped space
        line = line.strip()
        pos = line.find(' ')
        while pos > 0 and line[pos - 1] == '\\':
            pos = line.find(' ', pos + 1)
        if pos == -1:
            filenames.append(line)
            break
        filenames.append(line[:pos])
        line = line[pos + 1:]
    return filenames
ab0df773a9279f30b438ee7c325225a74574a588
697,470
def has_refseq(db_list):
    """
    Return the index of the list where the 'RefSeq' string is located.
    Otherwise return None.

    :param db_list: A list of db names taken as the first element of the
        tuples in a Swissprot.record.cross_references list
    :return: int: index or None
    """
    if 'RefSeq' in db_list:
        return db_list.index('RefSeq')
    else:
        return None
ffa175079e7e14e91fe9b8901c3d8cd60b200eea
697,472
import ipaddress

def prefix_to_network(prefix):
    """Convert an IP prefix to an IP-address and network mask."""
    ipaddr = ipaddress.ip_interface(prefix)  # turn into ipaddress object
    address = ipaddr.ip
    mask = ipaddr.netmask
    return address, mask
ac521405d5bdd90f082bc4d0e5f434b7a5b7f3f7
697,476
def deflatten(d, sep='.', maxdepth=-1):
    """Build a nested dict from a flat dict respecting a separator.

    >>> d_in = {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}
    >>> d = flatten(d_in)
    >>> for k, v in d.items(): print(k, v)
    a 1
    c.a 2
    c.b.x 5
    c.b.y 10
    d [1, 2, 3]
    >>> deflatten(d)
    {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
    >>> deflatten(d, maxdepth=1)
    {'a': 1, 'c': {'a': 2, 'b.x': 5, 'b.y': 10}, 'd': [1, 2, 3]}
    >>> deflatten(d, maxdepth=0)
    {'a': 1, 'c.a': 2, 'c.b.x': 5, 'c.b.y': 10, 'd': [1, 2, 3]}
    >>> d = flatten(d_in, sep='_')
    >>> for k, v in d.items(): print(k, v)
    a 1
    c_a 2
    c_b_x 5
    c_b_y 10
    d [1, 2, 3]
    >>> deflatten(d, sep='_')
    {'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y': 10}}, 'd': [1, 2, 3]}
    >>> deflatten({('a', 'b'): 'd', ('a', 'c'): 'e'}, sep=None)
    {'a': {'b': 'd', 'c': 'e'}}
    >>> deflatten({'a.b': 1, 'a': 2})
    Traceback (most recent call last):
    ...
    AssertionError: Conflicting keys! ('a',)
    >>> deflatten({'a': 1, 'a.b': 2})
    Traceback (most recent call last):
    ...
    AssertionError: Conflicting keys! ('a', 'b')
    """
    ret = {}
    if sep is not None:
        d = {tuple(k.split(sep, maxdepth)): v for k, v in d.items()}
    for keys, v in d.items():
        sub_dict = ret
        for sub_key in keys[:-1]:
            if sub_key not in sub_dict:
                sub_dict[sub_key] = {}
            assert isinstance(sub_dict[sub_key], dict), (
                f'Conflicting keys! {keys}'
            )
            sub_dict = sub_dict[sub_key]
        assert keys[-1] not in sub_dict, f'Conflicting keys! {keys}'
        sub_dict[keys[-1]] = v
    return ret
dc507bd9c167c1b2c9cb89605a424d235c11fb82
697,477
import click

def config_option(config_function):
    """Helper decorator that turns an option function into a cli option."""
    return (lambda function: click.option(
        '--' + config_function.__name__,
        help=config_function.__doc__.strip() + '. Example: "' + str(config_function()) + '"'
    )(function))
f6e8dd160bf6b1f38612cbf338140f6f94862644
697,479
import re

def strip_headers(pem_text):
    """
    Strips the headers off a FinCrypt key or message.

    :param pem_text: Text of key or message (string)
    :return: Tuple (header (i.e. 'BEGIN FINCRYPT MESSAGE'), base64 (string))
    """
    match = re.match(
        r'(?:-+ (BEGIN FINCRYPT (?:PUBLIC |PRIVATE )?(?:KEY|MESSAGE)) -+\n)([a-zA-Z0-9\n\-_=]+[^\n])'
        r'(?:\n-+ END FINCRYPT (?:PUBLIC |PRIVATE )?(?:KEY|MESSAGE) -+)',
        pem_text)
    if match is None:
        return None, None
    return match[1], match[2]
c00f3006f392bde2ed835905cdf0582e25049a9d
697,480
def _CreateAssetsList(path_tuples):
    """Returns a newline-separated list of asset paths for the given paths."""
    dests = sorted(t[1] for t in path_tuples)
    return '\n'.join(dests) + '\n'
bb57f014e68d398eb89468a7c86c972f03e47b7d
697,481
def discount_with_dones(rewards, dones, gamma):
    """
    Calculates discounted rewards. This is still valid if the episode
    terminated within the sequence.

    From OpenAI baselines' A2C.

    Parameters
    ----------
    rewards: list
        list of rewards, with the last value being the state value
    dones: list
        list of dones as floats
    gamma: float
        discount factor

    Returns
    -------
    list of discounted rewards
    """
    discounted = []
    r = 0
    # Discounted rewards are calculated on the reversed reward list.
    # That returns are calculated in descending order is easy to overlook
    # in the original pseudocode. When writing down an example of the
    # pseudocode, it is clear that r_t + gamma * V(s_tp1) is calculated
    # for each list element, and this is also what is done here.
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma * r * (1. - done)
        discounted.append(r)
    return discounted[::-1]
2feef30d209795414029cfc79aef2ea8e65220af
697,484
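A short worked example with my own numbers, gamma = 0.5: without dones each entry is r_t + 0.5 * G_{t+1}, and a done of 1.0 cuts the bootstrap:

print(discount_with_dones([1.0, 1.0, 1.0], [0.0, 0.0, 0.0], 0.5))
# [1.75, 1.5, 1.0]
print(discount_with_dones([1.0, 1.0, 1.0], [0.0, 1.0, 0.0], 0.5))
# [1.5, 1.0, 1.0]  (the done at t=1 stops discounting across the boundary)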
def ema(series, n):
    """
    Exponentially weighted moving average: the n-period exponentially
    weighted moving average of the series.

    Formula:
        ema(x, n) = 2 * x / (n + 1) + (n - 1) * ema(x, n).shift(1) / (n + 1)

    Notes:
        1. n must be >= 1.
        2. Bars closer to the present are given larger weights.

    Args:
        series (pandas.Series): data series
        n (int): period

    Returns:
        pandas.Series: exponentially weighted moving average series

    Example::

        from tqsdk import TqApi, TqSim, tafunc
        api = TqApi(TqSim())
        klines = api.get_kline_serial("CFFEX.IF1908", 24 * 60 * 60)
        ema = tafunc.ema(klines.close, 5)
        print(list(ema))
    """
    ema_data = series.ewm(span=n, adjust=False).mean()
    return ema_data
fcdf4a33821ba20d7026f30f4229ab7f62f32eda
697,485
from typing import Any
from typing import Dict

def _get_feature_row(features: Any, index: int) -> Dict[str, Any]:
    """Returns one row of the features table as a dictionary."""
    return {name: values.iloc[index] for name, values in features.items()}
8729d3f4c8adaa00fd219e2bb74d6ba138776793
697,491
def get_quarter_from_month(month: int):
    """Returns the quarter for a given month."""
    month_quarter_map = {1: 1, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2,
                         7: 3, 8: 3, 9: 3, 10: 4, 11: 4, 12: 4}
    return month_quarter_map[month]
ef7f3a83c3b4b75823e67bca9ce3f62dd1cae5cf
697,494
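The lookup table above is equivalent to a one-line arithmetic form; a hypothetical alternative, shown for comparison:

def get_quarter_from_month_arith(month: int) -> int:
    return (month - 1) // 3 + 1

assert all(get_quarter_from_month(m) == get_quarter_from_month_arith(m)
           for m in range(1, 13))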
def _precision(tp, fp):
    """Calculate precision from true positive and false positive counts."""
    if fp == 0:
        return 1  # by definition
    else:
        return tp / (tp + fp)
9bf80bc0ce2b657b1c8bbc8fb57e64e7df2081c9
697,496
def inference(model, X):
    """Run model inferences and return the predictions.

    Inputs
    ------
    model : ???
        Trained machine learning model.
    X : np.array
        Data used for prediction.

    Returns
    -------
    preds : np.array
        Predictions from the model.
    """
    return model.predict(X)
c9edd988933bff12fa4da14c9735f8f482e82ca8
697,497
from datetime import datetime

def now(frm: str = '%d/%m/%Y %H:%M:%S') -> str:
    """
    Return the current date and time.

    Args:
        frm (str): date and time format

    Returns:
        str: current date and time
    """
    return datetime.now().strftime(frm)
e21851f65de1e7640dc7e34f8f3761129052e1c3
697,498
def get_full_piece_id(piece):
    """
    Returns the rdb piece id, appending the movement id if it exists.

    :param piece: A piece as stored in the dataset
    :return: A string containing either the piece ID (no movement
        information) like "123", or the piece ID followed by a dash,
        followed by the movement ID, like "123-46123"
    """
    piece_id_only = piece["rdb_id_piece"]
    movement_id = str(piece.get("rdb_id_movement", ""))
    piece_id = str(piece_id_only)
    if movement_id:
        piece_id += "-" + movement_id
    return piece_id
7d9c0946d26035cdbf945f6d854913388818e8f9
697,501
import itertools

def maxgap(data, max_gap):
    """
    Arrange data into groups where successive elements differ by no more
    than *maxgap*.

    The input has to be a list of lists with the structure:
    [['id1', distance between start decay and last expression], ['id2', dist2], ..., ['idn', distn]]

    The output will be a list of lists with the identifiers clustered
    together by the distances, if the difference between them is less than
    the maxgap: [[id1, id2], [id3, ...]]

    Example:
        in:  [['id1', 1], ['id2', -1], ['id3', 2], ['id4', 80], ['id5', 81], ['id3', 82]]
        out: [['id1', 'id2', 'id3'], ['id4', 'id5', 'id3']]
    """
    # Sort the list by the second value (distances)
    data.sort(key=lambda x: x[1])
    # Separate lists
    identifiers = [x[0] for x in data]
    distances = [x[1] for x in data]
    # Cluster the distances by max gap and return the identifier groups
    groups = []
    for k, g in itertools.groupby(distances, key=lambda n: n // max_gap):
        i = len(list(g))
        groups.append(identifiers[:i])
        identifiers = identifiers[i:]
    return groups
fba36520d40263eafe25556304a8cbbaf8f724fb
697,502
def get_slice(img, ori, slc):
    """
    Extract one slice from a 3D numpy.ndarray image.

    :param img: np.array. input image to extract slice.
    :param ori: int. orientation to extract slice.
        1. sagittal. 2. coronal. 3. axial.
    :param slc: int. slice number to extract.
    :return: extracted 2D slice: np.array.
    """
    if ori == 1 and 0 <= slc < img.shape[0]:
        return img[slc, :, :]
    elif ori == 2 and 0 <= slc < img.shape[1]:
        return img[:, slc, :]
    elif ori == 3 and 0 <= slc < img.shape[2]:
        return img[:, :, slc]
    else:
        raise Exception('Invalid orientation or slice number')
dcf03eec0d16c68f55f701b2d312dbf4fa946ee7
697,503
def prune_multiple_copies_of_species(tree, g2s_map, s2g_map):
    """
    Removes all but one leaf with the same species label.

    Parameters
    ----------
    tree : treeswift tree object
    g2s_map : dictionary
        maps gene copy labels to species labels
    s2g_map : dictionary
        maps species labels to gene copy labels
    """
    found_duplicate = set([])
    nLMX = 0
    c = 0
    for leaf in tree.traverse_leaves():
        gene = leaf.get_label()
        species = g2s_map[gene]
        all_genes = s2g_map[species]
        if gene == all_genes[0]:
            leaf.set_label(species)
            nLMX += 1
        else:
            leaf.contract()
            if not (species in found_duplicate):
                found_duplicate.add(species)
                c += 1
    tree.suppress_unifurcations()
    return [nLMX, c]
a7b5b5a0b8af0ea157d5bbf1bbbfb1337dc7ee69
697,504
def get_link(task_id, tasks_df):
    """Return the task link from the task ID."""
    task = tasks_df.loc[int(task_id)]
    return task['info']['link']
a24c467f298526d2176747960430f5e283384043
697,507
def known_face_sentence(known_face_name):
    """
    Describe a known person.

    Example: Anuja is in front of you.

    :param known_face_name: name of the known person in the frame
    :return: sentence describing the person
    """
    return "%s is in front of you" % known_face_name
266da5bf129ba6844dbe0bd3cbcf9b4663100d1c
697,508
from typing import Union
from pathlib import Path

def ensure_path(path: Union[str, Path]) -> Path:
    """
    Ensure a string is converted to a Path.

    This is a more restrictive version of spaCy's
    [ensure_path](https://github.com/explosion/spaCy/blob/ac05de2c6c708e33ebad6c901e674e1e8bdc0688/spacy/util.py#L358)

    # Parameters

    path : `Union[str, Path]`
        If string, it's converted to Path.

    # Returns

    `Path`
    """
    if isinstance(path, str):
        return Path(path)
    return path
1f70ab426f6347399da73e854c0c2be9eee7843c
697,510
def tokenize_table_name(full_table_name):
    """Tokenize a BigQuery table_name.

    Splits a table name in the format of 'PROJECT_ID.DATASET_NAME.TABLE_NAME'
    to a tuple of three strings, in that order. PROJECT_ID may contain
    periods (for domain-scoped projects).

    Args:
        full_table_name: BigQuery table name, as
            PROJECT_ID.DATASET_NAME.TABLE_NAME.

    Returns:
        A tuple of project_id, dataset_name, and table_name.

    Raises:
        ValueError: If full_table_name cannot be parsed.
    """
    delimiter = '.'
    tokenized_table = full_table_name.split(delimiter)
    if not tokenized_table or len(tokenized_table) < 3:
        raise ValueError('Table name must be of the form '
                         'PROJECT_ID.DATASET_NAME.TABLE_NAME')
    # Handle project names with periods, e.g. domain.org:project_id.
    return (delimiter.join(tokenized_table[:-2]),
            tokenized_table[-2],
            tokenized_table[-1])
a092f749e18cdec41b50a8f1b7e1e5e99cd8e2e3
697,517
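A quick check of the period handling with my own sample names: everything before the last two dots is treated as the project:

print(tokenize_table_name("my-project.my_dataset.my_table"))
# ('my-project', 'my_dataset', 'my_table')
print(tokenize_table_name("domain.org:proj.my_dataset.my_table"))
# ('domain.org:proj', 'my_dataset', 'my_table')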
def linear_annuity_mapping_fprime(underlying, alpha0, alpha1):
    """linear_annuity_mapping_fprime
    First derivative of the linear annuity mapping function.
    See :py:func:`linear_annuity_mapping_func`.
    The function calculates the following formula:

    .. math::
        \\alpha^{\\prime}(S) := \\alpha_{0}

    where :math:`S` is underlying, :math:`\\alpha_{0}` is alpha0.

    :param float underlying:
    :param float alpha0:
    :param float alpha1: not used.
    :return: value of first derivative of linear annuity mapping function.
    :rtype: float.
    """
    return alpha0
ff57723cad7ade65644744dc30abb6db5c1e6b95
697,518
def third_bashforth(state, tendencies_list, timestep):
    """Return the new state using third-order Adams-Bashforth.

    tendencies_list should be a list of dictionaries whose values are
    tendencies in units/second (from oldest to newest), and timestep should
    be a timedelta object.
    """
    return_state = {}
    for key in tendencies_list[0].keys():
        return_state[key] = state[key] + timestep.total_seconds() * (
            23. / 12 * tendencies_list[-1][key]
            - 4. / 3 * tendencies_list[-2][key]
            + 5. / 12 * tendencies_list[-3][key]
        )
    return return_state
6b1a62b94c662a1b14eafa3be6953e73486b6cfd
697,519
from collections import OrderedDict  # collections, not typing, so the type check below matches instances

def getAttrFromList(objList, attr):
    """
    Given a list of objects in objList, each having the attribute attr,
    return a list comprising the value of attr for each object in objList.

    Return: A list of values.

    Parameters:
        * objList: The list of objects
        * attr: The attribute they have in common
    """
    values = []
    for o in objList:
        if type(o) == dict or type(o) == OrderedDict:
            values.append(o.get(attr, None))
        else:
            values.append(getattr(o, attr))
    return values
bef6386afff15a0d6a78d92f02878a47171dd9d7
697,521
def remove_control_char(pdf_content):
    """
    Remove control characters (newlines, tabs, escape characters, etc.);
    python-docx does not support writing control characters.

    :param pdf_content: PDF file content
    :return: content with control characters removed
    """
    # Use str.translate to remove all characters with ASCII codes below 32.
    # http://ascii.911cha.com/
    return pdf_content.translate(dict.fromkeys(range(32)))
84dff48a5654b12f7446f77cbc4d132716d2018c
697,523
def confirm(question: str) -> bool:
    """Ask a confirmation to the user in the console.

    Args:
        - question: the question to ask the user.

    Returns:
        True if the user answered 'yes' or 'y', False otherwise.
    """
    input_ = input(question + " Enter (y)es or (n)o: ")
    return input_ == "yes" or input_ == "y"
bd6415658d4c7adf73d682fb151dc8aecc7eaf6d
697,524
def make_safe_label(label):
    """Avoid name clashes with GraphViz reserved words such as 'graph'."""
    unsafe_words = ("digraph", "graph", "cluster", "subgraph", "node")
    out = label
    for word in unsafe_words:
        out = out.replace(word, "%sX" % word)
    return out.replace(".", "__").replace("*", "")
e122575956e492cbb450287a6b91174fdc59233a
697,526
def load_bands(src, bands, masked=False):
    """
    Load selected bands of a raster as an array.

    params:
        src : rasterio.DatasetReader object
        bands : list
            list of bands to load, e.g. [1, 2, 3]
        masked : bool (default: False)
            if True, exclude nodata values

    return:
        tuple: array, metadata
    """
    arr = src.read(bands, masked=masked)
    metadata = src.profile
    metadata.update({
        'driver': 'GTiff',
        'count': len(bands)})
    return arr, metadata
e0c131ca93066387ae12f9bc6206626d4313cdf7
697,527
def is_in_tol(value, expected_value, tolerance):
    """Is the provided value within expected_value +/- tolerance?

    Parameters
    ----------
    value : int, float
        Value of interest
    expected_value : int, float
        Expected value
    tolerance : int, float
        Allowed deviation from expected_value

    Returns
    -------
    bool
        True if value is within expected_value +/- tolerance, exclusive
    """
    return expected_value + tolerance > value > expected_value - tolerance
b1cde33b7979995b86e3ecd4cd41330cfa31c447
697,529
def merge(left, right):
    """
    Merge two sorted arrays into a resulting array of size
    len(left) + len(right).
    """
    result = []
    while len(left) != 0 and len(right) != 0:
        if left[0] < right[0]:
            result.append(left.pop(0))
        else:
            result.append(right.pop(0))
    if len(left) == 0:
        result += right
    else:
        result += left
    return result
9a962735cc157a135c4d65b4f18c5e55d8e182d2
697,531
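This is the merge step of mergesort; note it consumes its inputs via pop(0). A quick check with my own lists:

print(merge([1, 4, 9], [2, 3, 10]))  # [1, 2, 3, 4, 9, 10]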
def loglines(logspath):
    """
    Read logging lines generated by a test case.

    Parameters
    ----------
    logspath : str
        Path to which test case wrote logs.

    Returns
    -------
    Iterable of str
        Logs from the test case.
    """
    with open(logspath, 'r') as logfile:
        return logfile.readlines()
a30680a863fba0b1098536404021b4d32b31abff
697,534
import re

def read_best_values(file_list):
    """Reads the best hypervolume values from files in file_list, where each
    is formatted as a C source file (starts to read in the next line from
    the first encountered 'static').

    Returns a dictionary containing problem names and their best known
    hypervolume values.

    :param file_list: list of file names
    """
    result = {}
    for file_name in file_list:
        read = False
        with open(file_name, 'r') as f:  # closed by the context manager
            for line in f:
                if read:
                    if line[0:2] == '};':
                        break
                    split = re.split(',|\"|\t| |\n', line)
                    entries = [item for item in split if item]
                    result.update({entries[0]: entries[1]})
                elif line[0:6] == 'static':
                    read = True
    return result
9f9088931f64dbeb463223b62c871c1462561ca6
697,537
def GetGems(gem_counts):
    """Returns a list of GemTypes given a dict from GemType to counts."""
    gems_list = []
    for gem_type, count in gem_counts.items():  # iteritems() in the original Python 2 code
        if count < 0:
            raise NotImplementedError("count cannot be negative")
        for _ in range(count):
            gems_list.append(gem_type)
    return gems_list
08b4f7e669cbf2a0fb621449a2d845500637c4e1
697,539
def _get_obj_name(obj):
    """Auxiliary function to retrieve the name of an object."""
    name = str(obj).replace("class ", "")
    return name.translate(str.maketrans({c: "" for c in "(<>),'"}))
92e3d774036a1f0d2bab9bdc60ae9e6582b8aa40
697,540
def uniform_heuristic(item):
    """Taking in an item, return 1."""
    return 1
bf1b25c96d25c56852446e36c6653c141a6f6460
697,541
def can_read_deleted(context):
    """Indicates if the context has access to deleted objects."""
    if not context:
        return False
    return context.read_deleted
58b3025f133ee6bb8a1cc17c410bc1b0ea43c3b5
697,542
import requests

def get_google_dot_com(use_ssl=True):
    """Return the contents of Google.com.

    :param use_ssl: Use SSL? You should! Don't turn this off. You've been
        warned. Scamp!
    """
    protocol = "https" if use_ssl else "http"
    return requests.get(f"{protocol}://www.google.com/").text
246420a67f5706fab2bf3d13bb6bb9e1bb7d00d7
697,543
import torch

def get_gpu_or_cpu_number(device):
    """Returns the GPU number on which the tensors will be run.
    Returns -1 if the CPU is used.
    """
    if 'cuda' in device:
        if not torch.cuda.is_available():
            raise RuntimeError("Torch cuda check failed, your drivers might not be correctly installed")
        gpu = device.split(":")
        if len(gpu) > 1:
            gpu_n = int(gpu[1])
        else:
            gpu_n = 0
    else:
        gpu_n = -1  # i.e., tensors are CPU based
    return gpu_n
5a16538e8339747d459ac94467278017b8294f21
697,544
def map_calculated_scores_to_user(predictions, user):
    """
    This function replaces the previous scores (only 0's in production)
    with the computed scores.

    :param predictions: the list of prediction scores
    :param user: the user with its (original) scores
    :return: the user with its new scores
    """
    for row in predictions:
        user.scores[row[0]] = row[1]
    return user
23a1dcc077cab2f5f27750c660abbab09bf0ff4c
697,546
from pathlib import Path

def check_db_dir(parent: Path):
    """Checks whether the db directory exists. If not, create it.

    Args:
        parent (Path): the parent to both server_settings and db

    Returns:
        Path: the directory itself

    Raises:
        Exception: the db dir path exists, but it isn't a directory
    """
    db_dir = parent / 'db'
    if db_dir.exists() and not db_dir.is_dir():
        raise Exception(f"""{db_dir} exists, but it isn't a directory!""")
    elif not db_dir.exists():
        db_dir.mkdir()
    return db_dir
3f7c0197dc7d7b04c0864d479ac834518815aade
697,547
from bs4 import BeautifulSoup

def read_XML_file(filename):
    """Reads the file with the specified filename and returns the parsed XML tree.

    :param filename: path to file with urls
    :return: parsed XML tree contained in the specified file
    """
    with open(filename) as xml:
        soup = BeautifulSoup(xml, "lxml-xml")
    return soup
025b45453ef812d6d91d8386dde33f8e59882ce1
697,550
def iterkeys(obj, **kwargs):
    """Iterate over dict keys in Python 2 & 3."""
    return (obj.iterkeys(**kwargs) if hasattr(obj, 'iterkeys')
            else iter(obj.keys(**kwargs)))
03787c0e8bb493c721871990c4068144782370e2
697,552