content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def wafv2_custom_body_response_content(content):
    """Validate a WAFv2 custom-body response content string.

    Property: CustomResponseBody.Content — any characters, length 1..10240.
    Returns the content unchanged; raises ValueError when invalid.
    """
    if not content:
        raise ValueError("Content must not be empty")
    max_length = 10240
    if len(content) > max_length:
        raise ValueError("Content maximum length must not exceed 10240")
    return content
a3d5df6a385422ff783ec4adbfcfb9132a11feba
557,623
def cell_volume(a1, a2, a3):
    """Return the primitive-cell volume |a1 . (a2 x a3)|."""
    cross = (
        a2[1] * a3[2] - a2[2] * a3[1],
        a2[2] * a3[0] - a2[0] * a3[2],
        a2[0] * a3[1] - a2[1] * a3[0],
    )
    triple_product = a1[0] * cross[0] + a1[1] * cross[1] + a1[2] * cross[2]
    return abs(float(triple_product))
b9421ea1ad230cbe876968980fe7f1580ecde1d9
381,656
def preproc(unclean_batch_x):
    """Scale values into the range 0-1 by dividing by the batch maximum."""
    peak = unclean_batch_x.max()
    return unclean_batch_x / peak
ef84265c0e6fe244ce2367514d0f4b901d8f958b
562,343
def get_file_contents(file_path):
    """Return the entire contents of the UTF-8 text file at `file_path`."""
    with open(file_path, encoding="utf-8") as handle:
        return handle.read()
4e00422c05de3b90e3457b8d468514957c28b5f3
625,560
def dot_index(index, data):
    """Resolve a dot-separated key path against nested dicts (lists are mapped over).

    Args:
        index: a string with the dot-index key (may be empty).
        data: a dict with the index.

    Returns:
        A list with the result (or dict items for a dict result).

    Example:
        >>> data = {'apa': {'bepa': {'cepa': 'depa'}}}
        >>> list(dot_index('apa', data))
        [('bepa', {'cepa': 'depa'})]
        >>> list(dot_index('apa.bepa.cepa', data))
        ['depa']
    """
    if index:
        for part in index.split('.'):
            if isinstance(data, list):
                data = [element[part] for element in data]
            else:
                data = data[part]
    if isinstance(data, list):
        return data
    return data.items() if isinstance(data, dict) else [data]
0891b65d7c15438c0581295d3d7d8e4931da50f9
486,583
def isfloat(value):
    """
    Check input for float conversion.

    :param value: input value
    :type value: str
    :return: True when `value` converts to float, False otherwise
    """
    try:
        float(value)
    except Exception:
        return False
    return True
59889102a8855338b0149feb9f96e0c0b4fea52c
327,397
import ipaddress


def address_to_bin(address, split=None):
    """Convert a dotted-decimal (IPv4) or hex-grouped (IPv6) address to a
    grouped binary-string representation.

    :param address: The IP address
    :type address: ipaddress.IPv4Address or ipaddress.IPv6Address or str
    :param int split: Bit position at which to insert a space
    :return: A binary representation of the IP address
    :rtype: str
    """
    # Fix: the `six` compatibility shim is unnecessary on Python 3 —
    # `six.string_types` is just `str` and `six.u` is the identity.
    if isinstance(address, str):
        address = ipaddress.ip_address(address)
    if address.version == 4:
        bits_total, bits_step, bits_sep = 32, 8, '.'
    else:
        bits_total, bits_step, bits_sep = 128, 16, ':'
    bits = bin(int(address))[2:].zfill(bits_total)
    groups = [bits[p:p + bits_step] for p in range(0, bits_total, bits_step)]
    out = bits_sep.join(groups)
    if split:
        # Shift the split point past the separators that precede it.
        split += split // bits_step
        out = out[:split] + ' ' + out[split:]
    return out
3c668221f407211cc1eaed0f157006248bdf8892
215,371
import re


def is_base64(string: str) -> bool:
    """
    Verifies if `string` is encoded in Base 64.

    Parameters
    ----------
    string: str
        String object to be verified

    Returns
    ----------
    bool
        True, if `string` complies with Base 64 format, False, otherwise
    """
    # Fix: the previous pattern required at least one unpadded 4-char group
    # before any padding, wrongly rejecting short valid values such as
    # "QQ==" or "abc=". Valid base64 is zero or more 4-char groups followed
    # by one final group that may carry '=' padding.
    pattern = (
        r"^(?:[A-Za-z0-9+/]{4})*"
        r"(?:[A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$"
    )
    return re.match(pattern, string) is not None
9fb176814dade945e40af2c72640fcda865651c9
311,172
def has_preview(song_data):
    """
    Return True if a song's Spotify metadata contains a valid preview URL.

    Inputs:
        song_data : json/dict
            track metadata object from Spotify API
            (ex: output of call to spotipy.track(track_id))
    Outputs: bool
    """
    url = song_data['preview_url']
    # Strict `type is str` check (not isinstance) preserved from the original.
    return type(url) is str and 'p.scdn.co' in url
852ebb6a5e385e3dfc3d3b1eaf82ff9f6a1b6907
454,540
def time_fmt(secs):
    """
    Return a string representing the number of seconds, in a human
    readable unit (seconds below 120s, minutes below 1h, hours above).
    """
    if secs >= 3600:
        return "{}h".format(secs // 3600)
    if secs >= 120:
        return "{}m".format(secs // 60)
    return "{}s".format(int(secs))
aef5f7fc548939dd7f285115d2af2a7b166186b3
295,958
def get_spatial_dim(tensor):
    """
    Return the number of spatial dims of the input tensor
    (its rank minus the batch and channel dimensions).

    Parameters
    ----------
    tensor : tf.Tensor

    Returns
    -------
    dim : int
    """
    full_shape = tensor.get_shape().as_list()
    return len(full_shape) - 2
75dcf6e50a9901ed2f6e6caa465e7455e9b5577b
369,942
def endswith(suffix):
    """
    Create a function that checks if its argument ends with `suffix`.

    :param str suffix: string to check the end for
    :return func: predicate that tests whether its argument ends with `suffix`
    """
    def string_ends_with(candidate):
        # Unbound-method call keeps the original strict str-only behavior.
        return str.endswith(candidate, suffix)

    return string_ends_with
ad6a28cc5671a8bac323ba2a45faaeae5cc8569e
523,118
def make_swagger_name(group: str, version: str, name: str) -> str:
    """
    Create a properly formatted swagger name for an object.

    :param group: string; group that the object belongs to (may be None)
    :param version: string; version that the object belongs to
    :param name: string: name of the object (class)
    :return: a single dotted string combining the inputs; can be fed to
        process_swagger_name() to recover the original parts.
    """
    if group is None:
        return f"{version}.{name}"
    return f"{group}.{version}.{name}"
fe2a4331477f7cfc6daae146537edb6884ed535a
54,914
def _to_hass_brightness(brightness): """Convert percentage to Home Assistant brightness units.""" return int(brightness * 255)
df05529683a403b9ff542ec7001bc48975034f32
334,888
def contained_point(point, mask):
    """Return True if `point` (x, y) lies strictly inside the mask's bounds."""
    x, y = point[0], point[1]
    inside_x = mask.x1 < x < mask.x2
    inside_y = mask.y1 < y < mask.y2
    return inside_x and inside_y
426a1ae8953867e98ad94b07070fbd9992098df7
614,787
def css_check(css_class):
    """
    Check whether a CSS class is something a command can act on.
    Posts in the "meta" or "community" classes are not acted upon.

    :param css_class: The css_class of the post.
    :return: True if the post can be worked with, False otherwise.
    """
    return css_class not in ("meta", "community")
58ffcc05e4636985d54d91c708caa3087690da90
530,818
def endfSENDLineNumber():
    """Indicates the end of an ENDF data section for one (MF, MT) pair."""
    return 99999
bd910343d6800bbb608774cdf6a62585d32dc54f
651,655
def bytes_to_str(value):
    """Return UTF-8 formatted string decoded from a bytes-like object."""
    raw = bytes(value)
    return raw.decode('UTF-8')
8e43de7f21cbcde55b79f3ea118b7e29b4e78aa1
657,526
def context(context, request_headers): """Return the Context object for passing to SecurityView.__call__().""" context.host = "via.hypothes.is" context.proxied_url = "https://example.com/foo" context.headers = [] context.get_header.side_effect = request_headers.get context.query_params = {} return context
64cc1d5bdc7df06f96e84e3a003a2d5d5d4578de
496,822
def clamp_band(data, min, max):
    """Clamp band values to the closed range [min, max], in place.

    Parameters
    ----------
    data: np.array
        band data (modified in place)
    min: Number
    max: Number
        Min and max values to clamp to

    Returns
    -------
    np.array
        the same band data array

    Note: parameter names shadow the builtins; kept for caller compatibility.
    """
    too_low = data < min
    too_high = data > max
    data[too_low] = min
    data[too_high] = max
    return data
82f7514851d7420f0300206b8734b403b2d4b4ce
180,032
import pathlib


def about_package(init_posixpath: pathlib.Path) -> dict:
    """
    Return package information defined with dunders in __init__.py as a
    dictionary, when provided with a PosixPath to the __init__.py file.
    """
    info = {}
    for line in init_posixpath.read_text().strip().split("\n"):
        if not line.startswith("__"):
            continue
        parts = line.split(" = ")
        info[parts[0]] = parts[1].strip('"')
    return info
4e43c305ecf784e8355d2ab9ac735c18b7346881
627,624
def is_profile_bare_os(profile):
    """Check if profile is set up as a bare-os profile.

    The 'bare_os' field is optional; only an explicit True counts.
    """
    return profile.get('bare_os') is True
e4a92172b9bd4793c7b3c7e2cd16e144dd58bbbb
492,685
def SuppressBlanks(text):
    """
    Replace successive blanks with a single blank.

    :param str text: string
    :return: string
    :rtype: str
    """
    return ' '.join(text.split())
42bedaf7f7601cffdad2410a119bef1506226c08
567,914
def even_or_odd(x=0):
    """Find whether a number x is even or odd.

    >>> even_or_odd(10)
    '10 is Even!'
    >>> even_or_odd(5)
    '5 is Odd!'

    whenever a float is provided, then the closest integer is used:
    >>> even_or_odd(3.2)
    '3 is Odd!'

    in case of negative numbers, the positive is taken:
    >>> even_or_odd(-2)
    '-2 is Even!'
    """
    parity = "Even" if x % 2 == 0 else "Odd"
    return "%d is %s!" % (x, parity)
bac5431d26eb4f0512354431bb03b1ba5d815cfb
359,433
import hashlib import hmac def _salted_hmac(key_salt, value, secret): """ Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a secret. A different key_salt should be passed in for every application of HMAC. Lifted from: http://code.djangoproject.com/svn/django/trunk/django/utils/crypto.py """ # We need to generate a derived key from our base key. We can do this by # passing the key_salt and our base key through a pseudo-random function and # SHA1 works nicely. key = hashlib.sha1(key_salt + secret).digest() # If len(key_salt + secret) > sha_constructor().block_size, the above # line is redundant and could be replaced by key = key_salt + secret, since # the hmac module does the same thing for keys longer than the block size. # However, we need to ensure that we *always* do this. return hmac.new(key, msg=value, digestmod=hashlib.sha1)
f91334cdd3c783475ed0f784ec74b2467591ca69
254,294
def extract_tag_indexes(tag_seq, b_tag, i_tag):
    """Return (index of b_tag, index of last matching i_tag).

    The i_tag index falls back to the b_tag index when the b_tag sits at the
    end of the sequence or is immediately followed by "O" (single-token
    span); it stays -1 when no i_tag exists after the b_tag.
    """
    b_idx = tag_seq.index(b_tag)
    # B tag at the very end: nothing can follow it.
    if b_idx == len(tag_seq) - 1:
        return b_idx, b_idx
    # B tag immediately followed by "O": treat as a single-token span.
    if tag_seq[b_idx + 1] == "O":
        return b_idx, b_idx
    last_i = -1
    for pos in range(b_idx + 1, len(tag_seq)):
        if tag_seq[pos] == i_tag:
            last_i = pos  # keep the index of the LAST i_tag
    return b_idx, last_i
6432d66fff72d43da0783f0dfc909cb001f42929
584,304
def merge(numbs1, numbs2):
    """
    Merge two sorted arrays into one sorted array.

    Walk both arrays from left to right, repeatedly appending the smaller
    head element; equal elements are taken from numbs1 first, so duplicates
    are preserved (standard merge). Remaining elements of the longer array
    are appended at the end.

    :param numbs1: The first sorted array to be merged
    :param numbs2: The second sorted array to be merged
    :return: The sorted merged array
    """
    # Fix: the original had an unreachable "equal -> advance both" branch
    # (the first `<=` comparison already consumes equal elements from
    # numbs1) and a docstring describing that dead behavior.
    merged = []
    i = j = 0
    while i < len(numbs1) and j < len(numbs2):
        if numbs1[i] <= numbs2[j]:
            merged.append(numbs1[i])
            i += 1
        else:
            merged.append(numbs2[j])
            j += 1
    # Append whatever is left of either input.
    merged.extend(numbs1[i:])
    merged.extend(numbs2[j:])
    return merged
2595f5bea09004461cf6e3e984455d260adf87bf
87,178
def greaterD(ef, gh):
    """Return True if pair ef has greater margin than pair gh.

    A pair has the form (pref[e,f], pref[f,e]).

    Schulze (page 154): "(N[e,f],N[f,e]) >_win (N[g,h],N[h,g]) if and only
    if at least one of the following conditions is satisfied:
    1. N[e,f] > N[f,e] and N[g,h] <= N[h,g].
    2. N[e,f] >= N[f,e] and N[g,h] < N[h,g].
    3. N[e,f] > N[f,e] and N[g,h] > N[h,g] and N[e,f] > N[g,h].
    4. N[e,f] > N[f,e] and N[g,h] > N[h,g] and N[e,f] = N[g,h]
       and N[f,e] < N[h,g]."
    """
    nef, nfe = ef
    ngh, nhg = gh
    ef_wins = nef > nfe
    gh_wins = ngh > nhg
    if ef_wins and ngh <= nhg:
        return True
    if nef >= nfe and ngh < nhg:
        return True
    if ef_wins and gh_wins:
        if nef > ngh:
            return True
        if nef == ngh and nfe < nhg:
            return True
    return False
a7e63f7aff655aa085389a34f4ad873b5d3e9d9b
318,769
def add_multiples(maximum):
    """Add all numbers that are multiples of 3 and 5 below the maximum."""
    return sum(n for n in range(maximum) if n % 3 == 0 or n % 5 == 0)
0724e049e6157f463c7a127dca27856eaddb20f3
608,796
def avg_Q(q_S, q_D):
    """
    Return total transmission capacity of a station, assuming transmission
    is evenly distributed over the hexagonal cell.

    Parameters
    ----------
    q_S : float
        channel capacity in Mbps
    q_D : float
        data demand rate in Mbps

    Returns
    -------
    q : float
        average demand speed, in Mbps, based on Shannon-Hartley Theorem
    """
    return q_S - q_D
db78680a57319d642e367fe8fbacdbf8f31648ff
615,427
def kansai_gas_nattoku(contract, power):
    """
    Compute the electricity fee for Kansai Electric Power's
    "Nattoku Denki (Nattoku Pack)" plan.
    https://kepco.jp/ryokin/menu/nattokudenki/

    Parameters
    ----------
    contract : None
        Contracted amperage (unused by this plan).
    power : float
        Power used since the previous meter reading (kWh).

    Returns
    -------
    fee : int
        The electricity fee.
    """
    # Flat minimum charge for very low usage.
    if power <= 15:
        return 285
    if power <= 120:
        rate = 20.31
    elif power <= 300:
        rate = 24.10
    else:
        rate = 27.80
    return int(rate * power)
67e495f0c536de76c2e3b47735add907a982c1fa
301,009
import re


def commas(line):
    """Return the location of all the commas in an input string or line."""
    return [position for position, char in enumerate(line) if char == ',']
c5327e3c34336db3c64acfa391da179018276b3b
44,462
def make_list(length):
    """Create a list of `length` None placeholders.

    (Repeating the immutable None singleton is safe — no shared-reference
    issues arise.)
    """
    return [None] * length
f87fb65e3e0fcc2961abebe20ac20e9bba4e28cb
589,026
def max_farmers(collection):  # pragma: no cover
    """Return the maximum number of farmers recorded in the collection (0 if empty)."""
    top = 0
    # Sort descending on total_farmers and take only the first document.
    cursor = collection.find({}).sort([('total_farmers', -1)]).limit(1)
    for doc in cursor:
        top = doc['total_farmers']
    return top
9f91774d0fe36fc4299db938e11db45d080ed5c1
690,882
def sigmoid(x, a, c, fw):
    """Sigmoid function with scale parameter a and horizontal shift c,
    evaluated at point x using the framework fw (numpy or theano.tensor)."""
    exponent = -1. * a * (x - c)
    return 1. / (1. + fw.exp(exponent))
94ddc51e486e23619dac15870659eb02032b4248
548,248
import re


def handle_gutenberg(text: str) -> str:
    """Removes Project Gutenberg boilerplate from text.

    :param text: A Project Gutenberg document.
    :return: The input text document without the Gutenberg boilerplate.
    """
    # The front boilerplate ends with something like:
    #   *** START OF THIS PROJECT GUTENBERG EBOOK FRANKENSTEIN ***
    # Non-greedy so we stop at the first "***" closing the marker instead of
    # swallowing text if "***" also appears later in the body.
    start_pattern = re.compile(
        r"\*\*\* START OF THIS PROJECT GUTENBERG.*?\*\*\*",
        re.IGNORECASE | re.UNICODE | re.MULTILINE)
    hit = start_pattern.search(text)
    if hit:
        text = text[hit.end():]
    else:
        # Fallback: some editions only carry a Copyright line up front.
        fallback_pattern = re.compile(
            r"Copyright.*\n\n\n", re.IGNORECASE | re.UNICODE)
        hit = fallback_pattern.search(text)
        if hit:
            text = text[hit.end():]
    # The trailing boilerplate starts with "End of ... Project Gutenberg".
    end_pattern = re.compile(
        r"End of.*?Project Gutenberg",
        re.IGNORECASE | re.UNICODE | re.MULTILINE)
    hit = end_pattern.search(text)
    if hit:
        text = text[:hit.start()]
    return text
3a11ade12113b5580b8ade830cd4ade9190f15d1
495,998
from string import printable


def unprintable(mystring):
    """Return only the unprintable characters of a string."""
    kept = [char for char in mystring if char not in printable]
    return ''.join(kept)
bb48580d525d1e829f5b4b33cd4c0e540aa3a21a
34,516
def risk_to_size(capital_size: float, risk_percentage: float, risk_per_qty: float, entry_price: float) -> float:
    """
    Calculate the position size based on the risk percentage you're
    willing to take, capped at the available capital.

    example: round(risk_to_size(10000, 1, 0.7, 8.6)) == 1229

    :param capital_size:
    :param risk_percentage:
    :param risk_per_qty:
    :param entry_price:
    :return: float
    """
    if risk_per_qty == 0:
        raise ValueError('risk cannot be zero')
    risk_percentage /= 100
    risk_amount = risk_percentage * capital_size
    size = (risk_amount / risk_per_qty) * entry_price
    return min(size, capital_size)
502d7ac41afb603b3e1c66ec24ae8735e9b667d8
272,063
def format_rotor_key_defs(rotor_keyword_vals):
    """
    Format the 'Group', 'Axis', and 'Symmetry' keyword values used to
    define hindered/internal rotors in MESS input files.

    :param rotor_keyword_vals: 0-based values for some rotor keyword
    :type rotor_keyword_vals: list(int)
    :return rotor_keyword_str: MESS-format string of 1-based values
    :rtype str
    """
    # Each value is converted to 1-based and left-aligned in a 4-char field.
    return ''.join('{0:<4d}'.format(val + 1) for val in rotor_keyword_vals)
6c8d8f3e3674b6541a5e02796f633be58ebf5c90
362,909
import re


def separate_words(name):
    """Convenience function for inserting spaces into CamelCase names."""
    camel_boundary = re.compile(r"(.)([A-Z])")
    return camel_boundary.sub(r"\1 \2", name)
a2c2db19d9eddf94edd846f0752ca237cb99e441
687,685
import inspect


def getfile(fn):
    """
    Return the file where the function is defined.
    Works even in wrapped (decorated) functions.
    """
    # Unwrap decorator chains iteratively instead of recursing.
    while hasattr(fn, '__wrapped__'):
        fn = fn.__wrapped__
    return inspect.getfile(fn)
44d9021c4628966f9f314ab90ee36066a9500962
607,553
from typing import Dict from typing import List def _create_key_value_pair_list(input_dict: Dict[str, str]) -> List[str]: """ Helper to create name=value string list from dict Filters "ANY" options. """ res_list: List[str] = [] if not input_dict: return res_list for name, value in input_dict.items(): value = str(value) if value.lower() == "any": continue res_list.append(name + "=" + value) return res_list
52fe113c3486a133cb39480742cfa0386633b4b5
470,109
import re


def check_string_capitalised(string):
    """Check whether a string is entirely CAPITAL letters (underscores allowed). Boolean."""
    all_caps_pattern = '^[A-Z_]+$'
    return bool(re.match(all_caps_pattern, string))
f496d79fafae4c89c3686856b42113c4818f7ed8
708,880
def get_event_ends(T_part, n_repeats):
    """Compute the end points for an event sequence of length T_part with k repeats.

    - event ends need to be removed for prediction accuracy calculation,
      since there is nothing to predict there
    - event boundaries are defined by these values

    Parameters
    ----------
    T_part : int
        the length of an event sequence (one repeat)
    n_repeats : int
        number of repeats

    Returns
    -------
    list of int
        the end points of event seqs
    """
    ends = []
    for rep in range(n_repeats):
        ends.append(T_part * (rep + 1) - 1)
    return ends
1080784ad0b448d326b257f74e9fe9f857eef37c
290,534
def default_init_params(output_min, output_max):
    """Returns default initialization bounds depending on layer output bounds.

    Args:
        output_min: None or minimum layer output.
        output_max: None or maximum layer output.
    """
    unbounded = output_min is None and output_max is None
    return (0.5, 1.5) if unbounded else (0.0, 1.0)
1db53b3b9b887a53165ac17d5f822df1fd8294e9
154,011
def get_standard_file(standard):
    """Map a MISRA standard code to its guidelines CSV file (None if unknown)."""
    standard_files = {
        "C2004": "misra-c2004-guidelines.csv",
        "C2012": "misra-c2012-guidelines.csv",
        "CPP2008": "misra-cpp2008-guidelines.csv",
    }
    return standard_files.get(standard)
6416bd8a995682eeb90b69bc4370bd984771f10f
289,099
import json


def meta_serialize(metadata):
    """
    Serialize non-string metadata values before sending them to Nova.

    :param metadata: dict of metadata; non-str values are JSON-encoded.
    :return: dict with all values as strings.
    """
    # Fix: `six.string_types` is just `str` on Python 3 — the six
    # compatibility shim is an unnecessary dependency here.
    return {
        key: value if isinstance(value, str) else json.dumps(value)
        for key, value in metadata.items()
    }
a2ef8762c9d1b3d78dc401996392df0985c1c226
699,251
def does_not_contain_arxiv(value: object) -> float:
    """Score 1.0 when `value` is a string not containing 'arxiv'; 0.0 otherwise."""
    if isinstance(value, str) and 'arxiv' not in value:
        return 1.
    return 0.
2c0aa71a6d6dc3772e0a4d5d23c18045cfb3f7e8
161,627
def get_initials(pname):
    """
    Splits name on spaces and returns the first letter from each part.

    Empty parts (from consecutive, leading or trailing spaces) are skipped —
    previously they raised IndexError on `part[0]`.

    :param pname: str, player name
    :return: str, player initials
    """
    return ''.join(part[0] for part in pname.split(' ') if part)
282ae97ca4033a40f1bc5d2133bb50121bfdbdbd
272,574
def get_file_name(args) -> str:
    """Creates the base file name for this experiment from the command line arguments."""
    # An explicit --override_name wins outright, but cannot be combined
    # with --name_suffix.
    if args.override_name is not None and args.override_name != "":
        if args.name_suffix != "":
            raise ValueError("Cannot set --override_name and --name_suffix.")
        return args.override_name
    # Split tag: "S" when the split fix is disabled, "SF" otherwise.
    split_prefix = "S" if args.disable_split_fix else "SF"
    # Batch-size tag: "BSn" marks no minibatching; an encoder minibatch
    # size (if any) is appended as "_EBS<k>".
    batch_size_text = "BSn" if args.minibatch_size is None else f"BS{args.minibatch_size}"
    if args.encoder_minibatch_size is not None:
        batch_size_text += f"_EBS{args.encoder_minibatch_size}"
    # Mode-specific name layout (IWAE adds K and optional dreg; CIWAE adds
    # K and beta; VI has neither).
    if args.mode == "IWAE":
        dreg_text = "_dreg" if args.use_dreg else ""
        file_name = (
            f"{args.dataset}_{args.configuration}_{args.mode}_{split_prefix}{args.split}"
            f"_K{args.num_IW_samples}_{batch_size_text}{dreg_text}"
        )
    elif args.mode == "VI":
        file_name = f"{args.dataset}_{args.configuration}_{args.mode}_{split_prefix}{args.split}_{batch_size_text}"
    elif args.mode == "CIWAE":
        file_name = (
            f"{args.dataset}_{args.configuration}_{args.mode}_{split_prefix}{args.split}"
            f"_K{args.num_IW_samples}_B{args.beta}_{batch_size_text}"
        )
    else:
        raise ValueError(f"Unknown mode: {args.mode}")
    # Optional trailing tags: natural-gradient opt-out and user suffix.
    if not args.use_nat_grad_for_final_layer:
        file_name += "_finvarnotnat"
    if args.name_suffix != "":
        file_name += f"_{args.name_suffix}"
    return file_name
47dd967c03ea9d9f8fd2d3a75462dc97911b223f
566,434
def differentiate_frames(src):
    """Subtract every two consecutive frames (frame[t] - frame[t+1])."""
    leading = src[:-1]
    trailing = src[1:]
    return leading - trailing
2fd69cb9850529639cfa02bfbd7d0239bcd2c3e8
323,059
def rescaleInput(input):
    """Scale the input's elements down to the range 0.1 to 1.0.

    (Parameter name shadows the builtin; kept for caller compatibility.)
    """
    scaled = 0.99 * input / 255.0
    return scaled + .1
06f097e2f8dafaaffda395ef590a1ac5cb7b78dd
89,025
def _override_license_types_spec_to_json(override_license_types_spec): """ Given an override license types spec, returns the json serialization of the object. """ license_type_strings = [] for license_type in override_license_types_spec: license_type_strings.append("\"" + license_type + "\"") return ("[" + ", ".join(license_type_strings) + "]")
57ab635a35c44e9deddeb015f197a1b071f8d4ff
74,922
from typing import List
from typing import Dict


def sort_on_categories(methods: List[dict]) -> Dict[str, Dict[str, dict]]:
    """
    Sorts methods on the category part of their dotted name.

    Returns:
        Dict[category: str, Dict[method_name: str, method: dict]]
    """
    grouped: Dict[str, Dict[str, dict]] = {}
    for entry in methods:
        category, short_name = entry["name"].split(".")
        grouped.setdefault(category, {})[short_name] = entry
    return grouped
39989295a80bf3f87d7417774b3b9ce8b9b53d22
554,638
import logging


def convert_to_es(scan_result: tuple[dict, str]) -> dict:
    """
    Converts a dictionary keyed by IP to one that carries the IP as a
    value, which helps elasticsearch queries.

    Input:
        - scan_result: tuple of (scanned dictionary, host IP address).
    Output: a dictionary with the IP as a value (empty when the IP key or
    its 'ports' entry is absent).
    """
    logging.info("Converting the scan output to one that elasticsearch parses better.")
    scan_dict = scan_result[0]
    ip = scan_result[1]
    result = {}
    if ip in scan_dict and 'ports' in scan_dict[ip]:
        result = scan_dict
        result['scan_results'] = scan_dict[ip]
        result['host'] = {'ip': ip}
        result.pop(ip)
    return result
dacf2eefc02e881478b3ddb9e01b600090fe38d7
264,076
def parse_behave_table(context_table):
    """Cast behave's table to a dict.

    NOTE(review): only the FIRST row is converted and returned (and an empty
    table raises StopIteration) — confirm multi-row tables are not expected.
    """
    rows_as_dicts = (
        dict(zip(context_table.headings, row)) for row in context_table.rows
    )
    return next(rows_as_dicts)
3499dbdfb19d33fd1790e61e89a6da2c4132165d
116,093
def gas_fvf(z, temp, pressure):
    """
    Gas FVF calculated in oilfield units, result in res ft3/scf.

    Inputs: temp in Fahrenheit (converted to Rankine internally),
    pressure in psia or psig.
    """
    rankine = temp + 459.67
    return 0.0282793 * z * rankine / pressure
8ca09ad37a2b33ea784ed347c6919ec856c65963
347,015
def convert_bbox_info(f_frame_len, bbox_info):
    """Convert bbox information from the old form
    <bb_left>, <bb_top>, <bb_width>, <bb_height>
    to the new form that fits cv2.rectangle() inputs:
    <bb_left>, <bb_top>, <bb_right>, <bb_bottom>."""
    total_length = sum(f_frame_len.values())
    boxes = list(bbox_info)
    for idx in range(total_length):
        # Positions 2 and 3 of each 4-tuple hold width/height; adding the
        # left/top two slots earlier turns them into right/bottom.
        if idx % 4 in (2, 3):
            boxes[idx] += boxes[idx - 2]
    return boxes
602455e12ca0c57d3d97c0f08d64d22a3fcc47bd
122,772
def _boolstring(arg): """ Checks api_parser that are not required and have default values on the server. Argument is a string but mus be converted to True, False or ''. Args: arg (str): Parsed argument. Returns: bool or empty string: "True" -> `True` "False" -> `False` Anything else -> `''` """ if arg == 'True': return True elif arg == 'False': return False else: return ''
0f1f2c236690757f4af7279f4a2859bf5b533fd0
498,993
import json
import codecs


def parse_json(filename):
    """Parse data from filename and return a list of boats.

    The 3-letter country code is taken from the start of the third
    path segment of `filename` and stamped onto every boat.
    """
    country = filename.split("/")[2][:3]
    try:
        handle = codecs.open(filename, "r", "utf-8-sig")
        rms = json.load(handle, strict=False)
    except json.decoder.JSONDecodeError as e:
        print(f"Error parsing file: (unknown), error: {e}")
        return []
    boats = rms["rms"]
    for boat in boats:
        boat["country"] = country
    return boats
db05745a9ff37377fe419afae50ec3cf9cc643e9
98,682
def _data_types_from_dsp_mask(words): """ Return a list of the data types from the words in the data_type mask. """ data_types = [] for i, word in enumerate(words): data_types += [j + (i * 32) for j in range(32) if word >> j & 1] return data_types
a0c10a96ce8d6ca0af3156ee147de8571f605447
49,322
def compare_bits(olds, news):
    """Subtract two equally-shaped 2D lists (news - olds) to determine
    changes to bit state."""
    n_rows = len(olds)
    n_cols = len(olds[0])
    return [
        [news[r][c] - olds[r][c] for c in range(n_cols)]
        for r in range(n_rows)
    ]
cdf1a99b3b3f168f0f2e6715854f06eb640564eb
194,032
def lookup_fits_header(remote_path):
    """Read the FITS header from storage.

    FITS Header Units are stored in blocks of 2880 bytes consisting of 36 lines
    that are 80 bytes long each. The Header Unit always ends with the single
    word 'END' on a line (not necessarily line 36).

    Here the header is streamed from Storage until the 'END' is found, with
    each line given minimal parsing.

    See https://fits.gsfc.nasa.gov/fits_primer.html for overview of FITS format.

    Args:
        remote_path (`google.cloud.storage.blob.Blob`): Blob or path to remote blob.
            If just the blob name is given then the blob is looked up first.

    Returns:
        dict: FITS header as a dictonary.
    """
    i = 1
    if remote_path.name.endswith('.fz'):
        i = 2  # We skip the compression header info
    headers = dict()
    streaming = True
    while streaming:
        # Get a header card: stream one 2880-byte block at a time.
        start_byte = 2880 * (i - 1)
        end_byte = (2880 * i) - 1
        b_string = remote_path.download_as_string(start=start_byte, end=end_byte)
        # Loop over 80-char lines
        for j in range(0, len(b_string), 80):
            item_string = b_string[j: j + 80].decode()
            # End of FITS Header, stop streaming
            if item_string.startswith('END'):
                streaming = False
                break
            # Get key=value pairs (skip COMMENTS and HISTORY)
            # NOTE(review): assumes exactly one '=' per card — a value
            # containing '=' would break the two-way unpack; confirm data.
            if item_string.find('=') > 0:
                k, v = item_string.split('=')
                # Remove FITS comment (everything after " / ")
                if ' / ' in v:
                    v = v.split(' / ')[0]
                v = v.strip()
                # Cleanup and discover type in dumb fashion:
                # quoted -> str, has '.' -> float, T/F -> bool, else int.
                if v.startswith("'") and v.endswith("'"):
                    v = v.replace("'", "").strip()
                elif v.find('.') > 0:
                    v = float(v)
                elif v == 'T':
                    v = True
                elif v == 'F':
                    v = False
                else:
                    v = int(v)
                headers[k.strip()] = v
        i += 1
    return headers
c150e2e260060b5b6446170729d51cdfd154c9b0
180,224
def url2db(identifier):
    """
    Converts a species name from its representation in a url to its database
    representation.

    :param identifier: Species name as in url, e.g. "canis_familiaris"
    :return: Species name as in database, e.g. "Canis lupus familiaris"
    """
    # Special-cased trinomial names.
    special_cases = {
        'canis_familiaris': "Canis lupus familiaris",
        'canis_lupus_familiaris': "Canis lupus familiaris",
        'gorilla_gorilla': "Gorilla gorilla gorilla",
        'gorilla_gorilla_gorilla': "Gorilla gorilla gorilla",
        'ceratotherium_simum': "Ceratotherium simum simum",
        'ceratotherium_simum_simum': "Ceratotherium simum simum",
    }
    if identifier in special_cases:
        return special_cases[identifier]
    return identifier.replace('_', ' ').capitalize()
dc3e0bf33397f57deddcdbdfec5052d9775f12c4
313,737
def PolyMod(f, g):
    """Return f reduced modulo g."""
    remainder = f % g
    return remainder
53b47e993e35c09e59e209b68a8a7656edf6b4ce
708,867
import colorsys


def label_color_css(bg_color):
    """Create CSS custom-property declarations for a label color.

    `bg_color` is an RRGGBB hex string; emits r/g/b (0-255) plus
    h (0-360), s and l (0-100) properties.
    """
    r = int(bg_color[0:2], 16) / 255
    g = int(bg_color[2:4], 16) / 255
    b = int(bg_color[4:6], 16) / 255
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    channels = zip(
        "rgbhsl", [r, g, b, h, s, l], [255, 255, 255, 360, 100, 100]
    )
    return "".join(f"--label-{ltr}:{int(val * fac)};" for ltr, val, fac in channels)
f48a45d6fe3ec6f7e5b6f422e59af54cf8813715
127,146
import functools
import operator


def combine_probability_matrices(matrices):
    """Given a sequence of probability matrices, combine them by element-wise
    product into a single matrix normalized to sum 1.0, and return it."""
    product = functools.reduce(operator.mul, matrices)
    total = product.sum()
    return product / total
e27e702226188238d49fd285e5171af06a40bead
60,754
def format_date(date):
    """
    Format a datetime as e.g. 'Jan 05, 2020' so the front end can display it
    without filters or directives. Falsy input yields None.
    """
    return date.strftime("%b %d, %Y") if date else None
7a839822ea920ac950f093f8eceb84faee0b403f
447,848
def _ascii_decode(ascii: bytes) -> str: """Decode bytes of ASCII charactors to string. Args: ascii (bytes): ASCII charactors Returns: str: Converted string """ return ascii.decode("ascii", "replace").replace("\x00", "")
9f764760e6d1d1ddb43306df7f072706b8ebca10
219,291
def has_rep(chrn, rfrom, rto, rstrand, rep_pos):
    """
    Return the names of the REP elements overlapping the region.

    Arguments:
    - `chrn`: chromosome name
    - `rfrom`: region from
    - `rto`: region to
    - `rstrand`: region strand
    - `rep_pos`: REP positions dictionary (name -> (chr, from, to, strand))
    """
    hits = []
    for rep_name, rep_coords in rep_pos.items():
        same_location = rep_coords[0] == chrn and rep_coords[3] == rstrand
        overlaps = rep_coords[1] < rto and rep_coords[2] >= rfrom
        if same_location and overlaps:
            hits.append(rep_name)
    return hits
edb47b8799d89ffc6eaafb9bb1f381de44b17db6
336,966
def check_requirements(requirements, entities):
    """
    Compare the existing entities with the entities required to complete an
    intent.

    :param requirements: The list of the entity types needed
    :param entities: The list of current entities (dicts with a "type" key)
    :return: (missing_status, missing) — missing_status is True when entities
        are still missing; missing lists the outstanding entity types.
    """
    # Fix: the original aliased `requirements` and mutated the caller's
    # list via `del`; work on a copy instead.
    missing = list(requirements)
    missing_status = True
    if len(requirements) > len(entities):
        return missing_status, missing
    for entity in entities:
        for i, needed_entity in enumerate(missing):
            if entity["type"] == needed_entity:
                del missing[i]
                break
    if len(missing) == 0:
        missing_status = False
    return missing_status, missing
49c186c404ec32cee04c8daa7da4325f7d7b5b9f
475,225
import torch


def ndc_to_camera_space(Xn, P):
    """Transform point(s) from normalised device coordinates to camera space.

    Args:
        Xn (torch.Tensor): homogeneous point(s) in normalised device coordinates.
        P (torch.Tensor): projection matrix.

    Returns:
        Xc (torch.Tensor): homogeneous point(s) in camera space.
    """
    # Recover the clip-space w from the NDC depth (normalised device
    # coordinates -> homogeneous clip space).
    depth = Xn[..., 2:3]
    w = P[2, 3] / (depth - P[2, 2])
    clip_points = Xn * w
    # Homogeneous clip space -> camera space via the inverse projection.
    return torch.matmul(clip_points, P.inverse().t())
40e731bb349ed5248bf22993107c76245e4e0767
631,039
from typing import List


def evalRPN(tokens: List[str], verbose=False) -> int:
    """Evaluate an integer expression in Reversed Polish Notation (aka Postfix Notation)

    Keyword arguments:
    tokens -- list of tokens presented as strings, can be integers or operators '+,-,*,/'
    verbose -- flag to enable stack reporting during the evaluation (default False)
    """
    operators = {"+": int.__add__, "*": int.__mul__, "/": int.__truediv__, "-": int.__sub__}
    stack: List[int] = []
    for t in tokens:
        if t in operators:
            r, l = stack.pop(), stack.pop()
            # int() truncates the true-division result toward zero
            stack.append(int(operators[t](l, r)))
        else:
            stack.append(int(t))
        # Honour the verbose flag; the original printed unconditionally.
        if verbose:
            print(stack)
    return stack[0]
1769133f1edaad79d4eb58f05d9affe075983d7c
446,223
import typing import re def _get_mypy_errors(stdout) -> typing.Dict[int, typing.Dict[str, str]]: """Parse line number and error message.""" errors: typing.Dict[int, typing.Dict[str, typing.Any]] = {} # last line is summary of errors for error in [x for x in stdout.split("\n") if x != ""][:-1]: matches = re.match( r".+\.py:(?P<lineno>\d+): error: (?P<msg>.+) \[(?P<errcode>.+)\]", error, ) if matches is not None: match_dict = matches.groupdict() errors[int(match_dict["lineno"])] = { "msg": match_dict["msg"], "errcode": match_dict["errcode"], } return errors
7bc5b0c9cdbd9ce81a28c65885c36d5697c4900e
598,770
def get_renumber_starting_ids_from_model(model):
    """
    Get the starting ids dictionary used for renumbering with ids greater
    than those in model.

    Parameters
    -----------
    model : BDF
        BDF object to get maximum ids from.

    Returns
    --------
    starting_id_dict : dict {str : int, ...}
        Dictionary from id type to starting id.
    """
    def _max_key(mapping):
        # largest key of a dict, or 0 when it is empty
        return max(mapping.keys()) if mapping else 0

    eid_max = max(
        max(model.elements.keys()),
        _max_key(model.masses),
        _max_key(model.rigid_elements),
    )
    pid_max = max(max(model.properties.keys()), _max_key(model.properties_mass))

    set_id = _max_key(model.sets) + 1 if model.sets else 1
    spline_id = _max_key(model.splines) + 1 if model.splines else 1
    if model.caeros:
        # caero ids continue after the last box id of the highest panel
        caero_id = max(caero.box_ids[-1, -1] for caero in model.caeros.values()) + 1
    else:
        caero_id = 1

    return {
        'cid' : max(model.coords.keys()) + 1,
        'nid' : max(model.point_ids) + 1,
        'eid' : eid_max + 1,
        'pid' : pid_max + 1,
        'mid' : max(model.material_ids) + 1,
        'set_id' : set_id,
        'spline_id' : spline_id,
        'caero_id' : caero_id,
    }
e545e1248a0c2ea5db9a6055851a143270fc2fbc
446,913
from pathlib import Path
from click.testing import CliRunner


def runner_with_args_and_paths(tmp_path: Path):
    """Bundle a click CliRunner with workdir/output paths and CLI args."""
    workdir = tmp_path / "work"
    output = tmp_path / "output"
    cli_args = ["--workdir", str(workdir), "--output", str(output)]
    return CliRunner(), cli_args, workdir, output
8ca08f08cc2a81ec97dcb12e06e2154317377619
580,276
from functools import reduce
import operator


def list_geometric(ls):
    """ Returns the geometric mean of a list."""
    # nth root of the product of the n values
    product = reduce(operator.mul, ls)
    root = 1.0 / len(ls)
    return product ** root
27b65a50e7d6d1f466835550ce219ab1599f7785
445,701
def errorMessage(err, location = None):
    """
    Generate a standard error message.

    Parameters
    ----------
    err : str
        The error message.
    location : str, optional
        Where the error happens. E.g. CTL.funcs.funcs.errorMessage

    Returns
    -------
    str
        The generated error message.
    """
    if location is not None:
        return "Error in {}: {}".format(location, err)
    return "Error: {}".format(err)
fbc1c0cee3de8d165cb4f2512e5f830cedaa0c27
82,047
import requests


def read_data(url_page, parser):
    """
    Call requests.get for a url and return the extracted data.

    Parameters:
    url_page -- url of the Billboard data
    parser -- Instantiated parser (either ParseWeek() or ParseYear().
    """
    response = requests.get(url_page)
    parser.feed(response.text)
    return parser.result
d9d8bedc0a18b64a197e007cf0fdc63d5a1e63cb
37,448
def comma_labels(x_list):
    """Change list of int to comma format."""
    # each value is truncated to int, then formatted with thousands separators
    return [format(int(value), ',') for value in x_list]
7fa2eb8f8ba20ca10ef23c6882a24fe16be1675c
495,530
def grow_population(initial, days_to_grow):
    """
    Track the fish population growth from an initial population, growing over
    days_to_grow number of days.

    The population is stored as counts bucketed by due date: index i holds the
    number of fish whose timer is i, so the list stays 9 elements long no
    matter how large the population grows (vs. O(10^9) individual fish over
    256 days).  Modulo arithmetic rotates which slot is "due today" instead of
    shifting the whole list, making each simulated day O(1).
    """
    population = list(initial)
    for day in range(days_to_grow):
        due_slot = day % 9
        spawners = population[due_slot]
        # parents respawn with timer 6 -> due again in 7 days
        population[(day + 7) % 9] += spawners
        # offspring start with timer 8 -> due in 9 days (same slot, mod 9)
        population[(day + 9) % 9] += spawners
        # remove the spawners themselves; the clamp mirrors the original
        population[due_slot] = max(0, population[due_slot] - spawners)
    return population
88b8283e5c1e6de19acb76278ef16d9d6b94de00
2,974
def file_len(fname):
    """
    Get the number of lines in a text file.

    This is used to count the number of NE found by gamebit.

    :param fname: file name.
    :return: number of lines.
    """
    # Use a context manager so the handle is closed deterministically; the
    # original `open()` inside a generator leaked the file descriptor until
    # garbage collection.
    with open(fname) as handle:
        return sum(1 for _ in handle)
e175e166cb60df1a1386f572ec46dcb10f01de07
301,513
def is_compatible_broadcast_shape(src_shape, dst_shape):
    """
    Check if src_shape can be broadcasted to dst_shape

    dst_shape needs to be numpy compatible with src_shape, and each matching
    dimension for dst_shape must be equal or larger than src_shape

    Args:
        src_shape: tuple or list, shape of the source tensor
        dst_shape: tuple or list, shape of the desination tensor

    Returns:
        True if src_shape can be broadcasted to dst_shape
    """
    if len(src_shape) > len(dst_shape):
        return False
    # align trailing dimensions; each source dim must be 1 or match exactly
    trailing = dst_shape[-len(src_shape):]
    return all(s == 1 or s == d for s, d in zip(src_shape, trailing))
70c34c5c743637d9cc1bbc02a1759b7caf158008
123,023
def isClose(float1, float2):
    """ Helper function - are two floating point values close? """
    # "close" means within an absolute tolerance of 0.01
    difference = float1 - float2
    if difference < 0:
        difference = -difference
    return difference < .01
bc37c5dfb48bc0d9d3c3f933af04fa7a7f1c76c0
305,499
import torch def _reduce_camera_iou_overlap(ious: torch.Tensor, topk: int = 2) -> torch.Tensor: """ Calculate the final camera difficulty by computing the average of the ious of the two most similar cameras. Returns: single-element Tensor """ # pyre-ignore[16] topk not recognized return ious.topk(k=min(topk, len(ious) - 1)).values.mean()
40f532417c6fdfba4fa370996946c3924061c77a
465,422
import math


def dbm2mw(v):
    """
    dBm to mW

    mW = 10^(dBm/10)

    >>> dbm2mw(0)
    1.0
    >>> dbm2mw(10)
    10.0
    """
    exponent = v / 10
    return math.pow(10, exponent)
1fe33850a48087378d095d5c235c1b78b7efb1b9
380,086
import re


def normalize_file_name(fname):
    """
    Remove/replace characters in the string so that the string is
    suitable to be used as a file name.
    """
    # replace anything outside [0-9A-Za-z_-] with '_', then collapse runs
    sanitized = re.sub('[^0-9A-Za-z_-]', '_', fname)
    return re.sub('_+', '_', sanitized)
d4d988b2f7b0cd2acf74f113df6c6091481d7b0c
99,351
def total_loss_factor(frequency, reverberation_time):
    """
    The total loss factor can be related to the reverberation time for any subsystem.

    :param frequency: Frequency :math:`f`.
    :param reverberation_time: Reverberation time :math:`T`.

    :returns: Total loss factor.

    .. math:: \\eta = \\frac{2.2}{f T}

    See Craik, equation 1.19, page 9.
    """
    ft_product = frequency * reverberation_time
    return 2.2 / ft_product
fbe8160fa680e84f5c7cbde49f39e859f2b99adc
366,186
def subset(shape, dims):
    """Returns the dims-th elements of shape."""
    return [shape[dim] for dim in dims]
1eb66727f124f7769bf64b454f0a7d0f8e9a54c4
576,029
def extract_header(msg_or_header):
    """Given a message or header, return the header."""
    if not msg_or_header:
        return {}
    try:
        # msg_or_header may be a full message carrying a "header" field
        header = msg_or_header["header"]
    except KeyError:
        # Not a full message; treat it as a header dict, which must at
        # least carry a msg_id (the lookup re-raises KeyError otherwise).
        msg_or_header["msg_id"]
        header = msg_or_header
    if isinstance(header, dict):
        return header
    return dict(header)
731b79f9b1336fe0cefd44ed2cebb9f43c894115
207,862
def filter_flash_errors(glm_data, LL_coords, UR_coords):
    """
    There appears to be dense lines of erroneous flashes around 26 N on 10 Sep
    & 11 Sep from 1500-2100z. This function will remove these, but is unable
    to distinguish if a flash is genuine or erroneous.

    Parameters
    ----------
    glm_data : list of str
        List of GLM flash latitudes & longitudes
    LL_coords : tuple of str
        Lower lefthand coordinates of the bounding box contaning the area of
        false flashes
    UR_coords : tuple of str
        Upper righthand coordinates of the bounding box contaning the area of
        false flashes

    Returns
    -------
    filtered_flashes : tuple of lists
        Filtered GLM flash coordinates. Format: (flash_lons, flash_lats)
    """
    lons, lats = glm_data[0], glm_data[1]
    min_lon, min_lat = LL_coords[0], LL_coords[1]
    max_lon, max_lat = UR_coords[0], UR_coords[1]

    kept_lons = []
    kept_lats = []
    # keep only flashes that fall OUTSIDE the suspect bounding box
    for idx, lon in enumerate(lons):
        lat = lats[idx]
        outside_lat = lat < min_lat or lat > max_lat
        outside_lon = lon < min_lon or lon > max_lon
        if outside_lat or outside_lon:
            kept_lons.append(lon)
            kept_lats.append(lat)
    return (kept_lons, kept_lats)
5ff8ca4dbb82b633e36105527ac7bf18db7b5c94
23,032
def convert_steering_value_to_rad(steering_value):
    """Converts the steering value in range -1, +1 to steering angle in radians."""
    # full-lock steering corresponds to 0.91 rad
    max_angle_rad = 0.91
    return steering_value * max_angle_rad
98a67ea392d57da57ff23b47e68bef1dfc848aac
440,692
def start(ctx):
    """ Start Docker container for this project. """
    docker = ctx.obj['docker']
    return docker.start(verbose=ctx.obj['verbose'])
43491cf2cf87bf229ed769358f0ca5e8901a4cce
462,357
def strings_from_csv(file):
    """
    Read a .csv file into a list of rows, each row a list of field strings.

    Parameters
    ----------
    file : str
        Path of .csv file.

    Returns
    -------
    list
        One list per line; fields are split on commas with trailing
        newlines stripped.
    """
    with open(file, 'r') as handle:
        lines = handle.readlines()
    rows = []
    for line in lines:
        rows.append([field.rstrip('\n') for field in line.split(',')])
    return rows
1dca6b20de46e755b96fe8bbf5de6da757129db9
212,277
def strip_padding(blocklen, data, padding_class=0x01):
    """
    Strip the padding of decrypted data.

    Supports padding class 0x01 (ISO/IEC 9797-1 method 2): the plaintext is
    followed by a 0x80 marker byte and zero bytes.

    :param blocklen: cipher block length (kept for interface compatibility;
        not needed to locate the padding marker)
    :param data: padded plaintext as ``bytes``/``bytearray`` or ``str``
    :param padding_class: padding scheme identifier; only 0x01 is handled
    :return: data without padding, or None for an unsupported padding class
        (preserving the original no-op behaviour)
    """
    if padding_class != 0x01:
        return None
    # Indexing bytes in Python 3 yields ints, so the original '\x80'
    # comparison never matched bytes input and walked past the start of the
    # data; pick the marker representation matching the input type instead.
    marker = 0x80 if isinstance(data, (bytes, bytearray)) else '\x80'
    tail = len(data) - 1
    while data[tail] != marker:
        tail = tail - 1
    return data[:tail]
651288c08134d90d42050b26328ef59824a08840
457,033
import re


def query_and_ref_names_from_path(filepath):
    """
    Return the query and reference bin names from a path containing bin names
    separated by "_to". E.g. "potential_relpath/bin_a_to_bin_b.tsv" -->
    {'query': 'bin_a', 'ref': 'bin_b'}.

    :param filepath: string to search
    :return: dict containing the query ('query') and reference ('ref') names
    :raises AssertionError: if the pattern is not found in filepath
    """
    # \w matches a "word" character: a letter or digit or underbar [a-zA-Z0-9_]
    # Raw string avoids invalid-escape-sequence warnings for \w and \.
    search = r'([\w\.#-]+)_to_([\w\.#-]+).tsv'
    match = re.search(search, filepath)
    assert match, 'match not found for {} in {}'.format(search, filepath)
    # the assert above makes any "no match" branch unreachable
    return {'query': match.group(1), 'ref': match.group(2)}
9f8a9376e24b9ab927fd313f5cabcb58777de7c8
91,660
import logging


def float_user_needs_update(float_data, latest_data):
    """
    Return False if all values in latest_data matches all values in float_data.
    Return Trues otherwise indicating that the data in Float needs to be updated
    """
    for key, expected in latest_data.items():
        current = float_data[key]
        if expected != current:
            # first mismatch is enough: Float is out of date
            logging.info(f"Value LDAP:{expected} != Float:{current} for key {key}.")
            return True
    return False
2b0e72c07179472ccf410ac4f0f0c0fa00eed23c
564,118
def penn_to_wordnet(tag):
    """ Convert a Penn Treebank PoS tag to WordNet PoS tag. """
    penn_groups = {
        'n': ('NN', 'NNS', 'NNP', 'NNPS'),               # wordnet.NOUN
        'v': ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'),  # wordnet.VERB
        'r': ('RB', 'RBR', 'RBS'),                       # wordnet.ADV
        'a': ('JJ', 'JJR', 'JJS'),                       # wordnet.ADJ
    }
    for wn_tag, penn_tags in penn_groups.items():
        if tag in penn_tags:
            return wn_tag
    return None
3935e5bfcda542d819d5cdeee3b02541c968461d
534,800
from typing import Tuple


def cnnxfc_params(
    image_size: Tuple[int, int], n_channels: int, out_features: int, bias: bool = True
):
    """
    Return the number of parameters in a CNN followed by a linear layer.

    Args:
        image_size: Size of the output of the CNN.
        n_channels: Number of the image's channels.
        out_features: Neurons in the linear layer.
        bias: If true count bias.

    Returns:
        Number of parameters.
    """
    width, height = image_size
    # one weight per (pixel, channel, output-neuron) triple
    weight_count = width * height * n_channels * out_features
    if bias:
        return weight_count + out_features
    return weight_count
9d34a900a821031cb183a26dd5780e3c52a7016e
541,203
import json
import re


def maybe_replace_staging(page_data, module_config):
    """Replace host in JSON data if configured."""
    if not module_config.replace_staging_host:
        return page_data
    # Round-trip through JSON so the replacement reaches every nested string.
    serialized = json.dumps(page_data)
    swapped = re.sub(
        re.escape(module_config.stage_host), module_config.prod_host, serialized
    )
    return json.loads(swapped)
c4f0e28d33e4a2adf3344c72615ecd64b47870ea
135,707