content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def is_valid(param):
    """Return True when *param* carries a value, i.e. it is not None."""
    return param is not None
6f2f334267585f97fdd85fec543a040cd5916dd6
245,190
async def get_option_market_data(self, input_symbols, expiration_date, strike_price, option_type, info=None):
    """Return option market data for one or more stock options, including the
    greeks, open interest, chance of profit, and adjusted mark price.

    :param input_symbols: The ticker(s) of the stock.
    :type input_symbols: str
    :param expiration_date: Expiration date formatted YYYY-MM-DD.
    :type expiration_date: str
    :param strike_price: The strike price of the option.
    :type strike_price: str
    :param option_type: Either 'call' or 'put'.
    :type option_type: str
    :param info: Optional key used to filter the results to a single value.
    :type info: Optional[str]
    :returns: A list of key/value dictionaries (or the filtered values when
        *info* is given).
    """
    tickers = self.inputs_to_set(input_symbols)
    normalized_type = option_type.lower().strip() if option_type else option_type
    results = []
    for ticker in tickers:
        option_id = await self.id_for_option(ticker, expiration_date, strike_price, normalized_type)
        results.append(await self.get_option_market_data_by_id(option_id))
    return self.data_filter(results, info)
19e35842fd434406f9dc31ae22b117958a44236b
573,772
def remove_thousand_separator(int_as_str: str) -> str:
    """Strip comma thousand-separators from a number held in a string."""
    return "".join(int_as_str.split(","))
32cbd09fd239cf956ca138799bf0cf2a71b39040
317,743
def backoff_exponential(n):
    """backoff_exponential(n) -> float

    Exponential backoff implementation: returns 2 raised to the power *n*.
    See ReconnectingWebSocket for how this is used.
    """
    return pow(2, n)
c2e3cfe079147045604d110d48d79800a224cdca
363,956
def round2(value):
    """Round *value* to 2 decimal places, except 0 and 100 become exact ints."""
    if value == 0.0 or value == 100.0:
        return int(value)
    return round(value, 2)
629481b2ec158f7e74f1b9b3f765ddc64c6bbf62
379,031
def check_method(result, method):
    """Compare *method* against the method recorded in *result*, if any.

    Returns an error message string on mismatch, otherwise None.
    """
    if method is None:
        return None
    if 'method' in result:
        recorded = result['method']
        if recorded != method:
            return "wrong method {}".format(recorded)
    return None
13f1a02b2fab10139f870a5e9bde673c15af1f73
513,381
def find_whitespace(string):
    """Return the index of the first whitespace character, or -1 if none."""
    return next((idx for idx, ch in enumerate(string) if ch.isspace()), -1)
bbf43244dbc77a4656ec315c8c0fa8b8865d058a
311,192
from typing import List


def get_filetypes(format: str = 'json') -> List[str]:
    """Return the list of file extensions associated with *format*.

    'yaml' expands to both 'yml' and 'yaml'; any other format maps to itself.
    """
    if format == 'yaml':
        return ['yml', 'yaml']
    return [format]
4e9eb09f69bb727f267e48694b043f94b14bdadd
676,264
def create_source_sequence(resolve, name, clip_info_list):
    """Create a source timeline named *name* from clip segment info.

    *clip_info_list* is a list of dicts as produced by
    compute_source_sequence() or extract_tl_item_info(); each entry carries
    a media-pool item ("mp_item") and its (startFrame, endFrame) "segments".

    Returns the newly created timeline.
    """
    project_manager = resolve.GetProjectManager()
    resolve_project = project_manager.GetCurrentProject()
    media_pool = resolve_project.GetMediaPool()

    # Flatten every clip segment into one append instruction per segment.
    edit_metadata = []
    for clip in clip_info_list:
        for segment in clip["segments"]:
            edit_metadata.append({"mediaPoolItem": clip["mp_item"],
                                  "startFrame": segment[0],
                                  "endFrame": segment[1]})

    timeline = media_pool.CreateEmptyTimeline(name)
    resolve_project.SetCurrentTimeline(timeline)
    # One AppendToTimeline call per edit; the return status was previously
    # captured into an unused local, which has been removed.
    for edit in edit_metadata:
        media_pool.AppendToTimeline([edit])
    return timeline
5a91a0a7782a42195f1c31c133caf27ff644276c
225,983
def hasIntent(parsed, intent):
    """Return True if the UNIT parse result contains the given intent.

    :param parsed: UNIT parse result
    :param intent: name of the intent to look for
    :returns: True when present, False otherwise
    """
    if parsed is None or 'result' not in parsed or 'response_list' not in parsed['result']:
        return False
    return any(response['schema']['intent'] == intent
               for response in parsed['result']['response_list'])
ca23bb5bd2689f297429cdd53db5925eb4d21fa6
466,034
import re


def is_sale(this_line):
    """Determine whether a given line describes a sale of cattle.

    A sale line contains at least one digit and splits into more than
    three columns on runs of 3+ spaces.
    """
    contains_digits = re.search(r'[0-9]+', this_line) is not None
    column_count = len(re.split(r'\s{3,}', this_line))
    return contains_digits and column_count > 3
2e456b1a5c961ee0f9632d279de6ce1747e3c30c
619,350
def add_tweet_to_list(tweets: list, timeLine: list) -> list:
    """Append every tweet from the new batch onto the master list.

    :param tweets: Entire list of tweets (mutated in place)
    :param timeLine: List of tweets from the new batch
    :return: The same *tweets* list with the new tweets appended
    """
    # list.extend replaces the original element-by-element append loop.
    tweets.extend(timeLine)
    return tweets
b8a242ed6ac30472600bd543bbd33a6b79edce8a
351,348
def _FormDestinationUri(bucket): """Forms destination bucket uri.""" return 'gs://{}/dependencies'.format(bucket)
352bcd81a583fb281a80ef1c09a5e8ef195d62d5
324,829
import typing
import importlib


def import_object(path: str, default=None) -> typing.Any:
    """Import and return an object given a ``module_path:object_path`` string.

    Returns *default* when *path* is None — a convenience for settings that
    may be unset.

    Examples::

        >>> import_object('dijkstar.graph:Graph')
        <class 'dijkstar.graph.Graph'>
        >>> import_object('dijkstar.graph:Graph.load')
        <bound method Graph.load of <class 'dijkstar.graph.Graph'>>
    """
    if path is None:
        return default
    module_path, object_path = path.split(":")
    target = importlib.import_module(module_path)
    for attr_name in object_path.split("."):
        target = getattr(target, attr_name)
    return target
9c53a0616581a5958bad4b94d42cfe363e413cf8
47,385
def chunk_str(s, n, char):
    """Insert `char` every `n` characters in string `s`.

    Canonically pronounced "chunkster".  Raises ValueError when n < 1.
    """
    # Modified from http://stackoverflow.com/a/312464/3776794
    if n < 1:
        raise ValueError(
            "Cannot split string into chunks with n=%d. n must be >= 1." % n)
    chunks = [s[start:start + n] for start in range(0, len(s), n)]
    return char.join(chunks)
35e927b8289d86a2d2e38c9e3f18b5f40c64ca3a
217,205
import uuid


def get_uuid5(str_in):
    """Generate a UUID5 string from the DNS namespace and *str_in*.

    Args:
        str_in: Input string.

    Returns:
        The UUID5 as a string.
    """
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, str_in))
25c4b91d8dd9189e6b85bc0e3e299699ea235016
514,029
def prune_table(df):
    """Drop columns whose values are all identical (only when df has >1 row)."""
    constant_columns = [name for name in df.columns if len(set(df[name])) == 1]
    if constant_columns and len(df) > 1:
        df = df.drop(constant_columns, axis=1)
    return df
58859aea2f85e8b9e1ba463b850b18972120c79e
150,647
def _anonymous_model_data(ops_data): """Returns a dict representing an anonymous model. ops_data must be a dict representing the model operations. It will be used unmodified for the model `operations` attribute. """ return {"model": "", "operations": ops_data}
6b64f9098b30cf3e079311b75ca136cfc2f7038f
703,090
def extract_model_configs(full_entry):
    """Extract the model and problem configuration values from *full_entry*
    into a fresh dict keyed by the known configuration names."""
    model_keys = ("ico_encoder", "article_encoder", "attn", "cond_attn",
                  "tokenwise_attention", "pretrain_attention",
                  "tune_embeddings", "no_pretrained_word_embeddings")
    problem_keys = ("article_sections", "data_config")
    return {key: full_entry[key] for key in model_keys + problem_keys}
debb3af7016cd0594563e02b65b498048091c672
103,573
def loadactive(cf):
    """Load active IPs from the firewall blacklist directory.

    Parameters
    ----------
    cf : Config

    Returns
    -------
    List[str]
        Stems of the '*.auto' files in the blacklist directory, with '|'
        translated back to '/'.
    """
    blacklist_dir = cf.etcpath('blacklist')
    return [entry.stem.replace('|', '/') for entry in blacklist_dir.glob('*.auto')]
06cdbb15b7b2d037f94fae3064552963c2bf672d
202,392
def is_fits(name):
    """Check whether *name* refers to a FITS image (contains '.fits')."""
    return name.find(".fits") != -1
2fc2e9a4b5f2bffe50d8837bacd211b7e6eea48f
315,645
import json


def open_credential_file(filename):
    """Load and return the JSON credential data stored in *filename*.

    The file is opened via a context manager so the handle is closed
    deterministically (the original left the file object open).
    """
    with open(filename) as cred_file:
        return json.load(cred_file)
af6cd4355b7cdd7a45ade17c0074a22be791c27f
383,757
def open_in_file(filename):
    """Try to open *filename* for reading.

    Returns:
        The open file handle on success, or False when the file cannot be
        opened.  The caller is responsible for closing the handle.
    """
    try:
        return open(filename, 'r')
    except OSError:
        # Narrowed from a bare except: only I/O failures mean "cannot open";
        # anything else (e.g. KeyboardInterrupt) should propagate.
        print("Oops! something wrong with opening file")
        return False
1c11fd89bb3f0e751d12b11cf1b783c67edb90aa
498,980
def get_op_slices(ops, op_reg_manager):
    """Return the per-op OpSlice lists for *ops*, dropping empty entries.

    Args:
      ops: List of tf.Operation.
      op_reg_manager: OpRegularizerManager that tracks the slicing.

    Returns:
      List of non-empty lists of OpSlice, one inner list per op.
    """
    all_slices = [op_reg_manager.get_op_slices(op) for op in ops]
    return [slices for slices in all_slices if slices]
53d2950ab063225c440326f8eb77fa65b34b60f1
201,080
import json


def get_key(path=".keys/AES.key"):
    """Load the AES key stored as hex JSON (``{"key": "<hex>"}``).

    :param path: file path
    :type path: String
    :return: key bytes
    :rtype: bytearray
    """
    with open(path, 'r') as key_file:
        payload = json.load(key_file)
    return bytearray.fromhex(payload["key"])
fb653d3447629c3b50974f3aad2170ea4af50e79
654,058
def find_undefined_value(cbf_handle):
    """Given a cbf handle, get the value for the undefined pixel."""
    # Navigate to the array_intensities category, then read its
    # undefined_value column as a double.  Names are byte strings because
    # the pycbf API expects bytes.
    cbf_handle.find_category(b"array_intensities")
    cbf_handle.find_column(b"undefined_value")
    return cbf_handle.get_doublevalue()
9e53dd7ebac6f711e02e1cf77f1d2553a09d9c3b
691,498
def join_list_for_MATLAB(join_str:str, ls:list, min_len:int =75, add_semicolon:bool =False, comment:bool=False, preface:str =''):
    """Function to take a list and join it together with a delimiter as
    str.join(ls) does except, you actually insert a MATLAB style line break
    where the lines start to get long. Doesn't append delimiter to beginning
    or end.

    Inputs:
        join_str = string delimiter you want to use to join the list: ',', '+' ...
        ls = list of items you want to join with join_str.
        min_len = int that is the minimum line length they must be before
            inserting a line break.
        add_semicolon = boolean of whether to add a semicolon at the end or not.
        comment = boolean of whether the entire list should be commented out or not.

    Outputs - Single string that has all items in list joined with delimiters
    and MATLAB style line breaks where lines are long.

    Author: Jessica D. Haskins ([email protected]) GitHub: @jdhask
    1.14.2022 JDH created.
    """
    ad=';' if add_semicolon is True else ''
    ln_ls=[]; lines=[]
    for i, item in enumerate(ls):
        if type(item) != str: item=str(item) # Join only takes string lists.
        if len(item) > 0: # Don't join things that are blanks.
            ln_ls.append(item)
        # Re-join the accumulated items each iteration to measure line length.
        ln= join_str.join(ln_ls)
        if len(ln) > min_len:
            if comment is True and i != 0:
                ln='% '+ln
            lines.append(ln) # Keep all these joined strings. They will be on 1 line.
            # NOTE(review): items accumulated in ln_ls after the last flush are
            # discarded when the loop ends with `lines` non-empty — confirm
            # whether trailing short remainders should be emitted too.
            ln_ls=[]; # reset list holding items to put on a single line.
    if len(lines)==0: # Stuff wasn't long enough! So write one line with it on it.
        # NOTE(review): `preface` is only applied on this short-output path,
        # never in the multi-line branch below — verify that is intended.
        out=preface+join_str.join(ls)+ad
    else:
        # Join each line together with the MATLAB line break and new line character
        to_join= join_str+'...\n '
        out=to_join.join(lines)+ad
    return out
aca60a616560b8ef08330dc1ce5a3898a141f815
274,246
def human_readable_timedelta(value, precision=0):
    """Format a timedelta as a compact human-readable string (e.g. '1d2h3m4s').

    *precision* limits the number of leading components shown (0 = all).
    """
    parts = []
    if value.days:
        parts.append(f"{value.days}d")
    hours, remaining = divmod(value.seconds, 3600)
    if hours:
        parts.append(f"{hours}h")
    minutes, remaining = divmod(remaining, 60)
    if minutes:
        parts.append(f"{minutes}m")
    if remaining > 0 or not parts:
        parts.append(f"{remaining}s")
    if precision:
        parts = parts[:precision]
    return "".join(parts)
caf0723debe333452000efe105d6e15d0d571ff5
405,971
def _verify_additional_type(additionaltype): """Check that the input to additionaltype is a list of strings. If it is empty, raise ValueError If it is a string, convert it to a list of strings.""" if additionaltype is None: return None if isinstance(additionaltype, str): additionaltype = [additionaltype] if len(additionaltype) == 0: raise ValueError("additionaltype must be a non-empty list") return additionaltype
54811d9686d4b538239bffae3ae17c55016ad5c4
255,599
def get_decipher_values(values, text_key):
    """Convert a Decipher values collection into a Quantipy values collection.

    Parameters
    ----------
    values : list
        The values object from a block of Decipher metadata

    Returns
    -------
    list
        The Quantipy values object
    """
    converted = []
    for entry in values:
        converted.append({'value': int(entry['value']),
                          'text': {text_key: entry['title']}})
    return converted
9d25e9574880ea8f0f079df857f09313067bca0e
642,429
def _top_k(indices, indptr, data, k_per_row): """ Parameters ---------- indices: np.ndarray, shape [n_edges] Indices of a sparse matrix. indptr: np.ndarray, shape [n+1] Index pointers of a sparse matrix. data: np.ndarray, shape [n_edges] Data of a sparse matrix. k_per_row: np.ndarray, shape [n] Number of top_k elements for each row. Returns ------- top_k_idx: list List of the indices of the top_k elements for each row. """ n = len(indptr) - 1 top_k_idx = [] for i in range(n): cur_top_k = k_per_row[i] if cur_top_k > 0: cur_indices = indices[indptr[i]:indptr[i + 1]] cur_data = data[indptr[i]:indptr[i + 1]] # top_k = cur_indices[np.argpartition(cur_data, -cur_budget)[-cur_budget:]] top_k = cur_indices[cur_data.argsort()[-cur_top_k:]] top_k_idx.append(top_k) return top_k_idx
a56586d858085ef12b37ca04114646e420dbeefa
575,555
def delete_files_template(get_s3_client_func, get_bucket_func, delete_keys_func, key_list, s3_bucket):
    """Curried template function for deleting files from an S3 bucket.

    :param get_s3_client_func: A function that returns an S3 client
    :param get_bucket_func: A function that returns an object representing an S3 bucket
    :param delete_keys_func: A function that deletes a list of keys from an S3 bucket
    :param key_list: The list of keys to delete from the S3 bucket
    :param s3_bucket: The name of the bucket from which to delete
    :return: A MultiDeleteResult object detailing deleted keys and errors
    """
    client = get_s3_client_func()
    bucket = get_bucket_func(client, s3_bucket)
    return delete_keys_func(bucket, key_list)
d766fd1fb12b044f3436da0846f3802a6ad521c1
632,104
def get_day_of_the_week_number_from_datetime(datetime_obj) -> int:
    """Return the weekday number of *datetime_obj* (Monday=0 … Sunday=6)."""
    return datetime_obj.weekday()
f39ef66090501da260b698901bc35b23d224a363
681,865
def cli(ctx, state="", history_id="", invocation_id="", tool_id="",
        workflow_id="", user_id="", date_range_min="", date_range_max="",
        limit=500, offset=0, user_details=False):
    """Get all jobs, or select a subset by specifying optional arguments for filtering (e.g. a state).

    Output:

        Summary information for each selected job.
        For example::

          [{'create_time': '2014-03-01T16:16:48.640550',
            'exit_code': 0,
            'id': 'ebfb8f50c6abde6d',
            'model_class': 'Job',
            'state': 'ok',
            'tool_id': 'fasta2tab',
            'update_time': '2014-03-01T16:16:50.657399'},
           {'create_time': '2014-03-01T16:05:34.851246',
            'exit_code': 0,
            'id': '1cd8e2f6b131e891',
            'model_class': 'Job',
            'state': 'ok',
            'tool_id': 'upload1',
            'update_time': '2014-03-01T16:05:39.558458'}]

        .. note::
          The following filtering options can only be used with Galaxy
          ``release_21.05`` or later: user_id, limit, offset, workflow_id,
          invocation_id
    """
    # Pure pass-through: every CLI option maps one-to-one onto the
    # corresponding get_jobs() keyword on the Galaxy jobs client.
    return ctx.gi.jobs.get_jobs(state=state, history_id=history_id,
                                invocation_id=invocation_id, tool_id=tool_id,
                                workflow_id=workflow_id, user_id=user_id,
                                date_range_min=date_range_min,
                                date_range_max=date_range_max, limit=limit,
                                offset=offset, user_details=user_details)
cf6702581a7f4870c105aab763a8b5b5a89a1bc6
677,959
def clean_cat(catalog, pdict, fill_mask=None):
    """Rename slurped-catalog columns to the FRB-survey naming scheme.

    Args:
        catalog (astropy.table.Table): Catalog generated by astroquery
        pdict (dict): Maps desired key -> original column name
        fill_mask (int or float, optional): Fill masked items with this value

    Returns:
        astropy.table.Table: modified catalog
    """
    for desired, original in pdict.items():
        if original in catalog.keys():
            catalog.rename_column(original, desired)
    # Optionally replace masked entries with a concrete fill value.
    if fill_mask is not None:
        if catalog.mask is not None:
            catalog = catalog.filled(fill_mask)
    return catalog
5bc59c046f3d259a1653de036e0a8727487caeb7
582,840
import mimetypes


def get_mimetype(pathname):
    """Return the (type, encoding) MIME guess for the given file path."""
    return mimetypes.guess_type(pathname, strict=True)
a05ff6ce73e735e7e7bdc855c02bf0b91664e59c
594,084
def createReferencePoint(myAsm, surface):
    """Create a reference point in assembly *myAsm* at the first point on
    the given instance *surface*."""
    return myAsm.ReferencePoint(surface.pointOn[0])
6801b12dd6f1e6278169edd7d91b38219be98b06
447,996
def _cleanup_temp_dir(temp_dir): """ Runs cleanup on a TemporaryDirectory If successful it returns True If that fails it returns None """ try: temp_dir.cleanup() return True except: return None
1fa7180d31611cf84fcfb9e96e506596991cdcfa
205,973
from typing import Tuple
from typing import Dict
from typing import Any


def postprocess_fbound(
        smfb_0: float,
        smfb_1: float
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Postprocess sinusoidal-model frequency bounds.

    Reversed bounds are swapped; equal bounds are replaced by the default
    audible range 20–16000 Hz.

    Args:
        smfb_0 (float): Lower bound
        smfb_1 (float): Upper bound

    Returns:
        dict, dict: Postprocessed settings and parameters as dictionaries"""
    if smfb_0 > smfb_1:
        lower, upper = smfb_1, smfb_0
    elif smfb_0 == smfb_1:
        lower, upper = 20, 16000
    else:
        lower, upper = smfb_0, smfb_1
    in_kw = {"smfb_0": lower, "smfb_1": upper}
    out_kw = {"sinusoidal_model__frequency_bounds": (lower, upper)}
    return in_kw, out_kw
19efb4b4f7659bc157cd6ac1927fd4e18455f243
693,435
def _should_allow_unhandled(class_reference, key_name): """Check if a property is allowed to be unhandled.""" if not hasattr(class_reference, "__deserialize_allow_unhandled_map__"): return False return class_reference.__deserialize_allow_unhandled_map__.get(key_name, False)
3475c0eef50a31ad88bcfaaa2d0a6335cffbba24
51,517
def is_short_info(obj):
    """Return whether the object is a short representation of a dataset."""
    short_fields = {'bodySize', 'bodyRows', 'bodyFormat',
                    'numErrors', 'metaTitle', 'commitTime'}
    return not short_fields.isdisjoint(obj)
62a4f59d29b5999a6bd5e10eed5513f5925f1d01
633,716
def filter_pipeline(effects, filters):
    """Apply each filter to the effect list sequentially.

    A filter that would eliminate every effect stops the pipeline and the
    current list is returned; once a single effect remains, it is returned
    immediately.

    Parameters
    ----------
    effects : list of MutationEffect subclass instances
    filters : list of functions
        Each function takes a list of effects and returns a list of effects

    Returns list of effects
    """
    for apply_filter in filters:
        if len(effects) == 1:
            return effects
        narrowed = apply_filter(effects)
        if len(narrowed) == 0:
            return effects
        effects = narrowed
    return effects
43912d5e5407efb53c5c2e13b3ba1119814a55d4
491,648
def kUB_(B, kUB, Cooperativity, P):
    """Receptor binding rate for the cooperative or non-cooperative model.

    Parameters
    ----------
    B : float
        Number of bound receptors.
    kUB : float
        Rate at which AMPARs bind to PSD slots.
    Cooperativity : 0, 1
        1 to account for cooperative receptor binding, 0 otherwise.
    P : float
        Number of binding sites/slots at the PSD.

    Returns
    -------
    float
        kUB.
    """
    if Cooperativity != 1:
        return kUB
    slot_factor = 24.6 / (12.5 + P)
    return kUB * (slot_factor * B ** 0.8 + 1)
532ce6e2cdf7dce4f4b467d13bc0bab687512df3
307,987
def allocate(a_list: list, item):
    """Append *item* to *a_list* and return the new item's index.

    Too frequent an idiom not to abbreviate.
    """
    a_list.append(item)
    return len(a_list) - 1
0730db424cb40de4425e1fdf46ada53c663d0b5e
510,474
def _TIME2STEPS(time): """Conversion from (float) time in seconds to milliseconds as int""" return int(time * 1000)
386e5a8857f4c03419ce2a4d96643ad638bea90f
601,492
def has_only(string, chars):
    """Check whether *string* contains only characters found in *chars*."""
    return all(char in chars for char in string)
7e64c5b884e17136b8b3e1a5be88e3760fac86da
209,087
import requests


def tplink_post(url: str):
    """ Issue a POST to the device. The Content-Type header seems to be required by some devices."""
    # 5-second timeout prevents an unresponsive device from hanging the caller.
    _response = requests.post(url, timeout=5, headers={"Content-Type":"application/x-www-form-urlencoded",})
    # Surface HTTP 4xx/5xx as exceptions instead of returning a failed response.
    _response.raise_for_status()
    return _response
91f8e882d411d0bde2b44680789a0e10d79a25e0
583,730
def gather_input(input_string, datatype=int, req=[], notreq=[]):
    """
    Create an input and return the users input - it will catch for invalid inputs.

    Parameters
    -----------
    input_string: :class:`str`
        This will be passed into the builtin input() function.
    datatype: Any
        The data type to convert the input into - if it cannot be converted it will ask the user again.
    req: :class:`list`
        A list with all possible inputs and if the user input is not a match it will ask again.
        - If [], anything is allowed.
    notreq: :class:`list`
        A list with all inputs that should NOT be allowed - If [], nothing will happen.

    Returns
    -------
    Any
        The input that was received from the user.
    """
    # NOTE(review): req/notreq are mutable default arguments (shared across
    # calls) — harmless here only because they are never mutated; confirm.
    while True:
        try:
            # Coerce the stripped user input; any conversion failure re-prompts.
            menu = datatype(input(input_string).strip())
        except:
            # NOTE(review): bare except also swallows EOFError/KeyboardInterrupt
            # raised inside input() — confirm this is intended.
            print("Invalid input.")
            continue
        if req != []:
            if menu not in req:
                print("Invalid input.")
                continue
        elif notreq != []:
            # NOTE(review): because of the elif, notreq is IGNORED whenever
            # req is non-empty — verify that is the intended precedence.
            if menu in notreq:
                print("Invalid input.")
                continue
        return menu
564e7d41c2bfb1f92d89f6d0ad2b7c51c54819ff
96,621
def merge_tuples(*tuples):
    """Merge a mix of tuples and scalars into a flat list of strings.

    :param tuples:
    :return: List[String]
    """
    merged = []
    for entry in tuples:
        items = entry if isinstance(entry, tuple) else (entry,)
        merged.extend(str(item) for item in items)
    return merged
ea9d889fc2324fd86a9ca6a38584396e64bd5ac2
52,729
def get_table_data(soup, text, default=None):
    # type (BeautifulSoup.tag, str) -> str
    """Attempts to retrieve text data from a table based on the header value"""
    # An explicit None default collapses to the empty string.
    if default is None:
        default = ""
    # Guard against missing soup or missing header text.
    if soup is None:
        return default
    if text is None:
        return default
    # Find the <th> whose text matches, then take the text of the
    # following <td> cell.
    head = soup.find("th", text=text)
    if head is None:
        return default
    data = head.findNext("td").text
    # Empty/falsy cell text is treated as missing and falls back to default.
    return default if data is None or not data else data
858cd1fbb448027e1118d9cca4f777a07452d107
434,509
import json


def get_json(filename: str):
    """Read a JSON file.

    Args:
        filename: the JSON filename

    Returns:
        The parsed JSON data (dict, list, etc.).  The original ``-> str``
        annotation was incorrect — json.load returns the decoded object,
        not a string — so it has been removed.
    """
    with open(filename) as file_stream:
        return json.load(file_stream)
07c213d12c1c8c9358775c7e2adbdf04939dd1a6
112,945
def build_mu(mut, grid, full_levels=False):
    """Build 3D mu on full or half levels from 2D mu and grid constants."""
    c1_key, c2_key = ("C1F", "C2F") if full_levels else ("C1H", "C2H")
    return grid[c1_key] * mut + grid[c2_key]
14313ce1656d0eeeb5521a219b1f643b7e9ce84a
143,047
def get_samples(distribution, num_samples, seed=None): """Given a batched distribution, compute samples and reshape along batch. That is, we have a distribution of shape (batch_size, ...), where each element of the tensor is independent. We then draw num_samples from each component, to give a tensor of shape: (num_samples, batch_size, ...) Args: distribution: `tfp.distributions.Distribution`. The distribution from which to sample. num_samples: `Integral` | `DeviceArray`, int32, (). The number of samples. seed: `Integral` | `None`. The seed that will be forwarded to the call to distribution.sample. Defaults to `None`. Returns: `DeviceArray`, float32, (batch_size * num_samples, ...). Samples for each element of the batch. """ # Obtain the sample from the distribution, which will be of shape # [num_samples] + batch_shape + event_shape. sample = distribution.sample(num_samples, seed=seed) sample = sample.reshape((-1, sample.shape[-1])) # Combine the first two dimensions through a reshape, so the result will # be of shape (num_samples * batch_size,) + shape_tail. return sample
ab8d3b48c3767907c6f8b47a4108f3d15b5d02a6
589,738
def dice(type_profile, song_profile):
    """Dice similarity between the profile of a specific output_columns value
    (e.g. a specific composer) and the profile of a song:
    2 * |overlap| / (|A| + |B|)."""
    shared_keys = set(type_profile) & set(song_profile)
    total_size = len(type_profile) + len(song_profile)
    return 2 * len(shared_keys) / total_size
a3163c0266637478bcf3dadb54514e607c1c5cb8
172,377
async def challenge(websocket, user):
    """Challenges a user. """
    # Showdown protocol command: '|/challenge <user>, <format>'; the format
    # is fixed to gen8metronomebattle.
    return await websocket.send(f'|/challenge {user}, gen8metronomebattle')
bee53e79d085ebc7f8a48e0091adb2e6605a2ac6
702,511
def fibonacci_partial_sum(m: int, n: int):
    """
    Finds the last digit of a partial sum of Fibonacci numbers:
    Fm + Fm+1 + ... + Fn.

    :param m: starting index in Fibonacci sequence
    :param n: end index in Fibonacci sequence
    :return: the last digit of the partial sum

    Example: F3 + F4 + F5 + F7 = 2 + 3 + 5 + 8 + 13 = 31
    >>> fibonacci_partial_sum(3, 7)
    1
    """
    # Last digits of Fibonacci numbers repeat with the Pisano period for
    # modulus 10, which is 60 — so all indices can be reduced mod 60.
    pisano_period = 60
    partial_sum = [0, 1]
    if m == n:  # base case for a single fibonacci number
        fibonacci = n % pisano_period
        if fibonacci <= 1:  # base case for Fn <= 1
            return fibonacci
        for i in range(2, fibonacci + 1):  # compute the fibonacci sequence
            digit = (partial_sum[i - 1] + partial_sum[i - 2]) % 10
            partial_sum.append(digit)
        return partial_sum[fibonacci]
    start_ndx = m % pisano_period
    end_ndx = n % pisano_period + 1
    # When the reduced range wraps past the period boundary, extend the end
    # index by one full period so the slice below stays contiguous.
    if start_ndx >= end_ndx:
        end_ndx += pisano_period
    # Build last-digit Fibonacci values up to end_ndx, then sum the slice
    # covering indices m..n (mod 60) and take its last digit.
    for i in range(2, end_ndx):
        digit = (partial_sum[i - 1] + partial_sum[i - 2]) % 10
        partial_sum.append(digit)
    return sum(partial_sum[start_ndx:end_ndx]) % 10
2154e3b7dc5c0e58a821e14c0cc1c0ffb5023cfc
163,517
def extract_people_sequence(mocap_data):
    """Group the per-frame person matrices by person index.

    Extracting this information makes it possible to simply loop the frames
    and build the mocap data for one person.

    :param dict mocap_data: mocap data with a "frames" list, where each
        frame holds a "people" list.
    :return: one {"frames": [...]} dict per person index
    :rtype: list
    """
    people = []
    for frame in mocap_data.get("frames"):
        for idx, person in enumerate(frame.get("people")):
            while len(people) <= idx:
                people.append({"frames": []})
            people[idx]["frames"].append(person)
    return people
afb64b74fad6a807d194a16779d409f24ac88f09
572,342
def likelihood(pList, nList):
    """
    The likelihood for probabilities `pList` of a die, given `nList` counts
    for each outcome.
    """
    result = 1.
    for outcome, probability in enumerate(pList):
        result *= probability ** nList[outcome]
    return result
5a530555e760eab75550768de2555806c0d0506c
368,853
def flatten_automaton_to_table(automaton)->str:
    """
    Returns a string that contains a table describing an automaton.

    :param FA automaton: automaton that has to be described
    :return str: table representation
    """
    # Column widths: the state-name column grows to the widest repr; the
    # input columns share one width derived from the total repr length.
    name_length = 12
    input_length = 2
    for state in automaton.states:
        input_length += len(repr(state))
        if len(repr(state)) > name_length:
            name_length = len(repr(state))
    # quick, naive width settings. I could do this really accurately in two
    # passes but I don't really care.
    accepted_length = 12
    # Keep epsilon (if it is a declared input) as the LAST column.
    epsilon_included = False
    epsilon = list(automaton.states.values())[0].epsilon
    if epsilon in automaton.inputs:
        epsilon_included = True
    inputs = sorted(automaton.inputs - {epsilon})
    if epsilon_included:
        inputs.append(epsilon)
    # Header row: state name, one column per input, accepted flag.
    result = ''
    result += '{:^{length}s}'.format('state name', length=name_length)
    for single_input in inputs:
        result += '{:^{length}s}'.format(single_input, length=input_length)
    result += '{:^{length}s}'.format('accepted', length=accepted_length)
    result += '\n'
    result += '-' * (name_length + input_length * len(inputs) + accepted_length) + '\n'
    # One row per state: transition targets per input ('-' when none),
    # then the state's accepted value.
    for name in sorted(automaton.states):
        result += '{:^{length}s}'.format(repr(name), length=name_length)
        for single_input in inputs:
            output = sorted(automaton.states[name].forward(single_input))
            if not output:
                output = '-'
            result += '{:^{length}s}'.format(str(output), length=input_length)
        result += '{:^{length}d}'.format(automaton.states[name].value, length=accepted_length)
        result += '\n'
    return result
c53d2b21ba76aa7ff7635fd7baaf2a71c748aead
423,839
def find_in_annotation(annotation_dict, pattern, single=False, out_type='str'):
    """
    Search for a pattern in all XML annotation files provided and return a
    dictionary of results.

    Parameters
    ----------
    annotation_dict: dict
        A dict of annotation files in the form:
        {'swath ID': `lxml.etree._Element` object}
    pattern: str
        The pattern to search for in each annotation file.
    single: bool, optional
        If True, the results found in each annotation file are expected to
        be the same and therefore only a single value will be returned
        instead of a dict. If the results differ, an error is raised.
        Default is False.
    out_type: str, optional
        Output type to convert the results to. Can be one of the following:
        - str (default)
        - float
        - int

    Returns
    -------
    out: dict
        A dictionary of the results containing a list for each of the
        annotation files. E.g., {'swath ID': list[str, float or int]}
    """
    out = {}
    for s, a in annotation_dict.items():
        swaths = [x.text for x in a.findall('.//swathProcParams/swath')]
        items = a.findall(pattern)
        # Results nested under per-swath processing blocks are keyed by
        # swath name; everything else is keyed by the annotation-file key.
        parent = items[0].getparent().tag
        if parent in ['azimuthProcessing', 'rangeProcessing']:
            for i, val in enumerate(items):
                out[swaths[i]] = val.text
        else:
            out[s] = [x.text for x in items]
            # Unwrap single-element lists to a bare value.
            if len(out[s]) == 1:
                out[s] = out[s][0]

    def convert(obj, type):
        # Recursively convert strings (or lists of strings) to the
        # requested numeric type; non-str/list inputs yield None.
        if isinstance(obj, list):
            return [convert(x, type) for x in obj]
        elif isinstance(obj, str):
            if type == 'float':
                return float(obj)
            if type == 'int':
                return int(obj)

    if out_type != 'str':
        for k, v in list(out.items()):
            out[k] = convert(v, out_type)
    err_msg = 'Search result for pattern "{}" expected to be the same in all annotation files.'
    if single:
        # All per-file results must agree; return the shared value.
        val = list(out.values())[0]
        for k in out:
            if out[k] != val:
                raise RuntimeError(err_msg.format(pattern))
        if out_type != 'str':
            # NOTE(review): values were already converted above when
            # out_type != 'str'; converting `val` again returns None for
            # non-str values — confirm whether this double conversion is
            # intended.
            return convert(val, out_type)
        else:
            return val
    else:
        return out
1003017e45337a22d8db9e22c8830156d66a68f0
197,130
def action2motion(action, speed=0.25):
    """Map a discrete action index to (vx, vy, vz) drone velocities.

    Assumes a discrete action space encoded as an integer: 0 hovers,
    1-4 move along +X/+Y/-X/-Y at *speed*.
    """
    motions = {
        0: (0, 0, 0),
        1: (speed, 0, 0),
        2: (0, speed, 0),
        3: (-speed, 0, 0),
        4: (0, -speed, 0),
    }
    if action not in motions:
        raise RuntimeError("Could not convert discrete action into movement.")
    return motions[action]
34f798e757b232aa1191f905827e0b68c8860107
96,110
def days_from_common_era(year: int) -> int:
    """
    Returns the number of days from 0001-01-01 to the provided year.
    For a common era year the days are counted until the last day of
    December; for a BCE year the days are counted down from the end to
    the 1st of January.
    """
    def whole_years(y: int) -> int:
        # Gregorian day count for y whole years (leap-year corrected).
        return y * 365 + y // 4 - y // 100 + y // 400

    if year > 0:
        return whole_years(year)
    if year >= -1:
        return year * 366
    bce_years = -year - 1
    return -(366 + whole_years(bce_years))
1702b7d0753e7edcb1edbd232d44a96a7ed34698
278,382
import re


def update_page_number(url, page=1):
    # type: (str, int) -> str
    """Updates or appends the 'page' parameter for a URL"""
    pattern = r"page=(\d+)"
    replacement = "page={}".format(page)
    if re.search(pattern, url):
        return re.sub(pattern, replacement, url)
    return "{}&{}".format(url, replacement)
4967cde346bb68ec79a2abf27a5a4a82ca4d1ab6
71,233
def table_log_format(name, timestamp, data):
    """ Return a formatted string for use in the log"""
    return "{}&{}->[{}]".format(name, timestamp, data)
db801b3a223b1418d21804fba54133814469817d
612,828
def weak_match(ground1, ground2):
    """
    Matches a grounded condition if it has the same name and literals
    but ignores the truth value
    """
    if ground1.predicate != ground2.predicate:
        return False
    if len(ground1.literals) != len(ground2.literals):
        return False
    return all(a == b for a, b in zip(ground1.literals, ground2.literals))
76c4d22de6efbe31c3b4872dc76cb75ed5df3e04
207,448
import hashlib


def sha256(msg):
    """Generate the raw SHA-256 digest of *msg* (bytes)."""
    hasher = hashlib.sha256()
    hasher.update(msg)
    return hasher.digest()
0ff9072ba622923047f2d3b5ff06cd66a156414c
603,853
import stat


def mode_to_octal(mode):
    """Convert a symbolic mode string (as from stat.filemode) back to octal.

    The opposite of stat.filemode.
    """
    file_types = {'-': stat.S_IFREG, 'd': stat.S_IFDIR, 'l': stat.S_IFLNK}
    omode = file_types.get(mode[0], 0)
    # Plain read/write/execute slots: (string index, expected char, flag).
    plain_bits = (
        (1, 'r', stat.S_IRUSR), (2, 'w', stat.S_IWUSR),
        (4, 'r', stat.S_IRGRP), (5, 'w', stat.S_IWGRP),
        (7, 'r', stat.S_IROTH), (8, 'w', stat.S_IWOTH),
        (9, 'x', stat.S_IXOTH),
    )
    for idx, ch, flag in plain_bits:
        if mode[idx] == ch:
            omode |= flag
    # Execute slots that may also carry setuid/setgid:
    # 'x' -> exec only, 's' -> set-bit + exec, 'S' -> set-bit only.
    for idx, exec_bit, set_bit in ((3, stat.S_IXUSR, stat.S_ISUID),
                                   (6, stat.S_IXGRP, stat.S_ISGID)):
        ch = mode[idx]
        if ch == 'x':
            omode |= exec_bit
        elif ch == 's':
            omode |= set_bit | exec_bit
        elif ch == 'S':
            omode |= set_bit
    return omode
911c495d6bcfe5cde5dcf2af364e82bf14fb34ed
449,650
def get_tensor_children(tensor):
    """
    Get all calculation and data parent tensors (Not read).

    Walks from ``tensor`` through ``tensor.op.outputs``, collecting every
    tensor whose name does not contain 'read:0' (i.e. skipping variable-read
    tensors), and returns the deduplicated collection.

    NOTE(review): in TensorFlow, ``tensor.op.outputs`` typically includes
    ``tensor`` itself, so this recursion looks like it could loop without
    bound unless every output is a '...read:0' tensor — confirm against the
    graphs this is actually used on.
    """
    children_list = []
    children_list.append(tensor)
    if tensor.op:
        for t in tensor.op.outputs:
            # skip variable "read" tensors (names containing 'read:0')
            if not 'read:0' in t.name:
                children_list += get_tensor_children(t)
    # set() removes duplicates but discards ordering (requires hashable tensors)
    return list(set(children_list))
5d7bd97e9f1836e0df31a46013727d657d048185
100,464
def get_edges(cities, graph):
    """Price an itinerary over an airline route map.

    :param cities: list of city names, visited in order.
    :param graph: object whose ``graph`` attribute maps city -> {city: price}.
    :return: (True, '$<total>') when every leg exists, otherwise (False, '$0').
    """
    if not isinstance(cities, list) or len(cities) < 2:
        return (False, '$0')
    total = 0
    for origin, destination in zip(cities, cities[1:]):
        try:
            total += graph.graph[origin][destination]
        except KeyError:
            return (False, '$0')
    return (True, f'${total}')
d390aadfe9b8ec138312693274e102b68d0d9439
520,967
def get_rect_xmax(data):
    """Find maximum x value from four (x,y) vertices."""
    return max(data[i][0] for i in range(4))
9bf3615e64068807b5f2725514e7960aad7125dd
377,401
def parse_language_tagged_string(value: str) -> tuple:
    """Strip surrounding double quotes and a trailing language tag.

    Recognises @xx, @xxx and 5-character (e.g. @en-US) tags; returns the
    bare value and the language (None when no tag is present).
    """
    lang = None
    if len(value) > 1 and value.startswith('"') and value.endswith('"'):
        value = value[1:-1]
    # try tag lengths in the same priority order as before: 5, then 2, then 3
    for tag_len in (5, 2, 3):
        marker = tag_len + 1  # position of '@' counted from the end
        if len(value) > marker and value[-marker] == '@':
            lang = value[-tag_len:]
            value = value[:-marker]
            break
    return value, lang
690c976bd094a7db16ab1885b0dbae824d0f991c
524,362
def macro_double_soft_f1(y, y_hat, reduction='mean'):  # Written in PyTorch
    """Compute the macro soft F1-score as a cost (average 1 - soft-F1 across all labels).
    Use probability values instead of binary predictions.
    This version uses the computation of soft-F1 for both positive and negative class for each label.

    Args:
        y (torch.FloatTensor): targets array of shape (BATCH_SIZE, N_LABELS), including 0. and 1.
        y_hat (torch.FloatTensor): probability matrix from forward propagation of shape (BATCH_SIZE, N_LABELS)
        reduction (str): 'mean' for a scalar averaged over labels, 'none' for per-label costs.

    Returns:
        cost: scalar (reduction='mean') or per-label tensor (reduction='none').

    Raises:
        ValueError: if `reduction` is neither 'mean' nor 'none'.
    """
    # soft confusion-matrix counts per label (summed over the batch dimension)
    tp = (y_hat * y).sum(dim=0)
    fp = (y_hat * (1 - y)).sum(dim=0)
    fn = ((1 - y_hat) * y).sum(dim=0)
    tn = ((1 - y_hat) * (1 - y)).sum(dim=0)
    # epsilon guards against division by zero when a label has no mass at all
    soft_f1_class1 = 2 * tp / (2 * tp + fn + fp + 1e-16)
    soft_f1_class0 = 2 * tn / (2 * tn + fn + fp + 1e-16)
    cost_class1 = 1 - soft_f1_class1  # reduce 1 - soft-f1_class1 in order to increase soft-f1 on class 1
    cost_class0 = 1 - soft_f1_class0  # reduce 1 - soft-f1_class0 in order to increase soft-f1 on class 0
    cost = 0.5 * (cost_class1 + cost_class0)  # take into account both class 1 and class 0
    if reduction == 'none':
        return cost
    if reduction == 'mean':
        return cost.mean()
    # Previously an unrecognised reduction silently returned None; fail loudly.
    raise ValueError("Unsupported reduction: %r (expected 'mean' or 'none')" % reduction)
5ebecb7507e885690abdeb64067ffc92b0dca574
462,988
def number_of_steps(table):
    """Return the highest step index recorded in the table's 'step' column."""
    steps = table['step']
    return max(steps)
dd271ba61ef93630e37503c112368aaecdf132e1
551,381
def format_targets(ranking, targets_format):
    """Build generation targets from ranked choices.

    Args:
        ranking: list of (inputs, targets) batches; inputs['choices'] pairs
            with the per-choice target flags.
        targets_format: 'v0' -> one target per valid choice; 'v1' -> all valid
            choices joined into a single target with '∂' separators.
    """
    valid_choices = [
        choice
        for inputs, targets in ranking
        for choice, target in zip(inputs['choices'], targets)
        if target
    ]
    if targets_format == "v0":
        return valid_choices
    if targets_format == "v1":
        return ["∂ " + " ∂ ".join(valid_choices)]
    raise Exception("Targets format not implemented: %s." % targets_format)
6cb07c6cd72fc2a76137f88345c188644b057bc2
218,237
def insert_np(n_array, target, start=0):
    """Copy a 1-d NumPy array into `target` beginning at index `start`.

    Parts of `n_array` falling outside `target` are clipped; `start` may be
    negative. Returns `target` on success, or None when nothing overlaps.
    """
    size = target.shape[0]
    if start > size:
        return
    dst_lo = start
    dst_hi = start + n_array.shape[0]
    src_lo = 0
    if start < 0:
        # clip the leading part of the source that falls before target[0]
        src_lo = -start
        dst_lo = 0
    if src_lo > n_array.shape[0]:
        return
    src_hi = n_array.shape[0]
    if dst_hi > size:
        # clip the trailing part of the source that falls past target's end
        src_hi = n_array.shape[0] - (dst_hi - size)
        dst_hi = size
    if src_hi <= 0:
        return
    target[dst_lo:dst_hi] = n_array[src_lo:src_hi]
    return target
839ea9cbae5317013b15d2be8b3fc476887815c3
189,070
def is_jsonp_accepted(request):
    """Return True when the request's 'format' query argument is 'jsonp'."""
    requested_format = request.args.get('format')
    return requested_format == "jsonp"
8b6f80e0c3eb91342a80a7f4dd473fff9ef19d9c
129,879
def conv_repoids_to_list(repo_ids):
    """Split newline-separated repo ids into a list, dropping empty entries."""
    if not repo_ids:
        return []
    return [repo_id for repo_id in repo_ids.split("\n") if repo_id != '']
6a76a8ae4f565ac27839478f068f9e9a13276263
699,977
import json
import functools


def json_request(func):
    """
    Decorator for views whose incoming request carries a JSON body.

    Decodes the JSON and stores the result in ``request.POST``. If the body
    is missing or is not valid JSON, ``request.POST`` is left untouched
    (best-effort, as before).
    """
    @functools.wraps(func)  # preserve the wrapped view's name/docstring
    def decorated_function(request):
        try:
            request.POST = json.loads(request.body)
        except (AttributeError, TypeError, ValueError):
            # AttributeError: request has no body; TypeError: body is of the
            # wrong type; ValueError covers json.JSONDecodeError. The old bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit.
            pass
        return func(request)
    return decorated_function
32ae54977e5526fc1cdd17ed4a9ed8c1ccbe4af2
224,997
def formatCoreTime(core_time_ms):
    """Render core time given in milliseconds as core hours, 2 decimals."""
    core_hours = core_time_ms / 1000 / 3600.0
    return "%0.2f" % core_hours
948245393134e1b9069ed6caab67fb5b028c2212
37,391
def tags2dict(tags):
    """Convert a tag list to a dictionary.

    Example:
        >>> tags2dict([{'Key': 'Name', 'Value': 'foobar'}])
        {'Name': 'foobar'}
    """
    if tags is None:
        return {}
    return {item['Key']: item['Value'] for item in tags}
1b5caaf51ad45110d7065ef24625f02daa034267
60,303
def get_itersecting_DEM_tile_names(index, geometry):
    """Return filenames of DEM tiles whose footprint intersects `geometry`.

    NOTE: `index` (a GeoDataFrame tile index) and `geometry` (a shapely
    polygon) must be in the same CRS.

    Parameters
    ----------
    index : GeoDataFrame
        DEM tile index with a 'CPP filename' column.
    geometry : shapely geometry
        Polygon of interest.

    Returns
    -------
    Series of file names for the tiles of interest.
    """
    # Points projected to infinity yield NaN areas and break intersection
    # tests, so drop those rows before intersecting.
    finite = index[~index['geometry'].area.isna()]
    intersecting = finite[finite.intersects(geometry)]
    return intersecting['CPP filename']
88f81f1c50dccc9fdd76cf29e03e8949d736f272
410,758
def doc_template(object_name: str, body: str)->str:
    """Returns an html page

    Wraps *body* in a minimal HTML5 document whose <title> is *object_name*.

    :param object_name: name of the object
    :param body: body of the page
    :return: the page
    """
    # NOTE(review): the whitespace inside this template literal is part of the
    # returned page text — confirm the exact original layout before reflowing.
    doc = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>""" + object_name + """</title>
<meta name="description" content="Denodo code part">
</head>
<body>""" + body + """</body>
</html>
"""
    return doc
faf7a74f58dd1d2957b29d125b9d24d950c2429a
653,600
from datetime import datetime


def datetime_delta_ms(d1, d2):
    """Return the number of milliseconds between `d1` and `d2` (d1 - d2).

    Both arguments must be `datetime.datetime` instances; otherwise a
    TypeError is raised.
    """
    for name, value in (('d1', d1), ('d2', d2)):
        if not isinstance(value, datetime):
            raise TypeError('`%s` must be an instance of `datetime.datetime`.' % name)
    delta = d1 - d2
    milliseconds = delta.days * 24 * 3600 * 1000
    milliseconds += delta.seconds * 1000
    milliseconds += delta.microseconds / 1000
    return milliseconds
c38dcded0fcbd4459f52ce4201cf27b19d381cb6
532,827
def handle_leap_year(t):
    """Given a time struct, return the number of days in February for that year.

    Uses the full Gregorian rule: a year divisible by 4 is a leap year unless
    it is divisible by 100 but not by 400. The previous `year % 4 == 0` test
    wrongly reported 29 days for century years such as 1900.
    """
    year = t.tm_year
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return 29 if is_leap else 28
87373a0d0c7f6680b74f9ce8e6ec49134bdc90f7
524,846
def _buildD(vd, dims, N): """ Builds the effective drift matrices by multiplying by the 1/4N coeff vd : List containing the drift matrices dims : List containing the pop sizes N : List containing the effective pop sizes for each pop Returns a list of effective drift matrices for each pair of pops """ if (len(dims) == 1): return [1.0 / 4 / N[0] * vd[0][0]] res = [] ctr = 0 for i in range(len(dims)): for j in range(i + 1, len(dims)): res.append(1.0/(4*N[i])*vd[ctr][0] + 1.0/(4*N[j])*vd[ctr][1]) ctr += 1 return res
d9191fd546c31fd5acb489615a98f37b1a72b2b2
309,476
import requests


def count_redirects(url):
    """Return how many redirects fetching *url* follows, or '?' on any error."""
    try:
        # an empty history has length 0, covering the no-redirect case
        return len(requests.get(url, timeout=3).history)
    except Exception:
        return '?'
c0a090571be7e4d6d3d6ba1ab7db44d6afdea620
528,281
def ConvertToCamelCase(name):
    """Converts snake_case name to camelCase."""
    first, *rest = name.split('_')
    return first + ''.join(word.title() for word in rest)
d5ffa8ecd23a9fa8423cb78e8e93cbaad7591ec4
367,092
def fasta(sequences):
    """Builds a FASTA record of extracted sequences.

    Accepts either a list of Synthase objects (entire synthases were
    extracted) or a dict mapping synthase headers to lists of extracted
    domain sequences.
    """
    # Entire Synthase objects
    if isinstance(sequences, list):
        return "\n".join(synthase.to_fasta() for synthase in sequences)
    # Extracted domain sequences, keyed on synthase header
    if isinstance(sequences, dict):
        records = (
            f">{header}_{index}\n{sequence}"
            for header, domain_seqs in sequences.items()
            for index, sequence in enumerate(domain_seqs)
        )
        return "\n".join(records)
76ea3b13c8ea8214d73985d56a0d4cf50de4c517
624,062
import optparse


def ActionGroup(parser):
    """Define the group of 'actions' the script can execute."""
    description = (
        "These options list the actions the scripts can perform. "
        "One, and only one, action must be given when running the script.")
    return optparse.OptionGroup(parser, "Script actions", description)
c5c04fb84fb2d9d06497c15dcb381f1cfe038083
536,035
def _truncate(seq: str): """Removes the prefix before the first ATG. and any trailing nucleotides that are not part of a full codon. Args: seq (str): Nucleotide sequence to truncate Returns: str or None: the truncated sequence or None if no start codon was found. """ seq = seq.upper() for start in range(0, len(seq) - 2): codon = seq[start:start+3] if codon == "ATG": res = seq[start:] trailing_nuc = len(res) % 3 if trailing_nuc > 0: return res[:-trailing_nuc] else: return res
c189fe8b9360db3318ad2603357a32acc0b92352
432,418
import time
import functools


def measure(func):
    """Decorator that prints the wall-clock runtime of the wrapped function.

    @measure
    def test():
        for i in range(1000000000):
            pass

    if __name__ == '__main__':
        print("start")
        test()
        print("end")
    """
    @functools.wraps(func)  # keep the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        def display_formatted_time(elapsed_time, msg=""):
            minutes, seconds = map(int, divmod(elapsed_time, 60))
            print("Elapsed time - {0}: {1}min {2}s".format(msg, minutes, seconds))

        since = time.time()
        # bug fix: the wrapped function's return value was previously discarded
        result = func(*args, **kwargs)
        display_formatted_time(time.time() - since, func.__name__)
        return result
    return wrapper
95a703cdd0858f45f279608844a444a774c21c46
574,726
def get_optional_vim_attr(vim_object, attr_name):
    """Return `vim_object.attr_name`, or None when the attribute is unset.

    pyVmomi raises IndexError when an optional, non-array "May not be
    present" attribute (see vSphere API documentation) is accessed; optional
    array attributes instead appear as zero-element arrays.
    """
    try:
        value = getattr(vim_object, attr_name)
    except IndexError:
        return None
    return value
6781e13d7ba1ff1cfd6ab73d4cbeac70f09b1deb
66,781
import base64


def b64decode(data):
    """JOSE Base64 decode.

    :param data: Base64 string to be decoded. If it's unicode, then only
        ASCII characters are allowed.
    :type data: bytes or unicode

    :returns: Decoded data.
    :rtype: bytes

    :raises TypeError: if input is of incorrect type
    :raises ValueError: if input is unicode with non-ASCII characters
    """
    if isinstance(data, bytes):
        raw = data
    elif isinstance(data, str):
        try:
            raw = data.encode('ascii')
        except UnicodeEncodeError:
            raise ValueError(
                'unicode argument should contain only ASCII characters')
    else:
        raise TypeError('argument should be a str or unicode')
    # re-add the padding that JOSE strips before handing off to base64
    padding = b'=' * (4 - (len(raw) % 4))
    return base64.urlsafe_b64decode(raw + padding)
0a58173aeaefa872fd365af8871802b8a51b5ba0
394,305
def sort_colors_descending(g):
    """
    Sort priorities occurring in the game in descending order of priority.

    :param g: a game graph.
    :return: the colors in g sorted in descending order.
    """
    # dict.itervalues() is Python 2 only and raises AttributeError on
    # Python 3; use .values() instead. Each node value is a (player,
    # priority) pair; the set removes duplicate priorities before sorting.
    return sorted(set(node[1] for node in g.nodes.values()), reverse=True)
db0b3636474923064864295f1b4d98250262b044
530,942
def test_geojson(distribution): """ Test if a DCAT:distribution is GeoJSON. """ return ( distribution.get("mediaType") == "application/vnd.geo+json" or distribution.get("format", "").lower() == "geojson" )
c321795d4f38f7188fb4d9f3ab6403ce8b866bad
650,343
def _basename_in_ignore_list_re(base_name, ignore_list_re): """Determines if the basename is matched in a regex ignorelist :param str base_name: The basename of the file :param list ignore_list_re: A collection of regex patterns to match against. Successful matches are ignored. :returns: `True` if the basename is ignored, `False` otherwise. :rtype: bool """ for file_pattern in ignore_list_re: if file_pattern.match(base_name): return True return False
c8b8e0e4c4667ae86d86ee80497f959ea3e8453a
557,990
import re


def get_chapter_section(string):
    """
    Given a line of text, extract the chapter-section if it exists.

    Splits on ':' when the string starts with digits followed by a colon,
    otherwise splits on '-'.

    :param string: string from which to extract the chapter-section number
    :return: list of chapter/section parts
    """
    # raw string avoids the invalid "\d" escape (SyntaxWarning on Python 3.12+);
    # the redundant initial `chap_sec = []` assignment was also dropped.
    if re.search(r"^\d+:", string) is not None:
        return string.split(':')
    return string.split('-')
ea8d932f9a2890804bc7c7c67613779e127858b5
489,752
from typing import Callable


def rename_keys(iterable: dict, key_func: Callable):
    """Return a copy of `iterable` with every key transformed by `key_func`.

    :param iterable: Dictionary to iterate through
    :param key_func: Function producing the new key name
    :return: Dictionary with renamed keys (values unchanged)
    """
    renamed = {}
    for key, value in iterable.items():
        renamed[key_func(key)] = value
    return renamed
356213e40acab9e8a1d65f4da01bee47820b2ad2
388,291
import functools
import time


def slow_down(func, seconds: float):
    """Wrap `func` so each call sleeps `seconds` before and after running."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        time.sleep(seconds)
        outcome = func(*args, **kwargs)
        time.sleep(seconds)
        return outcome
    return wrapper
f98c8eb9eae368a15b2e031a158af91b406426bb
602,404
def lerp(start, end, percent):
    """Linearly interpolate between `start` and `end` by fraction `percent`."""
    weight_end = percent
    weight_start = 1 - percent
    return weight_end * end + weight_start * start
2f71779913554e17765117f1a169cffd0ed943e4
192,148