Columns: content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
def poly_union(poly_det, poly_gt):
    """Calculate the union area between two polygons.

    Args:
        poly_det (Polygon): A polygon predicted by detector.
        poly_gt (Polygon): A gt polygon.

    Returns:
        union_area (float): The union area between two polygons.
    """
    # Relies on a module-level `plg` (a polygon library) and a
    # `poly_intersection` helper defined elsewhere in the original module.
    assert isinstance(poly_det, plg.Polygon)
    assert isinstance(poly_gt, plg.Polygon)
    area_det = poly_det.area()
    area_gt = poly_gt.area()
    area_inters, _ = poly_intersection(poly_det, poly_gt)
    return area_det + area_gt - area_inters
fbd13a9b1ef4acee27fac7d04b00fc1cfc46ca08
3,651,275
def get_stoich(geom_i, geom_j):
    """ get the overall combined stoichiometry """
    form_i = automol.geom.formula(geom_i)
    form_j = automol.geom.formula(geom_j)
    form = automol.formula.join(form_i, form_j)
    stoich = ''
    for key, val in form.items():
        stoich += key + str(val)
    return stoich
eaba89508d7c913a77ebf91097d620dc6fdff5a6
3,651,276
import requests
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup


def get_all_text(url):
    """Retrieves all text in paragraphs.

    :param str url: The URL to scrape.
    :rtype: str
    :return: Text in the URL.
    """
    try:
        response = requests.get(url)
        # If the response was successful, no Exception will be raised
        response.raise_for_status()
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')  # Python 3.6
        return None  # sys.exit()
    except Exception as err:
        print(f'Other error occurred: {err}')  # Python 3.6
        return None  # sys.exit()
    soup = BeautifulSoup(response.text, "lxml")
    text = ""
    for i in soup.find_all('p'):  # soup.select
        # i.encode("utf-8")  # default
        # Delete citations (e.g. "The Alhambra is a UNESCO World Heritage Site.[2]")
        text += i.get_text() + '\n'
    # `clean_text` is a helper module from the original project.
    text = clean_text.del_nonAscii(clean_text.del_refs(text))
    return text
364150aee7c1c093367d3d95bc5c0836dde978db
3,651,277
from typing import List def metadata_partitioner(rx_txt: str) -> List[str]: """Extract Relax program and metadata section. Parameters ---------- rx_txt : str The input relax text. Returns ------- output : List[str] The result list of partitioned text, the first element is the relax program, and the second is metadata section. """ partitions = [] left_curly = 0 meta_start = 0 meta_end = 0 for i, char in enumerate(rx_txt): if i < 0: raise ValueError("The program is invalid.") if char == "{": if meta_start == 0: meta_start = i left_curly += 1 elif char == "}": left_curly -= 1 if left_curly == 0: meta_end = i + 1 break if meta_end == 0: raise ValueError("The metadata section was not found.") metadata = rx_txt[meta_start:meta_end] rx_program = rx_txt[meta_end:-1] partitions.append(rx_program) partitions.append(metadata) return partitions
dd09aff9ea517813d43ff307fb9fc425b7338943
3,651,278
def make_aware(value, timezone):
    """
    Makes a naive datetime.datetime in a given time zone aware.
    """
    if hasattr(timezone, 'localize'):
        # available for pytz time zones
        return timezone.localize(value, is_dst=None)
    else:
        # may be wrong around DST changes
        return value.replace(tzinfo=timezone)
b466b4fda2daf54b7aa5e8f00ad7b10397e61c7b
3,651,279
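A minimal usage sketch for the make_aware snippet above, assuming pytz is installed; the timezone name is only an illustration.

import pytz
from datetime import datetime

naive = datetime(2021, 6, 1, 12, 0)
aware = make_aware(naive, pytz.timezone('Europe/Paris'))
print(aware)  # 2021-06-01 12:00:00+02:00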
def to_dict(funs): """Convert an object to a dict using a dictionary of functions. to_dict(funs)(an_object) => a dictionary with keys calculated from functions on an_object Note the dictionary is copied, not modified in-place. If you want to modify a dictionary in-place, do adict.update(to_dict(funs)(a_dict)) Use to_dict(funs) in a map, and you can generate a list of dictionaries from a list of objects (which could also be dictionaries). :: K is hashable type => {K: (X -> V)} -> [X] -> {K: V} Equivalent to the following in Python 3: {k: f(an_object) for (k, f) in funs.items()} >>> from operator import itemgetter >>> funs = {'id': itemgetter('id'), 'fullname': lambda x: '%(forename)s %(surname)s' % x} >>> an_object = {'id': 1, 'forename': 'Fred', 'surname': 'Bloggs'} >>> result = to_dict(funs)(an_object) >>> result['id'] 1 >>> result['fullname'] 'Fred Bloggs' >>> 'forename' in result # Original keys are left out False """ def to_dict_funs(an_object): return dict((k, f(an_object)) for (k, f) in funs.items()) return to_dict_funs
d22bbcb3c1913361c3906fd2e7f3d254dc67de28
3,651,280
import re


def parse_duration_string_ms(duration):
    """Parses a duration string of the form 1h2m3s5.6ms4.5us7.8ns into milliseconds."""
    pattern = r'(?P<value>[0-9]+\.?[0-9]*?)(?P<units>\D+)'
    matches = list(re.finditer(pattern, duration))
    assert matches, 'Failed to parse duration string %s' % duration
    times = {'h': 0, 'm': 0, 's': 0, 'ms': 0, 'us': 0, 'ns': 0}
    for match in matches:
        parsed = match.groupdict()
        times[parsed['units']] = float(parsed['value'])
    return ((times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000
            + times['ms'] + times['us'] / 1000 + times['ns'] / 1e6)
da2981590d70f32ee3514873602621a77b70cbe2
3,651,281
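A quick sanity check of the duration parser above (input value chosen arbitrarily):

print(parse_duration_string_ms('1h2m3s4.5ms'))  # 3723004.5, i.e. (3600 + 120 + 3) * 1000 + 4.5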
def fin(activity): """Return the end time of the activity. """ return activity.finish
ed5b1d1e0f29f403cfee357a264d05d5cc88093e
3,651,282
def unfreeze_map(obj): """ Unfreezes all elements of mappables """ return {key: unfreeze(value) for key, value in obj.items()}
2ba48f6cf89f44001b7940076c4763dc820d9aa1
3,651,283
from typing import Optional from typing import Union from datetime import datetime def get_date( value: Optional[Union[date, datetime, str]], raise_error=False ) -> Optional[date]: """ Convert a given value to a date. Args: raise_error: flag to raise error if return is None or not value: to be converted. Can be date/datetime obj as well as str formatted in date/datetime Returns: date obj Raises: ValueError: If raise_error flag is True and parsed_date is None Examples: >>> get_date(date(2021, 1, 1)) datetime.date(2021, 1, 1) >>> get_date(datetime(2021, 1, 1, 0, 2)) datetime.date(2021, 1, 1) >>> get_date('2020-01-01 13:12:13') datetime.date(2020, 1, 1) >>> get_date('sadasadasdas') is None True >>> get_date(None) is None True >>> get_date('2021-20-20-20-20', raise_error=True) Traceback (most recent call last): ... ValueError: Invalid date 2021-20-20-20-20 """ if isinstance(value, datetime): return value.date() if isinstance(value, date): return value if value is not None: # A common date is in the form "2020-01-01", 10 characters if len(value) > 10: parsed_date = parse_datetime(value) parsed_date = parsed_date.date() if parsed_date else None else: parsed_date = parse_date(value) else: parsed_date = None if parsed_date is None and raise_error: raise ValueError(f"Invalid date {value}") return parsed_date
501b2363aa2d40f16f6144995db8d840e62f750a
3,651,284
def k_param(kguess, s): """ Finds the root of the maximum likelihood estimator for k using Newton's method. Routines for using Newton's method exist within the scipy package but they were not explored. This function is sufficiently well behaved such that we should not have problems solving for k, especially since we have a good estimate of k to use as a starting point. """ k = kguess val = np.log(k) - sps.psi(k) - s counter = 0 while np.abs(val) >= 0.0001: k = k - (np.log(k)-sps.psi(k)-s)/(1/k-sps.polygamma(1, k)) val = np.log(k) - sps.psi(k) - s # sps.polygamma(1,k) is first derivative of sps.psi(k) counter += 1 if counter > MAX_NEWTON_ITERATIONS: raise Exception("Max Newton's method iterations exceeded") return k
24df48746d53fd4573db10093065e7b49d5c7bfe
3,651,286
def hex_to_bin(value: int) -> str:
    """ Convert a hexadecimal (integer) value to a binary string, e.g. 0xf -> '0b1111' """
    return bin(value)
b82c4fea08fc258a3b50be9a5e77b3d076a33459
3,651,287
def four_oneports_2_twoport(s11: Network, s12: Network, s21: Network, s22: Network, *args, **kwargs) -> Network: """ Builds a 2-port Network from list of four 1-ports Parameters ---------- s11 : one-port :class:`Network` s11 s12 : one-port :class:`Network` s12 s21 : one-port :class:`Network` s21 s22 : one-port :class:`Network` s22 \*args, \*\*kwargs : passed to :func:`Network.__init__` for the twoport Returns ------- twoport : two-port :class:`Network` result See Also -------- n_oneports_2_nport three_twoports_2_threeport """ return n_oneports_2_nport([s11, s12, s21, s22], *args, **kwargs)
2f8b365b2ccb06c252337630f6e34b794a3a3eba
3,651,288
def find_xml_command(rvt_version, xml_path): """ Finds name index src path and group of Commands in RevitPythonShell.xml configuration. :param rvt_version: rvt version to find the appropriate RevitPythonShell.xml. :param xml_path: path where RevitPythonShell.xml resides. :return: Commands dictionary: {com_name:[index, src_path, group]} """ if not xml_path: xml_path = op.join(op.expanduser("~"), "AppData\\Roaming\\RevitPythonShell{0}\\RevitPythonShell.xml").format(rvt_version) xml_tree = ETree.parse(xml_path) xml_root = xml_tree.getroot() commands = defaultdict(list) for child in xml_root: if child.tag == 'Commands': com_children = child.getchildren() for i, com_child in enumerate(com_children): com_name = com_child.attrib["name"] commands[com_name].append(i) commands[com_name].append(com_child.attrib["src"]) commands[com_name].append(com_child.attrib["group"]) return commands
1effc1b313d93e92b25deef1d62fc65c8f3e6975
3,651,289
import mimetypes

import boto3


def put_data_to_s3(data, bucket, key, acl=None):
    """data is bytes, not string"""
    content_type = mimetypes.guess_type(key)[0]
    if content_type is None:
        content_type = 'binary/octet-stream'
    put_object_args = {'Bucket': bucket, 'Key': key, 'Body': data, 'ContentType': content_type}
    if acl:
        put_object_args.update({'ACL': acl})
    return boto3.client('s3').put_object(**put_object_args)
042fc8eea230559efdc60ca9f18db2e9d1766286
3,651,290
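A usage sketch for the S3 helper above; the bucket name and key are placeholders and AWS credentials are assumed to be configured.

response = put_data_to_s3(
    data=b'{"hello": "world"}',
    bucket='my-example-bucket',  # placeholder bucket name
    key='exports/hello.json',    # content type is guessed as application/json
    acl='private',
)
print(response['ResponseMetadata']['HTTPStatusCode'])  # 200 on success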
from pathlib import Path def join_analysis_json_path(data_path: Path, analysis_id: str, sample_id: str) -> Path: """ Join the path to an analysis JSON file for the given sample-analysis ID combination. Analysis JSON files are created when the analysis data is too large for a MongoDB document. :param data_path: the path to the application data :param analysis_id: the ID of the analysis :param sample_id: the ID of the sample :return: a path """ return join_analysis_path(data_path, analysis_id, sample_id) / "results.json"
5ae25e5c0df4801b23a34cdac09db709733844ca
3,651,291
def user_profile(uname=None): """ Frontend gets user's profile by user name or modify user profile (to do). Return user's complete profile and the recommendations for him (brief events). :param uname: user's name, a string :return: a json structured as {'user': [(0, 'who', 'password', '[email protected]', 'address', 'Limoges')], 'event': [{'event_id': 1234, 'title': '...', ...},{'event_id': 2345, ...}, ...]} """ verify_headers(request.headers) if request.method == 'GET': user = user_manager.return_user_data(uname) if len(user) == 0: abort(404) preferred_events_id = rcmd_manager.get_recommendations_for_user(user_manager.return_user_id(uname)) preferred_events = [] for pair in preferred_events_id: preferred_events.append({'activity': event_manager.get_event_with_nearest(pair[0]), 'score': pair[1]}) return jsonify({'user': user, 'event': preferred_events}) elif request.method == 'POST': if not request.json: abort(400) # to do for user profile modification
b43ab64b0d44e7d19342a90da261bf96489fed3a
3,651,292
def circuit_status(self, handle: ResultHandle) -> CircuitStatus: """ Return a CircuitStatus reporting the status of the circuit execution corresponding to the ResultHandle """ if handle in self._cache: return CircuitStatus(StatusEnum.COMPLETED) raise CircuitNotRunError(handle)
7de17e03e3177f7b7c2de31650a0c341ab7e4fa6
3,651,293
def get_portfolio() -> pd.DataFrame: """ Get complete user portfolio Returns: pd.DataFrame: complete portfolio """ portfolio = get_simple_portfolio() full_portfolio = pd.DataFrame() for ticket in portfolio.index: full_portfolio = full_portfolio.append( _clear_individual_information(get_individual_information(ticket))) return full_portfolio
c04df5cf88e936cab9bd7f30a63ab3e695efd771
3,651,295
import numpy as np


def findZeros( vec, tol = 0.00001 ):
    """Given a vector of data, finds all the zeros.

    Returns an Nx2 array: each row is a zero, the first column is the time of
    the zero, the second column indicates increasing or decreasing (+1 or -1
    respectively)."""
    zeros = []
    for i in range( vec.size - 1 ):
        a = float( vec[ i ] )
        b = float( vec[ i + 1 ] )
        increasing = 1
        if ( b < a ):
            increasing = -1
        if ( a * b < 0 ):
            t = -a / ( b - a )
            zeros.append( ( i + t, increasing ) )
    if ( abs( vec[ -1 ] ) < tol ):
        if ( vec[-1] > vec[-2] ):
            zeros.append( ( vec.size - 1, 1 ) )
        else:
            zeros.append( ( vec.size - 1, -1 ) )
    # dtype=float (rather than the removed np.int) keeps the fractional crossing times.
    return np.array( zeros, dtype=float )
173f734c9b3abf876b48d194e691b517fd0ec816
3,651,296
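A small worked example for findZeros above (output shown with the float dtype):

import numpy as np

vec = np.array([1.0, -1.0, 1.0])
print(findZeros(vec))
# [[ 0.5 -1. ]
#  [ 1.5  1. ]]  -> a decreasing zero crossing at t=0.5 and an increasing one at t=1.5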
from typing import List


def get_index(square_num: int) -> List[int]:
    """
    Gets the indices of a square given the square number.

    :param square_num: An integer representing a square
    :return: Returns a list with 2 indices
    """
    # `puzzle_state` is a module-level 4x4 grid in the original code.
    for i in range(4):
        for j in range(4):
            if puzzle_state[i][j] == square_num:
                return [i, j]
e9896ba58b76ea43069b408a445720f0b418488d
3,651,297
def _ResolveName(item): """Apply custom name info if provided by metadata""" # ---------------------------------------------------------------------- def IsValidName(value): return bool(value) # ---------------------------------------------------------------------- if Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME in item.metadata.Values: metadata_value = item.metadata.Values[Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME] if not IsValidName(metadata_value.Value): raise Exceptions.ResolveInvalidCustomNameException( metadata_value.Source, metadata_value.Line, metadata_value.Column, name=metadata_value.Value, ) item.name = metadata_value.Value del item.metadata.Values[Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME]
0d303cb4577503b4e39f14da699cf77c9adf462f
3,651,298
import re


def extract_errno(errstr):
    """
    Given an error response from a proxyfs RPC, extracts the error number
    from it, or None if the error isn't in the usual format.
    """
    # A proxyfs error response looks like "errno: 18"
    m = re.match(PFS_ERRNO_RE, errstr)
    if m:
        return int(m.group(1))
adff11595d391a1bb4403c3c93a0bb4ab182254a
3,651,299
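A minimal sketch of how extract_errno behaves; PFS_ERRNO_RE is defined elsewhere in the original module, so the pattern below is only an assumption.

import re

PFS_ERRNO_RE = r"errno: (\d+)"  # assumed pattern, not taken from the original source

print(extract_errno("errno: 18"))        # 18
print(extract_errno("unexpected text"))  # None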
def index():
    """ Index page """
    return render_template("index.html")
af92fa468122a41ed33d55a591735d400cf68e0d
3,651,300
def solve(in_array):
    """
    Similar to 46442a0e, but where new quadrants are flips of the original
    array rather than rotations

    :param in_array: input array
    :return: expected output array
    """
    array_edgelength = len(in_array[0])  # input array edge length
    opp_end = array_edgelength*2-1  # used for getting opposite end of array
    prediction = [[-1]*array_edgelength*2 for i in range(array_edgelength*2)]  # init 2d array
    # iterate through all values
    for y in range(len(in_array)):
        for x in range(len(in_array[0])):
            val = in_array[y][x]
            prediction[y][x] = val
            # other 3 quadrants are flips
            prediction[y][opp_end-x] = val
            prediction[opp_end-y][opp_end-x] = val
            prediction[opp_end-y][x] = val
    return prediction
0af23e82caf65bea64eeeae6da8400ef6ec03426
3,651,301
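A worked example of the quadrant-flip construction in solve above, using a 2x2 input:

grid = [[1, 2],
        [3, 4]]
for row in solve(grid):
    print(row)
# [1, 2, 2, 1]
# [3, 4, 4, 3]
# [3, 4, 4, 3]
# [1, 2, 2, 1]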
def trim_all(audio, rate, frame_duration, ambient_power=1e-4): """Trims ambient silence in the audio anywhere. params: audio: A numpy ndarray, which has 1 dimension and values within -1.0 to 1.0 (inclusive) rate: An integer, which is the rate at which samples are taken frame_duration: A float, which is the duration of each frame to check ambient_power: A float, which is the Root Mean Square of ambient noise return: A numpy ndarray, which has 1 dimension and values within -1.0 to 1.0 (inclusive) """ new_audio = [] powers, fr = for_each_frame(audio, rate, frame_duration, calc_rms) frame_length = round(rate / fr) for ndx, power in enumerate(powers): if power > ambient_power: new_audio += audio[ndx*frame_length:(ndx+1)*frame_length].tolist() return np.array(new_audio)
37d7ca77c9ab767c90fedf4008a7a28415c5ce3f
3,651,302
def guess_initializer(var, graph=None): """Helper function to guess the initializer of a variable. The function looks at the operations in the initializer name space for the variable (e.g. my_scope/my_var_name/Initializer/*). The TF core initializers have characteristic sets of operations that can be used to determine the initializer. Args: var: `tf.Variable`. The function will use the name to look for initializer operations in the same scope. graph: Optional `tf.Graph` that contains the variable. If None the default graph is used. Returns: Tuple of the name of the guessed initializer. """ if graph is None: graph = tf.get_default_graph() prefix = var.op.name + "/Initializer" ops = [op for op in graph.get_operations() if op.name.startswith(prefix)] assert ops, "No operations found for prefix {}".format(prefix) op_names = [op.name[len(prefix) + 1:] for op in ops] if len(op_names) == 1: if op_names[0] == "Const": value = ops[0].get_attr("value").float_val[0] if value == 0.0: return "zeros" if np.isclose(value, 1.0): return "ones" return "constant" return op_names[0] # ones or zeros if "Qr" in op_names and "DiagPart" in op_names: return "orthogonal" if "random_uniform" in op_names: return "glorot_uniform" stddev_ops = [op for op in ops if op.name.endswith("stddev")] if stddev_ops: assert len(stddev_ops) == 1 stddev = stddev_ops[0].get_attr("value").float_val[0] else: stddev = None if "random_normal" in op_names: return "random_normal" if "truncated_normal" in op_names: if len(str(stddev)) > 5: return "glorot_normal" return "truncated_normal"
5a1e4a99037e51d87a8d75bc5f33e105f86a4153
3,651,303
def get_all(ptype=vendor): """ returns a dict of all partners """ if ptype == vendor: d = get_dict_from_json_file( VENDORS_JSON_FILE ) # will create file if not exist if ptype == customer: d = get_dict_from_json_file( CUSTOMERS_JSON_FILE ) return d
eb285d9462f85daec9c8b176edc6eaa90a09ff4c
3,651,304
from catkin.find_in_workspaces import find_in_workspaces def FindCatkinResource(package, relative_path): """ Find a Catkin resource in the share directory or the package source directory. Raises IOError if resource is not found. @param relative_path Path relative to share or package source directory @param package The package to search in @return Absolute path to resource """ paths = find_in_workspaces(project=package, search_dirs=['share'], path=relative_path, first_match_only=True) if paths and len(paths) == 1: return paths[0] else: raise IOError('Loading resource "{:s}" failed.'.format( relative_path))
17fe7bf3fb6b04f031d1bd8e0dd6558312dca92a
3,651,306
from typing import Callable import urllib3 from typing import Dict from typing import Any from typing import Optional def send_udf_call( api_func: Callable[..., urllib3.HTTPResponse], api_kwargs: Dict[str, Any], decoder: decoders.AbstractDecoder, id_callback: Optional[IDCallback] = None, *, results_stored: bool, ) -> "results.RemoteResult[_T]": """Synchronously sends a request to the given API. This handles the boilerplate parts (exception handling, parsing, response construction) of calling one of the generated API functions for UDFs. It runs synchronously and will return a :class:`results.RemoteResult`. To run the same function asychronously, use :meth:`Client.wrap_async_base_call` around the function that calls this (by convention, the ``whatever_api_base`` functions). This should only be used by callers *inside* this package. :param api_func: The UDF API function that we want to call from here. For instance, this might be :meth:`rest_api.SqlApi.run_sql`. :param api_kwargs: The arguments to pass to the API function as a dict. This should only include the parameters you want to send to the server, *not* any of the “meta” parameters that are mixed in with them (e.g. ``_preload_content``; this function will correctly set up the request). :param decoder: The Decoder to use to decode the response. :param id_callback: When the request completes (either by success or failure), this will be called with the UUID from the HTTP response, or None if the UUID could not be parsed. :param results_stored: A boolean indicating whether the results were stored. This does *not affect* the request; the ``store_results`` parameter of whatever API message the call uses must be set, and this must match that value. :return: A response containing the parsed result and metadata about it. """ try: http_response = api_func(_preload_content=False, **api_kwargs) except rest_api.ApiException as exc: if id_callback: id_callback(results.extract_task_id(exc)) raise tiledb_cloud_error.check_exc(exc) from None task_id = results.extract_task_id(http_response) if id_callback: id_callback(task_id) return results.RemoteResult( body=http_response.data, decoder=decoder, task_id=task_id, results_stored=results_stored, )
d34323f1f276f14d0dc947835db490e78ca47691
3,651,307
import requests import json def migration_area_baidu(area="乌鲁木齐市", indicator="move_in", date="20200201"): """ 百度地图慧眼-百度迁徙-XXX迁入地详情 百度地图慧眼-百度迁徙-XXX迁出地详情 以上展示 top100 结果,如不够 100 则展示全部 迁入来源地比例: 从 xx 地迁入到当前区域的人数与当前区域迁入总人口的比值 迁出目的地比例: 从当前区域迁出到 xx 的人口与从当前区域迁出总人口的比值 https://qianxi.baidu.com/?from=shoubai#city=0 :param area: 可以输入 省份 或者 具体城市 但是需要用全称 :type area: str :param indicator: move_in 迁入 move_out 迁出 :type indicator: str :param date: 查询的日期 20200101以后的时间 :type date: str :return: 迁入地详情/迁出地详情的前50个 :rtype: pandas.DataFrame """ city_dict.update(province_dict) inner_dict = dict(zip(city_dict.values(), city_dict.keys())) if inner_dict[area] in province_dict.keys(): dt_flag = "province" else: dt_flag = "city" url = "https://huiyan.baidu.com/migration/cityrank.jsonp" params = { "dt": dt_flag, "id": inner_dict[area], "type": indicator, "date": date, } res = requests.get(url, params=params) json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")]) return pd.DataFrame(json_data["data"]["list"])
4bb4afdde77c2b21222bde28a4f93e58cd8c6019
3,651,308
def ranges(locdata: LocData, loc_properties=None, special=None, epsilon=1): """ Provide data ranges for locdata.data property. If LocData is empty None is returned. If LocData carries a single value, the range will be (value, value + `epsilon`). Parameters ---------- locdata : LocData Localization data. loc_properties : str, tuple[str], list[str], True, None. Localization properties for which the range is determined. If None the ranges for all spatial coordinates are returned. If True the ranges for all locdata.data properties are returned. special : None, str If None (min, max) ranges are determined from data and returned; if 'zero' (0, max) ranges with max determined from data are returned. if 'link' (min_all, max_all) ranges with min and max determined from all combined data are returned. epsilon : float number to specify the range for single values in locdata. Returns ------- numpy.ndarray of float with shape (dimension, 2), None The data range (min, max) for each localization property. """ if locdata.data.empty: return None elif len(locdata) == 1: pass if loc_properties is None: ranges_ = locdata.bounding_box.hull.T.copy() elif loc_properties is True: ranges_ = np.array([locdata.data.min(), locdata.data.max()]).T elif isinstance(loc_properties, str): ranges_ = np.array( [[locdata.data[loc_properties].min(), locdata.data[loc_properties].max()]] ) else: loc_properties = list(loc_properties) ranges_ = np.array( [locdata.data[loc_properties].min(), locdata.data[loc_properties].max()] ).T if len(locdata) == 1: if ranges_.size == 0: ranges_ = np.concatenate( [locdata.coordinates, locdata.coordinates + epsilon], axis=0 ).T else: ranges_ = ranges_ + [0, epsilon] if special is None: pass elif special == "zero": ranges_[:, 0] = 0 elif special == "link": minmax = np.array([ranges_[:, 0].min(axis=0), ranges_[:, 1].max(axis=0)]) ranges_ = np.repeat(minmax[None, :], len(ranges_), axis=0) else: raise ValueError(f"The parameter special={special} is not defined.") return ranges_
28a23603dbb2abb52df4f7d2b35b6333050cfe43
3,651,309
def evaluate_available(item, type_name, predicate): """ Run the check_available predicate and cache the result. If there is already a cached result, use that and don't run the predicate command. :param str item: name of the item to check the type for. i.e. 'server_types :param str type_name: name of the type. i.e. 'headless' :param str predicate: the check_available command :return bool type_available: whether or not the type is available """ global cached_available if (item, type_name) not in cached_available: exit_code, _, _ = run_command_print_ready( shell=True, command=predicate ) cached_available[(item, type_name)] = exit_code == 0 return cached_available[(item, type_name)]
872e81613c91141c81f6dafd27aee6e8642c1e59
3,651,311
def parse_args(): """ Parses command line arguments """ parser = ArgumentParser(description="A multi-threaded gemini server") parser.add_argument("-b", "--host", default=DEFAULT_HOST, help="Host to bind to") parser.add_argument("-p", "--port", default=DEFAULT_PORT, help="Port to bind to") parser.add_argument( "-c", "--cert", default=DEFAULT_CERTFILE, help="SSL certificate in PEM format" ) parser.add_argument( "-k", "--key", default=DEFAULT_KEYFILE, help="SSL private key in PEM format" ) parser.add_argument( "-w", "--webroot", default=DEFAULT_WEBROOT, help="Webroot directory" ) parser.add_argument( "-q", "--queue", default=DEFAULT_QSIZE, help="Size of request queue" ) parser.add_argument( "-t", "--threads", default=DEFAULT_THREADS, help="Number of threads" ) parser.add_argument( "-u", "--uid", default=0, type=int, help="uid to use after loading SSL certificate", ) parser.add_argument( "-g", "--gid", default=0, type=int, help="gid to use after loading SSL certificate", ) return parser.parse_args()
05dec02ce0f243f46896917c2f25108e6f592bb5
3,651,314
def get_mapping_rules(): """ Get mappings rules as defined in business_object.js Special cases: Aduit has direct mapping to Program with program_id Request has a direct mapping to Audit with audit_id Response has a direct mapping to Request with request_id DocumentationResponse has a direct mapping to Request with request_id DocumentationResponse has normal mappings with all other objects in maping modal Section has a direct mapping to Standard/Regulation/Poicy with directive_id Anything can be mapped to a request, frotent show audit insted """ def filter(object_list): """ remove all lower case items since real object are CamelCase """ return set([item for item in object_list if item != item.lower()]) # these rules are copy pasted from # src/ggrc/assets/javascripts/apps/business_objects.js line: 276 business_object_rules = { "Program": "Issue ControlAssessment Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request", # noqa # removed Person because Programs have a "Mapped" attribute for people mappings "Audit": "Issue ControlAssessment Request history Person program program_controls Request", # noqa "Issue": "ControlAssessment Control Audit Program Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Issue Request", # noqa "ControlAssessment": "Issue Objective Program Regulation Contract Policy Standard Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Regulation": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Policy": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Standard": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Contract": "Program Issue ControlAssessment Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Request", # noqa "Clause": "Contract Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Policy Regulation Standard Request", # noqa "Section": "Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Policy Regulation Standard Contract Clause Request", # noqa "Objective" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa "Control" : "Issue ControlAssessment Request Program Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Person" : "Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request", # noqa "OrgGroup" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process 
DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Vendor" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "System" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Process" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "DataAsset" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "AccessGroup" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Product" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Project" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Facility" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa "Market" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request" # noqa } split_rules = {k: v.split() for k, v in business_object_rules.items()} filtered_rules = {k: filter(v) for k, v in split_rules.items()} return filtered_rules
59b94070d3fe35eca8c356162caf9969c9ea47d0
3,651,315
async def get_pipeline(request: web.Request, organization, pipeline) -> web.Response: """get_pipeline Retrieve pipeline details for an organization :param organization: Name of the organization :type organization: str :param pipeline: Name of the pipeline :type pipeline: str """ return web.Response(status=200)
0bbbe26111542173fda05fe8e3beccec99b6bfe8
3,651,318
def add_attachment(manager, issue, file):
    """
    Replaces jira's 'add_attachment' method until this issue is properly fixed:
    https://github.com/shazow/urllib3/issues/303
    We also need to limit filenames to 252 chars.

    :param manager: [jira.JIRA instance]
    :param issue: [jira.JIRA.resources.Issue instance]
    :param file: [file object]
    :return: [jira.JIRA.resources.Attachment instance]
    """
    filename = _get_filename(file.name)
    return _upload_file(manager, issue, file.file.read(), filename)
19d2fb57fbd116e27328c075a2899425243856b2
3,651,319
def _ols_iter(inv_design, sig, min_diffusivity): """ Helper function used by ols_fit_dki - Applies OLS fit of the diffusion kurtosis model to single voxel signals. Parameters ---------- inv_design : array (g, 22) Inverse of the design matrix holding the covariants used to solve for the regression coefficients. sig : array (g,) Diffusion-weighted signal for a single voxel data. min_diffusivity : float Because negative eigenvalues are not physical and small eigenvalues, much smaller than the diffusion weighting, cause quite a lot of noise in metrics such as fa, diffusivity values smaller than `min_diffusivity` are replaced with `min_diffusivity`. Returns ------- dki_params : array (27,) All parameters estimated from the diffusion kurtosis model. Parameters are ordered as follows: 1) Three diffusion tensor's eigenvalues 2) Three lines of the eigenvector matrix each containing the first, second and third coordinates of the eigenvector 3) Fifteen elements of the kurtosis tensor """ # DKI ordinary linear least square solution log_s = np.log(sig) result = np.dot(inv_design, log_s) # Extracting the diffusion tensor parameters from solution DT_elements = result[:6] evals, evecs = decompose_tensor(from_lower_triangular(DT_elements), min_diffusivity=min_diffusivity) # Extracting kurtosis tensor parameters from solution MD_square = (evals.mean(0))**2 KT_elements = result[6:21] / MD_square # Write output dki_params = np.concatenate((evals, evecs[0], evecs[1], evecs[2], KT_elements), axis=0) return dki_params
da55a73fff02f2088b77d21a4b0a7a7308b0c855
3,651,320
from typing import Counter def unarchive_collector(collector): """ This code is copied from `Collector.delete` method """ # sort instance collections for model, instances in collector.data.items(): collector.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. collector.sort() # number of objects deleted for each model label unarchived_counter = Counter() with transaction.atomic(using=collector.using, savepoint=False): # reverse instance collections for instances in collector.data.values(): instances.reverse() # delete instances for model, instances in collector.data.items(): if not is_archivable_cls(model): continue pk_list = [obj.pk for obj in instances] queryset = model.all_objects.filter(pk__in=pk_list) count = queryset.update(archived_at=None) unarchived_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: # user post archive instead of post delete signals.post_unarchive.send( sender=model, instance=obj, using=collector.using ) for obj in instances: setattr(obj, "archived_at", None) return sum(unarchived_counter.values()), dict(unarchived_counter)
3c0a05d31fafac34e0503bd5dd154c9201e7e94a
3,651,321
from typing import List from typing import Optional from typing import Union from typing import Dict def remove_tag_from_issues( issues: List[GitHubIssue], tag: str, scope: str = "all", ignore_list: Optional[Union[List[int], List[Dict[str, int]]]] = None, ) -> List[GitHubIssue]: """remove_tag_from_issues Removes all of a tag from the given issues. If scoped to just issues, we still check the first comment as this comment is the issue body. """ if ignore_list is None: ignore_list = [-1] for index, issue in enumerate(issues): if scope in ("all", "issues"): if tag in issue.metadata: # If the issue is one we should ignore, continue. # This is usually due to the issue being empty. if index in ignore_list: continue issue.metadata.remove(tag) if tag in issue.all_comments[0].tags: issue.all_comments[0].tags.remove(tag) if scope in ("all", "comments"): for comment in issue.all_comments: if tag in comment.tags: # If the comment is one we should ignore, continue. # This is usually due to the comment being empty. if {"issue": index, "comment": comment.number} in ignore_list: continue comment.tags.remove(tag) return issues
c8709f7e9a01f4c5320748ca181a3a813a9e754f
3,651,322
from datetime import datetime def days_remaining_context_processor(request): """Context processor. Adds days_remaining to context of every view.""" now = datetime.now() return {'days_remaining' : (wedding_date - now).days}
1aa9deb40b54627044926820921c4e5550f2050c
3,651,323
from datetime import datetime import time def convert_time_range(trange, tz=None): """ Converts freeform time range into a tuple of localized timestamps (start, end). If `tz` is None, uses settings.TIME_ZONE for localizing time range. :param trange: - string representing time-range. The options are: * string in format 'x1|x2', where x1 and x2 are start and end date in the format YYYYmmdd[THH:MM:SS.mmmmm] (in fact, any other format would work well, the function tries its best to determine format and parse timestamps) * string in format 'x1|x2', where x1 and x2 are given in human readable format, as described in the dateparser doc: (see https://github.com/scrapinghub/dateparser) * one of the following keywords: 'today', 'yesterday', 'this week', 'last week', 'this month', 'last month', 'this year', 'last year' :param tz: - timezone (optional). Either string representing a timezone (e.g. "America/Lima") or a pytz object. :return: tuple of two TZ-aware timestamps. """ # Form time range as a tuple of naive datetimes. assert isinstance(trange, str), "Value is not a string: %s" % trange trange = trange.strip().lower() _time = lambda d: datetime.combine(d, time()) today = date.today() if trange == 'today': ts_from = _time(today) ts_to = ts_from + timedelta(days=1, seconds=-1) elif trange == 'yesterday': ts_from = _time(today+timedelta(days=-1)) ts_to = ts_from + timedelta(days=1, seconds=-1) elif trange == 'this week': ts_from = _time(today-timedelta(days=today.weekday())) ts_to = ts_from + timedelta(days=7, seconds=-1) elif trange == 'last week': this_week = _time(today-timedelta(days=today.weekday())) ts_to = this_week + timedelta(seconds=-1) ts_from = _time(ts_to - timedelta(days=ts_to.weekday())) elif trange == 'this month': ts_from = _time(today.replace(day=1)) next_month = ts_from.replace(day=28) + timedelta(days=4) this_month_last_day = next_month - timedelta(days=next_month.day) ts_to = this_month_last_day + timedelta(days=1, seconds=-1) elif trange == 'last month': ts_to = _time(today.replace(day=1)) + timedelta(seconds=-1) ts_from = _time(ts_to.replace(day=1)) elif trange == 'this year': ts_from = _time(today.replace(month=1, day=1)) this_year_last_day = _time(today.replace(month=12, day=31)) ts_to = this_year_last_day + timedelta(days=1, seconds=-1) elif trange == 'last year': ts_to = _time(today.replace(month=1, day=1)) + timedelta(seconds=-1) ts_from = _time(ts_to.replace(month=1, day=1)) else: try: ts_from, ts_to = [dateparser.parse(t) for t in trange.split('|')] except ValueError: raise MalformedValueError( 'Cannot parse datetime range: wrong format!\n' + \ 'Datetime range should be two date[time] values divided by vertical bar (|)' ) if (ts_from is None) or (ts_to is None): raise MalformedValueError('Cannot parse datetime range: wrong format!') # Stretch date values (without time) to the end of day # (ignore microseconds). if ts_to.minute == 0 and ts_to.second == 0: ts_to += timedelta(days=1, seconds=-1) # Figure out desired timezone. time_zone = get_tz(tz) # Add timezone info to the result. ts_from = ts_from.replace(tzinfo=time_zone) ts_to = ts_to.replace(tzinfo=time_zone) if ts_from > ts_to: raise MalformedValueError( 'Start date cannot be greater than the end date!' ) return (ts_from, ts_to)
64c24c3011418e93111ec856acdd4b6a94abd425
3,651,324
def process_waiting_time(kernel_data, node_id, phase_id, norm_vehs=False): """Processes batched waiting time computation""" cycle_time = 60 def fn(x): if (x / 13.89) < 0.1: return 1.0 else: return 0.0 wait_times = [] for t in kernel_data: qt = defaultdict(lambda : 0) for veh in t[node_id][phase_id]: key = (veh.edge_id, veh.lane) qt[key] += fn(veh.speed) if len(qt) == 0: wait_times.append(0.0) else: if norm_vehs: wait_times.append( sum([v / MAX_VEHS_PER_LANE[k] for k, v in qt.items()])) else: wait_times.append(sum(qt.values())) ret = round(sum(wait_times) / cycle_time, 2) return ret
4489205a8d3ba58601875a7dee1b086fd7b639af
3,651,325
def get_version(): """ Do this so we don't have to import lottery_ticket_pruner which requires keras which cannot be counted on to be installed when this package gets installed. """ with open('lottery_ticket_pruner/__init__.py', 'r') as f: for line in f.readlines(): if line.startswith('__version__'): version = line.split('=')[1].strip().replace('"', '').replace('\'', '') return version return ''
0ab355110918e1c92b056932ba1d03768826c4f2
3,651,326
import time def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None, plot_feature_importance=False, model=None, verbose=10000, early_stopping_rounds=200, n_estimators=50000): """ A function to train a variety of regression models. Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances. :params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing) :params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing) :params: y - target :params: folds - folds to split data :params: model_type - type of model to use :params: eval_metric - metric to use :params: columns - columns to use. If None - use all columns :params: plot_feature_importance - whether to plot feature importance of LGB :params: model - sklearn model, works only for "sklearn" model type """ columns = X.columns if columns is None else columns X_test = X_test[columns] # to set up scoring parameters metrics_dict = {'mae': {'lgb_metric_name': 'mae', 'catboost_metric_name': 'MAE', 'sklearn_scoring_function': metrics.mean_absolute_error}, 'group_mae': {'lgb_metric_name': 'mae', 'catboost_metric_name': 'MAE', 'scoring_function': group_mean_log_mae}, 'mse': {'lgb_metric_name': 'mse', 'catboost_metric_name': 'MSE', 'sklearn_scoring_function': metrics.mean_squared_error} } result_dict = {} # out-of-fold predictions on train data oof = np.zeros(len(X)) # averaged predictions on train data prediction = np.zeros(len(X_test)) # list of scores on folds scores = [] feature_importance = pd.DataFrame() # split and train on folds for fold_n, (train_index, valid_index) in enumerate(folds.split(X)): print(f'Fold {fold_n + 1} started at {time.ctime()}') if type(X) == np.ndarray: X_train, X_valid = X[columns][train_index], X[columns][valid_index] y_train, y_valid = y[train_index], y[valid_index] else: X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] if model_type == 'lgb': model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1) model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_valid, y_valid)], eval_metric=metrics_dict[eval_metric]['lgb_metric_name'], verbose=verbose, early_stopping_rounds=early_stopping_rounds) y_pred_valid = model.predict(X_valid) y_pred = model.predict(X_test, num_iteration=model.best_iteration_) if model_type == 'xgb': train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns) valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns) watchlist = [(train_data, 'train'), (valid_data, 'valid_data')] model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200, verbose_eval=verbose, params=params) y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns), ntree_limit=model.best_ntree_limit) y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit) if model_type == 'sklearn': model = model model.fit(X_train, y_train) y_pred_valid = model.predict(X_valid).reshape(-1, ) score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid) print(f'Fold {fold_n}. 
{eval_metric}: {score:.4f}.') print('') y_pred = model.predict(X_test).reshape(-1, ) if model_type == 'cat': model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'], **params, loss_function=metrics_dict[eval_metric]['catboost_metric_name']) model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True, verbose=False) y_pred_valid = model.predict(X_valid) y_pred = model.predict(X_test) oof[valid_index] = y_pred_valid.reshape(-1, ) if eval_metric != 'group_mae': scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)) else: scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type'])) prediction += y_pred if model_type == 'lgb' and plot_feature_importance: # feature importance fold_importance = pd.DataFrame() fold_importance["feature"] = columns fold_importance["importance"] = model.feature_importances_ fold_importance["fold"] = fold_n + 1 feature_importance = pd.concat([feature_importance, fold_importance], axis=0) prediction /= folds.n_splits print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores))) result_dict['oof'] = oof result_dict['prediction'] = prediction result_dict['scores'] = scores # if model_type == 'lgb': # if plot_feature_importance: # feature_importance["importance"] /= folds.n_splits # cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values( # by="importance", ascending=False)[:50].index # # best_features = feature_importance.loc[feature_importance.feature.isin(cols)] # # plt.figure(figsize=(16, 12)); # sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False)); # plt.title('LGB Features (avg over folds)'); # # result_dict['feature_importance'] = feature_importance return result_dict
586e82a1efa42e41b0d0dfdddaf9d6d0afdd7bb4
3,651,327
def extract(d, keys):
    """
    Extract a value from a dict.

    :param d: The dict.
    :param keys: A list of keys, in order of priority.
    :return: The value of the highest-priority key that has a truthy value.
    """
    if not d:
        return
    for key in keys:
        tmp = d.get(key)
        if tmp:
            return tmp
9985e2f1079088251429fa26611fa6e15b920622
3,651,328
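For instance, with the extract helper above:

record = {'name': 'fallback', 'title': 'preferred'}
print(extract(record, ['title', 'name']))    # 'preferred'
print(extract(record, ['missing', 'name']))  # 'fallback'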
def edit_distance(y, y_hat): """Edit distance between two sequences. Parameters ---------- y : str The groundtruth. y_hat : str The recognition candidate. the minimum number of symbol edits (i.e. insertions, deletions or substitutions) required to change one word into the other. """ return _edit_distance_matrix(y, y_hat)[-1, -1]
42e9ee4169848cd2fc491e6e99b67f96e59dd95b
3,651,331
def sort_predictions(classes, predictions, bboxes): """ Sorts predictions from most probable to least, generate extra metadata about them. """ results = [] for idx, pred in enumerate(predictions): results.append({ "class_idx": np.argmax(pred), "class": classes[np.argmax(pred)], "prob": pred[np.argmax(pred)], "fname": get_region_filename(idx), "coords": bboxes[idx], }) results.sort(key=itemgetter("prob"), reverse=True) return results
1938bb3c1b301d15425a6574e66e136cdd43a867
3,651,332
def task_bootstrap_for_adming(): """ """ return {'actions': [(clushUtils.exec_script, [targetNode, "bootstrap_for_adming.py"], { 'dependsFiles': [".passwords", f"{homeDir}/.ssh/id_rsa.pub"], 'user':"root", 'manageEnv': False, 'dependsPkgs':['py3-pip', 'py3-psutil', 'curl'], 'logOutput': 'bootstrap_for_adming.log' } ) ], 'targets': [f'{logDir}/bootstrap_for_adming.log.{targetNode}'], 'file_dep': ["deployConfig.py"], }
91180c0b8b9a497488d7b4d1515088f133f5626b
3,651,333
from typing import List from typing import Tuple def reassign_clustered( knn: List[Tuple[npt.NDArray, npt.NDArray]], clusters: List[Tuple[str, int]], min_sim_threshold: float = 0.6, n_iter: int = 20, epsilon: float = 0.05, ) -> List[Tuple[str, int]]: """Reassigns companies to new clusters based on the average similarity to nearest neighbours belonging to clusters. Args: knn: A list of pairs of nearest neighbour index IDs and their similarities. clusters: A list of cluster ID and org ID pairs. min_sim_threshold: Minimum cosine similarity for a cluster reassignment to be accepted. n_iter: Number of timer to iteratively reaassign companies to clusters. epsilon: Minimum fraction of companies required for an iteration of reassignment to happen. If the fraction of companies being reassigned falls below this value, then there will be no more reassignment iterations, even if n_iter has not been reached. Returns: clusters: A list of reassigned cluster ID and org ID pairs. """ org_ids = [c[1] for c in clusters] shift = epsilon complete = 0 while (shift >= epsilon) and (n_iter > complete): index_id_cluster_lookup = np.array([c[0] for c in clusters]) changed = 0 _clusters = [] agg_clusters = [] agg_cluster_sims = [] for org_id, (knn_ids, sims) in zip(org_ids, knn): knn_ids, sims, source_id = decompose_knn( knn_ids, sims, source=True, ) knn_cluster_ids = index_id_cluster_lookup[knn_ids] unique_clusters, agg_sims = mean_cluster_similarities(knn_cluster_ids, sims) best_cluster, best_sim = get_best_cluster(unique_clusters, agg_sims) original_cluster = index_id_cluster_lookup[source_id] same_cluster = best_cluster == original_cluster if same_cluster: _clusters.append((original_cluster, org_id)) else: if best_sim >= min_sim_threshold: _clusters.append((best_cluster, org_id)) changed += 1 else: _clusters.append((original_cluster, org_id)) agg_clusters.append(unique_clusters) agg_cluster_sims.append(agg_sims) clusters = _clusters complete += 1 shift = changed / len(knn) return clusters, np.array(agg_clusters), np.array(agg_cluster_sims)
e90c61459cfeb8d906f155219cd4b758f4b8b5fe
3,651,334
import scipy.special as sc


def boys(n, t):
    """Boys function for the calculation of coulombic integrals.

    Parameters
    ----------
    n : int
        Order of the Boys function.
    t : float
        Variable for the Boys function.

    Raises
    ------
    TypeError
        If the Boys function order is not an integer.
    ValueError
        If the Boys function order n is not a non-negative number.
    """
    if not isinstance(n, int):
        raise TypeError("Boys function order n must be an integer")
    if n < 0:
        raise ValueError("Boys function order n must be a non-negative number")
    if not isinstance(t, (int, float)):
        raise TypeError("Boys function variable t must be integer or float")
    return sc.hyp1f1(n + 0.5, n + 1.5, -t) / (2.0 * n + 1.0)
1232d53898abfd032e570ad7697379f8359a566f
3,651,335
def get_diameter_by_sigma(sigma, proba):
    """ Get diameter of nodule given sigma of normal distribution and probability of diameter coverage area.

    Transforms sigma parameter of normal distribution corresponding to cancerous nodule
    to its diameter using probability of diameter coverage area.

    Parameters
    ----------
    sigma : float
        square root of normal distribution variance.
    proba : float
        probability of diameter coverage area.

    Returns
    -------
    float
        equivalent diameter.
    """
    return 2 * sigma * stats.norm.ppf((1 + proba) / 2)
0cd32d685b21b71cbae06a0cfb48f226209eff44
3,651,336
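A quick numerical check of the formula above, assuming `stats` refers to scipy.stats: for a 95% coverage area, ppf(0.975) is about 1.96, so the diameter is roughly 3.92 * sigma.

from scipy import stats

sigma = 2.0
diameter = 2 * sigma * stats.norm.ppf((1 + 0.95) / 2)
print(round(diameter, 2))  # 7.84, i.e. about 3.92 * sigma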
import termcolor def _colorize(val, color): """Colorize a string using termcolor or colorama. If any of them are available. """ if termcolor is not None: val = termcolor.colored(val, color) elif colorama is not None: val = "{}{}{}".format(TERMCOLOR2COLORAMA[color], val, colorama.Style.RESET_ALL) return val
77743f99fd845b1f8450c4bd93a52563e7c4c313
3,651,337
from pathlib import Path def get_output_filename(output_folder: str, repository_type: str, repository_name: str, filename: str) -> Path: """Returns the output filename for the file fetched from a repository.""" return ( Path(output_folder) / Path(repository_type.lower()) / Path(Path(repository_name).name) / Path(Path(filename).name) )
23b806f98265b45b799dbcc177760d5ceb8248fb
3,651,338
def wave_exist_2d_full_v2(b=.8): """ plot zeros of -nu1 + G(nu1,nu2) and -nu2 + G(nu2,nu1) as a function of g use accurate fourier series """ # get data # nc1 bifurcation values bif = np.loadtxt('twod_wave_exist_br1.dat') #bif2 = np.loadtxt('twod_wave_exist_br2.dat') bif_diag1 = np.loadtxt('twod_wave_exist_diag1.dat') bif_diag2 = np.loadtxt('twod_wave_exist_diag2.dat') # clean bifx,bify = clean(bif[:,3],bif[:,7],tol=.47) bifx2,bify2 = clean(bif[:,3],bif[:,8],tol=.47) bif_diag1x,bif_diag1y = clean(bif_diag1[:,0],np.abs(bif_diag1[:,1]),tol=.2) bif_diag2x,bif_diag2y = clean(bif_diag2[:,0],np.abs(bif_diag2[:,1]),tol=.2) # remove nans for calculating minima (usually nans are taken to be max/min vals, which is bad) bifx_nonan = bifx[(~np.isnan(bifx))*(~np.isnan(bify))] bify_nonan = bify[(~np.isnan(bifx))*(~np.isnan(bify))] bifx2_nonan = bifx2[(~np.isnan(bifx2))*(~np.isnan(bify2))] bify2_nonan = bify2[(~np.isnan(bifx2))*(~np.isnan(bify2))] bif_diag1x_nonan = bif_diag1x[(~np.isnan(bif_diag1x))*(~np.isnan(bif_diag1y))] bif_diag1y_nonan = bif_diag1y[(~np.isnan(bif_diag1x))*(~np.isnan(bif_diag1y))] bif_diag2x_nonan = bif_diag2x[(~np.isnan(bif_diag2x))*(~np.isnan(bif_diag2y))] bif_diag2y_nonan = bif_diag2y[(~np.isnan(bif_diag2x))*(~np.isnan(bif_diag2y))] fig = plt.figure(figsize=(10,5)) ax1 = fig.add_subplot(121, projection='3d') ax2 = fig.add_subplot(122) plane1_z = .895 plane2_z = 1.17 # get plane intersection idx bifx_int_p1 = np.argmin(np.abs(bifx_nonan-plane1_z)) bifx_int_p2 = np.argmin(np.abs(bifx_nonan-plane2_z)) bifx2_int_p1 = np.argmin(np.abs(bifx2_nonan-plane1_z)) bifx2_int_p2 = np.argmin(np.abs(bifx2_nonan-plane2_z)) bif_diagx_int_p1 = np.argmin(np.abs(bif_diag1x_nonan-plane1_z)) bif_diagx_int_p2 = np.argmin(np.abs(bif_diag1x_nonan-plane2_z)) bif_diagx2_int_p1 = np.argmin(np.abs(bif_diag2x_nonan-plane1_z)) bif_diagx2_int_p2 = np.argmin(np.abs(bif_diag2x_nonan-plane2_z)) ## plot curves in 3d # plot off diagonal and axial curves v1a = bify2[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)] v2a = bify[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)] ga = bifx[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)] #v1b = bif_diag1y[(bif_diag1y>=0)*(bif_diag2y>=0)*(bif_diag1y<=1)*(bif_diag2y<=1)*(bif_diag1x<=2)] #v2b = bif_diag1y[(bif_diag1y>=0)*(bif_diag2y>=0)*(bif_diag1y<=1)*(bif_diag2y<=1)*(bif_diag1x<=2)] gb = np.linspace(np.amin(bif_diag1x[~np.isnan(bif_diag1x)]),np.amax(bif_diag1x[~np.isnan(bif_diag1x)]),20) # clean ga,v1a,v2a = clean3d(ga,v1a,v2a,tol=.47) # remove nans for linewidth stuff later. 
ga_nonan = ga[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))] v1a_nonan = v1a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))] v2a_nonan = v2a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))] # prep for plotting with different line widths sol = np.zeros((len(ga),3)) sol[:,0] = v1a sol[:,1] = ga sol[:,2] = v2a sol = np.transpose(sol) points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3) segs = np.concatenate([points[:-1],points[1:]],axis = 1) line3d = Line3DCollection(segs,linewidths=(1.+(v1a_nonan)/np.amax(v1a_nonan)*3.),colors='k') # add modified curves to figure ax1.add_collection3d(line3d) # repleat above to capture remaining axial branch(es) # prep for plotting with different line widths sol = np.zeros((len(ga),3)) sol[:,0] = v2a sol[:,1] = ga sol[:,2] = v1a sol = np.transpose(sol) points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3) segs = np.concatenate([points[:-1],points[1:]],axis = 1) line3d = Line3DCollection(segs,linewidths=(1.+(v2a_nonan)/np.amax(v2a_nonan)*3.),colors='k') # add modified curves to figure ax1.add_collection3d(line3d) # plot diagonal guys # prep for plotting with different line widths diagx = bif_diag2y[(bif_diag2y<=1)*(bif_diag2x<=2.)] diagy = bif_diag2x[(bif_diag2y<=1)*(bif_diag2x<=2.)] diagz = bif_diag2y[(bif_diag2y<=1)*(bif_diag2x<=2.)] diagx_nonan = diagx[~np.isnan(diagx)] sol = np.zeros((len(diagx),3)) sol[:,0] = diagx sol[:,1] = diagy sol[:,2] = diagz sol = np.transpose(sol) points2 = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3) segs2 = np.concatenate([points2[:-1],points2[1:]],axis = 1) line3d2 = Line3DCollection(segs2,linewidths=(1.+(diagx_nonan)/np.amax(diagx_nonan)*3.),colors='k') ax1.add_collection3d(line3d2) # plot zero solution ax1.plot([.0,0],[.5,plane1_z],[.0,0],color='black',lw=1) # plot bifurcation planes X,Y = np.meshgrid(np.linspace(0,1,10),np.linspace(0,1,10)) ax1.plot_surface(X,0.*X+plane1_z,Y,alpha=.5,color='gray') ax1.plot_surface(X,0.*X+plane2_z,Y,alpha=.5,color='red') # plot plane intersections ax1.scatter(bify[bifx_int_p1],bifx[bifx_int_p1],bify2[bifx_int_p1],color='black',s=20) #ax1.scatter(bify[bifx_int_p2],bifx[bifx_int_p2],bify2[bifx_int_p2],color='black',s=20) #ax1.scatter(bif_diag2y_nonan[bif_diagx_int_p2],bif_diag1x_nonan[bif_diagx_int_p2],bif_diag1y_nonan[bif_diagx_int_p2],color='black',s=20) ax1.scatter(0,1.17,.51,color='red',s=20,zorder=10) ax1.scatter(.5,1.17,0.,color='red',s=40,zorder=10) ax1.scatter(.37,1.17,.37,color='red',s=50,zorder=10) """ ax1.scatter(L1[g_int_p2],g[g_int_p2],M1[g_int_p2],color='black',s=20) ax1.scatter(L2[g_int_p1],g[g_int_p1],M2[g_int_p1],color='black',s=20) ax1.scatter(L2[g_int_p2],g[g_int_p2],M2[g_int_p2],color='black',s=20) ax1.scatter(L3[g_int_p1],g[g_int_p1],M3[g_int_p1],color='black',s=20) ax1.scatter(L3[g_int_p2],g[g_int_p2],M3[g_int_p2],color='black',s=20) ax1.scatter(L4[g_int_p1],g[g_int_p1],M4[g_int_p1],color='black',s=20) ax1.scatter(L4[g_int_p2],g[g_int_p2],M4[g_int_p2],color='black',s=20) """ ## plot curves in 2d # bifurcation lines ax2.plot([plane1_z,plane1_z],[-1,1.8],color='black',alpha=.5,lw=2) ax2.plot([plane2_z,plane2_z],[-1,1.8],color='red',alpha=.5,lw=2) ax2.plot(bifx,bify,color='black') ax2.plot(bifx2,bify2,color='black') ax2.plot(bif_diag1x,bif_diag1y,color='black') ax2.plot(bif_diag2x,bif_diag2y,color='black') ax2.plot([0,5],[0,0],color='black') # label curves ax2.annotate(r'$x$-axis direction', xy=(1.04,.37),xycoords='data',textcoords='data', xytext=(.6,.6), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) 
ax2.annotate(r'$y$-axis direction', xy=(1.0,.0),xycoords='data',textcoords='data', xytext=(.55,.33), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate(r'$g^*$', xy=(.9,.0),xycoords='data',textcoords='data', xytext=(.8,.05), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate('Diagonal', xy=(1.1,.32),xycoords='data',textcoords='data', xytext=(1.4,.2), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate('Off-diagonal', xy=(1.4,.41),xycoords='data',textcoords='data', xytext=(1.5,.34), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) ax2.annotate('Off-diagonal', alpha=0., xy=(1.4,.62),xycoords='data',textcoords='data', xytext=(1.5,.34), arrowprops=dict(arrowstyle="-|>", connectionstyle="arc3", color='black'), ) # plot params ax1.view_init(20,-8) # set labels ax1.set_xlabel(r'$\nu_2$') ax2.set_xlabel(r'$g$') ax1.set_ylabel(r'$g$') ax2.set_ylabel(r'$\nu_1$') ax1.set_zlabel(r'$\nu_1$') ax1.set_xlim(0.,1.) ax2.set_xlim(.5,2.) ax1.set_ylim(.5,2.) ax2.set_ylim(-.05,1.) ax1.set_zlim(0.,1.) #plt.show() return fig
a471a8b510ed786080e2e5f1b3c8159cc211ff19
3,651,340
def _parse_variables(vars_list):
    """Transform the list of vars stored in a module definition into a dictionary."""
    vars = {}
    for var in vars_list:
        key = var['name']
        value = None
        for var_type in ATTRIBUTE_TYPE:
            if var_type in var:
                value = var[var_type]
                break
        vars[key] = value
    return vars
59c88815abf08efe72dcca9efce4970bcd072b91
3,651,341
import logging


def get_vertical_axes(nc_file):
    """
    Scan input netCDF file and return a list of vertical axis variables,
    requiring specific axis names
    """
    vertical_axes = []
    for var_name, var in nc_file.variables.items():
        if var_name in ('full_levels', 'half_levels'):
            vertical_axes.append(var)
    logging.info('Found %i vertical axes.', len(vertical_axes))
    return vertical_axes
f26b89d9d9839759f3b1ed7a990d548f996e29d2
3,651,342
def b_2_d(x):
    """
    Convert byte list to decimal
    :param x: byte list
    :return: decimal
    """
    s = 0
    for i in range(0, len(x)):
        s += x[i] * 2**i
    return s
e865700ea30be535ad014908d6b6024186cc5ac6
3,651,344
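A minimal usage sketch for b_2_d above, assuming the function is defined as shown; the input is a little-endian list of bits/bytes weighted by powers of two, and the values here are illustrative only.

# Hypothetical usage of b_2_d, assuming the definition above is importable.
bits = [1, 0, 1, 1]   # little-endian: 1*2**0 + 0*2**1 + 1*2**2 + 1*2**3
print(b_2_d(bits))    # -> 13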
def get(s, delimiter='', format="diacritical"):
    """Return pinyin of string, the string must be unicode
    """
    return delimiter.join(_pinyin_generator(u(s), format=format))
7369e133f73e9517fc20f6b95809ba615172feae
3,651,345
def top_dist(g1, g2, name='weight', topology_type=0): """ :param g1: graph 1 :param g2: graph 2 :param name: compared edge attribute :param topology_type: topology distance normalization method :return: topology distance """ max_v = max_edge(g1, name, max_edge(g2, name, 0)) # find max value in a graph v = 0 nodes_list = set(g1.nodes()) | set(g2.nodes()) # define nodes list in g1 or g2 degree1 = g1.degree(weight=name) # define degree of g1 degree2 = g2.degree(weight=name) # define degree of g2 for node in nodes_list: # consider each node if node in g1.nodes() and node in g2.nodes(): # node appears in both graphs nodes1 = set(g1.neighbors(node)) # adjacent nodes in g1 nodes2 = set(g2.neighbors(node)) - nodes1 # distinct adjacent nodes in g2 for node2 in nodes1: if node2 in g2.neighbors(node): v += abs(g1[node][node2][name]-g2[node][node2][name]) else: v += g1[node][node2][name] for node2 in nodes2: v += g2[node][node2][name] else: if node in g1.nodes(): # node appears only in g1 v += degree1[node] else: v += degree2[node] # node appears only in g2 v /= max_v if topology_type == 0: return v/len(nodes_list)/len(nodes_list) else: num_edges = len(set(g1.edges()) | set(g2.edges())) return v/num_edges/num_edges
2abf2e74b3a715861389b75bfce8bc3c609a77c1
3,651,346
def refresh_track():
    """
    For now the interface isn't refreshed
    :return:
    """
    try:
        url = request.form["url"]
    except KeyError:
        return "nok"
    with app.database_lock:
        Track.refresh_by_url(app.config["DATABASE_PATH"], url)
    return "ok"
47cf865ec01093735050e7abb15d65ef97d2e1ba
3,651,347
import pickle


def get_weights():
    """
    Loads uni-modal text and image CNN model weights.

    Returns:
        tuple: text and image weights.
    """
    text_weight_file = open("models/unimodal_text_CNN_weights.pickle", "rb")
    text_weights = pickle.load(text_weight_file)
    text_weight_file.close()
    image_weight_file = open("models/unimodal_image_CNN_LSTM_weights.pickle", "rb")
    image_weights = pickle.load(image_weight_file)
    image_weight_file.close()
    return text_weights, image_weights
abff59a197130f5776fdb0cacc3f895ff5d7393e
3,651,348
def get_data(start_runno, start_fileno, hall, fields): # pylint: disable=too-many-locals,too-many-branches """Pull the data requested, starting from first VALID run/file after/including the specified one""" val_dict = lambda: {'values': []} ad_dict = lambda: {f'AD{det}': val_dict() for det in dets_for(hall, start_runno)} wp_dict = lambda: {f'WP{det}': val_dict() for det in ['I', 'O']} result = {'runnos': [], 'filenos': [], 'metrics': { field_desc(field): wp_dict() if field.endswith('WP') else ad_dict() for field in fields }, # Send 'latest' so that frontend knows whether to disable END button 'latest': all_latest()} focus = focus_sql(hall, start_runno) try: end_runno, end_fileno = get_shifted(start_runno, start_fileno, hall, 1, skipfirst=False) except EndOfDataException: # return empty result, let caller decide how to proceed return result ad_fields = [f for f in fields if not f.endswith('WP')] wp_fields = [f[:-2] for f in fields if f.endswith('WP')] uniq_fields = list(set(ad_fields + wp_fields)) if any(f.endswith('counts') for f in uniq_fields): livetimes = {} rows = get_livetimes(start_runno, start_fileno, end_runno, end_fileno, hall) for runno, fileno, lt_ms in rows: livetimes[(runno, fileno)] = lt_ms / 1000 default_livetime = sum(livetimes.values()) / len(livetimes) field_sel = f', {",".join(uniq_fields)}' if uniq_fields else '' loc = loc_pred(start_runno, start_fileno, end_runno, end_fileno) query = f'''SELECT runno, fileno, detectorid {field_sel} FROM DqDetectorNew NATURAL JOIN DqDetectorNewVld vld LEFT JOIN runno_fileno_sitemask USING (runno, fileno) WHERE ({loc}) AND ({focus}) AND vld.sitemask = {sitemask(hall)} AND streamtype = 'Physics' ORDER BY runno, fileno, detectorid, insertdate''' rows = dq_exec(query).fetchall() def val_arr(field, det): if det >= 5: prefix = 'WP' det = 'O' if det == 6 else 'I' else: prefix = 'AD' return result['metrics'][field_desc(field)][f'{prefix}{det}']['values'] last_runno, last_fileno = None, None for row in rows: runno, fileno, det = row[:3] if runno != last_runno or fileno != last_fileno: result['runnos'].append(runno) result['filenos'].append(fileno) for each_ad in dets_for(hall, start_runno): for field in ad_fields: val_arr(field, each_ad).append(-2) # default value for each_wp in [5, 6]: for field in wp_fields: val_arr(field+'WP', each_wp).append(-2) for i, field in enumerate(uniq_fields): val = row[i+3] if field.endswith('counts'): try: norm = livetimes[(runno, fileno)] except KeyError: print(f'WARNING: Missing livetime for {runno}, {fileno}') norm = default_livetime if val is not None: # in case we got a NULL in this row val /= norm if val is None: val = -3 # NOTE If the loc_pred queries are slow due to IN, consider # simplifying those and instead doing a more precise AD check # here if field in ad_fields and det <= 4: val_arr(field, det)[-1] = val # replace default/older elif field in wp_fields and det >= 5: val_arr(field+'WP', det)[-1] = val last_runno, last_fileno = runno, fileno result['xs'] = scale_xs(result['runnos'], result['filenos'], (start_runno, start_fileno), (end_runno, end_fileno), hall) return result
e740952bf5419956bb86f214b01e4a8deb8e6ebc
3,651,349
def timelength_label_to_seconds(
    timelength_label: spec.TimelengthLabel,
) -> spec.TimelengthSeconds:
    """convert TimelengthLabel to seconds"""
    number = int(timelength_label[:-1])
    letter = timelength_label[-1]
    base_units = timelength_units.get_base_units()
    base_seconds = base_units['1' + letter]
    seconds = number * base_seconds
    return seconds
d0494fd2fabe07d0cae2dbc7c8c142b7b478533c
3,651,350
from typing import List


def getUrlsAlias() -> List[str]:
    """Get the aliases of all urls.py files."""
    obj = getEnvXmlObj()
    return obj.get_childnode_lists('alias/file[name=urls]')
be0f5a2b423a4fa9a58d9e60e2cc0d91f1d66949
3,651,351
def project_xarray(run: BlueskyRun, *args, projection=None, projection_name=None): """Produces an xarray Dataset by projecting the provided run. EXPERIMENTAL: projection code is experimental and could change in the near future. Projections come with multiple types: linked, and caclulated. Calculated fields are only supported in the data (not at the top-level attrs). Projected fields will be inserted into the resulting xarray.Dataset Parameters ---------- run : BlueskyRun run to project projection_name : str, optional name of a projection to select in the run, by default None projection : dict, optional projection not from the run to use, by default None Returns ------- xarray.Dataset The return Dataset will contain: - single value meta data (from the run start) in the return Dataset's attrs dict, keyed on the projection key. These are projections marked "location": "start" - single value meta data (from a streams configuration field) in the return Dataset's xarray's dict, keyed on the projection key. These are projections marked "location": "configuration" - multi-value data (from a stream). Keys for the dict-like xarray.Dataset match keys in the passed-in projection. These are projections with "location": "linked"...note that every xarray for a field froma given stream will contain a reference to the same set of configuration attrs for as all fields from the same stream Dataset |_attrs |_'projection_start_field': value |_data |_ 'projection_event_field': xarray |_ attrs |_'projection_configuration_field': value Raises ------ ProjectionError """ attrs = {} # will populate the return Dataset attrs field data_vars = {} # will populate the return Dataset DataArrays stream_configurations = {} # will populate a collection of dicts of stream configurations def metadata_cb(field, value): attrs[field] = value def event_configuration_cb( projection_field, stream, config_index, config_device, config_field, value): if stream not in stream_configurations: stream_configurations[stream] = [] if len(stream_configurations[stream]) == 0: stream_configurations[stream].append({}) if config_device not in stream_configurations[stream][config_index]: stream_configurations[stream][config_index][config_device] = {} stream_configurations[stream][config_index][config_device][config_field] = value def event_field_cb(projection_field, stream, field, xarray: xarray.DataArray): if projection_field not in stream_configurations: stream_configurations[stream] = [] # associate the stream configuration to the xarrays's atrtrs xarray.attrs['configuration'] = stream_configurations[stream] data_vars[projection_field] = xarray # Use the callbacks defined above to project the run and build up a return xarray.Dataset projector = Projector( metadata_cb=metadata_cb, event_configuration_cb=event_configuration_cb, event_field_cb=event_field_cb) projector.project(run, projection=projection, projection_name=projection_name) dataset = xarray.Dataset(data_vars, attrs=attrs) return dataset, projector.issues
8960b68090601c0a83da4ebb82c4b97f3751282f
3,651,352
def collect_users():
    """Collect a list of all Santas from the user"""
    list_of_santas = []
    while 1:
        item = input("Enter a name\n")
        if not item:
            break
        list_of_santas.append(item)
    return list_of_santas
d86ec360518fdb497b86b7f631fee0dc4464e2bb
3,651,353
def check_role_exists(role_name, access_key, secret_key):
    """
    Check whether the given IAM role already exists in the AWS Account

    Args:
        role_name (str): Role name
        access_key (str): AWS Access Key
        secret_key (str): AWS Secret Key

    Returns:
        Boolean: True if the role exists else False
    """
    iam_client = get_iam_client(access_key, secret_key)
    try:
        role = iam_client.get_role(RoleName=role_name)
        return True if role else False
    except:
        return False
cd6f118424ca17f6e65e28abefed39e89bd66b95
3,651,354
def group_delay(group_key, flights): """ Group the arrival delay flights based on keys. :param group_key: Group key to use for categorization. :param flights: List of flights matching from an origin airport. :return: Dictionary containing the list of flights grouped. """ dict_of_group_flights = defaultdict(list) if group_key == 'distance': global distance_range # segmentation every distance range # Remove duplicate value & Get the maximum distance distance_set = set() for flight in flights: distance_set.add(int(flight['distance'])) distance_list = sorted(list(distance_set)) max_distance = max(distance_list) # Segment into Ranges temp_dict = defaultdict(list) for flight in flights: distance_limit = 0 while distance_limit <= max_distance: if int(flight[group_key]) in range(distance_limit, distance_limit + distance_range): time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None if time_of_arrival is not None and time_of_arrival < 0: distance_ranges = str(distance_limit) + " - " + str(distance_limit + distance_range) + " miles" temp_dict[distance_ranges].append(time_of_arrival) distance_limit += distance_range elif group_key == 'day_of_week': temp_dict = defaultdict(list) for flight in flights: time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None if time_of_arrival is not None and time_of_arrival < 0: name_of_day = get_day_name(int(flight[group_key])) temp_dict[name_of_day].append(time_of_arrival) else: temp_dict = defaultdict(list) for flight in flights: time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None if time_of_arrival is not None and time_of_arrival < 0: temp_dict[flight[group_key]].append(time_of_arrival) # Overall Arrival Delay in "<minimum> - <maximum> minute(s) late" format for key, delay_list in temp_dict.iteritems(): fastest_delay = str(abs(max(delay_list))) longest_delay = str(abs(min(delay_list))) if fastest_delay == longest_delay: dict_of_group_flights[key].append(fastest_delay + " minute(s) late") else: dict_of_group_flights[key].append(fastest_delay + " - " + longest_delay + " minute(s) late") return dict_of_group_flights
0ae760f7da7762b97d6d7a5d5503b280ed39f855
3,651,355
def create_matrix( score_same_brackets, score_other_brackets, score_reverse_brackets, score_brackets_dots, score_two_dots, add_score_for_seq_match, mode='simple'): """ Function that create matrix that can be used for further analysis, please take note, that mode must be the same in case of matrix and multiple sequence alignment, otherwise random-like effects will occur :param score_same_brackets: int, score for the same tye of brackets like ( and ( :param score_other_brackets: int, score for different type of brackets like ( and [ :param score_reverse_brackets: int, socre for reverse brackets like ( and ) :param score_brackets_dots: int, socre for brakcet and dot like ( and . :param score_two_dots: int, score for two dots like . and . :param add_score_for_seq_match: int, value to add if sequence letter is the same :param mode: string, simple - only level one pseudoknots, pseudo - multiplelevel of pseudoknots :return: string containing matrix that can be saved """ header = " A C D E F G H I K L M " \ "N P Q R S T V W Y" matrix = defaultdict(dict) if mode == 'simple': for letter1 in LETTERS: nucleotide1 = None dot_bracket1 = None for nucleotide in SIMPLE_CONVERSION: for dot_bracket in SIMPLE_CONVERSION[nucleotide]: if SIMPLE_CONVERSION[nucleotide][dot_bracket] == letter1: nucleotide1 = nucleotide dot_bracket1 = dot_bracket for letter2 in LETTERS: nucleotide2 = None dot_bracket2 = None for nucleotide in SIMPLE_CONVERSION: for dot_bracket in SIMPLE_CONVERSION[nucleotide]: if SIMPLE_CONVERSION[nucleotide][dot_bracket] == \ letter2: nucleotide2 = nucleotide dot_bracket2 = dot_bracket score = score_brackets( dot_bracket1, dot_bracket2, score_same_brackets, score_other_brackets, score_reverse_brackets, score_brackets_dots, score_two_dots) if nucleotide1 == nucleotide2: score += add_score_for_seq_match matrix[letter1][letter2] = score elif mode == 'pseudo': for letter1 in LETTERS: dot_bracket1 = None for dot_bracket in PSEUDOKNOT_CONVERSION: if PSEUDOKNOT_CONVERSION[dot_bracket] == letter1: dot_bracket1 = dot_bracket for letter2 in LETTERS: score = 0 dot_bracket2 = None for dot_bracket in PSEUDOKNOT_CONVERSION: if PSEUDOKNOT_CONVERSION[dot_bracket] == letter2: dot_bracket2 = dot_bracket if dot_bracket2 is not None and dot_bracket1 is not None: score = score_brackets( dot_bracket1, dot_bracket2, score_same_brackets, score_other_brackets, score_reverse_brackets, score_brackets_dots, score_two_dots) matrix[letter1][letter2] = score else: print('Wrong mode') text = [header] for letter1 in LETTERS: string = [letter1, ' '] for letter2 in LETTERS: score = matrix[letter1][letter2] string.append(str(score).rjust(5)) text.append("".join(string)) return "\n".join(text)
7979a72b70ae2910051943c714676aec3d291dbc
3,651,356
def view_inv(inventory_list):
    """list -> str

    Build a display string of the Rental attributes for each item
    in the inventory list.
    """
    inventory_string = ''
    for item in inventory_list:
        inventory_string += ('\nRental: ' + str(item[0]) +
                             '\nQuantity: ' + str(item[1]) +
                             '\nDeposit: ' + "$" + str(item[2]) +
                             "\nPrice Per Week: " + "$" + str(item[3]) +
                             '\nReplacement Value: ' + "$" + str(int(item[4])) + "\n")
    return inventory_string
540b6bb2597ba5686a070749c2526ad09be25d5f
3,651,357
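A small, hedged example of calling view_inv above; the tuple layout (name, quantity, deposit, weekly price, replacement value) is inferred from the string the function builds, and the items are made up.

# Example inventory rows; field order inferred from view_inv's output string.
inventory = [("Lawn Mower", 5, 30, 12, 150.0),
             ("Chainsaw", 2, 50, 20, 300.0)]
print(view_inv(inventory))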
def generate_smb_proto_payload(*protos):
    """Generate SMB protocol payload.

    Pack the protos in order.
    """
    hexdata = []
    for proto in protos:
        hexdata.extend(proto)
    return "".join(hexdata)
848fdad11941a6d917bd7969fb7ffb77025cd13d
3,651,358
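A sketch of how generate_smb_proto_payload above can be driven, assuming each proto is an iterable of string fragments; the byte values here are placeholders for illustration, not a real SMB packet.

# Placeholder fragments only -- not a valid SMB packet.
netbios = ["\x00", "\x00\x00\x45"]
smb_header = ["\xff\x53\x4d\x42"]
payload = generate_smb_proto_payload(netbios, smb_header)
print(repr(payload))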
import numpy as np


def FeatureGrad_LogDet(grad_feature):
    """Part of the RegTerm inside the integral

    It calculates the logarithm of the determinant of the matrix [N_y x N_y]
    given by the scalar product of the gradients along the N_x axis.

    Args:
        grad_feature (array_like): [N_samples, N_y, N_x], where N_x is the
            input space and N_y the feature space.

    Returns:
        (array_like): [N_samples]
    """
    # Case of 1d feature
    if len(grad_feature.shape) == 2:
        grad_feature = grad_feature[:, np.newaxis, :]
    matrix_j = grad_feature @ grad_feature.swapaxes(1, -1)
    s, d = np.linalg.slogdet(matrix_j)
    # return s*d
    # We remove terms with zero s (i.e. errors)
    return s[s != 0] * d[s != 0]
a32b472c6c69b441be52911f5a2f82011c5cab00
3,651,359
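A quick numerical check of FeatureGrad_LogDet above with random gradients; the shapes follow the docstring ([N_samples, N_y, N_x]) and the values are synthetic.

import numpy as np

rng = np.random.default_rng(0)
grad = rng.normal(size=(8, 3, 5))       # N_samples=8, N_y=3, N_x=5
print(FeatureGrad_LogDet(grad).shape)   # at most (8,) -- zero-sign terms are dropped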
import pandas as pd


def get_every_second_indexes(ser: pd.Series, even_index=True) -> pd.core.series.Series:
    """Return all rows where the index is either even or odd.

    If even_index is True return every index where idx % 2 == 0
    If even_index is False return every index where idx % 2 != 0
    Assume default indexing i.e. 0 -> n
    """
    idx = 0 if even_index else 1
    return ser.iloc[idx::2]
eb8c3b3a377c34e047d7daa525226cac18e21b7b
3,651,360
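A short usage example for get_every_second_indexes above, using a default-indexed pandas Series.

import pandas as pd

ser = pd.Series(['a', 'b', 'c', 'd', 'e'])
print(get_every_second_indexes(ser).tolist())                    # ['a', 'c', 'e']
print(get_every_second_indexes(ser, even_index=False).tolist())  # ['b', 'd']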
import nltk


def preprocess_input(text):
    """
    Tokenize the cleaned text.

    :param text: cleaned text
    :return: text tokenized into sentences and words, ready to be fed into the analysis
    """
    sentences = nltk.sent_tokenize(text)
    tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
    return tokens
902c1aa5fc98ad5180ef7db670afbc972089a307
3,651,362
def create_count_dictionaries_for_letter_placements(all_words_list):
    """Returns a tuple of dictionaries where the index of the tuple is the
    counts for that index of each word

    >>> create_count_dictionaries_for_letter_placements(all_words_list)
    (dictPosition0, dictPosition1, dictPosition2, dictPosition3, dictPosition4)

    For example:
    dictPosition0 has the counts of all characters (a-z) in the first position of all the words.
    dictPosition3 has the counts of all characters (a-z) in the fourth position of all the words.
    """
    dictPosition0 = create_dictionary_of_characters_at_word_index(0, all_words_list)
    dictPosition1 = create_dictionary_of_characters_at_word_index(1, all_words_list)
    dictPosition2 = create_dictionary_of_characters_at_word_index(2, all_words_list)
    dictPosition3 = create_dictionary_of_characters_at_word_index(3, all_words_list)
    dictPosition4 = create_dictionary_of_characters_at_word_index(4, all_words_list)
    return dictPosition0, dictPosition1, dictPosition2, dictPosition3, dictPosition4
4d45ddda36c64ccfb357367521aa1983d738ab7b
3,651,363
def parse_known(key, val) -> str:
    """Maps a string from HTML to a function for parsing.

    Args:
        key: string from html
        val: associated value in html

    Returns:
        str
    """
    key_to_func = {}
    key_to_func["left"] = parse_number
    key_to_func["top"] = parse_number
    key_to_func["width"] = parse_number
    key_to_func["font-size"] = parse_number
    key_to_func["color"] = parse_color
    if key in key_to_func:
        return key_to_func[key](key, val)
    else:
        return val
680a38496c368e7bd13f5578f4312914ac63c7f7
3,651,364
def getRecordsPagination(page, filterRecords=''): """ get all the records created by users to list them in the backend welcome page """ newpage = int(page)-1 offset = str(0) if int(page) == 1 \ else str(( int(conf.pagination) *newpage)) queryRecordsPagination = """ PREFIX prov: <http://www.w3.org/ns/prov#> PREFIX base: <"""+conf.base+"""> SELECT DISTINCT ?g ?title ?userLabel ?modifierLabel ?date ?stage WHERE { GRAPH ?g { ?s ?p ?o . OPTIONAL { ?g rdfs:label ?title; prov:wasAttributedTo ?user; prov:generatedAtTime ?date ; base:publicationStage ?stage. ?user rdfs:label ?userLabel . OPTIONAL {?g prov:wasInfluencedBy ?modifier. ?modifier rdfs:label ?modifierLabel .} } OPTIONAL {?g rdfs:label ?title; prov:generatedAtTime ?date ; base:publicationStage ?stage . } BIND(COALESCE(?date, '-') AS ?date ). BIND(COALESCE(?stage, '-') AS ?stage ). BIND(COALESCE(?userLabel, '-') AS ?userLabel ). BIND(COALESCE(?modifierLabel, '-') AS ?modifierLabel ). BIND(COALESCE(?title, 'none', '-') AS ?title ). filter not exists { ?g prov:generatedAtTime ?date2 filter (?date2 > ?date) } } """+filterRecords+""" FILTER( str(?g) != '"""+conf.base+"""vocabularies/' ) } ORDER BY DESC(?date) LIMIT """+conf.pagination+""" OFFSET """+offset+""" """ records = list() sparql = SPARQLWrapper(conf.myEndpoint) sparql.setQuery(queryRecordsPagination) sparql.setReturnFormat(JSON) results = sparql.query().convert() for result in results["results"]["bindings"]: records.append( (result["g"]["value"], result["title"]["value"], result["userLabel"]["value"], result["modifierLabel"]["value"], result["date"]["value"], result["stage"]["value"] )) return records
97221f9cfebe615744bc3ef488e8daf3ddc0dca4
3,651,365
import scipy.integrate def integrate_intensity(data_sets, id, nθ, iN, NCO2, color1, color2): """Integrate intensity ove angle theta Arguments: data_sets {[type]} -- [description] id {[type]} -- [description] nθ {[type]} -- [description] iN {[type]} -- [description] NCO2 {[type]} -- [description] color1 {[type]} -- [description] color2 {[type]} -- [description] Returns: [type] -- [description] """ θ_0 = np.deg2rad(data_sets.get(id, iN, 0, iθ)[0]) # theta θ_1 = np.deg2rad(data_sets.get(id, iN, 1, iθ)[0]) # theta θ_2 = np.deg2rad(data_sets.get(id, iN, 2, iθ)[0]) # theta I_0 = data_sets.get(id, iN, 0, iI)[-1] # intensity at TOA I_1 = data_sets.get(id, iN, 1, iI)[-1] # intensity at TOA I_2 = data_sets.get(id, iN, 2, iI)[-1] # intensity at TOA # qubic approximation of I(θ) R1 = I_1 - I_0 R2 = I_2 - I_0 a0 = I_0 det = θ_1**2 * θ_2**3 - θ_1**3 * θ_2**2 a2 = (R1 * θ_2**3 - R2 * θ_1**3) / det a3 = (R2 * θ_1**2 - R1 * θ_2**2) / det c1 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x), 0.0, np.pi*0.5)# θ_2) c2 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x)*x**2, 0.0, np.pi*0.5)# θ_2) c3 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x)*x**3, 0.0, np.pi*0.5)# θ_2) θ = np.mgrid[0.0:np.pi*0.5:100j] I = a0 + a2*θ**2 + a3*θ**3 # plot plt.plot(θ, I, color1, label='%d ppm, cubic approximation' % NCO2) plt.plot([θ_0, θ_1, θ_2], [I_0, I_1, I_2], color2+'o', label="%d ppm, computed" % NCO2) plt.xlabel("angle θ [rad]") plt.ylabel("TOA flux I(θ) [W/m²]") plt.legend(loc='best') # integrated intensity Iint = 2.0*np.pi * (a0*c1[0] + a2*c2[0] + a3*c3[0]) return Iint
8336347f8fbe9c690800ae3efec185ba1a0e610d
3,651,366
def new(request, pk=""): """ New CodeStand Entry When user presses 'Associate new project' there is a Project Container associated, then you need reuse this information in the form :param request: HttpResponse :param pk: int - Indicates which project must be loaded """ if request.path != request.session[constants.ACTUAL_TEMPLATE]: clear_session(request) request.session[constants.REM_LINKS] = [] request.session[constants.REM_TAGS] = [] request.session[constants.REM_DOCS] = [] request.session[constants.REM_CONTACTS] = [] request.session[constants.ADD_CONTACTS] = [] request.session[constants.ADD_LINKS] = [] request.session[constants.ADD_TAGS] = [] request.session[constants.ADD_DOCS] = [] request.session[constants.MAINTAIN_STATE] = True if pk != "": request.session[constants.ACTUAL_PROJECT] = get_object_or_404(ProjectContainer, id=pk) # User must have permission to add new CodeStand if not is_user_allowed(request.user, "canaddmatch"): raise Http404 return save_code(request, False, pk)
432949f5d7ae6869078c729d86bafabac0f17871
3,651,367
def sideral(
    date, longitude=0.0, model="mean", eop_correction=True, terms=106
):  # pragma: no cover
    """Sideral time as a rotation matrix
    """
    theta = _sideral(date, longitude, model, eop_correction, terms)
    return rot3(np.deg2rad(-theta))
01f3209db8996ad1e11ded48da26d286253c5f7d
3,651,368
from splitgraph.core.output import conn_string_to_dict from typing import Type import click def _make_mount_handler_command( handler_name: str, handler: Type[ForeignDataWrapperDataSource] ) -> Command: """Turn the mount handler function into a Click subcommand with help text and kwarg/connection string passing""" help_text, handler_options_help = _generate_handler_help(handler) params = [ click.Argument(["schema"]), click.Option( ["--connection", "-c"], help="Connection string in the form username:password@server:port", ), click.Option( ["--handler-options", "-o"], help=handler_options_help, default="{}", type=JsonType() ), ] def _callback(schema, connection, handler_options): handler_options.update(conn_string_to_dict(connection)) mount(schema, mount_handler=handler_name, handler_kwargs=handler_options) cmd = click.Command(handler_name, params=params, callback=_callback, help=help_text) return cmd
0e8aa0cf3973c265e0df2b1815afd65042fa5d14
3,651,369
def test_load_settings_onto_instrument(tmp_test_data_dir): """ Test that we can successfully load the settings of a dummy instrument """ # Always set datadir before instruments set_datadir(tmp_test_data_dir) def get_func(): return 20 tuid = "20210319-094728-327-69b211" instr = Instrument("DummyInstrument") # A parameter that is both settable and gettable instr.add_parameter( "settable_param", initial_value=10, parameter_class=ManualParameter ) # A parameter that is only gettable instr.add_parameter("gettable_param", set_cmd=False, get_cmd=get_func) # A boolean parameter that is True by defualt instr.add_parameter( "boolean_param", initial_value=True, parameter_class=ManualParameter ) # A parameter which is already set to None instr.add_parameter( "none_param", initial_value=None, parameter_class=ManualParameter, vals=validators.Numbers(), ) # A parameter which our function will try to set to None, giving a warning instr.add_parameter( "none_param_warning", initial_value=1, parameter_class=ManualParameter, vals=validators.Numbers(), ) # The snapshot also contains an 'obsolete_param', that is not included here. # This represents a parameter which is no longer in the qcodes driver. with pytest.warns( UserWarning, match="Parameter none_param_warning of instrument DummyInstrument could not be " "set to None due to error", ): load_settings_onto_instrument(instr, tuid) with pytest.warns( UserWarning, match="Could not set parameter obsolete_param in DummyInstrument. " "DummyInstrument does not possess a parameter named obsolete_param.", ): load_settings_onto_instrument(instr, tuid) assert instr.get("IDN") == { "vendor": None, "model": "DummyInstrument", "serial": None, "firmware": None, } assert instr.get("settable_param") == 5 assert instr.get("gettable_param") == 20 assert instr.get("none_param") is None assert instr.get("none_param_warning") == 1 assert not instr.get("boolean_param") instr.close()
96f3d96b7a83989c390bcc629f39df618c553056
3,651,370
import html def dashboard_3_update_graphs(n_intervals): """Update all the graphs.""" figures = load_data_make_graphs() main_page_layout = html.Div(children=[ html.Div(className='row', children=[ make_sub_plot(figures, LAYOUT_COLUMNS[0]), make_sub_plot(figures, LAYOUT_COLUMNS[1]), ]), html.Div(className='row', children=[ make_sub_plot(figures, LAYOUT_COLUMNS[2]), make_sub_plot(figures, LAYOUT_COLUMNS[3]), ]), ]) return main_page_layout
b0b67dd06540ffff3c09d2c0ce87d0e5edb44bdf
3,651,371
import numpy as np from mvpa2.datasets import Dataset import copy def fx(sl, dataset, roi_ids, results): """this requires the searchlight conditional attribute 'roi_feature_ids' to be enabled""" resmap = None probmap = None for resblock in results: for res in resblock: if resmap is None: # prepare the result container resmap = np.zeros((len(res), dataset.nfeatures), dtype=res.samples.dtype) observ_counter = np.zeros(dataset.nfeatures, dtype=int) #project the result onto all features -- love broadcasting! resmap[:, res.a.roi_feature_ids] += res.samples # increment observation counter for all relevant features observ_counter[res.a.roi_feature_ids] += 1 # when all results have been added up average them according to the number # of observations observ_mask = observ_counter > 0 resmap[:, observ_mask] /= observ_counter[observ_mask] # transpose to make broadcasting work -- creates a view, so in-place # modification still does the job result_ds = Dataset(resmap, fa={'observations': observ_counter}) if 'mapper' in dataset.a: result_ds.a['mapper'] = copy.copy(dataset.a.mapper) return result_ds
6b5e968882d0fe2e27c9302bc2b821509cfaafa1
3,651,372
import pathlib
import stat


def check_file(file_name):
    """
    Test if file exists and is writable, or can be created.

    Args:
        file_name (str): the file name

    Returns:
        (pathlib.Path): the path or None if problems
    """
    if not file_name:
        return None

    path = pathlib.Path(file_name)

    # if file exists test if writable
    if path.exists() and path.is_file():
        handle = None
        try:
            handle = open(path, 'w')
        except PermissionError:
            return None
        finally:
            if handle:
                handle.close()

    # create file with write permissions
    try:
        path.touch(stat.S_IWUSR)
    except PermissionError:
        return None

    return path
5b8ff64795aa66d3be71444e158357c9b7a1b2c0
3,651,373
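A hedged usage sketch for check_file above; the file name is illustrative, and the call simply reports whether the path is usable (note that a missing file gets created by the touch call).

# 'results.txt' is an illustrative path, not one from the original project.
path = check_file('results.txt')
if path is None:
    print('file is not writable and cannot be created')
else:
    print('ready to write to', path)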
async def push(request):
    """Push handler. Authenticate, then return generator."""
    if request.method != "POST":
        return 405, {}, "Invalid request"
    fingerprint = authenticate(request)
    if not fingerprint:
        return 403, {}, "Access denied"
    # Get given file
    payload = await request.get_body(100 * 2 ** 20)  # 100 MiB limit
    # Also validate it
    if not validate_payload(request, payload):
        return 403, {}, "Payload could not be verified."
    # Return generator -> do a deploy while streaming feedback on status
    gen = push_generator(fingerprint, payload)
    return 200, {"content-type": "text/plain"}, gen
aeedd0c0c336b756898a98460e15a24b3411c5c2
3,651,374
import os


def getfile(url, outdir=None):
    """Function to fetch files using urllib

    Works with ftp
    """
    fn = os.path.split(url)[-1]
    if outdir is not None:
        fn = os.path.join(outdir, fn)
    if not os.path.exists(fn):
        # Find appropriate urlretrieve for Python 2 and 3
        try:
            from urllib.request import urlretrieve
        except ImportError:
            from urllib import urlretrieve
        print("Retrieving: %s" % url)
        # Add progress bar
        urlretrieve(url, fn)
    return fn
9cf70384fd81f702c29316e51fed9ca80802f022
3,651,375
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    return (isbuiltin(object)
            or isfunction(object)
            or ismethod(object)
            or ismethoddescriptor(object))
386893f99c8cdd00c5523ef7ce052784e6ae9ca8
3,651,376
import json def account_upload_avatar(): """Admin Account Upload Avatar Action. *for Ajax only. Methods: POST Args: files: [name: 'userfile'] Returns: status: {success: true/false} """ if request.method == 'POST': re_helper = ReHelper() data = request.files['userfile'] filename = re_helper.r_slash(data.filename.encode('utf-8')) helper = UpYunHelper() url = helper.up_to_upyun('account', data, filename) if url: return json.dumps({'success': 'true', 'url': url}) else: return json.dumps({'success': 'false'})
c17f93eb7e5e750508aa34aedeaffb5b9410f15e
3,651,377
def get_front_end_url_expression(model_name, pk_expression, url_suffix=''):
    """
    Gets an SQL expression that returns a front-end URL for an object.

    :param model_name: key in settings.DATAHUB_FRONTEND_URL_PREFIXES
    :param pk_expression: expression that resolves to the pk for the model
    :param url_suffix: Optional: string appended to the end of the url
    """
    return Concat(
        Value(f'{settings.DATAHUB_FRONTEND_URL_PREFIXES[model_name]}/'),
        pk_expression,
        Value(url_suffix),
    )
c763f1d891f35c36823bb2fb7791a3d145f57164
3,651,378
def mock_config_entry() -> MockConfigEntry:
    """Return the default mocked config entry."""
    return MockConfigEntry(
        title="KNX",
        domain=KNX_DOMAIN,
        data={
            CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
            ConnectionSchema.CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
            ConnectionSchema.CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
            CONF_KNX_CONNECTION_TYPE: CONF_KNX_AUTOMATIC,
        },
    )
e8585d7c0f793e0636f29bb91d5533fc22a14a4b
3,651,379
def load_escores(name_dataset, classifier, folds):
    """Return the escore for the given fold.
    """
    escores = []
    escores.append(load_dict_file("escores/" + name_dataset
                                  + "_" + classifier
                                  + "_escore_grid_train" + str(folds)))
    return escores
    # NOTE: the per-fold loop below is unreachable dead code in the original snippet.
    for index in range(folds):
        escores.append(load_dict_file("escores/" + name_dataset
                                      + "_" + classifier
                                      + "_escore_grid_train" + str(index)))
    return escores
6afa5b7db023bec903dbab26203edc5d7c162280
3,651,380
def get_devices(): """ will also get devices ready :return: a list of avaiable devices names, e.g., emulator-5556 """ ret = [] p = sub.Popen(settings.ADB + ' devices', stdout=sub.PIPE, stderr=sub.PIPE, shell=True) output, errors = p.communicate() print output segs = output.split("\n") for seg in segs: device = seg.split("\t")[0].strip() if seg.startswith("emulator-"): p = sub.Popen(settings.ADB + ' -s ' + device + ' shell getprop init.svc.bootanim', stdout=sub.PIPE, stderr=sub.PIPE, shell=True) output, errors = p.communicate() if output.strip() != "stopped": time.sleep(10) print "waiting for the emulator:", device return get_devices() else: ret.append(device) assert len(ret) > 0 return ret
b3b3a377483f694ecac7f57f6f76a40727be4eee
3,651,381
import logging import tqdm import multiprocessing def _proc_event_full(st, **kwargs): """ processings including :param st: :param kwargs: :return: """ # instrument response removal, spectral whitening, temporal normalization # autocorrelation and filter, then output results. def iter3c(stream): # for an event, there is always "onset" return IterMultipleComponents(stream, key="onset", number_components=(2, 3)) # resp removal, rotation, spectral whitening, temporal normalization tasks = iter3c(st) # loop over streams in each stream containing the 3-component traces do_work = partial(_proc_event_rst, **kwargs) njobs = kwargs["njobs"] numbers = [] logging.info("deep processing for full event correlogram.") print("deep processing for full event correlogram.") if njobs == 1: logging.info('do work sequential (%d cores)', njobs) for task in tqdm(tasks, total=len(tasks)): num = do_work(task) numbers.append(num) else: logging.info('do work parallel (%d cores)', njobs) pool = multiprocessing.Pool(njobs) for num in tqdm(pool.imap_unordered(do_work, tasks), total=len(tasks)): numbers.append(num) pool.close() pool.join() logging.info("%d/%d files processed.", sum(numbers), len(tasks))
cd069f684fba1a8d037aa8d08245859098b25c1f
3,651,382
import types


def optional_is_none(context, builder, sig, args):
    """Check if an Optional value is invalid
    """
    [lty, rty] = sig.args
    [lval, rval] = args
    # Make sure None is on the right
    if lty == types.none:
        lty, rty = rty, lty
        lval, rval = rval, lval
    opt_type = lty
    opt_val = lval
    del lty, rty, lval, rval
    opt = context.make_optional(opt_type)(context, builder, opt_val)
    res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
    return impl_ret_untracked(context, builder, sig.return_type, res)
a00b6725b43e8d09e8261b228f58319a27b191f9
3,651,383
import logging def get_volume_disk_capacity(pod_name, namespace, volume_name): """ Find the container in the specified pod that has a volume named `volume_name` and run df -h or du -sb in that container to determine the available space in the volume. """ api = get_api("v1", "Pod") res = api.get(name=pod_name, namespace=namespace) if res.kind == "PodList": # make sure there is only one pod with the requested name if len(res.items) == 1: pod = res.items[0] else: return {} else: pod = res containers = pod.spec.get("initContainers", []) + pod.spec.get("containers", []) for container in containers: for volume_mount in container.get("volumeMounts", []): if volume_mount.get("name") == volume_name: mount_path = volume_mount.get("mountPath") volume = list(filter(lambda x: x.name == volume_name, pod.spec.volumes)) volume = volume[0] if len(volume) == 1 else {} if ( "emptyDir" in volume.keys() and volume["emptyDir"].get("sizeLimit") is not None ): # empty dir is used for the session command = ["sh", "-c", f"du -sb {mount_path}"] used_bytes = parse_du_command( pod_exec( pod_name, namespace, container.name, command, ) ) total_bytes = convert_to_bytes(volume["emptyDir"]["sizeLimit"]) available_bytes = ( 0 if total_bytes - used_bytes < 0 else total_bytes - used_bytes ) return { "total_bytes": total_bytes, "used_bytes": used_bytes, "available_bytes": available_bytes, } else: # PVC is used for the session command = ["sh", "-c", f"df -Pk {mount_path}"] try: disk_cap_raw = pod_exec( pod_name, namespace, container.name, command, ) except ApiException: disk_cap_raw = "" logging.warning( f"Checking disk capacity failed with {pod_name}, " f"{namespace}, {container.name}, {command}." ) else: logging.info( f"Checking disk capacity succeeded with {pod_name}, " f"{namespace}, {container.name}, {command}." ) disk_cap = parse_df_command(disk_cap_raw) # make sure `df -h` returned the results from only one mount point if len(disk_cap) == 1: return disk_cap[0] return {}
29d20c5dfb481be9a58dec85a23e20b9abd9cc5f
3,651,384
import re


def is_valid_battlefy_id(battlefy_id: str) -> bool:
    """
    Verify a str is a Battlefy Id: (20 <= length < 30) and made up of
    hexadecimal characters only.

    :param battlefy_id:
    :return: Validity true/false
    """
    # bool() ensures the declared bool return type (re.match returns a Match object or None)
    return bool(20 <= len(battlefy_id) < 30 and re.match("^[A-Fa-f0-9]*$", battlefy_id))
ba3f79f4897425b87962f04506fdff1da684c122
3,651,385
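Two quick checks for is_valid_battlefy_id above; the id strings are made up to illustrate the length and hex-character rules.

print(is_valid_battlefy_id("5a2b3c4d5e6f7a8b9c0d1e2f"))  # True: 24 hex characters
print(is_valid_battlefy_id("not-a-battlefy-id-string"))  # False: non-hex characters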
def echo(word: str, n: int, toupper: bool = False) -> str:
    """
    Repeat a given word some number of times.

    :param word: word to repeat
    :type word: str
    :param n: number of repeats
    :type n: int
    :param toupper: return in all caps?
    :type toupper: bool
    :return: result
    :return type: str
    """
    res = word * n
    if toupper:
        res = res.upper()
    return res
62a68c1ff577781a84a58f124beec8d31b0b456c
3,651,386
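A minimal doctest-style example for echo above.

print(echo("ha", 3))                # 'hahaha'
print(echo("ha", 2, toupper=True))  # 'HAHA'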
def verify_package_info(package_info):
    """Check if package_info points to a valid package dir (i.e. contains
    at least an osg/ dir or an upstream/ dir).
    """
    url = package_info['canon_url']
    rev = package_info['revision']
    command = ["svn", "ls", url, "-r", rev]
    out, err = utils.sbacktick(command, clocale=True, err2out=True)
    if err:
        raise SVNError("Exit code %d getting SVN listing of %s (rev %s). Output:\n%s"
                       % (err, url, rev, out))
    for line in out.split("\n"):
        if line.startswith('osg/') or line.startswith('upstream/'):
            return True
    return False
6a38e50c2ab121260cbaaa9c188f2470784e65fb
3,651,387