content: string (39 to 14.9k chars)
sha1: string (40 chars)
id: int64 (0 to 710k)
def _create_trip_from_stack(temp_trip_stack, origin_activity, destination_activity):
    """
    Aggregate information of trip elements in a structured dictionary.

    Parameters
    ----------
    temp_trip_stack : list
        List of dictionary-like elements (either pandas series or python
        dictionaries). Contains all elements that will be aggregated into a trip.
    origin_activity : dictionary like
        Either dictionary or pandas series
    destination_activity : dictionary like
        Either dictionary or pandas series

    Returns
    -------
    dictionary
    """
    # this function returns an empty dict if no tripleg is in the stack
    first_trip_element = temp_trip_stack[0]
    last_trip_element = temp_trip_stack[-1]

    # all data has to be from the same user
    assert origin_activity['user_id'] == last_trip_element['user_id']

    # double check if trip requirements are fulfilled
    assert origin_activity['activity'] == True
    assert destination_activity['activity'] == True
    assert first_trip_element['activity'] == False

    trip_dict_entry = {'user_id': origin_activity['user_id'],
                       'started_at': first_trip_element['started_at'],
                       'finished_at': last_trip_element['finished_at'],
                       'origin_staypoint_id': origin_activity['id'],
                       'destination_staypoint_id': destination_activity['id'],
                       'tpls': [tripleg['id'] for tripleg in temp_trip_stack
                                if tripleg['type'] == 'tripleg'],
                       'spts': [tripleg['id'] for tripleg in temp_trip_stack
                                if tripleg['type'] == 'staypoint']}

    return trip_dict_entry
f2ddb6c19650c001c714ddeb8372b81ff40f2abe
698,844
def precprint(prec_type, prec_cap, p):
    """
    String describing the precision mode on a p-adic ring or field.

    EXAMPLES::

        sage: from sage.rings.padics.misc import precprint
        sage: precprint('capped-rel', 12, 2)
        'with capped relative precision 12'
        sage: precprint('capped-abs', 11, 3)
        'with capped absolute precision 11'
        sage: precprint('floating-point', 1234, 5)
        'with floating precision 1234'
        sage: precprint('fixed-mod', 1, 17)
        'of fixed modulus 17^1'
    """
    precD = {'capped-rel': 'with capped relative precision %s' % prec_cap,
             'capped-abs': 'with capped absolute precision %s' % prec_cap,
             'floating-point': 'with floating precision %s' % prec_cap,
             'fixed-mod': 'of fixed modulus %s^%s' % (p, prec_cap),
             'lattice-cap': 'with lattice-cap precision',
             'lattice-float': 'with lattice-float precision',
             'relaxed': 'handled with relaxed arithmetics'}
    return precD[prec_type]
b3eab5f0fd133ead8c413aded650d839a2c818b9
698,846
def zooms_string(z1, z2):
    """Return 'zoom N' or 'zooms N-M'."""
    if z2 != z1:
        return "zooms {}-{}".format(min(z1, z2), max(z1, z2))
    else:
        return "zoom {}".format(z1)
2e433472d721767cfc152b75a74f9976ba340f7a
698,848
def _imag_2d_func(x, y, func):
    """Return imag part of a 2d function."""
    return func(x, y).imag
b95f64fb2bca54db89c85349ed3ca961d2b47b4c
698,852
def batch_directions(z, y, Q, path_sizes, step_vals, subtract_projection=True):
    """
    This function takes an input batch of z vectors (and corresponding class
    label vectors y) and applies the directions in Q to the batch of z vectors.

    :param z: (N, nz) tensor of base random noise to manipulate with directions
    :param y: (N, D) tensor of class vectors
    :param Q: (ndirs, nz) matrix of z-space directions
    :param path_sizes: (ndirs,) tensor indicating how far to travel in each direction
    :param step_vals: (interp_steps,) tensor controlling the granularity of the interpolation
    :param subtract_projection: bool, whether or not to "remove" each direction
                                from the sampled z vectors
    :return: z: (N * ndirs * interp_steps, nz) tensor, y: (N * ndirs * interp_steps)
             tensor containing all z's and y's needed to create the visualizations
    """
    interp_steps = step_vals.size(0)
    N, nz = z.size()
    ndirs = Q.size(0)
    z = z.view(1, N, 1, nz).repeat(ndirs, 1, interp_steps, 1)  # .view(N * ndirs * interp_steps, nz)
    if subtract_projection:
        # The projection will be the same across the interp_steps dimension,
        # so we can just pick-out the first step:
        z_proj = z[:, :, 0, :].view(ndirs * N, nz)
        Q_proj = Q.repeat_interleave(N, dim=0)
        projection = (z_proj * Q_proj).sum(dim=1, keepdims=True) / Q_proj.pow(2).sum(dim=1, keepdims=True) * Q_proj
        z -= projection.view(ndirs, N, 1, nz)
    path_sizes = path_sizes.view(ndirs, 1, 1, 1)
    step_vals = step_vals.view(1, 1, interp_steps, 1)
    Q = Q.view(ndirs, 1, 1, nz)
    z += step_vals * path_sizes * Q
    z = z.view(N * ndirs * interp_steps, nz)
    y = y.repeat_interleave(interp_steps, dim=0).repeat(ndirs, 1)
    return z, y
4adeab5b8a9ded7b4a10affbb5427435dbd5a599
698,857
import json


def _CanParseJSON(my_json):
    """Returns True if the input can be parsed as JSON, False otherwise."""
    try:
        json.loads(my_json)
    except ValueError:
        return False
    return True
c8602b9e9544a70102135bd875d19c66664bdefc
698,860
from typing import Union
import json


def prettify(data: Union[list, dict]) -> str:
    """
    Return input data structure (list or dict) as a prettified JSON-formatted
    string. ``default`` is set here to stringify values like datetime values.
    """
    return json.dumps(data, indent=4, sort_keys=True, default=str)
800ef5d3f7a5765bca6fe42fc32e40da5a1cc398
698,865
def power_series(z, cs):
    """
    returns cs[0] + cs[1] * z + cs[2] * z ** 2 + ... + cs[-1] * z ** (len(cs) - 1)
    """
    s = cs[-1]
    for c in reversed(cs[:-1]):
        s *= z
        s += c
    return s
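The loop above is Horner's method, so it agrees with the naive sum of powers; a quick plain-Python check:

# Evaluate 1 + 2*z + 3*z**2 at z = 2 -> 1 + 4 + 12 = 17
assert power_series(2, [1, 2, 3]) == 17

# Matches naive evaluation for a longer series
z, cs = 0.5, [4, 0, -1, 2]
naive = sum(c * z ** k for k, c in enumerate(cs))
assert abs(power_series(z, cs) - naive) < 1e-12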
a9fe54d8a4bc15385f5c1da61eb1696b43a470d4
698,866
def normalize(signal):
    """Restrict the range of a signal to the closed interval [-1.0, 1.0]."""
    normalized_signal = signal / max(signal.max(), signal.min(), key=abs)
    return normalized_signal
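A usage sketch, assuming signal is a NumPy array (the .max()/.min() calls suggest one); note that when the largest-magnitude sample is negative, dividing by it flips the sign of the whole signal:

import numpy as np

signal = np.array([0.5, 2.0, -1.0])
print(normalize(signal))  # [ 0.25  1.   -0.5 ]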
d86fe058302ee133e6318f5c0a3fa24430e7e24c
698,867
def remove_non_ascii(s):
    """
    Remove non-ascii characters in a file. Needed when support for non-ASCII
    is not available.

    Args:
        s (str): Input string

    Returns:
        String with all non-ascii characters removed.
    """
    return "".join(i for i in s if ord(i) < 128)
0a215ffa1841667d7dd7d9c1d9a12bbe84e2cbcd
698,869
def generate_streak_matrix(weeks_data):
    """
    Create the streak matrix:
        1 if the user committed
        0 if the user hasn't committed
        -1 to store null values

    :param weeks_data: week-wise contribution data of the user
    :return: matrix containing the values of the contribution streak
    """
    rows, columns = 7, len(weeks_data)
    streak_matrix = [[-1 for i in range(columns)] for y in range(rows)]

    i = 0
    for week in weeks_data:
        days = week['contributionDays']
        for day in days:
            if day['contributionCount'] > 0:
                streak_matrix[day['weekday']][i] = 1
            elif day['contributionCount'] == 0:
                streak_matrix[day['weekday']][i] = 0
        i += 1
    return streak_matrix
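A minimal sketch of the expected input, assuming the GitHub-style contribution-calendar shape the keys suggest (the sample values are hypothetical):

weeks_data = [
    {'contributionDays': [{'weekday': 0, 'contributionCount': 3},
                          {'weekday': 1, 'contributionCount': 0}]},
    {'contributionDays': [{'weekday': 0, 'contributionCount': 1}]},
]
m = generate_streak_matrix(weeks_data)
print(m[0])  # [1, 1]  - weekday 0 has commits in both weeks
print(m[1])  # [0, -1] - weekday 1: zero commits in week 0, no data in week 1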
e6cc9e3c96aebb20ca8af11ae19e7f86aab96d62
698,870
def any_in_seq(search_vals, seq):
    """Check if any value in search_vals is in the sequence seq."""
    for v in search_vals:
        if v in seq:
            return True
    return False
d314355aef2cba89833394ff6aeacf675daec7e6
698,871
import json


def get_json_file(path):
    """
    Load a JSON file from disk.
    """
    with open(path) as f:
        data = json.load(f)
    return data
acb099868c4baeb59ead76bb20018af477720609
698,875
def format_datetime(session):
    """Convert date or datetime object into formatted string representation."""
    if session.data is not None:
        date_format = session.field.opts.date_format
        if date_format == 'iso8601':
            session.data = session.data.isoformat()
        else:
            session.data = session.data.strftime(date_format)
    return session.data
53a99843e47dde6b82cb48e77fd553bbf65dd646
698,881
def train_step(model, optimizer, loss_fn, conditions, true, out):
    """One training step

    Args:
        model: the feed-forward network
        optimizer: the optimizer for the network
        loss_fn: the loss function
        conditions: the observing conditions used as inputs
        true: the true galaxy magnitudes used as inputs
        out: the ground truth output
    """
    optimizer.zero_grad()
    conditions.requires_grad_(True)
    true.requires_grad_(True)
    predout = model(conditions, true).squeeze()
    loss = loss_fn(predout, out)
    loss.backward()
    optimizer.step()
    return loss.item(), predout.data
8d9ba730582e1d7992bf2bd2f8359f4531392645
698,882
import torch


def get_concentrated_mask(class_weights, topk):
    """
    Returns a logical mask indicating the categories with the top k largest
    probabilities, as well as the categories corresponding to those with the
    top k largest probabilities.

    Parameters
    ----------
    class_weights : torch.Tensor
        Array of class weights, with each row corresponding to a datapoint,
        each column corresponding to the probability of the datapoint
        belonging to that category
    topk : int
        the k in top-k

    Returns
    -------
    mask_topk : torch.Tensor
        Boolean array, same dimension as class_weights, with entry 1 if the
        corresponding class weight is in the topk for that observation
    topk_domain : torch.LongTensor
        Array specifying the indices of class_weights that correspond to
        the topk observations
    seq_tensor : torch.LongTensor
        Row indices 0..N-1, useful for indexing class_weights alongside
        topk_domain
    """
    mask_topk = torch.zeros(class_weights.shape).to(class_weights.device)
    seq_tensor = torch.LongTensor([i for i in range(class_weights.shape[0])])

    if topk > 0:
        _, topk_domain = torch.topk(class_weights, topk)
        for i in range(topk):
            mask_topk[seq_tensor, topk_domain[:, i]] = 1
    else:
        topk_domain = None

    return mask_topk, topk_domain, seq_tensor
8663c6e4d868eb2100684132ef95c59eee9b3560
698,883
import re


def get_params(rule):
    """
    Returns params from the url

    Args:
        rule (str): the endpoint path (e.g. '/v1/data/<int:id>')

    Returns:
        (list): parameters from the endpoint path

    Examples:
        >>> rule = '/v1/random_resource/<string:path>/<status_type>'
        >>> get_params(rule)
        ['path', 'status_type']
    """
    # param regexes
    param_with_colon = r"<.+?:(.+?)>"
    param_no_colon = r"<(.+?)>"
    either_param = param_with_colon + r"|" + param_no_colon

    parameter_matches = re.findall(either_param, rule)
    return ["".join(match_tuple) for match_tuple in parameter_matches]
05414d950a6a603ff79fa2efff3ff3fef1e375f2
698,887
def upstream_or_distgit_path(
    request, upstream_and_remote, distgit_and_remote, ogr_distgit_and_remote
):
    """
    Parametrize the test to upstream, downstream [currently skipped] and ogr distgit
    """
    return {
        "upstream": upstream_and_remote[0],
        "distgit": distgit_and_remote[0],
        "ogr-distgit": ogr_distgit_and_remote[0],
    }[request.param]
6f94a44e95301398c495dff56f9e470b46b5c737
698,890
def garfield_empty_mock(url, request) -> str:
    """
    Mock HTTP empty response using HTTMock

    :param url: str
    :param request: Request
    :return: str
    """
    return """
    <html>
        <body></body>
    </html>
    """
c2c349e40f315bfc625680fc5b0e92a7a54f5f7c
698,895
def check_order(order):
    """
    Checks the specified drawing order is valid and returns the corresponding
    tree traversal order.
    """
    if order is None:
        order = "minlex"
    traversal_orders = {
        "minlex": "minlex_postorder",
        "tree": "postorder",
    }
    if order not in traversal_orders:
        raise ValueError(
            f"Unknown display order '{order}'. "
            f"Supported orders are {list(traversal_orders.keys())}"
        )
    return traversal_orders[order]
c8478bbbb59ce25beec4a4196dc183a49ecb62ba
698,896
def get_cat2id(item_metas, n_entities):
    """Extracts all categories from item metadata and maps them to an id."""
    categories = set([cat for it_meta in item_metas for cat in it_meta.categories])
    return {cate: n_entities + i for i, cate in enumerate(categories)}
38c7895949d3eccf9d8d4fc6c609b036700f93d8
698,898
def to_dict(arr):
    """
    Convert a list to a dict with keys drawn from '0', '1', '2', ...

    Examples
    --------
    >>> to_dict([2, 3, 4])  # doctest: +SKIP
    {'0': 2, '1': 3, '2': 4}
    """
    return dict(zip(map(str, range(len(arr))), arr))
a51c0cbb477b4569a67fda3088a200194bb8bf67
698,903
def is_power_of_two(a):
    """Return whether the argument, cast to int, is a power of 2."""
    a = int(a)
    # Bit manipulation. A power of 2 has a bit representation like 0...010...0.
    # Subtracting 1 from such a number turns it into 0...001...1, so ANDing
    # a-1 and a yields 0.
    return a > 0 and ((a - 1) & a) == 0
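A worked illustration of the bit trick:

# 8 is 0b1000 and 7 is 0b0111: no bits overlap, so (8 - 1) & 8 == 0.
assert is_power_of_two(8)
# 6 is 0b110 and 5 is 0b101: they share bit 0b100, so (6 - 1) & 6 != 0.
assert not is_power_of_two(6)
# The a > 0 guard rejects 0, for which (a - 1) & a would also be 0.
assert not is_power_of_two(0)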
d7b0d90df8eb4287a6f56e8256aa6c40b9b46441
698,904
from typing import Callable
from functools import reduce


def clean(text: str, *cleaners: Callable[[str], str]) -> str:
    """Cleans the given text using the provided cleaning functions.

    Arguments:
        text: The text string to be cleaned.
        cleaners: The simple cleaner functions to be applied in sequence over
            the input text.

    Returns:
        The clean text.
    """
    return reduce(lambda part, func: func(part), cleaners, text)
67a53637bca0b19b49bd157ccc520d8dc053a12f
698,905
def variable_name_to_title(variable_name, latex_flag=True):
    """
    Translates a variable name into a title suitable for inclusion in
    Matplotlib title strings.  Variable names are assumed to be lowercased as
    found in IWP datasets and titles may include LaTeX markers for
    mathematical typesetting.  Unknown variable names are returned as is.

    Takes 2 arguments:

      variable_name - Variable name to translate.
      latex_flag    - Optional flag specifying whether LaTeX-encodings should
                      be used in the translation.  If specified as False,
                      translations will not use LaTeX.  If omitted, defaults
                      to True.

    Returns 1 value:

      variable_title - Translated variable title.
    """
    if variable_name == "divh":
        return "Horizontal Divergence"
    elif variable_name == "p":
        return "Density"
    elif variable_name == "pprime":
        if latex_flag:
            return "Density$'$"
        else:
            return "Density'"
    elif variable_name == "u":
        if latex_flag:
            return "$velocity_x$"
        else:
            return "Velocity - X"
    elif variable_name == "uprime":
        if latex_flag:
            return "$velocity_x'$"
        else:
            return "Acceleration - X"
    elif variable_name == "v":
        if latex_flag:
            return "$velocity_y$"
        else:
            return "Velocity - Y"
    elif variable_name == "vprime":
        if latex_flag:
            return "$velocity_y'$"
        else:
            return "Acceleration - Y"
    elif variable_name == "w":
        if latex_flag:
            return "$velocity_z$"
        else:
            return "Velocity - Z"
    elif variable_name == "wprime":
        if latex_flag:
            return "$velocity_z'$"
        else:
            return "Acceleration - Z"
    elif variable_name == "vortx":
        if latex_flag:
            return "$vorticity_x$"
        else:
            return "Vorticity - X"
    elif variable_name == "vorty":
        if latex_flag:
            return "$vorticity_y$"
        else:
            return "Vorticity - Y"
    elif variable_name == "vortz":
        if latex_flag:
            return "$vorticity_z$"
        else:
            return "Vorticity - Z"
    elif variable_name.startswith("morlet"):
        # Morlet wavelets have an angle preference which is encoded as either
        # "morlet+-angle", "morlet+angle", or "morlet-angle".  Handle the
        # plus/minus case as special and let the positive/negative, single
        # angles fall through like normal text.
        variable_title = "2D CWT with Morlet"

        # Decorate the base title depending on the format of the rest of the
        # variable.
        pm_index = variable_name.find("+-")
        if pm_index != -1:
            #
            # NOTE: we have to filter this as the non-default split parameter
            #       will *not* filter out empty strings...
            #
            pieces = list(filter(lambda piece: len(piece) > 0,
                                 variable_name[pm_index + 2:].split("-")))

            # The first piece is the angle.  Add the remaining pieces the
            # way we found them.
            if len(pieces) == 1:
                suffix = ""
            else:
                suffix = " ({:s})".format("-".join(pieces[1:]))

            if latex_flag:
                # Add "+-N degrees" in LaTeX and then append the remaining
                # components as a suffix.  (Raw string so \pm and \circ are
                # not treated as escape sequences.)
                variable_title = r"{:s}$\pm{:s}\circ${:s}".format(
                    variable_title, pieces[0], suffix)
            else:
                variable_title = "{:s} +-{:s} degrees{:s}".format(
                    variable_title, pieces[0], suffix)
        elif len(variable_name) > len("morlet"):
            # Add the remaining text as a parenthetical.
            variable_title = "{:s} ({:s})".format(
                variable_title, variable_name[len("morlet"):])

        return variable_title
    elif variable_name.startswith("arc"):
        return "2D CWT with Arc"
    elif variable_name.startswith("halo"):
        return "2D CWT with Halo"

    # We don't have a special title for this variable.  Use what we have.
    return variable_name
3f9d56a13f4aacb2ec6e9cf15ad1d2ff1c4288bc
698,907
def rectangle_area(length, width):
    """
    Calculates the area of a rectangle.

    :param length: The length of the rectangle.
    :param width: The width of the rectangle.
    :return: The area of the rectangle.
    """
    return length * width
0cbe453fbd4c3c6a061f520d57f303dae55fdc25
698,911
def threshold_array(arr, threshold=2e-4):
    """
    Thresholds an array, returning a binary array.

    Parameters
    ----------
    arr : numpy.ndarray
        Contains the data to threshold

    Returns
    -------
    numpy.ndarray
        Returns arr with binary values, depending on the threshold
    """
    return (arr > threshold)
254692de4f82dbf6c3e684b10324cb73c436ff28
698,913
def _current_window_for_event(event):
    """
    Return the `Window` for the currently focussed Buffer.
    """
    return event.app.layout.current_window
4b9859c7bf7fc4b072362d2d5b9e896022769587
698,915
import re


def clean_xml(xml):
    """Clean the given XML string of namespace definitions, namespace prefixes
    and syntactical but otherwise meaningless differences.

    Parameters
    ----------
    xml : str
        String representation of XML document.

    Returns
    -------
    str
        String representation of cleaned XML document.
    """
    # remove xmlns namespace definitions
    r = re.sub(r'[ ]+xmlns:[^=]+="[^"]+"', '', xml)
    # remove namespace prefixes in tags
    r = re.sub(r'<(/?)[^:]+:([^ >]+)([ >])', r'<\1\2\3', r)
    # remove extra spaces in tags
    r = re.sub(r'[ ]+/>', '/>', r)
    # remove extra spaces between tags
    r = re.sub(r'>[ ]+<', '><', r)
    return r
3f566975ab512ccc22824c45e7ef04fc861a5c03
698,916
def weighted_average(gini_or_entropy_left, left_cnt, gini_or_entropy_right, right_cnt):
    """
    Calculate the weighted average of the Gini index or entropy for a node.

    :param right_cnt: count of total records on the right side of the node
    :param left_cnt: count of total records on the left side of the node
    :param gini_or_entropy_left: Gini index or entropy of the left side of the node
    :param gini_or_entropy_right: Gini index or entropy of the right side of the node
    :return: weighted average of the entire node
    """
    # each side is weighted by its share of the records in the node
    weighted_avg = ((left_cnt / (left_cnt + right_cnt)) * gini_or_entropy_left) + (
        (right_cnt / (left_cnt + right_cnt)) * gini_or_entropy_right)
    return weighted_avg
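A worked example: a split with 40 records of impurity 0.3 on the left and 60 records of impurity 0.5 on the right.

# (40/100) * 0.3 + (60/100) * 0.5 = 0.12 + 0.30 = 0.42
print(weighted_average(0.3, 40, 0.5, 60))  # 0.42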
7914a0164427de4e9cebf7637dda670694f8df59
698,918
import torch


def concat_entities(entities):
    """
    Concat multiple graphs via concatenation of their entities tensors.

    Parameters
    ----------
    entities: a list of graph tuples. Either [(v, e, c), ...] or [(v, e), ...]
        when the graph has no global attribute.

    Returns
    -------
    v, e, c - concatenated entities tensors; c is None if the graph has no
    global attribute.
    """
    has_global = len(entities[0]) == 3 and entities[0][2] is not None
    v = torch.cat([el[0] for el in entities], dim=1)
    e = {}
    for k in entities[0][1].keys():
        e[k] = torch.cat([el[1][k] for el in entities], dim=1)
    c = None
    if has_global:
        c = torch.cat([el[2] for el in entities], dim=1)
    return v, e, c
06d0619836d4cb8c977028ee0cdccd9075136c72
698,919
import math


def RotateXY(x, y, xc=0, yc=0, angle=0, units="DEGREES"):
    """Rotate an xy coordinate about a specified origin.

    x, y      xy coordinates
    xc, yc    center of rotation
    angle     angle
    units     "DEGREES" (default) or "RADIANS"
    """
    x = x - xc
    y = y - yc
    # make angle clockwise (like Rotate_management)
    angle = angle * -1
    if units == "DEGREES":
        angle = math.radians(angle)
    xr = (x * math.cos(angle)) - (y * math.sin(angle)) + xc
    yr = (x * math.sin(angle)) + (y * math.cos(angle)) + yc
    return xr, yr
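A usage sketch: because the angle is negated internally, a positive angle rotates clockwise.

# Rotate (1, 0) by 90 degrees clockwise about the origin.
xr, yr = RotateXY(1, 0, angle=90)
print(round(xr, 6), round(yr, 6))  # 0.0 -1.0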
68d24bfd5b2cf436b1ea37c0d4124f6cfa357e9f
698,924
def _extract_gpcrdb_residue_html(txt):
    """
    Extracts the relevant lines for all residues from a GPCRdb html entry.

    Parameters
    ----------
    txt : str
        Content (html) of the website with the GPCRdb entry.

    Returns
    -------
    residue_html : list
        A list in which each item contains the html lines for one residue.
    """
    res_start_line = ' <td class="seqv seqv-sequence">'
    res_end_line = ' </td>'
    spl = txt.split('\n')
    residue_html = []
    for lnum, line in enumerate(spl):
        if line == res_start_line:
            residue_lines = spl[lnum:lnum + 12]
            # Use fewer lines if the residue is shorter
            # (i.e. has no GPCRdb number)
            if residue_lines[-4] == res_end_line:
                residue_lines = residue_lines[:-3]
            residue_html.append(residue_lines)
    return residue_html
4621848f27b9cd24017ed5c49683bfc4b8a180e8
698,925
def simple_tag_without_context_parameter(arg):
    """Expected simple_tag_without_context_parameter __doc__"""
    return "Expected result"
aad64452d051a587447696c6bd616b04f3f5b23e
698,927
import re


def uuid(value: str):
    """
    Validator for Universally unique identifier

    Example Result: [123e4567-e89b-12d3-a456-426655440000, 6a2f41a3-c54c-fce8-32d2-0324e1c32e22]

    Detail: https://en.wikipedia.org/wiki/Universally_unique_identifier#Format
    """
    _uuid_pat = r'[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}'  # pylint: disable=C0301
    return re.findall(_uuid_pat, value)
66f498669b52df2e3ff9ea281fb781cb6774c77c
698,930
from typing import Tuple
from typing import Dict


def field_annotations(typed_dict) -> Tuple[Dict[str, type], Dict[str, type]]:
    """Return the required and optional fields in the TypedDict."""
    return (typed_dict.__annotations__["required_fields"].__annotations__,
            typed_dict.__annotations__["optional_fields"].__annotations__)
2e88481a1668cd40caacb8bfc91a50c8746a704e
698,931
import sqlite3


def get_member_data(member_id: str, conn: sqlite3.Connection) -> dict:
    """
    Gets email and phone data from the database based on the passed member.

    This function uses placeholders in the query so it is injection safe.
    Look up get_member_data_injection for an example where it's NOT
    injection safe.
    """
    query = "SELECT EMAIL,PHONE FROM MEMBERS WHERE MEMBER_ID=?"
    cursor = conn.cursor()
    cursor.execute(query, (member_id,))
    rows = cursor.fetchall()
    return dict(rows)
a4ca505961352e292109e22318ff7a0188ffc454
698,933
def node_short_name(node, **kwargs):
    """
    Returns short name of the given node

    :param node: str
    :return: str
    """
    return node
67d1d5ff172544eb2b233a925d90bbd0ea767e83
698,934
def create_notify_payload(host, nt, usn, location=None, al=None, max_age=None, extra_fields=None):
    """
    Create a NOTIFY packet using the given parameters.
    Returns a bytes object containing a valid NOTIFY request.

    The NOTIFY request is different between IETF SSDP and UPnP SSDP.

    In IETF, the 'location' and 'al' fields serve the same purpose, and can be
    provided together (if so, they should point to the same location) or not
    at all.

    In UPnP, the 'location' field MUST be provided, and 'al' is ignored.

    Sending both 'location' and 'al' is the more widely supported option. It
    does not, however, mean that all SSDP implementations would accept a
    packet with both. Therefore the option to send just one of these fields
    (or none at all) is supported. If in doubt, send both. If your
    notifications go ignored, opt to not send 'al'.

    :param host: The address (IP + port) that the NOTIFY will be sent about.
                 This is usually a multicast address.
    :type host: str
    :param nt: Notification type. Indicates which device is sending the notification.
    :type nt: str
    :param usn: Unique identifier for the service. Usually this will be
                composed of a UUID or any other universal identifier.
    :type usn: str
    :param location: A URL for more information about the service.
    :type location: str
    :param al: Similar to 'location', but only supported on IETF SSDP, not UPnP.
    :type al: str
    :param max_age: Amount of time in seconds that the NOTIFY packet should be
                    cached by clients receiving it. In UPnP, this header is required.
    :type max_age: int
    :param extra_fields: Extra header fields to send. UPnP SSDP section 1.1.3
                         allows for extra vendor-specific fields to be sent in
                         the NOTIFY packet. According to the spec, the field
                         names MUST be in the format of `token`.`domain-name`,
                         for example `myheader.philips.com`. SSDPy, however,
                         does not check this. Normally, headers should be in
                         ASCII - but this function does not enforce that.
    :return: A bytes object containing the generated NOTIFY payload.
    """
    if max_age is not None and not isinstance(max_age, int):
        raise ValueError("max_age must be of type: int")
    data = (
        "NOTIFY * HTTP/1.1\r\n"
        "HOST:{}\r\n"
        "NT:{}\r\n"
        "NTS:ssdp:alive\r\n"
        "USN:{}\r\n"
    ).format(host, nt, usn)
    if location is not None:
        data += "LOCATION:{}\r\n".format(location)
    if al is not None:
        data += "AL:{}\r\n".format(al)
    if max_age is not None:
        data += "Cache-Control:max-age={}\r\n".format(max_age)
    if extra_fields is not None:
        for field, value in extra_fields.items():
            data += "{}:{}\r\n".format(field, value)
    data += "\r\n"
    return data.encode("utf-8")
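A usage sketch with illustrative values (the address, UUID and URL below are made up):

payload = create_notify_payload(
    host="239.255.255.250:1900",
    nt="upnp:rootdevice",
    usn="uuid:00000000-0000-0000-0000-000000000000::upnp:rootdevice",
    location="http://192.168.1.2:5000/desc.xml",
    max_age=1800,
)
print(payload.decode())  # NOTIFY * HTTP/1.1, then HOST/NT/NTS/USN/LOCATION/Cache-Control headers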
95b6a7ad37ec96d646116451340ed7ee96495632
698,935
def get_present_volume(present):
    """Calculate volume of the box needed for present."""
    volume = 1
    for side in present:
        volume *= side
    return volume
e3df95638a741513307163abd90a68013aa7da3b
698,939
def crop_rasters_for_sr(max_sr_factor, *hmaps):
    """Crop a list of rasters to a size such that they can be evenly divided
    by ``max_sr_factor``.

    It assumes that each raster is centered identically. I.e. if one raster
    has size 256x256 and another 254x254, it assumes that it is a border of
    size 1 that is removed symmetrically from the first raster to get the
    location of the second raster.

    It will crop off the bottom-right of the image to make it an evenly
    divisible size.

    Parameters
    ----------
    max_sr_factor : int
        The maximum amplification factor for super-resolution predictions
        that will be made using these rasters. I.e. 32 means that there will
        be 32x32 predictions per image.
    hmaps : list of :class:`numpy.ndarray`
        The rasters to crop. The final two dimensions must correspond to i,j
        of the raster.

    Returns
    -------
    list of :class:`numpy.ndarray`
        Cropped versions of ``hmaps``
    """
    min_width = min([i.shape[-1] for i in hmaps])
    reduction = min_width % max_sr_factor

    out = []
    for h in hmaps:
        crop_width = (h.shape[-1] - min_width) / 2
        assert crop_width == int(crop_width)
        crop_width = int(crop_width)
        out.append(
            h[
                ...,
                crop_width : -(reduction + crop_width),
                crop_width : -(reduction + crop_width),
            ]
        )
    return out
77b45841cb82bc5475c6343df05c91b04d4e74c8
698,940
def apim_api_operation_get(client, resource_group_name, service_name, api_id, operation_id):
    """Gets the details of the API Operation specified by its identifier."""
    return client.api_operation.get(resource_group_name, service_name, api_id, operation_id)
4101a7aa2b3815856bf852a299698d407af390c0
698,943
def _uses_vulnerable_solc_version(version):
    """Detect if used compiler version is 0.4.[0|1|2|3|4]

    Args:
        version (solc version used)
    Returns:
        Bool
    """
    if version in ["0.4.0", "0.4.1", "0.4.2", "0.4.3", "0.4.4"]:
        return True
    return False
475dff3c6d3ed71317aab79e147f8797bde85f3a
698,944
import hashlib


def get_subscriber_hash(member_email):
    """
    The MD5 hash of the lowercase version of the list member's email.
    Used as the member_id.
    """
    member_email = member_email.lower()
    # hashlib.md5 requires bytes, so encode the lowercased address first
    m = hashlib.md5(member_email.encode('utf-8'))
    return m.hexdigest()
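A quick check of the helper above; the point of lowercasing is that case differences in an address do not change the hash:

assert get_subscriber_hash("User@Example.COM") == get_subscriber_hash("user@example.com")
print(len(get_subscriber_hash("user@example.com")))  # 32 hex characters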
c2d7c7f4d0da58ec3990cccad865cb53cf2ebe15
698,945
def Reverse(action):
    """Reverses the behavior of the action

    Example::

        # rotates the sprite 180 degrees in 2 seconds counter clockwise
        action = Reverse( RotateBy( 180, 2 ) )
        sprite.do( action )
    """
    return action.__reversed__()
9bde87421204300e55fff80eed0564f4b4d8e978
698,947
def get_frame_number(frame):
    """Get frame number by calculating distance to newest frame."""
    num = 0
    newer = frame.newer()
    while newer is not None:
        newer = newer.newer()
        num += 1
    return num
1b653a629d9e34af2cfa1ce37bea54644a18236a
698,951
def folder_contents_html(folder_path, files, folders):
    """Given files and folders generate html."""
    html = "<!DOCTYPE html><html><body>{}</body></html>"
    atag = '<a href="{}">{}</a>'
    files_and_folders = ''
    for folder in folders:
        files_and_folders += '<h4>' + atag.format(folder_path + '/' + folder, folder) + '</h4>'
    for file_name in files:
        files_and_folders += '<h4>' + atag.format(folder_path + '/' + file_name, file_name) + '</h4>'
    return html.format(files_and_folders)
6b6b37ca9452319d309a61c877ebf6d1fba201aa
698,954
from typing import Optional


def parse_dot_notation(input: str) -> tuple[str, Optional[tuple[str, ...]]]:
    """
    Parse dot-notation.

    Example:
        parse_dot_notation('a')      #-> ('a', None)
        parse_dot_notation('a.b.c')  #-> ('a', ('b', 'c'))
    """
    name, _, sub_path_str = input.partition('.')
    sub_path = tuple(sub_path_str.split('.')) if sub_path_str else None
    return name, sub_path
06650868fb773b41b97a839b0d423cc8f3cd4a85
698,959
def createURIString(valueString, delimiter, vocab):
    """This function takes a delimiter-separated string of values and returns
    a string in which every one of these values is prefixed with the
    specified vocab URI.

    >>> createURIString('nl;fr;de', ';', 'http://id.loc.gov/vocabulary/languages/')
    'http://id.loc.gov/vocabulary/languages/nl;http://id.loc.gov/vocabulary/languages/fr;http://id.loc.gov/vocabulary/languages/de'

    An empty input string results in an empty output string
    >>> createURIString('', ';', 'http://id.loc.gov/vocabulary/languages/')
    ''

    Only a delimiter results in an empty string
    >>> createURIString(';', ';', 'http://id.loc.gov/vocabulary/languages/')
    ''
    """
    uris = []
    urisString = ""
    values = valueString.split(delimiter)
    if len(values) > 1:
        for v in values:
            if len(v) > 0:
                uris.append(vocab + v)
        urisString = ';'.join(uris)
    elif len(values) == 1:
        if len(values[0]) > 0:
            urisString = vocab + valueString
        else:
            urisString = ''
    return urisString
6fb6898b1531b5741dd890452c9ddd9e4db6f205
698,960
import requests
from bs4 import BeautifulSoup


def get_page(url, **kwargs):
    """Pulls in the HTML from a URL and returns the results as a
    BeautifulSoup object.

    Parameters
    ----------
    url : str
        The URL to scrape

    Returns
    -------
    soup : bs4.BeautifulSoup
        The BeautifulSoup representation of the webpage
    """
    response = requests.get(url, **kwargs)
    if response.status_code != 200:
        # both string pieces need the f-prefix so status_code is interpolated
        raise RuntimeError(
            f"Response from {url} failed with status code "
            f"{response.status_code}"
        )
    else:
        return BeautifulSoup(response.text, "lxml")
392c83be8b24bdeb27cf482c9df72c30b4b945dc
698,961
from pathlib import Path
import json


def read_jupyter_as_json(filepath: Path) -> dict:
    """
    Read in a rendered notebook -- i.e. the JSON representation that is
    'under the hood'.

    :param filepath: path to jupyter notebook.
    """
    with open(filepath, "r") as fin:
        contents = fin.read()
    return json.loads(contents)
d91344b1bddcd0e1078effe6dd7947f7e04ea6af
698,964
def _to_db_str(sequential_list):
    """Convert a list or tuple object to a string of database format."""
    entry_list = []
    for _entry in sequential_list:
        # I know only text type need to be converted by now. More types could
        # be added in the future when we know.
        if isinstance(_entry, str):
            entry_list.append("u'%s'" % _entry)
        else:
            entry_list.append(str(_entry))
    return "(%s)" % ", ".join(entry_list)
42a4e008964c0accb3e596dc75859641e999a0f4
698,965
def extract_domain(email_address):
    """
    Given an email address, extract the domain name from it. This is done by
    finding the @ and then slicing the email address, returning everything
    found after the @. If no @ is found then the entire email address string
    is returned.

    :param email_address:
    :return:
    """
    email_address = email_address.lower()
    # figure out the domain from the email address
    try:
        return email_address[email_address.index(u'@') + 1:]
    except ValueError:
        # no @ found, just use the whole string
        return email_address
3e09c9cef431c09d126d8de6854990dc9351ef0d
698,972
def check_solution(model, solution):
    """
    Helper function. If solution is None, attempts to get it from the model.

    :param model:
    :param solution:
    :return:
    """
    if solution is None:
        try:
            solution = model.solution
        except AttributeError:
            raise AttributeError('If not providing a solution object, please '
                                 'provide a model with an embedded solution '
                                 '(call model.solve())')
    return solution
a03ee5e2033ee99caa0fd761f9d78d3d597a906b
698,974
def degTodms(ideg):
    """
    Converts degrees to degrees:minutes:seconds

    :param ideg: objects coordinate in degrees
    :type ideg: float
    :return: degrees:minutes:seconds
    :rtype: string
    """
    if ideg < 0:
        s = -1
    else:
        s = 1
    ideg = abs(ideg)
    deg = int(ideg) + 0.
    m = 60. * (ideg - deg)
    minutes = int(m) + 0.
    seconds = 60. * (m - minutes)
    if s < 0:
        dms = "-%02d:%02d:%06.3f" % (deg, minutes, seconds)
    else:
        dms = "%02d:%02d:%06.3f" % (deg, minutes, seconds)
    return dms
169cf4a89e7a2bd8526cf32acd1e88ec62d0237f
698,975
def prepare_results(cursor_description, rows):
    """
    Generate result in JSON format with an entry consisting of key value pairs.

    :param cursor_description: a tuple with query result columns
    :param rows: list of returned sql query values
    :return: dictionary
    """
    if rows is None or len(rows) == 0:
        return {"entries": None}

    # List of column names from SQL result to use as dictionary keys
    dt_column_keys = [column[0] for column in cursor_description]

    # Build dictionary: key-value pairs consisting of column name - row value
    entries_data_list = []
    for row in rows:
        entries_data_list.append(dict(zip(dt_column_keys, row)))

    entries = {"entries": entries_data_list}
    return entries
8fcc48f0a732a200c65d27817349a0de1a5f1172
698,978
import torch


def _rescale(dat, mn_out=0, mx_out=511):
    """Rescales image intensities between mn_out and mx_out."""
    dtype = dat.dtype
    device = dat.device
    dat[(dat == dat.min()) | ~torch.isfinite(dat) | (dat == dat.max())] = 0
    # Make scaling to set image intensities between mn_out and mx_out
    mn = torch.tensor([dat.min(), 1], dtype=dtype, device=device)[None, ...]
    mx = torch.tensor([dat.max(), 1], dtype=dtype, device=device)[None, ...]
    sf = torch.cat((mn, mx), dim=0)
    sf = torch.tensor([mn_out, mx_out],
                      dtype=dtype, device=device)[..., None].solve(sf)[0].squeeze()
    # Rescale
    dat = dat * sf[0] + sf[1]
    # Clamp
    dat = dat.clamp_min(mn_out).clamp_max(mx_out)
    return dat
3b502094e22fe97fadfeb4ff987774385442c52e
698,980
def get_fws_and_tasks(workflow, fw_name_constraint=None, task_name_constraint=None):
    """
    Helper method: given a workflow, returns back the fw_ids and task_ids that
    match name constraints. Used in developing multiple powerups.

    Args:
        workflow (Workflow): Workflow
        fw_name_constraint (str): a constraint on the FW name
        task_name_constraint (str): a constraint on the task name

    Returns:
        a list of tuples of the form (fw_id, task_id) of the RunVasp-type tasks
    """
    fws_and_tasks = []
    for idx_fw, fw in enumerate(workflow.fws):
        if fw_name_constraint is None or fw_name_constraint in fw.name:
            for idx_t, t in enumerate(fw.tasks):
                if task_name_constraint is None or task_name_constraint in str(t):
                    fws_and_tasks.append((idx_fw, idx_t))
    return fws_and_tasks
28f4f2cdcc58b942ee3e0631c21bf2a3a052db35
698,990
def _get_tags(ws_info):
    """Get the tags relevant to search from the ws_info metadata."""
    metadata = ws_info[-1]
    if metadata.get('searchtags'):
        if isinstance(metadata['searchtags'], list):
            return metadata['searchtags']
        else:
            return [metadata['searchtags']]
    else:
        return []
f6922f92913446284545aa73cb2b8cd7139749e8
698,991
import base64
import json


def encode_base64_json(data):
    """
    Encode dict-like data into a base64-encoded JSON string (returned as
    bytes). This can be used to get dict-like data into HTTP headers / envvar.
    """
    return base64.b64encode(bytes(json.dumps(data), 'utf-8'))
08b9d8568a59717173adf00658aad03bddb8df14
698,997
import math


def edgeweight_properties(graph, weight_label="weight"):
    """
    Calculates properties of edge weights.

    Parameters
    ----------
    graph: nx.Graph
        Graph to calculate the properties from
    weight_label: str
        Name of the edge attribute holding the weight

    Returns
    -------
    max_weight: number
        Maximum weight of an edge
    min_weight: number
        Minimum weight of an edge
    num_of_zero_weights: int
        Number of edges with zero weight
    """
    max_weight = -math.inf
    min_weight = math.inf
    num_of_zero_weights = 0
    for u, v, d in graph.edges(data=True):
        weight = d[weight_label]
        if weight > max_weight:
            max_weight = weight
        if weight < min_weight:
            min_weight = weight
        if weight == 0:
            num_of_zero_weights += 1
    return max_weight, min_weight, num_of_zero_weights
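A usage sketch with networkx (the nx.Graph annotation in the docstring suggests it):

import networkx as nx

g = nx.Graph()
g.add_edge('a', 'b', weight=2.5)
g.add_edge('b', 'c', weight=0)
g.add_edge('c', 'a', weight=-1)
print(edgeweight_properties(g))  # (2.5, -1, 1)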
a22857d844c2235859c004bfac897b3b5c80feee
699,002
def timedelta_to_seconds(td):
    """
    Converts a timedelta to total seconds.
    (This is built-in in Python 2.7.)
    """
    # we ignore microseconds for this
    if not td:
        return None
    return td.seconds + td.days * 24 * 3600
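A quick check against the stdlib equivalent; note that microseconds are deliberately dropped:

from datetime import timedelta

td = timedelta(days=1, seconds=30, microseconds=500000)
print(timedelta_to_seconds(td))  # 86430 (the 0.5 s is ignored)
print(td.total_seconds())        # 86430.5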
ef4ebd88581d8a2a1f64b9f940afbe22da8f55bc
699,003
import torch


def _safe_mean(losses, num_present):
    """Computes a safe mean of the losses.

    Args:
        losses: `Tensor` whose elements contain individual loss measurements.
        num_present: The number of measurable elements in `losses`.

    Returns:
        A scalar representing the mean of `losses`. If `num_present` is zero,
        then zero is returned.
    """
    total_loss = torch.sum(losses)
    if num_present == 0:
        return 0 * total_loss
    else:
        return torch.div(total_loss, num_present)
4174dead8e2fc582633589713e076871a0631de1
699,004
def get_proper_state(job, state):
    """
    Return a proper job state to send to the server.

    This function should only return 'starting', 'running', 'finished',
    'holding' or 'failed'. If the internal job.serverstate is not yet set, it
    means it is the first server update, i.e. 'starting' should be sent.

    :param job: job object.
    :param state: internal pilot state (string).
    :return: valid server state (string).
    """
    if job.serverstate in ('finished', 'failed'):
        pass
    elif job.serverstate == "" and state != "finished" and state != "failed":
        job.serverstate = 'starting'
    elif state in ('finished', 'failed', 'holding'):
        job.serverstate = state
    else:
        job.serverstate = 'running'
    return job.serverstate
ba54c1f4eee99055e73099bc381ccbb69da4b183
699,006
def _msearch_success(response):
    """Return True if all requests in a multi search request succeeded.

    Parameters
    ----------
    response : requests.models.Response

    Returns
    -------
    bool
    """
    parsed = response.json()
    if 'responses' not in parsed:
        return False
    for result in parsed['responses']:
        if result['status'] != 200:
            return False
    return True
acdac4408464120fdeb7f20a06f07aac6ca3809f
699,010
def reverse(graph):
    """Replace all arcs (u, v) by arcs (v, u) in a graph."""
    rev_graph = [[] for node in graph]
    for node, _ in enumerate(graph):
        for neighbor in graph[node]:
            rev_graph[neighbor].append(node)
    return rev_graph
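A small example, assuming the adjacency-list representation the code implies (node i's neighbors are graph[i]):

graph = [[1, 2], [2], []]   # arcs 0->1, 0->2, 1->2
print(reverse(graph))       # [[], [0], [0, 1]] - every arc now points the other way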
5b1b0281df529676e90ba4d27d1ab554d4a50537
699,015
import random


def propose_any_node_flip(partition):
    """Flip a random node (not necessarily on the boundary) to a random part."""
    node = random.choice(tuple(partition.graph))
    newpart = random.choice(tuple(partition.parts))
    return partition.flip({node: newpart})
1ba9d747b92b707cad34c820abec9325913aca55
699,018
import string


def extract_words(text):
    """Return the words in a tweet, not including punctuation.

    >>> extract_words('anything else.....not my job')
    ['anything', 'else', 'not', 'my', 'job']
    >>> extract_words('i love my job. #winning')
    ['i', 'love', 'my', 'job', 'winning']
    >>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
    ['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
    >>> extract_words("paperclips! they're so awesome, cool, & useful!")
    ['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
    """
    # replace every non-letter character with a space, then split
    s = ""
    for i in text:
        if i not in string.ascii_letters:
            i = ' '
        s += i
    return s.split()
cc0b7dbc548696ed74b48dec57b386fe38adfa41
699,019
def mods_to_step_size(mods):
    """
    Convert a set of modifier keys to a step size.

    :param mods: Modifier keys.
    :type mods: :class:`tuple`[:class:`str`]
    :return: Step size, by name.
    :rtype: :class:`str`
    """
    if "alt" in mods:
        return "fine"
    elif "shift" in mods:
        return "coarse"
    return "medium"
3c93fd11a8b5b0fad5cc26a35872e0616ebf7231
699,020
import yaml


def read_parameter_file(parameter_file_path: str) -> dict:
    """
    Reads the parameters from a yaml file into a dictionary.

    Parameters
    ----------
    parameter_file_path:
        Path to a parameter file.

    Returns
    -------
    params:
        Dictionary containing the parameters defined in the provided yaml file
    """
    with open(parameter_file_path, 'r') as f:
        params = yaml.safe_load(f)
    return params
5f203bf596c2b1c39f14cea1e99e0c33c2f97ce2
699,023
import hashlib


def make_hash(to_hash: str) -> str:
    """
    Return a hash of to_hash.
    """
    new_hash = hashlib.md5()
    new_hash.update(to_hash.encode("utf-8"))
    return str(new_hash.hexdigest())
7e800f7942df23256373c221428e5c24b65cabee
699,024
def _beam_fit_fn_3(z, z0, Theta):
    """Fitting function for z0 and Theta."""
    return (Theta * (z - z0)) ** 2
76955c6464ae7a33927986d146e9000e9a45c12b
699,031
def stations_by_river(stations):
    """Returns a dictionary containing rivers (keys), and the stations on each
    river (values)."""
    rivers = {}
    for station in stations:
        # only add the river if station.river has been set
        river = station.river
        if river is not None:
            # add the station to the river key in the dictionary;
            # if the key is not in the dictionary, add it
            if river in rivers:
                rivers[river].append(station)
            else:
                rivers[river] = [station]
    return rivers
7feef52d4d5c14109807e1c2e559f206b420c5cc
699,032
def json_citation_for_ij(query, score, doi):
    """
    Because we are parsing the PDF, we cannot ensure the validity of each
    subfield obtained from the parser (CERMINE), i.e. title, journal, volume,
    authors, etc. Instead, we join all the information obtained from the
    parser in plain text. This plain text is the same used in the field
    query.bibliographic in crossref.

    If the crossref query (see query_crossref_with_citation_list) gives a
    first match with a high score (see score_threshold) containing a DOI, we
    store that DOI. If the score is low, or the match doesn't have a doi,
    then our doi is not populated, or null.

    The output keys are: unstructured, score, doi.

    The idea is that if we store a DOI it is because we are pretty sure it's
    the right one. If not, we just store the unstructured text and the score.
    """
    out = {}
    out["unstructured"] = query
    if doi:
        out["doi"] = doi
    if score:
        out["score"] = score
    return out
1f226db5420b61cd40b88015ec727ebd84a08138
699,033
def find_combination(value, stream):
    """
    >>> find_combination(127, [35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277, 309, 576])
    62
    """
    x = 0
    y = 2
    while True:
        total = sum(stream[x:y])
        if total < value:
            y += 1
        elif total > value:
            x += 1
        else:
            return min(stream[x:y]) + max(stream[x:y])
17a61c6918ce78f283bc1b641edb4c235746d428
699,034
def _range_checker(ip_check, first, last):
    """
    Tests whether an ip address is within the bounds of the first and last address.

    :param ip_check: The ip to test if it is within first and last.
    :param first: The first IP in the range to test against.
    :param last: The last IP in the range to test against.

    :return: bool
    """
    if ip_check >= first and ip_check <= last:
        return True
    else:
        return False
924e617374fdbc4cabc28cb18e63c45192da3b4c
699,037
import math


def entropy(data):
    """
    Calculate informational entropy.
    """
    entropy = 0.0
    frequency = {}

    # bucket each instance to the nearest multiple of 5 before counting
    for instance in data:
        p_instance = int(round(instance / 5) * 5)
        if p_instance in frequency:
            frequency[p_instance] += 1
        else:
            frequency[p_instance] = 1

    for freq in frequency.values():
        entropy += (-freq / len(data)) * math.log(float(freq) / len(data), 2)

    return entropy
4b96c229e4cc0318a764990569d2951003447a72
699,044
def s2c_stereographic(sph):
    """
    Stereographic projection from the sphere to the plane.
    """
    u = sph[..., 0]
    v = sph[..., 1]
    w = sph[..., 2]
    return (u + 1j*v) / (1 + w)
1aff6cf6accd6bb26c647f014dc964404e84b979
699,046
def text(node):
    """
    Get all the text of an Etree node

    Returns
    -------
    str
    """
    return ''.join(node.itertext())
f5000a6220da74059230a499dc2b48057e5c4ada
699,047
from typing import Union
from typing import Callable


def fully_qualified_name(thing: Union[type, Callable]) -> str:
    """Construct the fully qualified name of a type."""
    return thing.__module__ + '.' + thing.__qualname__
eacbffdcda78fa38667af0b9d7bc0c53c2fbdb1f
699,048
def remove_whitespace(text):
    # type: (str) -> str
    """strips all white-space from a string"""
    if text is None:
        return ""
    return "".join(text.split())
747538de63b11e49d498b2f4ccb8286975019ec8
699,049
from typing import Dict


def _invert(mapping: Dict) -> Dict:
    """Invert dictionary {k: v} -> {v: k}."""
    return {target: source for source, target in mapping.items()}
013894d56e95a5df273a5bed6a7acc9c49c18d10
699,051
def kind(event):
    """
    Finds the type of an event

    :param event: the event
    :return: the type of the event
    """
    return event.type
68f0170eac9fc06f954542769dcd0d4ef974e725
699,054
def select(*_):
    """
    Always return None
    """
    return None
9f29559a50440143d9e3fe46be766590516f1fc4
699,055
def cells_number(rc):
    """
    Calculates the number of cells in each frame. Intended for use on 'cells'
    or subsets of 'cells' tables.
    """
    return (rc[['frame', 'cell_id']]
            .groupby('frame')
            .agg(len)
            .reset_index()
            .sort_values('frame')['cell_id']  # .sort('frame') in older pandas
            .values)
91725cc352de6a1aa31cf4f82301ed8de6e11bb4
699,061
def indent(s, shift=1, width=4):
    """Indent a block of text. The indentation is applied to each line."""
    indented = '\n'.join(' ' * (width * shift) + l if l else ''
                         for l in s.splitlines())
    if s[-1] == '\n':
        indented += '\n'
    return indented
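A quick demonstration; splitlines() drops the trailing newline, so the final check restores it:

print(repr(indent("a\nb\n")))  # '    a\n    b\n' - four spaces per line, trailing newline preserved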
34ab969f133429959463903fe7e86e18ee644f20
699,066
def get_db_path(spider_dir, db_id):
    """
    Return path to SQLite database file.

    Args:
        spider_dir: path to SPIDER benchmark
        db_id: database identifier

    Returns:
        path to SQLite database file
    """
    return f'{spider_dir}/database/{db_id}/{db_id}.sqlite'
9069b673df1b3c929c249319339a84eaaf398c33
699,070
def pop_indices(lst, indices):
    """
    Pop elements of lst given a list or tuple of indices.
    This function modifies lst directly in place.

    >>> pop_indices([1, 2, 3, 4, 5, 6], [0, 4, 5])
    [2, 3, 4]
    """
    for n in sorted(indices, reverse=True):
        lst.pop(n)
    return lst
4cfeedfd211ba4578d877004acdec061c7727d78
699,071
def _BackslashEscape(s):
    """Double up backslashes.

    Useful for strings about to be globbed and strings about to be IFS escaped.
    """
    # Similar to GlobEscape and splitter.Escape().
    # (An unreachable character-by-character loop after this return has been
    # removed as dead code.)
    return s.replace('\\', '\\\\')
4c107203117d699c65fd00158913914ae6530b97
699,074
import re


def read_accounts_file(account_file):
    """
    Process each line in the specified account file looking for account
    definitions. An account definition is a line containing the word
    'account' followed by a valid account name, e.g:

        account Expenses
        account Expenses:Utilities

    All other lines are ignored.
    """
    accounts = []
    # raw string so \s is not treated as an invalid escape sequence
    pattern = re.compile(r"^\s*account\s+([:A-Za-z0-9-_ ]+)$")
    with open(account_file, "r", encoding='utf-8') as f:
        for line in f.readlines():
            mo = pattern.match(line)
            if mo:
                accounts.append(mo.group(1))
    return accounts
f917958201cd66f6b04bd9307611bb282b25f3f6
699,078
def forceresolution(numgridptscuberoot, lengthcuberoot):
    """
    Returns the force resolution of the PM part in units of the input boxsize.

    args:
        numgridptscuberoot:
        lengthcuberoot:
    return:
        force resolution (distance) in the units used for the box size
    """
    return lengthcuberoot / numgridptscuberoot
d14760ac863bd1409b9df40fe0ff19ff44b508ca
699,080
def set_up_folder_name_1M(model_file, date_run):
    """
    Produce log_file, plot_folder based on model_file (file name ending with _sd.pt).

    If model_file is epoch_1_sd.pt and date_run is 0913:
        plot_folder is 'plot_2d_epoch_1', log file is 'log_0913_2d_epoch_1'
    """
    # if model_file == '': model_pure = f'ep{ep_ind}'
    model_pure = model_file[:model_file.rfind('_sd.pt')]
    plot_folder = f'plot_2d_{model_pure}'
    log_file = f'log_{date_run}_2d_{model_pure}'  # .out  This is the log file, recording the printing
    return log_file, plot_folder
d7dd24789476279ea8d5fe0b32627aa049731ef7
699,081
from typing import List
import glob


def get_all_file_from_directory(directory: str, file_dsc: str) -> List[str]:
    """Get paths of all files matching file_dsc in directory."""
    template = f"{directory}{file_dsc}"
    file_paths = glob.glob(template)
    return file_paths
7a3115793a5a59bc8f6ee655315ea87857338c47
699,082
def naked(val):
    """Given a string, strip off all surrounding whitespace & quotes."""
    return val.strip(' "\'\t')
1875b38f05fa0c8b540ece0265354293f275b3ea
699,083
def handler(store, default=True, internal=False, passive=False):
    """
    Decorator for setting up a handler.

    This puts the handler into the handler store which can then be used to
    look up all handlers and information about them such as which are
    default, passive, etc.

    Currently there are two handler stores, one for connection handlers in
    nogotofail.mitm.connection.handlers.connection.store and one for data
    handlers in nogotofail.mitm.connection.handlers.data.store

    Arguments:
        store -- the HandlerStore to store information about the handler in
        default -- if the handler should be used by default
        internal -- if the handler is used internally. These are always added
            and not displayed in --help or sent to the client.
        passive -- if the handler is passive and does no modification.
    """
    def wrapper(cls):
        cls.passive = passive
        if internal:
            store.internal.append(cls)
        else:
            store.map[cls.name] = cls
            store.all.append(cls)
            if default:
                store.default.append(cls)
        return cls
    return wrapper
cf7c4c8847539be57c5680d87f80414bb6ab0164
699,085
def human_readable_filesize(size):
    """Convert file size in bytes to human readable format

    Args:
        size (int): Size in bytes

    Returns:
        str: Human readable file-size, i.e. 567.4 KB (580984 bytes)
    """
    if size < 1024:
        return "{} bytes".format(size)

    remain = float(size)
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if remain < 1024.0:
            return "{:.1f} {} ({:d} bytes)".format(remain, unit, size)
        remain /= 1024.0
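Usage of the helper above; note that sizes past the 'TB' bucket fall off the end of the loop and return None:

print(human_readable_filesize(512))     # 512 bytes
print(human_readable_filesize(580984))  # 567.4 KB (580984 bytes)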
c8eefdf9145bfb5c937b740fec9e6b437704aa5b
699,087
from typing import Generator


def to_list(x, repeat=1):
    """Convert x to a list object.

    Args:
        x: any object to convert
        repeat: if x is to be wrapped as [x], repeat `repeat` elements in the list
    """
    if isinstance(x, (Generator, tuple, set)):
        return list(x)
    elif isinstance(x, list):
        return x
    elif isinstance(x, dict):
        return list(x.values())
    elif x is not None:
        return [x] * repeat
    else:
        return []
79841d76cd0eba5a2e92fc0992516f59113c3f9b
699,091
def try_key(dict_to_try, key_for_dict):
    """Either returns key value or empty string."""
    if key_for_dict not in dict_to_try:
        return ''
    return dict_to_try[key_for_dict]
a7486bd4933301278941fb7ee2001890221fcbd9
699,092
def covariance_matrix(X):
    """
    Args:
        X (ndarray) (m, n)
    Return:
        cov_mat (ndarray) (n, n): covariance matrix of X
    """
    m = X.shape[0]
    return (X.T @ X) / m
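A quick NumPy check; note the function divides by m (not m - 1) and assumes X is already mean-centered:

import numpy as np

X = np.array([[1.0, 2.0], [-1.0, -2.0]])   # zero-mean columns
print(covariance_matrix(X))                # [[1. 2.] [2. 4.]]
print(np.cov(X, rowvar=False, bias=True))  # matches for centered data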
128d9bfe12169cb344c150bf9f8f05565b5a8831
699,094
def sigmoid_deriv(x):
    """
    Calculates the sigmoid derivative for the given value.

    Note: x is assumed to already be a sigmoid output, i.e. this computes
    sigmoid(t) * (1 - sigmoid(t)) with x = sigmoid(t).

    :param x: Values whose derivatives should be calculated
    :return: Derivatives for given values
    """
    return x * (1. - x)
8a7a1005fafb1b34c17c6ce4a8a04f80334e396a
699,095