Dataset columns:
  content: string, lengths 35 to 762k
  sha1: string, length 40
  id: int64, range 0 to 3.66M
from typing import Set

import requests


def get_filter_fields(target: str, data_registry_url: str, token: str) -> Set[str]:
    """
    Returns the set of filterable fields from a target end point by calling OPTIONS

    :param target: target end point of the data registry
    :param data_registry_url: the url of the data registry
    :param token: personal access token
    :return: the set of filterable fields on this target end point
    """
    end_point = get_end_point(data_registry_url, target)
    result = requests.options(end_point, headers=get_headers(token))
    result.raise_for_status()
    options = result.json()
    return set(options.get("filter_fields", []))
34b6cccc2f8529391357ab70b212f1fddbd9e37d
3,651,739
import numpy as np


def azimuthalAverage(image, center=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).

    http://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
    v0.1
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)

    if not center:
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])

    r = np.hypot(x - center[0], y - center[1])

    # Get sorted radii
    ind = np.argsort(r.flat)
    r_sorted = r.flat[ind]
    i_sorted = image.flat[ind]

    # Get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # Assumes all radii represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr = rind[1:] - rind[:-1]        # number of pixels in each radius bin

    # Cumulative sum to figure out sums for each radius bin
    csim = np.cumsum(i_sorted, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    radial_prof = tbin / nr

    return radial_prof
f086d0868bd56b01de976f346e2a66f5e0d7a10b
3,651,740
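A minimal usage sketch for the azimuthalAverage row above, on a synthetic radially symmetric image (assumes only numpy and the function definition):

import numpy as np

y, x = np.indices((101, 101))
r = np.hypot(x - 50, y - 50)
image = np.exp(-r / 20.0)  # intensity falls off with radius

profile = azimuthalAverage(image)
# profile[k] approximates exp(-k / 20) for small integer radii k
print(profile[:5])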
def train_transforms_fisheye(sample, image_shape, jittering):
    """
    Training data augmentation transformations

    Parameters
    ----------
    sample : dict
        Sample to be augmented
    image_shape : tuple (height, width)
        Image dimension to reshape
    jittering : tuple (brightness, contrast, saturation, hue)
        Color jittering parameters

    Returns
    -------
    sample : dict
        Augmented sample
    """
    if len(image_shape) > 0:
        sample = resize_sample_fisheye(sample, image_shape)
    sample = duplicate_sample(sample)
    if len(jittering) > 0:
        sample = colorjitter_sample(sample, jittering)
    sample = to_tensor_sample(sample)
    return sample
c815d28a5e9e62234544adc4f2ba816e9f1c366a
3,651,741
from typing import Any, Dict


def build_json_output_request(**kwargs: Any) -> HttpRequest:
    """A Swagger with XML that has one operation that returns JSON. ID number
    42.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this
    request builder into your code flow.

    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "id": 0  # Optional.
            }
    """
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", "/xml/jsonoutput")

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs)
b920f0955378f0db1fdddadefc4037a33bedecad
3,651,742
def merge_extended(args_container: args._ArgumentContainer, hold: bool, identificator: str) -> int:
    """
    Merge the args_container into the internal container, like merge_named, but hold specifies if the
    internal container should not be cleared.

    :param args_container: The argument container with the data to merge
    :param hold: When True, does not clear the internal data.
    :param identificator: The identificator to pass to the MERGE_END event

    :raises TypeError: if the arguments passed are not the expected type
    """
    if (
        not isinstance(args_container, args._ArgumentContainer)
        or not isinstance(hold, int)  # noqa W503
        or not isinstance(identificator, str)  # noqa W503
    ):
        raise TypeError("The given parameters do not match the types required.")
    return _grm.grm_merge_extended(args_container.ptr, c_int(1 if hold else 0), _encode_str_to_char_p(identificator))
111765aa0a62a1050387670472d3718aca8a015f
3,651,743
def video_path_file_name(instance, filename):
    """
    Callback for video node field to get path file name

    :param instance: the video field
    :param filename: the file name
    :return: the path file name
    """
    return path_file_name(instance, 'video', filename)
b34b96961ad5f9275cd89809828b8dd0aed3dafb
3,651,744
import numpy as np


def radiative_processes_mono(flux_euv, flux_fuv, average_euv_photon_wavelength=242.0,
                             average_fuv_photon_wavelength=2348.0):
    """
    Calculate the photoionization rate of helium at null optical depth based
    on the EUV spectrum arriving at the planet.

    Parameters
    ----------
    flux_euv (``float``):
        Monochromatic extreme-ultraviolet (0 - 504 Angstrom) flux arriving at
        the planet in units of erg / s / cm ** 2. Attention: notice that this
        ``flux_euv`` is different from the one used for hydrogen, since helium
        ionization happens at a shorter wavelength.

    flux_fuv (``float``):
        Monochromatic far- to middle-ultraviolet (911 - 2593 Angstrom) flux
        arriving at the planet in units of erg / s / cm ** 2.

    average_euv_photon_wavelength (``float``):
        Average wavelength of EUV photons ionizing the He singlet state, in
        unit of Angstrom. Default value is 242 Angstrom. The default value is
        based on a flux-weighted average of the solar spectrum between 0 and
        504 Angstrom.

    average_fuv_photon_wavelength (``float``):
        Average wavelength of FUV-NUV photons ionizing the He triplet state,
        in unit of Angstrom. Default value is 2348 Angstrom. The default value
        is based on a flux-weighted average of the solar spectrum between 911
        and 2593 Angstrom.

    Returns
    -------
    phi_1 (``float``):
        Ionization rate of helium singlet at null optical depth in unit of 1 / s.

    phi_3 (``float``):
        Ionization rate of helium triplet at null optical depth in unit of 1 / s.

    a_1 (``float``):
        Flux-averaged photoionization cross-section of helium singlet in unit of cm ** 2.

    a_3 (``float``):
        Flux-averaged photoionization cross-section of helium triplet in unit of cm ** 2.

    a_h_1 (``float``):
        Flux-averaged photoionization cross-section of hydrogen in the range
        absorbed by helium singlet in unit of cm ** 2.

    a_h_3 (``float``):
        Flux-averaged photoionization cross-section of hydrogen in the range
        absorbed by helium triplet in unit of cm ** 2.
    """
    # Average cross-section to ionize helium singlet
    a_1 = microphysics.helium_singlet_cross_section(average_euv_photon_wavelength)

    # The photoionization cross-section of He triplet
    wavelength_3, a_lambda_3 = microphysics.helium_triplet_cross_section()

    # Average cross-section to ionize helium triplet
    a_3 = np.interp(average_fuv_photon_wavelength, wavelength_3, a_lambda_3)

    # The flux-averaged photoionization cross-section of H is also going to be
    # needed because it adds to the optical depth that the He atoms see.

    # Contribution to the optical depth seen by He singlet atoms:
    # Hydrogen cross-section within the range important to helium singlet
    a_h_1 = 6.3E-18 * (average_euv_photon_wavelength / 13.6) ** (-3)  # Unit cm ** 2.

    # Contribution to the optical depth seen by He triplet atoms:
    if average_fuv_photon_wavelength < 911.0:
        a_h_3 = microphysics.hydrogen_cross_section(
            wavelength=average_fuv_photon_wavelength)
    else:
        a_h_3 = 0.0

    # Convert the fluxes from erg to eV and calculate the photoionization rates
    energy_1 = 12398.419843320025 / average_euv_photon_wavelength
    energy_3 = 12398.419843320025 / average_fuv_photon_wavelength
    phi_1 = flux_euv * 6.24150907e+11 * a_1 / energy_1
    phi_3 = flux_fuv * 6.24150907e+11 * a_3 / energy_3

    return phi_1, phi_3, a_1, a_3, a_h_1, a_h_3
b555fe1af2bdcdab4ac8f78ed2e64bef35a2cdab
3,651,745
import datetime
import typing


def date_yyyymmdd(now: typing.Union[datetime.datetime, None] = None, day_delta: int = 0, month_delta: int = 0) -> str:
    """
    :param now: reference datetime; defaults to the current time
    :param day_delta:
    :param month_delta:
    :return: today + day_delta + month_delta -> str YYYY-MM-DD
    """
    return date_delta(now, day_delta, month_delta).strftime("%Y-%m-%d")
8a3ff535964aba6e3eeaa30dc6b98bfcab1b5794
3,651,746
from .geocoder import description_for_number as real_fn


def description_for_number(*args, **kwargs):
    """Return a text description of a PhoneNumber object for the given language.

    The description might consist of the name of the country where the phone
    number is from and/or the name of the geographical area the phone number
    is from. This function explicitly checks the validity of the number passed in.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
              which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
              ISO script code as defined in ISO 15924, separated by an
              underscore (e.g. "Hant")
    region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")

    Returns a text description in the given language code, for the given phone
    number, or an empty string if no description is available."""
    return real_fn(*args, **kwargs)
c60423fb26d892a43db6017a08cce3d589481cb6
3,651,747
def get_pathway_nodes(pathway):
    """Return single nodes in pathway.

    :param pathme_viewer.models.Pathway pathway: pathway entry
    :return: BaseAbundance nodes
    :rtype: set[pybel.dsl.BaseAbundance]
    """
    # Loads the BELGraph
    graph = from_bytes(pathway.blob)

    collapse_to_genes(graph)

    # Return BaseAbundance BEL nodes
    return {
        node.as_bel()
        for node in graph
        if isinstance(node, BaseAbundance)
    }
5ec451d9e9192b7d07230b05b2e3493df7ab3b4d
3,651,748
import copy


def handle_domain_addition_commands(client: Client, demisto_args: dict) -> CommandResults:
    """
    Adds the domains to the inbound blacklisted list.

    :type client: ``Client``
    :param client: Client to use.

    :type demisto_args: ``dict``
    :param demisto_args: The demisto arguments.

    :return: The command results which contains the added domains to the inbound blacklisted list.
    :rtype: ``CommandResults``
    """
    demisto_args = handle_args(demisto_args)
    domain = demisto_args.get('domain')
    if not domain:
        raise DemistoException(
            'A domain must be provided in order to add it to the inbound blacklisted list.')
    demisto_args['domain'] = ','.join(argToList(domain))

    raw_result = client.inbound_blacklisted_domain_add_command(demisto_args)
    domains_list = copy.deepcopy(raw_result.get('domains', [raw_result]))
    msg = 'Domains were successfully added to the inbound blacklisted list\n'
    objects_time_to_readable_time(domains_list, 'updateTime')
    readable_output = msg + tableToMarkdown('Added Domains', domains_list,
                                            headers=['domain', 'pgid', 'cid', 'update_time', 'annotation'],
                                            headerTransform=string_to_table_header, removeNull=True)
    return CommandResults(
        outputs_prefix='NetscoutAED.InboundBlacklistDomain',
        outputs_key_field='domain',
        outputs=domains_list,
        raw_response=raw_result,
        readable_output=readable_output,
    )
b5b281e3254a433c9431e77631001cb2be4e37e3
3,651,750
import torch
import torch.nn as nn
import torch.nn.functional as F


def _tc4(dom: AbsDom):
    """ Validate that my AcasNet module can be optimized at the inputs. """
    mse = nn.MSELoss()
    max_retries = 100
    max_iters = 30  # at each retry, train at most this many iterations

    def _loss(outputs_lb):
        lows = outputs_lb[..., 0]
        distances = 0 - lows
        distances = F.relu(distances)
        prop = torch.zeros_like(distances)
        return mse(distances, prop)

    retried = 0
    while retried < max_retries:
        # it is possible to get inputs optimized to some local area, thus retry multiple times
        net = AcasNet(dom, 2, 2, [2]).to(device)

        inputs = torch.randn(2, 2, 2, device=device)
        inputs_lb, _ = torch.min(inputs, dim=-1)
        inputs_ub, _ = torch.max(inputs, dim=-1)
        inputs_lb = inputs_lb.requires_grad_()
        inputs_ub = inputs_ub.requires_grad_()
        ins = dom.Ele.by_intvl(inputs_lb, inputs_ub)

        with torch.no_grad():
            outputs_lb, outputs_ub = net(ins).gamma()
        if _loss(outputs_lb) <= 0:
            # loss already zero, nothing to optimize; resample and retry
            continue

        retried += 1
        # Now the network has something to optimize
        print(f'\n===== TC4: ({retried}th try) =====')
        print('Using inputs LB:', inputs_lb)
        print('Using inputs UB:', inputs_ub)
        print('Before any optimization, the approximated output is:')
        print('Outputs LB:', outputs_lb)
        print('Outputs UB:', outputs_ub)

        # This sometimes works and sometimes doesn't. It may get stuck on a fixed loss and never decrease anymore.
        orig_inputs_lb = inputs_lb.clone()
        orig_inputs_ub = inputs_ub.clone()
        opti = torch.optim.Adam([inputs_lb, inputs_ub], lr=0.1)
        iters = 0
        while iters < max_iters:
            iters += 1
            # after optimization, lb ≤ ub may be violated
            _inputs_lbub = torch.stack((inputs_lb, inputs_ub), dim=-1)
            _inputs_lb, _ = torch.min(_inputs_lbub, dim=-1)
            _inputs_ub, _ = torch.max(_inputs_lbub, dim=-1)
            ins = dom.Ele.by_intvl(_inputs_lb, _inputs_ub)

            opti.zero_grad()
            outputs_lb, outputs_ub = net(ins).gamma()
            loss = _loss(outputs_lb)
            if loss <= 0:
                # until the final output's 1st element is >= 0
                break
            loss.backward()
            opti.step()
            print(f'Iter {iters} - loss {loss.item()}')

        if iters < max_iters:
            # successfully trained
            break

    assert retried < max_retries

    with torch.no_grad():
        print(f'At {retried} retry, all optimized after {iters} iterations. ' +
              f'Now the outputs 1st element should be >= 0 given the latest input.')
        outputs_lb, outputs_ub = net(ins).gamma()
        print('Outputs LB:', outputs_lb)
        print('Outputs UB:', outputs_ub)
        print('Original inputs LB:', orig_inputs_lb)
        print('Optimized inputs LB:', inputs_lb)
        print('Original inputs UB:', orig_inputs_ub)
        print('Optimized inputs UB:', inputs_ub)
        assert (outputs_lb[:, 0] >= 0.).all()
    return
f008fd4bff6e1986f2354ef9338a3990e947656c
3,651,751
def skip_url(url):
    """
    Skip naked username mentions and subreddit links.
    """
    return REDDIT_PATTERN.match(url) and SUBREDDIT_OR_USER.search(url)
60c54b69916ad0bce971df06c5915cfbde10018c
3,651,752
def registry():
    """
    Return a dictionary of problems of the form:

    ```{
        "problem name": {
            "params": ...
        },
        ...
    }```

    where `flexs.landscapes.AdditiveAAVPackaging(**problem["params"])` instantiates the
    additive AAV packaging landscape for the given set of parameters.

    Returns:
        dict: Problems in the registry.
    """
    problems = {
        "heart": {"params": {"phenotype": "heart", "start": 450, "end": 540}},
        "lung": {"params": {"phenotype": "lung", "start": 450, "end": 540}},
        "kidney": {"params": {"phenotype": "kidney", "start": 450, "end": 540}},
        "liver": {"params": {"phenotype": "liver", "start": 450, "end": 540}},
        "blood": {"params": {"phenotype": "blood", "start": 450, "end": 540}},
        "spleen": {"params": {"phenotype": "spleen", "start": 450, "end": 540}},
    }

    return problems
5dd2e4e17640e0831daf02d0a2a9b9f90305a1c4
3,651,753
import random
import time
from math import gcd

import numpy as np


def ecm(n, rounds, b1, b2, wheel=2310, output=True):
    """Elliptic Curve Factorization Method.

    In each round, the following steps are performed:
    0. Generate random point and curve.
    1. Repeatedly multiply the current point by small primes raised to some power, determined by b1.
    2. Standard continuation from b1 to b2 with Brent-Suyama's Extension and Polyeval.

    Returns when a non-trivial factor is found.

    Args:
        n (int): Number to be factorized. n >= 12.
        rounds (int): Number of random curves to try.
        b1 (int): Bound for primes used in step 1.
        b2 (int): Bound for primes searched for in step 2. b1 < b2.
        wheel (int, optional): Wheel, where only numbers coprime to wheel will be considered in step 2.
            Defaults to 2310.
        output (bool, optional): Whether to print progress to stdout. Defaults to True.

    Raises:
        ValueError: Thrown when n < 12.

    Returns:
        int: Non-trivial factor if found, otherwise returns None.
    """
    if n < 12:
        raise ValueError("n must be >= 12")

    j_list = [j for j in range(1, wheel // 2) if gcd(j, wheel) == 1]
    block_size = 1 << (len(j_list) - 1).bit_length() - 1

    for round_i in range(rounds):
        if output:
            st = time.time()
            print("Round {}...".format(round_i))
        count = 0
        success = False
        while not success and count < 20:
            try:
                count += 1
                sigma = random.randint(6, n - 6)
                mnt_pt, mnt_curve = mnt.get_curve_suyama(sigma, n)
                success = True
            except InverseNotFound as e:
                res = gcd(e.x, n)
                if 1 < res < n:
                    return res
            except CurveInitFail:
                pass
        if not success:
            print(" - Curve Init Failed.")
            break
        try:
            # Step 1
            if output:
                print("{:>5.2f}: Step 1".format(time.time() - st))
            for p in PRIME_GEN(b1):
                for _ in range(int(np.log(b1) / np.log(p))):
                    mnt_pt = mnt.mul_pt_exn(mnt_pt, mnt_curve, p)

            # Step 2
            if output:
                print("{:>5.2f}: Step 2".format(time.time() - st))
            polynomial = (2, 0, 9, 0, 6, 0, 1)  # f(x) = x^6 + 6x^4 + 9x^2 + 2
            q, wst_curve = mnt.to_weierstrass(mnt_pt, mnt_curve)
            c1 = b1 // wheel
            c2 = b2 // wheel + 2
            c = 0
            k_ls = [
                apply_polynomial(polynomial, j) for j in j_list
            ] + get_difference_seq(polynomial, c1 * wheel, wheel)
            mul_res = wst.mul_pt_multi(q, wst_curve, k_ls)
            xj_list = []
            for i in range(len(j_list)):
                xj_list.append(mul_res[i][0])
            cq_list = mul_res[len(j_list):]
            f_tree = product_tree([Polynomial([n - xj, 1], n) for xj in xj_list], n)
            f_recip_tree = recip_tree(f_tree)

            H = Polynomial([1], n)
            g_poly_list = []
            while c < c2 - c1:
                for _ in range(min(block_size, c2 - c1 - c)):
                    g_poly_list.append(Polynomial([n - cq_list[0][0], 1], n))
                    step_difference_seq_exn(cq_list, wst_curve)
                    c += 1
                G = product_tree(g_poly_list, n)[0]
                H = (H * G).mod_with_recip(f_tree[0], f_recip_tree[0])
                g_poly_list.clear()

            rem_tree = remainder_tree(H, f_tree, f_recip_tree, n)
            res = gcd(rem_tree[0], n)
            if 1 < res < n:
                return res
            elif res == n:
                for rem in rem_tree[len(rem_tree) // 2:]:
                    res = gcd(rem, n)
                    if 1 < res < n:
                        return res
                assert False
            if output:
                print("{:>5.2f}: End".format(time.time() - st))
        except InverseNotFound as e:
            res = gcd(e.x, n)
            if 1 < res < n:
                return res
    return None
9490e6ac4308aed9835e85b3093a1c2b18877fd1
3,651,754
import datetime
import logging
import re
from typing import Optional

# python-telegram-bot types (assumed; the snippet references them unimported)
from telegram import Update
from telegram.ext import CallbackContext, ConversationHandler


def dc_mode_option(update: Update, context: CallbackContext) -> Optional[int]:
    """Get don't care response mode option"""
    ndc = context.user_data[0]
    if ndc.response_mode == DoesntCare.ResponseMode.TIME:
        if not re.match(r"[0-9]+:[0-9]+:[0-9]+", update.effective_message.text):
            update.effective_message.reply_text(
                'Invalid time format, please send in this format: Hours:Minutes:Seconds')
            return None
        hms = update.effective_message.text.split(':')
        ndc.response_mode_option = \
            datetime.timedelta(hours=int(hms[0]), minutes=int(hms[1]), seconds=int(hms[2])).total_seconds()
    else:
        if ((not update.effective_message.text.isdigit())
                or (not (int(update.effective_message.text) > 1))):
            update.effective_message.reply_text('Invalid number. Please send a positive integer more than 1.')
            return None
        ndc.response_mode_option = float(update.effective_message.text)

    if ndc.add():
        update.effective_message.reply_text("Added user to your don't care list!")
        logging.info(
            "Add: DCU: \"{}\", NIU: \"{}\", Chat: \"{}\", RM: \"{}\", RMO: \"{}\""
            .format(ndc.doesnt_care_id, ndc.not_important_id, ndc.chat_id,
                    ndc.response_mode, ndc.response_mode_option)
        )
    else:
        update.effective_message.reply_text("Sorry, an error occurred! Please try again later.")
        logging.error(
            "Add, DCU: \"{}\", NIU: \"{}\", Chat: \"{}\""
            .format(ndc.doesnt_care_id, ndc.not_important_id, ndc.chat_id)
        )
    return ConversationHandler.END
accf998e660898d9de2d17d45e18b6d49ba90f4c
3,651,755
def is_in_period(datetime_, start, end):
    """Determine whether the given datetime falls within the period from start (inclusive) to end (exclusive)."""
    return start <= datetime_ < end
3b830cb8d9e74934a09430c9cd6c0940cf36cf2e
3,651,756
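A usage sketch for the is_in_period row above, showing the half-open interval semantics:

from datetime import datetime

start = datetime(2023, 1, 1)
end = datetime(2023, 2, 1)
print(is_in_period(datetime(2023, 1, 15), start, end))  # True
print(is_in_period(end, start, end))                    # False, end is excluded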
def create_experiment_summary():
    """Returns a summary proto buffer holding this experiment"""
    # Convert TEMPERATURE_LIST to google.protobuf.ListValue.
    # Note: ListValue.extend() mutates in place and returns None, so it must
    # not be chained onto the constructor.
    temperature_list = struct_pb2.ListValue()
    temperature_list.extend(TEMPERATURE_LIST)
    return summary.experiment_pb(
        hparam_infos=[
            api_pb2.HParamInfo(name="initial_temperature",
                               display_name="initial temperature",
                               type=api_pb2.DATA_TYPE_FLOAT64,
                               domain_discrete=temperature_list),
            api_pb2.HParamInfo(name="ambient_temperature",
                               display_name="ambient temperature",
                               type=api_pb2.DATA_TYPE_FLOAT64,
                               domain_discrete=temperature_list),
            api_pb2.HParamInfo(name="heat_coefficient",
                               display_name="heat coefficient",
                               type=api_pb2.DATA_TYPE_FLOAT64,
                               domain_discrete=temperature_list)
        ],
        metric_infos=[
            api_pb2.MetricInfo(
                name=api_pb2.MetricName(
                    tag="temparature/current/scalar_summary"),
                display_name="Current Temp."),
            api_pb2.MetricInfo(
                name=api_pb2.MetricName(
                    tag="temparature/difference_to_ambient/scalar_summary"),
                display_name="Difference To Ambient Temp."),
            api_pb2.MetricInfo(
                name=api_pb2.MetricName(
                    tag="delta/scalar_summary"),
                display_name="Delta T")
        ]
    )
678a9f1b004f4c5a60784ccf814082731eace826
3,651,757
import requests


def get_session(token, custom_session=None):
    """Get requests session with authorization headers

    Args:
        token (str): Top secret GitHub access token
        custom_session: e.g. betamax's session

    Returns:
        :class:`requests.sessions.Session`: Session
    """
    session = custom_session or requests.Session()
    session.headers = {
        "Authorization": "token " + token,
        "User-Agent": "testapp"
    }
    return session
88bf566144a55cf36daa46d3f9a9886d3257d767
3,651,758
def mass_to_tbint_to_energy_map(dpath, filterfn=lambda x: True, fpath_list=None):
    """Given a directory, creates a mapping
        mass number -> ( a, b, c, d, j -> energy )
    using the files in the directory

    :param fpath_list:
    :param dpath: the directory which is a direct parent to the files from
    which to generate the map
    :param filterfn: the filter function to apply to the files before
    constructing the map
    """
    mida_map = _mass_tbme_data_map(
        dpath, filterfn, fpath_list)
    for k in mida_map.keys():
        v = mida_map[k]
        nextv = dict()
        for row in v:
            tup = tuple(row[0:6])
            energy = float(row[6])
            nextv[tup] = energy
        mida_map[k] = nextv
    return mida_map
a13caba5ff41e2958d7f4e6104eb809de1cda1c1
3,651,759
import unicodedata


def strip_accents(text):
    """
    Strip accents from input String.

    :param text: The input string.
    :type text: String.

    :returns: The processed String.
    :rtype: String.
    """
    text = unicodedata.normalize('NFD', text)
    text = text.encode('ascii', 'ignore')
    text = text.decode("utf-8")
    return str(text)
4a6e11e0a72438a7e604e90e44a7220b1426df69
3,651,760
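A usage sketch for the strip_accents row above: accented characters are decomposed by NFD and the combining marks dropped by the ASCII encode/ignore step:

print(strip_accents("café naïve"))  # 'cafe naive'
print(strip_accents("Ångström"))    # 'Angstrom'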
import json


def json_formatter(result, _verbose):
    """Format result as json."""
    if isinstance(result, list) and "data" in result[0]:
        res = [json.dumps(record) for record in result[0]["data"]]
        output = "\n".join(res)
    else:
        output = json.dumps(result, indent=4, sort_keys=True)

    return output
68aae87577370d3acf584014651af21c7cbfa309
3,651,761
def show_all_companies():
    """Show all companies a user has interest in."""

    # redirect if user is not logged in
    if not session:
        return redirect('/')

    else:
        # get user_id from session
        user_id = session['user_id']
        user = User.query.filter(User.user_id == user_id).one()
        user_companies = user.companies
        companies = {}
        for company in user_companies:
            count = Job.query.filter(Job.company_id == company.company_id).count()
            companies[company] = count

        return render_template('companies.html', companies=companies)
7f2d7215627747ff44caff4f58324dce2e3aa749
3,651,762
from math import sqrt

import numpy as np


def ll_combined_grad(x, item_ids, judge_ids, pairwise=[], individual=[]):
    """
    This function computes the _negative_ gradient of the loglikelihood for
    each parameter in x, for both the individual and pairwise data.

    Keyword arguments:
    x -- the current parameter estimates.
    item_ids -- the ids of the items being evaluated
    judge_ids -- the ids of the judges being evaluated
    pairwise -- an iterator for the pairwise ratings
    individual -- an iterator for the individual ratings

    >>> ll_combined_grad([0,0,1,1,3,1], [0,1], [0], [], [])
    array([-0.        , -0.        , -0.        , -1.33333333,  2.        ,
           -0.        ])
    """
    item_val = {i: idx for idx, i in enumerate(item_ids)}
    discrim = {i: idx + len(item_val) for idx, i in enumerate(judge_ids)}
    bias = {i: idx + len(item_val) + len(judge_ids) for idx, i in enumerate(judge_ids)}
    precision = {i: idx + len(item_val) + 2 * len(judge_ids) for idx, i in enumerate(judge_ids)}

    likert_mean = x[-1]
    likert_prec = x[-2]

    grad = np.zeros(len(x))
    # grad = np.array([0.0 for v in x])

    for r in pairwise:
        left = x[item_val[r.left.id]]
        right = x[item_val[r.right.id]]
        d = x[discrim[r.judge.id]]
        y = r.value
        z = d * (left - right)
        # z = (left - right)
        p = invlogit(z)
        g = y - p

        # grad[item_val[r.left.id]] += g
        # grad[item_val[r.right.id]] += -1 * g
        grad[item_val[r.left.id]] += d * g
        grad[item_val[r.right.id]] += -1 * d * g
        grad[discrim[r.judge.id]] += (left - right) * g

    for l in individual:
        u = x[item_val[l.item.id]]
        b = x[bias[l.judge.id]]
        prec = x[precision[l.judge.id]]
        # n = sqrt(1/prec)

        p0 = likert_prec
        s = 1 / sqrt(p0)
        error = (l.value - likert_mean - s * (b + u))
        grad[item_val[l.item.id]] += prec * p0 * error * s
        grad[bias[l.judge.id]] += prec * p0 * error * s
        grad[-1] += prec * p0 * error
        grad[precision[l.judge.id]] += (1 / (2 * prec)) - (p0 / 2) * (error * error)
        grad[-2] += (1 / (2 * p0)) - (prec / 2) * ((b + u) * s * error + error * error)

        # error = (l.value - likert_mean - b - u)
        # grad[item_val[l.item.id]] += prec * error
        # grad[bias[l.judge.id]] += prec * error
        # grad[-1] += prec * error  # likert mean
        # grad[precision[l.judge.id]] += (1 / (2 * prec)) - (error * error)/2

    # Regularization
    # Normal prior on means
    item_reg = np.array([0.0 for v in x])
    for i in item_val:
        item_reg[item_val[i]] += (x[item_val[i]] - item_mean)
    item_reg = -1 * item_prec * item_reg
    # item_reg = (-1.0 / (item_std * item_std)) * item_reg

    # Normal prior on discriminations
    judge_reg = np.array([0.0 for v in x])
    for i in discrim:
        judge_reg[discrim[i]] += (x[discrim[i]] - discrim_mean)
    judge_reg = -1 * discrim_prec * judge_reg
    # judge_reg = (-1.0 / (discrim_std * discrim_std)) * judge_reg

    # Normal prior on bias
    bias_reg = np.array([0.0 for v in x])
    for i in bias:
        bias_reg[bias[i]] += (x[bias[i]] - bias_mean)
    bias_reg = (-1.0 / (bias_std * bias_std)) * bias_reg

    # Normal prior on noise
    prec_reg = np.array([0.0 for v in x])
    for i in precision:
        prec_reg[precision[i]] += (x[precision[i]] - prec_mean)
    prec_reg = (-1.0 / (prec_std * prec_std)) * prec_reg

    return -1 * (grad + item_reg + judge_reg + bias_reg + prec_reg)
54936fe9b0e9b7a17acb7455c606bf754532a8b8
3,651,763
import numpy as np


def relu(inp):
    # ReLu function as activation function
    """
    ReLu neural network activation function

    :param inp: Node value before activation
    :return: Node value after activation
    """
    # np.maximum is the element-wise max with 0; np.max(inp, 0) would instead
    # reduce along axis 0 and return a single maximum.
    return np.maximum(inp, 0)
fbe6caf2246684a62d00956e38579fab3dff3418
3,651,764
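A short sketch contrasting np.maximum (element-wise, as used in the relu row above) with np.max (a reduction), an easy mix-up when writing ReLU by hand:

import numpy as np

v = np.array([-2.0, -0.5, 0.0, 1.5])
print(np.maximum(v, 0))  # [0.  0.  0.  1.5]  element-wise ReLU
print(np.max(v, 0))      # 1.5                max over axis 0, a scalar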
import logging
from typing import List, Tuple


def augment_sentence(tokens: List[str], augmentations: List[Tuple[List[tuple], int, int]], begin_entity_token: str,
                     sep_token: str, relation_sep_token: str, end_entity_token: str) -> str:
    """
    Augment a sentence by adding tags in the specified positions.

    Args:
        tokens: Tokens of the sentence to augment.
        augmentations: List of tuples (tags, start, end).
        begin_entity_token: Beginning token for an entity, e.g. '['
        sep_token: Separator token, e.g. '|'
        relation_sep_token: Separator token for relations, e.g. '='
        end_entity_token: End token for an entity e.g. ']'

    An example follows.

    tokens:
    ['Tolkien', 'was', 'born', 'here']

    augmentations:
    [
        ([('person',), ('born in', 'here')], 0, 1),
        ([('location',)], 3, 4),
    ]

    output augmented sentence:
    [ Tolkien | person | born in = here ] was born [ here | location ]
    """
    # sort entities by start position, longer entities first
    augmentations = list(sorted(augmentations, key=lambda z: (z[1], -z[2])))

    # check that the entities have a tree structure (if two entities overlap, then one is contained in
    # the other), and build the entity tree
    root = -1   # each node is represented by its position in the list of augmentations, except that the root is -1
    entity_tree = {root: []}    # list of children of each node
    current_stack = [root]      # where we are in the tree

    for j, x in enumerate(augmentations):
        tags, start, end = x
        if any(augmentations[k][1] < start < augmentations[k][2] < end for k in current_stack):
            # tree structure is not satisfied!
            logging.warning(f'Tree structure is not satisfied! Dropping annotation {x}')
            continue

        while current_stack[-1] >= 0 and \
                not (augmentations[current_stack[-1]][1] <= start <= end <= augmentations[current_stack[-1]][2]):
            current_stack.pop()

        # add as a child of its father
        entity_tree[current_stack[-1]].append(j)

        # update stack
        current_stack.append(j)

        # create empty list of children for this new node
        entity_tree[j] = []

    return ' '.join(expand_tokens(
        tokens, augmentations, entity_tree, root,
        begin_entity_token, sep_token, relation_sep_token, end_entity_token
    ))
916745727dd6ce19e67a28bdadb2bd74b54075a3
3,651,765
import multiprocessing


def evaluate_model_recall_precision(mat, num_items, testRatings, K_recall, K_precision, num_thread):
    """
    Evaluate the performance (Recall, Precision) of top-K recommendation
    Return: score of each test rating.
    """
    global _mat
    global _testRatings
    global _K_recall
    global _K_precision
    global _K_max
    global _num_items

    _mat = mat
    _testRatings = testRatings
    _K_recall = K_recall
    _K_precision = K_precision
    _K_max = max(_K_precision, _K_recall)
    _num_items = num_items

    recalls, precisions = [], []
    if (num_thread > 1):  # Multi-thread
        pool = multiprocessing.Pool(processes=num_thread)
        res = pool.map(eval_recall_precision, range(len(_testRatings)))
        pool.close()
        pool.join()
        recalls = [r[0] for r in res]
        precisions = [r[1] for r in res]
        return (recalls, precisions)
    # Single thread
    for idx in range(len(_testRatings)):
        (recall, precision) = eval_recall_precision(idx)
        recalls.append(recall)
        precisions.append(precision)
    return (recalls, precisions)
bb504053937faf6e3017f8d79fee6a4a4e864b15
3,651,766
def pipe_hoop_stress(P, D, t):
    """Calculate the hoop (circumferential) stress in a pipe
    using Barlow's formula.

    Refs: https://en.wikipedia.org/wiki/Barlow%27s_formula
          https://en.wikipedia.org/wiki/Cylinder_stress

    :param P: the internal pressure in the pipe.
    :type P: float
    :param D: the outer diameter of the pipe.
    :type D: float
    :param t: the pipe wall thickness.
    :type t: float

    :returns: the hoop stress in the pipe.
    :rtype: float
    """
    return P * D / (2 * t)
9985d35c2c55e697ce21a880bb2234c160178f33
3,651,767
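A worked example for the pipe_hoop_stress row above, with made-up but consistent SI values (P = 10 MPa internal pressure, D = 0.3 m outer diameter, t = 0.01 m wall):

sigma = pipe_hoop_stress(P=10e6, D=0.3, t=0.01)
print(sigma)  # 150000000.0 Pa, i.e. 150 MPa = 10e6 * 0.3 / (2 * 0.01)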
def node_constraints(node):
    """
    Returns all constraints a node is linked to

    :param node: str
    :return: list(str)
    """
    return maya.cmds.listRelatives(node, type='constraint')
85c619f4c1b6ec24feb8c3dac3e73b92f8fdf7fc
3,651,768
def load_opencv_stereo_calibration(path):
    """
    Load stereo calibration information from xml file

    @type path: str
    @param path: path to xml file
    @return stereo calibration: loaded from the given xml file
    @rtype calib.data.StereoRig
    """
    tree = etree.parse(path)
    stereo_calib_elem = tree.find("Rig")
    return rig.Rig.from_xml(stereo_calib_elem)
07ace05e8d377ba1fdcef632e5afa1d9ea309185
3,651,771
def _IsSingleElementTuple(token):
    """Check if it's a single-element tuple."""
    close = token.matching_bracket
    token = token.next_token
    num_commas = 0
    while token != close:
        if token.value == ',':
            num_commas += 1
        if token.OpensScope():
            token = token.matching_bracket
        else:
            token = token.next_token
    return num_commas == 1
8d675bcee737ddb106817db79e2b989509d2efaa
3,651,772
def exportBufferView(gltf: GLTF2, primaryBufferIndex: int, byteOffset: int, byteLength: int) -> GLTFIndex:
    """Creates a glTF bufferView with the specified offset and length, referencing the default glB buffer.

    Args:
        gltf: Gltf object to append new buffer onto.
        primaryBufferIndex: Index of the primary glb buffer.
        byteOffset: Index of the starting byte in the referenced buffer.
        byteLength: Length in bytes of the bufferView.

    Returns:
        The index of the exported bufferView in the glTF bufferViews list.
    """
    bufferView = BufferView()
    bufferView.buffer = primaryBufferIndex  # index of the default glB buffer.
    bufferView.byteOffset = byteOffset
    bufferView.byteLength = byteLength
    return appendGetIndex(gltf.bufferViews, bufferView)
6905f3544470860a125b0d28f5f422a39bc7b91f
3,651,773
import numpy


def ReadCan(filename):
    """Reads the candump in filename and returns the parsed fields."""
    trigger = []
    trigger_velocity = []
    trigger_torque = []
    trigger_current = []
    wheel = []
    wheel_velocity = []
    wheel_torque = []
    wheel_current = []
    trigger_request_time = [0.0]
    trigger_request_current = [0.0]
    wheel_request_time = [0.0]
    wheel_request_current = [0.0]

    with open(filename, 'r') as fd:
        for line in fd:
            data = line.split()
            can_id = int(data[1], 16)
            if can_id == 0:
                data = [int(d, 16) for d in data[3:]]
                trigger.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0)
                trigger_velocity.append(
                    ((data[2] + (data[3] << 8)) - 32768) / 32768.0)
                trigger_torque.append(
                    ((data[4] + (data[5] << 8)) - 32768) / 32768.0)
                trigger_current.append(
                    ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0)
            elif can_id == 1:
                data = [int(d, 16) for d in data[3:]]
                wheel.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0)
                wheel_velocity.append(
                    ((data[2] + (data[3] << 8)) - 32768) / 32768.0)
                wheel_torque.append(
                    ((data[4] + (data[5] << 8)) - 32768) / 32768.0)
                wheel_current.append(
                    ((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0)
            elif can_id == 2:
                data = [int(d, 16) for d in data[3:]]
                trigger_request_current.append(
                    ((data[4] + (data[5] << 8)) - 32768) / 32768.0)
                trigger_request_time.append(len(trigger) * 0.001)
            elif can_id == 3:
                data = [int(d, 16) for d in data[3:]]
                wheel_request_current.append(
                    ((data[4] + (data[5] << 8)) - 32768) / 32768.0)
                wheel_request_time.append(len(wheel) * 0.001)

    trigger_data_time = numpy.arange(0, len(trigger)) * 0.001
    wheel_data_time = numpy.arange(0, len(wheel)) * 0.001

    # Extend out the data in the interpolation table.
    trigger_request_time.append(trigger_data_time[-1])
    trigger_request_current.append(trigger_request_current[-1])
    wheel_request_time.append(wheel_data_time[-1])
    wheel_request_current.append(wheel_request_current[-1])

    return (trigger_data_time, wheel_data_time, trigger, wheel,
            trigger_velocity, wheel_velocity, trigger_torque, wheel_torque,
            trigger_current, wheel_current, trigger_request_time,
            trigger_request_current, wheel_request_time, wheel_request_current)
773657474462aa3a129ea7459c72ea0b0dc0cefa
3,651,774
def retrieve(func):
    """
    Decorator for Zotero read API methods; calls _retrieve_data() and passes
    the result to the correct processor, based on a lookup
    """
    def wrapped_f(self, *args, **kwargs):
        """
        Returns result of _retrieve_data()

        func's return value is part of a URI, and it's this
        which is intercepted and passed to _retrieve_data:
        '/users/123/items?key=abc123'
        the atom doc returned by _retrieve_data is then
        passed to _etags in order to extract the etag attributes
        from each entry, then to feedparser, then to the correct processor
        """
        if kwargs:
            self.add_parameters(**kwargs)
        retrieved = self._retrieve_data(func(self, *args))
        # determine content and format, based on url params
        content = self.content.search(
            self.request.get_full_url()) and \
            self.content.search(
                self.request.get_full_url()).group(0) or 'bib'
        fmt = self.fmt.search(
            self.request.get_full_url()) and \
            self.fmt.search(
                self.request.get_full_url()).group(0) or 'atom'
        # step 1: process atom if it's atom-formatted
        if fmt == 'atom':
            parsed = feedparser.parse(retrieved)
            processor = self.processors.get(content)
            # step 2: if the content is JSON, extract its etags
            if processor == self._json_processor:
                self.etags = etags(retrieved)
            # extract next, previous, first, last links
            self.links = self._extract_links(parsed)
            return processor(parsed)
        # otherwise, just return the unparsed content as is
        else:
            return retrieved
    return wrapped_f
442f18f4c00a13b5eb68285202088b009f9f351b
3,651,775
from typing import Dict


async def health() -> Dict[str, str]:
    """Health check function

    :return: Health check dict
    :rtype: Dict[str, str]
    """
    health_response = schemas.Health(name=settings.PROJECT_NAME, api_version=__version__)
    return health_response.dict()
8c2841cea1fb9118cbc063d9352d375188025614
3,651,776
def detail(video_id):
    """
    return value is
    [
        { 'video_name' : s },
        {
            'person_id' : n,
            'person_info_list' :
            [
                {
                    'frame' : n,
                    'millisec' : n,
                    'age' : n,
                    'gender' : s,
                    'img_person' : s,
                    'top_color' : n,
                    'bottom_color' : n
                },
                { ... }
            ]
        },
        { 'person_id' : n, ... },
        ...
    ]
    """
    video = VideoList.query.get_or_404(video_id)
    tableName = videoNameToTable(video.video_name)
    VideoTable = getVideoTable(tableName)

    returnJson = list()
    returnJson.append({'video_name': tableName + '.mp4'})

    people = db.session.query(VideoTable.person_id.distinct()).all()
    for person in people:
        personDict = dict()
        person_id = person[0]
        personDict['person_id'] = person_id
        personDict['person_info_list'] = list()

        personInfoList = VideoTable.query.filter(VideoTable.person_id == person_id).all()
        for personInfo in personInfoList:
            # change 'personInfo.img_person' from abs path to relative path
            index = personInfo.img_person.find('images')
            img_person = personInfo.img_person[index + 7:]

            personDict['person_info_list'].append(
                {
                    'frame': personInfo.frame,
                    'millisec': personInfo.millisec,
                    'age': personInfo.age,
                    'gender': personInfo.gender,
                    'img_person': img_person,
                    'top_color': personInfo.top_color,
                    'bottom_color': personInfo.bottom_color
                }
            )
        returnJson.append(personDict)

    return jsonify(returnJson), 200
7447f5ea45ab6fa1c6d10f97ac7d57add68fdf40
3,651,777
import logging


def RunLinters(prefix, name, data, settings=None):
    """Run linters starting with |prefix| against |data|."""
    ret = []

    if settings is None:
        settings = ParseOptions([])
    ret += settings.errors

    linters = [x for x in FindLinters(prefix) if x not in settings.skip]
    for linter in linters:
        functor = globals().get(linter)
        for result in functor(data):
            ret.append(LintResult(linter, name, result, logging.ERROR))

    return ret
9b8c780fe3684405d17e59897bee11118dff5590
3,651,779
import array
import multiprocessing
import sys


def element_norm_spatial_exoao(processes, comp_sol, test_time, test_var_list,
                               exact_solution, subel_ints=1, zfill=None,
                               exact_time=None, block_ids=[]):
    """
    This is element_norm_spatial but input solution types are limited.
    An exodus.ExodusFile object is expected for the computed solution, and
    an analytic solution object is expected for the exact solution.
    If exact_time is not given, the exact_solution is evaluated at test_time.
    """
    # Accept an exodus object as the computed solution.
    if not isinstance(comp_sol, exodus.ExodusFile):
        # Unrecognized type
        print("Computed solution is not a recognized type.")
        print("It should be an exodus.ExodusFile object.")
        sys.exit(1)

    # Get the (1-based) index of the time for the computed solution
    comp_t_idx1 = find_time_index(comp_sol, test_time)
    # The (0-based) index of the variable in the computed solution
    comp_var_idx0 = comp_sol.findVar(exodus.EX_ELEM_BLOCK, test_var_list[0])
    # Add error checking for test_var_list?

    # If no list of block ids is given, generate a list including all blocks
    if block_ids == []:
        for block_idx0 in range(comp_sol.getNumber(exodus.EX_ELEM_BLOCK)):
            block_ids.append(comp_sol.getId(exodus.EX_ELEM_BLOCK, block_idx0))

    # Accept a solution object as the exact solution
    if hasattr(exact_solution, test_var_list[1]):
        exact_sol = exact_solution
        # If not overridden by exact_time argument, ensure the
        # analytic solution time matches the simulation data time
        if exact_time is None:
            exact_time = comp_sol.getTimes()[comp_t_idx1 - 1]
        # Refer directly to the attribute (method) we want
        func_direct = getattr(exact_sol, test_var_list[1])
        # Get nodal coords here rather than over and over for each element block;
        # for subel_ints == 1 restructure after computing center coordinates,
        # which happens in the block loop
        current_coordinates = get_current_coordinates(comp_sol, comp_t_idx1)
        if subel_ints > 1:
            restructured_coords = restructure_coordinates(current_coordinates)
    else:
        # Unrecognized type
        print("Exact solution is not a recognized type.")
        print("It should be an analytic solution object.")
        sys.exit(1)

    # Initialize
    varET = WeightedErrorTally()

    ######## The work proper ########
    for block_id in block_ids:
        element_volumes = get_element_volumes(comp_sol, block_id, comp_t_idx1)
        comp_var = comp_sol.readVar(comp_t_idx1,
                                    exodus.EX_ELEM_BLOCK,
                                    block_id,
                                    comp_var_idx0)
        exact_var = array.array('d')
        # exact solution will be calculated from a function
        if subel_ints == 1:
            # Evaluate the exact solution at the center of the element
            ctr_coords = comp_sol.computeCenters(exodus.EX_ELEM_BLOCK,
                                                 block_id,
                                                 current_coordinates)
            # Have to add the fill here because computeCenters knows
            # the true number of dimensions
            if comp_sol.getDimension() == 2 and zfill is not None:
                x2_fill = array.array(comp_sol.storageType())
                for i in range(len(ctr_coords[0])):
                    x2_fill.append(zfill)
                ctr_coords.append(x2_fill)
            r_coords = restructure_coordinates(ctr_coords)
            len_r_coords = len(r_coords)
            if processes <= 2:
                # No point in parallelizing for 2 processes,
                # since only 1 child process would be created.
                exact_var = map_func(func_direct, 0, len_r_coords, r_coords, exact_time)
            else:
                child_processes = processes - 1
                exact_var = [None for i in range(len_r_coords)]
                pipes = [(None, None) for i in range(child_processes)]
                process_list = [None for i in range(child_processes)]
                for process_number in range(child_processes):
                    idx_start = (process_number * len_r_coords) // child_processes
                    idx_end = ((process_number + 1) * len_r_coords) // child_processes
                    pipes[process_number] = multiprocessing.Pipe(False)
                    p = multiprocessing.Process(target=map_func_parallel,
                                                args=(pipes[process_number][1],
                                                      func_direct,
                                                      idx_start, idx_end,
                                                      r_coords, exact_time,))
                    process_list[process_number] = p
                    p.start()
                for process_number in range(child_processes):
                    p = process_list[process_number]
                    idx_start = (process_number * len_r_coords) // child_processes
                    idx_end = ((process_number + 1) * len_r_coords) // child_processes
                    conn_obj = pipes[process_number][0]
                    exact_var_local = conn_obj.recv()
                    for idx in range(idx_start, idx_end):
                        exact_var[idx] = exact_var_local[idx - idx_start]
                    conn_obj.close()
                    p.join()
        else:
            avg_evar_on_block(processes, comp_sol, block_id, comp_t_idx1,
                              restructured_coords, func_direct, subel_ints,
                              zfill, evar_array=exact_var)
        varET.w_accumulate(exact_var, comp_var, element_volumes)
    return varET
323fe13213a5ae8ad980d760943bc5cf1fc46074
3,651,780
from typing import Iterator

import hypothesis.extra.numpy as hynp
import numpy as np
from hypothesis import strategies as st


def generate_close_coordinates(
    draw: st.DrawFn, prev_coord: Coordinates[str, np.float64]
) -> Coordinates[str, np.float64]:
    """Create coordinates using Hypothesis."""
    # one bounded float64 offset per axis
    diff = [
        draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0))
        for _ in range(6)
    ]
    coord = vectorize(prev_coord) + diff
    formatted: Iterator[np.float64] = (np.float64(i) for i in coord)
    return dict(zip(SIXAXES, formatted))
8b207d5989f59a30e0c99eebd4654b609a03be93
3,651,781
from typing import Union

from shapely.geometry import LineString, MultiLineString


def redistribute_vertices(
        geom: Union[LineString, MultiLineString],
        distance: float
) -> Union[LineString, MultiLineString]:
    """Redistribute the vertices of input line strings

    Parameters
    ----------
    geom : LineString or MultiLineString
        Input line strings whose vertices are to be redistributed.
    distance : float
        The distance to be used for redistribution.

    Returns
    -------
    LineString or MultiLineString
        The resulting line strings with redistributed vertices.

    Raises
    ------
    ValueError
        If input geometry is not LineString or MultiLineString.
    """
    if geom.geom_type == 'LineString':  # pylint: disable=R1705
        num_vert = int(round(geom.length / distance))
        if num_vert == 0:
            num_vert = 1
        return LineString(
            [geom.interpolate(float(n) / num_vert, normalized=True)
             for n in range(num_vert + 1)])

    elif geom.geom_type == 'MultiLineString':
        parts = [redistribute_vertices(part, distance) for part in geom]
        return type(geom)([p for p in parts if not p.is_empty])

    raise ValueError(f'unhandled geometry {geom.geom_type}')
1a5f0c3f409d5f3de46831bfa8456a734985d2b8
3,651,782
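A usage sketch for the redistribute_vertices row above: a 10 m segment at ~2.5 m spacing gets round(10 / 2.5) = 4 intervals, i.e. 5 evenly spaced vertices:

from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
result = redistribute_vertices(line, 2.5)
print(list(result.coords))
# [(0.0, 0.0), (2.5, 0.0), (5.0, 0.0), (7.5, 0.0), (10.0, 0.0)]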
def get_boolean_value(value):
    """Get the boolean value of the ParameterValue."""
    if value.type == ParameterType.PARAMETER_BOOL:
        return value.bool_value
    else:
        raise ValueError('Expected boolean value.')
fc5452a45983d16f30433ffe54b8883c24c1eb94
3,651,783
import numpy as np
import torch


def eval_bayesian_optimization(net: torch.nn.Module, input_picture: DATA,
                               label_picture: DATA, ) -> float:
    """
    Compute classification accuracy on provided dataset to find the optimized
    hyperparameter settings.

    Args:
        net: trained neural network
        Input: The image
        Label: The label to the respective image

    Returns:
        float: classification accuracy
    """
    # Define the data
    x_valid = input_picture
    y_valid = label_picture

    # Pre-locating memory
    correct = 0

    # Get the number of samples and batches before testing the network
    num_samples = x_valid.shape[0]
    num_batches = int(np.ceil(num_samples / float(BATCH_SIZE)))

    net.eval()
    with torch.no_grad():
        for i in range(num_batches):
            idx = range(i * BATCH_SIZE, np.minimum((i + 1) * BATCH_SIZE, num_samples))
            x_batch_val = get_variable(Variable(torch.from_numpy(x_valid[idx])))
            y_batch_val = get_variable(Variable(torch.from_numpy(y_valid[idx]).long()))
            output, _ = net(x_batch_val)
            _, predicted = torch.max(output.data, 1)
            correct += (predicted == y_batch_val).float().mean()

    # Calculating the accuracy
    return float(correct / num_batches)
4833627f5239f7c713f11a1ab9f97e6898a303b1
3,651,784
import urllib.parse


def parse(url):
    """
    URL-parsing function that checks that
        - port is an integer 0-65535
        - host is a valid IDNA-encoded hostname with no null-bytes
        - path is valid ASCII

    Args:
        A URL (as bytes or as unicode)

    Returns:
        A (scheme, host, port, path) tuple

    Raises:
        ValueError, if the URL is not properly formatted.
    """
    parsed = urllib.parse.urlparse(url)

    if not parsed.hostname:
        raise ValueError("No hostname given")

    if isinstance(url, bytes):
        host = parsed.hostname

        # this should not raise a ValueError,
        # but we try to be very forgiving here and accept just everything.
        # decode_parse_result(parsed, "ascii")
    else:
        host = parsed.hostname.encode("idna")
        parsed = encode_parse_result(parsed, "ascii")

    port = parsed.port
    if not port:
        port = 443 if parsed.scheme == b"https" else 80

    full_path = urllib.parse.urlunparse(
        (b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
    )
    if not full_path.startswith(b"/"):
        full_path = b"/" + full_path

    if not check.is_valid_host(host):
        raise ValueError("Invalid Host")
    if not check.is_valid_port(port):
        raise ValueError("Invalid Port")

    return parsed.scheme, host, port, full_path
d1af42d9ee5b9c786cae9a6a16da89a545d27e33
3,651,785
def is_amicable(num: int) -> bool:
    """ Returns whether the number is part of an amicable number pair """
    friend = sum(divisors(num)) - num
    # Only those in pairs are amicable numbers. If the sum is the number itself, it's a perfect number
    return friend != num and sum(divisors(friend)) - friend == num
e5fc62d4f390a95f6d54d57979c4e39b9d4e4316
3,651,786
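A sketch exercising the is_amicable row above. The snippet does not show `divisors`; since the code subtracts num back out, a helper returning all divisors including the number itself is assumed here:

def divisors(n):
    # assumed helper: all divisors of n, including n itself
    return [d for d in range(1, n + 1) if n % d == 0]

print(is_amicable(220))  # True, (220, 284) is the classic amicable pair
print(is_amicable(284))  # True
print(is_amicable(6))    # False, 6 is perfect, not amicable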
# Dash component namespaces (the stdlib ``html`` module has no Div)
from dash import dcc, html


def no_data_info():
    """Returns information about not having enough information yet to display"""
    return html.Div(children=[dcc.Markdown('''
        # Please wait a little bit...
        The MongoDB database was probably just initialized and is currently empty.
        You will need to wait a bit (~30 min) for it to populate with initial data
        before using the application.
        ''', className='eleven columns', style={'paddingLeft': '5%'})],
        className="row")
59ce4a2a0e2b18298006746be31f30b8c2cb4a6a
3,651,787
def delta_t(soil_type):
    """
    Displacement at Tu
    """
    delta_ts = {
        "dense sand": 0.003,
        "loose sand": 0.005,
        "stiff clay": 0.008,
        "soft clay": 0.01,
    }
    try:
        return delta_ts[soil_type]
    except KeyError:
        # a dict.get default would *return* the ValueError instead of raising it
        raise ValueError("Unknown soil type.")
c542adb7c302bc1f50eb4c49bf9da70932758814
3,651,788
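A usage sketch for the delta_t row above:

print(delta_t("dense sand"))  # 0.003
print(delta_t("soft clay"))   # 0.01
# delta_t("gravel") raises ValueError("Unknown soil type.")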
from math import asin, pi

from cv2 import getRectSubPix, getRotationMatrix2D, warpAffine


def extractPlate(imgOriginal, listOfMatchingChars, PlateWidthPaddingFactor, PlateHeightPaddingFactor):
    """ Extract license-plate in the provided image, based on given contours group that corresponds
    for matching characters """

    # Sort characters from left to right based on x position:
    listOfMatchingChars.sort(key=lambda matchingChar_: matchingChar_.intCenterX)

    # Calculate the plate centroid (average of leftmost and rightmost characters):
    fltPlateCenterX = (listOfMatchingChars[0].intCenterX +
                       listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterX) / 2.0
    fltPlateCenterY = (listOfMatchingChars[0].intCenterY +
                       listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY) / 2.0
    ptPlateCenter = fltPlateCenterX, fltPlateCenterY

    # Calculate plate width (rightmost - leftmost characters):
    intPlateWidth = int(PlateWidthPaddingFactor *
                        (listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectX +
                         listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectWidth -
                         listOfMatchingChars[0].intBoundingRectX))

    # Calculate plate height (average over all characters):
    intTotalOfCharHeights = 0
    for matchingChar in listOfMatchingChars:
        intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight
    fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)
    intPlateHeight = int(fltAverageCharHeight * PlateHeightPaddingFactor)

    # Calculate correction angle of plate region (simple geometry calculation):
    fltOpposite = listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY - listOfMatchingChars[0].intCenterY
    # distance between the first and last characters (assumes the matching-char
    # class implements __sub__ as the Euclidean distance between centers)
    fltHypotenuse = (listOfMatchingChars[0] - listOfMatchingChars[len(listOfMatchingChars) - 1])
    fltCorrectionAngleInRad = asin(fltOpposite / fltHypotenuse)
    fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / pi)

    # Rotate the entire image (affine warp), for compensating the angle of the plate region:
    rotationMatrix = getRotationMatrix2D(tuple(ptPlateCenter), fltCorrectionAngleInDeg, 1.0)
    height, width, _ = imgOriginal.shape
    imgRotated = warpAffine(imgOriginal, rotationMatrix, (width, height))

    # Crop the plate from the image:
    imgCropped = getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight), tuple(ptPlateCenter))

    # Create and return possiblePlate object, which packs most of the above information:
    possiblePlate = PossiblePlate()
    possiblePlate.rrLocationOfPlateInScene = (tuple(ptPlateCenter), (intPlateWidth, intPlateHeight),
                                              fltCorrectionAngleInDeg)
    possiblePlate.imgPlate = imgCropped

    return possiblePlate
f6d726727762b752003ae16c3cf9d286a0ebe990
3,651,789
def create_stratified_name(stem, stratification_name, stratum_name):
    """
    generate a standardised stratified compartment name

    :param stem: str
        the previous stem to the compartment or parameter name that needs to be extended
    :param stratification_name: str
        the "stratification" or rationale for implementing the current stratification process
    :param stratum_name: str
        name of the stratum within the stratification
    :return: str
        the composite name with the standardised stratification name added on to the old stem
    """
    return stem + create_stratum_name(stratification_name, stratum_name)
2677dec386dfd235e7fb5d088c5481987acf4beb
3,651,790
import inspect
import typing


def bind_args_kwargs(sig: inspect.Signature, *args: typing.Any, **kwargs: typing.Any) -> typing.List[BoundParameter]:
    """Bind *args and **kwargs to signature and get Bound Parameters.

    :param sig: source signature
    :type sig: inspect.Signature
    :param args: not keyworded arguments
    :type args: typing.Any
    :param kwargs: keyworded arguments
    :type kwargs: typing.Any
    :return: Iterator for bound parameters with all information about it
    :rtype: typing.List[BoundParameter]

    .. versionadded:: 3.3.0
    .. versionchanged:: 5.3.1 return list
    """
    result: typing.List[BoundParameter] = []
    bound: typing.MutableMapping[str, inspect.Parameter] = sig.bind(*args, **kwargs).arguments
    for param in sig.parameters.values():
        result.append(BoundParameter(parameter=param, value=bound.get(param.name, param.default)))
    return result
3fc8b16449981e920998ff84839a71cbbfc26d28
3,651,791
def user(user_type):
    """
    :return: instance of a User
    """
    return user_type()
a8c8cd4ef57915c555864f6fc09dce63c2a1c6fb
3,651,792
def true_or_false(item):
    """This function is used to assist in getting appropriate
    values set with the PythonOption directive
    """
    try:
        item = item.lower()
    except AttributeError:
        # non-string values have no lower(); compare them as-is
        pass
    if item in ['yes', 'true', '1', 1, True]:
        return True
    elif item in ['no', 'false', '0', 0, None, False]:
        return False
    else:
        raise Exception("Could not interpret %r as a boolean" % (item,))
3e7c0cee07f6796c6134b182572a7d5ff95cf42d
3,651,793
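A usage sketch for the true_or_false row above; string matching is case-insensitive because of the lower() call:

print(true_or_false("Yes"))    # True
print(true_or_false("FALSE"))  # False
print(true_or_false(0))        # False
print(true_or_false(1))        # True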
import time


def time_ms():
    """currently pypy only has Python 3.5.3, so we are missing Python 3.7's
    time.time_ns() with better precision
    see https://www.python.org/dev/peps/pep-0564/

    the function here is a convenience; you shall use
    `time.time_ns() // 1_000_000` if using >= Python 3.7
    """
    return int(time.time() * 1e3)
1bff241db79007314d7a876ddd007af137ba7306
3,651,795
import numpy as np


def _calculate_mk(tp, fp, tn, fn):
    """Calculate mk (markedness)."""
    # guard both ratios against division by zero
    ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float("nan")))
    npv = np.where((tn + fn) > 0, tn / (tn + fn), np.array(float("nan")))
    numerator = ppv + npv - 1.0
    denominator = 1.0
    return numerator, denominator
d777db3abd9296b2a67e038396d29e8ef8529a74
3,651,796
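A sketch of the markedness computation above on a small confusion matrix: tp=8, fp=2, tn=5, fn=5 gives PPV = 8/10 = 0.8 and NPV = 5/10 = 0.5, so the numerator is 0.8 + 0.5 - 1.0 = 0.3 with the denominator fixed at 1.0:

num, den = _calculate_mk(8.0, 2.0, 5.0, 5.0)
print(num / den)  # ~0.3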
import numpy as np


def geometric_progression_for_stepsize(
    x, update, dist, decision_function, current_iteration
):
    """Geometric progression to search for stepsize.

    Keep decreasing stepsize by half until reaching the desired side of the
    boundary.
    """
    epsilon = dist / np.sqrt(current_iteration)
    while True:
        updated = x + epsilon * update
        success = decision_function(updated)[0]
        if success:
            break
        else:
            epsilon = epsilon / 2.0

    return epsilon
d5a043f434efa68e827ff89f6f469eab37a79383
3,651,797
def absorption_two_linear_known(freq_list, interaction_strength, decay_rate):
    """ The absorption is half the imaginary part of the susceptibility. """
    return susceptibility_two_linear_known(freq_list, interaction_strength, decay_rate).imag / 2.0
9d4819715150ce63753f4e356c406685852fc761
3,651,798
import numpy as np


def post_to_conf(post_grid, cell_size):
    """
    Converts a N-dimensional grid of posterior values into a grid of confidence levels. The posterior values do not
    need to be normalised, i.e. their distribution need not integrate to 1. Works with likelihood values (not
    log-likelihood) instead of posteriors, assuming a flat prior.

    Args:
        post_grid (ND numpy array): Grid of posterior values.
        cell_size (float): The size of a grid cell, e.g. for 2 dimensions x and y this would be dx*dy.

    Returns:
        ND numpy array: Grid of confidence levels, where the value at each point is the minimum confidence region \
                        that includes that point. The least likely point would have a value of 1, indicating that \
                        it is only included in the 100% confidence region and excluded from anything smaller.
    """
    # Create flattened list of posteriors and sort in descending order
    posteriors = post_grid.flatten()
    posteriors[::-1].sort()

    # Dictionary to contain mapping between posterior and confidence level
    confidence_level_unnormalised = {}

    # Calculate the cumulative integral of posterior values
    integral = 0
    for posterior in posteriors:
        integral += posterior * cell_size
        confidence_level_unnormalised[posterior] = integral

    # Map each posterior in the grid to its confidence value
    confidence_grid_unnormalised = np.vectorize(confidence_level_unnormalised.get)(post_grid)

    # Normalise the confidence values using the final (complete) integral
    confidence_grid_normalised = np.divide(confidence_grid_unnormalised, integral)

    return confidence_grid_normalised
b4bcb8dddeceb7a4e1bb0914e503868e443ecb09
3,651,800
def get_fuzzer_display(testcase):
    """Return FuzzerDisplay tuple."""
    if (testcase.overridden_fuzzer_name == testcase.fuzzer_name or
            not testcase.overridden_fuzzer_name):
        return FuzzerDisplay(
            engine=None,
            target=None,
            name=testcase.fuzzer_name,
            fully_qualified_name=testcase.fuzzer_name)

    fuzz_target = get_fuzz_target(testcase.overridden_fuzzer_name)
    if not fuzz_target:
        # Legacy testcases.
        return FuzzerDisplay(
            engine=testcase.fuzzer_name,
            target=testcase.get_metadata('fuzzer_binary_name'),
            name=testcase.fuzzer_name,
            fully_qualified_name=testcase.overridden_fuzzer_name)

    return FuzzerDisplay(
        engine=fuzz_target.engine,
        target=fuzz_target.binary,
        name=fuzz_target.engine,
        fully_qualified_name=fuzz_target.fully_qualified_name())
273e0a2f92a4e24606908586111f1bad17e50b4c
3,651,801
def process_articles_results(articles_list):
    """
    Function that processes the articles result and transforms them to a list of Objects

    Args:
        articles_list: A list of dictionaries that contain article details

    Returns:
        articles_results: A list of article objects
    """
    articles_results = []
    for article_item in articles_list:
        id = article_item.get('id')
        author = article_item.get('author')
        title = article_item.get('title')
        description = article_item.get('description')
        url = article_item.get('url')
        urlToImage = article_item.get('urlToImage')
        publishedAt = article_item.get('publishedAt')
        content = article_item.get('content')

        if urlToImage:
            article_object = Articles(id, author, title, description, url,
                                      urlToImage, publishedAt, content)
            articles_results.append(article_object)

    return articles_results
7b4e540474757b2e0e9b93f66f5bee926992a782
3,651,802
from ctypes import c_char_p


def list_to_bytes_list(strList):
    """
    This function turns an array of strings into a pointer array
    with pointers pointing to the encodings of those strings.
    Possibly contained bytes are kept as they are.

    :param strList: List of strings that shall be converted
    :type strList: List of strings
    :returns: Pointer array with pointers pointing to bytes
    :raises: TypeError if strList is not list, set or tuple
    """
    # handle None early; len() below would fail on it
    if strList is None:
        return strList
    pList = c_char_p * len(strList)
    # if strList is already a pointer array, there is nothing to do
    if isinstance(strList, pList):
        return strList
    if not isinstance(strList, (list, set, tuple)):
        raise TypeError("strList must be list, set or tuple, not " +
                        str(type(strList)))
    pList = pList()
    for i, elem in enumerate(strList):
        pList[i] = str_to_bytes(elem)
    return pList
19bcc6751e4805adcbfde54656ff83ef52ef02b8
3,651,803
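Usage sketch for list_to_bytes_list. The module's own str_to_bytes encoder is assumed; a minimal stand-in is defined here so the snippet runs on its own:

from ctypes import c_char_p

def str_to_bytes(s):
    # hypothetical stand-in for the module's encoder
    return s.encode("utf-8") if isinstance(s, str) else s

ptr_array = list_to_bytes_list(["alpha", "beta"])
print(ptr_array[0])  # b'alpha'
print(ptr_array[1])  # b'beta'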
def handle_td(element, box, _get_image_from_uri): """Handle the ``colspan``, ``rowspan`` attributes.""" if isinstance(box, boxes.TableCellBox): # HTML 4.01 gives special meaning to colspan=0 # http://www.w3.org/TR/html401/struct/tables.html#adef-rowspan # but HTML 5 removed it # http://www.w3.org/TR/html5/tabular-data.html#attr-tdth-colspan # rowspan=0 is still there though. integer_attribute(element, box, 'colspan') integer_attribute(element, box, 'rowspan', minimum=0) return [box]
d3a2669ffc8ccac27d3b40c4f693751239b9c135
3,651,804
import pipes def login_flags(db, host, port, user, db_prefix=True): """ returns a list of connection argument strings each prefixed with a space and quoted where necessary to later be combined in a single shell string with `"".join(rv)` db_prefix determines if "--dbname" is prefixed to the db argument, since the argument was introduced in 9.3. """ flags = [] if db: if db_prefix: flags.append(' --dbname={0}'.format(pipes.quote(db))) else: flags.append(' {0}'.format(pipes.quote(db))) if host: flags.append(' --host={0}'.format(host)) if port: flags.append(' --port={0}'.format(port)) if user: flags.append(' --username={0}'.format(user)) return flags
2c844def8e6f1154a9962d43c858b39b9a7adf2a
3,651,805
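A quick illustrative check of the flag construction, showing the quoting of a database name that contains a space:

print("".join(login_flags("my db", "localhost", 5432, "admin")))
# -> " --dbname='my db' --host=localhost --port=5432 --username=admin"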
def glplot(ncfile, times, colora, label):
    """
    add a plot of grounding line points to current axes.
    makes use of the numpy.ma.MaskedArray when reading xGL, yGL
    """
    try:
        ncid = Dataset(ncfile, 'r')
    except (OSError, RuntimeError):
        print("Failed to open file: {}. Skipping.".format(ncfile))
        return 350.0, 500.0

    time = ncid.variables["time"][:]
    lxmax = 0.0
    lxmin = 800.0
    for i in range(len(times)):
        seq = (time == times[i])
        xGL = ncid.variables["xGL"][:, seq] * 1e-3
        lxmax = max(np.max(xGL), lxmax)
        lxmin = min(np.min(xGL), lxmin)
        yGL = ncid.variables["yGL"][:, seq] * 1e-3
        plt.plot(xGL, yGL, 's', ms=3, mfc=colora[i],
                 mec=colora[i],
                 label=label + ', t = ' + format(times[i]))
    return lxmin, lxmax
149836ceb0f6b65ba792bffcbdafad6fe8702f62
3,651,806
def roi_intersect(a, b): """ Compute intersection of two ROIs. .. rubric:: Examples .. code-block:: s_[1:30], s_[20:40] => s_[20:30] s_[1:10], s_[20:40] => s_[10:10] # works for N dimensions s_[1:10, 11:21], s_[8:12, 10:30] => s_[8:10, 11:21] """ def slice_intersect(a, b): if a.stop < b.start: return slice(a.stop, a.stop) if a.start > b.stop: return slice(a.start, a.start) _in = max(a.start, b.start) _out = min(a.stop, b.stop) return slice(_in, _out) if isinstance(a, slice): if not isinstance(b, slice): b = b[0] return slice_intersect(a, b) b = (b,) if isinstance(b, slice) else b return tuple(slice_intersect(sa, sb) for sa, sb in zip(a, b))
d1070c8ec0c493296dfee6bdc54b7430e703bda8
3,651,807
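The docstring examples can be reproduced directly with numpy's slice helper (illustrative, assuming roi_intersect is in scope):

import numpy as np

s_ = np.s_
print(roi_intersect(s_[1:30], s_[20:40]))  # slice(20, 30, None)
print(roi_intersect(s_[1:10], s_[20:40]))  # slice(10, 10, None) -- empty
print(roi_intersect(s_[1:10, 11:21], s_[8:12, 10:30]))
# -> (slice(8, 10, None), slice(11, 21, None))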
def _flows_finished(pgen_grammar, stack): """ if, while, for and try might not be finished, because another part might still be parsed. """ for stack_node in stack: if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'): return False return True
dd0fe435d1328b3ae83ba2507006b6825ca23087
3,651,808
def PositionToPercentile(position, field_size): """Converts from position in the field to percentile. position: int field_size: int """ beat = field_size - position + 1 percentile = 100.0 * beat / field_size return percentile
c75869f3d7f8437f28d3463fcf12b2b446fe930a
3,651,809
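Worked example: in a field of 10, first place beats all 10 entrants and last place beats only itself:

print(PositionToPercentile(1, 10))   # 100.0
print(PositionToPercentile(10, 10))  # 10.0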
def grid(num, ndim, large=False): """Build a uniform grid with num points along each of ndim axes.""" if not large: _check_not_too_large(np.power(num, ndim) * ndim) x = np.linspace(0, 1, num, dtype='float64') w = 1 / (num - 1) points = np.stack( np.meshgrid(*[x for _ in range(ndim)], indexing='ij'), axis=-1) return points, w
51a3ef70da4581a774d76839d05a14042e7bf78c
3,651,810
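Illustrative call, assuming grid and its module-level _check_not_too_large helper are in scope; 5 points per axis in 2 dimensions gives a (5, 5, 2) array of coordinates with spacing 0.25:

points, w = grid(5, 2)
print(points.shape)  # (5, 5, 2)
print(w)             # 0.25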
def rolling_outlier_quantile(x, width, q, m): """Detect outliers by multiples of a quantile in a window. Outliers are the array elements outside `m` times the `q`'th quantile of deviations from the smoothed trend line, as calculated from the trend line residuals. (For example, take the magnitude of the 95th quantile times 5, and mark any elements greater than that value as outliers.) This is the smoothing method used in BIC-seq (doi:10.1073/pnas.1110574108) with the parameters width=200, q=.95, m=5 for WGS. Returns ------- np.array A boolean array of the same size as `x`, where outlier indices are True. """ if len(x) <= width: return np.zeros(len(x), dtype=np.bool_) dists = np.abs(x - savgol(x, width)) quants = rolling_quantile(dists, width, q) outliers = (dists > quants * m) return outliers
3c28fa245c8dfce03958dee33c47828eb38ac979
3,651,811
def compute_region_classification_len(dataset_output, dataset_type: str):
    """
    Compute the number of points per class and return a dictionary
    (dataset_type specifies the keys) with the results
    """
    (stable_region_indices,
     marginal_stable_region_indices,
     marginal_region_indices,
     marginal_unstable_region_indices,
     unstable_region_indices) = compute_regions_belongings(value=dataset_output)

    region_len_dict = {
        f"len_{dataset_type}_stable_region": sum(stable_region_indices),
        f"len_{dataset_type}_marginal_stable_region": sum(marginal_stable_region_indices),
        f"len_{dataset_type}_marginal_region": sum(marginal_region_indices),
        f"len_{dataset_type}_marginal_unstable_region": sum(marginal_unstable_region_indices),
        f"len_{dataset_type}_unstable_region": sum(unstable_region_indices),
    }

    return region_len_dict
ba78d7d000b97cfcefa2acac263612ebd4aff377
3,651,812
def set_world_properties(world_uid, world_name=None, owner=None, config=None): """ Set the properties of the given world """ return runtime.set_world_properties(world_uid, world_name, owner, config)
4c063554390c0fb33ec74394a5a7cc967d55211d
3,651,813
import cv2

def _resize_and_pad(img, desired_size):
    """
    Resize an image to the desired size, preserving the aspect ratio and
    padding the shorter dimension with black borders.
    :param img: input image as a numpy array
    :param desired_size: target width and height in pixels (square output)
    :return: the resized and padded image
    """
    old_size = img.shape[:2]  # old_size is in (height, width) format
    ratio = float(desired_size) / max(old_size)
    new_size = tuple([int(x * ratio) for x in old_size])
    if new_size[0] == 0:
        new_size = (new_size[0] + 1, new_size[1])
    if new_size[1] == 0:
        new_size = (new_size[0], new_size[1] + 1)

    # cv2.resize expects the size argument in (width, height) format
    im = cv2.resize(img, (new_size[1], new_size[0]))
    delta_w = desired_size - new_size[1]
    delta_h = desired_size - new_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)

    color = [0, 0, 0]
    img = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                             value=color)

    return img
1053748c0a303e3b5b3712623a089e42ba822301
3,651,814
from cntk.ops.cntk2 import Dropout
def dropout(x, name=None):
    """
    Compute a new tensor with `dropoutRate` percent set to zero. The values
    that are set to zero are randomly chosen. This is commonly used to prevent
    overfitting during the training process.

    The output tensor has the same shape as `x`, but with `dropoutRate` of the
    elements set to zero (dropped out).

    Args:
        x: source tensor
        name (str): the name of the node in the network

    Returns:
        :class:`cntk.graph.ComputationNode`
    """
    op = Dropout(x, name=name)
    wrap_numpy_arrays(op)
    op.rank = op._.rank
    return op
ae688aa478842ba451b92de2bc0503e42f1a9363
3,651,816
def mol2graph(crystal_batch: CrystalDataset, args: Namespace) -> BatchMolGraph:
    """
    Converts a batch of crystals to a BatchMolGraph containing the batch of molecular graphs.

    :param crystal_batch: a CrystalDataset (a batch of crystal data points)
    :param args: Arguments.
    :return: A BatchMolGraph containing the combined molecular graph for the crystals
    """
    crystal_graphs = list()
    for crystal_point in crystal_batch:
        if crystal_point in CRYSTAL_TO_GRAPH.keys():
            crystal_graph = CRYSTAL_TO_GRAPH[crystal_point]
        else:
            crystal_graph = MolGraph(crystal_point, args)
            if not args.no_cache and len(CRYSTAL_TO_GRAPH) <= 10000:
                CRYSTAL_TO_GRAPH[crystal_point] = crystal_graph
        crystal_graphs.append(crystal_graph)

    return BatchMolGraph(crystal_graphs, args)
604351ad5ae6c1ccfa6ce01a1e7b03c5e80ff2a4
3,651,817
import ast
def _compile(s: str):
    """compiles string into AST.

    :param s: string to be compiled into AST.
    :type s: str
    """
    return compile(
        source=s,
        filename='<unknown>',
        mode='eval',
        flags=ast.PyCF_ONLY_AST,
    )
4709cfa84ab6e5d7210924cf3aa206a1d297b7bd
3,651,818
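The returned object is an ast.Expression whose body can be inspected or compiled further, e.g.:

import ast

tree = _compile('a + 2 * b')
print(type(tree).__name__)  # Expression
print(ast.dump(tree.body))  # BinOp(left=Name(...), op=Add(), ...)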
def temp_get_users_with_permission_form(self): """Used to test that swapping the Form method works""" # Search string: ABC return ()
72390791304d62fc5d78720aac4e2807e918587c
3,651,819
def permute_images(images, permutation_index): """ Permute pixels in all images. :param images: numpy array of images :param permutation_index: index of the permutation (#permutations = #tasks - 1) :return: numpy array of permuted images (of the same size) """ # seed = np.random.randint(low=4294967295, dtype=np.uint32) # make a random seed for all images in an array # baseline and superposition have the same permutation of images for the corresponding task global seeds seed = seeds[permutation_index] # the same permutation each run for the first, second, ... task return np.array([permute_pixels(im, seed) for im in images])
5742c9c2bce5012b0c17b60eb5e66328b91e53b4
3,651,821
def new_user(request, id):
    """
    Page for creating users after registering a person.
    The person must be either a volunteer, an NGO employee or Government staff.
    """
    msg = ''
    password = ''
    try:
        person_id = int(id)
        # Get name
        user = RegPerson.objects.get(pk=person_id)
        personfname = user.first_name
        personsname = user.surname
        names = user.full_name
        if request.method == 'POST':
            form = NewUser(user, data=request.POST)
            username = request.POST.get('username')
            password1 = request.POST.get('password1')
            password2 = request.POST.get('password2')
            # Reject if the person already has a user account
            user_exists = AppUser.objects.filter(reg_person=person_id)
            if user_exists:
                msg = 'Person ({} {}) has an existing user account.'.format(
                    personfname, personsname)
                messages.add_message(request, messages.INFO, msg)
                return HttpResponseRedirect(reverse(persons_search))
            if password1 == password2:
                password = password1
            else:
                msg = 'Passwords do not match!'
                messages.add_message(request, messages.INFO, msg)
                form = NewUser(user, data=request.POST)
                return render(request, 'registry/new_user.html',
                              {'form': form}, )
            # Validate that the username is not already taken
            username_exists = AppUser.objects.filter(username__iexact=username)
            if username_exists:
                msg = 'Username ({}) is taken. Pick another one.'.format(
                    username)
                messages.add_message(request, messages.INFO, msg)
                form = NewUser(user, data=request.POST)
                return render(request, 'registry/new_user.html',
                              {'form': form}, )
            else:
                # Create user
                user = AppUser.objects.create_user(username=username,
                                                   reg_person=person_id,
                                                   password=password)
                if user:
                    user.groups.add(Group.objects.get(
                        name='Standard logged in'))
                # Capture msg & op status
                msg = 'User ({}) saved successfully.'.format(username)
                messages.add_message(request, messages.INFO, msg)
                return HttpResponseRedirect(
                    '%s?id=%d' % (reverse(persons_search), int(person_id)))
        else:
            form = NewUser(user)
        return render(request, 'registry/new_user.html',
                      {'names': names, 'form': form}, )
    except Exception as e:
        msg = 'Error - ({}) '.format(str(e))
        messages.add_message(request, messages.ERROR, msg)
        return HttpResponseRedirect(reverse(persons_search))
26ff9e3fa289915218a6f60e138a3491955c0228
3,651,822
def multi_conv(func=None, options=None):
  """A function decorator for generating multi-convolution operations.

  Multi-convolutions allow for a set of data-independent convolutions to be
  executed in parallel. Executing convolutions in parallel can lead to an
  increase in the data throughput.

  The ``multi_conv`` function decorator is a convenient way to generate
  multi-convolutions - it detects all the convolution operations inside of the
  decorated function and executes them in parallel.

  For example:

  .. code-block:: python

    from tensorflow import keras
    from tensorflow.python import ipu

    @ipu.nn_ops.multi_conv
    def convs(x, y, z):
      x = keras.layers.DepthwiseConv2D(8, 2, depth_multiplier=2)(x)
      y = keras.layers.DepthwiseConv2D(16, 4, depth_multiplier=2)(y)
      z = keras.layers.Conv2D(8, 3)(z)
      return x, y, z

  Will detect and execute the three convolutions ``x``, ``y`` and ``z`` in
  parallel. Note that any operations which are not convolutions, such as bias
  add operations, will be executed in the same way as if they were not inside
  of a ``multi_conv`` decorated function.

  It is also possible to set PopLibs multi-convolution options using this
  decorator.

  For example:

  .. code-block:: python

    from tensorflow import keras
    from tensorflow.python import ipu

    @ipu.nn_ops.multi_conv(options={"perConvReservedTiles":"50"})
    def convs(x, y, z):
      x = keras.layers.DepthwiseConv2D(8, 2, depth_multiplier=2)(x)
      y = keras.layers.DepthwiseConv2D(16, 4, depth_multiplier=2)(y)
      z = keras.layers.Conv2D(8, 3)(z)
      return x, y, z

  See the PopLibs documentation for the list of all available flags.
  Note that these options will also be applied to the gradient operations
  generated during backpropagation.

  Args:
    func: A python function which takes a list of positional arguments only.
      All the arguments must be `tf.Tensor`-like objects, or be convertible to
      them. The function provided must return at least one `tf.Tensor`-like
      object.
    options: A dictionary of Poplar option flags for multi-convolution. See the
      multi-convolution PopLibs documentation for available flags.
  """

  def decorated(inner_func):
    def multi_conv_wrapper(*args):
      inner_options = options if options else {}

      if not isinstance(inner_options, dict):
        raise TypeError(
            "Expected the multi_conv `options` to be a `dict`, but got %s "
            "instead." % (str(inner_options)))

      option_proto = option_flag_pb2.PoplarOptionFlags()
      for key, value in inner_options.items():
        flag = option_proto.flags.add()
        flag.option = key
        flag.value = value

      def func_wrapper(*args):
        with ops.get_default_graph().as_default() as g:
          with g.gradient_override_map(_gradient_override_map):
            return inner_func(*args)

      args = functional_ops._convert_to_list(args)  # pylint: disable=protected-access
      with ops.name_scope("multi_conv") as scope:
        func_graph, captured_args = functional_ops._compile_function(  # pylint: disable=protected-access
            func_wrapper, args, scope, [], allow_external_captures=True)

        with ops.control_dependencies(list(func_graph.control_captures)):
          outputs = gen_functional_ops.multi_conv(
              captured_args,
              to_apply=util.create_new_tf_function(func_graph),
              Tout=func_graph.output_types,
              output_shapes=func_graph.output_shapes,
              option_flags=json_format.MessageToJson(option_proto))

      return func_graph_module.pack_sequence_as(func_graph.structured_outputs,
                                                outputs)

    return multi_conv_wrapper

  if func is not None:
    return decorated(func)

  return decorated
d1c9a69fbcec7b374142bc7568fc89ba8dddb0b9
3,651,823
def hi_joseangel(): """ Hi Jose Angel Function """ return "hi joseangel!"
5889a51977d3ec2269040a9a7e7968801209ff25
3,651,824
import time
def received_date_date(soup):
    """
    Find the 'received' history date and return it in human-readable form
    """
    received_date = get_history_date(soup, date_type="received")
    date_string = None
    try:
        date_string = time.strftime("%B %d, %Y", received_date)
    except TypeError:
        # Date did not convert
        pass

    return date_string
3963d846a64e06ed0d2e60b7ecba26efcd4d9e6e
3,651,825
def is_on(hass, entity_id):
    """ Returns True if the group's state is its ON-state. """
    state = hass.states.get(entity_id)

    if state:
        group_type = _get_group_type(state.state)

        # If we found a group_type, compare to ON-state
        return group_type and state.state == _GROUP_TYPES[group_type][0]

    return False
8e77a7a3f4a09d68d92d58105b3d5a36c830cd0c
3,651,826
def pytest_report_header(config, startdir): """return a string to be displayed as header info for terminal reporting.""" capabilities = config.getoption('capabilities') if capabilities: return 'capabilities: {0}'.format(capabilities)
4e6ada67f5f08c1db8f5b6206089db4e3ee84f46
3,651,827
def chessboard_distance(x_a, y_a, x_b, y_b):
    """
    Compute the Chebyshev (chessboard) distance between point (x_a, y_a) and
    (x_b, y_b): the maximum of the horizontal and vertical separations
    """
    return max(abs(x_b - x_a), abs(y_b - y_a))
9b11bf328faf3b231df23585914f20c2efd02bf9
3,651,828
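Worked example: a chess king reaches (4, 3) from the origin in max(4, 3) = 4 moves, whereas the Manhattan (rectilinear) distance would be 4 + 3 = 7:

print(chessboard_distance(0, 0, 4, 3))  # 4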
from pgmpy.factors import TabularCPD from pgmpy.models import BayesianModel import pandas as pd from pgmpy.inference import VariableElimination # NOQA from pgmpy.factors import TabularCPD import pgmpy import plottool as pt import networkx as netx def bayesnet(): """ References: https://class.coursera.org/pgm-003/lecture/17 http://www.cs.ubc.ca/~murphyk/Bayes/bnintro.html http://www3.cs.stonybrook.edu/~sael/teaching/cse537/Slides/chapter14d_BP.pdf http://www.cse.unsw.edu.au/~cs9417ml/Bayes/Pages/PearlPropagation.html https://github.com/pgmpy/pgmpy.git http://pgmpy.readthedocs.org/en/latest/ http://nipy.bic.berkeley.edu:5000/download/11 """ # import operator as op # # Enumerate all possible events # varcard_list = list(map(op.attrgetter('variable_card'), cpd_list)) # _esdat = list(ut.iprod(*map(range, varcard_list))) # _escol = list(map(op.attrgetter('variable'), cpd_list)) # event_space = pd.DataFrame(_esdat, columns=_escol) # # Custom compression of event space to inspect a specific graph # def compress_space_flags(event_space, var1, var2, var3, cmp12_): # """ # var1, var2, cmp_ = 'Lj', 'Lk', op.eq # """ # import vtool as vt # data = event_space # other_cols = ut.setdiff_ordered(data.columns.tolist(), [var1, var2, var3]) # case_flags12 = cmp12_(data[var1], data[var2]).values # # case_flags23 = cmp23_(data[var2], data[var3]).values # # case_flags = np.logical_and(case_flags12, case_flags23) # case_flags = case_flags12 # case_flags = case_flags.astype(np.int64) # subspace = np.hstack((case_flags[:, None], data[other_cols].values)) # sel_ = vt.unique_row_indexes(subspace) # flags = np.logical_and(mask, case_flags) # return flags # # Build special cases # case_same = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.eq)] # case_diff = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.ne)] # special_cases = [ # case_same, # case_diff, # ] name_nice = ['n1', 'n2', 'n3'] score_nice = ['low', 'high'] match_nice = ['diff', 'same'] num_names = len(name_nice) num_scores = len(score_nice) nid_basis = list(range(num_names)) score_basis = list(range(num_scores)) semtype2_nice = { 'score': score_nice, 'name': name_nice, 'match': match_nice, } var2_cpd = { } globals()['semtype2_nice'] = semtype2_nice globals()['var2_cpd'] = var2_cpd name_combo = np.array(list(ut.iprod(nid_basis, nid_basis))) combo_is_same = name_combo.T[0] == name_combo.T[1] def get_expected_scores_prob(level1, level2): part1 = combo_is_same * level1 part2 = (1 - combo_is_same) * (1 - (level2)) expected_scores_level = part1 + part2 return expected_scores_level # def make_cpd(): def name_cpd(aid): cpd = TabularCPD( variable='N' + aid, variable_card=num_names, values=[[1.0 / num_names] * num_names]) cpd.semtype = 'name' return cpd name_cpds = [name_cpd('i'), name_cpd('j'), name_cpd('k')] var2_cpd.update(dict(zip([cpd.variable for cpd in name_cpds], name_cpds))) if True: num_same_diff = 2 samediff_measure = np.array([ # get_expected_scores_prob(.12, .2), # get_expected_scores_prob(.88, .8), get_expected_scores_prob(0, 0), get_expected_scores_prob(1, 1), ]) samediff_vals = (samediff_measure / samediff_measure.sum(axis=0)).tolist() def samediff_cpd(aid1, aid2): cpd = TabularCPD( variable='A' + aid1 + aid2, variable_card=num_same_diff, values=samediff_vals, evidence=['N' + aid1, 'N' + aid2], # [::-1], evidence_card=[num_names, num_names]) # [::-1]) cpd.semtype = 'match' return cpd samediff_cpds = [samediff_cpd('i', 'j'), samediff_cpd('j', 'k'), samediff_cpd('k', 'i')] 
var2_cpd.update(dict(zip([cpd.variable for cpd in samediff_cpds], samediff_cpds))) if True: def score_cpd(aid1, aid2): semtype = 'score' evidence = ['A' + aid1 + aid2, 'N' + aid1, 'N' + aid2] evidence_cpds = [var2_cpd[key] for key in evidence] evidence_nice = [semtype2_nice[cpd.semtype] for cpd in evidence_cpds] evidence_card = list(map(len, evidence_nice)) evidence_states = list(ut.iprod(*evidence_nice)) variable_basis = semtype2_nice[semtype] variable_values = [] for mystate in variable_basis: row = [] for state in evidence_states: if state[0] == state[1]: if state[2] == 'same': val = .2 if mystate == 'low' else .8 else: val = 1 # val = .5 if mystate == 'low' else .5 elif state[0] != state[1]: if state[2] == 'same': val = .5 if mystate == 'low' else .5 else: val = 1 # val = .9 if mystate == 'low' else .1 row.append(val) variable_values.append(row) cpd = TabularCPD( variable='S' + aid1 + aid2, variable_card=len(variable_basis), values=variable_values, evidence=evidence, # [::-1], evidence_card=evidence_card) # [::-1]) cpd.semtype = semtype return cpd else: score_values = [ [.8, .1], [.2, .9], ] def score_cpd(aid1, aid2): cpd = TabularCPD( variable='S' + aid1 + aid2, variable_card=num_scores, values=score_values, evidence=['A' + aid1 + aid2], # [::-1], evidence_card=[num_same_diff]) # [::-1]) cpd.semtype = 'score' return cpd score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')] cpd_list = name_cpds + score_cpds + samediff_cpds else: score_measure = np.array([get_expected_scores_prob(level1, level2) for level1, level2 in zip(np.linspace(.1, .9, num_scores), np.linspace(.2, .8, num_scores))]) score_values = (score_measure / score_measure.sum(axis=0)).tolist() def score_cpd(aid1, aid2): cpd = TabularCPD( variable='S' + aid1 + aid2, variable_card=num_scores, values=score_values, evidence=['N' + aid1, 'N' + aid2], evidence_card=[num_names, num_names]) cpd.semtype = 'score' return cpd score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')] cpd_list = name_cpds + score_cpds pass input_graph = [] for cpd in cpd_list: if cpd.evidence is not None: for evar in cpd.evidence: input_graph.append((evar, cpd.variable)) name_model = BayesianModel(input_graph) name_model.add_cpds(*cpd_list) var2_cpd.update(dict(zip([cpd.variable for cpd in cpd_list], cpd_list))) globals()['var2_cpd'] = var2_cpd varnames = [cpd.variable for cpd in cpd_list] # --- PRINT CPDS --- cpd = score_cpds[0] def print_cpd(cpd): print('CPT: %r' % (cpd,)) index = semtype2_nice[cpd.semtype] if cpd.evidence is None: columns = ['None'] else: basis_lists = [semtype2_nice[var2_cpd[ename].semtype] for ename in cpd.evidence] columns = [','.join(x) for x in ut.iprod(*basis_lists)] data = cpd.get_cpd() print(pd.DataFrame(data, index=index, columns=columns)) for cpd in name_model.get_cpds(): print('----') print(cpd._str('phi')) print_cpd(cpd) # --- INFERENCE --- Ni = name_cpds[0] event_space_combos = {} event_space_combos[Ni.variable] = 0 # Set ni to always be Fred for cpd in cpd_list: if cpd.semtype == 'score': event_space_combos[cpd.variable] = list(range(cpd.variable_card)) evidence_dict = ut.all_dict_combinations(event_space_combos) # Query about name of annotation k given different event space params def pretty_evidence(evidence): return [key + '=' + str(semtype2_nice[var2_cpd[key].semtype][val]) for key, val in evidence.items()] def print_factor(factor): row_cards = factor.cardinality row_vars = factor.variables values = factor.values.reshape(np.prod(row_cards), 1).flatten() # col_cards = 1 # col_vars = [''] basis_lists = 
list(zip(*list(ut.iprod(*[range(c) for c in row_cards])))) nice_basis_lists = [] for varname, basis in zip(row_vars, basis_lists): cpd = var2_cpd[varname] _nice_basis = ut.take(semtype2_nice[cpd.semtype], basis) nice_basis = ['%s=%s' % (varname, val) for val in _nice_basis] nice_basis_lists.append(nice_basis) row_lbls = [', '.join(sorted(x)) for x in zip(*nice_basis_lists)] print(ut.repr3(dict(zip(row_lbls, values)), precision=3, align=True, key_order_metric='-val')) # name_belief = BeliefPropagation(name_model) name_belief = VariableElimination(name_model) def try_query(evidence): print('--------') query_vars = ut.setdiff_ordered(varnames, list(evidence.keys())) evidence_str = ', '.join(pretty_evidence(evidence)) probs = name_belief.query(query_vars, evidence) factor_list = probs.values() joint_factor = pgmpy.factors.factor_product(*factor_list) print('P(' + ', '.join(query_vars) + ' | ' + evidence_str + ')') # print(six.text_type(joint_factor)) factor = joint_factor # NOQA # print_factor(factor) # import utool as ut print(ut.hz_str([(f._str(phi_or_p='phi')) for f in factor_list])) for evidence in evidence_dict: try_query(evidence) evidence = {'Aij': 1, 'Ajk': 1, 'Aki': 1, 'Ni': 0} try_query(evidence) evidence = {'Aij': 0, 'Ajk': 0, 'Aki': 0, 'Ni': 0} try_query(evidence) globals()['score_nice'] = score_nice globals()['name_nice'] = name_nice globals()['score_basis'] = score_basis globals()['nid_basis'] = nid_basis print('Independencies') print(name_model.get_independencies()) print(name_model.local_independencies([Ni.variable])) # name_belief = BeliefPropagation(name_model) # # name_belief = VariableElimination(name_model) # for case in special_cases: # test_data = case.drop('Lk', axis=1) # test_data = test_data.reset_index(drop=True) # print('----') # for i in range(test_data.shape[0]): # evidence = test_data.loc[i].to_dict() # probs = name_belief.query(['Lk'], evidence) # factor = probs['Lk'] # probs = factor.values # evidence_ = evidence.copy() # evidence_['Li'] = name_nice[evidence['Li']] # evidence_['Lj'] = name_nice[evidence['Lj']] # evidence_['Sij'] = score_nice[evidence['Sij']] # evidence_['Sjk'] = score_nice[evidence['Sjk']] # nice2_prob = ut.odict(zip(name_nice, probs.tolist())) # ut.print_python_code('P(Lk | {evidence}) = {cpt}'.format( # evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)), # cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val') # )) # for case in special_cases: # test_data = case.drop('Lk', axis=1) # test_data = test_data.drop('Lj', axis=1) # test_data = test_data.reset_index(drop=True) # print('----') # for i in range(test_data.shape[0]): # evidence = test_data.loc[i].to_dict() # query_vars = ['Lk', 'Lj'] # probs = name_belief.query(query_vars, evidence) # for queryvar in query_vars: # factor = probs[queryvar] # print(factor._str('phi')) # probs = factor.values # evidence_ = evidence.copy() # evidence_['Li'] = name_nice[evidence['Li']] # evidence_['Sij'] = score_nice[evidence['Sij']] # evidence_['Sjk'] = score_nice[evidence['Sjk']] # nice2_prob = ut.odict(zip([queryvar + '=' + x for x in name_nice], probs.tolist())) # ut.print_python_code('P({queryvar} | {evidence}) = {cpt}'.format( # query_var=query_var, # evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)), # cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val') # )) # _ draw model fig = pt.figure() # NOQA fig.clf() ax = pt.gca() netx_nodes = [(node, {}) for node in name_model.nodes()] netx_edges = [(etup[0], etup[1], 
{}) for etup in name_model.edges()] netx_graph = netx.DiGraph() netx_graph.add_nodes_from(netx_nodes) netx_graph.add_edges_from(netx_edges) # pos = netx.graphviz_layout(netx_graph) pos = netx.pydot_layout(netx_graph, prog='dot') netx.draw(netx_graph, pos=pos, ax=ax, with_labels=True) pt.plt.savefig('foo.png') ut.startfile('foo.png')
05853fb3a7e84a1864399588af4b27390a1c8d31
3,651,829
def sigma_R(sim, Pk=None, z=None, non_lin=False):
    """ return amplitude of density fluctuations
        if given Pk -- C++ class Extrap_Pk or Extrap_Pk_Nl -- computes its sigma_R.
        if given redshift, computes linear or non-linear (emulator) amplitude of density fluctuations """
    sigma = fs.Data_Vec_2()
    if Pk:  # compute amplitude of density fluctuations from given continuous power spectrum
        fs.gen_sigma_binned_gsl_qawf(sim, Pk, sigma)
    elif z is not None:  # compute (non-)linear amplitude of density fluctuations
        a = 1./(1.+z) if z != 'init' else 1.0
        # the non-linear flag selects the _nl variant, the default the _lin variant
        if non_lin:
            fs.gen_sigma_func_binned_gsl_qawf_nl(sim, a, sigma)
        else:
            fs.gen_sigma_func_binned_gsl_qawf_lin(sim, a, sigma)
    else:
        raise KeyError("Function 'sigma_R' called without arguments.")

    return get_ndarray(sigma)
956a4ca092ce56c1d8120c3b9047280306005326
3,651,830
def session_ended_request_handler(handler_input): """Handler for Session End.""" # type: (HandlerInput) -> Response logger.info("Entering AMAZON.SessionEndedRequest") save_data(handler_input) return handler_input.response_builder.response
a3bd1c38699a69da0cdce0203ee0549e9132b1c1
3,651,831
import unittest
def _getTestSuite(testFiles):
    """
    Loads unit tests recursively from beneath the current directory.

    Inputs:
        testFiles - If non-empty, a list of unit tests to selectively run.

    Outputs:
        A unittest.TestSuite object containing the unit tests to run.
    """
    loader = unittest.TestLoader()
    if testFiles:
        # join() takes a single iterable, so wrap the path parts in a list
        return loader.loadTestsFromNames(
            [".".join([TEST_DIR, testFile]) for testFile in testFiles])
    return loader.discover(TEST_DIR)
786baa4d70161e1ae6c60160460f379c66ea465c
3,651,832
def stratifiedsmooth2stratifiedwavy_c(rho_gas, rho_liq, vel_gas, d_m, beta, mu_liq, mu_gas):
    """
    Constructs the boundary of the transition from the stratified-smooth to the
    stratified-wavy flow pattern that results from the "wind" effect.
    :param rho_gas: gas density
    :param rho_liq: liquid density
    :param vel_gas: superficial gas velocity
    :param d_m: pipe diameter
    :param beta: angle of inclination from the horizontal
    :param mu_liq: liquid viscosity
    :param mu_gas: gas viscosity
    :return: superficial liquid velocity
    """
    froude_number = (rho_gas / (rho_liq - rho_gas)) ** 0.5 * vel_gas / (d_m * uc.g * np.cos(beta * uc.pi / 180)) ** 0.5
    vel_liq_0 = 0.0000001

    def equation2solve(vel_liq):
        re_sl = reynolds_number(rho_liq, vel_liq, d_m, mu_liq)
        k = froude_number * re_sl ** 0.5
        # k = froude_number ** 2 * re_sl
        x = parameter_x(d_m, rho_liq, rho_gas, mu_liq, mu_gas, vel_gas, vel_liq)
        y = parameter_y(d_m, rho_liq, rho_gas, mu_gas, vel_gas, beta)
        h_l = combined_momentum_equation(x, y, d_m, rho_liq, rho_gas, mu_liq, mu_gas, vel_gas, vel_liq)
        variables = dimensionless_variables(h_l)
        v_g = variables[6]
        s = 0.01
        v_l = variables[5]
        equation = k - 2 / (v_l ** 0.5 * v_g * s ** 0.5)
        return equation

    vel_liq = opt.fsolve(equation2solve, np.array(vel_liq_0))
    return vel_liq
a80c1b5f400d4db36979960a26f5b914047abe8d
3,651,833
def box( data_frame=None, x=None, y=None, color=None, facet_row=None, facet_row_weights=None, facet_col=None, facet_col_weights=None, facet_col_wrap=0, facet_row_spacing=None, facet_col_spacing=None, hover_name=None, hover_data=None, custom_data=None, animation_frame=None, animation_group=None, category_orders=None, labels=None, color_discrete_sequence=None, color_discrete_map=None, orientation=None, boxmode=None, log_x=False, log_y=False, range_x=None, range_y=None, points=None, notched=False, title=None, template=None, width=None, height=None, ): """ In a box plot, rows of `data_frame` are grouped together into a box-and-whisker mark to visualize their distribution. Each box spans from quartile 1 (Q1) to quartile 3 (Q3). The second quartile (Q2) is marked by a line inside the box. By default, the whiskers correspond to the box' edges +/- 1.5 times the interquartile range (IQR: Q3-Q1), see "points" for other options. """ return make_figure( args=locals(), constructor=go.Box, trace_patch=dict(boxpoints=points, notched=notched, x0=" ", y0=" "), layout_patch=dict(boxmode=boxmode), )
2e5a22fd4fa875b4cb506c7d21ff91e56908ed65
3,651,834
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
    """
    All numeric columns up until the first non-numeric column are considered main columns.

    :param df: The pd.DataFrame
    :param index_level: Name of the index level of the column names. Default 'Column Name'
    :param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.

    :returns: The names of the inferred main columns
    """
    columns = df.columns.get_level_values(index_level)
    main_columns = []
    for i, dtype in enumerate(df.dtypes):
        if dtype in numeric_dtypes:
            main_columns.append(columns[i])
        else:
            break
    return main_columns
3eae67b765ca7a1a048047e12511c1a9721f9fea
3,651,835
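Illustrative example, assuming the module's default _numeric_dtypes covers int64/float64; the trailing numeric column 'd' is excluded because scanning stops at the first non-numeric column:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': ['x', 'y'], 'd': [3, 4]})
df.columns = pd.Index(df.columns, name='Column Name')
print(_infer_main_columns(df))  # ['a', 'b']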
def index():
    """
    Render the home page
    :return:
    """
    return render_template('index.html')
8965ff54f131a0250f1a05183ceb79d6d677883c
3,651,836
def two_phase(model, config): """Two-phase simulation workflow.""" wea_path, datetime_stamps = get_wea(config) smx = gen_smx(wea_path, config.smx_basis, config.mtxdir) pdsmx = prep_2phase_pt(model, config) vdsmx = prep_2phase_vu(model, config) if not config.no_multiply: calc_2phase_pt(model, datetime_stamps, pdsmx, smx, config) calc_2phase_vu(datetime_stamps, vdsmx, smx, config) return pdsmx, vdsmx
0f2eb619dcfea233446e90565bc1310ee1a3bc3f
3,651,837
def str_with_tab(indent: int, text: str, uppercase: bool = True) -> str: """Create a string with ``indent`` spaces followed by ``text``.""" if uppercase: text = text.upper() return " " * indent + text
3306ba86781d272a19b0e02ff8d06da0976d7282
3,651,838
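For example:

print(repr(str_with_tab(4, "warning")))                # '    WARNING'
print(repr(str_with_tab(2, "Done", uppercase=False)))  # '  Done'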
def delete(card, files=None): """Delete individual notefiles and their contents. Args: card (Notecard): The current Notecard object. files (array): A list of Notefiles to delete. Returns: string: The result of the Notecard request. """ req = {"req": "file.delete"} if files: req["files"] = files return card.Transaction(req)
2acfa67b7531244e44a183286a9d87b9ac849c83
3,651,839
def test_timed_info(): """Test timed_info decorator""" @timed_info def target(): return "hello world" result = target() assert result == "hello world"
4deb25b542bcc1a3ad2fc5859c2c3f243060b6d9
3,651,840
from typing import Set
def get_doc_word_token_set(doc: Doc, use_lemma=False) -> Set[str]:
    """Return the set of word-token strings (text or lemma) in a document (no repetition)."""
    return set([token.lemma_ if use_lemma else token.text for token in get_word_tokens(doc)])
56e1d8bfcad363049b4dd455e728d5c4dd3754f5
3,651,841
from typing import List def finding_the_percentage(n: int, arr: List[str], query_name: str) -> str: """ >>> finding_the_percentage(3, ['Krishna 67 68 69', 'Arjun 70 98 63', ... 'Malika 52 56 60'], 'Malika') '56.00' >>> finding_the_percentage(2, ['Harsh 25 26.5 28', 'Anurag 26 28 30'], ... 'Harsh') '26.50' """ student_marks = {} for i in range(n): name, *line = arr[i].split() scores = list(map(float, line)) student_marks[name] = sum(scores)/len(scores) return '{:.2f}'.format(student_marks[query_name])
86c2ad777c667f9ba424bc2b707f46465a10accc
3,651,842
def mvg_logpdf_fixedcov(x, mean, inv_cov): """ Log-pdf of the multivariate Gaussian distribution where the determinant and inverse of the covariance matrix are precomputed and fixed. Note that this neglects the additive constant: -0.5 * (len(x) * log(2 * pi) + log_det_cov), because it is irrelevant when comparing pdf values with a fixed covariance, but it means that this is not the normalised pdf. Args: x (1D numpy array): Vector value at which to evaluate the pdf. mean (1D numpy array): Mean vector of the multivariate Gaussian distribution. inv_cov (2D numpy array): Inverted covariance matrix. Returns: float: Log-pdf value. """ dev = x - mean return -0.5 * (dev @ inv_cov @ dev)
648d1925ed4b4793e8e1ce1cec8c7ccd0efb9f6b
3,651,843
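A small numeric check (illustrative): at the mean the deviation vanishes, so the unnormalised log-pdf is zero, and one unit away under an identity covariance it is -0.5:

import numpy as np

mean = np.zeros(2)
inv_cov = np.eye(2)
print(mvg_logpdf_fixedcov(mean, mean, inv_cov))                  # -0.0 (zero at the mean)
print(mvg_logpdf_fixedcov(np.array([1.0, 0.0]), mean, inv_cov))  # -0.5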
def make_frac_grid(frac_spacing, numrows=50, numcols=50, model_grid=None, seed=0):
    """Create a grid that contains a network of random fractures.

    Creates and returns a grid containing a network of random fractures, which
    are represented as 1's embedded in a grid of 0's.

    Parameters
    ----------
    frac_spacing : int
        Average spacing of fractures (in grid cells)
    numrows : int, optional
        Number of rows in grid (if model_grid parameter is given, uses values
        from the model grid instead)
    numcols : int, optional
        Number of columns in grid (if model_grid parameter is given, uses
        values from the model grid instead)
    model_grid : Landlab RasterModelGrid object, optional
        RasterModelGrid to use for grid size
    seed : int, optional
        Seed used for random number generator

    Returns
    -------
    m : Numpy array
        Array containing fracture grid, represented as 0's (matrix) and 1's
        (fractures). If model_grid parameter is given, returns a 1D array
        corresponding to a node-based array in the model grid. Otherwise,
        returns a 2D array with dimensions given by numrows, numcols.
    """
    # Make an initial grid of all zeros. If user specified a model grid,
    # use that. Otherwise, use the given dimensions.
    if model_grid is not None:
        numrows = model_grid.shape[0]
        numcols = model_grid.shape[1]
    m = zeros((numrows, numcols), dtype=int)

    # Add fractures to grid
    nfracs = (numrows + numcols) // frac_spacing
    for i in range(nfracs):

        (y, x) = calculate_fracture_starting_position((numrows, numcols),
                                                      seed + i)
        ang = calculate_fracture_orientation((y, x), seed + i)
        (dy, dx) = calculate_fracture_step_sizes((y, x), ang)

        trace_fracture_through_grid(m, (y, x), (dy, dx))

    # If we have a model_grid, flatten the frac grid so it's equivalent to
    # a node array.
    if model_grid is not None:
        m.shape = (m.shape[0] * m.shape[1])

    return m
2e1ffc1bab30726dcbbe1b022c6cf92920c2dcc2
3,651,845
import colorsys def generate_colors(): """ Generate random colors. To get visually distinct colors, generate them in HSV space then convert to RGB. """ N = 30 brightness = 0.7 hsv = [(i / N, 1, brightness) for i in range(N)] colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) perm = [15, 13, 25, 12, 19, 8, 22, 24, 29, 17, 28, 20, 2, 27, 11, 26, 21, 4, 3, 18, 9, 5, 14, 1, 16, 0, 23, 7, 6, 10] colors = [colors[idx] for idx in perm] return colors
ee8951d66972190e6d1dcd5dc5c211d5631f6841
3,651,846
def secant_method(f, x0, x1, iterations):
    """Return the root calculated using the secant method."""
    for i in range(iterations):
        f_x1 = f(x1)
        # the small epsilon guards against division by zero once converged
        x2 = x1 - f_x1 * (x1 - x0) / (f_x1 - f(x0) + 1e-9)
        x0, x1 = x1, x2
    return x2
081522ae8e68ad14cb67f8afc03989c46f3999d5
3,651,847
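Example: finding sqrt(2) as the positive root of f(x) = x**2 - 2 (plain-float usage sketch):

root = secant_method(lambda x: x * x - 2.0, 1.0, 2.0, 8)
print(round(root, 6))  # 1.414214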