Dataset columns:
content : string (lengths 35 to 762k)
sha1 : string (length 40)
id : int64 (0 to 3.66M)
def getContentType(the_type):
    """
    Get the content type based on the type name which is in settings
    :param the_type:
    :return:
    """
    if the_type not in settings.XGDS_MAP_SERVER_JS_MAP:
        return None
    the_model_name = settings.XGDS_MAP_SERVER_JS_MAP[the_type]['model']
    splits = the_model_name.split('.')
    content_type = ContentType.objects.get(app_label=splits[0], model=splits[1])
    return content_type
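A minimal usage sketch of the lookup above; the settings entry and type name here are hypothetical, not taken from the original project:

# Hypothetical settings entry: maps a type name to a dotted "app_label.model" string.
# XGDS_MAP_SERVER_JS_MAP = {'Note': {'model': 'notes.Note'}}

content_type = getContentType('Note')   # looks up ContentType with app_label='notes', model='Note'
missing = getContentType('Unknown')     # -> None, since the key is not in the map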
25031eb0dce8fe7828f94bdbc99d5c574f0e5ea6
3,655,565
import scipy
import math
import numpy

def calculateGravityAcceleration(stateVec, epoch, useGeoid):
    """ Calculate the acceleration due to gravity acting on the satellite at a given state
    (3 positions and 3 velocities). Ignore satellite's mass, i.e. use a restricted two-body problem.

    Arguments
    ----------
    stateVec - numpy.ndarray of shape (1,6) with three Cartesian positions and three velocities
        in an inertial reference frame in metres and metres per second, respectively.
    epoch - datetime corresponding to the UTC epoch at which the rate of change is to be computed.
    useGeoid - bool, whether to compute the gravity by using EGM geopotential expansion (True)
        or a restricted two-body problem (False).

    Returns
    ----------
    numpy.ndarray of shape (1,3) with three Cartesian components of the acceleration in m/s2
        given in an inertial reference frame.
    """
    if useGeoid:
        " Compute geocentric co-latitude, longitude & radius. "
        colatitude, longitude, r = calculateGeocentricLatLon(stateVec, epoch)

        " Find the gravitational potential at the desired point. "
        # See Eq. 1 in Cunningham (1996) for the general form of the geopotential expansion.
        gravitationalPotential = 0.0  # Potential of the gravitational field at the stateVec location.
        for degree in range(0, MAX_DEGREE+1):  # Go through all the desired orders and compute the geoid corrections to the sphere.
            temp = 0.  # Contribution to the potential from the current degree and all corresponding orders.
            legendreCoeffs = scipy.special.legendre(degree)  # Legendre polynomial coefficients corresponding to the current degree.
            for order in range(degree+1):  # Go through all the orders corresponding to the currently evaluated degree.
                if (abs(colatitude - math.pi/2.) <= 1E-16) or (abs(colatitude - 3*math.pi/2.) <= 1E-16):  # We're at the equator, cos(colatitude) will be zero and things will break.
                    temp += legendreCoeffs[order] * 1.0 * (Ccoeffs[degree][order]*math.cos(order*longitude) + Scoeffs[degree][order]*math.sin(order*longitude))
                else:
                    temp += legendreCoeffs[order] * math.cos(colatitude) * (Ccoeffs[degree][order]*math.cos(order*longitude) + Scoeffs[degree][order]*math.sin(order*longitude))

            gravitationalPotential += math.pow(EarthRadius/r, degree) * temp  # Add the contribution from the current degree.

        gravitationalPotential *= GM/r  # Final correction (*GM for acceleration, /r to get r^(n+1) in the denominator).

        " Compute the acceleration due to the gravity potential at the given point. "
        # stateVec is defined w.r.t. Earth's centre of mass, so no need to account
        # for the geoid shape here.
        gravityAcceleration = gravitationalPotential/r * (-1.*stateVec[:3]/r)  # First divide by the radius to get the acceleration value, then get the direction (towards centre of the Earth).
    else:
        r = numpy.linalg.norm(stateVec[:3])  # Earth-centred radius.
        gravityAcceleration = GM/(r*r) * (-1.*stateVec[:3]/r)  # First compute the magnitude, then get the direction (towards centre of the Earth).

    return gravityAcceleration
a2ea0ff1c8feb9f0a678911130e3ca6e96838b7c
3,655,566
import math
import numpy as np

def points_on_line(r0, r1, spacing):
    """ Coordinates of points spaced `spacing` apart between points `r0` and `r1`.

    The dimensionality is inferred from the length of the tuples `r0` and `r1`,
    while the specified `spacing` will be an upper bound to the actual spacing. """
    dim = len(r0)
    v = np.array(r1) - np.array(r0)
    length = np.linalg.norm(v)
    steps = math.ceil(1.0 * length / spacing) + 1
    points = np.zeros((steps, dim))
    for i in range(dim):
        points[:, i] = np.linspace(r0[i], r1[i], steps)
    return points
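A small usage sketch of the function above (values chosen for illustration):

pts = points_on_line((0.0, 0.0), (0.0, 1.0), spacing=0.3)
# length is 1.0, so steps = ceil(1.0 / 0.3) + 1 = 5 points,
# giving y values [0.0, 0.25, 0.5, 0.75, 1.0] with an actual spacing of 0.25 <= 0.3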
eb2795cba55566823632c75b7a72f34731b5e36e
3,655,567
def index() -> Response:
    """ Return application index. """
    return APP.send_static_file("index.html")
37d299ec548fe4f83d8f55f063e3bf9f5fb64c4e
3,655,568
def compare_files(og_maxima,new_maxima, compare_file, until=100, divisor=1000): """ given input of the maxima of a graph, compare it to the maxima from data100.txt maxima will be a series of x,y coordinates corresponding to the x,y values of a maximum from a file. First see if there is a maxima with the same x value as data100.txt, if there is not expand the x value ranges until a maximum is found. Find out what this dx is for the new file. Note do it for all the peaks of data100.txt at once, so that if it finds a peak for the 2nd peak of data100.txt, it doesn't also assign this to the first peak as well. kewyword arguments until and divisor: for the dx loop the loop will increase dx from 0 until until/divisor in steps of 1/divisor eg for default values until=100 and divisor=1000, it will increase dx from 0 until 100/1000 (=0.1) in steps of 1/1000 (=0.001) changing these arguments will lead to more or less peak matching, which could affect the results of the calculation significantly. """ if compare_file == 'data100.txt': return None # Whenever there is a match we will iterate this, so that we can compare #this at the end? number_of_matches = 0 # Initiate two lists to contain all the dx and dy values for each peak that # is matched by the code. dx_values = [] dy_values = [] # Loop through the original maxima list (supplied as an argument) # and also loop through the maxima from the file being compared. for og_idx,og_val in enumerate(og_maxima.T[0]): for idx,val in enumerate(new_maxima.T[0]): #this will loop dx from 0 to (until)/divisor in steps of 1/divisor for x in range(until+1): dx = x/divisor # For the current value of dx see if there is a matching # peak between the data100.txt file and the file being compared. # There is a match if the val from the compare_file is within the range # of the original peak x value +/- the dx value. if og_val - dx <= val <= og_val + dx: #if there is a match print some logging information to the console. print(f"Peak Match : index {og_idx} from data100.txt and {idx} from {compare_file}") print(f"values are {og_val} and {val} respectively") # iterate the number of peak matches between the two files being compared. number_of_matches+=1 # append the current dx value to our running list which will keep track # of the dx values for all the matched peaks dx_values.append(dx) # Get the absolute value of the difference in y values (dy) dy = abs(og_maxima.T[1][og_idx] - new_maxima.T[1][idx]) dy_values.append(dy) #breaks us out of the "for x in range" loop break # If the for loop (for x in range ...) isn't terminated by a break statement # I.E. we didn't get a match else: "move onto next peak in new_maxima" continue # If the for loop does get terminated by the break statement # I.E. we get a match """compare next peak in og_maxima, IE break the new_maxima loop and move onto next in the original maxima list""" break # Calculate the absolute value of the difference in number of peaks # between the two data files different_no_peaks = abs(len(new_maxima) - len(og_maxima)) return [dx_values, dy_values, number_of_matches, different_no_peaks]
86fe2ffd02785d41284b8edfef44d0dc0e097c90
3,655,569
import _datetime

def parseDatetimetz(string, local=True):
    """Parse the given string using :func:`parse`.

    Return a :class:`datetime.datetime` instance.
    """
    y, mo, d, h, m, s, tz = parse(string, local)
    s, micro = divmod(s, 1.0)
    micro = round(micro * 1000000)
    if tz:
        offset = _tzoffset(tz, None) / 60
        _tzinfo = tzinfo(offset)
    else:
        _tzinfo = None
    return _datetime(y, mo, d, int(h), int(m), int(s), int(micro), _tzinfo)
ce95f42f568b50ffcdc0084dc659a1d5fd0233ff
3,655,570
def median_ratio_flux(spec, smask, ispec, iref, nsig=3., niter=5, **kwargs): """ Calculate the median ratio between two spectra Parameters ---------- spec smask: True = Good, False = Bad ispec iref nsig niter kwargs Returns ------- med_scale : float Median of reference spectrum to input spectrum """ # Setup fluxes, sigs, wave = unpack_spec(spec) # Mask okm = smask[iref,:] & smask[ispec,:] # Insist on positive values okf = (fluxes[iref,:] > 0.) & (fluxes[ispec,:] > 0) allok = okm & okf # Ratio med_flux = fluxes[iref,allok] / fluxes[ispec,allok] # Clip mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(med_flux, sigma=nsig, maxiters=niter, **kwargs) # Return return med_scale
28872a548ce7569f17f155242aaf4377bf0c1b63
3,655,571
def get_tags_from_event(event):
    """List of tags

    Arguments:
        event {dict} -- Lambda event payload

    Returns:
        list -- List of AWS tags for use in a CFT
    """
    return [
        {
            "Key": "OwnerContact",
            "Value": event['OwnerContact']
        }
    ]
e7a0f7da62a4904dbfb716c57b6811053aff3497
3,655,572
from typing import List def _verify(symbol_table: SymbolTable, ontology: _hierarchy.Ontology) -> List[Error]: """Perform a battery of checks on the consistency of ``symbol_table``.""" errors = _verify_there_are_no_duplicate_symbol_names(symbol_table=symbol_table) if len(errors) > 0: return errors errors.extend( _verify_with_model_type_for_classes_with_at_least_one_concrete_descendant( symbol_table=symbol_table ) ) errors.extend( _verify_all_the_function_calls_in_the_contracts_are_valid( symbol_table=symbol_table ) ) errors.extend( _verify_all_non_optional_properties_are_initialized_in_the_constructor( symbol_table=symbol_table ) ) errors.extend( _verify_orders_of_constructors_arguments_and_properties_match( symbol_table=symbol_table ) ) errors.extend( _verify_all_argument_references_occur_in_valid_context( symbol_table=symbol_table ) ) errors.extend(_verify_constraints_and_constraintrefs(symbol_table=symbol_table)) errors.extend(_verify_description_rendering_with_smoke(symbol_table=symbol_table)) errors.extend(_verify_only_simple_type_patterns(symbol_table=symbol_table)) if len(errors) > 0: return errors _assert_interfaces_defined_correctly(symbol_table=symbol_table, ontology=ontology) _assert_all_class_inheritances_defined_an_interface(symbol_table=symbol_table) _assert_self_not_in_concrete_descendants(symbol_table=symbol_table) return errors
da9dd12f01107a0c0ea1a8b2df1aa2fb543391ab
3,655,573
def gsl_eigen_symmv_alloc(*args, **kwargs):
    """gsl_eigen_symmv_alloc(size_t n) -> gsl_eigen_symmv_workspace"""
    return _gslwrap.gsl_eigen_symmv_alloc(*args, **kwargs)
54384bfa9787b9a337ad3b9e2d9ea211769238d4
3,655,574
def add_poll_answers(owner, option):
    """
    Add poll answer object. A matching user and option is considered the same answer.

    :param owner: User object.
    :param option: Chosen poll option.
    :return: Poll answer object, Boolean (true, if created).
    """
    '''
    owner = models.ForeignKey(User, related_name='poll_answers', on_delete=models.CASCADE)
    answer = models.ForeignKey(PollOption, related_name='answers', on_delete=models.CASCADE)
    '''
    created = False
    try:
        a = PollAnswer.objects.get(owner=owner, answer=option)
    except PollAnswer.DoesNotExist:
        a = PollAnswer(owner=owner, answer=option)
        a.save()
        created = True  # the answer did not exist before, so report it as newly created
    return a, created
ac667fbfb47aeb7d2450a3d698b0b678c3bdfdbc
3,655,575
def calculate_rrfdi(red_filename, nir_filename):
    """
    A function to calculate the Normalised Difference Vegetation Index from
    red and near infrared reflectances. The reflectance data ought to be
    present on two different files, specified by the variables `red_filename`
    and `nir_filename`. The file format ought to be recognised by GDAL.
    """
    g_red = gdal.Open(red_filename)
    red = g_red.ReadAsArray()
    g_nir = gdal.Open(nir_filename)
    nir = g_nir.ReadAsArray()
    if (g_red.RasterXSize != g_nir.RasterXSize) or \
            (g_red.RasterYSize != g_nir.RasterYSize):
        print("ERROR: Input datasets don't match!")
        print("\t Red data shape is %dx%d" % (red.shape))
        print("\t NIR data shape is %dx%d" % (nir.shape))
        sys.exit(-1)
    passer = True
    rrfdi = np.where(passer,
                     (1.*red - 1.*nir) / (1.*nir + 1.*red),
                     -999)
    return rrfdi * (-1)
3b8f4d7eadceb38b7f874bfe0a56827f7a8aab09
3,655,576
import re

def strip_price(header_list):
    """input a list of tag-type values and return list of strings
    with surrounding html characters removed"""
    match_obs = []
    regex = '\$(((\d+).\d+)|(\d+))'
    string_list = []  # ['' for item in range(len(header_list))]
    for item in range(len(header_list)):
        match_obs.append(re.search(regex, str(header_list[item])))
    for i in range(len(match_obs)):
        # print(match_obs[i])
        string_list.append(match_obs[i].group(1))
    # print(string_list)
    return string_list
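An assumed usage example for the function above (the HTML strings are invented for illustration):

headers = ['<h3 class="price">$12.99</h3>', '<span>$5</span>']
strip_price(headers)  # -> ['12.99', '5']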
7b3d90416e44f8aa61ababc0e7b68f82ae754413
3,655,579
import functools

def module(input, output, version):
    """A decorator which turns a function into a module"""
    def decorator(f):
        class Wrapper(Module):
            def __init__(self):
                super().__init__(input, output, version)

            @property
            def name(self):
                """The module's name"""
                return f.__name__

            def execute(self, *args, **kwargs):
                return f(*args, **kwargs)

        wrapper = Wrapper()
        return functools.wraps(f)(wrapper)
    return decorator
b7d5afcaa8fa52411024f84f979891d19ccf60c0
3,655,580
def compile_modules_to_ir( result: BuildResult, mapper: genops.Mapper, compiler_options: CompilerOptions, errors: Errors, ) -> ModuleIRs: """Compile a collection of modules into ModuleIRs. The modules to compile are specified as part of mapper's group_map. Returns the IR of the modules. """ deser_ctx = DeserMaps({}, {}) modules = {} # Process the graph by SCC in topological order, like we do in mypy.build for scc in sorted_components(result.graph): scc_states = [result.graph[id] for id in scc] trees = [st.tree for st in scc_states if st.id in mapper.group_map and st.tree] if not trees: continue fresh = all(id not in result.manager.rechecked_modules for id in scc) if fresh: load_scc_from_cache(trees, result, mapper, deser_ctx) else: scc_ir = compile_scc_to_ir(trees, result, mapper, compiler_options, errors) modules.update(scc_ir) return modules
e2ea8a87a1ed2450e4c8ed99c7ca8a3142568f45
3,655,581
def minutes_to_restarttime(minutes):
    """
    converts an int meaning Minutes after midnight
    into a restartTime string understood by the bos command
    """
    if minutes == -1:
        return "never"
    pod = "am"
    if minutes > 12*60:
        pod = "pm"
        minutes -= 12*60
    time = "%d:%02d %s" % (minutes // 60, minutes % 60, pod)
    return time
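An illustrative call for the function above:

minutes_to_restarttime(810)  # 810 min after midnight = 13:30 -> "1:30 pm"
minutes_to_restarttime(-1)   # -> "never"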
6d7807cebb7a474553dda8eadfd27e5ce7b2a657
3,655,582
import numpy as np
from tqdm import tqdm

def ccm_test(x, y, emb_dim="auto", l_0="auto", l_1="auto", tau=1, n=10, mean_num=10, max_dim=10):
    """
    estimate x from y to judge x->y cause
    :param x:
    :param y:
    :param l_0:
    :param l_1:
    :param emb_dim:
    :param tau:
    :param n:
    :return:
    """
    if emb_dim == "auto":
        emb_dim = decide_dim(x, y)
    if l_0 == "auto":
        l_0 = int(np.ceil((len(x) - emb_dim + 1) * 0.1))
    if l_1 == "auto":
        l_1 = int(np.ceil((len(x) - emb_dim + 1) * 0.9))

    ys = twin_surrogate(y, emb_dim, num=n)
    raw_rhos = []
    rhos = []
    max_length = len(ys[0])
    for i in tqdm(range(n)):
        mean = 0
        for j in range(mean_num):
            rho_0, _ = estimate_using_bootstrap(x, y, length=l_0, emb_dim=emb_dim, tau=tau)
            rho_1, _ = estimate_using_bootstrap(x, y, length=l_1, emb_dim=emb_dim, tau=tau)
            rho_s_0, _ = estimate_from_emb_random(x, ys[i], length=l_0, emb_dim=emb_dim,
                                                  tau=tau, max_length=max_length)
            rho_s_1, _ = estimate_from_emb_random(x, ys[i], length=l_1, emb_dim=emb_dim,
                                                  tau=tau, max_length=max_length)
            raw_rhos.append([rho_0, rho_1, rho_s_0, rho_s_1])
            mean += rho_1 - rho_0 - (rho_s_1 - rho_s_0)
        rhos.append(mean / mean_num)
    rhos = np.array(rhos)
    p = 1 - (len(rhos[rhos > 0]) / n)
    return {
        "p_value": p,
        "rhos": rhos,
        "raw_rhos": raw_rhos
    }
c03a05e62df36910ea05e361c9683b60befc1b9c
3,655,583
def make_indiv_spacing(subject, ave_subject, template_spacing, subjects_dir): """ Identifies the suiting grid space difference of a subject's volume source space to a template's volume source space, before a planned morphing takes place. Parameters: ----------- subject : str Subject ID. ave_subject : str Name or ID of the template brain, e.g., fsaverage. template_spacing : float Grid spacing used for the template brain. subjects_dir : str Path to the subjects directory. Returns: -------- trans : SourceEstimate The generated source time courses. """ fname_surf = op.join(subjects_dir, subject, 'bem', 'watershed', '%s_inner_skull_surface' % subject) fname_surf_temp = op.join(subjects_dir, ave_subject, 'bem', 'watershed', '%s_inner_skull_surface' % ave_subject) surf = mne.read_surface(fname_surf, return_dict=True, verbose='ERROR')[-1] surf_temp = mne.read_surface(fname_surf_temp, return_dict=True, verbose='ERROR')[-1] mins = np.min(surf['rr'], axis=0) maxs = np.max(surf['rr'], axis=0) mins_temp = np.min(surf_temp['rr'], axis=0) maxs_temp = np.max(surf_temp['rr'], axis=0) # Check which dimension (x,y,z) has greatest difference diff = (maxs - mins) diff_temp = (maxs_temp - mins_temp) # print additional information # for c, mi, ma, md in zip('xyz', mins, maxs, diff): # logger.info(' %s = %6.1f ... %6.1f mm --> Difference: %6.1f mm' # % (c, mi, ma, md)) # for c, mi, ma, md in zip('xyz', mins_temp, maxs_temp, diff_temp): # logger.info(' %s = %6.1f ... %6.1f mm --> Difference: %6.1f mm' # % (c, mi, ma, md)) prop = (diff / diff_temp).mean() indiv_spacing = (prop * template_spacing) print(" '%s' individual-spacing to '%s'[%.2f] is: %.4fmm" % ( subject, ave_subject, template_spacing, indiv_spacing)) return indiv_spacing
cbe5120093fdf78913c2386820d3388aca0724d1
3,655,584
def sqlpool_blob_auditing_policy_update( cmd, instance, state=None, storage_account=None, storage_endpoint=None, storage_account_access_key=None, storage_account_subscription_id=None, is_storage_secondary_key_in_use=None, retention_days=None, audit_actions_and_groups=None, is_azure_monitor_target_enabled=None): """ Updates a sql pool blob auditing policy. Custom update function to apply parameters to instance. """ _audit_policy_update(cmd, instance, state, storage_account, storage_endpoint, storage_account_access_key, storage_account_subscription_id, is_storage_secondary_key_in_use, retention_days, audit_actions_and_groups, is_azure_monitor_target_enabled) return instance
e99013545172eb03ad5dddeefdb0b36b7bb2edd7
3,655,585
def format_search_filter(model_fields):
    """
    Creates an LDAP search filter for the given set of model fields.
    """
    ldap_fields = convert_model_fields_to_ldap_fields(model_fields)
    ldap_fields["objectClass"] = settings.LDAP_AUTH_OBJECT_CLASS
    search_filters = import_func(settings.LDAP_AUTH_FORMAT_SEARCH_FILTERS)(ldap_fields)
    return "(&{})".format("".join(search_filters))
b6c5c17b566c583a07ef5e9f3ec61cb868f6f8ab
3,655,587
def normalize_img(img):
    """
    normalize image (caffe model definition compatible)
    input: opencv numpy array image (h, w, c)
    output: dnn input array (c, h, w)
    """
    scale = 1.0
    mean = [104, 117, 123]
    img = img.astype(np.float32)
    img = img * scale
    img -= mean
    img = np.transpose(img, (2, 0, 1))
    return img
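A minimal usage sketch for the function above, with a dummy array standing in for a real OpenCV frame:

import numpy as np

dummy_bgr = np.zeros((224, 224, 3), dtype=np.uint8)  # stand-in for cv2.imread(...)
blob = normalize_img(dummy_bgr)
print(blob.shape)  # (3, 224, 224): channel-first, mean-subtracted float32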
dac9ec8c942d70fb98f0b0989e9643f80dde5448
3,655,589
from typing import List from typing import Any def pages(lst: List[Any], n: int, title: str, *, fmt: str = "```%s```", sep: str = "\n") -> List[discord.Embed]: # noinspection GrazieInspection """ Paginates a list into embeds to use with :class:disputils.BotEmbedPaginator :param lst: the list to paginate :param n: the number of elements per page :param title: the title of the embed :param fmt: a % string used to format the resulting page :param sep: the string to join the list elements with :return: a list of embeds """ l: List[List[str]] = group_list([str(i) for i in lst], n) pgs = [sep.join(page) for page in l] return [ discord.Embed( title=f"{title} - {i + 1}/{len(pgs)}", description=fmt % pg ) for i, pg in enumerate(pgs) ]
f8d9471f2d254b63754128a2e2762520f858edbd
3,655,590
import re

def Substitute_Percent(sentence):
    """
    Substitutes percents with special token
    """
    sentence = re.sub(r'''(?<![^\s"'[(])[+-]?[.,;]?(\d+[.,;']?)+%(?![^\s.,;!?'")\]])''',
                      ' @percent@ ', sentence)
    return sentence
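An illustrative call for the function above (the sentence is invented for the example):

Substitute_Percent("Revenue grew 12.5% this quarter")
# -> "Revenue grew  @percent@  this quarter"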
61bc6970af09703ef018bfcc9378393241ae21ed
3,655,591
def ready_df1(df):
    """
    This function prepares the dataframe for EDA.
    """
    df = remove_columns(df, columns=[
        'nitrogen_dioxide',
        'nitrogen_dioxide_aqi',
        'sulfur_dioxide',
        'sulfur_dioxide_aqi',
        'trioxygen',
        'trioxygen_aqi',
        'volatile',
        'volatile_aqi',
    ])
    df['fahrenheit'] = 9.0/5.0 * df['temperature'] + 32
    df = df.drop(columns=['temperature'])
    df = df.rename(index=str, columns={'fahrenheit': 'temperature'})
    df['carbon_monoxide'] = df['carbon_monoxide'].fillna(0).astype(int)
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    return df
3776c571d3eabb39ce27017ac1481e2bd469f68c
3,655,592
def _wrap(func, args, flip=True):
    """Return partial function with flipped args if flip=True

    :param function func: Any function
    :param args args: Function arguments
    :param bool flip: If true reverse order of arguments.
    :return: Returns function
    :rtype: function
    """
    @wraps(func)
    def flippedfunc(*args):
        return func(*args[::-1])

    return partial(flippedfunc if flip else func, args)
9ac5a814840f821260d46df64b60cd6d71185dbb
3,655,593
def compute_kkt_optimality(g, on_bound):
    """Compute the maximum violation of KKT conditions."""
    g_kkt = g * on_bound
    free_set = on_bound == 0
    g_kkt[free_set] = np.abs(g[free_set])
    return np.max(g_kkt)
216cf110d64d1fd8ec89c0359ebaa9b4e4dcc773
3,655,594
def replace_cipd_revision(file_path, old_revision, new_revision): """Replaces cipd revision strings in file. Args: file_path: Path to file. old_revision: Old cipd revision to be replaced. new_revision: New cipd revision to use as replacement. Returns: Number of replaced occurrences. Raises: IOError: If no occurrences were found. """ with open(file_path) as f: contents = f.read() num = contents.count(old_revision) if not num: raise IOError('Did not find old CIPD revision {} in {}'.format( old_revision, file_path)) newcontents = contents.replace(old_revision, new_revision) with open(file_path, 'w') as f: f.write(newcontents) return num
f429e74f0dd7180ab4bf90d662f8042b958b81f8
3,655,595
def spectral_derivs_plot(spec_der, contrast=0.1, ax=None, freq_range=None, fft_step=None, fft_size=None): """ Plot the spectral derivatives of a song in a grey scale. spec_der - The spectral derivatives of the song (computed with `spectral_derivs`) or the song itself contrast - The contrast of the plot ax - The matplotlib axis where the plot must be drawn, if None, a new axis is created freq_range - The amount of frequency to plot, usefull only if `spec_der` is a song. Given to `spectral_derivs` ov_params - The Parameters to override, passed to `spectral_derivs` """ if spec_der.ndim == 1: spec_der = spectral_derivs(spec_der, freq_range, fft_step, fft_size) ax = sns.heatmap(spec_der.T, yticklabels=50, xticklabels=50, vmin=-contrast, vmax=contrast, ax=ax, cmap='Greys', cbar=False) ax.invert_yaxis() return ax
5b683d8c49e9bad2fd1fa029af6bc5660bc0e936
3,655,596
from operator import add
from operator import sub

def scale_center(pnt, fac, center):
    """scale point in relation to a center"""
    return add(scale(sub(pnt, center), fac), center)
f69ca54e25d5eb8008b8f08c40500f236005e093
3,655,597
def gopherize_feed(feed_url, timestamp=False, plug=True):
    """Return a gophermap string for the feed at feed_url."""
    return gopherize_feed_object(feedparser.parse(feed_url), timestamp, plug)
aaf4d35044c873e7d0f1a43c4d001ebe5e30714b
3,655,598
def first_sunday_of_month(datetime: pendulum.DateTime) -> pendulum.DateTime:
    """Get the first Sunday of the month based on a given datetime.

    :param datetime: the datetime.
    :return: the first Sunday of the month.
    """
    return datetime.start_of("month").first_of("month", day_of_week=7)
88c517d1d38785c0d8f9c0f79f3d34199dfceb1e
3,655,599
import pickle def evaluate_single_model( model_path, model_index, save_preds_to_db, save_prefix, metrics, k_values, X, y, labeled_indices): """ Evaluate a single model with provided model specifications and data. Arguments: - model_path: path to load the model - model_index: index for the model - save_preds_to_db: whether or not to save predictions to database - save_prefix: string prefix for any tables created - metrics: a list of metrics to use - k_values: k-values used for computing the metrics - X: feature array - y: label array - labeled_indices: indices of rows that have labels Returns: - model_index: index for the model - model_results: an (M x K) array of model results, for each metric, at each k-value """ # Load saved model with open(model_path, 'rb') as file: model = pickle.load(file) # Get predictions pred_table_name = f'{save_prefix}_model_{model_index}' if save_preds_to_db else None y_preds, probs = get_predictions(model, X, k_values=k_values, pred_table_name=pred_table_name) # Filter labels y_preds_filtered = y_preds[labeled_indices] y_filtered = y.to_numpy(copy=True)[labeled_indices] # Calculate metrics for each k value model_results = np.zeros((len(metrics), len(k_values))) for i, metric in enumerate(metrics): for j in range(len(k_values)): model_results[i, j] = metric(y_filtered, y_preds_filtered[:, j]) return model_index, model_results
311589284c46d19e04cd04fd36056e1b53c4bb52
3,655,600
def sigmoid(x: np.ndarray, derivative: bool = False) -> np.ndarray:
    """
    The sigmoid function which is given by 1/(1+exp(-x))
    where x is a number or np vector. If derivative is True it applies
    the derivative of the sigmoid function instead.

    Examples:
    >>> sigmoid(0)
    0.5

    >>> abs(sigmoid(np.array([100, 30, 10])) - 1) < 0.001
    array([ True,  True,  True])

    >>> abs(sigmoid(-100) - 0) < 0.001
    True
    """
    if derivative:
        return sigmoid(x) * (1 - sigmoid(x))
    return 1 / (1 + np.exp(-x))
7a80b978a9dd8503ba6ec56ce11a5ee9c0564fdb
3,655,602
def create_hierarchy( num_samples, bundle_size, directory_sizes=None, root=".", start_sample_id=0, start_bundle_id=0, address="", n_digits=1, ): """ SampleIndex Hierarchy Factory method. Wraps create_hierarchy_from_max_sample, which is a max_sample-based API, not a numSample-based API like this method. :param num_samples: The total number of samples. :bundle_size: The max number of samples a bundle file is responsible for. :directory_sizes: The number of samples each directory is responsible for - a list, one value for each level in the directory hierarchy. :root: The root path of this index. Defaults to ".". :start_sample_id: The start of the sample count. Defaults to 0. :n_digits: The number of digits to pad the directories with """ if directory_sizes is None: directory_sizes = [] return create_hierarchy_from_max_sample( num_samples + start_sample_id, bundle_size, directory_sizes=directory_sizes, root=root, start_bundle_id=start_bundle_id, min_sample=start_sample_id, address=address, n_digits=n_digits, )
6d41b995d664eec2c9d6454abfc485c2c4202220
3,655,603
def eval_sysu(distmat, q_pids, g_pids, q_camids, g_camids, max_rank = 20): """Evaluation with sysu metric Key: for each query identity, its gallery images from the same camera view are discarded. "Following the original setting in ite dataset" """ num_q, num_g = distmat.shape if num_g < max_rank: max_rank = num_g print("Note: number of gallery samples is quite small, got {}".format(num_g)) indices = np.argsort(distmat, axis=1) pred_label = g_pids[indices] matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32) # compute cmc curve for each query new_all_cmc = [] all_cmc = [] all_AP = [] all_INP = [] num_valid_q = 0. # number of valid query for q_idx in range(num_q): # get query pid and camid q_pid = q_pids[q_idx] q_camid = q_camids[q_idx] # remove gallery samples that have the same pid and camid with query order = indices[q_idx] remove = (q_camid == 3) & (g_camids[order] == 2) keep = np.invert(remove) # compute cmc curve # the cmc calculation is different from standard protocol # we follow the protocol of the author's released code new_cmc = pred_label[q_idx][keep] new_index = np.unique(new_cmc, return_index=True)[1] new_cmc = [new_cmc[index] for index in sorted(new_index)] new_match = (new_cmc == q_pid).astype(np.int32) new_cmc = new_match.cumsum() new_all_cmc.append(new_cmc[:max_rank]) orig_cmc = matches[q_idx][keep] # binary vector, positions with value 1 are correct matches if not np.any(orig_cmc): # this condition is true when query identity does not appear in gallery continue cmc = orig_cmc.cumsum() # compute mINP # refernece Deep Learning for Person Re-identification: A Survey and Outlook pos_idx = np.where(orig_cmc == 1) pos_max_idx = np.max(pos_idx) inp = cmc[pos_max_idx]/ (pos_max_idx + 1.0) all_INP.append(inp) cmc[cmc > 1] = 1 all_cmc.append(cmc[:max_rank]) num_valid_q += 1. # compute average precision # reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision num_rel = orig_cmc.sum() tmp_cmc = orig_cmc.cumsum() tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)] tmp_cmc = np.asarray(tmp_cmc) * orig_cmc AP = tmp_cmc.sum() / num_rel all_AP.append(AP) assert num_valid_q > 0, "Error: all query identities do not appear in gallery" all_cmc = np.asarray(all_cmc).astype(np.float32) all_cmc = all_cmc.sum(0) / num_valid_q # standard CMC new_all_cmc = np.asarray(new_all_cmc).astype(np.float32) new_all_cmc = new_all_cmc.sum(0) / num_valid_q mAP = np.mean(all_AP) mINP = np.mean(all_INP) return new_all_cmc, mAP, mINP
ccf61aa9f91e95cebfd63855aea366cb50de8887
3,655,604
import getpass

def espa_login() -> str:
    """
    Get ESPA password using command-line input
    :return:
    """
    return getpass.getpass("Enter ESPA password: ")
3ba61567d23ba3771effd6f0aa1a4ac504467378
3,655,605
def row_up1_array(row, col):
    """This function establishes an array that contains the index for the row above each entry"""
    up1_array = np.zeros((row, col), dtype=np.uint8)
    for i in range(row):
        up1_array[i, :] = np.ones(col, dtype=np.uint8) * ((i - 1) % row)
    return up1_array
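A small worked example of the helper above:

row_up1_array(3, 2)
# array([[2, 2],      row 0 wraps around to the last row (index 2)
#        [0, 0],
#        [1, 1]], dtype=uint8)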
19cf1e3ceb9fe174c5cc3c6ba2c336fc58412037
3,655,606
def lcm(a, b):
    """Return lowest common multiple."""
    return a * b // gcd(a, b)
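For example, assuming `gcd` comes from Python's `math` module:

from math import gcd

lcm(4, 6)   # gcd(4, 6) = 2, so 4 * 6 // 2 = 12
lcm(21, 6)  # gcd(21, 6) = 3, so 21 * 6 // 3 = 42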
27a7d5af9001015a0aff459af274a45921d2bc94
3,655,607
from typing import Callable

def chl_mean_hsl(weights: np.ndarray) -> Callable[[np.ndarray], np.ndarray]:
    """
    return a function that can calculate the channel-wise average
    of the input picture in HSL color space
    """
    return lambda img: np.average(cv2.cvtColor(img, cv2.COLOR_BGR2HLS),
                                  axis=(0, 1), weights=weights)
b5e337fb3bee18762e31aef3d666906975305b4b
3,655,608
def cosine_mrl_option(labels, predicts): """For a minibatch of image and sentences embeddings, computes the pairwise contrastive loss""" #batch_size, double_n_emd = tensor.shape(predicts) #res = tensor.split(predicts, [double_n_emd/2, double_n_emd/2], 2, axis=-1) img = l2norm(labels) text = l2norm(predicts) scores = tensor.dot(img, text.T) diagonal = scores.diagonal() mrl_margin = 0.3 loss_max_violation = True # caption retrieval (1 + neg - pos) cost_s = tensor.maximum(0, mrl_margin + scores - diagonal.reshape((-1,1))) # clear diagonals cost_s = fill_diagonal(cost_s, 0) # img retrieval cost_im = tensor.maximum(0, mrl_margin + scores - diagonal) cost_im = fill_diagonal(cost_im, 0) if loss_max_violation: if cost_s: cost_s = tensor.max(cost_s, axis=1) if cost_im: cost_im = tensor.max(cost_im, axis=0) loss = cost_s.mean() + cost_im.mean() return loss
e103b1b0075438270e79913bb59b1117da09b51f
3,655,609
def escape_cdata(cdata):
    """Escape a string for an XML CDATA section"""
    return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')
c38b934b4c357e8c15fd1f3942f84ca3aaab4ee1
3,655,610
import inspect import pprint def _collect_data_for_docstring(func, annotation): """ Collect data to be printed in docstring. The data is collected from custom annotation (dictionary passed as a parameter for the decorator) and standard Python annotations for the parameters (if any). Data from custom annotation always overrides Python parameter annotations. Parameters ---------- func: callable Reference to the function. annotation: dict Custom annotation. Returns ------- Dictionary of the collected parameters """ signature = inspect.signature(func) parameters = signature.parameters return_annotation = signature.return_annotation doc_params = dict() # Description of the function doc_params["description"] = annotation.get("description", "") # Flag that tells if the function is a generator. Title for returning # values for generator is 'Yields' and for regular functions it is 'Returns' doc_params["is_generator"] = inspect.isgeneratorfunction(func) doc_params["parameters"] = {} if parameters: # The function may have no parameters # We will print names of ALL parameters from the signature for p_name, p in parameters.items(): # Select description, annotation and types from available sources. # Annotation (parameter of the wrapper) always overrides Python annotation. doc_params["parameters"][p_name] = {} kind = p.kind.name kind = kind.lower().replace("_", " ") doc_params["parameters"][p_name]["kind"] = kind desc, an, plans, devices, enums = "", "", {}, {}, {} if ("parameters" in annotation) and (p_name in annotation["parameters"]): p_an = annotation["parameters"][p_name] desc = p_an.get("description", "") if "annotation" in p_an: an = p_an["annotation"] # Ignore annotation if it is an empty string. Lists of plans # and devices make no sense, so don't include them. if an: # Now save the lists of plans and devices if any plans = p_an.get("plans", {}) devices = p_an.get("devices", {}) enums = p_an.get("enums", {}) if not an and parameters[p_name].annotation != inspect.Parameter.empty: an = str(parameters[p_name].annotation) doc_params["parameters"][p_name]["annotation"] = _convert_annotation_to_type(an) doc_params["parameters"][p_name]["description"] = desc doc_params["parameters"][p_name]["plans"] = plans doc_params["parameters"][p_name]["devices"] = devices doc_params["parameters"][p_name]["enums"] = enums if p.default != inspect.Parameter.empty: # Print will print strings in quotes (desired behavior) v_default = pprint.pformat(p.default) else: v_default = None # If 'v_default' is None, it is not specified, so it should not be printed # in the docstring at all doc_params["parameters"][p_name]["default"] = v_default # Print return value annotation and description. Again the annotation from # custom annotation overrides Python annotation. doc_params["returns"] = {} desc, an = "", "" if "returns" in annotation or (return_annotation != inspect.Parameter.empty): if "returns" in annotation: desc = annotation["returns"].get("description", "") an = annotation["returns"].get("annotation", "") if not an: if return_annotation != inspect.Signature.empty: an = str(return_annotation) doc_params["returns"]["description"] = desc if doc_params["is_generator"]: an = _extract_yield_type(an) doc_params["returns"]["annotation"] = _convert_annotation_to_type(an) return doc_params
32a7ac62506dfc04157c613fa781b3d740a95451
3,655,611
def _strip_unbalanced_punctuation(text, is_open_char, is_close_char): """Remove unbalanced punctuation (e.g parentheses or quotes) from text. Removes each opening punctuation character for which it can't find corresponding closing character, and vice versa. It can only handle one type of punctuation (e.g. it could strip quotes or parentheses but not both). It takes functions (is_open_char, is_close_char), instead of the characters themselves, so that we can determine from nearby characters whether a straight quote is an opening or closing quote. Args: text (string): the text to fix is_open_char: a function that accepts the text and an index, and returns true if the character at that index is an opening punctuation mark. is_close_char: same as is_open_char for closing punctuation mark. Returns: The text with unmatched punctuation removed. """ # lists of unmatched opening and closing chararacters opening_chars = [] unmatched_closing_chars = [] for idx, c in enumerate(text): if is_open_char(text, idx): opening_chars.append(idx) elif is_close_char(text, idx): if opening_chars: # this matches a character we found earlier opening_chars.pop() else: # this doesn't match any opening character unmatched_closing_chars.append(idx) char_indices = [i for (i, _) in enumerate(text) if not(i in opening_chars or i in unmatched_closing_chars)] stripped_text = "".join([text[i] for i in char_indices]) return stripped_text
db4b8f201e7b01922e6c06086594a8b73677e2a2
3,655,612
def read(fin, alphabet=None):
    """Read and parse a fasta file.

    Args:
        fin -- A stream or file to read
        alphabet -- The expected alphabet of the data, if given

    Returns:
        SeqList -- A list of sequences

    Raises:
        ValueError -- If the file is unparsable
    """
    seqs = [s for s in iterseq(fin, alphabet)]
    name = None  # the original referenced an undefined ``names[0]``; fall back to no name
    if hasattr(fin, "name"):
        name = fin.name
    return SeqList(seqs, name=name)
1ff492ac533a318605569f94ef66036c847b21d5
3,655,613
def get_min_max_value(dfg): """ Gets min and max value assigned to edges in DFG graph Parameters ----------- dfg Directly follows graph Returns ----------- min_value Minimum value in directly follows graph max_value Maximum value in directly follows graph """ min_value = 9999999999 max_value = -1 for edge in dfg: if dfg[edge] < min_value: min_value = dfg[edge] if dfg[edge] > max_value: max_value = dfg[edge] return min_value, max_value
17a98350f4e13ec51e72d4357e142ad661e57f54
3,655,614
def vgg_fcn(num_classes=1000, pretrained=False, batch_norm=False, **kwargs): """VGG 16-layer model (configuration "D") Args: num_classes(int): the number of classes at dataset pretrained (bool): If True, returns a model pre-trained on ImageNet batch_norm: if you want to introduce batch normalization """ if pretrained: kwargs['init_weights'] = True model = VGG(make_layers(cfg['D'], batch_norm=batch_norm), num_classes, **kwargs) if pretrained: # loading weights if batch_norm: pretrained_weights = model_zoo.load_url(model_urls['vgg19_bn']) else: pretrained_weights = model_zoo.load_url(model_urls['vgg19']) model.load_state_dict(pretrained_weights, strict=False) return model
73c1e80e0ffc6aff670394d1b1ec5e2b7d21cf06
3,655,616
import time

def fmt_time(timestamp):
    """Return ISO formatted time from seconds from epoch."""
    if timestamp:
        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(timestamp))
    else:
        return '-'
c87f1da7b6a3b1b8d8daf7d85a2b0746be58133b
3,655,618
def lislice(iterable, *args):
    """ (iterable, stop) or (iterable, start, stop[, step])

    >>> lislice('ABCDEFG', 2)
    ['A', 'B']
    >>> lislice('ABCDEFG', 2, 4)
    ['C', 'D']
    >>> lislice('ABCDEFG', 2, None)
    ['C', 'D', 'E', 'F', 'G']
    >>> lislice('ABCDEFG', 0, None, 2)
    ['A', 'C', 'E', 'G']
    """
    return list(islice(iterable, *args))
6c7eb26a9ab5cb913c17f77c2a64929cfc7ebb06
3,655,619
def calculate_transition_cost(number_objs: int, target_storage_class: str) -> float:
    """
    Calculates the cost of transitioning data from one class to another

    Args:
        number_objs: the number of objects that are added on a monthly basis
        target_storage_class: the storage class the objects will reside in
            after they are transitioned

    Returns:
        int, the cost of the transition
    """
    target_storage_class_data = data[target_storage_class]
    transition_cost = (
        number_objs / target_storage_class_data["items_per_transition_chunk"]
    ) * target_storage_class_data["transition_cost"]
    return transition_cost
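To illustrate the arithmetic above, a hypothetical `data` table (the class name and prices are invented, not actual cloud pricing):

# Hypothetical pricing table with the two fields the function reads.
data = {
    "GLACIER": {
        "items_per_transition_chunk": 1000,  # objects covered by one priced transition chunk
        "transition_cost": 0.05,             # cost per chunk (made-up number)
    },
}

calculate_transition_cost(20000, "GLACIER")
# 20000 / 1000 * 0.05 = 1.0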
01ec7d3e7149dadc020ab6f82033a178366c6ebf
3,655,620
def get_covid():
    """This module sends off a covid notification. You can't get covid from this."""
    covid_data = covid_handler()
    covid_content = Markup("Date: " + str(covid_data["date"]) +
                           ",<br/>Country: " + str(covid_data["areaName"]) +
                           ",<br/>New Cases: " + str(covid_data["newCasesByPublishDate"]) +
                           ",<br/>Total Cases: " + str(covid_data["cumCasesByPublishDate"]))
    # The above formats the covid data, ready to send it off as a notification
    covid_notification = {"title": "Covid Cases", "content": covid_content}
    return covid_notification
0c6e4c8e5df7b7e13212eabe46f8a72a7874fde5
3,655,622
def send_songogram(your_name, artist_first_name, artist_last_name, song_name, number_to_call): """ Function for sending a Sonogram. :param your_name: string containing the person sending the sonogram's name. :param artist_first_name: string containing the musician's first name. :param artist_last_name: string containing the musician's last name. :param song_name: string containing the song name. :param number_to_call: string of the telephone number to send a sonogram to. """ try: lyrics = scrape_lyrics(artist_first_name, artist_last_name, song_name) make_call(number_to_call, lyrics, your_name) send_text(song_name, artist_first_name + ' ' + artist_last_name, number_to_call, your_name) return {'status': 201} except: return {'status': 400,'error': 'Bad Request', 'message': 'Unable to process request'}
84e67f7b8b185817596f0fd0173e4cc989616687
3,655,623
def segm_and_cat(sersic_2d_image): """fixture for segmentation and catalog""" image_mean, image_median, image_stddev = sigma_clipped_stats(sersic_2d_image, sigma=3) threshold = image_stddev * 3 # Define smoothing kernel kernel_size = 3 fwhm = 3 # Min Source size (area) npixels = 4 ** 2 return make_catalog( sersic_2d_image, threshold=threshold, deblend=True, kernel_size=kernel_size, fwhm=fwhm, npixels=npixels, contrast=0.00, plot=False, )
2a6018f7b4c2a1aea946b6744840bd2216352002
3,655,624
from typing import Tuple def break_word_by_trailing_integer(pname_fid: str) -> Tuple[str, str]: """ Splits a word that has a value that is an integer Parameters ---------- pname_fid : str the DVPRELx term (e.g., A(11), NSM(5)) Returns ------- word : str the value not in parentheses value : int the value in parentheses Examples -------- >>> break_word_by_trailing_integer('T11') ('T', '11') >>> break_word_by_trailing_integer('THETA11') ('THETA', '11') """ nums = [] i = 0 for i, letter in enumerate(reversed(pname_fid)): if letter.isdigit(): nums.append(letter) else: break num = ''.join(nums[::-1]) if not num: msg = ("pname_fid=%r does not follow the form 'T1', 'T11', 'THETA42' " "(letters and a number)" % pname_fid) raise SyntaxError(msg) word = pname_fid[:-i] assert len(word)+len(num) == len(pname_fid), 'word=%r num=%r pname_fid=%r' % (word, num, pname_fid) return word, num
e9b9c85b4225269c94918ce1cc2e746d3c74aa5c
3,655,625
def preprocess_data(image, label, is_training): """CIFAR data preprocessing""" image = tf.image.convert_image_dtype(image, tf.float32) if is_training: crop_padding = 4 image = tf.pad(image, [[crop_padding, crop_padding], [crop_padding, crop_padding], [0, 0]], 'REFLECT') image = tf.image.random_crop(image, [32, 32, 3]) image = tf.image.random_flip_left_right(image) if FLAGS.distort_color: image = color_distortion(image, s=1.0) else: image = tf.image.resize_with_crop_or_pad(image, 32, 32) # central crop return image, label
642f384fbf1aa2f884e64de2edf264890317b258
3,655,627
def load_Counties():
    """
    Use load_country() instead of this function
    """
    # Get data
    # Load data using Pandas
    dfd = {
        'positive': reread_csv(csv_data_file_Global['confirmed_US']),
        'death': reread_csv(csv_data_file_Global['deaths_US']),
    }
    return dfd
098d08f3720b6c6148c51000e6e1512d382adeaf
3,655,628
from typing import Iterator from typing import Any def issetiterator(object: Iterator[Any]) -> bool: """Returns True or False based on whether the given object is a set iterator. Parameters ---------- object: Any The object to see if it's a set iterator. Returns ------- bool Whether the given object is a set iterator. """ if not isiterable(object): return False return isinstance(object, SetIteratorType)
07ecdcc72c62c4ce3d5fb91181cd1bc785d6cb4d
3,655,630
from mpl_toolkits.mplot3d import Axes3D def plotModeScatter( pc , xMode=0, yMode=1, zMode=None, pointLabels=None, nTailLabels=3, classes=None): """ scatter plot mode projections for up to 3 different modes. PointLabels is a list of strings corresponding to each shape. nTailLabels defines number of points that are labelled at the tails of the distributions, can be 'all' to label all points. Point labels are for 2D plots only. """ xWeights = pc.projectedWeights[xMode] yWeights = pc.projectedWeights[yMode] colourMap = mpl.cm.gray if classes==None: c = 'r' else: c = classes if zMode == None: fig = plot.figure() ax = fig.add_subplot(111) plt = ax.scatter(xWeights,yWeights, c=c, marker='o', cmap=colourMap) ax.set_title('Scatter: Mode %d vs Mode %d'%(xMode, yMode)) ax.set_xlabel('Mode %d'%(xMode)) ax.set_ylabel('Mode %d'%(yMode)) if pointLabels!=None: if nTailLabels=='all': for label, x, y in zip(pointLabels, xWeights, yWeights): plot.annotate( label, xy=(x,y), xytext=(-5, 5), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')) elif isinstance(nTailLabels, int): # sort weights xSortedArgs = scipy.argsort(xWeights) ySortedArgs = scipy.argsort(yWeights) # label x tails for i in xSortedArgs[:nTailLabels]: plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')) for i in xSortedArgs[-nTailLabels:]: plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')) # label y tails for i in ySortedArgs[:nTailLabels]: plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')) for i in ySortedArgs[-nTailLabels:]: plot.annotate( pointLabels[i], xy=(xWeights[i],yWeights[i]), xytext=(-5, 5), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')) else: raise ValueError, "nTailLabels must be 'all' or an integer" plot.show() else: fig = plot.figure() zWeights = pc.projectedWeights[zMode] ax = fig.add_subplot(111, projection='3d') plt = ax.scatter(xWeights,yWeights, zWeights, c =c, marker='o', cmap=colourMap) ax.set_title('3D Scatter') ax.set_xlabel('Mode %d'%(xMode)) ax.set_ylabel('Mode %d'%(yMode)) ax.set_zlabel('Mode %d'%(zMode)) plot.show() return fig, plt
72bc671d9d4fc0fc8df26965fd4d24d91ab51b72
3,655,632
import math def calculatetm(seq): """ Calculate Tm of a target candidate, nearest neighbor model """ NNlist = chopseq(seq, 2, 1) NNtable = ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'] NNendtable = ['A', 'C', 'G', 'T'] NNcount = np.zeros(16) NNend = np.zeros(4) for c, NN in enumerate(NNtable): NNcount[c] = NNlist.count(NN) for c, NN in enumerate(NNendtable): NNend[c] = seq[0].count(NN) # numbers below from Sugimoto et al. NAR (1996) NNEnthalpy = np.array([-8.0, -9.4, -6.6, -5.6, -8.2, -10.9, -11.8, -6.6, -8.8, -10.5, -10.9, -9.4, -6.6, -8.8, -8.2, -8.0]) NNEntropy = np.array([-21.9, -25.5, -16.4, -15.2, -21.0, -28.4, -29.0, -16.4, -23.5, -26.4, -28.4, -25.5, -18.4, -23.5, -21.0, -21.9]) NNendEnthalpy = np.array([.6, .6, .6, .6]) NNendEntropy = np.array([-9.0, -9.0, -9.0, -9.0]) sumEnthalpy = np.sum(np.multiply(NNcount, NNEnthalpy)) + np.sum(np.multiply(NNend, NNendEnthalpy)) sumEntropy = np.sum(np.multiply(NNcount, NNEntropy)) + np.sum(np.multiply(NNend, NNendEntropy)) Tm = (sumEnthalpy * 1000)/(sumEntropy + (1.9872 * math.log(1e-7))) - 273.15 # oligo concentration: 1e-7 M sumSalt = 0.075 + (3.795 * 0.01**0.5) # monovalent: 0.075 M, bivalent: 0.01 M Tm += 16.6 * math.log10(sumSalt) # salt correction Tm -= 0.72 * 20 # formamide correction return Tm
f53c1aa09cd335d603c721fa9922d85e2de0f612
3,655,634
def get_data_shape(X_train, X_test, X_val=None): """ Creates, updates and returns data_dict containing metadata of the dataset """ # Creates data_dict data_dict = {} # Updates data_dict with lenght of training, test, validation sets train_len = len(X_train) test_len = len(X_test) data_dict.update({'train_len': train_len, 'test_len': test_len}) if X_val is not None: val_len = len(X_val) data_dict.update({'val_len': val_len}) # else : val_len = None # Updates number of dimensions of data no_of_dim = X_train.ndim data_dict.update({'no_of_dim': no_of_dim}) # Updates number of features(, number of channels, width, height) if no_of_dim == 2: no_of_features = X_train.shape[1] data_dict.update({'no_of_features': no_of_features}) elif no_of_dim == 3: channels = X_train.shape[1] features_per_c = X_train.shape[2] no_of_features = channels * features_per_c data_dict.update({'no_of_features': no_of_features, 'channels': channels, 'features_per_c': features_per_c}) elif no_of_dim == 4: channels = X_train.shape[1] height = X_train.shape[2] width = X_train.shape[3] features_per_c = height*width no_of_features = channels*features_per_c data_dict.update({'height':height, 'width':width, 'channels':channels, 'features_per_c':features_per_c, 'no_of_features':no_of_features}) return data_dict
231a334b625d0bfe6aa6e63b79de2b2226b8e684
3,655,636
def setupAnnotations(context):
    """
    set up the annotations if they haven't been set up
    already. The rest of the functions in here assume that
    this has already been set up
    """
    annotations = IAnnotations(context)
    if FAVBY not in annotations:
        annotations[FAVBY] = PersistentList()
    return annotations
f427c8619452d7143a56d4b881422d01a90ba666
3,655,637
def _get_media(media_types):
    """Helper method to map the media types."""
    get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]
                        if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)
    return list(map(get_mapped_media, media_types))
4dbbcf87c717fca2e1890a5258df023ebbca31c5
3,655,638
import ctypes def get_int_property(device_t, property): """ Search the given device for the specified string property @param device_t Device to search @param property String to search for. @return Python string containing the value, or None if not found. """ key = cf.CFStringCreateWithCString( kCFAllocatorDefault, property.encode("mac_roman"), kCFStringEncodingMacRoman ) CFContainer = iokit.IORegistryEntryCreateCFProperty( device_t, key, kCFAllocatorDefault, 0 ); number = ctypes.c_uint16() if CFContainer: output = cf.CFNumberGetValue(CFContainer, 2, ctypes.byref(number)) return number.value
75bc08117bb838e8070d3ea4d5134dfbeec9576c
3,655,639
def _get_unique_barcode_ids(pb_index, isoseq_mode=False):
    """
    Get a list of sorted, unique fw/rev barcode indices from an index object.
    """
    bc_sel = (pb_index.bcForward != -1) & (pb_index.bcReverse != -1)
    bcFw = pb_index.bcForward[bc_sel]
    bcRev = pb_index.bcReverse[bc_sel]
    bc_ids = sorted(list(set(zip(bcFw, bcRev))))
    if isoseq_mode:
        bc_ids = sorted(list(set([tuple(sorted(bc)) for bc in bc_ids])))
    return bc_ids
bdfb386d26415a7b3f9f16661d83a38a63958ad0
3,655,640
def clean_logs(test_yaml, args): """Remove the test log files on each test host. Args: test_yaml (str): yaml file containing host names args (argparse.Namespace): command line arguments for this program """ # Use the default server yaml and then the test yaml to update the default # DAOS log file locations. This should simulate how the test defines which # log files it will use when it is run. log_files = get_log_files(test_yaml, get_log_files(BASE_LOG_FILE_YAML)) host_list = get_hosts_from_yaml(test_yaml, args) command = "sudo rm -fr {}".format(" ".join(log_files.values())) print("Cleaning logs on {}".format(host_list)) if not spawn_commands(host_list, command): print("Error cleaning logs, aborting") return False return True
229f34615dc9a6f7ab9c484b9585151814656a77
3,655,641
def call_posterior_haplotypes(posteriors, threshold=0.01): """Call haplotype alleles for VCF output from a population of genotype posterior distributions. Parameters ---------- posteriors : list, PosteriorGenotypeDistribution A list of individual genotype posteriors. threshold : float Minimum required posterior probability of occurrence with in any individual for a haplotype to be included. Returns ------- haplotypes : ndarray, int, shape, (n_haplotypes, n_base) VCF sorted haplotype arrays. """ # maps of bytes to arrays and bytes to sum probs haplotype_arrays = {} haplotype_values = {} # iterate through genotype posterors for post in posteriors: # include haps based on probability of occurrence ( haps, probs, ) = post.allele_occurrence() _, weights = post.allele_frequencies(dosage=True) idx = probs >= threshold # order haps based on weighted prob haps = haps[idx] weights = weights[idx] for h, w in zip(haps, weights): b = h.tobytes() if b not in haplotype_arrays: haplotype_arrays[b] = h haplotype_values[b] = 0 haplotype_values[b] += w # remove reference allele if present refbytes = None for b, h in haplotype_arrays.items(): if np.all(h == 0): # ref allele refbytes = b if refbytes is not None: haplotype_arrays.pop(refbytes) haplotype_values.pop(refbytes) # combine all called haplotypes into array n_alleles = len(haplotype_arrays) + 1 n_base = posteriors[0].genotypes.shape[-1] haplotypes = np.full((n_alleles, n_base), -1, np.int8) values = np.full(n_alleles, -1, float) for i, (b, h) in enumerate(haplotype_arrays.items()): p = haplotype_values[b] haplotypes[i] = h values[i] = p haplotypes[-1][:] = 0 # ref allele values[-1] = values.max() + 1 order = np.flip(np.argsort(values)) return haplotypes[order]
46c26eb38c693d979ea4234af606b3b07ad1e75e
3,655,642
def get_discorded_labels():
    """
    Get videos with citizen discorded labels
    Partial labels will only be set by citizens
    """
    return get_video_labels(discorded_labels)
6f3cbaf09b43956d14d9abf5cf4e77734c152d2f
3,655,644
def set_common_tags(span: object, result: object):
    """Function used to set a series of common tags to a span object"""
    if not isinstance(result, dict):
        return span
    for key, val in result.items():
        if key.lower() in common_tags:
            span.set_tag(key, val)
    return span
365230fb6a69b94684aeac25d14fa4275c1549f8
3,655,645
import time

def local_timezone():
    """
    Returns:
        (str): Name of current local timezone
    """
    try:
        return time.tzname[0]
    except (IndexError, TypeError):
        return ""
c97c11582b27d8aa0205555535616d6ea11775b9
3,655,646
import getpass

def ask_credentials():
    """Interactive function asking the user for ASF credentials

    :return: tuple of username and password
    :rtype: tuple
    """
    # SciHub account details (will be asked by execution)
    print(
        " If you do not have a ASF/NASA Earthdata user account"
        " go to: https://search.asf.alaska.edu/ and register"
    )
    uname = input(" Your ASF/NASA Earthdata Username:")
    pword = getpass.getpass(" Your ASF/NASA Earthdata Password:")
    return uname, pword
a601a460b3aeddf9939f3acf267e58fdaf9ed7cd
3,655,647
def lab2lch(lab): """CIE-LAB to CIE-LCH color space conversion. LCH is the cylindrical representation of the LAB (Cartesian) colorspace Parameters ---------- lab : array_like The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must have at least 3 elements, corresponding to the ``L``, ``a``, and ``b`` color channels. Subsequent elements are copied. Returns ------- out : ndarray The image in LCH format, in a N-D array with same shape as input `lab`. Raises ------ ValueError If `lch` does not have at least 3 color channels (i.e. l, a, b). Notes ----- The Hue is expressed as an angle between ``(0, 2*pi)`` Examples -------- >>> from skimage import data >>> from skimage.color import rgb2lab, lab2lch >>> img = data.astronaut() >>> img_lab = rgb2lab(img) >>> img_lch = lab2lch(img_lab) """ lch = _prepare_lab_array(lab) a, b = lch[..., 1], lch[..., 2] lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b) return lch
711d23d452413d738af162ac5b9e3f34c1a4eab6
3,655,648
def callattice(twotheta, energy_kev=17.794, hkl=(1, 0, 0)):
    """
    Calculate cubic lattice parameter, a, from reflection two-theta
    :param twotheta: Bragg angle, deg
    :param energy_kev: energy in keV
    :param hkl: reflection (cubic only)
    :return: float, lattice constant
    """
    qmag = calqmag(twotheta, energy_kev)
    dspace = q2dspace(qmag)
    return dspace * np.sqrt(np.sum(np.square(hkl)))
2718e4c44e08f5038ff4119cf477775ed9f3a678
3,655,650
def reset_password( *, db: Session = Depends(get_db), current_user: User = Depends(get_current_active_user), background_tasks: BackgroundTasks, ): """reset current user password""" email = current_user.email # send confirm email if settings.EMAILS_ENABLED and email: confirm_token = create_access_token( subject=email, expires_delta=timedelta(settings.EMAIL_CONFIRM_TOKEN_EXPIRE) ) background_tasks.add_task( send_reset_password_email, email_to=email, token=confirm_token ) return {"msg": "Password reset email sent"}
1f292188b3927c26eb41634acb7fb99e398e94b6
3,655,651
def rule_valid_histone_target(attr):
    """
    {
        "applies" : ["ChIP-Seq", "experiment_target_histone"],
        "description" : "'experiment_target_histone' attributes must be 'NA' only for ChIP-Seq Input"
    }
    """
    histone = attr.get('experiment_target_histone', [''])[0]
    if attr.get('experiment_type', [""])[0].lower() in ['ChIP-Seq Input'.lower()]:
        return histone == 'NA'
    else:
        return histone != 'NA'
0a10f09c6b9e50cf01583d0c803e5112629e503b
3,655,652
def extend(curve: CustomCurve, deg): """returns curve over the deg-th relative extension""" E = curve.EC q = curve.q K = curve.field if q % 2 != 0: R = K["x"] pol = R.irreducible_element(deg) Fext = GF(q ** deg, name="z", modulus=pol) return E.base_extend(Fext) charac = K.characteristic() R = GF(charac)["x"] ext_deg = q ** deg pol = R.irreducible_element(deg * ZZ(log(q, charac))) Kext = GF(ext_deg, name="ex", modulus=pol) gKext = Kext.gen() h = gKext ** ((ext_deg - 1) // (q - 1)) assert charac ** (h.minpoly().degree()) == q H = GF(q, name="h", modulus=h.minpoly()) inclusion = H.hom([h]) new_coefficients = [ inclusion(stupid_coerce_K_to_L(a, K, H)) for a in E.a_invariants() ] EE = EllipticCurve(Kext, new_coefficients) return EE
8d750b40d91d10d6b51c75765e2083300d7dccf6
3,655,653
def flatten3D(inputs: tf.Tensor) -> tf.Tensor:
    """
    Flatten the given ``inputs`` tensor to 3 dimensions.

    :param inputs: >=3d tensor to be flattened
    :return: 3d flatten tensor
    """
    shape = inputs.get_shape().as_list()
    if len(shape) == 3:
        return inputs
    assert len(shape) > 3
    return tf.reshape(inputs, [tf.shape(inputs)[0], tf.shape(inputs)[1],
                               np.prod(inputs.get_shape().as_list()[2:])])
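# Hedged usage sketch for flatten3D, assuming TensorFlow 2.x eager tensors and NumPy:
# a rank-4 batch of feature maps is collapsed so all trailing dims become one axis.
import numpy as np
import tensorflow as tf

x = tf.zeros([2, 5, 4, 3])   # (batch, time, height, channels)
y = flatten3D(x)
print(y.shape)               # (2, 5, 12) -- trailing dims multiplied together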
11c9c7f7ab955594401468c64323f8f3a52dbe81
3,655,654
def get_classes(dataset):
    """Get class names of a dataset."""
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name

    if mmcv.is_str(dataset):
        if dataset in alias2name:
            labels = eval(alias2name[dataset] + '_classes()')
        else:
            raise ValueError('Unrecognized dataset: {}'.format(dataset))
    else:
        raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
    return labels
d307793a85deef3be239d7dbff746c7c9643dc1b
3,655,655
def split_exclude_string(people):
    """
    Function to split a given text of person names to exclude,
    with each name separated by a comma, e.g. ``Konrad, Titipat``
    """
    people = people.replace('Mentor: ', '').replace('Lab-mates: ', '').replace('\r\n', ',').replace(';', ',')
    people_list = people.split(',')
    return [p.strip() for p in people_list if p.strip() != '']
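# Hedged example of split_exclude_string on a made-up exclusion string:
names = split_exclude_string('Mentor: Konrad; Lab-mates: Titipat, Daniel')
print(names)   # ['Konrad', 'Titipat', 'Daniel']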
5748a52039548175923f53384474f40ac8fb5e38
3,655,656
from datetime import datetime


def now(tz=DEFAULT_TZ):
    """
    Get the current datetime.

    :param tz: The preferred time-zone, defaults to DEFAULT_TZ
    :type tz: TzInfo (or similar pytz time-zone)

    :return: A time-zone aware datetime set to now
    :rtype: datetime
    """
    return datetime.now(tz=tz)
1dcdd78898b726576f69f01cb9f4bfe3aeaef29d
3,655,657
def peek_with_kwargs(init, args=[], permissive=False):
    """
    Make datatypes passing keyworded arguments to the constructor.

    This is a factory function; returns the actual `peek` routine.

    Arguments:

        init (callable): type constructor.

        args (iterable): arguments NOT to be keyworded; order does matter.

        permissive (bool): missing positional arguments are set to None (*new in 0.8.5*).

    Returns:

        callable: deserializer (`peek` routine).

    All the peeked attributes that are not referenced in `args` are passed to
    `init` as keyworded arguments.
    """
    if permissive:
        def try_peek(store, attr, container, _stack=None):
            try:
                return store.peek(attr, container, _stack=_stack)
            except KeyError:
                return None

        def peek(store, container, _stack=None):
            return init(\
                *[ try_peek(store, attr, container, _stack) for attr in args ], \
                **dict([ (attr, store.peek(attr, container, _stack=_stack)) \
                    for attr in container if attr not in args ]))
    else:
        def peek(store, container, _stack=None):
            return init(\
                *[ store.peek(attr, container, _stack=_stack) for attr in args ], \
                **dict([ (attr, store.peek(attr, container, _stack=_stack)) \
                    for attr in container if attr not in args ]))
    return peek
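# Hedged usage sketch for peek_with_kwargs: _DictStore and Point3 below are made-up
# stand-ins for the real attribute store and datatype, just to show how the generated
# peek routine splits positional vs. keyworded attributes.
class _DictStore:
    def peek(self, attr, container, _stack=None):
        return container[attr]

class Point3:
    def __init__(self, x, y=0, z=0):
        self.x, self.y, self.z = x, y, z

peek_point = peek_with_kwargs(Point3, args=['x'])
p = peek_point(_DictStore(), {'x': 1, 'y': 2, 'z': 3})
print(p.x, p.y, p.z)   # 1 2 3 -- 'x' passed positionally, 'y'/'z' as keywords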
d06df21ab439da1cacb52befa6c619f1efa23d1a
3,655,658
def idc_asset_manage(request, aid=None, action=None):
    """
    Manage IDC
    """
    if request.user.has_perms(['asset.view_asset', 'asset.edit_asset']):
        page_name = ''
        if aid:
            idc_list = get_object_or_404(IdcAsset, pk=aid)
            if action == 'edit':
                page_name = '编辑IDC机房'
            if action == 'delete':
                idc_list.delete()
                return redirect('idc_asset_list')
        else:
            idc_list = IdcAsset()
            action = 'add'
            page_name = '新增IDC机房'

        if request.method == 'POST':
            form = IdcAssetForm(request.POST, instance=idc_list)
            if form.is_valid():
                if action == 'add':
                    form.save()
                    return redirect('idc_asset_list')
                if action == 'edit':
                    form.save()
                    return redirect('idc_asset_list')
        else:
            form = IdcAssetForm(instance=idc_list)
        return render(request, 'asset_idc_manage.html',
                      {"form": form, "page_name": page_name, "action": action})
    else:
        raise Http404
7fbf1729c87e9e9921f19cf5cba2810879958848
3,655,659
def get_detected_column_types(df):
    """
    Get the data type of each column ('DATETIME', 'NUMERIC' or 'STRING')

    Parameters:
        df (df): pandas dataframe

    Returns:
        df (df): dataframe with all columns converted to the detected datatypes
    """
    assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'

    for c in df.columns:
        # Convert column to string
        col_data = df[c].map(str)
        col_data = col_data.replace("NaT", None)
        col_data = col_data.replace("NaN", None)

        # Check NULL column
        if(df[c].isnull().values.all()):
            continue

        # Check DATETIME
        try:
            # Check whether the column can be converted to datetime;
            # if the column is already datetime, skip the conversion
            if 'datetime' in str(col_data.dtype):
                continue
            df[c] = pd.to_datetime(col_data)
            continue
        except ValueError:
            pass

        # Check NUMERIC
        try:
            # Drop NaN rows
            series = df[c].dropna()
            # if the column is already int or float, skip the conversion
            if 'int' in str(col_data.dtype) or 'float' in str(col_data.dtype):
                continue
            # Check if it can be converted to numeric
            df[c] = pd.to_numeric(series)
        except ValueError:
            pass

    return df
23647127d0e5a125e06fb1932e74ba5f9c885ded
3,655,661
def distance(coords):
    """Calculates the distance of a path between multiple points

    Arguments:
        coords -- List of coordinates, e.g. [(0,0), (1,1)]

    Returns: Total distance as a float
    """
    distance = 0
    for p1, p2 in zip(coords[:-1], coords[1:]):
        distance += ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
    return distance
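# Quick sanity check for distance (made-up coordinates): the legs of a 3-4-5 right triangle.
print(distance([(0, 0), (3, 0), (3, 4)]))   # 3.0 + 4.0 = 7.0
print(distance([(0, 0), (3, 4)]))           # 5.0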
9c6088b740f42b839d4aa482c276fe4cc5dc8114
3,655,662
def roll_dice(dicenum, dicetype, modifier=None, conditional=None, return_tuple=False):
    """
    This is a standard dice roller.

    Args:
        dicenum (int): Number of dice to roll (the result to be added).
        dicetype (int): Number of sides of the dice to be rolled.
        modifier (tuple): A tuple `(operator, value)`, where operator is one of
            `"+"`, `"-"`, `"/"` or `"*"`. The result of the dice roll(s) will be
            modified by this value.
        conditional (tuple): A tuple `(conditional, value)`, where conditional is
            one of `"=="`, `"<"`, `">"`, `">="`, `"<="` or `"!="`. This allows the
            roller to directly return a result depending on if the conditional
            was passed or not.
        return_tuple (bool): Return a tuple with all individual roll results or not.

    Returns:
        roll_result (int): The result of the roll + modifiers. This is the
            default return.
        condition_result (bool): A True/False value returned if `conditional` is
            set but not `return_tuple`. This effectively hides the result of the roll.
        full_result (tuple): If `return_tuple` is `True`, instead return a tuple
            `(result, outcome, diff, rolls)`. Here, `result` is the normal result
            of the roll + modifiers, `outcome` and `diff` are the boolean result
            of the roll and the absolute difference to the `conditional` input;
            they will be `None` if `conditional` is not set. `rolls` is itself a
            tuple holding all the individual rolls in the case of multiple die-rolls.

    Raises:
        TypeError if non-supported modifiers or conditionals are given.

    Notes:
        All input numbers are converted to integers.

    Examples:
        print roll_dice(2, 6) # 2d6
        <<< 7
        print roll_dice(1, 100, ('+', 5)) # 1d100 + 5
        <<< 34
        print roll_dice(1, 20, conditional=('<', 10)) # let's say we roll 3
        <<< True
        print roll_dice(3, 10, return_tuple=True)
        <<< (11, None, None, (2, 5, 4))
        print roll_dice(2, 20, ('-', 2), conditional=('>=', 10), return_tuple=True)
        <<< (8, False, 2, (4, 6)) # roll was 4 + 6 - 2 = 8
    """
    dicenum = int(dicenum)
    dicetype = int(dicetype)

    # roll all dice, remembering each roll
    rolls = tuple([randint(1, dicetype) for roll in range(dicenum)])
    result = sum(rolls)

    if modifier:
        # make sure to check types well before eval
        mod, modvalue = modifier
        if mod not in ('+', '-', '*', '/'):
            raise TypeError("Non-supported dice modifier: %s" % mod)
        modvalue = int(modvalue)  # for safety
        result = eval("%s %s %s" % (result, mod, modvalue))

    outcome, diff = None, None
    if conditional:
        # make sure to check types well before eval
        cond, condvalue = conditional
        if cond not in ('>', '<', '>=', '<=', '!=', '=='):
            raise TypeError("Non-supported dice result conditional: %s" % conditional)
        condvalue = int(condvalue)  # for safety
        outcome = eval("%s %s %s" % (result, cond, condvalue))  # True/False
        diff = abs(result - condvalue)

    if return_tuple:
        return result, outcome, diff, rolls
    else:
        if conditional:
            return outcome
        else:
            return result
acbc97e4b7720129788c8c5d5d9a1d51936d9dc1
3,655,663
import math


def build_central_hierarchical_histogram_computation(
        lower_bound: float,
        upper_bound: float,
        num_bins: int,
        arity: int = 2,
        max_records_per_user: int = 1,
        epsilon: float = 1,
        delta: float = 1e-5,
        secure_sum: bool = False):
    """Create the tff federated computation for central hierarchical histogram aggregation.

    Args:
        lower_bound: A `float` specifying the lower bound of the data range.
        upper_bound: A `float` specifying the upper bound of the data range.
        num_bins: The integer number of bins to compute.
        arity: The branching factor of the tree. Defaults to 2.
        max_records_per_user: The maximum number of records each user is allowed
            to contribute. Defaults to 1.
        epsilon: Differential privacy parameter. Defaults to 1.
        delta: Differential privacy parameter. Defaults to 1e-5.
        secure_sum: A boolean deciding whether to use secure aggregation.
            Defaults to `False`.

    Returns:
        A tff.federated_computation function to perform central tree aggregation.
    """
    if upper_bound < lower_bound:
        raise ValueError(f'upper_bound: {upper_bound} is smaller than '
                         f'lower_bound: {lower_bound}.')

    if num_bins <= 0:
        raise ValueError(f'num_bins: {num_bins} is smaller than or equal to zero.')

    if arity < 2:
        raise ValueError(f'Arity should be at least 2. '
                         f'arity={arity} is given.')

    if max_records_per_user < 1:
        raise ValueError(f'Maximum records per user should be at least 1. '
                         f'max_records_per_user={max_records_per_user} is given.')

    if epsilon < 0 or delta < 0 or delta > 1:
        raise ValueError(f'Privacy parameters in wrong range: '
                         f'(epsilon, delta): ({epsilon}, {delta})')

    if epsilon == 0.:
        stddev = 0.
    else:
        stddev = max_records_per_user * _find_noise_multiplier(
            epsilon, delta, steps=math.ceil(math.log(num_bins, arity)))

    central_tree_aggregation_factory = hierarchical_histogram_factory.create_central_hierarchical_histogram_factory(
        stddev, arity, max_records_per_user, secure_sum=secure_sum)

    return _build_hierarchical_histogram_computation(
        lower_bound, upper_bound, num_bins, central_tree_aggregation_factory)
fcd35bc2df5f61174d00079638dda0c04c1490ff
3,655,664
def initialise_halo_params():
    """Initialise the basic parameters needed to simulate a forming Dark matter halo.

    Args:
        None

    Returns:
        G: gravitational constant.
        epsilon: softening parameter.
        limit: width of the simulated universe.
        radius: simulated radius of each particle
            (for proper handling of boundary conditions).
        num_pos_particles: number of positive mass particles.
        num_neg_particles: number of negative mass particles.
        chunks_value: dask chunks value.
        time_steps: number of time steps to simulate.
    """
    G = 1.0
    epsilon = 0.07
    limit = 80000
    radius = 4
    num_pos_particles = 5000
    num_neg_particles = 45000
    chunks_value = (num_pos_particles+num_neg_particles)/5.0
    time_steps = 1000
    return G, epsilon, limit, radius, num_pos_particles, num_neg_particles, chunks_value, time_steps
ee3311fd17a40e8658f11d2ddf98d0ff8eb27a6d
3,655,665
def read_data(image_paths, label_list, image_size, batch_size, max_nrof_epochs, num_threads,
              shuffle, random_flip, random_brightness, random_contrast):
    """
    Creates Tensorflow Queue to batch load images. Applies transformations to images as they are loaded.

    :param image_paths: image paths to load
    :param label_list: class labels for image paths
    :param image_size: size to resize images to
    :param batch_size: num of images to load in batch
    :param max_nrof_epochs: total number of epochs to read through image list
    :param num_threads: num threads to use
    :param shuffle: Shuffle images
    :param random_flip: Apply random horizontal flip to image
    :param random_brightness: Apply random brightness transform to image
    :param random_contrast: Apply random contrast transform to image
    :return: images and labels of batch_size
    """
    images = ops.convert_to_tensor(image_paths, dtype=tf.string)
    labels = ops.convert_to_tensor(label_list, dtype=tf.int32)

    # Makes an input queue
    input_queue = tf.train.slice_input_producer((images, labels),
                                                num_epochs=max_nrof_epochs, shuffle=shuffle, )

    images_labels = []
    imgs = []
    lbls = []
    for _ in range(num_threads):
        image, label = read_image_from_disk(filename_to_label_tuple=input_queue)
        image = tf.random_crop(image, size=[image_size, image_size, 3])
        image.set_shape((image_size, image_size, 3))
        image = tf.image.per_image_standardization(image)

        if random_flip:
            image = tf.image.random_flip_left_right(image)

        if random_brightness:
            image = tf.image.random_brightness(image, max_delta=0.3)

        if random_contrast:
            image = tf.image.random_contrast(image, lower=0.2, upper=1.8)

        imgs.append(image)
        lbls.append(label)
        images_labels.append([image, label])

    image_batch, label_batch = tf.train.batch_join(images_labels,
                                                   batch_size=batch_size,
                                                   capacity=4 * num_threads,
                                                   enqueue_many=False,
                                                   allow_smaller_final_batch=True)
    return image_batch, label_batch
2bbb7f1be38764634e198f83b82fafb730ec3afa
3,655,666
def reorder_matrix(m, d):
    """
    Reorder similarity matrix : put species in same cluster together.

    INPUT:
        m - similarity matrix
        d - medoid dictionary : {medoid : [list of species index in cluster]}

    OUTPUT:
        m in new order
        new_order - order of species indexes in matrix
    """
    new_order = []
    for i, med_class in enumerate(d.values()):
        new_order.append(med_class)
    return m[np.concatenate(new_order), :], new_order
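# Hedged example for reorder_matrix with a made-up 4x4 similarity matrix and two clusters;
# rows are regrouped so that members of each cluster become adjacent.
import numpy as np

m = np.arange(16).reshape(4, 4)
d = {0: [0, 2], 1: [1, 3]}                 # medoid -> member row indices
reordered, new_order = reorder_matrix(m, d)
print(new_order)                           # [[0, 2], [1, 3]]
print(reordered[:, 0])                     # rows now in order 0, 2, 1, 3 -> [ 0  8  4 12]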
5d203ec6f61afe869008fa6749d18946f128ac87
3,655,667
def reward_penalized_log_p(mol):
    """
    Reward that consists of log p penalized by SA and # long cycles,
    as described in (Kusner et al. 2017). Scores are normalized based on the
    statistics of 250k_rndm_zinc_drugs_clean.smi dataset
    :param mol: rdkit mol object
    :return: float
    """
    # normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi
    logP_mean = 2.4570953396190123
    logP_std = 1.434324401111988
    SA_mean = -3.0525811293166134
    SA_std = 0.8335207024513095
    cycle_mean = -0.0485696876403053
    cycle_std = 0.2860212110245455

    log_p = MolLogP(mol)
    SA = -calculateScore(mol)

    # cycle score
    cycle_list = nx.cycle_basis(nx.Graph(
        Chem.rdmolops.GetAdjacencyMatrix(mol)))
    if len(cycle_list) == 0:
        cycle_length = 0
    else:
        cycle_length = max([len(j) for j in cycle_list])
    if cycle_length <= 6:
        cycle_length = 0
    else:
        cycle_length = cycle_length - 6
    cycle_score = -cycle_length

    normalized_log_p = (log_p - logP_mean) / logP_std
    normalized_SA = (SA - SA_mean) / SA_std
    normalized_cycle = (cycle_score - cycle_mean) / cycle_std
    return normalized_log_p + normalized_SA + normalized_cycle
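# Hedged usage sketch: this assumes RDKit, NetworkX and an SA scorer providing
# calculateScore (e.g. the sascorer module from RDKit contrib) are already imported
# by the surrounding module, as the function above relies on them.
from rdkit import Chem

mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')   # aspirin
print(reward_penalized_log_p(mol))                   # higher is "better" under this reward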
e3e5ebfabf31e4980dc6f3b6c998a08444ce9851
3,655,669
def loadmat(filename, variable_names=None):
    """
    load mat file from h5py files
    :param filename: mat filename
    :param variable_names: list of variable names that should be loaded
    :return: dictionary of loaded data
    """
    data = {}
    matfile = h5py.File(filename, 'r')
    if variable_names is None:
        for key in matfile.keys():
            data.update({key: matfile[key][()]})
    else:
        for key in variable_names:
            if not key in matfile.keys():
                raise RuntimeError('Variable: "' + key + '" is not in file: ' + filename)
            data.update({key: matfile[key][()]})
    return data
3b9183968fba56d57c705bce0ec440c630cc0031
3,655,670
def date_start_search(line):
    """Detect the start date of a scheduled event and return it as a str."""
    # full-width space
    zen_space = '　'
    # full-width zero
    zen_zero = '0'
    nichi = '日'
    dollar = '$'
    # Hack: handle single-digit days by replacing full-width spaces with full-width zeros
    line = line.replace(zen_space, zen_zero)
    index = line.find(nichi)
    # Correct mis-detections using the relative position of the day marker and the weekday
    index_first_dollar = line.find(dollar, index + 1)
    if index + 1 != index_first_dollar:
        index = index_first_dollar
    # ex. 1 → 01
    #if line[index - 1] == zen_space:
    #    line[index - 1] = zen_zero
    return zenhan.z2h(line[index - 2:index])
f89e332a2a0031acdf6fa443ea9752e528674b32
3,655,671
def train_sub1(sess, x, y, bbox_preds, x_sub, y_sub, nb_classes,
               nb_epochs_s, batch_size, learning_rate, data_aug, lmbda,
               aug_batch_size, rng, img_rows=48, img_cols=48, nchannels=3):
    """
    This function creates the substitute by alternatively augmenting the training data
    and training the substitute.
    :param sess: TF session
    :param x: input TF placeholder
    :param y: output TF placeholder
    :param bbox_preds: output of black-box model predictions
    :param x_sub: initial substitute training data
    :param y_sub: initial substitute training labels
    :param nb_classes: number of output classes
    :param nb_epochs_s: number of epochs to train substitute model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param data_aug: number of times substitute training data is augmented
    :param lmbda: lambda from arxiv.org/abs/1602.02697
    :param rng: numpy.random.RandomState instance
    :return:
    """
    # Define TF model graph (for the black-box model)
    model_sub = ModelSubstitute('model_s', nb_classes)
    preds_sub = model_sub.get_logits(x)
    loss_sub = CrossEntropy(model_sub, smoothing=0)
    print("Defined TensorFlow model graph for the substitute.")

    # Define the Jacobian symbolically using TensorFlow
    grads = jacobian_graph(preds_sub, x, nb_classes)

    # Train the substitute and augment dataset alternatively
    for rho in xrange(data_aug):
        print("Substitute training epoch #" + str(rho))
        train_params = {
            'nb_epochs': nb_epochs_s,
            'batch_size': batch_size,
            'learning_rate': learning_rate
        }
        #with TemporaryLogLevel(logging.WARNING, "cleverhans.utils.tf"):
        train(sess, loss_sub, x, y, x_sub,
              to_categorical(y_sub, nb_classes),
              init_all=False, args=train_params, rng=rng)
              #var_list=model_sub.get_params())

        # If we are not at last substitute training iteration, augment dataset
        if rho < data_aug - 1:
            print("Augmenting substitute training data.")
            # Perform the Jacobian augmentation
            lmbda_coef = 2 * int(int(rho / 3) != 0) - 1
            # print(x.shape)
            # print(x_sub.shape)
            # print(y_sub.shape)
            #print(grads.shape)
            x_sub = jacobian_augmentation(sess, x, x_sub, y_sub, grads,
                                          lmbda_coef * lmbda, aug_batch_size)

            print("Labeling substitute training data.")
            # Label the newly generated synthetic points using the black-box
            y_sub = np.hstack([y_sub, y_sub])
            x_sub_prev = x_sub[int(len(x_sub)/2):]
            eval_params = {'batch_size': batch_size}
            #tmp = batch_eval(sess, [x], [bbox_preds], [x_sub_prev],args=eval_params)
            tmp = batch_eval(sess, [x], [bbox_preds], [x_sub_prev], batch_size=batch_size)
            print(tmp)
            bbox_val = tmp[0]

            # Note here that we take the argmax because the adversary
            # only has access to the label (not the probabilities) output
            # by the black-box model
            y_sub[int(len(x_sub)/2):] = np.argmax(bbox_val, axis=1)

    return model_sub, preds_sub
a5433f78c60f6beec14a6d4fd414d45dc8c65999
3,655,672
from math import floor


def divideArray(array, factor):
    """Divide the array into #factor subarrays; every subarray has the same
    number of elements except for the last one."""
    factor = min(factor, len(array))
    length = floor(len(array) * 1.0 / factor)
    res = []
    for i in range(factor - 1):
        res = res + list([array[i * length:(i + 1) * length]])
    return list(res + list([array[length * (factor - 1):]]))
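# Hedged example for divideArray: seven items split into three chunks; the last chunk
# absorbs the remainder.
print(divideArray([1, 2, 3, 4, 5, 6, 7], 3))   # [[1, 2], [3, 4], [5, 6, 7]]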
d94441e6036e78f9b541b9d170d03681740c81d3
3,655,673
def argMax(scores):
    """
    Returns the key with the highest value.
    """
    if len(scores) == 0:
        return None
    all_items = list(scores.items())
    values = [x[1] for x in all_items]
    maxIndex = values.index(max(values))
    return all_items[maxIndex][0]
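# Hedged example for argMax on a small score dict:
print(argMax({'left': 0.2, 'stay': 0.5, 'right': 0.3}))   # 'stay'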
9310988a0f8aa1279882d060ade7febdc102b0c5
3,655,674
def rotateright(arr, k) -> list:
    """
    Rotate the array right side k number of times, in place (cycle-leader rotation).
    """
    n = len(arr)
    if n == 0:
        return arr
    k = k % n
    if k == 0:
        return arr
    count = 0
    start = 0
    while count < n:
        # follow one rotation cycle starting at `start`
        poi = start
        temp = arr[start]
        while True:
            poi = (poi + k) % n
            arr[poi], temp = temp, arr[poi]
            count += 1
            if poi == start:
                break
        start += 1
    return arr
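# Hedged examples for rotateright; the second case has gcd(len, k) > 1 and exercises
# more than one rotation cycle.
print(rotateright([1, 2, 3, 4, 5], 2))   # [4, 5, 1, 2, 3]
print(rotateright([1, 2, 3, 4], 2))      # [3, 4, 1, 2]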
7d303f5b57cb10a1a28f5c78ffa848d2a9cb593f
3,655,675
def get_ratio(numerator, denominator):
    """Get ratio from numerator and denominator."""
    return (
        0
        if not denominator
        else round(float(numerator or 0) / float(denominator), 2)
    )
e51a860292d54d2e44909ad878d0b1d8e66c37c2
3,655,677
import io


def create_app():
    """ Create a Flask application for face alignment

    Returns:
        flask.Flask -> Flask application
    """
    app = Flask(__name__)
    model = setup_model()
    app.config.from_mapping(MODEL=model)

    @app.route("/", methods=["GET"])
    def howto():
        instruction = (
            "Send POST request to /align to fix face orientation in input image"
            "\nex."
            "\n\tcurl -X POST -F 'image=@/path/to/face.jpg' --output output.jpg localhost:5000/align"
        )
        return instruction

    @app.route("/align", methods=["POST"])
    def align():
        data = request.files["image"]
        img_str = data.read()
        nparr = np.fromstring(img_str, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)

        faces = model.detect(img)
        if len(faces) == 0:
            return "No face found. Try again", 400
        elif len(faces) > 1:
            return "Too many faces found. Try again", 400
        else:
            face = faces[0]
            rotated_image = rotate_bound(img, face.angle)

            # Encode image
            is_completed, buf = cv2.imencode(".jpg", rotated_image)
            if not is_completed:
                return "Unexpected encoding error. Try again", 400
            byte_buffer = io.BytesIO(buf.tostring())
            return send_file(
                byte_buffer,
                "image/jpeg",
                as_attachment=True,
                attachment_filename="output.jpg",
            )

    return app
d9a5d59f64dc9227949bbe73065d18bcc8142b9d
3,655,678
def grad_clip(x: Tensor) -> Tensor:
    """ Clips too big and too small gradients.

    Example::

        grad = grad_clip(grad)

    Args:
        x(:obj:`Tensor`): Gradient with too large or small values

    Returns:
        :obj:`Tensor`: Clipped Gradient
    """
    x[x > 5] = 5
    x[x < -5] = -5
    return x
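# Hedged example for grad_clip, assuming the Tensor annotation above refers to PyTorch tensors:
import torch

g = torch.tensor([12.0, -7.5, 3.0])
print(grad_clip(g))   # tensor([ 5., -5.,  3.])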
5c07c4432fda16d06bda8569aca34cbbaf45b076
3,655,679
def unfold_kernel(kernel):
    """
    In pytorch format, kernel is stored as [out_channel, in_channel, height, width]
    Unfold kernel into a 2-dimension weights: [height * width * in_channel, out_channel]
    :param kernel: numpy ndarray
    :return:
    """
    k_shape = kernel.shape
    weight = np.zeros([k_shape[1] * k_shape[2] * k_shape[3], k_shape[0]])
    for i in range(k_shape[0]):
        weight[:, i] = np.reshape(kernel[i, :, :, :], [-1])
    return weight
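# Hedged shape check for unfold_kernel with a made-up kernel:
# 8 output channels, 3 input channels, 5x5 taps.
import numpy as np

kernel = np.random.randn(8, 3, 5, 5)
weight = unfold_kernel(kernel)
print(weight.shape)   # (75, 8) -- i.e. (in_channel * height * width, out_channel)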
7106ead9b4953024731d918fb3c356b056bca156
3,655,680
def _parse_polyline_locations(locations, max_n_locations):
    """Parse and validate locations in Google polyline format.

    The "locations" argument of the query should be a string of ascii characters above 63.

    Args:
        locations: The location query string.
        max_n_locations: The max allowable number of locations, to keep query times reasonable.

    Returns:
        lats: List of latitude floats.
        lons: List of longitude floats.

    Raises:
        ClientError: If too many locations are given, or if the location string can't be parsed.
    """
    # The Google maps API prefixes their polylines with 'enc:'.
    if locations and locations.startswith("enc:"):
        locations = locations[4:]

    try:
        latlons = polyline.decode(locations)
    except Exception:
        msg = "Unable to parse locations as polyline."
        raise ClientError(msg)

    # The polyline result is a list of (lat, lon) tuples.
    lats = [p[0] for p in latlons]
    lons = [p[1] for p in latlons]

    # Check number.
    n_locations = len(lats)
    if n_locations > max_n_locations:
        msg = f"Too many locations provided ({n_locations}), the limit is {max_n_locations}."
        raise ClientError(msg)

    return lats, lons
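# Hedged round-trip example using the `polyline` package (the same library the parser relies on);
# ClientError is assumed to be defined elsewhere in the module.
import polyline

encoded = polyline.encode([(38.5, -120.2), (40.7, -120.95)])
lats, lons = _parse_polyline_locations("enc:" + encoded, max_n_locations=100)
print(lats, lons)   # [38.5, 40.7] [-120.2, -120.95]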
3ebff7a35c86bad5986ee87c194dd9128936abb0
3,655,681
def dense(data, weight, bias=None, out_dtype=None):
    """The default implementation of dense in topi.

    Parameters
    ----------
    data : tvm.Tensor
        2-D with shape [batch, in_dim]

    weight : tvm.Tensor
        2-D with shape [out_dim, in_dim]

    bias : tvm.Tensor, optional
        1-D with shape [out_dim]

    out_dtype : str
        The output type. This is used for mixed precision.

    Returns
    -------
    output : tvm.Tensor
        2-D with shape [batch, out_dim]
    """
    assert len(data.shape) == 2 and len(weight.shape) == 2, \
        "only support 2-dim dense"
    if bias is not None:
        assert len(bias.shape) == 1
    if out_dtype is None:
        out_dtype = data.dtype
    batch, in_dim = data.shape
    out_dim, _ = weight.shape
    k = tvm.reduce_axis((0, in_dim), name='k')
    matmul = tvm.compute((batch, out_dim), \
                         lambda i, j: tvm.sum(data[i, k].astype(out_dtype) * \
                                              weight[j, k].astype(out_dtype), axis=k), \
                         name='T_dense', tag='dense')
    if bias is not None:
        matmul = tvm.compute((batch, out_dim), \
                             lambda i, j: matmul[i, j] + bias[j].astype(out_dtype), \
                             tag=tag.BROADCAST)
    return matmul
ac5550f901d1a7c94fee4b8e65fa9957d4b2ff78
3,655,682