Dataset columns:
content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, 0 to 3.66M
def mmap_zeros(shape, dtype):
    """ Create an empty shared memory array. """
    new = anonymousmemmap(shape, dtype)
    new[:] = 0.0
    return new
5f78b5e227ab7f5115bc04af2e3f7ea62a769cd8
3,649,503
from collections import defaultdict
from typing import Iterable


def edge_disjoint_paths(g: Graph, source: Node, sink: Node) -> Iterable:
    """
    Given a directed graph G and two nodes s and t, find k paths from s to t
    such that no two paths share an edge.

    Menger's Theorem: Given a directed graph G with nodes s, t, the maximum
    number of edge-disjoint s-t paths equals the minimum number of edges
    whose removal separates s from t.

    Suppose you want to send k large files from s to t but never have two
    files use the same network link (to avoid congestion on the links).
    """
    for u in g:
        for v in g[u]:
            g[u][v].cap = 1
    fifo_push_relabel(g, source, sink)

    # use dfs to find the paths
    S, paths = [source], []
    visited = defaultdict(lambda: False)
    pred = defaultdict(lambda: None)
    while S:
        u = S.pop()
        if u == sink:
            path = [sink]
            current = pred[sink]
            while current is not None:
                path.append(current)
                current = pred[current]
            paths.append(tuple(reversed(path)))
            continue
        if visited[u]:
            continue
        visited[u] = True
        for v in adjacency(g, u):
            # follow only flow-carrying edges into unvisited nodes
            # (the original tested `visited[u]`, which is always True here)
            if not visited[v] and g[u][v].flow:
                S.append(v)
                pred[v] = u
    return iter(paths)
d638923e9091eefcee0c0e2623adba095e33cc0c
3,649,504
def listToMLlibVectorUDF(col):
    """ Map struct column from list to MLlib vector """
    return Column(default().listToMLlibVectorUDF(col._jc))
9b785839234bc2dfcb44c193cc12930e4f26f1a8
3,649,505
def change_file_paths_to_showcase(df, showcase_dir="/showcase_data/raw_data"):
    """Changes file paths to use showcase directory"""
    output = df.copy()
    path_columns = [
        "file_path",
        "file_path_feature_values",
        "cluster_id_path",
        "thumbnail_path",
        "file_path_small",
    ]
    for col in path_columns:
        if col in df.columns:
            output.loc[:, col] = df[col].apply(lambda x: add_path(x, showcase_dir))
    return output
6789ff426794b620eebb1d80ae96b1a18c9a2dc8
3,649,506
def match_conftest_error(line):
    """
    Extract `ConftestImportFailure` error message from a string.

    :param line: A string to pattern match against.
    :returns: A dictionary where the key `file_path` holds the file path
        and the key `error` the error description. If not matched, the
        dictionary is empty.
    """
    return match_pattern(
        r"^E\s+.*ConftestImportFailure: "
        r"\(local\('(?P<file_path>.*)'\), \((?P<error>.*)\)\)$",
        line,
    )
90b10831b672bb053cc46e5adbefaea5597607e3
3,649,507
import string


def modifyModlist(
    old_entry, new_entry, ignore_attr_types=None, ignore_oldexistent=0
):
    """
    Build differential modify list for calling LDAPObject.modify()/modify_s()

    old_entry
        Dictionary holding the old entry
    new_entry
        Dictionary holding what the new entry should be
    ignore_attr_types
        List of attribute type names to be ignored completely
    ignore_oldexistent
        If non-zero, attribute type names which are in old_entry but are not
        found in new_entry at all are not deleted. This is handy for
        situations where your application sets an attribute value to '' for
        deleting an attribute. In most cases leave zero.
    """
    # Note: legacy Python 2 code (string.lower, dict.has_key, list-returning
    # filter), kept as-is.
    ignore_attr_types = list_dict(map(string.lower, (ignore_attr_types or [])))
    modlist = []
    attrtype_lower_map = {}
    for a in old_entry.keys():
        attrtype_lower_map[string.lower(a)] = a
    for attrtype in new_entry.keys():
        attrtype_lower = string.lower(attrtype)
        if ignore_attr_types.has_key(attrtype_lower):
            # This attribute type is ignored
            continue
        # Filter away null-strings
        new_value = filter(lambda x: x != None, new_entry[attrtype])
        if attrtype_lower_map.has_key(attrtype_lower):
            old_value = old_entry.get(attrtype_lower_map[attrtype_lower], [])
            old_value = filter(lambda x: x != None, old_value)
            del attrtype_lower_map[attrtype_lower]
        else:
            old_value = []
        if not old_value and new_value:
            # Add a new attribute to entry
            modlist.append((ldap.MOD_ADD, attrtype, new_value))
        elif old_value and new_value:
            # Replace existing attribute
            replace_attr_value = len(old_value) != len(new_value)
            if not replace_attr_value:
                old_value_dict = list_dict(old_value)
                new_value_dict = list_dict(new_value)
                for v in old_value:
                    if not new_value_dict.has_key(v):
                        replace_attr_value = 1
                        break
                if not replace_attr_value:
                    for v in new_value:
                        if not old_value_dict.has_key(v):
                            replace_attr_value = 1
                            break
            if replace_attr_value:
                modlist.append((ldap.MOD_DELETE, attrtype, None))
                modlist.append((ldap.MOD_ADD, attrtype, new_value))
        elif old_value and not new_value:
            # Completely delete an existing attribute
            modlist.append((ldap.MOD_DELETE, attrtype, None))
    if not ignore_oldexistent:
        # Remove all attributes of old_entry which are not present
        # in new_entry at all
        for a in attrtype_lower_map.keys():
            if ignore_attr_types.has_key(a):
                # This attribute type is ignored
                continue
            attrtype = attrtype_lower_map[a]
            modlist.append((ldap.MOD_DELETE, attrtype, None))
    return modlist
f28814b4659ccc8e9e27eec0dee5da8700a732ac
3,649,508
def regnety_3200m(**kwargs):
    """ Constructs a RegNet-Y model under 3200M FLOPs. """
    model = RegNet(regnetY_3200M_config, **kwargs)
    return model
6799f96df72769e6f8bd0819e47353e141acefd0
3,649,509
def dcos_api_session(dcos_api_session_factory):
    """ Overrides the dcos_api_session fixture to use exhibitor settings
    currently used in the cluster """
    args = dcos_api_session_factory.get_args_from_env()

    exhibitor_admin_password = None
    expanded_config = get_expanded_config()
    if expanded_config['exhibitor_admin_password_enabled'] == 'true':
        exhibitor_admin_password = expanded_config['exhibitor_admin_password']

    api = dcos_api_session_factory(
        exhibitor_admin_password=exhibitor_admin_password,
        **args)
    api.wait_for_dcos()
    return api
ab677802d1228b3af4bfc8ecc2cbf6040edbc6b0
3,649,511
def BarycentricInterpolation(bins, pnts):
    """
    barycentricinterpolation for given points; return the barycentric
    coordinates for points within the grids

    INPUT
    bins    - grids for discretization, m-length array where bins[i]
              indicates the mesh along dimension i
    pnts    - an array of pnts, each point is an m-length array of
              Cartesian coordinates; can be n pnts in total

    RETURN
    indices - an n-length list of indices, each index is d-length (d=m+1)
              for the interpolating points involved
    coeffs  - an n-length list of coefficients, each coefficient is
              d-length, for reconstructing the points

    A pythonic version of barycentricinterpolation from Russ' drake utility
    function. Does not support dcoefs currently...
    """
    # note here the layout of input and output is different from the C++
    # version of drake
    m = pnts.shape[1]
    n = pnts.shape[0]
    d = m + 1

    if len(bins) != m:
        # validation
        print('The number of bins must equal the dimension of the points.')
        return None, None

    binsize = [len(bins[i]) for i in range(m)]
    nskip = np.concatenate([[1], np.cumprod([binsize[i] for i in range(m-1)])])

    # a list of bary points for future sorting...
    b = [{'dim': 0, 'fracway': 0.0, 'dfracway': 0.0} for i in range(d)]
    indices = np.zeros((n, d))
    coeffs = np.zeros((n, d))

    for j in range(n):
        sidx = 0  # 0-index in our case...
        for i in range(m):
            pt = pnts[j, i]
            curr_bin = bins[i]
            curr_bin_size = binsize[i]

            b[i]['dim'] = i
            if curr_bin_size == 1:
                # singleton dimensions; sidx is unchanged
                b[i]['fracway'] = 1.0
            elif pt > curr_bin[curr_bin_size-1]:
                # larger than max bound of bin
                sidx += nskip[i] * (curr_bin_size-1)
                b[i]['fracway'] = 1.0
                b[i]['dfracway'] = 0.0
            elif pt < curr_bin[0]:
                # less than min bound of bin
                sidx += nskip[i]
                b[i]['fracway'] = 0.0
                b[i]['dfracway'] = 0.0
            else:
                # Russ commented that smarter search can be done here...
                # i guess we can do it in a pythonic way...
                next_bin_index = np.argmax(curr_bin > pt)
                sidx += nskip[i] * next_bin_index
                b[i]['fracway'] = (pt - curr_bin[next_bin_index-1]) / (
                    curr_bin[next_bin_index] - curr_bin[next_bin_index-1])
                b[i]['dfracway'] = 1. / (
                    curr_bin[next_bin_index] - curr_bin[next_bin_index-1])

        # sort dimensions based on fracway (lowest to highest)
        b_sorted = sorted(b[:-1], key=lambda b_elem: b_elem['fracway'])
        # final element of b_sorted: top right corner
        b_sorted.append({'dim': m-1, 'fracway': 1.0, 'dfracway': 0.0})

        indices[j, 0] = sidx
        coeffs[j, 0] = b_sorted[0]['fracway']

        for i in range(m):
            if binsize[b_sorted[i]['dim']] > 1:
                # support singleton dimensions
                sidx -= nskip[b_sorted[i]['dim']]
            indices[j, i+1] = sidx
            coeffs[j, i+1] = b_sorted[i+1]['fracway'] - b_sorted[i]['fracway']

    return indices, coeffs
aa12be78a581cae154887da4546b0a9e94297e00
3,649,512
from pathlib import Path
import shutil


def submit_rgi_job(sample_instance: AnalysisSample) -> RGIResult:
    """
    Given an input AnalysisSample instance, runs RGI and stores result in the database
    :param sample_instance: Instance of AnalysisSample object
    :return: Populated RGIResult object generated by the method
    """
    logger.info(f"Received RGI job request for {sample_instance}")
    assembly_instance = SampleAssemblyData.objects.get(sample_id=sample_instance.sample_id)
    rgi_dir_name = f'RGI_{sample_instance.user}_{sample_instance.pk}'
    root_sample_instance = Sample.objects.get(sample_id=sample_instance.sample_id)
    outdir = MEDIA_ROOT / Path(str(sample_instance.sample_id.fwd_reads)).parent / rgi_dir_name

    if not assembly_instance.assembly_exists():
        logger.warning(f"Could not find assembly for {assembly_instance} - cannot proceed with job")
        return
    else:
        assembly_path = assembly_instance.get_assembly_path()

    # Remove previous analysis if it exists
    if outdir.exists():
        shutil.rmtree(outdir, ignore_errors=True)
    outdir.mkdir(parents=True)

    # Call RGI
    rgi_text_results, rgi_json_results = call_rgi_main(fasta=assembly_path,
                                                       outdir=outdir,
                                                       sample_id=root_sample_instance.sample_id)

    # Populate database with results
    rgi_result_object = RGIResult.objects.create(analysis_sample=sample_instance)
    rgi_result_object.rgi_main_text_results = upload_analysis_file(instance=root_sample_instance,
                                                                   filename=rgi_text_results.name,
                                                                   analysis_folder=rgi_dir_name)
    rgi_result_object.rgi_main_json_results = upload_analysis_file(instance=root_sample_instance,
                                                                   filename=rgi_json_results.name,
                                                                   analysis_folder=rgi_dir_name)
    rgi_result_object.save()
    logger.info(f"Completed running RGI on {sample_instance}")
    return rgi_result_object
3610f59fe62c01c211fcbf93658bc0c70eb25b12
3,649,513
def forwardslash2shift(args=None):
    """ Make forward slash shift when pressed with another key """
    run_mapper(premade.ForwardSlash2Shift)
    return 0
cb9cbbe3272fbfd2cdf16fc4d5fce90b378b4b32
3,649,514
def plotly_figure(figure, id: str):
    """
    :param figure: plotly graph object or px figure
    :param id: unique id string of format 'id_xxx' with x representing a number
    :return: html style string containing a plotly figure
    """
    json_figure = figure.to_json()
    # the div id and the Plotly.react target must be quoted in the HTML/JS
    html = """
        <div id='""" + id + """'></div>
        <script>
            var plotly_data = {}
            Plotly.react('""" + id + """', plotly_data.data, plotly_data.layout);
        </script>
    """
    local_text = html.format(json_figure)
    return local_text
949415c70d467c48ee3aa1f028c9e3539099febf
3,649,515
def _add_resources_to_vault_obj(obj, data, columns):
    """Add associated resources to column and data tuples
    """
    i = 0
    for s in obj.resources:
        if obj.resources[i].id:
            name = 'resource_id_' + str(i + 1)
            data += (obj.resources[i].id,)
            columns = columns + (name,)
            name = 'resource_type_' + str(i + 1)
            data += (obj.resources[i].type,)
            columns = columns + (name,)
        i += 1
    return data, columns
3a6dd7541ac853a7c62b638abf4d0eeb21bb6cb2
3,649,516
def classify_helmet_belt_worn(x):
    """
    This function returns a string representation of the int value of the
    field which specifies whether the person was wearing a seatbelt or a
    helmet. This specification is from the Road Crash Statistics Victoria,
    2013 Edition document.

    :param x: int value representing the classify helmet belt worn field
    :return: string representation of the integer value
    """
    if x == 1:
        return 'Seatbelt Worn'
    elif x == 2:
        return 'Seatbelt Not Worn'
    elif x == 3:
        return 'Child Restraint Worn'
    elif x == 4:
        return 'Child Restraint Not Worn'
    elif x == 5:
        return 'Seatbelt/restraint Not fitted'
    elif x == 6:
        return 'Crash Helmet Worn'
    elif x == 7:
        return 'Crash Helmet Not Worn'
    elif x == 8:
        return 'Not Appropriate'
    else:
        return 'Not Known'
cba05be8d03c933e767a75400032d07e296e0ec3
3,649,517
def history_kernels(estimated_stimulus_kernel, estimated_response_kernel,
                    ci_kernels, ax=None, presentation="left/right",
                    ground_truth=None):
    """plot history kernels

    :Parameters:
        *estimated_stimulus_kernel*
            stimulus kernel estimated from the data
        *estimated_response_kernel*
            response kernel estimated from the data
        *ci_kernels*
            a sequence of confidence regions for the kernels as returned by
            statistics.history_kernel_ci()
        *ax*
            pylab.axes where the plot should go
        *presentation*
            how should the kernels be presented? Selection of either
            'left/right' or 'correct/incorrect'

    :Example:
    >>> skernel = [1.2,.5,.3,.1]
    >>> rkernel = [.1,.1,0,0]
    >>> ci_kernels = [ [[1.3,.6,.4,.2],[.8,.3,.1,-.05]],[[.2,.2,.1,.1],[-.05,0.,-.1,-.1]],[[1.5,.8,.5,.3],[.7,.3,0.,-.2]],[[1.2,.5,.5,.2],[.9,.2,0.,-.05]] ]
    >>> history_kernels ( skernel, rkernel, ci_kernels )
    >>> pl.savefig ( 'test/history_kernels.png' ); pl.close()
    """
    if presentation == "left/right":
        kernels = (estimated_stimulus_kernel, estimated_response_kernel)
        colors = (stimulus_color, response_color)
        labels = ("stimulus", "response")
        if not ci_kernels is None:
            CI = np.array(ci_kernels[:2])
        else:
            CI = None
        if not ground_truth is None:
            true_kernels = ground_truth['stimulus_kernel'], \
                ground_truth['response_kernel']
    elif presentation == "correct/incorrect":
        kernels = (estimated_stimulus_kernel + estimated_response_kernel,
                   -estimated_stimulus_kernel + estimated_response_kernel)
        colors = (correct_color, incorrect_color)
        labels = ("correct", "incorrect")
        if not ci_kernels is None:
            CI = np.array(ci_kernels[2:])
        else:
            CI = None
        if not ground_truth is None:
            true_kernels = ground_truth['stimulus_kernel'] + \
                ground_truth['response_kernel'], \
                -ground_truth['stimulus_kernel'] + \
                ground_truth['response_kernel']
    else:
        raise ValueError("presentation should be either 'left/right' or 'correct/incorrect'")

    if CI is None:
        CI = np.array([[kernels[0], kernels[0]], [kernels[1], kernels[1]]])

    if ax is None:
        ax = pl.gca()
    ax = prepare_axes(ax)

    # Plot confidence regions
    lags = np.arange(len(estimated_stimulus_kernel)) + 1
    for i in [0, 1]:
        fc = 0.5*np.array(colors[i]) + 0.5*np.ones(3)
        ax.fill(np.concatenate((lags, lags[::-1])),
                np.concatenate((CI[i, 0, :], CI[i, 1, ::-1])),
                facecolor=fc, edgecolor=0.5*colors[i], alpha=0.7)

    kernellines = []
    for i in [0, 1]:
        if not ground_truth is None:
            ax.plot(lags, true_kernels[i], color=0.5*colors[i])
        kernellines += ax.plot(lags, kernels[i], 'o',
                               markerfacecolor=colors[i],
                               markeredgecolor=0.5*colors[i],
                               label=labels[i])

    ax.set_xlim(1 - 0.01*len(estimated_stimulus_kernel),
                len(estimated_stimulus_kernel) + 0.01*len(estimated_stimulus_kernel))
    ax.set_xticks(lags)

    # label_axes ( title="history kernels", xlabel="lag",
    #         ylabel="equivalent stimulus strength", legend='best', ax=ax )
    return kernellines
c25751759079dbf11b7b63e9ed66b73d3552c040
3,649,518
def specs_url(self):
    """
    The Swagger specifications absolute url (ie. `swagger.json`)

    :rtype: str
    """
    return url_for(self.endpoint('specs'), _external=False)
1620c8eb4d0b8e61c9a67aadca677b2acae5074f
3,649,520
import sqlite3


def construct_db(db: str) -> sqlite3.Connection:
    """Build empty database 'db'."""
    conn = sqlite3.connect(db)
    c = conn.cursor()
    c.executescript('''
        CREATE TABLE files (
            ID INTEGER PRIMARY KEY, Name TEXT, Path TEXT, FullPath TEXT,
            isDir INTEGER, Size INTEGER, Mtime INTEGER, Atime INTEGER,
            Ctime INTEGER, Btime INTEGER, UID INTEGER, GID INTEGER,
            iNode INTEGER, DevID INTEGER, DP INTEGER, XCount INTEGER,
            MIME INTEGER, Type INTEGER, Offset INTEGER
        );
        CREATE TABLE xattrs (
            ID INTEGER PRIMARY KEY AUTOINCREMENT, FileID INTEGER,
            Key TEXT, Value TEXT, Raw BLOB
        );
        CREATE TABLE mtypes(
            ID INTEGER PRIMARY KEY AUTOINCREMENT, MIME TEXT
        );
        CREATE TABLE ftypes (
            ID INTEGER PRIMARY KEY, Type TEXT
        );
        CREATE VIEW localtime as
            select files.ID, Name, Path, FullPath, isDir, Size,
                datetime(mtime, 'unixepoch', 'localtime') as Mtime,
                datetime(atime, 'unixepoch', 'localtime') as Atime,
                datetime(ctime, 'unixepoch', 'localtime') as Ctime,
                datetime(btime, 'unixepoch', 'localtime') as Btime,
                UID, GID, iNode, DevID as DeviceID, mtypes.MIME, ftypes.Type,
                Xcount as ExtraAttrs, 'Offset' as ZipOffset,
                Key as XattrKey, Value as XattrValue, Raw
            from files
                left join xattrs on files.ID = xattrs.FileID
                left join mtypes on files.MIME = mtypes.ID
                left join ftypes on files.Type = ftypes.ID;
        CREATE VIEW utc as
            select files.ID, Name, Path, FullPath, isDir, Size,
                datetime(mtime, 'unixepoch') as Mtime,
                datetime(atime, 'unixepoch') as Atime,
                datetime(ctime, 'unixepoch') as Ctime,
                datetime(btime, 'unixepoch') as Btime,
                UID, GID, iNode, DevID as DeviceID, mtypes.MIME, ftypes.Type,
                Xcount as ExtraAttrs, 'Offset' as ZipOffset,
                Key as XattrKey, Value as XattrValue, Raw
            from files
                left join xattrs on files.ID = xattrs.FileID
                left join mtypes on files.MIME = mtypes.ID
                left join ftypes on files.Type = ftypes.ID;
    ''')
    conn.commit()
    return conn
22759b9b8e68e7c39f8fdd6fc33124c0ecea3a24
3,649,521
def class_javadoc(ns, stmt):
    """ Generate javadoc for class (string without '/**' and '*/' but
    with * on new line) """
    description = ''
    desc_stmt = search_one(stmt, 'description')
    if desc_stmt is not None:
        description += ''.join([str(desc_stmt.arg).replace('\n', '\n * ')])
    description += ''.join(['\n * <br/>\n * Namespace: ', ns])
    return description
9bfc093362bdb573ba8b41ca17b037b57da3891e
3,649,522
def r_precision(r):
    """Score is precision after all relevant documents have been retrieved

    Relevance is binary (nonzero is relevant).

    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)

    Returns:
        R Precision
    """
    r = np.asarray(r) != 0
    z = r.nonzero()[0]
    if not z.size:
        return 0.
    return np.mean(r[:z[-1] + 1])
998ff6750ce51455fa09ae5970a94934a4c3f383
3,649,523
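A quick worked illustration of r_precision (assuming numpy is imported as np, as the function body implies): for r = [0, 1, 1, 0], the last relevant item sits at rank 3, so the score is the mean over the first three entries.

import numpy as np

r = [0, 1, 1, 0]
# last nonzero index is 2, so the score is mean(r[:3]) = 2/3
print(r_precision(r))  # 0.666...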
from typing import Any
from typing import Callable
from typing import Coroutine


def _async_climate_updater(
    lookin_protocol: LookInHttpProtocol,
    uuid: str,
) -> Callable[[], Coroutine[Any, Any, Climate]]:
    """Create a function to capture the cell variable."""
    # the coroutine yields a Climate (the original annotation said Remote)

    async def _async_update() -> Climate:
        return await lookin_protocol.get_conditioner(uuid)

    return _async_update
b4160385fe7f304096de6bb9196822d3230c342f
3,649,525
def load_natural_movies(cpd=1.00):
    """load natural movies dataset

    Parameters
    ----------
    - cpd: float of cycles per degree, should be 1.00 or 1.33
    """
    if cpd not in {1.00, 1.33}:
        raise Exception('cpd must be in {1.00, 1.33}')
    if cpd == 1.00:
        cpd = '1.00'
    elif cpd == 1.33:
        cpd = '1.33'
    else:
        raise Exception('cpd must be in {1.00, 1.33}')

    # load X
    X_path = '/auto/k6/nbilenko/preproc_data/movie/dir{cpd}cpd_{dataset}stim.npy'
    Xtrain = np.load(X_path.format(cpd=cpd, dataset='t'))
    Xtest = np.load(X_path.format(cpd=cpd, dataset='v'))

    # load Y
    Y_path = 'auto/k8/anunez/proj/snmovies/datasets/snmovies_braindata_AH3T.hdf'
    cci = glabtools.io.get_cc_interface('anunez_raid', verbose=False)
    Y_data = cci.cloud2dict(Y_path, verbose=False)
    Ytrain = Y_data['Ytrain']
    Ytest = Y_data['Yval']

    return {
        'natural_movies_gabor_pyramid': {
            'Xtrain': Xtrain,
            'Ytrain': Ytrain,
            'Xtest': Xtest,
            'Ytest': Ytest,
        },
        'natural_movies_mean_gabor': {
            'Xtrain': Xtrain.mean(1, keepdims=True),
            'Ytrain': Ytrain,
            'Xtest': Xtest.mean(1, keepdims=True),
            'Ytest': Ytest,
        },
    }
bff8dd14cc2afac89aceb9407651f5cb91509a9a
3,649,526
import collections


def sort_dataset_by_len(dataset):
    """
    returns a dict mapping length -> list of items of that length
    an OrderedDict is used so that the mapping is sorted from smallest
    to largest
    """
    sorted_dataset = collections.OrderedDict()
    lengths = sorted(list(set(len(x[1]) for x in dataset)))
    for l in lengths:
        sorted_dataset[l] = []
    for item in dataset:
        sorted_dataset[len(item[1])].append(item)
    return sorted_dataset
1e67da963c6d968fba39730cc33e100242fcafca
3,649,527
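A short usage sketch for sort_dataset_by_len; the dataset items here are hypothetical (label, sequence) pairs, matching the len(x[1]) access in the function.

dataset = [("a", "xx"), ("b", "y"), ("c", "zz")]
by_len = sort_dataset_by_len(dataset)
# OrderedDict([(1, [('b', 'y')]), (2, [('a', 'xx'), ('c', 'zz')])])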
def rule_VisibleTo_if_in_same_visible_container(x, actor, world):
    """Anything in the same visible container as the actor is visible if
    the visible container is lit. We treat doors specially: if x is in the
    get_room_doors of the visible container, then the door is visible,
    too."""
    actor_vis_cont = world[VisibleContainer(world[Location(actor)])]
    if x in world.activity.get_room_doors(actor_vis_cont):
        return True
    if actor_vis_cont == x:
        # otherwise we'd be looking too many levels high
        x_vis_cont = x
    else:
        loc = world[Location(x)]
        if not loc:
            raise NotHandled()
        x_vis_cont = world[VisibleContainer(loc)]
    if actor_vis_cont == x_vis_cont and world[ContainsLight(actor_vis_cont)]:
        return True
    raise NotHandled()
8e4465d7684c95a9890e4271b2dbb75b665d2efd
3,649,528
import copy
import random


def select_random_user_goals(user_goals_no_req_slots, user_goals_with_req_slots,
                             cardinality_no_req, cardinality_req):
    """
    Helper method to randomly select user goals
    """
    random_user_goals = {}
    random_user_goals['all'] = []

    # select randomly user goals without request slots
    random_user_goals['all'].extend(
        copy.deepcopy(random.sample(user_goals_no_req_slots, cardinality_no_req)))

    # select randomly user goals with request slots
    random_user_goals['all'].extend(
        copy.deepcopy(random.sample(user_goals_with_req_slots, cardinality_req)))

    return random_user_goals
ff51361d45cdbd62cc9ee9e8263d47870435b326
3,649,529
import copy


def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
    """Recursive apply method to dict elements

    >>> dict_items_recursive_apply(
    ...     {"foo": {"bar": "baz"}, "qux": ["a", "b"]},
    ...     lambda k, v, x: v.upper() + x, **{"x": "!"}
    ... ) == {'foo': {'bar': 'BAZ!'}, 'qux': ['A!', 'B!']}
    True

    :param config_dict: input nested dictionary
    :type config_dict: dict
    :param apply_method: method to be applied to dict elements
    :type apply_method: :func:`apply_method`
    :param apply_method_parameters: optional parameters passed to the method
    :type apply_method_parameters: dict
    :returns: updated dict
    :rtype: dict
    """
    result_dict = copy.deepcopy(config_dict)
    for dict_k, dict_v in result_dict.items():
        if isinstance(dict_v, dict):
            result_dict[dict_k] = dict_items_recursive_apply(
                dict_v, apply_method, **apply_method_parameters
            )
        elif any(isinstance(dict_v, t) for t in (list, tuple)):
            result_dict[dict_k] = list_items_recursive_apply(
                dict_v, apply_method, **apply_method_parameters
            )
        else:
            result_dict[dict_k] = apply_method(
                dict_k, dict_v, **apply_method_parameters
            )
    return result_dict
760e3de8e414dcd5300aa79cc703b0941a5852fd
3,649,530
def d_B_nu_d_T_d_nu_dimensionless(x):
    """
    Calculates d^2(B_nu) / d(T) / d(nu), as a function of dimensionless
    units, x = (h nu / k_B T)

    Parameters
    ----------
    x : float

    Returns
    -------
    d_B_nu_d_T_d_nu_dimensionless : float
        Not normalized to anything meaningful
    """
    return -np.exp(x)*x**3 * (np.exp(x)*(x-4)+x+4) / (np.exp(x)-1)**3
eb1e42d48e15cbc5ea17877868cca27422d89346
3,649,531
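A minimal numeric check (illustrative; assumes numpy is imported as np, as in the function body): the expression vectorizes over an array of dimensionless frequencies x = h nu / (k_B T).

import numpy as np

x = np.linspace(0.5, 10.0, 4)
print(d_B_nu_d_T_d_nu_dimensionless(x))  # element-wise evaluation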
def node_to_get_batch_value(shape_node: Node):
    """
    The function returns a node that produces the batch value which is
    usually the element of the shape with index 0
    :param shape_node: the node of 1D output shape to get batch from
    :return: the node producing batch value
    """
    return node_to_get_shape_value_of_range(shape_node, [0])
126570b69895cd34bb6821f179076d6d005c36db
3,649,532
def re2_full_match(input, pattern):  # pylint: disable=redefined-builtin
    """Extract regex groups

    Args:
        input: A `tf.string` tensor
        pattern: A pattern string.
    """
    return core_ops.io_re2_full_match(input, pattern)
d9ca2606eae8faf21bf2cf7ec1730c69f609d4c5
3,649,533
import click


def optional_tools_or_packages_arg(multiple=False):
    """
    Decorate click method as optionally taking in the path to a tool
    or directory of tools or a Conda package. If no such argument is given
    the current working directory will be treated as a directory of tools.
    """
    name = "paths" if multiple else "path"
    nargs = -1 if multiple else 1
    return click.argument(
        name,
        metavar="TARGET",
        nargs=nargs,
    )
4a34da51b4a644df70c5ce3ea8afb8b86ae2281d
3,649,535
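A usage sketch for the returned click.argument decorator; the command name and body are hypothetical, only click itself is assumed.

import click

@click.command()
@optional_tools_or_packages_arg(multiple=True)
def lint(paths):
    # paths is a tuple of TARGET arguments; empty means "use cwd"
    click.echo(paths or ('.',))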
import numpy


def linear_interpolate_cdf(base_cdf):
    """Linear interpolate regions of straight lines in the CDF.

    Parameters:
        base_cdf (list): n elements of non-decreasing order.

    Returns:
        list of length base_cdf where consecutive elements of straight
        lines are linearly interpolated between the left and right sides.
    """
    target_cdf = list(base_cdf)
    index = 0
    left_val = 0
    while index < len(base_cdf)-1:
        if base_cdf[index] == base_cdf[index+1]:
            # search for where it ends
            offset = index+1
            while (offset < len(base_cdf)-1 and
                   base_cdf[offset] == base_cdf[offset+1]):
                offset += 1
            # linearly interpolate between index and offset
            right_val = base_cdf[offset]
            interp_val = numpy.interp(
                list(range(index, offset+1, 1)),
                [index-1, offset],
                [float(left_val), float(right_val)])
            target_cdf[index:offset+1] = interp_val
            left_val = right_val
            index = offset+1
        else:
            left_val = base_cdf[index]
            index += 1
    return target_cdf
8f119d1698a44e90253920decf1b3253db9171be
3,649,536
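A worked example of linear_interpolate_cdf: a flat run is replaced by a ramp from the value to its left (initially 0) up to the run's final value.

print(linear_interpolate_cdf([0, 1, 1, 2]))
# [0, 0.5, 1.0, 2]  (the plateau at 1 becomes a ramp ending at 1)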
def hash_str(string: str) -> int:
    """
    Create the hash for a string (poorly).
    """
    hashed = 0
    results = map(ord, string)
    for result in results:
        hashed += result
    return hashed
b80c177974437966361e4117ba235c1563fee5c4
3,649,537
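A worked example of hash_str: the "hash" is just the sum of the string's code points.

print(hash_str("abc"))  # 294 == 97 + 98 + 99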
def graph(g: nx.Graph, s: Optional[list] = None, plot_size: Tuple = (500, 500)):  # pragma: no cover
    """Creates a plot of the input graph.

    This function can plot the input graph only, or the graph with a
    specified subgraph highlighted. Graphs are plotted using the
    Kamada-Kawai layout with an aspect ratio of 1:1.

    **Example usage:**

    >>> graph = nx.complete_graph(10)
    >>> fig = plot.graph(graph, [0, 1, 2, 3])
    >>> fig.show()

    .. image:: ../../_static/complete_graph.png
       :width: 40%
       :align: center
       :target: javascript:void(0);

    Args:
        g (nx.Graph): input graph
        s (list): optional list of nodes comprising the subgraph to highlight
        plot_size (int): size of the plot in pixels, given as a pair of
            integers ``(x_size, y_size)``

    Returns:
        Figure: figure for graph and optionally highlighted subgraph
    """
    try:
        # the imports belong inside the try so the ImportError is caught
        import plotly.graph_objects as go
        import plotly.io as pio
    except ImportError:
        raise ImportError(plotly_error)

    try:
        in_notebook = get_ipython().__class__.__name__ == "ZMQInteractiveShell"
    except NameError:
        in_notebook = False

    if not in_notebook:
        pio.renderers.default = "browser"

    l = nx.kamada_kawai_layout(g)

    g_nodes = go.Scatter(
        **_node_coords(g, l),
        mode="markers",
        hoverinfo="text",
        marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2),
    )

    g_edges = go.Scatter(
        **_edge_coords(g, l),
        line=dict(width=1, color=graph_edge_colour),
        hoverinfo="none",
        mode="lines",
    )

    g_nodes.text = [str(i) for i in g.nodes()]

    layout = go.Layout(
        showlegend=False,
        hovermode="closest",
        xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        margin=dict(b=0, l=0, r=0, t=25),
        height=plot_size[1],
        width=plot_size[0],
        plot_bgcolor="#ffffff",
    )

    if s is not None:
        s = g.subgraph(s)

        s_edges = go.Scatter(
            **_edge_coords(s, l),
            line=dict(width=2, color=subgraph_edge_colour),
            hoverinfo="none",
            mode="lines",
        )

        s_nodes = go.Scatter(
            **_node_coords(s, l),
            mode="markers",
            hoverinfo="text",
            marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2),
        )

        s_nodes.text = [str(i) for i in s.nodes()]

        f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
    else:
        f = go.Figure(data=[g_edges, g_nodes], layout=layout)

    return f
9830ef44f3a85234002c11d0da1913a89c332491
3,649,538
def intersect(p1x, p1y, p2x, p2y, x0, y0):
    """Intersect segment defined by p1 and p2 with ray coming out of x0,y0;
    ray can be horizontal: y=y0, x=x0+dx, want dx>0.

    Args:
        p1x (float): x coordinate of point 1 of segment
        p1y (float): y coordinate of point 1 of segment
        p2x (float): x coordinate of point 2 of segment
        p2y (float): y coordinate of point 2 of segment
        x0 (float): x coordinate anchoring the intersection ray
        y0 (float): y coordinate anchoring the intersection ray

    Returns:
        boolean int: (1) if intersecting, (0) if not intersecting
    """
    if p1x != p2x and p1y != p2y:
        m = (p2y - p1y) / (p2x - p1x)
        x_inter = (y0 - p1y) / m + p1x
        if x_inter >= x0 and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y]):
            ans = 1
        else:
            ans = 0
    else:
        if p1x == p2x:
            # vertical segment
            if x0 <= p1x and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y]):
                ans = 1
            else:
                ans = 0
        if p1y == p2y:
            # horizontal segment
            if y0 == p1y:
                ans = 1
            else:
                ans = 0
    return ans
b58ae51cf179183689a7ed4b0854eefaeb28b895
3,649,539
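Two illustrative calls to intersect (assuming numpy as np, as in the function body), using a vertical segment at x=2 and the horizontal ray y=0, x>=x0; this is the ray-casting primitive behind point-in-polygon tests.

print(intersect(2, -1, 2, 1, 1, 0))  # 1: the ray from (1, 0) crosses the segment
print(intersect(2, -1, 2, 1, 3, 0))  # 0: anchored to the right of the segment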
from scipy import linalg


def impulse_matrix(params, dt, reduced=False):
    """Calculate the matrix exponential for integration of MAT model"""
    a1, a2, b, w, R, tm, t1, t2, tv, tref = params
    if not reduced:
        A = -np.matrix([[1 / tm, -1, 0, 0, 0, 0],
                        [0, 0, 0, 0, 0, 0],
                        [0, 0, 1 / t1, 0, 0, 0],
                        [0, 0, 0, 1 / t2, 0, 0],
                        [0, 0, 0, 0, 1 / tv, -1],
                        [b / tm, -b, 0, 0, 0, 1 / tv]])
    else:
        A = -np.matrix([[1 / tm, -1, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 1 / tv, -1],
                        [b / tm, -b, 0, 1 / tv]])
    return linalg.expm(A * dt)
4305d588680dd5de91765e79b170d26e43f82a01
3,649,540
def get_vimg(request):
    """
    Get a captcha image.
    :param request:
    :return:
    """
    text, image = vcode.gen_captcha_text_and_image()
    v_key = request.GET.get('vk')
    ex_key = request.GET.get('ex')
    if ex_key:
        try:
            redis_conn.delete(ex_key)
        except Exception as e:
            logger.error(e)
    redis_conn.set(v_key, text, 60*3)
    return HttpResponse(image.getvalue(), content_type='image/jpg')
f3398236cb4d69f21a04519de472d85dca885a2c
3,649,541
def generate_fcm_token():
    """Generate an FCM token, e.g.:
    nLAUJTr5RIJ:MNmSQ8O52FoJSvfWEPF4KvWopcNScNFRPHHbXdepwzuXJJMfadpEfb2JlHoqEhWanFz7-N0sfPg-pW4gNubNdxyikiI0lrvGeWGTp86fn9-NA3sZ-Eizv9QE7YKHCOIa70fR38N1ZYsb
    """
    return '{}:{}-{}-{}-{}-{}'.format(random_all(11), random_all(68),
                                      random_all(6), random_all(30),
                                      random_all(5), random_all(27))
e535584bf630e1353a8f7458ff45cf2f0c1433fb
3,649,542
def evaluate(data_loader):
    """Evaluate given the data loader

    Parameters
    ----------
    data_loader : DataLoader

    Returns
    -------
    avg_loss : float
        Average loss
    real_translation_out : list of list of str
        The translation output
    """
    translation_out = []
    all_inst_ids = []
    avg_loss_denom = 0
    avg_loss = 0.0
    for _, (src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids) \
            in enumerate(data_loader):
        src_seq = src_seq.as_in_context(ctx)
        tgt_seq = tgt_seq.as_in_context(ctx)
        src_valid_length = src_valid_length.as_in_context(ctx)
        tgt_valid_length = tgt_valid_length.as_in_context(ctx)
        # Calculating Loss
        out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
        loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean().asscalar()
        all_inst_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())
        avg_loss += loss * (tgt_seq.shape[1] - 1)
        avg_loss_denom += (tgt_seq.shape[1] - 1)
        # Translate
        samples, _, sample_valid_length = \
            translator.translate(src_seq=src_seq, src_valid_length=src_valid_length)
        max_score_sample = samples[:, 0, :].asnumpy()
        sample_valid_length = sample_valid_length[:, 0].asnumpy()
        for i in range(max_score_sample.shape[0]):
            translation_out.append(
                [tgt_vocab.idx_to_token[ele] for ele in
                 max_score_sample[i][1:(sample_valid_length[i] - 1)]])
    avg_loss = avg_loss / avg_loss_denom
    real_translation_out = [None for _ in range(len(all_inst_ids))]
    for ind, sentence in zip(all_inst_ids, translation_out):
        real_translation_out[ind] = sentence
    return avg_loss, real_translation_out
f7697e9f22e5bda3af6b0892b3cc5c3b047771f0
3,649,544
def adding_equation(thetas, eta0, eta1, eta2, eta3, kappa3=0.0,
                    polarized=False, tau1=0.0, tau2=0.0):
    """
    Return the reflectance of a 4 layers material (3 interfaces) with all
    inter-reflections, using adding equation
    """
    zeros = [np.zeros_like(thetas), np.zeros_like(thetas)] if polarized else np.zeros_like(thetas)

    R01 = fresnel(np.cos(thetas), eta1/eta0, polarized=polarized) if eta1 != eta0 else zeros
    ones = np.ones_like(R01)
    T01 = ones - R01

    thetas_t1 = clamp(np.arcsin(eta0 / eta1 * np.sin(thetas)))
    # replace NaNs from total internal reflection by 0.0
    # (the original `thetas_t1 is not np.nan` test never triggers on arrays)
    thetas_t1 = np.where(np.isnan(thetas_t1), 0.0, thetas_t1)

    R10 = fresnel(np.cos(thetas_t1), eta0/eta1, polarized=polarized) if eta1 != eta0 else zeros
    R12 = fresnel(np.cos(thetas_t1), eta2/eta1, polarized=polarized) if eta1 != eta2 else zeros
    T12 = ones - R12

    thetas_t2 = clamp(np.arcsin(eta1/eta2 * np.sin(thetas_t1)))
    thetas_t2 = np.where(np.isnan(thetas_t2), 0.0, thetas_t2)

    R21 = fresnel(np.cos(thetas_t2), eta1/eta2, polarized=polarized) if eta1 != eta2 else zeros

    k = 0.0 if kappa3 == 0.0 else kappa3/eta2
    R23 = fresnel(np.cos(thetas_t2), eta3/eta2, k, polarized=polarized)

    if polarized:
        res = []
        for i in range(2):
            R13 = add_with_absorption(R12[i], R23[i], tau2, thetas_t2)
            R03 = add_with_absorption(R01[i], R13, tau1, thetas_t1)
            # R13 = add(R12[i], T12[i], R21[i], R23[i])
            # R03 = add(R01[i], T01[i], R10[i], R13)
            res.append(np.where(np.isfinite(R03), R03, ones[0]))
        return res

    # R13 = add(R12, T12, R21, R23)
    # R03 = add(R01, T01, R10, R13)
    R13 = add_with_absorption(R12, R23, tau2, thetas_t2)
    R03 = add_with_absorption(R01, R13, tau1, thetas_t1)
    return np.where(np.isfinite(R03), R03, 1.0)
1e1f7e56096d712f04354cca52987b2010fd322f
3,649,545
import logging


def text_expand(context):
    """
    Given context, pick out the bible indexes, turn them into normalized
    scripture, and put the scripture back into the context
    """
    output = []
    end = 0
    for m in candidate_filter(context):
        output.append(m.group('out'))
        try:
            bucket = get_bucket(m)
            formated = format_bucket(bucket)
            output.extend(['《', ':'.join(list(formated)), '》'])
        except KeyError:
            output.append(m.group(0))
        except AttributeError:
            output.append(m.group(0))
        except:
            # log the offending context (the original logged the None
            # return value of print())
            logging.warning(context)
        end = m.end()
    output.append(context[end:])
    return ''.join(output)
146cd85a1007215cc8bed53341418a7b3c23b532
3,649,546
import datetime
from typing import Iterable


def json_custom_parser(obj):
    """
    A custom json parser to handle json.dumps calls properly for Decimal and
    Datetime data types.
    """
    # `import datetime` (not `from datetime import datetime`) is needed for
    # the `datetime.datetime` / `datetime.date` attribute access below
    if not isinstance(obj, string_types) and isinstance(obj, Iterable):
        return list(obj)
    elif isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date):
        dot_ix = 19  # 'YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM'.find('.')
        return obj.isoformat()[:dot_ix]
    else:
        raise TypeError(obj)
dbee1501376d2b1fc235b5351236e857fc9c5750
3,649,547
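A usage sketch for json_custom_parser (string_types would come from six, or simply be str on Python 3):

import datetime
import json

data = {"when": datetime.date(2021, 5, 4), "tags": {"a", "b"}}
print(json.dumps(data, default=json_custom_parser, sort_keys=True))
# {"tags": ["a", "b"], "when": "2021-05-04"}  (set order may vary)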
def like():
    """
    Function to automatically like a picture
    :return: 0 or 1 where 1 = one picture liked
    :rtype: int
    """
    like_icons = driver.find_elements_by_xpath("//*[contains(@aria-label, 'Like')]")
    unlike_icons = driver.find_elements_by_xpath("//*[contains(@aria-label, 'Unlike')]")

    for icon in unlike_icons or like_icons:
        height = icon.get_attribute("height")
        fill_color = icon.get_attribute("fill")
        # Ensuring it is the correct icon and that it has not been liked before
        if height == "24" and fill_color == "#ed4956":
            # Fill color of a post already liked is #ed4956
            print("Picture already liked.")
            return 0
        elif height == "24" and fill_color == "#262626":
            # Fill color of post NOT liked is #262626
            # ('..') is used here to fetch the parent of icon using xpath
            like_button = icon.find_element_by_xpath('..')
            like_button.click()
            print("Picture liked :)")
            sleep(2)
            return 1
        else:  # pragma: no cover
            pass
5db4a43c4b29a3cb49d62bddf755a9e374f0ac4e
3,649,548
def compare_files(file_name1, file_name2):
    """
    Compare two files, line by line, for equality.

    Arguments:
        file_name1 (str or unicode): file name.
        file_name2 (str or unicode): file name.

    Returns:
        bool: True if files are equal, False otherwise.
    """
    # the with-statement closes both files; the original's explicit
    # close() calls were redundant
    with open(file_name1) as file1, open(file_name2) as file2:
        for line1, line2 in zip(file1, file2):
            if line1 != line2:
                return False
    return True
3f77cf177ba60ddd121b95648379fff845d9877b
3,649,550
def like(request, pk):
    """Add a user to those who liked the post.

    Only authenticated users are able to like a post.
    """
    if request.method == 'POST':
        # query the post in question
        try:
            post = Post.objects.get(pk=pk)
        except Post.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # add a user to the list of those who liked this post
        # won't duplicate the relationship
        post.users_who_liked.add(request.user)
        return Response({'message': f'Liked the post {pk}.'})
d3ef1d9728592872a73b900c60e4021078d2ef2e
3,649,551
def from_float32(buffer):
    """Interprets an arbitrary string or NumPy array as Vax single-precision
    floating-point binary values, and returns the equivalent array in IEEE
    values."""

    # Convert the buffer to 2-byte elements
    if isinstance(buffer, (str, np.str_, bytes, bytearray)):
        # np.fromstring is deprecated; frombuffer is the replacement
        pairs = np.frombuffer(buffer, dtype='uint16')
        pairs = pairs.reshape(pairs.size//2, 2)
        newshape = (pairs.size//2,)
    else:
        buffer = np.asarray(buffer)
        pairs = buffer.view('uint16')
        assert pairs.shape[-1] % 2 == 0, \
            'buffer shape is incompatible with 4-byte elements'
        # the original divided the shape tuple itself; the last axis was meant
        if buffer.itemsize == 1:
            newshape = buffer.shape[:-1] + (buffer.shape[-1]//4,)
        elif buffer.itemsize == 2:
            newshape = buffer.shape[:-1] + (buffer.shape[-1]//2,)
        elif buffer.itemsize == 4:
            newshape = buffer.shape[:-1] + (1,)
        else:
            newshape = buffer.shape + (buffer.itemsize//4,)
        if newshape[-1] == 1:
            newshape = newshape[:-1]

    # Perform a pairwise swap of the two-byte elements
    swapped = np.empty(pairs.shape, dtype='uint16')
    swapped[..., :] = pairs[..., ::-1]

    # The results are in LSB IEEE format aside from a scale factor of four
    ieee = swapped.view('<f4') / 4.
    return ieee.reshape(newshape)
2ab310b2d5cc6fcd7f9f094d97de319a1643dc7e
3,649,553
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
                   plugin=None, language=None, local_file=None):
    """Returns a hash of information about the entity."""

    language = standardize_language(language, plugin)
    stats = {
        'language': language,
        'dependencies': [],
        'lines': None,
        'lineno': lineno,
        'cursorpos': cursorpos,
    }

    if entity_type == 'file':
        lexer = get_lexer(language)
        if not language:
            language, lexer = guess_language(file_name, local_file)
        parser = DependencyParser(local_file or file_name, lexer)
        stats.update({
            'language': use_root_language(language, lexer),
            'dependencies': parser.parse(),
            'lines': number_lines_in_file(local_file or file_name),
        })

    return stats
b1ccf3d0eb2af676fce690e5f81182d89d50596b
3,649,554
def setup(app: sphinx.application.Sphinx) -> dict[str, object]:
    """Called by Sphinx to set up the extension."""
    app.add_config_value("gaphor_models", {}, "env", [dict])
    app.add_directive("diagram", DiagramDirective)
    app.connect("config-inited", config_inited)

    return {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
992d7436d31cd18b7cd50b02b013d9c56179eacb
3,649,555
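A conf.py sketch enabling such a Sphinx extension; the module path is hypothetical, only the config value and directive names come from the setup() above.

# conf.py
extensions = [
    "gaphor.extensions.sphinx",  # hypothetical module exposing setup()
]
gaphor_models = {"example": "models/example.gaphor"}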
from nipype.interfaces.afni import utils as afni_utils


def create_vmhc(use_ants, flirt_only=False, name='vmhc_workflow', ants_threads=1):
    """
    Compute the map of brain functional homotopy, the high degree of
    synchrony in spontaneous activity between geometrically corresponding
    interhemispheric (i.e., homotopic) regions.

    Parameters
    ----------
    None

    Returns
    -------
    vmhc_workflow : workflow
        Voxel Mirrored Homotopic Connectivity Analysis Workflow

    Notes
    -----
    `Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/vmhc/vmhc.py>`_

    Workflow Inputs::

        inputspec.brain : string (existing nifti file)
            Anatomical image (without skull)

        inputspec.symmetric_brain : string (existing nifti file)
            MNI152_T1_2mm_symmetric_brain.nii.gz

        inputspec.rest_res_filt : string (existing nifti file)
            Band passed Image with nuisance signal regressed out (and
            optionally scrubbed). Recommended bandpass filter (0.001, 0.1)

        inputspec.reorient : string (existing nifti file)
            RPI oriented anatomical data

        inputspec.example_func2highres_mat : string (existing affine transformation .mat file)
            Specifies an affine transform that should be applied to the
            example_func before non linear warping

        inputspec.standard_for_func : string (existing nifti file)
            MNI152_T1_standard_resolution_brain.nii.gz

        inputspec.symmetric_skull : string (existing nifti file)
            MNI152_T1_2mm_symmetric.nii.gz

        inputspec.twomm_brain_mask_dil : string (existing nifti file)
            MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz

        inputspec.config_file_twomm_symmetric : string (existing .cnf file)
            T1_2_MNI152_2mm_symmetric.cnf

        inputspec.rest_mask : string (existing nifti file)
            A mask functional volume (derived by dilation from motion
            corrected functional volume)

        fwhm_input.fwhm : list (float)
            For spatial smoothing the Z-transformed correlations in MNI
            space. Generally the value of this parameter is 1.5 or 2 times
            the voxel size of the input Image.

        inputspec.mean_functional : string (existing nifti file)
            The mean functional image for use in the func-to-anat
            registration matrix conversion to ITK (ANTS) format, if the
            user selects to use ANTS.

    Workflow Outputs::

        outputspec.highres2symmstandard : string (nifti file)
            Linear registration of T1 image to symmetric standard image

        outputspec.highres2symmstandard_mat : string (affine transformation .mat file)
            An affine transformation .mat file from linear registration and
            used in non linear registration

        outputspec.highres2symmstandard_warp : string (nifti file)
            warp file from Non Linear registration of T1 to symmetrical
            standard brain

        outputspec.fnirt_highres2symmstandard : string (nifti file)
            Non Linear registration of T1 to symmetrical standard brain

        outputspec.highres2symmstandard_jac : string (nifti file)
            jacobian determinant image from Non Linear registration of T1
            to symmetrical standard brain

        outputspec.rest_res_2symmstandard : string (nifti file)
            nonlinear registration (func to standard) image

        outputspec.VMHC_FWHM_img : string (nifti file)
            pearson correlation between res2standard and flipped
            res2standard

        outputspec.VMHC_Z_FWHM_img : string (nifti file)
            Fisher Z transform map

        outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)
            Z statistic map

    Order of commands:

    - Perform linear registration of Anatomical brain in T1 space to
      symmetric standard space. For details see
      `flirt <http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_::

        flirt -ref MNI152_T1_2mm_symmetric_brain.nii.gz
              -in mprage_brain.nii.gz
              -out highres2symmstandard.nii.gz
              -omat highres2symmstandard.mat
              -cost corratio -searchcost corratio
              -dof 12 -interp trilinear

    - Perform nonlinear registration (highres to standard) to symmetric
      standard brain. For details see
      `fnirt <http://fsl.fmrib.ox.ac.uk/fsl/fnirt/>`_::

        fnirt --in=head.nii.gz
              --aff=highres2symmstandard.mat
              --cout=highres2symmstandard_warp.nii.gz
              --iout=fnirt_highres2symmstandard.nii.gz
              --jout=highres2symmstandard_jac.nii.gz
              --config=T1_2_MNI152_2mm_symmetric.cnf
              --ref=MNI152_T1_2mm_symmetric.nii.gz
              --refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
              --warpres=10,10,10

    - Perform spatial smoothing on the input functional image
      (inputspec.rest_res_filt). For details see
      `PrinciplesSmoothing <http://imaging.mrc-cbu.cam.ac.uk/imaging/PrinciplesSmoothing>`_
      `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm>`_::

        fslmaths rest_res_filt.nii.gz
                 -kernel gauss FWHM/ sqrt(8-ln(2))
                 -fmean -mas rest_mask.nii.gz
                 rest_res_filt_FWHM.nii.gz

    - Apply nonlinear registration (func to standard). For details see
      `applywarp <http://www.fmrib.ox.ac.uk/fsl/fnirt/warp_utils.html#applywarp>`_::

        applywarp --ref=MNI152_T1_2mm_symmetric.nii.gz
                  --in=rest_res_filt_FWHM.nii.gz
                  --out=rest_res_2symmstandard.nii.gz
                  --warp=highres2symmstandard_warp.nii.gz
                  --premat=example_func2highres.mat

    - Copy and L/R swap the output of applywarp command
      (rest_res_2symmstandard.nii.gz). For details see
      `fslswapdim <http://fsl.fmrib.ox.ac.uk/fsl/fsl4.0/avwutils/index.html>`_::

        fslswapdim rest_res_2symmstandard.nii.gz -x y z tmp_LRflipped.nii.gz

    - Calculate pearson correlation between rest_res_2symmstandard.nii.gz
      and flipped rest_res_2symmstandard.nii.gz (tmp_LRflipped.nii.gz).
      For details see
      `3dTcorrelate <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_::

        3dTcorrelate -pearson -polort -1
                     -prefix VMHC_FWHM.nii.gz
                     rest_res_2symmstandard.nii.gz tmp_LRflipped.nii.gz

    - Fisher Z Transform the correlation. For details see
      `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::

        3dcalc -a VMHC_FWHM.nii.gz
               -expr 'log((a+1)/(1-a))/2'
               -prefix VMHC_FWHM_Z.nii.gz

    - Calculate the number of volumes (nvols) in flipped
      rest_res_2symmstandard.nii.gz (tmp_LRflipped.nii.gz)::

        - Use Nibabel to do this

    - Compute the Z statistic map::

        3dcalc -a VMHC_FWHM_Z.nii.gz
               -expr 'a*sqrt('${nvols}'-3)'
               -prefix VMHC_FWHM_Z_stat.nii.gz

    Workflow:

    .. image:: ../images/vmhc_graph.dot.png
        :width: 500

    Workflow Detailed:

    .. image:: ../images/vmhc_detailed_graph.dot.png
        :width: 500

    References
    ----------
    .. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S.,
           Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and
           growing apart: regional and sex differences in the lifespan
           developmental trajectories of functional homotopy. The Journal of
           Neuroscience, 30(45), 15034-43. doi:10.1523/JNEUROSCI.2612-10.2010

    Examples
    --------
    >>> vmhc_w = create_vmhc()
    >>> vmhc_w.inputs.inputspec.symmetric_brain = 'MNI152_T1_2mm_symmetric_brain.nii.gz'
    >>> vmhc_w.inputs.inputspec.symmetric_skull = 'MNI152_T1_2mm_symmetric.nii.gz'
    >>> vmhc_w.inputs.inputspec.twomm_brain_mask_dil = 'MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz'
    >>> vmhc_w.inputs.inputspec.config_file_twomm = 'T1_2_MNI152_2mm_symmetric.cnf'
    >>> vmhc_w.inputs.inputspec.standard_for_func = 'MNI152_T1_2mm.nii.gz'
    >>> vmhc_w.inputs.fwhm_input.fwhm = [4.5, 6]
    >>> vmhc_w.get_node('fwhm_input').iterables = ('fwhm', [4.5, 6])
    >>> vmhc_w.inputs.inputspec.rest_res = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_res_filt.nii.gz')
    >>> vmhc_w.inputs.inputspec.reorient = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_RPI.nii.gz')
    >>> vmhc_w.inputs.inputspec.brain = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_brain.nii.gz')
    >>> vmhc_w.inputs.inputspec.example_func2highres_mat = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/reg/example_func2highres.mat')
    >>> vmhc_w.inputs.inputspec.rest_mask = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_mask.nii.gz')
    >>> vmhc_w.run() # doctest: +SKIP
    """

    vmhc = pe.Workflow(name=name)

    inputNode = pe.Node(util.IdentityInterface(fields=['rest_res',
                                                       'example_func2highres_mat',
                                                       'rest_mask',
                                                       'standard_for_func',
                                                       'mean_functional',
                                                       'brain',
                                                       'flirt_linear_aff',
                                                       'fnirt_nonlinear_warp',
                                                       'ants_symm_initial_xfm',
                                                       'ants_symm_rigid_xfm',
                                                       'ants_symm_affine_xfm',
                                                       'ants_symm_warp_field']),
                        name='inputspec')

    outputNode = pe.Node(util.IdentityInterface(fields=['rest_res_2symmstandard',
                                                        'VMHC_FWHM_img',
                                                        'VMHC_Z_FWHM_img',
                                                        'VMHC_Z_stat_FWHM_img']),
                         name='outputspec')

    inputnode_fwhm = pe.Node(util.IdentityInterface(fields=['fwhm']),
                             name='fwhm_input')

    if use_ants == False:
        # Apply nonlinear registration (func to standard)
        func_to_standard = pe.Node(interface=fsl.ApplyWarp(),
                                   name='func_to_standard')
    elif use_ants == True:
        # ANTS warp image etc.
        fsl_to_itk_vmhc = create_wf_c3d_fsl_to_itk(0, name='fsl_to_itk_vmhc')
        collect_transforms_vmhc = create_wf_collect_transforms(
            0, name='collect_transforms_vmhc')
        apply_ants_xfm_vmhc = create_wf_apply_ants_warp(
            map_node=False, name='apply_ants_xfm_vmhc', ants_threads=ants_threads)
        # this has to be 3 instead of default 0 because it is a 4D file
        apply_ants_xfm_vmhc.inputs.inputspec.input_image_type = 3

    # copy and L/R swap file
    copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
                                name='copy_and_L_R_swap')
    copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')

    # calculate vmhc
    pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
                                  name='pearson_correlation')
    pearson_correlation.inputs.pearson = True
    pearson_correlation.inputs.polort = -1
    pearson_correlation.inputs.outputtype = 'NIFTI_GZ'

    try:
        z_trans = pe.Node(interface=preprocess.Calc(), name='z_trans')
        z_stat = pe.Node(interface=preprocess.Calc(), name='z_stat')
    except AttributeError:
        z_trans = pe.Node(interface=afni_utils.Calc(), name='z_trans')
        z_stat = pe.Node(interface=afni_utils.Calc(), name='z_stat')

    z_trans.inputs.expr = 'log((1+a)/(1-a))/2'
    z_trans.inputs.outputtype = 'NIFTI_GZ'
    z_stat.inputs.outputtype = 'NIFTI_GZ'

    NVOLS = pe.Node(util.Function(input_names=['in_files'],
                                  output_names=['nvols'],
                                  function=get_img_nvols),
                    name='NVOLS')

    generateEXP = pe.Node(util.Function(input_names=['nvols'],
                                        output_names=['expr'],
                                        function=get_operand_expression),
                          name='generateEXP')

    smooth = pe.Node(interface=fsl.MultiImageMaths(), name='smooth')

    if use_ants == False:
        vmhc.connect(inputNode, 'rest_res', smooth, 'in_file')
        vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss), smooth, 'op_string')
        vmhc.connect(inputNode, 'rest_mask', smooth, 'operand_files')
        vmhc.connect(inputNode, 'standard_for_func', func_to_standard, 'ref_file')

        if not flirt_only:
            vmhc.connect(inputNode, 'fnirt_nonlinear_warp', func_to_standard, 'field_file')
            vmhc.connect(smooth, 'out_file', func_to_standard, 'in_file')
            vmhc.connect(inputNode, 'example_func2highres_mat', func_to_standard, 'premat')
        else:
            func_to_anat = pe.Node(interface=fsl.ApplyWarp(), name='func_to_anat')
            vmhc.connect(smooth, 'out_file', func_to_anat, 'in_file')
            vmhc.connect(inputNode, 'brain', func_to_anat, 'ref_file')
            vmhc.connect(inputNode, 'example_func2highres_mat', func_to_anat, 'premat')
            vmhc.connect(func_to_anat, 'out_file', func_to_standard, 'in_file')
            vmhc.connect(inputNode, 'flirt_linear_aff', func_to_standard, 'premat')

        vmhc.connect(func_to_standard, 'out_file', copy_and_L_R_swap, 'in_file')
        vmhc.connect(func_to_standard, 'out_file', pearson_correlation, 'xset')

    elif use_ants == True:
        # connections for ANTS stuff
        # functional apply warp stuff
        vmhc.connect(inputNode, 'rest_res', smooth, 'in_file')
        vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss), smooth, 'op_string')
        vmhc.connect(inputNode, 'rest_mask', smooth, 'operand_files')
        vmhc.connect(smooth, 'out_file', apply_ants_xfm_vmhc, 'inputspec.input_image')
        vmhc.connect(inputNode, 'ants_symm_initial_xfm', collect_transforms_vmhc, 'inputspec.linear_initial')
        vmhc.connect(inputNode, 'ants_symm_rigid_xfm', collect_transforms_vmhc, 'inputspec.linear_rigid')
        vmhc.connect(inputNode, 'ants_symm_affine_xfm', collect_transforms_vmhc, 'inputspec.linear_affine')
        vmhc.connect(inputNode, 'ants_symm_warp_field', collect_transforms_vmhc, 'inputspec.warp_file')

        # func->anat matrix (bbreg)
        vmhc.connect(inputNode, 'example_func2highres_mat', fsl_to_itk_vmhc, 'inputspec.affine_file')
        vmhc.connect(inputNode, 'brain', fsl_to_itk_vmhc, 'inputspec.reference_file')
        vmhc.connect(inputNode, 'mean_functional', fsl_to_itk_vmhc, 'inputspec.source_file')
        vmhc.connect(fsl_to_itk_vmhc, 'outputspec.itk_transform', collect_transforms_vmhc, 'inputspec.fsl_to_itk_affine')
        vmhc.connect(inputNode, 'standard_for_func', apply_ants_xfm_vmhc, 'inputspec.reference_image')
        vmhc.connect(collect_transforms_vmhc, 'outputspec.transformation_series', apply_ants_xfm_vmhc, 'inputspec.transforms')
        vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image', copy_and_L_R_swap, 'in_file')
        vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image', pearson_correlation, 'xset')

    vmhc.connect(copy_and_L_R_swap, 'out_file', pearson_correlation, 'yset')
    vmhc.connect(pearson_correlation, 'out_file', z_trans, 'in_file_a')
    vmhc.connect(copy_and_L_R_swap, 'out_file', NVOLS, 'in_files')
    vmhc.connect(NVOLS, 'nvols', generateEXP, 'nvols')
    vmhc.connect(z_trans, 'out_file', z_stat, 'in_file_a')
    vmhc.connect(generateEXP, 'expr', z_stat, 'expr')

    if use_ants == False:
        vmhc.connect(func_to_standard, 'out_file', outputNode, 'rest_res_2symmstandard')
    elif use_ants == True:
        # ANTS warp outputs to outputnode
        vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image', outputNode, 'rest_res_2symmstandard')

    vmhc.connect(pearson_correlation, 'out_file', outputNode, 'VMHC_FWHM_img')
    vmhc.connect(z_trans, 'out_file', outputNode, 'VMHC_Z_FWHM_img')
    vmhc.connect(z_stat, 'out_file', outputNode, 'VMHC_Z_stat_FWHM_img')

    return vmhc
4c71974d962d86385de8de9d6752dc59b1e205d0
3,649,556
def get_chunk_index(connection, db,  # pylint: disable=too-many-arguments
                    tbl, chunk, ch_db='percona', ch_tbl='checksums'):
    """
    Get index that was used to cut the chunk

    :param connection: MySQLDb connection
    :param db: database of the chunk
    :param tbl: table of the chunk
    :param chunk: chunk id
    :param ch_db: Database where checksums are stored. Default percona.
    :param ch_tbl: Table where checksums are stored. Default checksums.
    :return: index name or None if no index was used
    """
    cur = connection.cursor()
    query = "SELECT chunk_index FROM `%s`.`%s` " \
            "WHERE db='%s' AND tbl='%s' AND chunk = %s"
    LOG.info('Executing %s', query % (ch_db, ch_tbl, db, tbl, chunk))
    cur.execute(query % (ch_db, ch_tbl, db, tbl, chunk))
    return cur.fetchone()[0]
6cf59174d766d68dc635e9c578c9d4d12dba55bf
3,649,557
from typing import List


def initial_assignment_alpha_MSS(agents: List[AdditiveAgent], items: List[str],
                                 alpha: float) -> Allocation:
    """
    Initial division for allocating agents according to their alpha-MMS.

    :param agents: valuations of agents, normalized such that MMS=1 for all
        agents, and valuations are ordered in ascending order
    :param items: items names sorted from the highest valued to the lowest
    :param alpha: parameter for how much to approximate MMS allocation.
    :return Allocation: what has been allocated so far (in this function);
        items and agents are updated during the function

    >>> ### allocation for 1 agent, 1 object (this passes!)
    >>> a = AdditiveAgent({"x": 1}, name="Alice")
    >>> agents = [a]
    >>> a1 = initial_assignment_alpha_MSS(agents, ['x'], 0.75)
    >>> print(a1, agents)
    Alice gets {x} with value nan. []
    >>> ### allocation for 1 agent, 2 objects
    >>> b = AdditiveAgent({"x": 0.5, "y": 0.4}, name="Blice")
    >>> agents = [b]
    >>> a1 = initial_assignment_alpha_MSS(agents, ['x', 'y'], 0.6)
    >>> print(a1, agents)
    Blice gets {x,y} with value nan. []
    >>> ### allocation for 2 agents, 2 objects
    >>> a = AdditiveAgent({"x": 0.8, "y": 0.7}, name="Alice")
    >>> b = AdditiveAgent({"x": 0.7, "y": 0.7}, name="Blice")
    >>> agents = [a, b]
    >>> a1 = initial_assignment_alpha_MSS(agents, ['x', 'y'], 0.6)
    >>> print(a1, agents)
    Alice gets {x} with value nan.
    Blice gets {y} with value nan. []
    >>> ### allocation for 3 agents, 8 objects
    >>> a = AdditiveAgent({"x1": 0.647059, "x2": 0.588235, "x3": 0.470588, "x4": 0.411765, "x5": 0.352941, "x6": 0.294118, "x7": 0.176471, "x8": 0.117647}, name="A")
    >>> b = AdditiveAgent({"x1": 1.298701, "x2": 0.714286, "x3": 0.649351, "x4": 0.428571, "x5": 0.155844, "x6": 0.064935, "x7": 0.051948, "x8": 0.012987}, name="B")
    >>> c = AdditiveAgent({"x1": 0.6, "x2": 0.6, "x3": 0.48, "x4": 0.36, "x5": 0.32, "x6": 0.32, "x7": 0.28, "x8": 0.04}, name="C")
    >>> agents = [a, b, c]
    >>> a1 = initial_assignment_alpha_MSS(agents, ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8'], 0.75)
    >>> print(a1, agents)  # x6, x7, x8 weren't divided
    A gets {x3,x4} with value nan.
    B gets {x1} with value nan.
    C gets {x2,x5} with value nan. []
    """
    ag_alloc = {}
    n = len(agents) - 1
    names_agents = agent_names_from(agents)

    # if there are fewer objects than agents, mms is 0 for everyone
    # (argument order normalized to match the final return below)
    if n + 1 > len(items):
        return Allocation(names_agents, ag_alloc)

    while True:
        # for every agent check if s1/s2/s3/s4 >= alpha
        num_items = len(items)
        # fill the si bundles, checking that indices are not out of bounds
        s1_bundle, s2_bundle, s3_bundle, s4_bundle = [], [], [], []
        if num_items > 0:
            s1_bundle = [items[0]]
        if num_items > n + 1:
            s2_bundle = [items[n], items[n+1]]
        if num_items > 2*(n+1):
            if 2*(n+1) - 2 > 0:
                s3_bundle = [items[(2*(n+1))-2], items[2*(n+1)-1], items[2*(n+1)]]
            s4_bundle = [items[0], items[2*(n+1)]]
        s = [s1_bundle, s2_bundle, s3_bundle, s4_bundle]

        for si in s:
            willing_agent_index = willing_agent(agents, si, alpha)
            if willing_agent_index is not None:
                # give bundle to agent
                ag_alloc[agents[willing_agent_index]._name] = si
                # remove the given items and the agent
                for item in si:
                    items.remove(item)
                agents.pop(willing_agent_index)
                # update number of agents
                n = n - 1
                # go back to the beginning of the outer loop and
                # redefine the si bundles
                break
            elif si == s4_bundle:
                # no agent is satisfied by any of the si bundles
                return Allocation(names_agents, ag_alloc)
5ac7fa947ee555dfd963c679696224e58e2c343a
3,649,559
def comp_axes(
    self,
    axes_list,
    machine=None,
    axes_dict_in=None,
    is_periodicity_a=None,
    is_periodicity_t=None,
    per_a=None,
    is_antiper_a=None,
    per_t=None,
    is_antiper_t=None,
):
    """Compute simulation axes such as time / angle / phase axes, with or
    without periodicities and including normalizations

    Parameters
    ----------
    self : Input
        an Input object
    machine : Machine
        a Machine object
    axes_list: list
        List of axes name to return in axes dict
    axes_dict_in: {Data}
        dict of axes containing time and angle axes (with or without
        (anti-)periodicity)
    is_periodicity_a: bool
        True if spatial periodicity is requested
    is_periodicity_t: bool
        True if time periodicity is requested
    per_a : int
        angle periodicity
    is_antiper_a : bool
        if the angle axis is antiperiodic
    per_t : int
        time periodicity
    is_antiper_t : bool
        if the time axis is antiperiodic

    Returns
    -------
    axes_dict: {Data}
        dict of axes containing requested axes
    """
    if len(axes_list) == 0:
        raise Exception("axes_list should not be empty")

    if self.parent is not None:
        simu = self.parent
    else:
        simu = None

    if hasattr(simu, "parent") and simu.parent is not None:
        output = simu.parent
    else:
        output = None

    if (axes_list is None or len(axes_list) == 0) and (
        axes_dict_in is None or len(axes_dict_in) == 0
    ):
        raise Exception(
            "Cannot calculate axes if both axes list and axes dict are None"
        )

    if machine is None:
        # Fetch machine from input
        if hasattr(simu, "machine") and simu.machine is not None:
            machine = simu.machine
        else:
            raise Exception("Cannot calculate axes if simu.machine is None")

    # Get machine pole pair number
    p = machine.get_pole_pair_number()

    # Fill periodicity parameters that are None
    if per_a is None or is_antiper_a is None or per_t is None or is_antiper_t is None:
        if output is not None:
            # Get time and space (anti-)periodicities from the output
            (
                per_a_0,
                is_antiper_a_0,
                per_t_0,
                is_antiper_t_0,
            ) = output.get_machine_periodicity()
        else:
            # Compute time and space (anti-)periodicities from the machine
            per_a_0, is_antiper_a_0 = machine.comp_periodicity_spatial()
            per_t_0, is_antiper_t_0, _, _ = machine.comp_periodicity_time()

    if is_periodicity_t is None or is_periodicity_t:
        # Enforce None values to machine time periodicity
        per_t = per_t_0 if per_t is None else per_t
        is_antiper_t = is_antiper_t_0 if is_antiper_t is None else is_antiper_t
        if is_periodicity_t is None:
            # Check time periodicity is included
            is_periodicity_t = per_t > 1 or is_antiper_t
    elif not is_periodicity_t:
        # Remove time periodicity
        per_t = 1
        is_antiper_t = False

    if is_periodicity_a is None or is_periodicity_a:
        # Enforce None values to machine periodicity
        per_a = per_a_0 if per_a is None else per_a
        is_antiper_a = is_antiper_a_0 if is_antiper_a is None else is_antiper_a
        if is_periodicity_a is None:
            # Enforce requested angle periodicity
            is_periodicity_a = per_a > 1 or is_antiper_a
    elif not is_periodicity_a:
        # Remove angle periodicity
        per_a = 1
        is_antiper_a = False

    # Init axes_dict
    axes_dict = dict()

    # Get time axis
    if "time" in axes_list:
        # Check if Time is already in input dict of axes
        if axes_dict_in is not None and "time" in axes_dict_in:
            Time_in = axes_dict_in["time"]
        else:
            Time_in = None
        # Calculate time axis
        Time = self.comp_axis_time(p, per_t, is_antiper_t, Time_in)
        # Store time axis in dict
        axes_dict["time"] = Time

    # Get angle axis
    if "angle" in axes_list:
        # Airgap radius
        Rag = machine.comp_Rgap_mec()
        # Check if Angle is already in input dict of axes
        if axes_dict_in is not None and "angle" in axes_dict_in:
            Angle_in = axes_dict_in["angle"]
        else:
            Angle_in = None
        # Calculate angle axis
        Angle = self.comp_axis_angle(p, Rag, per_a, is_antiper_a, Angle_in)
        # Store angle axis in dict
        axes_dict["angle"] = Angle

    if "phase_S" in axes_list:
        # Check if Phase is already in input dict of axes
        stator_label = "phase_" + machine.stator.get_label()
        if axes_dict_in is not None and stator_label in axes_dict_in:
            Phase_in = axes_dict_in[stator_label]
        else:
            Phase_in = None
        # Calculate stator phase axis
        Phase = self.comp_axis_phase(machine.stator, Phase_in)
        if Phase is not None:
            # Store phase axis in dict
            axes_dict[stator_label] = Phase

    if "phase_R" in axes_list:
        # Check if Phase is already in input dict of axes
        rotor_label = "phase_" + machine.rotor.get_label()
        if axes_dict_in is not None and rotor_label in axes_dict_in:
            Phase_in = axes_dict_in[rotor_label]
        else:
            Phase_in = None
        # Calculate rotor phase axis
        per_a_phase = 2 * per_a if is_antiper_a else per_a
        Phase = self.comp_axis_phase(machine.rotor, per_a_phase, Phase_in)
        if Phase is not None:
            # Store phase axis in dict
            axes_dict[rotor_label] = Phase

    return axes_dict
d01cb4efb2b1676cc2548e3e57324733708e4764
3,649,561
def clean_vehicles(country): """Delete all vehicles from given country.""" with elastic() as client: search = Vehicle.search(using=client).filter("term", country=country) count = search.count() search.delete() return count
021189a68ec6035af2b5140f79a29ce71caa12fd
3,649,562
from pathlib import Path
from typing import Any, Text, Union

from jinja2 import Undefined
from jinja2.nativetypes import NativeEnvironment


def render_template(
    env: NativeEnvironment,
    template: Union[Text, Path],
    context: Any,
) -> Any:
    """Utility function for rendering Jinja2 text or file templates.

    Args:
        env: The Jinja2 environment to use for rendering
        template: The template string or file to render
        context: The context variables to use for rendering

    Returns:
        The rendered template string or data structure
    """
    # Load file templates by path; treat anything else as a template string.
    if isinstance(template, Path):
        _template = env.get_template(str(template))
    else:
        _template = env.from_string(template)

    value = _template.render(**context)
    if isinstance(value, Undefined):
        value._fail_with_undefined_error()
    return value
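# Usage sketch for render_template (template string and context invented for
# illustration): a NativeEnvironment returns native Python values, so the
# rendered result below is the int 42 rather than the string "42".
env = NativeEnvironment()
result = render_template(env, "{{ x + 1 }}", {"x": 41})
assert result == 42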
61585cf76896afd70be8b3a620cb4dbe8620c567
3,649,563
def aspect_ToCString(*args): """ * Translates an ExtendedString to a CString depending of the local format. :param aString: :type aString: TCollection_ExtendedString & :rtype: char * """ return _Aspect.aspect_ToCString(*args)
e5f5b352b60610f3a18e0757a98b8e58f31c84ff
3,649,565
def suites_list(request): """List suites.""" return TemplateResponse( request, "manage/suite/suites.html", { "suites": model.Suite.objects.select_related().annotate( case_count=NotDeletedCount("cases", distinct=True)), } )
55e1cd86a0d82bc6fd2a6b75248a1a4b06264bb5
3,649,566
def push_gitlab_event_dict(): """ Cleared version of the push gitlab webhook content. """ return { "object_kind": "push", "event_name": "push", "before": "0e27f070efa4bef2a7c0168f07a0ac36ef90d8cb", "after": "cb2859505e101785097e082529dced35bbee0c8f", "ref": "refs/heads/build-branch", "checkout_sha": "cb2859505e101785097e082529dced35bbee0c8f", "user_id": 5647360, "user_name": "Shreyas Papinwar", "user_username": "shreyaspapi", "user_email": "", "user_avatar": "https://assets.gitlab-static.net/uploads/-" "/system/user/avatar/5647360/avatar.png", "project_id": 18032222, "project": { "id": 18032222, "name": "Hello there", "description": "Hehehehe", "web_url": "https://gitlab.com/the-namespace/repo-name", "git_ssh_url": "[email protected]:the-namespace/repo-name.git", "git_http_url": "https://gitlab.com/the-namespace/repo-name.git", "namespace": "Testing packit", "visibility_level": 20, "path_with_namespace": "the-namespace/repo-name", "default_branch": "master", "homepage": "https://gitlab.com/the-namespace/repo-name", "url": "[email protected]:the-namespace/repo-name.git", "ssh_url": "[email protected]:the-namespace/repo-name.git", "http_url": "https://gitlab.com/the-namespace/repo-name.git", }, "commits": [ { "id": "cb2859505e101785097e082529dced35bbee0c8f", "message": "Update README.md", "title": "Update README.md", "timestamp": "2020-06-04T23:14:57+00:00", "url": "https://gitlab.com/the-namespace/repo-name/-/commit/" "cb2859505e101785097e082529dced35bbee0c8f", "author": {"name": "Shreyas Papinwar", "email": "[email protected]"}, "added": [], "modified": ["README.md"], "removed": [], } ], "total_commits_count": 1, "push_options": {}, "repository": { "name": "Hello there", "url": "[email protected]:the-namespace/repo-name.git", "description": "Hehehehe", "homepage": "https://gitlab.com/the-namespace/repo-name", "git_http_url": "https://gitlab.com/the-namespace/repo-name.git", "git_ssh_url": "[email protected]:the-namespace/repo-name.git", "visibility_level": 20, }, }
3a0134774f828e233c8b1e3fd2d6b94d6fae699f
3,649,567
def compute_rotation_effects(VD, settings, EW_small, GAMMA, len_mach, X, CHORD,
                             XLE, XBAR, rhs, COSINP, SINALF, PITCH, ROLL, YAW,
                             STB, RNMAX):
    """ This computes the effects of the freestream and aircraft rotation rate on
    CLE, the induced flow at the leading edge

    Assumptions:
    Several of the values needed in this calculation have been computed earlier
    and stored in VD

    Normally, VORLAX skips the calculation implemented in this function for
    linear chordwise spacing (the if statement below). However, since the trends
    are correct, albeit underestimated, this calculation is being forced here.
    """
    LE_ind = VD.leading_edge_indices
    RNMAX = VD.panels_per_strip

    ##spacing = settings.spanwise_cosine_spacing
    ##if spacing == False: # linear spacing is LAX==1 in VORLAX
    ##    return 0 #CLE not calculated till later for linear spacing

    # Compute rotational effects (pitch, roll, yaw rates) on LE suction
    # pick leading edge strip values for EW and reshape GAMMA -> gamma accordingly
    EW = EW_small[:, LE_ind, :]
    n_tot_strips = EW.shape[1]
    gamma = np.array(np.split(np.repeat(GAMMA, n_tot_strips, axis=0), len_mach))
    CLE = (EW*gamma).sum(axis=2)

    # Up till EFFINC, some of the following values were computed in compute_RHS_matrix().
    # EFFINC and ALOC are calculated the exact same way, except for the XGIRO term.

    # LOCATE VORTEX LATTICE CONTROL POINT WITH RESPECT TO THE
    # ROTATION CENTER (XBAR, 0, ZBAR). THE RELATIVE COORDINATES
    # ARE XGIRO, YGIRO, AND ZGIRO.
    XGIRO = X - CHORD*XLE - np.repeat(XBAR, RNMAX[LE_ind])
    YGIRO = rhs.YGIRO
    ZGIRO = rhs.ZGIRO

    # VX, VY, VZ ARE THE FLOW ONSET VELOCITY COMPONENTS AT THE LEADING
    # EDGE (STRIP MIDPOINT). VX, VY, VZ AND THE ROTATION RATES ARE
    # REFERENCED TO THE FREE STREAM VELOCITY.
    VX = rhs.VX
    VY = (COSINP - YAW*XGIRO + ROLL*ZGIRO)
    VZ = (SINALF - ROLL*YGIRO + PITCH*XGIRO)

    # CCNTL, SCNTL, SID, and COD were computed in compute_RHS_matrix()

    # EFFINC = COMPONENT OF ONSET FLOW ALONG NORMAL TO CAMBERLINE AT
    # LEADING EDGE.
    EFFINC = VX*rhs.SCNTL + VY*rhs.CCNTL*rhs.SID - VZ*rhs.CCNTL*rhs.COD

    CLE = CLE - EFFINC[:, LE_ind]
    CLE = np.where(STB > 0, CLE/RNMAX[LE_ind]/STB, CLE)

    return CLE
6184f0732c4da4726a5f17e99bd7329bd55c4907
3,649,568
def introduction(course):
    """This method handles the route to 'courses/introduction.html', where the
    character introduction is rendered. It accepts both GET and POST requests.

    Args:
        course (string): Name of the course.

    Returns:
        render_template: Returns the rendered 'courses/introduction.html' template.
    """
    courseClass = class_for_name("project.models", course.capitalize())
    introLevel = courseClass.query.filter_by(email=current_user.email).first().get_introLevel()
    letters = introduceLetters(course, introLevel)

    return render_template('courses/introduction.html', letters=letters, course=course)
7c569f98afbced4a0e1c45b0956d3ba15147384f
3,649,569
import pesummary.core.file.formats import pesummary.gw.file.formats import pkgutil import importlib def available_formats(): """Return the available formats for reading and writing Returns ------- tuple: tuple of sets. First set are the available formats for reading. Second set are the available sets for writing. """ read_formats, write_formats = [], [] modules = { "gw": pesummary.gw.file.formats, "core": pesummary.core.file.formats } for package in ["core", "gw"]: formats = [ a for _, a, _ in pkgutil.walk_packages(path=modules[package].__path__) ] for _format in formats: _submodule = importlib.import_module( "pesummary.{}.file.formats.{}".format(package, _format) ) if hasattr(_submodule, "write_{}".format(_format)): write_formats.append(_format) if hasattr(_submodule, "read_{}".format(_format)): read_formats.append(_format) return set(read_formats), set(write_formats)
1419092210d0cf5dfa116d43c0354c309afc831e
3,649,570
import json

import numpy as np


def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):
    """Get center and scale for bounding box from openpose detections."""
    with open(openpose_file, 'r') as f:
        keypoints = json.load(f)['people'][0]['pose_keypoints_2d']
    keypoints = np.reshape(np.array(keypoints), (-1, 3))
    valid = keypoints[:, -1] > detection_thresh
    valid_keypoints = keypoints[valid][:, :-1]
    center = valid_keypoints.mean(axis=0)
    bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
    # adjust bounding box tightness
    scale = bbox_size / 200.0
    scale *= rescale
    return center, scale
c91397fbe42a15d8bce1f1018303e6ff7328c467
3,649,571
def calc_KPs(TempC, Sal, P=None): """ Calculate equilibrium constants for P species. KP1 = H3PO4 KP2 = H2PO4 KP3 = HPO4 Chapter 5, Section 7.2.5 of Dickson, Sabine and Christian (2007, http://cdiac.ornl.gov/oceans/Handbook_2007.html) **WITHOUT APPROX PH SCALE CONVERSION IN CONSTANT** (See footnote 5 in 'Best Practices' Guide) This produces constants on SWS pH Scale. Must be converted to Total scale before use. Parameters ---------- TempC : array-like Temperature in Celcius. Sal : array-like Salinity in PSU P : array-like Pressure in bar Returns ------- dict of KPs """ TempK = TempC + 273.15 lnTempK = np.log(TempK) a0, a1, a2, a3, a4, a5, a6 = ( -4576.752, 115.54, -18.453, -106.736, 0.69171, -0.65643, -0.01844, ) b0, b1, b2, b3, b4, b5, b6 = ( -8814.715, 172.1033, -27.927, -160.340, 1.3566, 0.37335, -0.05778, ) c0, c1, c3, c4, c5, c6 = (-3070.75, -18.126, 17.27039, 2.81197, -44.99486, -0.09984) KP1 = np.exp( a0 / TempK + a1 + a2 * lnTempK + (a3 / TempK + a4) * Sal ** 0.5 + (a5 / TempK + a6) * Sal ) KP2 = np.exp( b0 / TempK + b1 + b2 * lnTempK + (b3 / TempK + b4) * Sal ** 0.5 + (b5 / TempK + b6) * Sal ) KP3 = np.exp( c0 / TempK + c1 + (c3 / TempK + c4) * Sal ** 0.5 + (c5 / TempK + c6) * Sal ) # parameters from Table 5 of Millero 2007 (doi:10.1021/cr0503557) # Checked against CO2SYS if P is not None: ppar = { "KP1": [-14.51, 0.1211, -0.000321, -2.67, 0.0427], "KP2": [-23.12, 0.1758, -2.647e-3, -5.15, 0.09], "KP3": [-26.57, 0.2020, -3.042e-3, -4.08, 0.0714], } KP1 *= prescorr(P, TempC, *ppar["KP1"]) KP2 *= prescorr(P, TempC, *ppar["KP2"]) KP3 *= prescorr(P, TempC, *ppar["KP3"]) return {"KP1": KP1, "KP2": KP2, "KP3": KP3}
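# Usage sketch for calc_KPs, assuming numpy is available as np (the function
# also needs the module's prescorr helper, but only when P is given). The
# temperature/salinity values below are illustrative surface-ocean conditions.
import numpy as np

Ks = calc_KPs(TempC=25.0, Sal=35.0)
print(Ks["KP1"], Ks["KP2"], Ks["KP3"])  # SWS-scale equilibrium constants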
a850fb9a85946d4fc9607f8b6744291157b980d1
3,649,572
import numpy as np
from torch import LongTensor
from sklearn.metrics import classification_report


def evaluate_model_sector_prediction(
    model,
    test_data_x,
    test_data_y,
    test_data_industry,
    test_data_size,
    mode_classifier=True,
    max_seq_length=512,
    batch_size=8,
):
    """This is a function to predict the sector given the input text ids"""
    model = model.eval()
    pred_label_test = []
    answer_label_test = []
    pred_industry_test = []
    answer_indesutry_test = []
    pred_label_prob_list = []
    pred_industry_prob_list = []
    for data_index in range(0, len(test_data_x), batch_size):
        data_batch = test_data_x[data_index : data_index + batch_size]
        doc_batch = [doc[0] for doc in data_batch]
        logits = 0
        industry_logits_all = 0
        # Format the input data: pad/truncate each document to max_seq_length.
        input_array_doc = []
        for doc_batch_index, input_ids in enumerate(doc_batch):
            input_array = np.zeros(max_seq_length, dtype=np.int64)
            n_tokens = min(max_seq_length, len(input_ids))
            input_array[:n_tokens] = input_ids[:n_tokens]
            input_array_doc.append(input_array)
        input_ids = LongTensor(np.array(input_array_doc).astype(np.int32))
        # Get the model's outputs.
        label_logits, industry_logits = model(input_ids)
        # Accumulate the predicted probabilities.
        logits += label_logits
        industry_logits_all += industry_logits
        pred_label = np.argmax(logits.detach().to("cpu").numpy(), axis=1)
        pred_industry = np.argmax(
            industry_logits_all.detach().to("cpu").numpy(), axis=1
        )
        # Build the output lists of predicted and reference values.
        pred_label_test += list(pred_label)
        pred_industry_test += list(pred_industry)
        answer_label_test += list(test_data_y[data_index : data_index + batch_size])
        answer_indesutry_test += list(
            test_data_industry[data_index : data_index + batch_size]
        )
    # Print classification metrics for the sectors.
    target_sectors = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    print(
        classification_report(
            answer_label_test, pred_label_test, labels=target_sectors
        )
    )
    return (
        pred_label_test,
        answer_label_test,
        pred_industry_test,
        answer_indesutry_test,
    )
4b0d97c647f9e49600a149a0f5144744ea78f8bc
3,649,573
def is_no_op(module: Module) -> bool: """Return whether the module does no operation in graph. Args: module: module Returns: whether module is no operation """ no_op_modules = (Sequential, _Branch, Parallel, ReduceTuple, GraphModule) return isinstance(module, no_op_modules)
6b5a765be41353596a500e6827800868daa16386
3,649,574
def colors_from_cmap(length=50, cmap=None, start=None, stop=None):
    """Return color cycle from a given colormap.

    Parameters
    ----------
    length : int
        The number of colors in the cycle. When `length` is large (> ~10), it
        is difficult to distinguish between successive lines because successive
        colors are very similar.
    cmap : str
        Name of a matplotlib colormap (see matplotlib.pyplot.cm).
    start, stop: 0 <= float <= 1
        Limit colormap to this range (start < stop < 1). You should limit the
        range of colormaps with light values (assuming a white background).
        Some colors have default start/stop values (see `CMAP_RANGE`).

    Returns
    -------
    colors : list
        List of RGBA colors.

    See Also
    --------
    cycle_cmap
    """
    if cmap is None:
        cmap = config['color']['cmap']
    if isinstance(cmap, str):
        cmap = getattr(plt.cm, cmap)

    # Copy into a mutable list so user-supplied start/stop can override the
    # per-colormap defaults.
    crange = list(CMAP_RANGE.get(cmap.name, (0, 1)))
    if start is not None:
        crange[0] = start
    if stop is not None:
        crange[1] = stop

    assert 0 <= crange[0] <= 1
    assert 0 <= crange[1] <= 1

    idx = np.linspace(crange[0], crange[1], num=length)
    return cmap(idx)
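# Usage sketch for colors_from_cmap. CMAP_RANGE, config, plt, and np are
# module-level globals in the original source; an empty CMAP_RANGE and an
# explicit cmap name are enough for a standalone demo.
import matplotlib.pyplot as plt
import numpy as np

CMAP_RANGE = {}  # stand-in for the module's per-colormap range defaults
colors = colors_from_cmap(length=5, cmap="viridis")
for y, c in enumerate(colors):
    plt.plot([0, 1], [y, y], color=c)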
e2c7c117ab3d463ee20548c15d3e7deee3a1879a
3,649,577
def mag_thresh(img, sobel_kernel=3, mag_thresh=(30, 100)): """ Return the magnitude of the gradient for a given sobel kernel size and threshold values """ # Apply the following steps to img # 1) Convert to grayscale gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # 2) Take the gradient in x and y separately sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel) sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel) # 3) Calculate the gradient magnitude mag_sobel = np.sqrt(sobelx**2 + sobely**2) # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8 scaled_sobel = np.uint8(255*mag_sobel/np.max(mag_sobel)) # 5) Create a binary mask where mag thresholds are met, zeros otherwise binary_output = np.zeros_like(scaled_sobel) binary_output[(scaled_sobel >= mag_thresh[0] ) & (scaled_sobel <= mag_thresh[1])] = 1 # 6) Return this mask as your binary_output image return binary_output
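# Usage sketch for mag_thresh on a synthetic RGB image (no file on disk
# assumed): a bright square on a black background produces strong gradient
# magnitudes along its edges.
import cv2
import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)
img[16:48, 16:48] = 255
edges = mag_thresh(img, sobel_kernel=3, mag_thresh=(30, 100))
print(int(edges.sum()))  # count of pixels whose gradient magnitude is in range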
c079ca591c4e35e69821d871d7f451aaaf867ef9
3,649,578
import numpy as np


def mean_absolute_percentage_error(predictions, targets):
    """Calculate mean absolute percentage error, ignoring zero targets."""
    # Mask zero targets first so the division never hits a zero denominator.
    mask = (targets != 0.0)
    return (np.fabs(targets - predictions)[mask]/targets[mask]).mean()*100.0
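# Quick check with made-up numbers: the zero target is masked out, so only
# the 10% and 5% errors contribute, giving 7.5.
preds = np.array([110.0, 0.0, 95.0])
targets = np.array([100.0, 0.0, 100.0])
print(mean_absolute_percentage_error(preds, targets))  # 7.5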
1f37da29c47035a3656d3d07b34ec26f862a80ac
3,649,579
def make_net_xds_list(data_xds_list, coords_per_xds): """Construct a list of dicts of xarray.Datasets to house the net gains. Args: data_xds_list: A List of xarray.Dataset objects containing MS data. coords_per_xds: A List of Dicts containing dataset coords. Returns: net_gain_xds_list: A List of xarray.Dataset objects to house the net gains. """ net_gain_xds_list = [] for data_xds, xds_coords in zip(data_xds_list, coords_per_xds): net_t_chunks = np.tile(data_xds.UTIME_CHUNKS, 2).reshape(2, -1) net_f_chunks = np.tile(data_xds.chunks["chan"], 2).reshape(2, -1) # Create a default config object, consistent with the net gain. # NOTE: If we have a direction-dependent model, assume the net gain # is also direction dependent. config = Gain(direction_dependent=bool(data_xds.dims["dir"])) net_obj = TERM_TYPES["complex"]("NET", config, data_xds, xds_coords, net_t_chunks, net_f_chunks) net_gain_xds_list.append(net_obj.make_xds()) return net_gain_xds_list
1546555e76d0f6bab4abc7985707f8be9fc19558
3,649,580
import collections def sort_dict(d, key=None, reverse=False): """ Sorts a dict by value. Args: d: Input dictionary key: Function which takes an tuple (key, object) and returns a value to compare and sort by. By default, the function compares the values of the dict i.e. key = lambda t : t[1] reverse: Allows to reverse sort order. Returns: OrderedDict object whose keys are ordered according to their value. """ kv_items = list(d.items()) # Sort kv_items according to key. if key is None: kv_items.sort(key=lambda t: t[1], reverse=reverse) else: kv_items.sort(key=key, reverse=reverse) # Build ordered dict. return collections.OrderedDict(kv_items)
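# Usage sketch for sort_dict: sort word counts descending by value, then by a
# custom key (here, the length of the word).
counts = {"tree": 3, "a": 1, "sun": 2}
print(sort_dict(counts, reverse=True))
# OrderedDict([('tree', 3), ('sun', 2), ('a', 1)])
print(sort_dict(counts, key=lambda t: len(t[0])))
# OrderedDict([('a', 1), ('sun', 2), ('tree', 3)])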
9ca904a5e0df3e3c50b29967adfe9061e778dfc9
3,649,581
import requests def check_builds(): """Base task""" response = requests.get( url=urljoin(Config.SISENSE_URL, "v2/builds"), headers=Config.SISENSE_HEADERS ) builds = pd.DataFrame(data=response.json()) failed_builds = builds.loc[(builds.status == "failed")] # for each failed cube: for build in failed_builds.to_dict(orient="records"): # check if failed cube is already recorded (oid), if not record recorded_failure = ( session.query(FailedBuilds).filter(FailedBuilds.oid == build["oid"]).first() ) if recorded_failure is None: # record record_failure( build["oid"], build["datamodelId"], build["datamodelTitle"], build["instanceId"], ) # save log and get elements for log card error_dict = get_logs(build["datamodelId"], build["datamodelTitle"]) # prepare card (so look into log) card = make_teams_card( build["datamodelTitle"], error_dict["timestamp"], error_dict["error_message"], error_dict["file_link"], ) # send card send_teams_card(card) return error_dict
38820f314ff8a57cbf5b7242a52b557905b0f1eb
3,649,582
def comp_neworig(tileid,dirn='/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'): """ check that new matches the original """ ts = str(tileid).zfill(6) fa = fitsio.read('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz') fn = fitsio.read(dirn+'fba-'+ts+'.fits') w = fn['DEVICE_TYPE'] == 'POS' fn = fn[w] wn = fn['TARGETID'] >= 0 fn = fn[wn] print(len(fn)) wa = fa['TARGETID'] >= 0 fa = fa[wa] print(len(fa)) ws = np.isin(fn['TARGETID'],fa['TARGETID']) print(np.sum(ws)) if np.sum(ws) == len(fa) and len(fa) == len(fn): return True else: return False
e7d1d4202b024508712e14de86341d3597c85314
3,649,583
def _get_widget_handler(webmanager):
    """
    Returns a handler to get the widgets

    :param WebManager webmanager:
    :return tornado.web.RequestHandler:
    """
    class WidgetHandler(web.RequestHandler):
        """
        Handler that serves the widget list over HTTP
        """

        def get(self):
            """
            Called when a client requests the widgets
            """
            webmanager.on_get_widgets(self)

    return WidgetHandler
734b081e3b92180356e88ca21418785d45662b64
3,649,584
def get_model_field_type(model, field_label): """ Returns model's field type. """ return FIELD_TYPES_MAPPING.get(type(get_model_field(model, field_label)), 'STRING')
aeba374954b25f0383015f56be41cdc5f9917ae3
3,649,585
def Normalize_Column_Scores(df, columns, norm_type='divide_by_max'):
    """Normalizes scores for specified columns in a pandas dataframe

    Parameters
    ----------
    df : a pandas DataFrame object that contains the specified columns

    columns: a list object that includes the columns to normalize

    norm_type : a string specifying the type of normalization to perform
        - 'divide_by_max' divides all values by the maximum value
        - 'range_norm' divides all values (+ the min) by the range of values in the column
        - 'z_norm' computes a z-score based on the mean and standard deviation of values
        - 'divide_by_sum' divides all values by the sum of the values
        - 'vector' divides all values by the square root of the sum of the squares of all values

    Returns
    -------
    temp_df: a copy of the passed dataframe with the normalizations performed

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> import mcdm_functions as mcfunc
    >>> data_dict = {'Product': ['A', 'B', 'C', 'D'],
                     'Product Advantage': [13.1,13.2,12.2,13.2],
                     'Strategic Alignment': [9.8,8.2,10.0,9.6],
                     'Technical Feasibility': [20.0,18.7,18.5,17.1],
                     'Market Attractiveness': [15.5,12.3,13.1,13.1]}
    >>> score_data = pd.DataFrame(data_dict)
    >>> score_data = score_data.set_index('Product')
    >>> print(score_data)
             Market Attractiveness  Product Advantage  Strategic Alignment  \
    Product
    A                         15.5               13.1                  9.8
    B                         12.3               13.2                  8.2
    C                         13.1               12.2                 10.0
    D                         13.1               13.2                  9.6

             Technical Feasibility
    Product
    A                         20.0
    B                         18.7
    C                         18.5
    D                         17.1
    >>> columns = ['Market Attractiveness','Product Advantage']
    >>> temp = mcfunc.Normalize_Column_Scores(score_data,columns)
    >>> print(temp)
             Market Attractiveness  Product Advantage  Strategic Alignment  \
    Product
    A                     1.000000               13.1                  9.8
    B                     0.793548               13.2                  8.2
    C                     0.845161               12.2                 10.0
    D                     0.845161               13.2                  9.6

             Technical Feasibility
    Product
    A                         20.0
    B                         18.7
    C                         18.5
    D                         17.1
    """
    temp_df = df.copy()

    for column in columns:
        # Compare with == rather than `is`: identity checks on string
        # literals rely on interning and are not reliable.
        if norm_type == 'divide_by_max':
            max_entry = temp_df[column].max()
            temp_df[column] = temp_df[column]/max_entry
        elif norm_type == 'range_norm':
            min_entry = temp_df[column].min()
            max_entry = temp_df[column].max()
            temp_df[column] = (temp_df[column]-min_entry)/(max_entry - min_entry)
        elif norm_type == 'z_norm':
            mean = temp_df[column].mean()
            sd = temp_df[column].std()
            temp_df[column] = (temp_df[column]-mean)/sd
        elif norm_type == 'divide_by_sum':
            temp_df[column] = temp_df[column]/temp_df[column].sum()
        elif norm_type == 'vector':
            values = temp_df[column].values
            values_squared = values**2
            vector_norm = values/np.sqrt(np.sum(values_squared))
            temp_df[column] = vector_norm
        else:
            print('You did not enter a valid type, so no changes were made')

    return temp_df
906fcf944b676e04120eb915e7ead24c97900f56
3,649,586
def find_most_common_word(sentence): """Return the most common word in the sentence.""" # Change to lowercase and strip out punctuation sentence = clean_sentence(sentence) list_of_words = sentence.split() word_to_count = dict() # Create a histogram of the occurrence of all words for word in list_of_words: if word not in word_to_count: word_to_count[word] = 1 else: word_to_count[word] += 1 most_common_word = '' highest_count = 0 # Find highest count in the histogram for word, count in word_to_count.items(): if count > highest_count: most_common_word, highest_count = word, count return most_common_word
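# Demo for find_most_common_word with a minimal stand-in for the module's
# clean_sentence helper (assumed to lowercase and strip punctuation; it is
# not part of this snippet).
import string

def clean_sentence(sentence):
    return sentence.lower().translate(str.maketrans("", "", string.punctuation))

print(find_most_common_word("The cat saw the dog."))  # "the"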
0c9e03fb4324999e73e2d036ab3dec53f1857fe8
3,649,588
def fast_rcnn_inference(boxes, scores, image_shapes, predictions, score_thresh, nms_thresh, topk_per_image):
    """
    Call `fast_rcnn_inference_single_image` for all images.

    Args:
        boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
            boxes for each image. Element i has shape (Ri, K * 4) if doing
            class-specific regression, or (Ri, 4) if doing class-agnostic
            regression, where Ri is the number of predicted objects for image i.
            This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
        scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
            Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
            for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
        image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
        predictions (list): Per-image predictions, forwarded unchanged to
            `fast_rcnn_inference_single_image`.
        score_thresh (float): Only return detections with a confidence score exceeding this
            threshold.
        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
            all detections.

    Returns:
        instances: (list[Instances]): A list of N instances, one for each image in the batch,
            that stores the topk most confidence detections.
        kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
            the corresponding boxes/scores index in [0, Ri) from the input, for image i.
    """
    result_per_image = [
        fast_rcnn_inference_single_image(
            boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image, prediction
        )
        for scores_per_image, boxes_per_image, image_shape, prediction in zip(scores, boxes, image_shapes, predictions)
    ]
    return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
24fec22cdd285d50b4512115b638f3b7499f47be
3,649,589
def GetVar(doc:NexDoc, varNumber, varType) -> NexVar: """Returns the reference to the specified variable.""" return NexRun("GetVar", locals())
355f51f3ffa9b0d5c5f835546ee38bc3e0784328
3,649,590
def putversenotes(): """Serves AJAX call for json data to save notes. See also [M:NOTESAVE.putVerseNotes][notesave.NOTESAVE.putVerseNotes]. Client code: [{noteverse.sendnotes}][noteversesendnotes] """ session.forget(response) Books = BOOKS() Note = NOTE(Books) NoteSave = NOTESAVE(Note) return NoteSave.putVerseNotes()
b1fece391a6e47c2f500a540d57e12c1c0c11279
3,649,591
def _valid_multiview_args(cfg_user, logger): """ Validates the "multiview" parameters of a json configuration file used for training. The function returns False if an error has occurred and True if all settings have passed the check. :param cfg_user: EasyDict, json configuration file imported as dictionary :param logger: logger instance :return: boolean, True if no errors have been detected, False otherwise """ error = False # Extract the input channel configuration (use the default setting if not specified by the user) if 'model' in cfg_user and 'input_channels' in cfg_user.model: # Use user setting input_config = cfg_user.model.input_channels else: # Use default setting input_config = cfg_default.model.input_channels if input_config != 'geom-multiview' and 'multiview' in cfg_user: logger.warning(f"The argument 'model': 'input_channels' is set to '{input_config}'. Hence, the multiview " "settings will be ignored.\n") elif input_config == 'geom-multiview' and 'multiview' in cfg_user: if not all_keys_known(cfg_user.multiview, arguments.MULTIVIEW_KEYS, logger): error = True if 'config' in cfg_user.multiview and cfg_user.multiview.config not in arguments.MULTIVIEW_CONFIG: logger.error(f"Unknown multiview configuration: '{cfg_user.multiview.config}'. Choose among " f"{arguments.MULTIVIEW_CONFIG} to specify 'config'.\n") error = True if error: logger.info('\n') else: logger.info('Settings check: ok.\n\n') return not error
1a60afbf956b5b7096ec21a52669b1aa85f54c7d
3,649,592
def apply_gradient_descent(var_list, obj, learning_rate=0.01):
    """
    Sets up the gradient descent optimizer

    Args:
        var_list: List of variables to optimize over.
        obj: Node of the objective to minimize
        learning_rate: Learning rate to run with (default ``0.01``). Set with ``LR``.

    Returns:
        The training op that minimizes ``obj`` over ``var_list``.
    """
    back_prop = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate,
        name='gradient_descent').minimize(loss=obj,
                                          var_list=var_list)
    return back_prop
97ed8db3e02412f2dfbe4e44b6835ed8fe754c57
3,649,594
import inspect
from inspect import Parameter
from typing import Callable, TypeVar, get_type_hints

from pydantic.typing import is_classvar  # helper from pydantic v1 (assumed dependency)

T = TypeVar("T")


def make_cls_accept_cls_annotated_deps(cls: type[T]) -> type[T]:
    """
    Make class `cls` accept class-annotated dependencies, performing the following
    modifications:

    - Update `__init__` function to set any class-annotated dependencies as instance attributes
    - Update `__signature__` attribute to indicate to FastAPI what arguments should be passed to
      the initializer
    """
    old_init: Callable[..., None] = cls.__init__
    old_signature = inspect.signature(old_init)
    old_params = list(old_signature.parameters.values())[1:]  # drop `self` param
    new_params = [
        param
        for param in old_params
        if param.kind not in {Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD}
    ]
    dep_names: list[str] = []
    for name, hint in get_type_hints(cls).items():
        if is_classvar(hint):
            continue
        dep_names.append(name)
        new_params.append(
            Parameter(
                name=name,
                kind=Parameter.KEYWORD_ONLY,
                annotation=hint,
                default=getattr(cls, name, Ellipsis),
            )
        )
    new_signature = old_signature.replace(parameters=new_params)

    def new_init(self: T, *args, **kwargs) -> None:
        # Pop the class-annotated dependencies and attach them as attributes
        # before delegating to the original initializer.
        for dep_name in dep_names:
            dep_value = kwargs.pop(dep_name)
            setattr(self, dep_name, dep_value)
        old_init(self, *args, **kwargs)

    setattr(cls, "__init__", new_init)
    setattr(cls, "__signature__", new_signature)

    return cls
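# Usage sketch (hypothetical Greeter class): the class-level annotation
# becomes a keyword-only __init__ parameter that is stored as an instance
# attribute, which is how FastAPI-style class dependencies receive values.
class Greeter:
    prefix: str

Greeter = make_cls_accept_cls_annotated_deps(Greeter)
g = Greeter(prefix="Hello")
print(g.prefix)  # "Hello"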
529b73cd76adde9868bebec2a91b323c679fcdd0
3,649,595
def getReviewRedirect(entity, params): """Returns the redirect to review the specified entity. """ return '/%s/review/%s' % ( params['url_name'], entity.key().id_or_name())
959ff6d0297ec54248ee725e93a79702512d00d7
3,649,596
def leapfrog_step(state,
                  target_log_prob_fn,
                  kinetic_energy_fn,
                  step_size,
                  rng=None):
  r"""Single step of leapfrog.

  Notes
  =====
  The canonical distribution is related to the energy of the system by

  .. math:: p(p, \theta) = \frac{1}{Z} \exp\left(-H(\theta, p)/T\right)

  For now, we assume that the kinetic energy takes the form

  .. math:: K(p) = \sum_i p_i^2 / (2 m_i)
  """
  del rng
  p, q, q_grad = state.momentum, state.state, state.state_grads
  p_half = tree_util.tree_multimap(lambda p, qg: p + 0.5 * step_size * qg, p,
                                   q_grad)
  _, grad_p_half = utils.call_fn_value_and_grad(kinetic_energy_fn, p_half)
  q_full = tree_util.tree_multimap(lambda q, ph: q + step_size * ph, q,
                                   grad_p_half)
  logprob, q_full_grad = utils.call_fn_value_and_grad(target_log_prob_fn,
                                                      q_full)
  p_full = tree_util.tree_multimap(lambda ph, qg: ph + 0.5 * step_size * qg,
                                   p_half, q_full_grad)
  return IntegratorState(q_full, q_full_grad, logprob, p_full)
78a98c9edaabefd0d6d13b8b83ee080b4a11e941
3,649,597
import requests def get_plugins_json(url: str = "https://repobee.org/plugins.json") -> dict: """Fetch and parse the plugins.json file. Args: url: URL to the plugins.json file. Returns: A dictionary with the contents of the plugins.json file. """ resp = requests.get(url) if resp.status_code != 200: plug.log.error(resp.content.decode("utf8")) raise plug.PlugError(f"could not fetch plugins.json from '{url}'") return resp.json()
f6d795d88d124d8cb68e2dad4d8a354af88525c1
3,649,598
def add_available_prefixes(parent, prefix_list): """ Create fake Prefix objects for all unallocated space within a prefix. """ # Find all unallocated space available_prefixes = IPSet(parent) ^ IPSet([p.prefix for p in prefix_list]) available_prefixes = [Prefix(prefix=p) for p in available_prefixes.iter_cidrs()] # Concatenate and sort complete list of children prefix_list = list(prefix_list) + available_prefixes prefix_list.sort(key=lambda p: p.prefix) return prefix_list
1df9f991f33e1a77b81b43de08c5f86f6acc7a20
3,649,599
def isready() -> bool: """Is the embedded R ready for use.""" INITIALIZED = RPY_R_Status.INITIALIZED return bool( rpy2_embeddedR_isinitialized == INITIALIZED.value )
ce9bc69c897004f135297331c33101e30e71dca7
3,649,600
from typing import Optional from typing import Dict from typing import Any from typing import Tuple import types def create_compressed_model(model: tf.keras.Model, config: NNCFConfig, compression_state: Optional[Dict[str, Any]] = None) \ -> Tuple[CompressionAlgorithmController, tf.keras.Model]: """ The main function used to produce a model ready for compression fine-tuning from an original TensorFlow Keras model and a configuration object. :param model: The original model. Should have its parameters already loaded from a checkpoint or another source. :param config: A configuration object used to determine the exact compression modifications to be applied to the model. :param compression_state: compression state to unambiguously restore the compressed model. Includes builder and controller states. If it is specified, trainable parameter initialization will be skipped during building. :return: A tuple (compression_ctrl, compressed_model) where - compression_ctrl: The controller of the compression algorithm. - compressed_model: The model with additional modifications necessary to enable algorithm-specific compression during fine-tuning. """ model = get_built_model(model, config) original_model_accuracy = None if is_accuracy_aware_training(config, compression_config_passed=True): if config.has_extra_struct(ModelEvaluationArgs): evaluation_args = config.get_extra_struct(ModelEvaluationArgs) original_model_accuracy = evaluation_args.eval_fn(model) builder = create_compression_algorithm_builder(config, should_init=not compression_state) if compression_state: builder.load_state(compression_state[BaseController.BUILDER_STATE]) compressed_model = builder.apply_to(model) compression_ctrl = builder.build_controller(compressed_model) compressed_model.original_model_accuracy = original_model_accuracy if isinstance(compressed_model, tf.keras.Model): compressed_model.accuracy_aware_fit = types.MethodType(accuracy_aware_fit, compressed_model) return compression_ctrl, compressed_model
42ffc9c9426ce8b95db05e042fa2d51098fc544f
3,649,602
def load_misc_config(): """Load misc configuration. Returns: Misc object for misc config. """ return Misc(config.load_config('misc.yaml'))
b1eb2e8cc3e836b846d292c03bd28c4449d80805
3,649,603
def filter_activations_remove_neurons(X, neurons_to_remove): """ Filter activations so that they do not contain specific neurons. .. note:: The returned value is a view, so modifying it will modify the original matrix. Parameters ---------- X : numpy.ndarray Numpy Matrix of size [``NUM_TOKENS`` x ``NUM_NEURONS``]. Usually the output of ``interpretation.utils.create_tensors`` neurons_to_remove : list or numpy.ndarray List of neurons to remove Returns ------- filtered_X : numpy.ndarray view Numpy Matrix of size [``NUM_TOKENS`` x ``NUM_NEURONS - len(neurons_to_remove)``] """ neurons_to_keep = np.arange(X.shape[1]) neurons_to_keep[neurons_to_remove] = -1 neurons_to_keep = np.where(neurons_to_keep != -1)[0] return X[:, neurons_to_keep]
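# Quick check for filter_activations_remove_neurons: dropping neurons 1 and 3
# from a 2x4 activation matrix keeps columns 0 and 2.
import numpy as np

X = np.arange(8).reshape(2, 4)
print(filter_activations_remove_neurons(X, [1, 3]))
# [[0 2]
#  [4 6]]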
711a858f8d28e5d0909991d85538a24bf063c523
3,649,604
def adaptive_threshold(im, block_size, constant, mode=cv2.THRESH_BINARY):
    """
    Performs an adaptive threshold on an image

    Uses cv2.ADAPTIVE_THRESH_GAUSSIAN_C:
        threshold value is the weighted sum of neighbourhood values where
        weights are a gaussian window.

    Uses cv2.THRESH_BINARY:
        Pixels below the threshold set to black
        Pixels above the threshold set to white

    Parameters
    ----------
    im: numpy array containing an image
    block_size: the size of the neighbourhood area
    constant: subtracted from the weighted sum
    mode: thresholding mode passed to cv2.adaptiveThreshold
        (default cv2.THRESH_BINARY)
    """
    out = cv2.adaptiveThreshold(
        im, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, mode, block_size, constant
    )
    return out
c237a0bb05dc8a43495f60ef9d8157c4b9c4bf1f
3,649,605
def get_loss(stochastic, variance_regularizer): """Get appropriate loss function for training. Parameters ---------- stochastic : bool determines if policy to be learned is deterministic or stochastic variance_regularizer : float regularization hyperparameter to penalize high variance policies Returns ------- Keras loss function to use for imitation learning. """ if stochastic: return negative_log_likelihood_loss(variance_regularizer) else: return tf.keras.losses.mean_squared_error
e78d47c31a7762bcb091ea1a314348c27f2174b7
3,649,606
import copy def simul_growth_ho_amir(nbstart, run_time, params, name): """Simulate the Ho and Amir model (Front. in Microbiol. 2015) with inter-initiation per origin adder and timer from initiation to division Parameters ---------- nbstart : int number of cells to simulate run_time: int number of iterations params: dict experimental parameters name: str name of runs Returns ------- cells : list of dict Each element of the list is a cell cycle defined by a dictionary of features (Lb, Ld etc.) """ #initialize birth length and growth rate L0 = np.exp(np.random.normal(params['Lb_logn_mu'],params['Lb_logn_sigma'],size=nbstart)) tau = np.exp(np.random.normal(params['tau_logn_mu'], params['tau_logn_sigma'], size=nbstart)) #standard value of growth rate. Used to scale the noise appropriately normval = np.exp(params['tau_logn_mu']) #initialize the inter-initiation adder (exact procedure doesn't really matter here) #as all cells start with n_ori = 1, there's no initiation to division adder running DLi = np.random.normal(params['DLi_mu'], params['DLi_sigma'], size=nbstart) #time from initiation to division tid_mu = 90 tid_var = 5 Tid = np.random.normal(tid_mu, tid_var, size=nbstart) #initialize cell infos as a list of dictionaries. All cells start with n_ori = 1 cells = {} for x in range(nbstart): dict1 = {'Lb': L0[x],'L':L0[x], 'gen': str(x), 'tau':tau[x], 'Lt': [[0,L0[x],1]], 'finish': False, 'born':0, 'DLi': [[0,DLi[x]]],'DLdLi': [],'Li':[],'Ti':[], 'numori':1,'Ld':np.nan, 'numori_born':1,'name': name,'mLi':np.nan, 'mLd':np.nan, 'rfact':0.5, 'Tid': [[0,Tid[x]]]} cells[str(x)] = dict1 for t in range(run_time): divide_cell = [] for x in cells: if cells[x]['finish']==False: #update cell size cells[x]['L'] = cells[x]['L']*(2**(1/cells[x]['tau'])) cells[x]['Lt'].append([t,cells[x]['L'],cells[x]['numori']]) #increment the most recent inter-initiation adder cells[x]['DLi'][-1][0] = cells[x]['DLi'][-1][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]) #if at least one volume counter since RI is running, increment all of them if len(cells[x]['DLdLi'])>0: cells[x]['DLdLi'] = [[k[0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]),k[1]] for k in cells[x]['DLdLi']] cells[x]['Tid'] = [[k[0]+1,k[1]] for k in cells[x]['Tid']] #if a volume counter has reached its limit divide if len(cells[x]['DLdLi'])>0: if (cells[x]['numori']>1) and (cells[x]['Tid'][0][0]>cells[x]['Tid'][0][1]): cells[x]['finish'] = True#tag cell as finished cells[x]['Ld'] = cells[x]['L'] cells[x]['Td'] = len(cells[x]['Lt']) cells[x]['Td_abs'] = t cells[x]['d_Ld_Lb'] = cells[x]['L']-cells[x]['Lb'] #assign the correct adders (the oldest ones) to the cell that just divided cells[x]['final_DLdLi'] = cells[x]['DLdLi'][0][0] cells[x]['final_DLi'] = cells[x]['DLi'][0][1] cells[x]['final_Li'] = cells[x]['Li'][0] cells[x]['final_Tid'] = cells[x]['Tid'][0][1] #for each accumulated variable suppress the oldest one if len(cells[x]['DLdLi'])==1: cells[x]['DLdLi'] = [] else: cells[x]['DLdLi'].pop(0) if len(cells[x]['Tid'])==1: cells[x]['Tid'] = [] else: cells[x]['Tid'].pop(0) if len(cells[x]['DLi'])==1: cells[x]['DLi'] = [] else: cells[x]['DLi'].pop(0) if len(cells[x]['Li'])==1: cells[x]['Li'] = [] else: cells[x]['Li'].pop(0) divide_cell.append(x) #if the added volume has reached its limit make new RI if cells[x]['DLi'][-1][0]>cells[x]['DLi'][-1][1]: #duplicate origin cells[x]['numori'] = cells[x]['numori']*2 #Version where adder is noisy itself newdli = cells[x]['numori']*np.random.normal(params['DLi_mu'], params['DLi_sigma']) cells[x]['DLi'].append([0,newdli]) 
                    cells[x]['Li'].append(cells[x]['L'])
                    #temporarily store TL_S as absolute time
                    cells[x]['Ti'].append(t)

                    #Version where adder itself is noisy
                    new_dv = cells[x]['numori']*np.exp(np.random.normal(params['DLdLi_logn_mu'], params['DLdLi_logn_sigma']))
                    cells[x]['DLdLi'].append([0,new_dv])
                    cells[x]['Tid'].append([0,np.random.normal(tid_mu, tid_var)])

        for x in divide_cell:
            #Draw division ratio
            rfact = 1/(1+np.random.normal(1,params['div_ratio']))

            #Create new cell using mother information
            new_tau = np.exp(correlated_normal(np.log(cells[x]['tau']), params['tau_logn_mu'], params['tau_logn_sigma'], params['tau_corr']))
            new_Lb = copy.deepcopy(rfact*cells[x]['L'])
            new_L = copy.deepcopy(rfact*cells[x]['L'])
            new_Lt = [[t,copy.deepcopy(rfact*cells[x]['L']),copy.deepcopy(cells[x]['numori'])/2]]
            new_DLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLi']])
            new_DLdLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLdLi']])
            new_Tid = copy.deepcopy(cells[x]['Tid'])
            new_Li = copy.deepcopy([rfact*y for y in cells[x]['Li']])
            new_numori = copy.deepcopy(cells[x]['numori'])/2
            mother_initL = copy.deepcopy(cells[x]['final_Li'])/2
            mother_Ld = copy.deepcopy(cells[x]['Ld'])

            dict1 = {'Lb': new_Lb,'L': new_L, 'gen': str(x)+'B', 'tau': new_tau,'Lt': new_Lt,
                     'finish': False, 'born':t, 'DLi': new_DLi,'DLdLi': new_DLdLi,'Tid': new_Tid,
                     'Li':new_Li,'Ti':[], 'numori':new_numori, 'numori_born':copy.deepcopy(new_numori),'Ld':np.nan,
                     'name': name,'mLi': mother_initL, 'mLd':mother_Ld, 'rfact':rfact}
            cells[x+'B'] = copy.deepcopy(dict1)

            #keep oldest timer as final timer and give daughter remaining ones.
            #Calculate initiation time based on cell birth.
            TL_S_val = copy.deepcopy(cells[x]['Ti'].pop(0))
            cells[x+'B']['Ti'] = copy.deepcopy(cells[x]['Ti'])
            cells[x]['Ti'] = TL_S_val-copy.deepcopy(cells[x]['born'])

    for x in cells:
        if len(cells[x]['Li'])>0:
            cells[x]['Li'] = np.nan

    return cells
fa4d35cfd26dbcb08217b3ffee6cf4e3e7431a08
3,649,607
def variable_id(variable): """Return variable identification for .dot file""" if isinstance(variable, FileAccess): return "a_{}".format(variable.id) act_id = variable.activation_id act_id = "global" if act_id == -1 else act_id return "v_{}_{}".format(act_id, variable.id)
b68fd9d6b08a537768dc82b7925f0cb6f383428e
3,649,608
def node_set_power_state(request, node_id, state, soft=False): """Set power state for a given node. :param request: HTTP request. :param node_id: The UUID or name of the node. :param state: the power state to set ['on', 'off', 'reboot']. :param soft: flag for graceful power 'off' or reboot :return: node. http://docs.openstack.org/developer/python-ironicclient/api/ironicclient.v1.node.html#ironicclient.v1.node.NodeManager.set_power_state """ return ironicclient(request).node.set_power_state(node_id, state, soft)
e94a13f4a797d31bd0eae24803a782b049ea44dc
3,649,609
import sympy def __sympyToC_Grad(exprs: list, doOpts: bool = False) -> str: """ creates C code from a list of sympy functions (somewhat optimized). source: https://stackoverflow.com/questions/22665990/optimize-code-generated-by-sympy and modified """ tmpsyms = sympy.numbered_symbols("tmp") if doOpts: symbols, simple = sympy.cse(exprs, symbols=tmpsyms, optimizations="basic", order='none') else: symbols, simple = sympy.cse(exprs, symbols=tmpsyms) c_code = "" for s in symbols: c_code += " double " +sympy.ccode(s[0]) + " = " + sympy.ccode(s[1]) + ";\n" for i,s in enumerate(simple): c_code += f" out({i}) = " + sympy.ccode(s) + ";\n" return c_code
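# Usage sketch for __sympyToC_Grad on a tiny gradient vector; the emitted C
# assigns each component to an out(i) slot.
x, y = sympy.symbols("x y")
print(__sympyToC_Grad([2 * x + y, x * y], doOpts=False))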
33a95d99b19458ac7b8dd8d8e4272485b0f5f206
3,649,610
def index():
    """User friendly index page at the root of the server;
    guides the user to the reports
    """
    return render_template('index.html')
0e810716e0bbfae98736bc13f458636eb33dc87d
3,649,612
from astropy.io import ascii  # astropy's ASCII table reader


def read_lookup(infile):

    """
    -----------------------------------------------------------------------------
    Read data from a lookup database.

    Inputs:

    infile      [string] Input file containing the lookup data base.

    Outputs:

    [tuple] each element of the tuple is a numpy array. The elements in order
    are x-coordinates, y-coordinates, data value at those coordinates. The data
    values are real or complex depending on whether the lookup table has an
    'imag_value' column
    -----------------------------------------------------------------------------
    """

    if not isinstance(infile, str):
        raise TypeError('Input parameter infile must be of string data type')

    try:
        cols = ascii.read(infile, data_start=1, comment='#')
    except IOError:
        raise IOError('Could not read the specified file: '+infile)

    if 'imag_value' in cols.colnames:
        return cols['x'].data, cols['y'].data, cols['real_value'].data+1j*cols['imag_value'].data
    else:
        return cols['x'].data, cols['y'].data, cols['real_value'].data
a86a2e8da2580e66656f8328488941c402383c60
3,649,613
import json def event_detail(request, id): """ Return a JSON dict mapping for event given id """ event = get_object_or_404(Event, pk=id) event_dict = { "success": 1, "result": [{ "id": event.id, "title": event.title, "description": event.description, "created_date": event.created_date.strftime('%Y/%m/%d'), "location": event.location }] } return HttpResponse(json.dumps(event_dict), content_type="application/json")
4b4083a81d5de90e9156f05d9f7b0375981a42d0
3,649,615
import logging

import cirq


def prepare_state(qubits: list[cirq.Qid], x: int) -> list[cirq.Operation]:
    """Prepare qubits into an initial state.

    Args:
        qubits: The qubits to prepare.
        x: The initial state of the qubits. Must be non-negative.

    Returns:
        A list of operations to prepare the qubits.

    Raises:
        ValueError: If `x` is negative.
    """
    if x < 0:
        raise ValueError(f"`x` must be non-negative, got {x}.")
    gates = list()
    if size_in_bits(x) > len(qubits):
        logging.warning(f"prepare_state: `x` ({x}) cannot fit into {len(qubits)} qubits; some bits will be dropped.")
    for q in qubits:
        if x % 2:
            gates.append(cirq.X(q))
        x >>= 1
    return gates
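# Usage sketch for prepare_state, with a stand-in for the module's
# size_in_bits helper (assumed to be the bit length of x). 5 == 0b101, so X
# gates land on qubits 0 and 2 (least-significant bit first).
def size_in_bits(x: int) -> int:
    return x.bit_length()

qubits = cirq.LineQubit.range(3)
circuit = cirq.Circuit(prepare_state(qubits, 5))
print(circuit)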
f11a4ddd83a6e2d1d7348c8ef3b5693a26e3e26d
3,649,616
def manage(id): """Manage room request.""" room_request = RoomRequest.query.get(id) if room_request is None: return abort(404) return render_template('room_request/manage.html', room_request=room_request)
5a565342adbe53a647cb622e4688d1c26d88078d
3,649,617
def ger(self, y):
    """Compute an outer product between two vectors"""
    assert self.dim() == 1 and y.dim() == 1, "Outer product must be on 1D tensors"
    return self.view((-1, 1)).matmul(y.view((1, -1)))
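# Usage sketch for ger: although written as a tensor method (note `self`,
# likely from an MPC tensor class), it works when called directly on plain
# torch tensors.
import torch

a = torch.tensor([1.0, 2.0])
b = torch.tensor([3.0, 4.0])
print(ger(a, b))
# tensor([[3., 4.],
#         [6., 8.]])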
003dda3dd678fdcf35f63f80c064586320c97d23
3,649,618
import pandas as pd
from sqlalchemy import create_engine


def load_data(database_filepath):
    """
    Input:
        1. database_filepath: the path of cleaned datasets
    Output:
        1. X: all messages
        2. y: category columns generated by cleaning process
        3. category_names: category columns' names
    Process:
        1. Read-in the dataframe
        2. Select required datasets
        3. Generate category columns' names
    """
    # 1. Read-in dataframe
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql_table(database_filepath, engine)

    # 2. Select required datasets
    X = df['message']
    y = df.iloc[:, 4:]

    # 3. Generate category columns' names
    category_names = y.columns

    return X, y, category_names
15ec78cfac2dfde9294061432514001b21967b93
3,649,619