content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (range 0 to 3.66M)
def get_transformation_id(action):
    """
    Get the id of a transformation.

    Parameters
    ----------
    action: function
        The transformation function

    Returns
    -------
    int
        The id of the action (-1 if not found)
    """
    for index, trans in TRANSFORMATIONS.items():
        if trans == action:
            return index
    return -1
2f08e7bb2b0418d39421e6b03e011d8ab4d68380
5,928
def getString(t):
    """If t is of type string, return it, otherwise raise InvalidTypeError.
    """
    s = c_char_p()
    if PL_get_chars(t, byref(s), REP_UTF8 | CVT_STRING):
        return s.value
    else:
        raise InvalidTypeError("string")
1f128369f1ce3950ed352e43eea5db30f6da2d6e
5,929
def prep_data(filename, in_len, pred_len):
    """Load data from the file and chunk it into windows of input."""
    # Columns are
    # 0:datetime, 1:temperature, 2:humidity, 3:pressure, 4:wind_direction, 5:wind_speed
    data = np.genfromtxt(filename, delimiter=',', skip_header=1,
                         usecols=(1, 2, 3, 4, 5), dtype=float)
    # Remove rows that are missing values
    data = data[~np.isnan(data).any(axis=1)]

    # We will save the last 1/8th of the data for validation/testing data,
    # 1/16 for validation, 1/16 for testing
    total_len = data.shape[0]
    val_len = total_len // 16
    test_len = total_len // 16
    train_len = total_len - val_len - test_len
    train_data = data[:train_len]
    val_data = data[train_len:train_len + val_len]
    test_data = data[train_len + val_len:]

    # To stay in the most accurate ranges of the ESN, and to put the various
    # features on equal footing, we standardize the training data.
    train_data, mu_arr, sigma_arr = standardize_traindata(train_data)

    # We now need to scale our validation and test data by the means and
    # standard deviations determined from the training data
    val_data = scale_data(val_data, mu_arr, sigma_arr)
    test_data = scale_data(test_data, mu_arr, sigma_arr)

    # We need to convert the time series data to forecast form for one-step
    # prediction training. For simplicity we will discard the remainder
    # batches rU, rY
    train_batch_size = 200
    val_batch_size = in_len + pred_len + 1
    test_batch_size = in_len + pred_len + 1
    trainU, trainY, rU, rY = to_forecast_form(train_data, batch_size=train_batch_size)
    valU, valY, rU, rY = to_forecast_form(val_data, batch_size=val_batch_size)
    testU, testY, rU, rY = to_forecast_form(test_data, batch_size=test_batch_size)

    return trainU, trainY, valU, valY, testU, testY, mu_arr, sigma_arr
33e1348acdcf6025159b7ed81e18358d56838d3e
5,930
def _get_security_group_id(connection, security_group_name):
    """
    Takes a security group name and returns the ID. If the name cannot be
    found, the name will be attempted as an ID. The first group found by
    this name or ID will be used.

    :param connection:
    :param security_group_name:
    :return:
    """
    if not security_group_name:
        print('The bees need a security group to run under. Need to open a port from where you are to the target '
              'subnet.')
        return

    # Try by name
    security_groups = connection.describe_security_groups(
        Filters=[{'Name': 'group-name', 'Values': [security_group_name, ]}, ]
    )
    security_groups = security_groups['SecurityGroups']
    if not security_groups:
        # Try by id
        security_groups = connection.describe_security_groups(
            Filters=[{'Name': 'group-id', 'Values': [security_group_name, ]}, ]
        )
        security_groups = security_groups['SecurityGroups']
        if not security_groups:
            print('The bees need a security group to run under. The one specified was not found. '
                  'Create a sg that has access to port 22, i.e. from 0.0.0.0/0')
            return

    return security_groups[0]['GroupId'] if security_groups else None
70c9b8357a9634043f07ad0019ff3cc621ba859c
5,932
def viz_preprocessing(df_path):
    """
    Preprocess the aggregation csv into a good format for visualization
    """
    df = pd.read_csv(df_path)
    res = df.T
    res = res.rename(columns=res.iloc[0]).drop(res.index[0])
    res = res.astype("int64")
    res.reset_index(inplace=True)
    res["index"] = res["index"].apply(
        lambda x: "{}-{}-{}".format(x[0:4], x[4:6], x[6:])
    )
    res["index"] = pd.to_datetime(res["index"])

    return res
fc1c39d094934aa47ac26f6e5a70f071c1df4fbd
5,933
def adjacency(G, nodelist=None, weight="weight"):
    """
    Returns the sparse adjacency matrix representation of the graph.
    """
    if nodelist is None:
        nodelist = G.nodes()
    A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,
                                  format="csr")
    return A
e17c0030a7d2c4e13659ca3585820e1c8da89101
5,935
from datetime import timedelta


def sample_movie(user, **params):
    """Create and return a movie"""
    defaults = {
        'title': 'A Walk to Remember',
        'duration': timedelta(hours=2, minutes=15),
        'price': 8.99
    }
    defaults.update(params)

    return Movie.objects.create(user=user, **defaults)
d07716fbe4b043022592ae2465bb02d02f45fe41
5,936
import difflib


def lines_diff(lines1, lines2):
    """Show difference between lines."""
    is_diff = False
    diffs = list()
    for line in difflib.ndiff(lines1, lines2):
        if not is_diff and line[0] in ('+', '-'):
            is_diff = True
        diffs.append(line)
    return is_diff, diffs
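A quick usage sketch for lines_diff (the inputs here are invented for illustration):

# Two line lists that differ in their second element.
changed, diff = lines_diff(["a", "b"], ["a", "c"])
print(changed)  # -> True
print(diff)     # -> ['  a', '- b', '+ c'] (difflib.ndiff markers)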
50916d46871980fadfd854dc698481a4b0f35834
5,937
import re


def parse_ipmi_hpm(output):
    """Parse the output of the hpm info retrieved with ipmitool"""
    hrdw = []
    line_pattern = re.compile(
        r'^\|[^0-9]*([0-9]+)\|[^a-zA-Z ]* ?([^\|]*)\|([^\|]*)\|([^\|]*)\|([^\|]*)\|')
    for line in output:
        match = line_pattern.match(line)
        if match:
            name = match.group(2).strip()
            version = match.group(3).strip().split(" ")[0]
            hrdw.append(('firmware', name, 'version', version))
    return hrdw
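For reference, a minimal sketch of the pipe-delimited row this regex accepts; the sample line below is invented to match the pattern and is not guaranteed to mirror real ipmitool output:

# Hypothetical `ipmitool hpm info` table row.
sample = ["|    2| BMC FW        |1.10 99ab|  ok |  ok |"]
print(parse_ipmi_hpm(sample))
# -> [('firmware', 'BMC FW', 'version', '1.10')]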
001731ce46fa6bbdb5103727265a0bdd353773be
5,938
def get_genes_and_pathways(reactions, r_numbers, species):
    """Returns a CSV-formatted string with the list of genes and pathways
    where the reaction(s) of 'species' appear.

    :param reactions: list of reactions for species
    :param r_numbers: RNumbers object
    :param species: KEGG organism code
    :return: CSV-formatted string with genes and pathways where reactions
        of species are present
    """
    gene_set = set()
    pathway_set = set()
    for reaction in reactions:
        organism = r_numbers.find(reaction).find(species)
        assert organism is not None
        for gene in organism.genes:
            gene_set.add(gene.replace(species + ':', ''))
        for pathway in organism.pathways:
            pathway_set.add(pathway)
    gene_col = ' '.join(sorted(gene_set))
    pathway_col = ' '.join(sorted(pathway_set))
    return gene_col.rstrip() + ';' + pathway_col.rstrip() + ';'
0ecddcaf50650b04125be73bcf6b304a77df011d
5,939
def relate_ca(assessment, template):
    """Generates custom attribute list and relates it to Assessment objects

    Args:
        assessment (model instance): Assessment model
        template: Assessment Template instance (may be None)
    """
    if not template:
        return None

    ca_definitions = all_models.CustomAttributeDefinition.query.options(
        orm.undefer_group('CustomAttributeDefinition_complete'),
    ).filter_by(
        definition_id=template.id,
        definition_type="assessment_template",
    ).order_by(
        all_models.CustomAttributeDefinition.id
    )
    created_cads = []
    for definition in ca_definitions:
        cad = all_models.CustomAttributeDefinition(
            title=definition.title,
            definition=assessment,
            attribute_type=definition.attribute_type,
            multi_choice_options=definition.multi_choice_options,
            multi_choice_mandatory=definition.multi_choice_mandatory,
            mandatory=definition.mandatory,
            helptext=definition.helptext,
            placeholder=definition.placeholder,
        )
        db.session.add(cad)
        created_cads.append(cad)
    return created_cads
31744ac40f385746e6d4e13a97ed461312280d99
5,941
def getSenderNumberMgtURL(request):
    """
    Return the URL of the sender-number management popup.
    - Per security policy, the returned URL is only valid for 30 seconds.
    - https://docs.popbill.com/fax/python/api#GetSenderNumberMgtURL
    """
    try:
        # Popbill member business registration number
        CorpNum = settings.testCorpNum

        # Popbill member ID
        UserID = settings.testUserID

        url = faxService.getSenderNumberMgtURL(CorpNum, UserID)

        return render(request, 'url.html', {'url': url})
    except PopbillException as PE:
        return render(request, 'exception.html',
                      {'code': PE.code, 'message': PE.message})
371ca0a813c54061c68af34719ca132081f0bfda
5,942
def closest_match(match, specs, depth=0):
    """
    Recursively iterates over type, group, label and overlay key,
    finding the closest matching spec.
    """
    new_specs = []
    match_lengths = []
    for i, spec in specs:
        if spec[0] == match[0]:
            new_specs.append((i, spec[1:]))
        else:
            if all(isinstance(s[0], basestring) for s in [spec, match]):
                match_length = max(i for i in range(len(match[0]))
                                   if match[0].startswith(spec[0][:i]))
            elif is_number(match[0]) and is_number(spec[0]):
                m = bool(match[0]) if isinstance(match[0], np.bool_) else match[0]
                s = bool(spec[0]) if isinstance(spec[0], np.bool_) else spec[0]
                match_length = -abs(m - s)
            else:
                match_length = 0
            match_lengths.append((i, match_length, spec[0]))

    if len(new_specs) == 1:
        return new_specs[0][0]
    elif new_specs:
        depth = depth + 1
        return closest_match(match[1:], new_specs, depth)
    else:
        if depth == 0 or not match_lengths:
            return None
        else:
            return sorted(match_lengths, key=lambda x: -x[1])[0][0]
3a212d880004fad843fe2d254ac96315bd1d12cf
5,943
def average(w, axis=-1):
    """Calculate average

    Example:

    >>> w1 = Waveform([range(2), range(2)], array([[1.0, 3.0], [0.0, 5.0]]))
    >>> average(w1)
    Waveform(array([0, 1]), array([ 2. ,  2.5]))

    >>> w1 = Waveform([range(2), range(2)], array([[1.0, 3.0], [0.0, 5.0]]), \
                      xlabels=['row', 'col'])
    >>> average(w1, axis='row')
    Waveform(array([0, 1]), array([ 0.5,  4. ]))
    """
    return reducedim(w, np.mean(w._y, axis=w.getaxis(axis)),
                     axis=w.getaxis(axis))
bd5510e78c995e0a9f656144393b0496e071cdf5
5,944
def random():
    """Return a random parameter set for the model."""
    total_thickness = 10**np.random.uniform(2, 4.7)
    Nlayers = np.random.randint(2, 200)
    d_spacing = total_thickness / Nlayers
    thickness = d_spacing * np.random.uniform(0, 1)
    length_head = thickness * np.random.uniform(0, 1)
    length_tail = thickness - length_head
    Caille_parameter = np.random.uniform(0, 0.8)
    pars = dict(
        length_head=length_head,
        length_tail=length_tail,
        Nlayers=Nlayers,
        d_spacing=d_spacing,
        Caille_parameter=Caille_parameter,
    )
    return pars
958410bb8a696652b5a58cb15168719c2391179d
5,945
def extract_features_to_dict(image_dir, list_file):
    """extract features and save them with dictionary"""
    label, img_list = load_image_list(image_dir, list_file)
    ftr = feature
    integer_label = label_list_to_int(label)
    feature_dict = {'features': ftr,
                    'label': integer_label,
                    'label_original': string_list_to_cells(label),
                    'image_path': string_list_to_cells(img_list)}
    return feature_dict
2fe641d7bcc24f293fae0c8badf274c9f32051d4
5,946
def capitalize(s):
    """capitalize(s) -> string

    Return a copy of the string s with only its first character
    capitalized.
    """
    return s.capitalize()
1c9b86e2bbffc486d624e7305f303d517a282b75
5,948
def S_tunnel_e0(self, mu, sig, Efl, Efr, Tl, Tr):
    """energy flux

    Conduction band edge 0 at higher of the two
    """
    a = mu - sig/2
    b = mu + sig/2
    kTl = sc.k*Tl
    kTr = sc.k*Tr
    Blr = (a/kTl+1)*np.exp(-a/kTl) - (b/kTl+1)*np.exp(-b/kTl)
    Brl = (a/kTr+1)*np.exp(-a/kTr) - (b/kTr+1)*np.exp(-b/kTr)
    Slr = kTl**3*Blr*np.exp(Efl/kTl)
    Srl = -kTr**3*Brl*np.exp(Efr/kTr)
    # Slr = kTl**3*Blr
    # Srl = -kTr**3*Brl
    ret = self._cS*(Slr+Srl)
    return ret
224b115d7205994e897bc74010fd4f24d562cc6c
5,949
def to_camel_java(text, first_lower=True):
    """Returns the text in camelCase or CamelCase format for Java
    """
    return to_camelcase(text, first_lower=first_lower,
                        reserved_keywords=JAVA_KEYWORDS, suffix="_")
c14b102502d7caa1dc51511ffd3c97f736a5c17b
5,950
def rectangle_field(N_1, N_2, B_1, B_2, H, D, r_b):
    """
    Build a list of boreholes in a rectangular bore field configuration.

    Parameters
    ----------
    N_1 : int
        Number of borehole in the x direction.
    N_2 : int
        Number of borehole in the y direction.
    B_1 : float
        Distance (in meters) between adjacent boreholes in the x direction.
    B_2 : float
        Distance (in meters) between adjacent boreholes in the y direction.
    H : float
        Borehole length (in meters).
    D : float
        Borehole buried depth (in meters).
    r_b : float
        Borehole radius (in meters).

    Returns
    -------
    boreField : list of Borehole objects
        List of boreholes in the rectangular bore field.

    Examples
    --------
    >>> boreField = gt.boreholes.rectangle_field(N_1=3, N_2=2, B_1=5., B_2=5.,
                                                 H=100., D=2.5, r_b=0.05)

    The bore field is constructed line by line. For N_1=3 and N_2=2, the bore
    field layout is as follows::

        3   4   5

        0   1   2

    """
    borefield = []

    for j in range(N_2):
        for i in range(N_1):
            borefield.append(Borehole(H, D, r_b, x=i*B_1, y=j*B_2))

    return borefield
955bc7f2bf3a79d790683e7589010bc81af98f85
5,951
def convertHunit(conc, from_unit='H/10^6 Si', to_unit='ppm H2O',
                 phase='Fo90', printout=True):
    """
    Convert hydrogen concentrations to/from H/10^6 Si and ppm H2O.
    Based on Table 3 of Denis et al. 2013
    """
    if phase == 'Fo90':
        H_to_1_ppm = 16.35
    elif phase == 'opx':
        H_to_1_ppm = 11.49
    elif phase == 'cpx':
        H_to_1_ppm = 11.61
    else:
        print('Valid options for phase are Fo90, opx, and cpx')
        return

    if from_unit == 'H/10^6 Si':
        if to_unit == 'ppm H2O':
            new_conc = conc / H_to_1_ppm
        elif to_unit == 'per m3':
            new_conc = conc * (1.0/308.67) * (1e30)
        else:
            print('only going to units "ppm H2O" and "per m3"')
            return
    elif from_unit == 'ppm H2O':
        if to_unit == 'H/10^6 Si':
            new_conc = conc * H_to_1_ppm
        elif to_unit == 'per m3':
            new_conc = (conc * H_to_1_ppm) * (1.0/308.67) * (1e30)
        else:
            print('only going to "H/10^6 Si" or "per m3"')
            return
    elif from_unit == 'per m3':
        if to_unit == 'H/10^6 Si':
            new_conc = conc / ((1.0/308.67) * (1e30))
        elif to_unit == 'ppm H2O':
            new_conc = (conc / ((1.0/308.67) * (1e30))) / H_to_1_ppm
        else:
            print('only going to "H/10^6 Si" or "ppm H2O"')
            return
    else:
        print('Only going from H/10^6 Si, ppm H2O, and per m3 for now')
        return

    if printout is True:
        output = ' '.join(('{:.2f}'.format(conc), from_unit, '=',
                           '{:.2f}'.format(new_conc), to_unit, 'for', phase))
        print(output)

    return new_conc
fdd0646a09f3a2c3a8cbbc02410103caa9e023dd
5,952
import re


def countBasesInFasta(fastaFile):
    """
    Given a fasta file, return a dict where the number of records and
    the total number of bases are given by 'records' and 'bases'
    respectively.
    """
    recordRE = re.compile(r'^>')
    whiteSpaceRE = re.compile(r'\s+')
    total_bases = 0
    total_seqs = 0
    with open(fastaFile) as f:
        for line in f:
            if recordRE.match(line):
                total_seqs += 1
                continue
            total_bases += len(whiteSpaceRE.sub('', line))
    return {'records': total_seqs, 'bases': total_bases}
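A usage sketch that writes a tiny throwaway FASTA file and counts it; the file contents are invented for illustration:

import tempfile

# Two records: seq1 has 6 bases over two lines, seq2 has 4.
with tempfile.NamedTemporaryFile("w", suffix=".fa", delete=False) as tmp:
    tmp.write(">seq1\nACGT\nAC\n>seq2\nGGTT\n")
    path = tmp.name

print(countBasesInFasta(path))  # -> {'records': 2, 'bases': 10}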
45eaa5b8d36b4bae6b97bb29fdead1efc0aed8c2
5,953
import torchvision
import torch
from torchvision import transforms


def load_mnist_denoising(path_raw_dataset, batch_size=1, mu=0., sigma=0.6,
                         deterministic=True):
    """
    1. Get the MNIST dataset via PyTorch built-in APIs.
    2. Wrap it with customized wrapper with additive Gaussian noise processor
    3. Build PyTorch data loader objects.

    :param path_raw_dataset:
    :param batch_size:
    :param mu:
    :param sigma:
    :param deterministic:
    :return: dict of pytorch DataLoader objects.
        {
        'train': (iterable) [noisy_image, (clean_image, noise)]
            noisy_image shape: [batch, c, w, h]
            clean_image shape: [batch, c, w, h]
            noise shape:       [batch, 1, c, w, h]
        'val': (iterable) [noisy_image, (clean_image, noise)]
            noisy_image shape: [batch, c, w, h]
            clean_image shape: [batch, c, w, h]
            noise shape:       [batch, 1, c, w, h]
        }
    """
    MNIST = P.data_processor_wrapper(
        torchvision.datasets.MNIST,
        P.Processor_Denoising_AddGau(mu, sigma, deterministic, grayscale=True))
    transform_input = transforms.Compose([
        transforms.ToTensor(),
        P.TransTo3Channels()
    ])
    try:
        data_train = MNIST(root=path_raw_dataset, train=True, download=False,
                           transform=transform_input)
    except:
        torch_dataset_download_helper()
        data_train = MNIST(root=path_raw_dataset, train=True, download=True,
                           transform=transform_input)
    try:
        data_val = MNIST(root=path_raw_dataset, train=False, download=False,
                         transform=transform_input)
    except:
        torch_dataset_download_helper()
        data_val = MNIST(root=path_raw_dataset, train=False, download=True,
                         transform=transform_input)
    datasets = {'train': data_train, 'val': data_val}
    data_loaders = {i: torch.utils.data.DataLoader(datasets[i],
                                                   batch_size=batch_size,
                                                   shuffle=False)
                    for i in ['train', 'val']}
    return data_loaders
4dbd365a0fa6d795714aa90828fe7bb2cbc9b99f
5,954
def make_triplet_freqs(sentence, triplet_freqs):
    """Split a string into triplets (3-grams of morphemes)."""
    # Split into words with Janome
    t = Tokenizer()
    morphemes = [token.surface for token in t.tokenize(sentence)]
    if len(morphemes) < 3:
        return {}
    # Count every consecutive triplet
    for i in range(len(morphemes) - 2):
        triplet = tuple(morphemes[i:i+3])
        triplet_freqs[triplet] += 1
    # Add the BEGIN marker
    triplet = (BEGIN, morphemes[0], morphemes[1])
    triplet_freqs[triplet] = 1
    # Add the END marker
    triplet = (morphemes[-2], morphemes[-1], END)
    triplet_freqs[triplet] = 1
    return triplet_freqs
97fc3affd841e148f58de487d171df61745d17a9
5,955
def test_train_val_split(patient_id, sub_dataset_ids, cv_fold_number):
    """
    if cv_fold_number == 1:
        if patient_id in sub_dataset_ids[-5:]:
            return 'test'
        elif patient_id in sub_dataset_ids[-7:-5]:
            return 'validation'
        else:
            return 'train'

    elif cv_fold_number == 2:
        if patient_id in sub_dataset_ids[-10:-5]:
            return 'test'
        elif patient_id in sub_dataset_ids[-12:-10]:
            return 'validation'
        else:
            return 'train'

    # used for accumulating results of tests on cv1 and cv2
    if cv_fold_number == 3:
        if patient_id in sub_dataset_ids[-10:]:
            return 'test'
        elif patient_id in sub_dataset_ids[-12:-11]:
            return 'validation'
        else:
            return 'train'
    """
    if patient_id in [1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]:
        return 'test'
    elif patient_id == 36:
        return 'validation'
    else:
        return 'train'
129f3856875033505555241408577f8885c9c393
5,956
def test_striplog_colour_plot():
    """
    Tests mpl image of striplog with the ladder option.
    """
    legend = Legend.builtin('NSDOE')

    imgfile = "tutorial/M-MG-70_14.3_135.9.png"
    striplog = Striplog.from_image(imgfile, 14.3, 135.9, legend=legend)

    for iv in striplog:
        iv.data['porosity'] = iv.top.z / 100

    fig = striplog.plot(colour='porosity', aspect=3, return_fig=True)
    return fig
a76f01a5b6255a0dfe39aca7cc3e352787457d17
5,958
def searchArtist(artistName, session=models.session):
    """Search for artist. Returns models.ArtistSearch"""
    return models.ArtistSearch(artistName, session)
4fd9e45b633285a9ee1817a84508749d1ba724e7
5,960
def _ddnone():
    """allow defaultdict to be pickled"""
    return defaultdict(_none)
9a050e08b0c47bc789f0238489c679d01a42c1ba
5,961
def filter_shapely(feature):
    """
    feature1 = feature_extract(feature)
    feature2 = filter_shapely(feature1)
    """
    tmp = extract_Accumulation_entropy_list(feature)
    tmp2 = []
    for i in range(len(tmp)):
        if i != 0:
            tmp2.append(tmp[i] - tmp[i-1])
        else:
            tmp2.append(tmp[i])
    return tmp2
54654130340a3485a7de9a3d5a51d3def8a01037
5,963
def stations_by_river(stations):
    """Returns a dictionary mapping river names (key) to a list of
    station names (value)"""
    rivers_stations_dict = {}  # Create empty dictionary

    for i in range(len(stations)):  # Iterate through list of stations
        # Data type checks
        if type(stations[i]) is MonitoringStation:  # Checks if stations are correct class
            pass
        else:
            raise TypeError("ERROR: Station is not a MonitoringStation")
        if type(stations[i].name) is str:  # Checks if name is string
            pass
        else:
            raise TypeError("ERROR: Station 'name' attribute is not a string")
        if type(stations[i].river) is str:  # Checks if river is string
            pass
        else:
            raise TypeError("ERROR: Station 'river' attribute is not a string")

        if not stations[i].river in rivers_stations_dict:  # Checks if river is not in dictionary
            rivers_stations_dict[stations[i].river] = []  # Adds river to dictionary with blank list
        if not stations[i].name in rivers_stations_dict[stations[i].river]:
            # Adds station name to the river's list (the original checked the
            # dict keys here, which never contain station names)
            rivers_stations_dict[stations[i].river].append(stations[i].name)

    return rivers_stations_dict
d57bc06b60d6669bf6a10b7ad05363124f2312b5
5,964
def getCurrentProfile():
    """ Get the name of the current profile. """
    return __createJSON("GetCurrentProfile", {})
6627d01348d566f0d079b8e7bcf04e35ad6ed0ba
5,965
def get_params_from_request(req: web.Request) -> QueryParams:
    """
    Convert the request's query string into filter parameters.
    """
    page = int(req.rel_url.query.get('page', '1'))
    cursor = req.rel_url.query.get('cursor')
    sort = req.rel_url.query.get('sort')
    sort_dir = req.rel_url.query.get('sortDir')
    if sort and sort_dir == 'desc':
        sort = f'-{sort}'

    return QueryParams(
        page=page,
        cursor=int(cursor) if cursor else None,
        order_by=sort,
    )
b0deb4e5a1dc10fe82745e6c3c0869015424e2e0
5,966
def norm_mem_interval(pt):
    """Normalize membership in interval."""
    return pt.on_prop(arg_conv(binop_conv(auto.auto_conv())))
b50aa86d942fe1c2f35c6bcffae350042ff86090
5,967
def create_figure():
    """
    Creates a simple example figure.
    """
    fig = Figure()
    a = fig.add_subplot(111)
    t = np.arange(0.0, 3.0, 0.01)
    s = np.sin(2 * np.pi * t)
    a.plot(t, s)
    return fig
532a4eda745cb969f8ef60e66d6f63e761b8a5ff
5,968
def rdf_reader(src):
    """rdf = rdf_reader(src)

    src    rdf filename
    rdf    The RDF mapping object"""
    return RDF(*list(rdf_include(src)))
cf64ee6ed12a3e0d1667a537ac696918d26f80ba
5,969
def draw_signalData(Nsamp=1, alpha=__alpha, beta=__beta, **kwargs):
    """
    draw an SNR from the signal distribution
    """
    return np.array([ncx2.rvs(__noise_df, nc)
                     for nc in __draw_truncatedPareto(Nsamp, alpha=alpha, beta=beta)])
6dc320e2289c30a0e68696be71ded30066d7fa74
5,970
def choose_weighted_images_forced_distribution(num_images, images, nodes):
    """Returns a list of images to cache

    Enforces the distribution of images to match the weighted distribution
    as closely as possible. Factors in the current distribution of images
    cached across nodes. It is important to note that there may be
    circumstances which prevent this function from attaining the desired
    ideal distribution, but the function will always try its best to reach
    the desired distribution based on the specified weights.

    num_images - the number (integer) of images to choose to cache
    images - a list of ImageInputs to consider for caching
    nodes - a list of NodeInputs to use for determining which images need
        to be cached the most
    """
    named_distribution = _get_named_image_distribution(images, nodes)
    # Take the difference of the desired distribution with the current one.
    scaled_weights = _get_scaled_weights(
        images,
        _get_scale_factor_for_caching_nodes(num_images, images, nodes))
    distribution_difference = [
        [image, (scaled_weights[image.name] - named_distribution[image.name])]
        for image in images
    ]

    def decrement_distribution(distribution_pair, diff_dict):
        distribution_pair[1] -= 1

    return _pick_images(
        images,
        distribution_difference,
        num_images,
        picker_func=lambda diff: max(diff, key=lambda pair: pair[1]),
        distribution_mutator_func=decrement_distribution)
8cf49fd376893be254d5075930475de9cedee004
5,971
def predict_lumbar_ankles_model(data):
    """Generate lumbar + 2 ankles model predictions for data.

    Args:
        data (dict): all data matrices/lists for a single subject.

    Returns:
        labels (dict): columns include 'probas' (from model) and
            'true' (ground truth). One row for each fold.
    """
    RESULT_DIR = ('../results/imus6_subjects7/sensors03_lumbar_ankles/'
                  'iteration0/')
    data = selectFeats(data, ['lumbar', 'ankle_r', 'ankle_l'])
    test_dset = (data['X'], data['y'])
    subject = str(int(data['subjectID']))
    model = load_model_and_weights(subject, RESULT_DIR)
    labels = make_predictions(model, test_dset)
    return labels
581a45a71bb17ebebf3a8ea63dbbfb898c6e3567
5,972
def linear_search(iterable, item):
    """Returns the index of the item in the unsorted iterable.

    Iterates through a collection, comparing each item to the target item,
    and returns the index of the first item that is equal to the target
    item.

    * O(n) time complexity
    * O(1) space complexity

    Args:
        iterable: A collection that is iterable.
        item: An object to search for.

    Returns:
        The index of the item in the sorted iterable, or -1 if not found.

    Raises:
        TypeError: If iterable is not iterable.
    """
    try:
        _ = iter(iterable)
    except TypeError:
        raise TypeError('\'{}\' object is not iterable'.format(
            type(iterable).__name__))

    for index, _item in enumerate(iterable):
        if _item == item:
            return index
    return -1
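A short usage sketch (inputs invented for illustration):

print(linear_search([4, 2, 7, 2], 7))   # -> 2 (index of first match)
print(linear_search([4, 2, 7, 2], 9))   # -> -1 (not found)
print(linear_search("abc", "b"))        # -> 1 (any iterable works)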
bdbd7e70cea79deef1375648bde61067df1d2221
5,974
def create_MD_tag(reference_seq, query_seq):
    """Create MD tag

    Args:
        reference_seq (str) : reference sequence of alignment
        query_seq (str) : query bases of alignment

    Returns:
        md_tag (str) : md description of the alignment
    """
    no_change = 0
    md = []
    for ref_base, query_base in zip(reference_seq, query_seq):
        if ref_base.upper() == query_base:
            no_change += 1
        else:
            if no_change > 0:
                md.append(str(no_change))
            md.append(ref_base)
            no_change = 0

    if no_change > 0:
        md.append(str(no_change))
    return ''.join(md)
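A usage sketch: with one mismatch at the third base, the MD string encodes two matches, the mismatched reference base, then one match:

print(create_MD_tag("ACGT", "ACCT"))  # -> '2G1'
print(create_MD_tag("ACGT", "ACGT"))  # -> '4' (all matches)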
4b711521d00af132e8e29fe4fc44785b985c2607
5,975
def split_last(dataframe, target_col, sort_col='date', cut=.9):
    """Splits the dataframe on sort_col at the given cut ratio, and splits
    off the target column

    Args:
        dataframe: dataframe to be cut
        sort_col: column to be sorted on. Default='date'
        cut: cut ratio for the train/eval sets
    Returns:
        X_train: dataframe of the first cut of the data set without the target
        y_train: dataframe of the first cut of the data set only target values
        X_eval: dataframe of the remaining slice of the data set without target
        y_eval: dataframe of the remaining slice of the data set only targets
    """
    if sort_col is not None:
        # Sort the rows by sort_col (the original passed axis='columns',
        # which sorts column labels rather than row values)
        dataframe = dataframe.sort_values(by=sort_col)

    cutoff = int(dataframe.shape[0] * cut)
    first_df = dataframe.reset_index(drop=True).loc[:cutoff]
    last_df = dataframe.reset_index(drop=True).loc[cutoff:]

    X_train = first_df.drop(columns=[target_col])
    y_train = np.array(first_df[target_col]).ravel()
    X_eval = last_df.drop(columns=[target_col])
    y_eval = np.array(last_df[target_col]).ravel()

    return X_train, y_train, X_eval, y_eval
090144fa9c68f8ffc9e9e7c2e9c8427f0aff862d
5,977
def ignore_warnings(obj=None):
    """ Context manager and decorator to ignore warnings

    Note. Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...     warnings.warn('buhuhuhu')
    ...     print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    if callable(obj):
        return _ignore_warnings(obj)
    else:
        return _IgnoreWarnings()
2fc8c4d12467ab3c0b86201271f42b7d22130b82
5,978
import csv


def readCSV(associated_ipaddr, ipaddr, timestamp):
    """
    Method that extracts observations from a CSV file.

    Parameters:
        associated_ipaddr (str): The name of the column that specifies IP
            addresses of VPN clients
        ipaddr (str): The name of the column that specifies IP addresses of
            users on the public internet
        timestamp (str): The name of the column that specifies the
            observation creation time

    Returns:
        observations (list): A list of observation dictionaries
    """
    observations = []
    with open(CSV_FILE, "rt", encoding="ascii") as f:
        reader = csv.reader(f)
        header = next(reader, None)
        for row in reader:
            observations.append(
                {
                    "associated_ipaddr": row[header.index(associated_ipaddr)],
                    "ipaddr": row[header.index(ipaddr)],
                    "timestamp": translateTime(row[header.index(timestamp)]),
                }
            )
    return observations
77594e98b83cd5d49bd8a70b28b54cab92dcadeb
5,979
def interp2d(x, y, z, outshape, verbose=True, doplot=True):
    """
    Parameters
    ----------
    x, y : int
        X and Y indices of `z`.

    z : float
        Values for given `x` and `y`.

    outshape : tuple of int
        Shape of 2D output array.

    verbose : bool, optional
        Print info to screen.

    doplot : bool, optional
        Plot results.

    Returns
    -------
    im : float array
        2-D array of interpolated data.
    """
    # Print the data to screen for checking
    if verbose:
        print('DATA USED FOR INTERPOLATION:')
        for i, (xx, yy, zz) in enumerate(zip(x, y, z), start=1):
            print('{}: {} {} {}'.format(i, xx, yy, zz))

    # Perform 2D interpolation
    func = interpolate.interpolate.interp2d(x, y, z)
    im = func(np.mgrid[:outshape[1]], np.mgrid[:outshape[0]])

    if doplot:
        # Get min/max to use same colorbar on for base and overlay
        pmin = im.min()
        pmax = im.max()

        fig, ax = plt.subplots()

        # Show interpolated 2D image
        p = ax.imshow(im, vmin=pmin, vmax=pmax)

        # Overlay data points used for interpolation
        ax.scatter(x, y, s=100, c=z, vmin=pmin, vmax=pmax, marker='s')

        # Display colorbar.
        # Shrink to make it same width as display.
        c = fig.colorbar(p, orientation='horizontal', shrink=0.7)
        c.set_label('Pixel value')

        # Plot labels
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_title('Interpolated image')

        plt.draw()

    return im
05558e413139a0ad71a4240e3f44c4bb9019c314
5,980
def _nms_boxes(detections, nms_threshold):
    """Apply the Non-Maximum Suppression (NMS) algorithm on the bounding
    boxes with their confidence scores and return an array with the
    indexes of the bounding boxes we want to keep.

    # Args
        detections: Nx7 numpy arrays of
                    [[x, y, w, h, box_confidence, class_id, class_prob],
                     ......]
    """
    x_coord = detections[:, 0]
    y_coord = detections[:, 1]
    width = detections[:, 2]
    height = detections[:, 3]
    box_confidences = detections[:, 4] * detections[:, 6]

    areas = width * height
    ordered = box_confidences.argsort()[::-1]

    keep = list()
    while ordered.size > 0:
        # Index of the current element:
        i = ordered[0]
        keep.append(i)
        xx1 = np.maximum(x_coord[i], x_coord[ordered[1:]])
        yy1 = np.maximum(y_coord[i], y_coord[ordered[1:]])
        xx2 = np.minimum(x_coord[i] + width[i],
                         x_coord[ordered[1:]] + width[ordered[1:]])
        yy2 = np.minimum(y_coord[i] + height[i],
                         y_coord[ordered[1:]] + height[ordered[1:]])

        width1 = np.maximum(0.0, xx2 - xx1 + 1)
        height1 = np.maximum(0.0, yy2 - yy1 + 1)
        intersection = width1 * height1
        union = (areas[i] + areas[ordered[1:]] - intersection)
        iou = intersection / union
        indexes = np.where(iou <= nms_threshold)[0]
        ordered = ordered[indexes + 1]

    keep = np.array(keep)
    return keep
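An illustrative call with invented detections: boxes 0 and 1 overlap almost completely, box 2 is disjoint, so NMS keeps the higher-confidence box 0 and the isolated box 2:

dets = np.array([
    # x,   y,   w,   h,   box_conf, class_id, class_prob
    [0.,   0.,  10., 10., 0.9,      0.,       1.0],
    [1.,   1.,  10., 10., 0.8,      0.,       1.0],
    [50., 50.,  10., 10., 0.7,      0.,       1.0],
])
print(_nms_boxes(dets, nms_threshold=0.5))  # -> [0 2]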
9d3ad16396f1e94e4ac8efe1e73e8f06b529ff0f
5,982
def dht_get_key(data_key):
    """
    Given a key (a hash of data), go fetch the data.
    """
    dht_client = get_dht_client()
    ret = dht_client.get(data_key)
    if ret is not None:
        # Modernized from Python 2 type checks (types.ListType /
        # types.DictType / has_key) to isinstance and `in`.
        if isinstance(ret, list):
            ret = ret[0]
        if isinstance(ret, dict) and "value" in ret:
            ret = ret["value"]
    else:
        raise Exception("No data returned from %s" % data_key)

    return ret
0c8680996e21b7dcc02cd4b7d81f3fa500b02076
5,983
def get_dataframe_from_table(table_name, con):
    """ put table into DataFrame """
    df = pd.read_sql_table(table_name, con)
    return df
cdf94277c2f4e3acdd22b87de7cd9d0fee63b24c
5,984
from typing import List
from typing import Dict

import requests


def _find_links_in_headers(*, headers, target_headers: List[str]) -> Dict[str, Dict[str, str]]:
    """Return a dictionary { rel: { url: 'url', mime_type: 'mime_type' } } containing the target headers."""
    found: Dict[str, Dict[str, str]] = {}
    links = headers.get("link")
    if links:
        # [{'url': 'https://micropub.jamesg.blog/micropub', 'rel': 'micropub'} ]
        parsed_link_headers: List[Dict[str, str]] = requests.utils.parse_header_links(links)
    else:
        return found

    for header in parsed_link_headers:
        url = header.get("url", "")
        rel = header.get("rel", "")
        mime_type = header.get("type", "")

        if _is_http_url(url) and rel in target_headers:
            found[rel] = {
                "url": url,
                "mime_type": mime_type,
            }

    # Add check for x-pingback header
    if "x-pingback" in target_headers:
        pingback_url = headers.get("x-pingback")

        if _is_http_url(pingback_url):
            # assign as "pingback" key in dictionary (the original stored the
            # leftover loop variable `url` here instead of pingback_url)
            found["pingback"] = {
                "url": pingback_url,
                "mime_type": "",
            }

    return found
ee23c9c7ca2633d11ea33ac2695a46eca4188af5
5,985
import re


def calc_word_frequency(my_string, my_word):
    """Calculate the number of occurrences of a given word in a given string.

    Args:
        my_string (str): String to search
        my_word (str): The word to search for

    Returns:
        int: The number of occurrences of the given word in the given string.
    """
    # Remove all non alphanumeric characters from the string
    filtered_string = re.sub(r'[^A-Za-z0-9 ]+', '', my_string)

    # Return the number of occurrences of my_word in the filtered string
    return filtered_string.split().count(my_word)
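A usage sketch; note the count is over whole whitespace-separated words after punctuation is stripped:

print(calc_word_frequency("The cat, the dog and the cat.", "cat"))  # -> 2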
15ff723dd2ff089fb12cccb38283f1f75e37079d
5,986
def _make_warmstart_dict_env():
    """Warm-start VecNormalize by stepping through BitFlippingEnv"""
    venv = DummyVecEnv([make_dict_env])
    venv = VecNormalize(venv)
    venv.reset()
    venv.get_original_obs()

    for _ in range(100):
        actions = [venv.action_space.sample()]
        venv.step(actions)
    return venv
67e0ee3e8440c24a08e306afbb9891dee64dd11d
5,988
def record_attendance(lesson_id):
    """
    Record attendance for a lesson.
    """
    # Get the UserLessonAssociation for the current user and the given
    # lesson id. (So we can also display attendance etc.)
    lesson = Lesson.query.filter(Lesson.lesson_id == lesson_id).first()

    # Ensure the lesson id/association object is found.
    if not lesson:
        abort(404)

    record_single_attendance_form = RecordSingleAttendanceForm()

    if request.method == 'POST' and record_single_attendance_form.validate_on_submit():
        assoc = UserLessonAssociation.query.filter(
            UserLessonAssociation.lesson_id == lesson_id
        ).filter(
            UserLessonAssociation.user_id == int(record_single_attendance_form.user_id.data)
        ).first()

        if assoc:
            assoc.attendance_code = record_single_attendance_form.attendance_code.data
            flash("Successfully updated lesson attendance.")
        else:
            abort(500)

        # We only want to send updates if they were late or not there.
        if assoc.attendance_code == 'L' or assoc.attendance_code == 'N':
            # Send an email update.
            html = 'Attendance for your lesson on: ' + assoc.lesson.get_lesson_date() \
                   + ' has been updated. Your attendance is now recorded as: ' + \
                   assoc.get_lesson_attendance_str()
            # Send a lesson update.
            send_lesson_update(
                assoc.user,
                html,
                url_for(
                    'student.view_lesson',
                    lesson_id=lesson_id,
                    _external=True
                ),
                parent=True
            )

        if check_attendance_complete(lesson):
            # The attendance is complete.
            lesson.update_lesson_details(attendance_recorded=True)
        else:
            lesson.update_lesson_details(attendance_recorded=False)

        # Save Changes
        db.session.commit()
        # Refresh
        return redirect(url_for('staff.record_attendance', lesson_id=lesson_id))

    # Render the view lesson template and pass in the association and the
    # lesson object.
    return render_template(
        'staff/record_attendance.html',
        lesson=lesson,
        record_single_attendance_form=record_single_attendance_form
    )
237fb1df5eaf1f1b7d9555ca636971318f23c360
5,989
def ts_to_datestr(ts, fmt="%Y-%m-%d %H:%M"):
    """Format a timestamp as a human-readable date string."""
    return ts_to_datetime(ts).strftime(fmt)
29b180c0d569768b173afb960d9cb09e86519741
5,990
def so3_rotate(batch_data):
    """ Randomly rotate the point clouds to augment the dataset
        rotation is per shape, using a random rotation about all three axes
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        rotation_angle_A = np.random.uniform() * 2 * np.pi
        rotation_angle_B = np.random.uniform() * 2 * np.pi
        rotation_angle_C = np.random.uniform() * 2 * np.pi

        cosval_A = np.cos(rotation_angle_A)
        sinval_A = np.sin(rotation_angle_A)
        cosval_B = np.cos(rotation_angle_B)
        sinval_B = np.sin(rotation_angle_B)
        cosval_C = np.cos(rotation_angle_C)
        sinval_C = np.sin(rotation_angle_C)

        rotation_matrix = np.array(
            [[cosval_B*cosval_C,
              -cosval_B*sinval_C,
              sinval_B],
             [sinval_A*sinval_B*cosval_C + cosval_A*sinval_C,
              -sinval_A*sinval_B*sinval_C + cosval_A*cosval_C,
              -sinval_A*cosval_B],
             [-cosval_A*sinval_B*cosval_C + sinval_A*sinval_C,
              cosval_A*sinval_B*sinval_C + sinval_A*cosval_C,
              cosval_A*cosval_B]])

        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
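A sanity-check sketch: since the matrix is a proper rotation, point norms should be preserved (inputs invented for illustration):

batch = np.random.rand(2, 16, 3).astype(np.float32)
rotated = so3_rotate(batch)
# Each point keeps its distance from the origin.
assert np.allclose(np.linalg.norm(batch, axis=-1),
                   np.linalg.norm(rotated, axis=-1), atol=1e-5)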
84c184c920833bf2037b0f4181e9f25bcf6fd5ce
5,992
import hashlib


def intmd5(source: str, nbytes=4) -> int:
    """
    Generate a predictive random integer of nbytes*8 bits based on a
    source string.

    :param source: seed string to generate random integer.
    :param nbytes: size of the integer.
    """
    hashobj = hashlib.md5(source.encode())
    return int.from_bytes(hashobj.digest()[:nbytes], byteorder="big",
                          signed=False)
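A usage sketch: the result is deterministic for a given seed string, since it is just the leading bytes of the MD5 digest:

print(intmd5("hello"))            # -> 1564557354 (first 4 bytes of md5)
print(intmd5("hello", nbytes=2))  # -> 23873 (first 2 bytes)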
c03eb99a67af00a4a081423ecca3a724111514e1
5,993
def trisolve(a, b, c, y, inplace=False):
    """
    The tridiagonal matrix (Thomas) algorithm for solving tridiagonal
    systems of equations:

        a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}

    in matrix form:

        Mx = y

    TDMA is O(n), whereas standard Gaussian elimination is O(n^3).

    Arguments:
    -----------
        a: (n - 1,) vector
            the lower diagonal of M
        b: (n,) vector
            the main diagonal of M
        c: (n - 1,) vector
            the upper diagonal of M
        y: (n,) vector
            the result of Mx
        inplace:
            if True, and if b and y are both float64 vectors, they will be
            modified in place (may be faster)

    Returns:
    -----------
        x: (n,) vector
            the solution to Mx = y

    References:
    -----------
    http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
    http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
    """
    if (a.shape[0] != c.shape[0] or a.shape[0] >= b.shape[0]
            or b.shape[0] != y.shape[0]):
        raise ValueError('Invalid diagonal shapes')

    yshape_in = y.shape

    if y.ndim == 1:
        # needs to be (ldb, nrhs)
        y = y[:, None]

    rtype = np.result_type(a, b, c, y)

    if not inplace:
        # force a copy
        a = np.array(a, dtype=rtype, copy=True, order='C')
        b = np.array(b, dtype=rtype, copy=True, order='C')
        c = np.array(c, dtype=rtype, copy=True, order='C')
        y = np.array(y, dtype=rtype, copy=True, order='C')

    # this may also force copies if arrays have inconsistent types /
    # incorrect order
    a, b, c, y = (np.array(v, dtype=rtype, copy=False, order='C')
                  for v in (a, b, c, y))

    # y will now be modified in place to give the result
    if rtype == np.float32:
        _fnndeconv.TDMAs_lapacke(a, b, c, y)
    elif rtype == np.float64:
        _fnndeconv.TDMAd_lapacke(a, b, c, y)
    else:
        raise ValueError('Unsupported result type: %s' % rtype)

    return y.reshape(yshape_in)
ead814b1025e8458f7e1eabeecf3eb89cb9edd5d
5,994
def calc_mean_pred(df: pd.DataFrame):
    """
    Make a prediction based on the average of the predictions of phones
    in the same collection.
    from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
    """
    lerp_df = make_lerp_data(df=df)
    add_lerp = pd.concat([df, lerp_df])

    # each time step == only one row, average over all phone latDeg,
    # lngDeg at each time step
    # eg. mean(original Deg Pixel4 and interpolated Deg 4XLModded with
    # `make_lerp_data`)
    mean_pred_result = (
        add_lerp.groupby(["collectionName", "millisSinceGpsEpoch"])[
            ["latDeg", "lngDeg"]
        ]
        .mean()
        .reset_index()
    )

    base_cols = ["collectionName", "phoneName", "phone", "millisSinceGpsEpoch"]
    try:
        mean_pred_df = df[base_cols + ["latDeg_gt", "lngDeg_gt", "speedMps"]].copy()
    except Exception:
        mean_pred_df = df[base_cols].copy()
    mean_pred_df = mean_pred_df.merge(
        mean_pred_result[["collectionName", "millisSinceGpsEpoch", "latDeg", "lngDeg"]],
        on=["collectionName", "millisSinceGpsEpoch"],
        how="left",
    )
    return mean_pred_df
a4f6cdb0d5efb72cd6b503a8eb3a0f4b13cee0bf
5,995
def get_meals(v2_response, venue_id):
    """ Extract meals into old format from a DiningV2 JSON response """
    result_data = v2_response["result_data"]
    meals = []
    day_parts = result_data["days"][0]["cafes"][venue_id]["dayparts"][0]
    for meal in day_parts:
        stations = []
        for station in meal["stations"]:
            items = []
            for item_id in station["items"]:
                item = result_data["items"][item_id]
                new_item = {}
                new_item["txtTitle"] = item["label"]
                new_item["txtPrice"] = ""
                new_item["txtNutritionInfo"] = ""
                new_item["txtDescription"] = item["description"]
                new_item["tblSide"] = ""
                new_item["tblFarmToFork"] = ""
                attrs = [{"description": item["cor_icon"][attr]}
                         for attr in item["cor_icon"]]
                if len(attrs) == 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs[0]}
                elif len(attrs) > 1:
                    new_item["tblAttributes"] = {"txtAttribute": attrs}
                else:
                    new_item["tblAttributes"] = ""
                if isinstance(item["options"], list):
                    item["options"] = {}
                if "values" in item["options"]:
                    for side in item["options"]["values"]:
                        new_item["tblSide"] = {"txtSideName": side["label"]}
                items.append(new_item)
            stations.append({"tblItem": items,
                             "txtStationDescription": station["label"]})
        meals.append({"tblStation": stations,
                      "txtDayPartDescription": meal["label"]})
    return meals
9d27d225a39248690529167f7ff18777a086bcc6
5,996
async def async_setup(hass, config_entry):
    """ Disallow configuration via YAML """
    return True
759cc705a82a0f9ff9d4d43cb14d641d7e552aaa
5,997
def blend(im1, im2, mask):
    """
    Blends and shows the given images according to mask
    :param im1: first image
    :param im2: second image
    :param mask: binary mask
    :return: result blend
    """
    res = []
    for i in range(3):
        res.append(pyramid_blending(im1[:, :, i], im2[:, :, i], mask, 7, 5, 5))
    res = np.dstack(res)
    fig, a = plt.subplots(nrows=2, ncols=2)
    a[0][0].imshow(im1, cmap='gray')
    a[0][1].imshow(im2, cmap='gray')
    a[1][0].imshow(mask, cmap='gray')
    a[1][1].imshow(res, cmap='gray')
    plt.show()
    return res
4b4a635d1f44ced411b9dfe2037b0f42805f38b2
5,998
def parse(fileName):
    """
    Pull the EXIF info from a photo and sanitize it for sending as JSON
    by converting values to strings.
    """
    with open(fileName, 'rb') as f:
        exif = exifread.process_file(f, details=False)

    parsed = {}
    for key, value in exif.items():
        parsed[key] = str(value)

    return parsed
3f5aca5b38dd7f3b3a9defae1fc5f645e255a191
5,999
from typing import Optional
from typing import Callable

import requests


def make_request(
    endpoint: str, method: str = "get", data: Optional[dict] = None, timeout: int = 15
) -> Response:
    """Makes a request to the given endpoint and maps the response to a Response class"""
    method = method.lower()
    request_method: Callable = getattr(requests, method)

    if method not in SAFE_METHODS and data is None:
        raise ValueError("Data must be provided for POST, PUT and PATCH requests.")

    r: RequestsResponse
    if method not in SAFE_METHODS:
        r = request_method(endpoint, json=data, timeout=timeout)
    else:
        r = request_method(endpoint, timeout=timeout)

    return Response(status_code=r.status_code, data=r.json())
8dd88583f61e5c42689461dd6d316297d910f197
6,001
import socket


def _is_rpc_timeout(e):
    """ check whether an exception is an individual rpc timeout. """
    # connection caused socket timeout is being re-raised as
    # ThriftConnectionTimeoutError now
    return isinstance(e, socket.timeout)
ec832bec086b59698eed12b18b7a37e5eb541329
6,002
def fake_quantize_with_min_max(inputs, f_min, f_max, bit_width,
                               quant_zero=True):
    """The fake quantization operation kernel.

    Args:
        inputs: a tensor containing values to be quantized.
        f_min: the minimum input value.
        f_max: the maximum input value.
        bit_width: the bit width.
    Returns:
        a tensor containing quantized values.
    """

    @tf.function
    def forward(inputs, f_min, f_max, bit_width, quant_zero):
        with tf.name_scope("FakeQuantizeWithMinMax"):
            float_bit_width = tf.cast(bit_width, dtype=tf.float32,
                                      name="bit_width")
            bound = tf.math.pow(2.0, float_bit_width - 1)
            q_min = tf.math.negative(bound, name="q_min")
            q_max = tf.math.subtract(bound, 1, name="q_max")
            scale = get_scale(f_min, f_max, q_min, q_max)

            if quant_zero:
                q_zero_point, new_f_min, new_f_max = quantize_zero_point(
                    scale, f_min, f_max, q_min, q_max)
            shift = new_f_min if quant_zero else f_min

            quantized = quantize(inputs, scale, shift, q_min, q_max)
            dequantized = dequantize(quantized, scale, shift, q_min, q_max)
        return dequantized

    @tf.function
    def grad_fn(dy):
        float_bit_width = tf.cast(bit_width, dtype=tf.float32,
                                  name="bit_width")
        bound = tf.math.pow(2.0, float_bit_width - 1)
        q_min = tf.math.negative(bound, name="q_min")
        q_max = tf.math.subtract(bound, 1, name="q_max")
        scale = get_scale(f_min, f_max, q_min, q_max)

        if quant_zero:
            q_zero_point, new_f_min, new_f_max = quantize_zero_point(
                scale, f_min, f_max, q_min, q_max)
            between_min_max = (inputs >= new_f_min) & (inputs <= new_f_max)
            below_min = (inputs <= new_f_min)
            above_max = (inputs >= new_f_max)
        else:
            between_min_max = (inputs >= f_min) & (inputs <= f_max)
            below_min = (inputs <= f_min)
            above_max = (inputs >= f_max)

        ones = tf.ones_like(dy)
        zeros = tf.zeros_like(dy)
        grad_wrt_inputs = dy * tf.where(between_min_max, ones, zeros)
        grad_wrt_f_min = tf.reduce_sum(dy * tf.where(below_min, ones, zeros))
        grad_wrt_f_max = tf.reduce_sum(dy * tf.where(above_max, ones, zeros))
        return grad_wrt_inputs, grad_wrt_f_min, grad_wrt_f_max, None

    results = forward(inputs, f_min, f_max, bit_width, quant_zero)
    return results, grad_fn
2034dbe02d50ce0317dc4dbb6f2ed59137e671d2
6,003
def lorentzian(coordinates, center, fwhm):
    """
    Unit integral Lorentzian function.

    Parameters
    ----------
    coordinates : array-like
        Can be either a list of ndarrays, as a meshgrid coordinates list,
        or a single ndarray for 1D computation
    center : array-like
        Center of the lorentzian. Should be the same shape as
        `coordinates.ndim`.
    fwhm : float
        Full-width at half-max of the function.

    Returns
    -------
    out : ndarray
        Lorentzian function of unit integral.

    Notes
    -----
    The functional form of the Lorentzian is given by:

    .. math::

        L(x) = \\frac{1}{\pi} \\frac{(\gamma/2)}{(x-c)^2 + (\gamma/2)^2}

    where :math:`\gamma` is the full-width at half-maximum, and :math:`c`
    is the center.

    For n dimensions, the functional form of the Lorentzian is given by:

    .. math::

        L(x_1, ..., x_n) = \\frac{1}{n \pi} \\frac{(\gamma/2)}{(\sum_i{(x_i - c_i)^2} + (\gamma/2)^2)^{\\frac{1+n}{2}}}

    Example
    -------
    >>> import numpy as np
    >>> from skued import lorentzian
    >>>
    >>> span = np.arange(-10, 10, 0.1)
    >>> xx, yy = np.meshgrid(span, span)
    >>> center = [0, 0]
    >>> l = lorentzian(coordinates=[xx, yy], center=[0, 0], fwhm=1)
    >>> l.shape == xx.shape    # True
    >>> np.sum(l) * 0.1**2     # Integral should be unity (spacing = 0.1)
    """
    width = 0.5 * fwhm

    # 1D is a special case, as coordinates are not given as a list of arrays
    if not isinstance(coordinates, (list, tuple)):  # iterable but not ndarray
        return (width / pi) / ((coordinates - center) ** 2 + width ** 2)

    dim = len(coordinates)
    core = width / (
        (sum([(x - c) ** 2 for x, c in zip(coordinates, center)]) + width ** 2)
    ) ** ((dim + 1) / 2)
    factor = 1 / (dim * pi)
    return factor * core
8631ef30f0fd50ac516f279cd130d6d9b099d953
6,004
def _to_gzip_base64(self, **kwargs):
    """ Reads the file as text, then turns to gzip+base64 """
    data = self.read_text(**kwargs)
    return Base.b64_gzip_encode(data)
bb3e01bcac5e551d862629e79f4c54827ca3783c
6,005
import traceback


def get_recipe_data(published=False, complete_data=False):
    """Return published or unpublished recipe data."""
    try:
        Changed = User.alias()
        recipes = recipemodel.Recipe.select(
            recipemodel.Recipe,
            storedmodel.Stored,
            pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
        ).where(
            recipemodel.Recipe.published == published
        ).join(
            storedmodel.Stored, pw.JOIN.LEFT_OUTER,
            on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER,
            on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.Tag, pw.JOIN.LEFT_OUTER,
            on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID)
        ).group_by(
            recipemodel.Recipe.id)

        if complete_data:
            # Load in User table
            recipes = recipes.select(
                User,
                Changed,
                recipemodel.Recipe,
                storedmodel.Stored,
                pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
            ).switch(
                recipemodel.Recipe
            ).join(
                User, pw.JOIN.LEFT_OUTER,
                on=(User.id == recipemodel.Recipe.created_by).alias("a")
            ).switch(
                recipemodel.Recipe
            ).join(
                Changed, pw.JOIN.LEFT_OUTER,
                on=(Changed.id == recipemodel.Recipe.changed_by).alias("b"))

        data = recipemodel.get_recipes(recipes, complete_data=complete_data)
        return utils.success_response(msg="Data loaded", data=data, hits=len(data))
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to load data: {e}")
35afc8247912bd6814b5f4e76716f39d9f244e90
6,006
def html_anchor_navigation(base_dir, experiment_dir, modules):
    """Build header of an experiment with links to all modules used for
    rendering.

    :param base_dir: parent folder in which to look for experiment folders
    :param experiment_dir: experiment folder
    :param modules: list of all loaded modules
    :return: str
    """
    return "\n".join((
        """<header class="w3-container w3-dark-grey">
             <h5><a href='#'>{folder}</a></h5>
           </header>""".format(folder=experiment_dir),
        "\n".join("""
           <div style='white-space: nowrap;'>
             <div class="show toggle-cookie padding-right" data-toggle='toggle-{id}-all' data-class-off='no-show'>&nbsp;</div>
             <a class='' href='#{module_title}'>{module_title}</a>
           </div>""".format(
            folder=experiment_dir,
            module_title=module.title,
            id=module.id) for module in modules),
        "<hr />"
    ))
1fea16c0aae2f73be713271de5f003e608cee7e9
6,007
import typing


def _to_int_and_fraction(d: Decimal) -> typing.Tuple[int, str]:
    """convert absolute decimal value into integer and decimal (<1)"""
    t = d.as_tuple()
    stringified = ''.join(map(str, t.digits))
    fraction = ''
    if t.exponent < 0:
        int_, fraction = stringified[:t.exponent], stringified[t.exponent:]
        fraction = fraction.rjust(-t.exponent, '0')
    else:
        int_ = stringified + t.exponent * '0'
    return int(int_ or 0), fraction
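A usage sketch showing both branches of the exponent test:

from decimal import Decimal

print(_to_int_and_fraction(Decimal("3.25")))  # -> (3, '25')
print(_to_int_and_fraction(Decimal("12E2")))  # -> (1200, '')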
d1f83df06ae42cdc3e6b7c0582397ee3a79ff99b
6,009
import json


def json_to_obj(my_class_instance):
    """ Takes a JSON representation as input and returns an instance of
    the MyClass class.

    >>> a = MyClass('me', 'my_surname', True)
    >>> json_dict = get_json(a)
    >>> json_to_obj(json_dict)
    <__main__.MyClass object at 0x7fd8e9634510>
    """
    some_dict = json.loads(my_class_instance)
    return MyClass(**some_dict)
1f881e609f1c895173f4c27ebdaf413a336a4b8f
6,010
def all_tags(path) -> {str: str}:
    """Method to return Exif tags"""
    with open(path, "rb") as file:
        tags = exifread.process_file(file, details=False)
    return tags
29132ad176ba68d7026ebb78d9fed6170833255e
6,011
def static(request):
    """
    Backport django.core.context_processors.static to Django 1.2.
    """
    return {'STATIC_URL': djangoSettings.STATIC_URL}
cf74daed50e7e15f15fbe6592f36a523e388e11e
6,012
def genBoard():
    """
    Generates an empty board.

    >>> genBoard()
    ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
    """
    # Empty board
    empty = ["A", "B", "C", "D", "E", "F", "G", "H", "I"]
    # Return it
    return empty
c47e766a0c897d3a1c589a560288fb52969c04a3
6,013
def _partition_at_level(dendrogram, level):
    """Return the partition of the nodes at the given level

    A dendrogram is a tree and each level is a partition of the graph
    nodes. Level 0 is the first partition, which contains the smallest
    snapshot_affiliations, and the best is len(dendrogram) - 1. The higher
    the level is, the bigger are the snapshot_affiliations
    """
    partition = dendrogram[0].copy()
    for index in range(1, level + 1):
        for node, community in partition.items():
            partition[node] = dendrogram[index][community]
    return partition
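A usage sketch with an invented two-level dendrogram:

dendrogram = [{'a': 0, 'b': 0, 'c': 1, 'd': 1},  # level 0: finest split
              {0: 0, 1: 0}]                      # level 1: communities merged
print(_partition_at_level(dendrogram, 0))  # -> {'a': 0, 'b': 0, 'c': 1, 'd': 1}
print(_partition_at_level(dendrogram, 1))  # -> {'a': 0, 'b': 0, 'c': 0, 'd': 0}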
b179127076c386480c31a18a0956eb30d5f4ef2a
6,014
def generate_ab_data():
    """
    Generate data for a second order reaction A + B -> P

        d[A]/dt = -k[A][B]
        d[B]/dt = -k[A][B]
        d[P]/dt = k[A][B]

        [P] = ([B]0 - [A]0 h(t)) / (1 - h(t))

    where h(t) = ([B]0 / [A]0) e^(kt ([B]0 - [A]0))

    Data printed in a .csv file
    """
    times = np.linspace(0, 10, num=100)  # s

    a0 = 0.6   # mol dm^-3
    b0 = 0.5   # mol dm^-3
    k = 1.7    # mol^-1 dm^3 s^-1

    with open('ab.csv', 'w') as data_file:
        print('Data for A + B -> P where v = k[A][B]', file=data_file)

        for i, t in enumerate(times):
            h = (b0 / a0) * np.exp(k * t * (b0 - a0))
            p = (b0 - a0 * h) / (1.0 - h)
            a = a0 - p
            b = b0 - p

            # Time, [A], [B], [P]
            print(f'{t:.6f},{a:.6f},{b:.6f},{p:.6f}', file=data_file)

    return None
d36521953129b5e002d3a3d2bcf929322c75470c
6,016
import typing


def autocomplete(segment: str, line: str, parts: typing.List[str]):
    """
    :param segment:
    :param line:
    :param parts:
    :return:
    """
    if parts[-1].startswith('-'):
        return autocompletion.match_flags(
            segment=segment,
            value=parts[-1],
            shorts=['f', 'a', 'd'],
            longs=['force', 'append', 'directory']
        )

    if len(parts) == 1:
        return autocompletion.match_path(segment, parts[0])

    return []
8d929e96684d8d1c3ad492424821d27c4d1a2e66
6,017
def elast_tri3(coord, params):
    """Triangular element with 3 nodes

    Parameters
    ----------
    coord : ndarray
        Coordinates for the nodes of the element (3, 2).
    params : tuple
        Material parameters in the following order:

            young : float
                Young modulus (>0).
            poisson : float
                Poisson coefficient (-1, 0.5).
            dens : float, optional
                Density (>0).

    Returns
    -------
    stiff_mat : ndarray
        Local stiffness matrix for the element (6, 6).
    mass_mat : ndarray
        Local mass matrix for the element (6, 6).

    Examples
    --------
    >>> coord = np.array([
    ...         [0, 0],
    ...         [1, 0],
    ...         [0, 1]])
    >>> params = [8/3, 1/3]
    >>> stiff, mass = elast_tri3(coord, params)
    >>> stiff_ex = 1/2 * np.array([
    ...             [4, 2, -3, -1, -1, -1],
    ...             [2, 4, -1, -1, -1, -3],
    ...             [-3, -1, 3, 0, 0, 1],
    ...             [-1, -1, 0, 1, 1, 0],
    ...             [-1, -1, 0, 1, 1, 0],
    ...             [-1, -3, 1, 0, 0, 3]])
    >>> np.allclose(stiff, stiff_ex)
    True
    """
    stiff_mat = np.zeros([6, 6])
    mass_mat = np.zeros([6, 6])
    C = fem.umat(params[:2])
    if len(params) == 2:
        dens = 1
    else:
        dens = params[-1]
    gpts, gwts = gau.gauss_tri(order=2)
    for cont in range(gpts.shape[0]):
        r, s = gpts[cont, :]
        H, B, det = fem.elast_diff_2d(r, s, coord, fem.shape_tri3)
        factor = det * gwts[cont]
        stiff_mat += 0.5 * factor * (B.T @ C @ B)
        mass_mat += 0.5 * dens * factor * (H.T @ H)
    return stiff_mat, mass_mat
5a0381bb7961b811650cc57af7317737995dd866
6,018
import torch


def make_offgrid_patches_xcenter_xincrement(n_increments: int, n_centers: int,
                                            min_l: float, patch_dim: float,
                                            device):
    """ for each random point in the image and for each increment, make a
    square patch

    return: I x C x P x P x 2
    """
    patches_xcenter = make_offgrid_patches_xcenter(
        n_centers, min_l, patch_dim, device)  # C x P x P x 2
    increments = min_l * torch.arange(0, n_increments,
                                      device=patches_xcenter.device)

    # expand patches for each increment
    size = (n_increments, *patches_xcenter.shape)
    patches_xcenter_xincrement = patches_xcenter.unsqueeze(0).expand(size)
    assert torch.allclose(patches_xcenter_xincrement[0, :, :], patches_xcenter)
    assert torch.allclose(patches_xcenter_xincrement[1, :, :], patches_xcenter)
    patches_xcenter_xincrement = (patches_xcenter_xincrement
                                  + increments[:, None, None, None, None])

    # some checks
    assert len(patches_xcenter_xincrement.shape) == 5
    assert patches_xcenter_xincrement.shape[-1] == 2
    assert patches_xcenter_xincrement.shape[0] == n_increments
    assert patches_xcenter_xincrement.shape[1] == n_centers
    assert (patches_xcenter_xincrement.shape[2]
            == patches_xcenter_xincrement.shape[3]
            == patch_dim * 2)
    return patches_xcenter_xincrement
dc7fe393e6bee691f9c6ae399c52668ef98372c4
6,019
def load_gazes_from_xml(filepath: str) -> pd.DataFrame:
    """Loads data from the gaze XML file output by itrace.
    Returns the responses as a pandas DataFrame

    Parameters
    ----------
    filepath : str
        path to XML

    Returns
    -------
    pd.DataFrame
        Gazes contained in the xml file
    """
    root = ET.parse(filepath)
    return pd.DataFrame(
        list(map(lambda e: e.attrib, root.findall("./gazes/response"))))
b1fd17eace5ea253ce82617f5e8c9238a78d925a
6,020
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
    """Rotate points angle (in deg) about an axis."""
    axis = axis.lower()

    # Copy original array if not inplace
    if not inplace:
        points = points.copy()

    # Convert angle to radians
    if deg:
        angle *= np.pi / 180

    if axis == 'x':
        y = points[:, 1] * np.cos(angle) - points[:, 2] * np.sin(angle)
        z = points[:, 1] * np.sin(angle) + points[:, 2] * np.cos(angle)
        points[:, 1] = y
        points[:, 2] = z
    elif axis == 'y':
        x = points[:, 0] * np.cos(angle) + points[:, 2] * np.sin(angle)
        z = -points[:, 0] * np.sin(angle) + points[:, 2] * np.cos(angle)
        points[:, 0] = x
        points[:, 2] = z
    elif axis == 'z':
        x = points[:, 0] * np.cos(angle) - points[:, 1] * np.sin(angle)
        y = points[:, 0] * np.sin(angle) + points[:, 1] * np.cos(angle)
        points[:, 0] = x
        points[:, 1] = y
    else:
        raise ValueError('invalid axis. Must be either "x", "y", or "z"')

    if not inplace:
        return points
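A usage sketch: rotating the unit x vector by 90 degrees about z should give the unit y vector:

pts = np.array([[1.0, 0.0, 0.0]])
out = axis_rotation(pts, 90, axis='z')
print(np.round(out, 6))  # -> [[0. 1. 0.]]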
dccb663a9d8d4f6551bde2d6d26868a181c3c0a7
6,021
async def unhandled_exception(request: Request, exc: UnhandledException):
    """Return a 400 JSON response for an UnhandledException."""
    return JSONResponse(
        status_code=400,
        content={"message": "Something bad happened."
                            f" Internal Error: {exc.message!r}"},
    )
bff466190f5804def1416ee6221dccb3739c7dec
6,022
def register_view(request):
    """Render HTML registration page"""
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            user = form.cleaned_data.get('username')
            messages.success(request, "Account was created for " + user)
            return redirect('loginPage')

    context = {'form': form}
    return render(request, 'register.html', {'data': context})
c129a561c31c442ca091cfe38cb2f7a27f94a25d
6,023
def luv2rgb(luv, *, channel_axis=-1):
    """Luv to RGB color space conversion.

    Parameters
    ----------
    luv : (..., 3, ...) array_like
        The image in CIE Luv format. By default, the final dimension
        denotes channels.

    Returns
    -------
    out : (..., 3, ...) ndarray
        The image in RGB format. Same dimensions as input.

    Raises
    ------
    ValueError
        If `luv` is not at least 2-D with shape (..., 3, ...).

    Notes
    -----
    This function uses luv2xyz and xyz2rgb.
    """
    return xyz2rgb(luv2xyz(luv))
bac8e7155f2249135158786d39c1f2d95af22fc8
6,024
def deposit_fetcher(record_uuid, data):
    """Fetch a deposit identifier.

    :param record_uuid: Record UUID.
    :param data: Record content.
    :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` that contains
        data['_deposit']['id'] as pid_value.
    """
    return FetchedPID(
        provider=DepositProvider,
        pid_type=DepositProvider.pid_type,
        pid_value=str(data['_deposit']['id']),
    )
c4505eff50473204c5615991f401a45cc53779a1
6,025
import time


def make_filename():
    """This function creates a unique filename."""
    unique_filename = time.strftime("%Y%m%d-%H%M%S")
    # unique_filename = str(uuid.uuid1())
    # unique_filename = str(uuid.uuid1().hex[0:7])
    save_name = 'capture_ferhat_{}.png'.format(unique_filename)
    return save_name
bf16b642884381d795148e045de2387d0acaf23d
6,026
import re
from collections import defaultdict


def compareLists(sentenceList, majorCharacters):
    """
    Compares the list of sentences with the character names and returns
    sentences that include names.
    """
    characterSentences = defaultdict(list)
    for sentence in sentenceList:
        for name in majorCharacters:
            if re.search(r"\b(?=\w)%s\b(?!\w)" % re.escape(name),
                         sentence, re.IGNORECASE):
                characterSentences[name].append(sentence)
    return characterSentences
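A usage sketch with invented sentences; each character maps to every sentence mentioning it:

sentences = ["Alice met Bob.", "Bob left early.", "Nobody else came."]
result = compareLists(sentences, ["Alice", "Bob"])
print(dict(result))
# -> {'Alice': ['Alice met Bob.'], 'Bob': ['Alice met Bob.', 'Bob left early.']}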
4b41da794ff936a3769fe67580b989e0de343ee7
6,027
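# A small illustration of compareLists (sentences and names are my own, not
# from the source):
sentences = ["Alice met Bob.", "Bob left early.", "Nobody else came."]
by_character = compareLists(sentences, ["Alice", "Bob"])
# by_character["Bob"] == ["Alice met Bob.", "Bob left early."]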
import re

def is_live_site(url):
    """Ensure that the tool is not used on the production Isaac website.

    Use of this tool or any part of it on Isaac Physics and related
    websites is a violation of our terms of use:
    https://isaacphysics.org/terms
    """
    return bool(re.search(r"http(s)?://isaac(physics|chemistry|maths|biology|science)\.org", url))
407624a049e92740eb82753d941780a446b1facf
6,028
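# Quick checks for is_live_site (URLs are illustrative):
assert is_live_site("https://isaacphysics.org/terms")
assert not is_live_site("http://localhost:8000")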
def score_false(e, sel):
    """Return the score for internal-terminal nodes: ``e`` times the number
    of unselected entries."""
    return e * (~sel).sum()
077cd38c6d1186e2d70fd8a93f44249b0cef2885
6,029
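# A small numeric check for score_false (my own example): with e=2 and one
# unselected entry, the score is 2 * 1 = 2.
import numpy as np
sel = np.array([True, True, False])
print(score_false(2, sel))  # 2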
import logging import numpy def retrieveXS(filePath, evMin=None, evMax=None): """Open an ENDF file and return the scattering XS""" logging.info('Retrieving scattering cross sections from file {}' .format(filePath)) energies = [] crossSections = [] with open(filePath) as fp: line = fp.readline() while line[0] == '#': line = fp.readline() while line != '' and '#END' not in line: ev, xs = [float(xx) for xx in line.split()[:2]] energies.append(ev) crossSections.append(xs) line = fp.readline() logging.info('Done') energies = numpy.array(energies) crossSections = numpy.array(crossSections) bounds = energies.min(), energies.max() if evMin is None: evMin = bounds[0] else: if bounds[0] > evMin: logging.warning('Could not find requested minimum energy ' '{:.4E} eV in cross section file {}. ' 'Using minimum found: {:.4E} eV' .format(evMin, filePath, bounds[0])) evMin = bounds[0] indices = numpy.where(energies >= evMin) energies = energies[indices] crossSections = crossSections[indices] if evMax is None: evMax = bounds[1] else: if bounds[1] < evMax: logging.warning('Could not find requested maximum energy ' '{:.4E} eV in cross section file {}. ' 'Using maximum found: {:.4E} eV' .format(evMax, filePath, bounds[1])) evMax = bounds[1] indices = numpy.where(energies <= evMax) energies = energies[indices] crossSections = crossSections[indices] return energies, crossSections
388986facd75540983870f1f7e0a6f51b6034271
6,031
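# A minimal sketch of the file layout retrieveXS parses (contents are my own,
# mimicking the "#"-header / "#END"-terminator structure it looks for):
with open("xs_sample.txt", "w") as f:
    f.write("# elastic scattering XS\n1.0 4.0\n2.0 3.5\n3.0 3.0\n#END\n")
energies, xs = retrieveXS("xs_sample.txt", evMin=1.5)
# energies -> array([2., 3.]), xs -> array([3.5, 3.])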
import string

def _parse_java_simple_date_format(fmt):
    """
    Split a SimpleDateFormat into literal strings and format codes with counts.

    Examples
    --------
    >>> _parse_java_simple_date_format("'Date:' EEEEE, MMM dd, ''yy")
    ['Date: ', ('E', 5), ', ', ('M', 3), ' ', ('d', 2), ", '", ('y', 2)]

    """
    out = []
    quoted = False
    prev_c = None
    prev_count = 0
    literal_text = ''
    k = 0
    while k < len(fmt):
        c = fmt[k]
        k += 1
        if not quoted and c == "'" and k < len(fmt) and fmt[k] == "'":
            # Repeated single quote.
            if prev_c is not None:
                out.append((prev_c, prev_count))
                prev_c = None
                prev_count = 0
            literal_text += c
            k += 1
            continue
        if c == "'":
            if not quoted:
                if prev_c is not None:
                    out.append((prev_c, prev_count))
                    prev_c = None
                    prev_count = 0
                if literal_text:
                    out.append(literal_text)
                    literal_text = ''
            quoted = not quoted
            continue
        if quoted:
            literal_text += c
            continue
        if c not in string.ascii_letters:
            if prev_c is not None:
                out.append((prev_c, prev_count))
                prev_c = None
                prev_count = 0
            literal_text += c
            continue
        if c not in 'GyMdhHmsSEDFwWakKzZ':
            raise ValueError(f"unknown format character {c}")
        if literal_text != '':
            out.append(literal_text)
            literal_text = ''
        if prev_c is not None and c != prev_c:
            out.append((prev_c, prev_count))
            prev_count = 0
        prev_c = c
        prev_count += 1
    else:
        if quoted:
            raise ValueError("missing closing quote; input ends "
                             f"with '{literal_text}'")
        if literal_text != '':
            out.append(literal_text)
        elif prev_c is not None:
            out.append((prev_c, prev_count))
    return out
3fe42e4fc96ee96c665c3c240cb00756c8534c84
6,032
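# Another worked example (the format string is my own):
print(_parse_java_simple_date_format("yyyy-MM-dd"))
# [('y', 4), '-', ('M', 2), '-', ('d', 2)]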
import logging def rekey_by_sample(ht): """Re-key table by sample id to make subsequent ht.filter(ht.S == sample_id) steps 100x faster""" ht = ht.key_by(ht.locus) ht = ht.transmute( ref=ht.alleles[0], alt=ht.alleles[1], het_or_hom_or_hemi=ht.samples.het_or_hom_or_hemi, #GQ=ht.samples.GQ, HL=ht.samples.HL, S=ht.samples.S, ) ht = ht.key_by(ht.S) ht = ht.transmute( chrom=ht.locus.contig.replace("chr", ""), pos=ht.locus.position ) logging.info("Schema after re-key by sample:") ht.describe() return ht
3e879e6268017de31d432706dab9e672e85673aa
6,033
import collections

import numpy as np
import patsy

def _sample_prior_fixed_model(formula_like, data=None, a_tau=1.0, b_tau=1.0,
                              nu_sq=1.0, n_iter=2000,
                              generate_prior_predictive=False,
                              random_state=None):
    """Sample from the prior for a fixed model."""
    # check_random_state, rdu, and the _check_design_matrices /
    # _sample_parameters_conjugate_priors / _sample_outcomes helpers are
    # module-level utilities defined elsewhere in this package.
    rng = check_random_state(random_state)

    y, X = patsy.dmatrices(formula_like, data=data)
    y, X = _check_design_matrices(y, X)

    outcome_names = y.design_info.column_names
    coef_names = [rdu.get_default_coefficient_name(n)
                  for n in X.design_info.column_names]
    n_coefs = len(coef_names)

    beta, tau_sq, lp = _sample_parameters_conjugate_priors(
        n_coefs, a_tau=a_tau, b_tau=b_tau, nu_sq=nu_sq, size=n_iter,
        random_state=rng)

    chains = collections.OrderedDict({'tau_sq': tau_sq})
    for j, t in enumerate(coef_names):
        chains[t] = beta[:, j]
    chains['lp__'] = lp

    outcome_chains = None
    if generate_prior_predictive:
        sampled_outcomes, _ = _sample_outcomes(
            X, beta, tau_sq, random_state=rng)

        outcome_chains = collections.OrderedDict(
            {n: sampled_outcomes[..., i]
             for i, n in enumerate(outcome_names)})

    args = {'random_state': random_state, 'n_iter': n_iter}
    results = {'chains': chains,
               'args': args,
               'acceptance': 1.0,
               'accept_stat': np.ones((n_iter,), dtype=float),
               'mean_lp__': np.mean(chains['lp__'])}

    prior_predictive = None
    if generate_prior_predictive:
        prior_predictive = {
            'chains': outcome_chains,
            'args': args,
            'acceptance': 1.0,
            'accept_stat': np.ones((n_iter,), dtype=float)
        }

    return results, prior_predictive
1e57cd3f8812e28a8d178199d3dd9c6a23614dc0
6,034
from typing import Any async def validate_input( hass: core.HomeAssistant, data: dict[str, Any] ) -> dict[str, str]: """Validate the user input allows us to connect. Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user. """ zeroconf_instance = await zeroconf.async_get_instance(hass) async_client = get_async_client(hass) device = Device(data[CONF_IP_ADDRESS], zeroconf_instance=zeroconf_instance) await device.async_connect(session_instance=async_client) await device.async_disconnect() return { SERIAL_NUMBER: str(device.serial_number), TITLE: device.hostname.split(".")[0], }
5ececb6dfc84e232d413b2ada6c6076a75420b49
6,035
def closeWindow(plotterInstance=None): """Close the current or the input rendering window.""" if not plotterInstance: plotterInstance = settings.plotter_instance if not plotterInstance: return if plotterInstance.interactor: plotterInstance.interactor.ExitCallback() plotterInstance.closeWindow() return plotterInstance
af3df7fa07069413c59f498529f4d21a9b88e9f4
6,036
def _format_stages_summary(stage_results):
    """
    stage_results (list of (tuples of
      (success:boolean, stage_name:string, status_msg:string)))

    returns a string of a report, one line per stage.
    Something like:
    Stage: <stage x> :: SUCCESS
    Stage: <stage y> :: FAILED
    Stage: <stage z> :: SUCCESS
    """
    # find the longest stage name to pad the report lines
    max_name_len = 0
    for _, stage_name, _ in stage_results:
        max_name_len = max(max_name_len, len(stage_name))

    summary = ""
    for _, stage_name, status_msg in stage_results:
        summary += 'Stage: ' + stage_name.ljust(max_name_len) + ":: "
        summary += status_msg + '\n'
    return summary
2f5c757342e98ab258bdeaf7ffdc0c5d6d4668ca
6,037
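# Example report from _format_stages_summary (stage names are my own); it
# prints something like:
#   Stage: build           :: SUCCESS
#   Stage: integration-test:: FAILED
print(_format_stages_summary([
    (True, 'build', 'SUCCESS'),
    (False, 'integration-test', 'FAILED'),
]))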
import json

def pack(envelope, pack_info):
    """Pack envelope into a byte buffer.

    Parameters
    ----------
    envelope : data structure
    pack_info : packing information

    Returns
    -------
    packet : bytes
    """
    ptype = pack_info.ptype
    # `packers` (the registry of payload packers) and `partition` (the byte
    # sequence separating header from payload) are module-level globals
    # defined elsewhere.
    packer = packers[ptype]
    payload = packer.pack(envelope)
    hdr = dict(packer=packer.kind,
               ver=packer.version,
               nbytes=len(payload))
    hdr_buf = json.dumps(hdr).encode()
    packet = hdr_buf + partition + payload
    return packet
5202e9eef7fc658157798d7f0d64820b1dfa3ac3
6,038
import tempfile def tmpnam_s(): """Implementation of POSIX tmpnam() in scalar context""" ntf = tempfile.NamedTemporaryFile(delete=False) result = ntf.name ntf.close() return result
a8c193a0e1ed6cd386dda9e0c084805cbed5f189
6,039
from Foundation import NSBundle

def timezone_lookup():
    """Force a timezone lookup right now"""
    TZPP = NSBundle.bundleWithPath_("/System/Library/PreferencePanes/"
                                    "DateAndTime.prefPane/Contents/"
                                    "Resources/TimeZone.prefPane")

    TimeZonePref = TZPP.classNamed_('TimeZonePref')
    ATZAdminPrefererences = TZPP.classNamed_('ATZAdminPrefererences')

    atzap = ATZAdminPrefererences.defaultPreferences()
    pref = TimeZonePref.alloc().init()
    atzap.addObserver_forKeyPath_options_context_(pref, "enabled", 0, 0)
    result = pref._startAutoTimeZoneDaemon_(0x1)

    # If this is not set to 1 then AutoTimezone still isn't enabled.
    # This additional preference check makes this script work with 10.12
    if pref.isTimeZoneAutomatic() != 1:
        return False

    return True
a78a7f32f02e4f6d33b91bb68c1330d531b0208e
6,040
import networkx as nx
from operator import itemgetter

def rollingCPM(dynNetSN:DynGraphSN,k=3):
    """
    This method is based on Palla et al[1]. It first computes overlapping snapshot_communities in each snapshot based on the clique percolation algorithm, and then matches snapshot_communities in successive steps using a method based on the union graph.

    [1] Palla, G., Barabási, A. L., & Vicsek, T. (2007).
    Quantifying social group evolution.
    Nature, 446(7136), 664.

    :param dynNetSN: a dynamic network (DynGraphSN)
    :param k: the size of cliques used as snapshot_communities building blocks
    :return: DynCommunitiesSN
    """
    DynCom = DynCommunitiesSN()
    old_communities = None
    old_graph = nx.Graph()

    graphs = dynNetSN.snapshots()

    for (date, graph) in graphs.items():
        communitiesAtT = list(_get_percolated_cliques(graph, k))  # get the percolated cliques (snapshot_affiliations) as a list of sets of nodes

        for c in communitiesAtT:
            DynCom.add_community(date, c)

        if old_communities is None:  # if this is the first snapshot
            old_graph = graph
            dateOld = date
            old_communities = communitiesAtT

        else:
            if len(communitiesAtT) > 0:  # if there is at least one community
                union_graph = nx.compose(old_graph, graph)  # create the union graph of the current and the previous snapshots
                communities_union = list(_get_percolated_cliques(union_graph, k))  # get the snapshot_affiliations of the union graph

                jaccardBeforeAndUnion = _included(old_communities, communities_union)  # we only care if the value is above 0
                jaccardUnionAndAfter = _included(communitiesAtT, communities_union)  # we only care if the value is above 0

                for c in jaccardBeforeAndUnion:  # for each community in the union graph
                    matched = []
                    born = []
                    killed = []

                    allJaccards = set()
                    for oldC in jaccardBeforeAndUnion[c]:
                        for newC in jaccardUnionAndAfter[c]:
                            allJaccards.add(((oldC, newC), _singleJaccard(oldC, newC)))  # compute the jaccard between candidates before and after
                    allJaccards = sorted(allJaccards, key=itemgetter(1), reverse=True)
                    sortedMatches = [k[0] for k in allJaccards]

                    oldCToMatch = dict(jaccardBeforeAndUnion[c])  # get all coms before
                    newCToMatch = dict(jaccardUnionAndAfter[c])  # get all new coms
                    while len(sortedMatches) > 0:  # as long as there are couples of unmatched snapshot_affiliations
                        matchedKeys = sortedMatches[0]  # pair of snapshot_affiliations with the highest jaccard
                        matched.append(matchedKeys)  # this pair will be matched

                        del oldCToMatch[matchedKeys[0]]  # delete the chosen com from those left to match
                        del newCToMatch[matchedKeys[1]]
                        sortedMatches = [k for k in sortedMatches if len(set(matchedKeys) & set(k)) == 0]  # keep only pairs of unmatched snapshot_affiliations

                    if len(oldCToMatch) > 0:
                        killed.append(list(oldCToMatch.keys())[0])
                    if len(newCToMatch) > 0:
                        born.append(list(newCToMatch.keys())[0])

                    for aMatch in matched:
                        DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, aMatch[0])), (date, DynCom._com_ID(date, aMatch[1])), dateOld, date, "continue")

                    for kil in killed:  # unmatched old snapshot_affiliations are "merged" into the new ones
                        for com in jaccardUnionAndAfter[c]:
                            DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, kil)), (date, DynCom._com_ID(date, com)), dateOld, date, "merged")
                    for b in born:  # unmatched new snapshot_affiliations are "split" off from the old ones
                        for com in jaccardBeforeAndUnion[c]:
                            DynCom.events.add_event((dateOld, DynCom._com_ID(dateOld, com)), (date, DynCom._com_ID(date, b)), dateOld, date, "split")
            old_graph = graph
            dateOld = date
            old_communities = communitiesAtT

    DynCom._relabel_coms_from_continue_events()
    return DynCom
b4050544cd8a98346f436c75e5c3eeeb9a64c030
6,041
import numpy as np

def penalty_eqn(s_m, Dt):
    """
    Description:
        Simple function for calculating the penalty for late submission of a project.

    Args:
        :in (1): maximum possible score
        :in (2): difference between the date of deadline and the date of submission of the project (in hours)
        :out (1): rounded result of the calculation
    """
    # grace offset: one tenth of the maximum possible score
    delta_p = s_m/10
    # main equation of the penalty for late submission
    p_s = abs((Dt/24)*np.exp(0.5)) + delta_p

    return round(s_m - p_s)
694a2b77c1612d7036c46768ee834043a1af3902
6,042
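# A worked example for penalty_eqn (numbers are my own): with s_m=10 and
# Dt=24 hours, delta_p = 1.0 and p_s = 1*e**0.5 + 1.0 ≈ 2.65, so the result
# is round(10 - 2.65) = 7.
print(penalty_eqn(10, 24))  # 7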
import re def stop(name=None, id=None): """ Stop (terminate) the VM identified by the given id or name. When both a name and id are provided, the id is ignored. name: Name of the defined VM. id: VM id. CLI Example: .. code-block:: bash salt '*' vmctl.stop name=alpine """ ret = {} cmd = ["vmctl", "stop"] if not (name or id): raise SaltInvocationError('Must provide either "name" or "id"') elif name: cmd.append(name) else: cmd.append(id) result = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False) if result["retcode"] == 0: if re.match("^vmctl: sent request to terminate vm.*", result["stderr"]): ret["changes"] = True else: ret["changes"] = False else: raise CommandExecutionError( "Problem encountered running vmctl", info={"errors": [result["stderr"]], "changes": ret}, ) return ret
3dbb2771f7407f3a28a9249551268b9ba23d906e
6,043