Dataset columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def filter(p):
    """Convert a list of indices into a list of words."""
    result = []
    for idx in p:
        if idx == stop_tag:
            break
        if idx == padding_tag:
            continue
        result.append(index_word[idx])
    return result
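A minimal usage sketch, assuming the module-level `index_word`, `stop_tag`, and `padding_tag` globals the function relies on (the values below are hypothetical):

index_word = {0: 'hello', 1: 'world'}
stop_tag = 2      # index that terminates decoding
padding_tag = 3   # index that is skipped

print(filter([0, 3, 1, 2, 0]))  # ['hello', 'world']: stops at stop_tag, skips padding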
ab79343d3d924bf1b69813d6a32b967bf45f39bd
3,656,394
def transformer_decoder_layer(dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, hidden_act, preprocess_cmd, postprocess_cmd, cache=None, gather_idx=None, param_initializer=None, name=''): """ The layer to be stacked in decoder part. :param dec_input: (batch_size, tgt_len, emb_dim) :param enc_output: (batch_size, n_tokens, emb_dim) :param slf_attn_bias: (batch_size, n_head, tgt_len, tgt_len) :param dec_enc_attn_bias: (batch_size, n_head, tgt_len, n_tokens) """ # (batch_size, tgt_len, emb_dim) slf_attn_output = multi_head_attention( queries=pre_process_layer(out=dec_input, # add layer normalization process_cmd=preprocess_cmd, dropout_rate=prepostprocess_dropout, name=name + '_pre_slf_attn'), keys=None, values=None, attn_bias=slf_attn_bias, # (batch_size, n_head, tgt_len, tgt_len) d_key=d_key, d_value=d_value, d_model=d_model, n_head=n_head, dropout_rate=attention_dropout, cache=cache, gather_idx=gather_idx, param_initializer=param_initializer, name=name + '_slf_attn') # add dropout and residual connection # (batch_size, tgt_len, emb_dim) slf_attn_output = post_process_layer( prev_out=dec_input, out=slf_attn_output, process_cmd=postprocess_cmd, dropout_rate=prepostprocess_dropout, name=name + '_post_slf_attn') # (batch_size, tgt_len, emb_dim) context_attn_output = multi_head_attention( queries=pre_process_layer(out=slf_attn_output, # add layer normalization process_cmd=preprocess_cmd, dropout_rate=prepostprocess_dropout, name=name + '_pre_context_attn'), keys=enc_output, # (batch_size, n_tokens, emb_dim) values=enc_output, # (batch_size, n_tokens, emb_dim) attn_bias=dec_enc_attn_bias, # (batch_size, n_head, tgt_len, n_tokens) d_key=d_key, d_value=d_value, d_model=d_model, n_head=n_head, dropout_rate=attention_dropout, cache=cache, gather_idx=gather_idx, static_kv=True, param_initializer=param_initializer, name=name + '_context_attn') # add dropout and residual connection context_attn_output = post_process_layer( prev_out=slf_attn_output, out=context_attn_output, process_cmd=postprocess_cmd, dropout_rate=prepostprocess_dropout, name=name + '_post_context_attn') ffd_output = positionwise_feed_forward( x=pre_process_layer(out=context_attn_output, # add layer normalization process_cmd=preprocess_cmd, dropout_rate=prepostprocess_dropout, name=name + '_pre_ffn'), d_inner_hid=d_inner_hid, d_hid=d_model, dropout_rate=relu_dropout, hidden_act=hidden_act, param_initializer=param_initializer, name=name + '_ffn') # add dropout and residual connection dec_output = post_process_layer( prev_out=context_attn_output, out=ffd_output, process_cmd=postprocess_cmd, dropout_rate=prepostprocess_dropout, name=name + '_post_ffn') return dec_output
57367b4aa27da48a1cff5ca24bfcecb36a10c39b
3,656,396
def replace_word_choice(sentence: str, old_word: str, new_word: str) -> str: """Replace a word in the string with another word. :param sentence: str - a sentence to replace words in. :param old_word: str - word to replace :param new_word: str - replacement word :return: str - input sentence with new words in place of old words """ return sentence.replace(old_word, new_word)
27d0eae1aa12538c570fec3aa433d59c40556592
3,656,397
def append_slash(url): """Make sure we append a slash at the end of the URL otherwise we have issues with urljoin Example: >>> urlparse.urljoin('http://www.example.com/api/v3', 'user/1/') 'http://www.example.com/api/user/1/' """ if url and not url.endswith('/'): url = '{0}/'.format(url) return url
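A short illustration of why the trailing slash matters, using the Python 3 urllib.parse equivalent of the urlparse call shown in the docstring:

from urllib.parse import urljoin

print(urljoin('http://www.example.com/api/v3', 'user/1/'))
# last path segment is replaced: 'http://www.example.com/api/user/1/'
print(urljoin(append_slash('http://www.example.com/api/v3'), 'user/1/'))
# 'http://www.example.com/api/v3/user/1/'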
3d8f009f0f7a2b93e2c9ed3fee593bbcf0f25c4f
3,656,398
def find_cards(thresh_image): """Finds all card-sized contours in a thresholded camera image. Returns the number of cards, and a list of card contours sorted from largest to smallest.""" # Find contours and sort their indices by contour size dummy, cnts, hier = cv2.findContours(thresh_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) index_sort = sorted(range(len(cnts)), key=lambda i: cv2.contourArea(cnts[i]), reverse=True) # If there are no contours, do nothing if len(cnts) == 0: return [], [] # Otherwise, initialize empty sorted contour and hierarchy lists cnts_sort = [] hier_sort = [] cnt_is_card = np.zeros(len(cnts), dtype=int) # Fill empty lists with sorted contour and sorted hierarchy. Now, # the indices of the contour list still correspond with those of # the hierarchy list. The hierarchy array can be used to check if # the contours have parents or not. for i in index_sort: cnts_sort.append(cnts[i]) hier_sort.append(hier[0][i]) # Determine which of the contours are cards by applying the # following criteria: 1) Smaller area than the maximum card size, # 2), bigger area than the minimum card size, 3) have no parents, # and 4) have four corners for i in range(len(cnts_sort)): size = cv2.contourArea(cnts_sort[i]) peri = cv2.arcLength(cnts_sort[i], True) approx = cv2.approxPolyDP(cnts_sort[i], 0.01*peri, True) if ((size < CARD_MAX_AREA) and (size > CARD_MIN_AREA) and (hier_sort[i][3] == -1) and (len(approx) == 4)): cnt_is_card[i] = 1 return cnts_sort, cnt_is_card
2bf8a0a0ea64de51b34c5826538d591865533353
3,656,399
def _count_partial_errors(client: GoogleAdsClient, conversion_upload_response) -> int: """Counts the partial errors in the GAds response. Args: client: A GoogleAdsClient instance conversion_upload_response: Google Upload Conversion service response. Returns: An integer representing the total number of partial errors in the response failure error. A list containing the code, message and number of times that each unique error code was returned by the API for one of the conversions uploaded. """ error_count = 0 error_stats = {} error_array = [] if _is_partial_failure_error_present(conversion_upload_response): partial_failure = getattr(conversion_upload_response, 'partial_failure_error', None) error_details = getattr(partial_failure, 'details', []) for error_detail in error_details: failure_message = client.get_type('GoogleAdsFailure') google_ads_failure = type(failure_message) failure_object_des = google_ads_failure.deserialize(error_detail.value) error_count += len(failure_object_des.errors) for error in failure_object_des.errors: str_code = str(error.error_code).strip() if str_code in error_stats: error_stats[str_code]['count'] += 1 else: error_stats[str_code] = {} error_stats[str_code]['count'] = 1 error_stats[str_code]['message'] = str(error.message).strip() print('A partial failure at index ' f'{error.location.field_path_elements[0].index} occurred ' f'\nError message: {error.message}\nError code: ' f'{error.error_code}') for code_key in error_stats: error_array.append({ 'code': code_key, 'message': error_stats[code_key]['message'], 'count': error_stats[code_key]['count'] }) return error_count, error_array
aaa0ab8c3668765539a05374afb06bc3e661af23
3,656,400
def cumulative_similarity(atoms, representations, threshold=0.98): """ """ u_representations = [representations[0]] s_idxs = [0] for i, representation in enumerate(representations[1:]): i += 1 similar = merge_asymmetric_similarity(atoms, [representation], u_representations, threshold=threshold) # We are only looking at one representation similar = similar[0] if len(similar) > 0: continue u_representations += [representation] s_idxs += [i] return np.asarray(s_idxs)
6f2c065233a6b7b7931bfdfcfe08a032287d6ffc
3,656,401
def get_project_by_id(project_id): """ Retrieve a project by its Id. Returns None if no project is found. """ try: return Project.objects.get(pk=project_id) except Project.DoesNotExist: return None
e7ae842d7b9daa5bde08a00f6dd9ac84246d4e13
3,656,402
def one_c(rand_gen): """ KS Test :param rand_gen: :return: """ # Now need to do the ks test # This calculates the value for KS at given points def ks_test(z): if z == 0: return 1 elif z < 1.18: # Numerically optimal cutoff block = ((np.exp((-1. * np.pi ** 2) / (8 * z ** 2)))) p = (np.sqrt(2 * np.pi) / z) * \ (block + block ** 9 + block ** 25) else: block = np.exp(-2 * z ** 2) p = 1 - 2 * (block - block ** 4 + block ** 9) return 1 - p def ks_test_part(points, values, bins): summed_bins = sum(values) distribution = [] for i in range(len(values)): distribution.append(abs(sum(values[:i]) / summed_bins - norm.cdf(bins[i]))) distribution = np.asarray(distribution) D = max(distribution) z = D * (np.sqrt(len(points)) + 0.12 + 0.11 / np.sqrt(len(points))) return D, ks_test(z) sigma = 1 u = 0 num_samples = np.logspace(np.log10(10), np.log10(10 ** 5), num=50) reference_ks = np.zeros(50) reference_p_value = np.zeros(50) ks = np.zeros(50) p_value = np.zeros(50) for index, sample in enumerate(num_samples): sample = int(sample) gauss = box_muller(rand_gen, sample) gauss = map_to_guass(gauss, u=u, sigma=sigma) ks[index], p_value[index] = common_test(gauss, ks_test_part) reference_ks[index], reference_p_value[index] = kstest(gauss, "norm") plt.plot(num_samples, ks, c='b', label='My KS Test') plt.plot(num_samples, reference_ks, c='r', label='Scipy KS Test') plt.xscale('log') plt.yscale('log') plt.xlabel("Number of Points") plt.ylabel("KS Statistic (D)") plt.legend(loc='best') plt.savefig("plots/KStest.png", dpi=300) plt.cla() plt.plot(num_samples, p_value, c='b', label='My KS Test Probability') plt.plot(num_samples, reference_p_value, c='r', label='Scipy KS Test Probability') plt.xscale('log') plt.yscale('log') plt.xlabel("Number of Points") plt.ylabel("Probability") plt.legend(loc='best') plt.savefig("plots/KStest_pvalue.png", dpi=300) plt.cla()
9a2b420f3620bc198bc880c01d90cc743ac5c2ec
3,656,404
from typing import List

import nltk


def divide_into_sentences(
    text: str, num_of_senteces: int, is_reversed: bool = False, offset: int = 0
) -> str:
    """
    This function divides the text into sentences and returns either
    the first X sentences or the last X sentences.
    """
    tokens_sent = nltk.sent_tokenize(text)
    # fix incorrect dialog sentences
    tokens_sent = fix_direct_speech_sentences(tokens_sent)
    output_text: List[str] = []
    if not is_reversed:
        for i, sentence in enumerate(tokens_sent):
            if i < offset:
                continue
            if i < num_of_senteces + offset:
                output_text.append(sentence)
            else:
                break
    else:
        for i, sentence in enumerate(reversed(tokens_sent)):
            if i < offset:
                continue
            if i < num_of_senteces + offset:
                output_text.append(sentence)
            else:
                break
        output_text.reverse()
    return " ".join(output_text)
e57e772942953c890b12e91c888688295dcf89ae
3,656,405
def intersection(bbox1: BoundingBox, bbox2: BoundingBox) -> BoundingBox: """ Calculate the intersection of two bounding boxes. """ assert bbox1.x_min <= bbox1.x_max assert bbox1.y_min <= bbox1.y_max assert bbox2.x_min <= bbox2.x_max assert bbox2.y_min <= bbox2.y_max # determine the coordinates of the intersection rectangle x_left = max(bbox1.x_min, bbox2.x_min) y_top = max(bbox1.y_min, bbox2.y_min) x_right = min(bbox1.x_max, bbox2.x_max) y_bottom = min(bbox1.y_max, bbox2.y_max) if x_right < x_left or y_bottom < y_top: return EMPTY_BBOX return BoundingBox(x_left, x_right, y_top, y_bottom)
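A small usage sketch. The `BoundingBox` type is not shown in this snippet, so a namedtuple matching the field order implied by the return statement (x_min, x_max, y_min, y_max) is assumed here; in the original module it, and `EMPTY_BBOX`, would be defined before the function:

from collections import namedtuple

BoundingBox = namedtuple('BoundingBox', ['x_min', 'x_max', 'y_min', 'y_max'])
EMPTY_BBOX = BoundingBox(0, 0, 0, 0)

a = BoundingBox(0, 10, 0, 10)
b = BoundingBox(5, 15, 5, 15)
print(intersection(a, b))  # BoundingBox(x_min=5, x_max=10, y_min=5, y_max=10)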
71ce5b562f5f6fbfe6dba51db43240a98b0d7d49
3,656,406
def test_if_tech_defined(enduse_fueltypes_techs):
    """Test if a technology has been configured,
    i.e. a fuel share has been assigned to one of the
    fueltypes in `fuel_shares`.

    Arguments
    ---------
    enduse_fueltypes_techs : dict
        Configured technologies and fuel shares of an enduse

    Returns
    -------
    c_tech_defined : bool
        Criteria whether technologies have been configured
        for an enduse or not
    """
    c_tech_defined = False

    for fueltype in enduse_fueltypes_techs:
        if enduse_fueltypes_techs[fueltype] == {}:
            pass
        else:
            c_tech_defined = True
            break

    return c_tech_defined
a727b375dc1bc7e76fe63090d8e278013fa2c6bb
3,656,408
from mindboggle.guts.segment import segment_regions def segment_rings(region, seeds, neighbor_lists, step=1, background_value=-1, verbose=False): """ Iteratively segment a region of surface mesh as concentric segments. Parameters ---------- region : list of integers indices of region vertices to segment (such as a fold) seeds : list of integers indices of seed vertices neighbor_lists : list of lists of integers indices to neighboring vertices for each vertex step : integer number of segmentation steps before assessing segments background_value : integer background value verbose : bool print statements? Returns ------- segments : list of lists of integers indices to vertices for each concentric segment Examples -------- >>> import numpy as np >>> from mindboggle.mio.vtks import read_scalars >>> from mindboggle.guts.mesh import find_neighbors_from_file >>> from mindboggle.guts.segment import extract_borders >>> from mindboggle.guts.segment import segment_rings >>> from mindboggle.mio.fetch_data import prep_tests >>> urls, fetch_data = prep_tests() >>> vtk_file = fetch_data(urls['left_travel_depth'], '', '.vtk') >>> folds_file = fetch_data(urls['left_folds'], '', '.vtk') >>> values, name = read_scalars(vtk_file, True, True) >>> neighbor_lists = find_neighbors_from_file(vtk_file) >>> background_value = -1 >>> fold, name = read_scalars(folds_file) >>> indices = [i for i,x in enumerate(fold) if x != background_value] >>> # Initialize seeds with the boundary of thresholded indices: >>> use_threshold = True >>> if use_threshold: ... # Threshold at the median depth or within maximum values in boundary: ... threshold = np.median(values[indices]) #+ np.std(values[indices]) ... indices_high = [x for x in indices if values[x] >= threshold] ... # Make sure threshold is within the maximum values of the boundary: ... B = np.ones(len(values)) ... B[indices] = 2 ... borders, foo1, foo2 = extract_borders(list(range(len(B))), B, neighbor_lists) ... borders = [x for x in borders if values[x] != background_value] ... if list(frozenset(indices_high).intersection(borders)): ... threshold = np.max(values[borders]) + np.std(values[borders]) ... indices_high = [x for x in indices if values[x] >= threshold] ... # Extract threshold boundary vertices as seeds: ... B = background_value * np.ones(len(values)) ... B[indices_high] = 2 ... seeds, foo1, foo2 = extract_borders(list(range(len(values))), B, neighbor_lists) ... # Or initialize P with the maximum value point: ... else: ... seeds = [indices[np.argmax(values[indices])]] ... indices_high = [] >>> indices = list(frozenset(indices).difference(indices_high)) >>> indices = list(frozenset(indices).difference(seeds)) >>> step = 1 >>> verbose = False >>> segments = segment_rings(indices, seeds, neighbor_lists, step, ... background_value, verbose) >>> len(segments) 56 >>> [len(x) for x in segments][0:10] [5540, 5849, 6138, 5997, 4883, 3021, 1809, 1165, 842, 661] >>> segments[0][0:10] [65539, 65540, 98308, 98316, 131112, 131121, 131122, 131171, 131175, 131185] Write results to vtk file and view (skip test): >>> from mindboggle.mio.plots import plot_surfaces # doctest: +SKIP >>> from mindboggle.mio.vtks import read_scalars, rewrite_scalars # doctest: +SKIP >>> S = background_value * np.ones(len(values)) # doctest: +SKIP >>> for i, segment in enumerate(segments): S[segment] = i # doctest: +SKIP >>> rewrite_scalars(vtk_file, 'segment_rings.vtk', S, 'segment_rings', ... 
[], -1) # doctest: +SKIP >>> plot_surfaces('segment_rings.vtk') # doctest: +SKIP """ segments = [] while seeds: # Segment step-wise starting from seeds and through the region: seeds_plus_new = segment_regions(region, neighbor_lists, 1, [seeds], False, False, [], [], [], step, background_value, verbose) seeds_plus_new = [i for i,x in enumerate(seeds_plus_new) if x != background_value] # Store the new segment after removing the previous segment: region = list(frozenset(region).difference(seeds)) seeds = list(frozenset(seeds_plus_new).difference(seeds)) if seeds: # Add the new segment and remove it from the region: segments.append(seeds) region = list(frozenset(region).difference(seeds)) return segments
3b2c5c1a68ecef7f036a332b966a3aa8610157af
3,656,409
def classification_result(y, y_pred): """ :param y: :param y_pred: :return: """ assert len(y) == len(y_pred) correct = [] wrong = [] for i in range(len(y)): if y[i] == y_pred[i]: correct.append(i) else: wrong.append(i) return correct, wrong
bdab32eeded40691a721fe8e1463819605c5639c
3,656,410
def flatgrad(loss, var_list, clip_norm=None): """Calculate the gradient and flatten it. Parameters ---------- loss : float the loss value var_list : list of tf.Tensor the variables clip_norm : float clip the gradients (disabled if None) Returns ------- list of tf.Tensor flattened gradient """ grads = tf.gradients(loss, var_list) if clip_norm is not None: grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads] return tf.concat(axis=0, values=[ tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)]) for (v, grad) in zip(var_list, grads) ])
cd359f78c882bbd57876e4011818422799727ce7
3,656,411
async def get_image_from_message( ctx, url=None, *, return_type="image_RGBA", search_last_messages=True, accept_emojis=True, accept_templates=True, ): """Get an image from a discord Context or check on images among the 100 last messages sent in the channel. Return bytes or PIL.Image image and the image url""" assert return_type and return_type in ["image_RGBA", "image", "bytes"] message_limit = 100 initial_message = None if isinstance(ctx, commands.Context): initial_message = ctx.message try: # try to get the image from the initial message return await get_image( initial_message, url, return_type, accept_emojis, accept_templates, accept_embeds=False, ) except ImageNotFoundError as e: # if the message is a reply, we try to find an image in the replied message ref = initial_message.reference if initial_message else None if ref and isinstance(ref.resolved, disnake.Message): reply_message = ref.resolved try: return await get_image( reply_message, url=None, return_type=return_type, accept_emojis=False, accept_templates=False, accept_embeds=True, ) except Exception: pass # if no image was found in the message we check for images in the last # 100 messages sent in the channel if search_last_messages: async for message in ctx.channel.history(limit=message_limit): if message != initial_message: try: return await get_image( message, url=None, return_type=return_type, accept_emojis=False, accept_templates=False, accept_embeds=True, ) except Exception: pass # no image was found in the last 100 images raise ValueError(e) except ValueError as e: # if an image was found but an error occurred, we raise it raise ValueError(e)
fd135153dd6db0fb7e6e1990560da0aa69af6ac7
3,656,412
import pandas as pd
from tqdm import tqdm


def test_write(size, iterations, exclude_formats, test_compress):
    """ Test writing for one file

        Args:
            size:            size of the file to test (0: small, 1: medium, 2: big)
            iterations:      number of times to run the test
            exclude_formats: formats to exclude in this test
            test_compress:   if True it will try all compressions

        Returns:
            dictionary with out
    """

    out = {}
    df = pd.read_csv(f"{PATH_DATA}{FILES[size]}.csv")

    for extension, func in tqdm(FUNCS["write"].items(), desc=f"{'write':10}", leave=True):

        # Skip this extension
        if extension in exclude_formats:
            continue

        if not test_compress or extension not in COMPRESSIONS:
            args = [df, f"{PATH_DATA}data.{extension}"]
            out[extension] = iterate_one_test(iterations, extension, func, args, {})

        # Try all compressions
        else:
            if extension not in COMPRESSIONS:
                continue

            # Get name of compression parameter and list of extensions
            comp_list = COMPRESSIONS[extension]["list"]
            comp_param_name = COMPRESSIONS[extension]["param_name"]

            for comp in tqdm(comp_list, desc=f"{extension:10}", leave=True):
                name = f"{extension}_{str(comp)}"
                out[name] = iterate_one_test(
                    iterations,
                    extension=name,
                    func=func,
                    args=[df, f"{PATH_DATA}data.{extension}_{comp}"],
                    kwargs={comp_param_name: comp},
                )

    return out
847de26005a291d9505a6c66221eec19e7924e54
3,656,413
from typing import Optional def get_game_server_group(game_server_group_arn: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGameServerGroupResult: """ The AWS::GameLift::GameServerGroup resource creates an Amazon GameLift (GameLift) GameServerGroup. :param str game_server_group_arn: A generated unique ID for the game server group. """ __args__ = dict() __args__['gameServerGroupArn'] = game_server_group_arn if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:gamelift:getGameServerGroup', __args__, opts=opts, typ=GetGameServerGroupResult).value return AwaitableGetGameServerGroupResult( auto_scaling_group_arn=__ret__.auto_scaling_group_arn, auto_scaling_policy=__ret__.auto_scaling_policy, balancing_strategy=__ret__.balancing_strategy, game_server_group_arn=__ret__.game_server_group_arn, game_server_group_name=__ret__.game_server_group_name, game_server_protection_policy=__ret__.game_server_protection_policy, instance_definitions=__ret__.instance_definitions, launch_template=__ret__.launch_template, max_size=__ret__.max_size, min_size=__ret__.min_size, role_arn=__ret__.role_arn, tags=__ret__.tags, vpc_subnets=__ret__.vpc_subnets)
ad5d517b5c05f0b2bbe300f6fd430d9780ebbc34
3,656,414
def create_wcscorr(descrip=False, numrows=1, padding=0): """ Return the basic definitions for a WCSCORR table. The dtype definitions for the string columns are set to the maximum allowed so that all new elements will have the same max size which will be automatically truncated to this limit upon updating (if needed). The table is initialized with rows corresponding to the OPUS solution for all the 'SCI' extensions. """ trows = numrows + padding c1 = pyfits.Column(name='WCS_ID',format='24A',array=np.array(['OPUS']*numrows+['']*padding,dtype="S24")) c2 = pyfits.Column(name='EXTVER',format='I',array=np.array(list(range(1,numrows+1)),dtype=np.int16)) c3 = pyfits.Column(name='CRVAL1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c4 = pyfits.Column(name='CRVAL2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c5 = pyfits.Column(name='CD1_1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c6 = pyfits.Column(name='CD1_2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c7 = pyfits.Column(name='CD2_1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c8 = pyfits.Column(name='CD2_2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c9 = pyfits.Column(name='ORIENTAT',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c10 = pyfits.Column(name='PA_V3',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c11 = pyfits.Column(name='Delta_RA',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c12 = pyfits.Column(name='Delta_Dec',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c13 = pyfits.Column(name='RMS_RA',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c14 = pyfits.Column(name='RMS_Dec',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c15 = pyfits.Column(name='Delta_Orientat',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c16 = pyfits.Column(name='Delta_Scale',format='D',array=np.array([1.0]*trows,dtype=np.float64)) c17 = pyfits.Column(name='NMatch',format='J',array=np.array([0]*trows,dtype=np.int32)) c18 = pyfits.Column(name='Catalog',format='40A',array=np.array([''],dtype="S40")) if descrip: c19 = pyfits.Column(name='Descrip',format='128A',array=np.array(['Original WCS computed by OPUS']*numrows,dtype="S128")) cdefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19]) else: cdefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18]) return pyfits.new_table(cdefs,nrows=trows)
3764f412bdbae771fe75fec7b8623906fddf01b1
3,656,415
import requests def get_token(): """ Acquire an OAuth token for Koha returns: OAuth token (string) """ data = { "client_id": config['client_id'], "client_secret": config['client_secret'], "grant_type": "client_credentials", } response = requests.post(config['api_root'] + '/oauth/token', data=data, verify=False) token = str(response.json()['access_token']) return token
02b362a27d8c9101ca24f6a853513f99d167f4c9
3,656,416
def isfile(value): """Validate that the value is an existing file.""" return vol.IsFile('not a file')(value)
2513516dbe0bdb765cbff78780f6386c5809a8d7
3,656,418
def is_in_form(dg: "streamlit.delta_generator.DeltaGenerator") -> bool: """True if the DeltaGenerator is inside an st.form block.""" return current_form_id(dg) != ""
e0f60c4b320d325db2cc27ece9395cae20b92fda
3,656,419
def install_from_deb(deb_path, additional_options):
    """
    Installs package with the dpkg command using the -i option and some extra options, if needed
    Raises an exception on non-zero exit code
    Input: deb file path, additional options
    Output: Combined stdout and stderr
    """
    return run_shell_command("dpkg -i " + additional_options + " " + deb_path)
8d452b96e1a3cb8c6b134747fe351158625fed27
3,656,421
import pytz from datetime import datetime import json def sign_award(award: Award) -> FlexSendMessage: """Sign Award Result Args: award (Award): Award Object Returns: FlexSendMessage: Flex Message """ tz = pytz.timezone("Asia/Taipei") now = datetime.now(tz=tz) now_text = now.strftime("%Y/%m/%d %H:%M:%S") with open("line/flex_message_template/sign_award.json") as json_file: contents = json.load(json_file) contents["hero"]["url"] = award.icon contents["body"]["contents"][1]["contents"][1][ "text" ] = f"{award.name} * {award.count}" contents["body"]["contents"][3]["contents"][1]["text"] = now_text message = FlexSendMessage(alt_text=f"簽到成功!", contents=contents) return message
e44f42d8563d641ef9136f4121841c430e95288b
3,656,422
import math import time def create_l5_block(block_id: str) -> l5_block_model.L5BlockModel: """ Creates unfinalized L5 block that needs confirmation """ l5_block = l5_block_model.L5BlockModel( dc_id=keys.get_public_id(), current_ddss=party.get_address_ddss(ADDRESS), # Get DDSS from party, cached hourly block_id=str(block_id), timestamp=str(math.floor(time.time())), prev_proof="", scheme=PROOF_SCHEME, l4_blocks=get_pending_l4_blocks(block_id), ) return l5_block
c4508e4aff53315a0fa84924f8a3fc66d99e0c8f
3,656,423
from typing import List from typing import Tuple def gridgen(xbry: List, ybry: List, beta: List, shape: Tuple, ul_idx=0, focus=None, proj=None, nnodes=14, precision=1.0e-12, nppe=3, newton=True, thin=True, checksimplepoly=True, verbose=False): """ External wrapping function to call Gridgen grid builder. xbry, ybry - nodes coordinates of grid boundary beta - vertex type shape - tuple of grid shape (eta, xi) """ # Prepare the Gridgen object. gn = Gridgen(xbry, ybry, beta, shape, ul_idx=ul_idx, focus=focus, proj=None, nnodes=nnodes, precision=precision, nppe=nppe, newton=newton, thin=thin, checksimplepoly=checksimplepoly, verbose=verbose) # Generate the C-Grid. if proj is not None: lon_vert, lat_vert = proj(gn.x, gn.y, inverse=True) grd = CGridGeo(lon_vert, lat_vert, proj) else: grd = CGrid(gn.x, gn.y) # Attach the Gridgen object to grid. grd.Gridgen = gn print('Grid construction complete.') return grd
e1e3eea43aff3301f317b1103e820bfb79169fbd
3,656,424
def get_output(): """Gets the current global output stream""" global OUTPUT return OUTPUT
63480fb1dc071f3f3df878204fd2af6994cc9ea0
3,656,425
import scipy def load_pascal_annotation(index, pascal_root): """ This code is borrowed from Ross Girshick's FAST-RCNN code (https://github.com/rbgirshick/fast-rcnn). It parses the PASCAL .xml metadata files. See publication for further details: (http://arxiv.org/abs/1504.08083). Thanks Ross! """ classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') class_to_ind = dict(zip(classes, xrange(21))) filename = osp.join(pascal_root, 'Annotations', index + '.xml') # print 'Loading: {}'.format(filename) def get_data_from_tag(node, tag): return node.getElementsByTagName(tag)[0].childNodes[0].data with open(filename) as f: data = minidom.parseString(f.read()) objs = data.getElementsByTagName('object') num_objs = len(objs) boxes = np.zeros((num_objs, 4), dtype=np.uint16) gt_classes = np.zeros((num_objs), dtype=np.int32) overlaps = np.zeros((num_objs, 21), dtype=np.float32) # Load object bounding boxes into a data frame. for ix, obj in enumerate(objs): # Make pixel indexes 0-based x1 = float(get_data_from_tag(obj, 'xmin')) - 1 y1 = float(get_data_from_tag(obj, 'ymin')) - 1 x2 = float(get_data_from_tag(obj, 'xmax')) - 1 y2 = float(get_data_from_tag(obj, 'ymax')) - 1 cls = class_to_ind[ str(get_data_from_tag(obj, "name")).lower().strip()] boxes[ix, :] = [x1, y1, x2, y2] gt_classes[ix] = cls overlaps[ix, cls] = 1.0 overlaps = scipy.sparse.csr_matrix(overlaps) return {'boxes': boxes, 'gt_classes': gt_classes, 'gt_overlaps': overlaps, 'flipped': False, 'index': index}
62495a50995f9fb0cec30d63b627f1b66022561b
3,656,426
def run_pca( X_train, y_train, mean_widget, std_widget, x_widget, labels_map=labels_map, labels_inv_map=labels_inv_map, ): """Runs PCA on the passed data based on the defined parameters and returns a pandas Dataframe. Consider the PCA is always fitted on the whole dataset X_train and the returned Dataframe isdependable on the values from the x_widget object. Parameters ========== X_train : numpy.ndarray Data matrix to run PCA on it y_train : numpy.ndarray Ground truth vector with integer class labels mean_widget : ipywidgets.widgets.widget_bool.Checkbox Widgets that indicates to center the data before scaling std_widget : ipywidgets.widgets.widget_bool.Checkbox Widget that indicates to scale the data to unit variance x_widget : ipywidgets.widgets.widget_selection.SelectMultiple Widget that defines, which data observation is returned, based on the containing labels in the widget object labels_map : dict Dictionary that maps from plant species representation to integer class represention. labels_inv_map : dict Dictionary that maps from integer class represention to plant species representation. Returns ======= pc_df : pandas.DataFrame Data matrix with 4 PCA-Components and the regarding label entry as 'Species' in plant species representation . """ ss = StandardScaler(with_mean=mean_widget.value, with_std=std_widget.value) train_data = ss.fit_transform(X_train) pca = decomposition.PCA(n_components=4) _ = pca.fit_transform(train_data) chosen_labels = np.array([labels_map.get(name) for name in x_widget.value]) ix_true = np.argwhere(np.in1d(y_train, chosen_labels)).flatten() pc = pca.transform(X_train[ix_true, ...]) pc_df = pd.DataFrame(data=pc, columns=["PC1", "PC2", "PC3", "PC4"]) pc_df["Species"] = np.array( [labels_inv_map.get(label_nr) for label_nr in y_train[ix_true]] ) return pc_df
de84adbc9a7779c05557941d1c4714e7a3eaf8c7
3,656,427
def validate_url(url): """ Validates the URL :param url: :return: """ if validators.url(url): return url elif validators.domain(url): return "http://{}".format(url) return ""
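A brief usage sketch, assuming the `validators` package is imported as in the snippet:

print(validate_url("https://example.com/page"))  # returned unchanged
print(validate_url("example.com"))               # "http://example.com"
print(validate_url("not a url"))                 # ""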
cd1ea3a834e1e67c4f438a28dcfa08e1dbd041c6
3,656,429
def map_class_to_id(classes): """ Get a 1-indexed id for each class given as an argument Note that for MASATI, len(classes) == 1 when only considering boats Args: classes (list): A list of classes present in the dataset Returns: dict[str, int] """ class_ids = list(range(1, len(classes) + 1)) return dict(zip(classes, class_ids))
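A quick example of the mapping it produces:

print(map_class_to_id(['boat']))                  # {'boat': 1}
print(map_class_to_id(['boat', 'buoy', 'ship']))  # {'boat': 1, 'buoy': 2, 'ship': 3}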
7c2b47249f61f446327c0a798c1a129c62fde6b3
3,656,430
def vec2text(vector): """ vector to captcha text :param vector: np array :return: text """ if not isinstance(vector, np.ndarray): vector = np.asarray(vector) vector = np.reshape(vector, [CAPTCHA_LENGTH, -1]) text = '' for item in vector: text += CAPTCHA_LIST[np.argmax(item)] return text
c819407caca85e4ced798b6c4058918708af0095
3,656,431
def get_data_nasdaq_fall(specified_value):
    """
    :param specified_value: the number of datapoints to fetch from the backend
    :return: list of dictionaries
    """
    data_points = NasdaqAsc.objects.order_by('difference_close')
    data_points = data_points[:specified_value]
    return data_points
20d811f7276c410cb8aefb71cfd8ef23bea66977
3,656,432
def login_required(f):
    """Decorator that requires the user to be signed in to view a page."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not g.signin:
            nu = get_redirect_url()
            if nu and (
                nu.startswith("/") or
                nu.startswith(request.url_root)
            ):
                return redirect(url_for('front.login', next=nu))
            else:
                return redirect(url_for('front.login'))
        return f(*args, **kwargs)
    return decorated_function
b9e7db40ebb50a71d4d56064fce8ec8e30fb6fbe
3,656,435
def get_agent_type(opt): """ Returns the type of model agent, specified by --model and --model_file. """ model_file = opt['model_file'] optfile = model_file + '.opt' if isfile(optfile): new_opt = _load_opt_file(optfile) if 'batchindex' in new_opt: del new_opt['batchindex'] if opt.get('override'): for k, v in opt['override'].items(): if str(v) != str(new_opt.get(k, None)): print( "[ warning: overriding opt['{}'] to {} (" "previously: {} )]".format( k, v, new_opt.get(k, None))) new_opt[k] = v for k, v in opt.items(): if k not in new_opt: new_opt[k] = v new_opt['model_file'] = model_file if (new_opt.get('dict_file') and not isfile(new_opt['dict_file'])): raise RuntimeError( 'WARNING: Dict file does not exist, check ' 'to make sure it is correct: {}'.format( new_opt['dict_file'])) model_class = get_agent_module(new_opt['model']) return model_class else: return None
59e53f961c29c9cf993bb176d4d993b80848bbcd
3,656,436
from typing import Any import types def is_sparse_or_ragged_tensor_value(tensor: Any) -> bool: """Returns true if sparse or ragged tensor.""" return (isinstance(tensor, types.SparseTensorValue) or isinstance(tensor, types.RaggedTensorValue) or isinstance(tensor, tf.compat.v1.SparseTensorValue))
8c82ffd04dfae89f19f34770b67724ffb9c66fc1
3,656,437
def arcsin(tensor): """Returns the element-wise inverse sine of the tensor""" return TensorBox(tensor).arcsin(wrap_output=False)
c49f520610e0a59c8a29f25ff0f02c81b2226b14
3,656,439
def get_one_pokemon(id: hug.types.number):
    """Display a single pokemon from the database."""
    cursor.execute("""SELECT * FROM pokemon WHERE id=%s """, [id])
    row = cursor.fetchone()
    conn.commit()
    conn.close()
    return row
b36b2a5b50c1f0edfc39f3a74341bed583c36e13
3,656,440
import numpy
import scipy.ndimage


def shift_fft(input_img, shift_val, method="fft"):
    """Do shift using FFTs

    Shift an array like scipy.ndimage.interpolation.shift(input, shift, mode="wrap", order="infinity") but faster
    :param input_img: 2d numpy array
    :param shift_val: 2-tuple of float
    :return: shifted image
    """
    if method == "fft":
        d0, d1 = input_img.shape
        v0, v1 = shift_val
        f0 = numpy.fft.ifftshift(numpy.arange(-d0 // 2, d0 // 2))
        f1 = numpy.fft.ifftshift(numpy.arange(-d1 // 2, d1 // 2))
        m1, m0 = numpy.meshgrid(f1, f0)
        e0 = numpy.exp(-2j * numpy.pi * v0 * m0 / float(d0))
        e1 = numpy.exp(-2j * numpy.pi * v1 * m1 / float(d1))
        e = e0 * e1
        out = abs(numpy.fft.ifft2(numpy.fft.fft2(input_img) * e))
    else:
        # Fall back to scipy, using the actual function arguments rather than the
        # built-in `input` and an undefined `shift`; scipy's spline `order` must be
        # an integer, so the default order is used here.
        out = scipy.ndimage.shift(input_img, shift_val, mode="wrap")
    return out
2729d187d222ef83635abea5bc29a633abce9e61
3,656,442
def get_output_detections_image_file_path(input_file_path, suffix="--detections"): """Get the appropriate output image path for a given image input. Effectively appends "--detections" to the original image file and places it within the same directory. Parameters ----------- input_file_path: str Path to input image. suffix: str Suffix appended to the file. Default: "--detections" Returns ------- str Full path for detections output image. """ input_file_path = input_file_path.replace('--original.', '.') input_file_paths = input_file_path.split('.') input_file_paths[-2] = input_file_paths[-2]+suffix return '.'.join(input_file_paths)
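A quick illustration of the path rewriting:

print(get_output_detections_image_file_path('photos/cat.jpg'))
# photos/cat--detections.jpg
print(get_output_detections_image_file_path('photos/cat--original.jpg', suffix='--boxes'))
# photos/cat--boxes.jpg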
b8d060dff6800750c418c70c61bd4d8e0b7bb416
3,656,443
from scipy import stats


def split_errorSC(tr, t1, t2, q, Emat, maxdt, ddt, dphi):
    """
    Calculate error bars based on a F-test and a given confidence interval q

    Parameters
    ----------
    tr : :class:`~obspy.core.Trace`
        Seismogram
    t1 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        Start time of picking window
    t2 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        End time of picking window
    q : float
        Confidence level
    Emat : :class:`~numpy.ndarray`
        Energy minimization matrix

    Returns
    -------
    err_dtt : float
        Error in dt estimate (sec)
    err_phi : float
        Error in phi estimate (degrees)
    err_contour : :class:`~numpy.ndarray`
        Error contour for plotting
    """
    # Bounds on search
    phi = np.arange(-90.0, 90.0, dphi)*np.pi/180.
    dtt = np.arange(0., maxdt, ddt)

    # Copy trace to avoid overriding
    tr_tmp = tr.copy()
    tr_tmp.trim(t1, t2)

    # Get degrees of freedom
    dof = split_dof(tr_tmp)
    if dof < 3:
        dof = 3
        print(
            "Degrees of freedom < 3. Fixing to DOF = 3, which may " +
            "result in inaccurate errors")
    n_par = 2

    # Error contour
    vmin = Emat.min()
    vmax = Emat.max()
    err_contour = vmin*(1. + n_par/(dof - n_par) *
                        stats.f.ppf(1. - q, n_par, dof - n_par))

    # Estimate uncertainty (q confidence interval)
    err = np.where(Emat < err_contour)
    if len(err) == 0:
        return False, False, False
    err_phi = max(
        0.25*(phi[max(err[0])] - phi[min(err[0])])*180./np.pi, 0.25*dphi)
    err_dtt = max(0.25*(dtt[max(err[1])] - dtt[min(err[1])]), 0.25*ddt)

    return err_dtt, err_phi, err_contour
41c56204884bafc32effe5f96557b703da589e05
3,656,444
def add(x, y): """Creates an SMTLIB addition statement formatted string Parameters ---------- x, y: float First and second numerical arguments to include in the expression """ return "(+ " + x + " " + y + ")"
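Usage sketch; since the arguments are concatenated as strings, the numbers (or symbols) must already be formatted as strings:

print(add("1.5", "2.0"))            # (+ 1.5 2.0)
print(add(add("x", "1.0"), "2.0"))  # (+ (+ x 1.0) 2.0)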
5145573a4616cc92be72301eae0a5dfffecf9234
3,656,446
def build_put_cat_request( **kwargs # type: Any ): # type: (...) -> HttpRequest """Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow. :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in our example to find the input shape. Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. :paramtype json: any :keyword content: Pass in binary content you want in the body of the request (typically bytes, a byte iterator, or stream input). Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. :paramtype content: any :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow. :rtype: ~azure.core.rest.HttpRequest Example: .. code-block:: python # JSON input template you can fill out and use as your body input. json = { "hisses": bool, # Optional. "likesMilk": bool, # Optional. "meows": bool, # Optional. "name": "str" # Required. } """ content_type = kwargs.pop('content_type', None) # type: Optional[str] accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/multipleInheritance/cat') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, headers=header_parameters, **kwargs )
5191eac67e6cdeedfa32ca32fad1cdd96e7a2870
3,656,447
from typing import Callable def partial(fn: Callable, *args, **kwargs) -> Callable: """Takes a function and fewer than normal arguments, and returns a function That will consume the remaining arguments and call the function""" def partial_fn(*rem_args, **rem_kwargs): return fn(*args, *rem_args, **kwargs, **rem_kwargs) return partial_fn
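A small usage example of the helper above:

def power(base, exponent):
    return base ** exponent

square = partial(power, exponent=2)
print(square(5))              # 25
print(partial(power, 2)(10))  # 1024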
80f0df16915593fa0c5212e7560626db78147da6
3,656,448
import re


def parse_lipid(name):
    """
parse_lipid
    description:
        parses a lipid name into lipid class and fatty acid composition, returning a
        dictionary with the information. Handles total fatty acid composition, as well
        as individual composition, examples:
            PC(38:3)        --> class: PC, n_carbon: 38, n_unsat: 3
            PC(18:1/20:2)   --> class: PC, n_carbon: 38, n_unsat: 3,
                                fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 20, n_unsat: 2))
        Also, handles special fatty acid notations (modifiers) used for ceramides and
        plasmalogen lipids, examples:
            Cer(d36:2)      --> class: Cer, n_carbon: 36, n_unsat: 2, fa_mod: d
            Cer(d18:1/18:1) --> class: Cer, n_carbon: 36, n_unsat: 2, fa_mod: d,
                                fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 18, n_unsat: 1))
            PE(p40:4)       --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p
            PE(p20:2/20:2)  --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p,
                                fa_comp: ((n_carbon: 20, n_unsat: 2), (n_carbon: 20, n_unsat: 2))
        lipid name must conform to the general format:
            <lipid_class>([modifier]<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>]])
    parameters:
        name (str) -- lipid name to parse
    returns:
        (dict or None) -- parsed lipid information (always contains 'class', 'n_carbon', and
                            'n_unsat' attributes) or None if it cannot be parsed as a lipid
    """
    parsed = {}

    # compile regex pattern
    l_pat = re.compile(
        r"^(?P<cls>[A-Za-z123]+)\((?P<mod>[pdoe]*)(?P<fc1>[0-9]+):(?P<fu1>[0-9]+)/*((?P<fc2>[0-9]+):(?P<fu2>[0-9]+))*/*((?P<fc3>[0-9]+):(?P<fu3>[0-9]+))*\)")

    # parse the name using regex
    l_res = l_pat.match(name)
    if l_res:
        # lipid class (required)
        if l_res.group('cls'):
            parsed["lipid_class"] = l_res.group('cls')
        else:
            # msg = "parse_lipid: failed to parse lipid class for: {}".format(name)
            # raise ValueError(msg)
            return None

        # value error due to failure to parse fatty acid composition
        # def raise_fatty_acid_value_error():
        #     msg = "parse_lipid: failed to parse fatty acid composition for: {}".format(name)
        #     raise ValueError(msg)

        # fc1 and fu1 are always required
        if not l_res.group('fc1') or not l_res.group('fu1'):
            # raise_fatty_acid_value_error()
            return None

        # check if a second fatty acid composition is supplied, e.g. (18:1/16:0)
        # if so, need to compute total fatty acid composition and add individual
        # fatty acids to a list
        if l_res.group('fc2'):
            if not l_res.group('fu2'):
                # raise_fatty_acid_value_error()
                return None
            # add info from the first two fatty acid compositions
            fc1, fu1 = int(l_res.group('fc1')), int(l_res.group('fu1'))
            fc2, fu2 = int(l_res.group('fc2')), int(l_res.group('fu2'))
            parsed["fa_comp"] = [
                {"n_carbon": fc1, "n_unsat": fu1},
                {"n_carbon": fc2, "n_unsat": fu2}
            ]
            # check for 3rd FA composition
            fc3, fu3 = 0, 0
            if l_res.group('fc3'):
                if not l_res.group('fu3'):
                    # raise_fatty_acid_value_error()
                    return None
                fc3, fu3 = int(l_res.group('fc3')), int(l_res.group('fu3'))
                parsed["fa_comp"].append({"n_carbon": fc3, "n_unsat": fu3})
            # compute total fatty acid composition
            parsed["n_carbon"] = fc1 + fc2 + fc3
            parsed["n_unsat"] = fu1 + fu2 + fu3
        else:
            # fc1 and fu1 are the total fatty acid composition
            parsed["n_carbon"] = int(l_res.group('fc1'))
            parsed["n_unsat"] = int(l_res.group('fu1'))

        # add fatty acid modifier if present
        if l_res.group('mod'):
            parsed["fa_mod"] = l_res.group('mod')
    else:
        # could not parse name as a lipid
        parsed = None

    return parsed
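A short usage sketch:

result = parse_lipid("PC(18:1/20:2)")
print(result["n_carbon"], result["n_unsat"])  # 38 3
print(parse_lipid("Cer(d18:1/18:1)")["fa_mod"])  # d
print(parse_lipid("not a lipid"))                # None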
31a26cf57edfd08c6025c07982b7d6805704088e
3,656,449
import json

import requests


def chunk(seq, size):
    """Split a sequence into pieces of at most `size` elements.

    (Helper assumed by `query` below; the stdlib `chunk` module is unrelated
    and would not work here.)
    """
    return [seq[i:i + size] for i in range(0, len(seq), size)]


def query(lon, lat, coordsys='gal', mode='full', limit=500000):
    """
    Send a line-of-sight reddening query to the Argonaut web server.

    lon, lat: longitude and latitude, in degrees.
    coordsys: 'gal' for Galactic, 'equ' for Equatorial (J2000).
    mode: 'full', 'lite' or 'sfd'

    In 'full' mode, outputs a dictionary containing, among other things:

    - 'distmod': The distance moduli that define the distance bins.
    - 'best': The best-fit (maximum probability density) line-of-sight reddening,
      in units of SFD-equivalent E(B-V), to each distance modulus in 'distmod.'
      See Schlafly & Finkbeiner (2011) for a definition of the reddening vector
      (use R_V = 3.1).
    - 'samples': Samples of the line-of-sight reddening, drawn from the
      probability density on reddening profiles.
    - 'success': 1 if the query succeeded, and 0 otherwise.
    - 'converged': 1 if the line-of-sight reddening fit converged, and 0 otherwise.
    - 'n_stars': # of stars used to fit the line-of-sight reddening.
    - 'DM_reliable_min': Minimum reliable distance modulus in pixel.
    - 'DM_reliable_max': Maximum reliable distance modulus in pixel.

    Less information is returned in 'lite' mode, while in 'sfd' mode, the
    Schlegel, Finkbeiner & Davis (1998) E(B-V) is returned.
    """
    # make sure we have list
    if type(lon) == float:
        lon, lat = [lon], [lat]

    # Make sure to have less than 500000 objects (the limit).
    # Cut the list in smaller pieces if that is the case.
    if len(lon) >= limit:
        dicts = [query(loni, lati, coordsys=coordsys, mode=mode)
                 for loni, lati in zip(chunk(lon, limit - 1),
                                       chunk(lat, limit - 1))]
        for dic in dicts[1:]:
            for k in dic:
                dicts[0][k].extend(dic[k])
        return dicts[0]

    if coordsys.lower() in ['gal', 'g']:
        payload = {'mode': mode, 'l': lon, 'b': lat}
    elif coordsys.lower() in ['equ', 'e']:
        payload = {'mode': mode, 'ra': lon, 'dec': lat}
    else:
        raise ValueError("coordsys '{0}' not understood.".format(coordsys))

    req = requests.post('http://argonaut.skymaps.info/gal-lb-query-light',
                        data=json.dumps(payload),
                        headers={'content-type': 'application/json'})

    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError as excep:
        print('Response received from Argonaut:')
        print(req.text)
        raise excep

    return json.loads(req.text)
1975d69b1c01d0cbb824b813d994941a63728750
3,656,450
def f(x0,x1,l,mig_spont,mig_ind,eps): """ function defining the model dx/dt=f(x)""" return [f0(x0,x1,l,mig_spont,mig_ind,eps),f1(x0,x1,l,mig_spont,mig_ind,eps)]
f6c6a9bdfd9eac7db40306388035bb2127301753
3,656,451
from amara.lib import inputsource from amara.xpath.util import parameterize from amara.xslt.result import streamresult, stringresult from amara.xslt.processor import processor def transform(source, transforms, params=None, output=None): """ Convenience function for applying an XSLT transform. Returns a result object. source - XML source document in the form of a string (not Unicode object), file-like object (stream), file path, URI or amara.lib.inputsource instance. If string or stream it must be self-contained XML (i.e. not requiring access to any other resource such as external entities or includes) transforms - XSLT document (or list thereof) in the form of a string, stream, URL, file path or amara.lib.inputsource instance params - optional dictionary of stylesheet parameters, the keys of which may be given as unicode objects if they have no namespace, or as (uri, localname) tuples if they do. output - optional file-like object to which output is written (incrementally, as processed) """ #do the imports within the function: a tad bit less efficient, but #avoid circular crap params = parameterize(params) if params else {} proc = processor() if isinstance(transforms, (list, tuple)): for transform in transforms: proc.append_transform(inputsource(transform)) else: proc.append_transform(inputsource(transforms)) if output is not None: result = streamresult(output) else: result = stringresult() return proc.run(inputsource(source), params, result)
4a9bbb7a27a9daff977ccefc151a0c480b27f71b
3,656,452
from typing import Optional import requests def lookup_user_github_username(user_github_id: int) -> Optional[str]: """ Given a user github ID, looks up the user's github login/username. :param user_github_id: the github id :return: the user's github login/username """ try: headers = { 'Authorization': 'Bearer {}'.format(cla.conf['GITHUB_OAUTH_TOKEN']), 'Accept': 'application/json', } r = requests.get(f'https://api.github.com/user/{user_github_id}', headers=headers) r.raise_for_status() except requests.exceptions.HTTPError as err: msg = f'Could not get user github user from id: {user_github_id}: error: {err}' cla.log.warning(msg) return None github_user = r.json() if 'message' in github_user: cla.log.warning(f'Unable to lookup user from id: {user_github_id} ' f'- message: {github_user["message"]}') return None else: if 'login' in github_user: return github_user['login'] else: cla.log.warning('Malformed HTTP response from GitHub - expecting "login" attribute ' f'- response: {github_user}') return None
2943ff8760ff02006efcd33ecb59508fc2262520
3,656,453
def get_plot_values(radar): """ Return the values specific to a radar for plotting the radar fields. """ return _DEFAULT_PLOT_VALUES[radar].copy()
579cf303a7be1201e71831a13f156b72766bad7f
3,656,454
import time
from operator import itemgetter


def identity(x):
    """Return the argument unchanged (default key/value accessor assumed by the function below)."""
    return x


def time_series_dict_to_list(dictionary, key=lambda x: time.mktime(x.timetuple()), value=identity):
    """
    Convert the incoming dictionary of keys to a list of sorted tuples.
    :param dictionary: dictionary to retrieve data from
    :param key: expression used to retrieve the time_series key from the key
    :param value: expression used to retrieve the time_series value from the value
    :return: list of tuples where index 0 is seconds since epoch, and index 1 is value
    """
    if key is None:
        key = identity
    if value is None:
        value = identity
    time_series = [[key(k), value(v)] for k, v in dictionary.items()]
    return sorted(time_series, key=itemgetter(0))
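A usage sketch with datetime keys:

from datetime import datetime

data = {datetime(2020, 1, 2): 5, datetime(2020, 1, 1): 3}
print(time_series_dict_to_list(data))
# sorted by epoch timestamp: the 2020-01-01 entry comes first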
f5234ec0d5296c17f6758b2815b65ac33009944d
3,656,455
from datetime import datetime def get_data_from_csv(csv_reader): """Creates a list of StatEntry objects based on data in CSV data. Input CSV data must be in the format: Description,timestamp,num_batches,time mean value,time sd Args: csv_reader: csv.reader instance. Returns: A tuple of datetime timestamp and list of benchmark_util.StatEntry objects. Raises: ValueError: if CSV is invalid. """ timestamp = None stat_entries = [] for row in csv_reader: if len(row) != 5: raise ValueError('Expected 5 entries per line in the input CSV file, ' 'but found %d entries.' % len(row)) if '' in row: raise ValueError('Found empty entries in row: %s' % row) # Set timestamp based on the first line in CSV file. if timestamp is None: # Example of time formatting: 2017-06-26 02:59:29.325579 timestamp = datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S.%f") stat_entries.append( benchmark_util.StatEntry(row[0], float(row[3]), 1)) return timestamp, stat_entries
31473648b91b605d8537da720f316a22f8584f2a
3,656,456
def either(a, b): """ :param a: Uncertain value (might be None). :param b: Default value. :return: Either the uncertain value if it is not None or the default value. """ return b if a is None else a
3fd2f99fa0851dae6d1b5f11b09182dbd29bb8c1
3,656,459
def get_app_label_and_model_name(path): """Gets app_label and model_name from the path given. :param str path: Dotted path to the model (without ".model", as stored in the Django `ContentType` model. :return tuple: app_label, model_name """ parts = path.split('.') return (''.join(parts[:-1]), parts[-1])
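Example inputs and outputs:

print(get_app_label_and_model_name('auth.user'))      # ('auth', 'user')
print(get_app_label_and_model_name('myapp.article'))  # ('myapp', 'article')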
998e8d81f59491a51f3ae463c76c8627ed63b435
3,656,460
def get_item_editor(val): """ (val: Any) -> Editor Returns customized View editor type for given attribute value. """ if isinstance(val, list): # later might need tuple with label case if isinstance(val[0], str): return CheckListEditor(values=val) else: return CheckListEditor(values=[str(item) for item in val]) if isinstance(val, bool): return BooleanEditor() else: return TextEditor(auto_set=False, enter_set=True)
46dd3011bcd34ee7ecf5a311d45e8fa7a6d8603e
3,656,461
from typing import List import json def index_js_to_enriched_function_blocks(index_js: str) -> List[EnrichedFunctionBlock]: """ Main function of the file. Converts raw index.js file into the output dataclass. """ trimmed_index_js = trim_index_js(index_js) index_json = json.loads(trimmed_index_js) rtn_blocks = [] for package_name, list_of_scala_types in index_json.items(): for scala_type in list_of_scala_types: enriched_blocks = extract_enriched_function_blocks(package_name, scala_type) rtn_blocks.extend(enriched_blocks) return rtn_blocks
46776f7a277da4f111fb8b2d797c91777d84c2a7
3,656,462
from typing import Union def compute_single_results(base_path: str, file_name: str, selection_metric: str, selection_scheme: Union[None, str], selection_mode: str, selection_domain: str, result_scheme: str, result_mode: str, result_metric: str): """ Parameters ---------- base_path file_name selection_metric selection_mode selection_scheme selection_domain result_scheme result_mode result_metric """ path = base_path + file_name csv_path = base_path + 'results.csv' # read the data from the tensorboard summary writer file iterator = summary_iterator(path) tag_dict = create_tag_dict(iterator) # create a csv file for storing the results create_csv(csv_path, tag_dict) # read the results data_frame = read_csv_file(csv_path) # get the desired results in columns column_indices, target_groups = get_metric_columns(data_frame, selection_metric, selection_scheme, mode=selection_mode) # determine the time step of the best results of the desired result selection_col_index = target_groups.index(selection_domain) _, time_step = get_max_val(column_indices[selection_col_index], data_frame) # get the targets and columns of the metrics, which should be reported column_indices, target_groups = get_metric_columns(data_frame, result_metric, result_scheme, mode=result_mode) results = select_results_by_time_step(column_indices, data_frame, time_step) result_dict = {} for key, value in zip(target_groups, results): result_dict[key] = value return result_dict
b176f7d2a3ae3c19c67794c47969a3bf4d5d203b
3,656,463
def run(): """ Step through each row and every 3rd column to find collisions """ trees = 0 x = 0 width = len(rows[0]) for line in rows[1:]: x += 3 if x >= width: x -= width if line[x] == "#": trees += 1 return trees
f8c3f05ad411990bf16c6161f8ebcb544d5930df
3,656,464
def div_col(*items, size=None, style=None, id=None, classes=None) -> HTML: """Generate a new div with a col class Parameters ---------- items: argument list DOM children of this div """ children = ''.join(items) attr = [] if style is not None: attr.append(f'style="{style}"') if id is not None: attr.append(f'id="{id}"') if classes is not None: attr.append(f'class="{classes}"') elif size is not None: attr.append(f'class="col-{size}"') else: attr.append(f'class="col"') attr = ' '.join(attr) return f'<div {attr}>{children}</div>'
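A short usage sketch (assuming the snippet's HTML return type is a plain string alias); the generated markup is shown as comments:

print(div_col('<p>left</p>', size=6))
# <div class="col-6"><p>left</p></div>
print(div_col('<span>a</span>', '<span>b</span>', classes='col-auto text-center'))
# <div class="col-auto text-center"><span>a</span><span>b</span></div>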
9200c23481756fa8d82813d1e95edc7328e63497
3,656,465
import cv2
import face_recognition
from numpy import ndarray


def predict(image: bytes) -> ndarray:
    """
    Call the model returning the image with the faces blurred

    :param image: the image to blur the faces from
    :return: the image with the faces blurred
    """
    sigma = 50
    image = face_recognition.load_image_file(image)
    locations = face_recognition.face_locations(image)
    for location in locations:
        # face_recognition returns boxes as (top, right, bottom, left)
        top, right, bottom, left = location
        image = blur_image(image, left, right, top, bottom, sigma=sigma)
    is_successful, im_png = cv2.imencode(".png", image)
    if is_successful:
        return im_png
    raise Exception("Error encoding image")
87cfffa2a63c3e90baf023f2d37effa753c9ab89
3,656,466
import json

import frappe
from frappe.defaults import get_user_default_as_list
from frappe.utils import add_months, cstr, flt
from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate


def get_basic_details(args, item):
    """
    :param args: {
            "item_code": "",
            "warehouse": None,
            "customer": "",
            "conversion_rate": 1.0,
            "selling_price_list": None,
            "price_list_currency": None,
            "price_list_uom_dependant": None,
            "plc_conversion_rate": 1.0,
            "doctype": "",
            "name": "",
            "supplier": None,
            "transaction_date": None,
            "conversion_rate": 1.0,
            "buying_price_list": None,
            "is_subcontracted": "Yes" / "No",
            "ignore_pricing_rule": 0/1
            "project": "",
            barcode: "",
            serial_no: "",
            warehouse: "",
            currency: "",
            update_stock: "",
            price_list: "",
            company: "",
            order_type: "",
            is_pos: "",
            ignore_pricing_rule: "",
            project: "",
            qty: "",
            stock_qty: "",
            conversion_factor: ""
        }
    :param item: `item_code` of Item object
    :return: frappe._dict
    """
    if not item:
        item = frappe.get_doc("Item", args.get("item_code"))

    if item.variant_of:
        item.update_template_tables()

    user_default_warehouse_list = get_user_default_as_list('Warehouse')
    user_default_warehouse = user_default_warehouse_list[0] \
        if len(user_default_warehouse_list) == 1 else ""

    item_defaults = get_item_defaults(item.name, args.company)
    warehouse = user_default_warehouse or item_defaults.get("default_warehouse") or args.warehouse

    material_request_type = ''
    if args.get('doctype') == "Material Request" and not args.get('material_request_type'):
        args['material_request_type'] = frappe.db.get_value('Material Request',
            args.get('name'), 'material_request_type')

    # Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master
    if not args.uom:
        if args.get('doctype') in ['Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice']:
            args.uom = item.sales_uom if item.sales_uom else item.stock_uom
        elif (args.get('doctype') in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']) or \
            (args.get('doctype') == 'Material Request' and args.get('material_request_type') == 'Purchase'):
            args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom
        else:
            args.uom = item.stock_uom

    out = frappe._dict({
        "item_code": item.name,
        "item_name": item.item_name,
        "description": cstr(item.description).strip(),
        "image": cstr(item.image).strip(),
        "warehouse": warehouse,
        "income_account": get_default_income_account(args, item_defaults),
        "expense_account": get_default_expense_account(args, item_defaults),
        "cost_center": get_default_cost_center(args, item_defaults),
        'has_serial_no': item.has_serial_no,
        'has_batch_no': item.has_batch_no,
        "batch_no": None,
        "item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in item.get("taxes")))),
        "uom": args.uom,
        "min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
        "qty": args.qty or 1.0,
        "stock_qty": args.qty or 1.0,
        "price_list_rate": 0.0,
        "base_price_list_rate": 0.0,
        "rate": 0.0,
        "base_rate": 0.0,
        "amount": 0.0,
        "base_amount": 0.0,
        "net_rate": 0.0,
        "net_amount": 0.0,
        "discount_percentage": 0.0,
        "supplier": item_defaults.get("default_supplier"),
        "update_stock": args.get("update_stock") if args.get('doctype') in ['Sales Invoice', 'Purchase Invoice'] else 0,
        "delivered_by_supplier": item.delivered_by_supplier if args.get("doctype") in ["Sales Order", "Sales Invoice"] else 0,
        "is_fixed_asset": item.is_fixed_asset,
        "weight_per_unit": item.weight_per_unit,
        "weight_uom": item.weight_uom,
        "last_purchase_rate": item.last_purchase_rate if args.get("doctype") in ["Purchase Order"] else 0
    })

    if item.enable_deferred_revenue:
        service_end_date = add_months(args.transaction_date, item.no_of_months)
        out.update({
            "enable_deferred_revenue": item.enable_deferred_revenue,
            "deferred_revenue_account": get_default_deferred_revenue_account(args, item),
            "service_start_date": args.transaction_date,
            "service_end_date": service_end_date
        })

    # calculate conversion factor
    if item.stock_uom == args.uom:
        out.conversion_factor = 1.0
    else:
        out.conversion_factor = args.conversion_factor or \
            get_conversion_factor(item.item_code, args.uom).get("conversion_factor") or 1.0

    args.conversion_factor = out.conversion_factor
    out.stock_qty = out.qty * out.conversion_factor

    # calculate last purchase rate
    out.last_purchase_rate = item_last_purchase_rate(args.name, args.conversion_rate,
        item.item_code, out.conversion_factor)

    # if default specified in item is for another company, fetch from company
    for d in [
        ["Account", "income_account", "default_income_account"],
        ["Account", "expense_account", "default_expense_account"],
        ["Cost Center", "cost_center", "cost_center"],
        ["Warehouse", "warehouse", ""]]:
        if not out[d[1]]:
            out[d[1]] = frappe.db.get_value("Company", args.company, d[2]) if d[2] else None

    for fieldname in ("item_name", "item_group", "barcodes", "brand", "stock_uom"):
        out[fieldname] = item.get(fieldname)

    return out
f79438ebdcb7de48f48ca92e80f160a396d6ea6a
3,656,467
import math


def vec_len(x):
    """Length of the 2D vector"""
    length = math.sqrt(x[0]**2 + x[1]**2)
    return length
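# Example usage (illustrative):
# vec_len((3.0, 4.0))  # -> 5.0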
a357d31df808720eb2c4dfc12f4d6194ef904f67
3,656,468
def part1_count_increases(measurements):
    """Count increases of a measure with the next."""
    windows = zip(measurements[1:], measurements[:-1])
    increases = filter(lambda w: w[0] > w[1], windows)
    return len(list(increases))
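# Example usage (illustrative, Advent-of-Code style depth readings):
# part1_count_increases([199, 200, 208, 210, 200])  # -> 3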
59311b940ff7fe72cd6fe9cd4d0705918e796e69
3,656,469
import numpy as np  # needed for the np.nan placeholders used in the doctests


def remove_empties(seq):
    """ Remove ('empty', nan) placeholder items

    >>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
    [1, 2, 4, 5]

    >>> remove_empties([('empty', np.nan)])
    [nan]

    >>> remove_empties([])
    []
    """
    if not seq:
        return seq

    seq2 = [x for x in seq
            if not (isinstance(x, tuple) and x and x[0] == 'empty')]
    if seq2:
        return seq2
    else:
        return [seq[0][1]]
500cbbd942682bfde1b9c1babe9a2190413b07fd
3,656,470
from collections import deque


def breadth_first_graph_search(problem):
    """Breadth-first graph search algorithm (stepped via the global counter)."""
    global frontier, node, explored, counter

    if counter == -1:
        node = Node(problem.initial)
        display_current(node)
        if problem.goal_test(node.state):
            return node

        frontier = deque([node])  # FIFO queue

        display_frontier(frontier)
        explored = set()

    if counter % 3 == 0 and counter >= 0:
        node = frontier.popleft()
        display_current(node)
        explored.add(node.state)

    if counter % 3 == 1 and counter >= 0:
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                if problem.goal_test(child.state):
                    return child
                frontier.append(child)
        display_frontier(frontier)

    if counter % 3 == 2 and counter >= 0:
        display_explored(node)

    return None
332d5d300615f30f1125c5fafed882f1941b8f39
3,656,471
def to_smiles(rdm):
    """ SMILES string from an rdkit molecule object """
    smi = _rd_chem.MolToSmiles(rdm)
    return smi
bd0c79b8b0066bd0cf2f49d99c0ec80543f50c9b
3,656,472
import collections
import logging

import anndata
import numpy as np
import scipy


def merge_bins(adata, bin_size):
    """Merge bins."""
    orig_bins = collections.defaultdict(list)
    for coor in adata.var_names:
        chrom, start, end = coor.split(':')[0], int(
            coor.split(':')[1].split('-')[0]), int(
                coor.split(':')[1].split('-')[1])
        orig_bins[chrom].append((start, end))
    logging.info('Done with counting the bins')

    resized_bins_index = []
    resized_chrs = []
    resized_bins_counts = []
    for chrom, ranges in orig_bins.items():
        curr_bin = 0
        curr_acc = []
        for (start, end) in sorted(ranges):
            if start // bin_size == curr_bin:
                curr_acc.append(f'{chrom}:{start}-{end}')
            else:
                if curr_acc:  # For the empty initialisation at the beginning of the chr.
                    resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1))
                    resized_bins_index.append(
                        f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}')
                curr_acc = [f'{chrom}:{start}-{end}']
                curr_bin = start // bin_size
        resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1))
        resized_bins_index.append(
            f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}')
        resized_chrs.append(scipy.sparse.csr_matrix(np.hstack(resized_bins_counts)))
        resized_bins_counts = []
        logging.info('Done with %s', chrom)

    new_adata = anndata.AnnData(
        scipy.sparse.csr_matrix(
            np.hstack([chrom.toarray() for chrom in resized_chrs])))
    new_adata.var_names = resized_bins_index
    new_adata.obs = adata.obs
    return new_adata
dc1f939e5bcd1604b525d616ee94868e6baae8c6
3,656,473
def show_all_fruits():
    """Show all fruits in the database."""
    fruits = fruits_collection.find({})
    for fruit in fruits:
        print(fruit)

    context = {
        'list_of_fruits': fruits_collection.find({})
    }
    return render_template('show_fruits.html', **context)
6329c6f6f1a7a30f6e35ea83aecd7fd71e81fe24
3,656,474
import json


def load_fields(path: str = f'{DEFAULT_FIELD_PATH}{FIELD_FILENAME}') -> dict:
    """Load fields.

    Parameters
    ----------
    :param path: string path to the fields file.

    Returns
    -------
    A dictionary of fields, with the following format:
    {
        "field_name": {
            "help_text": "",
            "type": ""
        }
    }
    """
    with open(path, 'r') as json_file:
        return json.load(json_file)
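# Example usage (illustrative; 'fields.json' is a hypothetical path, and
# DEFAULT_FIELD_PATH / FIELD_FILENAME are module-level constants assumed to exist):
# for name, spec in load_fields('fields.json').items():
#     print(name, spec['type'])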
b6bc8916fa3a9d8a53f7cda5e13acf32a9b57860
3,656,475
def set_max_concurrency(
    uses: int, bucket: t.Type[buckets.Bucket]
) -> t.Callable[[commands.base.CommandLike], commands.base.CommandLike]:
    """
    Second order decorator that defines the max concurrency limit for a command.

    Args:
        uses (:obj:`int`): The maximum number of uses of the command that can be executing concurrently
            before a :obj:`~.errors.MaxConcurrencyLimitReached` will be raised upon invocation.
        bucket (Type[:obj:`~.buckets.Bucket`]): Bucket that command max concurrency will be processed under.
    """
    if uses < 1 or not isinstance(uses, int):
        raise ValueError("'uses' must be a positive integer")

    def decorate(c_like: commands.base.CommandLike) -> commands.base.CommandLike:
        if not isinstance(c_like, commands.base.CommandLike):
            raise SyntaxError("'set_max_concurrency' decorator must be above the 'command' decorator")

        c_like.max_concurrency = (uses, bucket)
        return c_like

    return decorate
b0677bc71f68d9ae674b424795bb29e93915726b
3,656,476
def three_to_one_protocol_bob(q1, q2, q3, bob, socket):
    """
    Implements Bob's side of the 3->1 distillation protocol.
    This function should perform the gates and measurements for 3->1 using
    qubits q1 and q2, then send the measurement outcome to Alice and determine
    if the distillation was successful.

    :param q1: Bob's qubit from the first entangled pair
    :param q2: Bob's qubit from the second entangled pair
    :param q3: Bob's qubit from the third entangled pair
    :param bob: Bob's NetQASMConnection
    :param socket: Alice's classical communication socket to Bob
    :return: True/False indicating if protocol was successful
    """
    b1, b2 = three_to_one_gates_and_measurement_bob(q1, q2, q3)
    bob.flush()

    # Send measurement result to Alice, receive measurement result from Alice
    # and check if the protocol was successful
    b1 = int(b1)
    b2 = int(b2)
    socket.send_structured(StructuredMessage("The outcome is: ", (b1, b2)))
    a1, a2 = socket.recv_structured().payload
    if (a1, a2) == (b1, b2):
        return True
    else:
        return False
0eab81d5d860c4314be4411b0dca429fc58cdb28
3,656,477
def read_code_blocks_from_md(md_path):
    """
    Read ```python annotated code blocks from a markdown file.

    Args:
        md_path (str): Path to the markdown file

    Returns:
        py_blocks ([str]): The blocks of python code.
    """
    with open(md_path, "r") as f:
        full_md = f.read()

    md_py_splits = full_md.split("```python")[1:]
    py_blocks = [split.split("```")[0] for split in md_py_splits]
    return py_blocks
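# Example usage (illustrative; "example.md" is a hypothetical file written here only for the demo):
# with open("example.md", "w") as fh:
#     fh.write("Intro\n```python\nprint('hi')\n```\n")
# read_code_blocks_from_md("example.md")  # -> ["\nprint('hi')\n"]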
ca920f74e9326cf5f3635fbb6ebe125b6d97a349
3,656,478
import math

import numpy as np
import theano
import theano.tensor as T


def CBOW(vocab_size, emb_size):
    """
    CBOW: Function to define the CBOW model

    parameters:
        vocab_size: the vocabulary size
        emb_size: dimension of the embedding vector

    return:
        List of theano variables [context, target], represents the model input,
        Theano function represents the loss (i.e. the cost or the objective) function,
        List of theano (shared) variable params, represents the parameters of the model.
    """
    context = T.imatrix(name='context')
    target = T.ivector('target')

    W_in_values = np.asarray(np.random.uniform(-1.0, 1.0, (vocab_size, emb_size)),
                             dtype=theano.config.floatX)
    W_out_values = np.asarray(np.random.normal(scale=1.0 / math.sqrt(emb_size),
                                               size=(emb_size, vocab_size)),
                              dtype=theano.config.floatX)

    W_in = theano.shared(value=W_in_values, name='W_in', borrow=True)
    W_out = theano.shared(value=W_out_values, name='W_out', borrow=True)

    h = T.mean(W_in[context], axis=1)           # compute the hidden (projection) layer output: input -> hidden (eq. 1)
    uj = T.dot(h, W_out)                        # hidden -> output (eq. 2)
    p_target_given_contex = T.nnet.softmax(uj)  # softmax activation (eq. 3)
    loss = -T.mean(T.log(p_target_given_contex)[T.arange(target.shape[0]), target])  # loss function (eq. 4)

    params = [W_in, W_out]
    return [context, target], loss, params
82275f52528715fc783247b649cc5b56e51e1ce2
3,656,479
def subject(request, clas_slug, subject_slug, page=1):
    """List of GDZ (solution) books for a subject."""
    gdz_clas = get_object_or_404(GdzClas, slug=clas_slug)
    gdz_subject = get_object_or_404(GdzSubject, slug=subject_slug, gdz_clas=gdz_clas)

    book_list = GdzBook.published.filter(gdz_clas=gdz_clas,
                                         gdz_subject=gdz_subject).order_by('-public_time')

    paginator = Paginator(book_list, PAGE_ITEM)
    try:
        books = paginator.page(page)
    except EmptyPage:
        raise Http404

    h1 = "Гдз {subject_title} {clas_slug} клас".format(subject_title=gdz_subject.title,
                                                       clas_slug=gdz_clas.slug)
    page_title = "Гдз {subject_title} {clas_slug} клас".format(subject_title=gdz_subject.title,
                                                               clas_slug=gdz_clas.slug)

    return render(request, 'gdz/subject.html', {'books': books,
                                                'h1': h1,
                                                'page_title': page_title,
                                                'gdz_clas': gdz_clas,
                                                'gdz_subject': gdz_subject,
                                                'paginate_link': 'gdz:subject_paginate',
                                                'link': 'gdz:subject'})
1b7a3bd6314de87ec059313cd020a8249586619f
3,656,480
def root_mean_square_ffinalise(out, sub_samples=None):
    """Divide the weighted sum by the sum of weights and take the square root.

    Also mask out any values derived from a too-small sample size.

    :Parameters:

        out: 3-`tuple` of `numpy.ndarray`
            An output from `root_mean_square_fpartial`.

        sub_samples: optional

    :Returns:

        2-`tuple` of `numpy.ndarray`
            The sample size and the RMS.
    """
    N, avg = mean_ffinalise(out, sub_samples=sub_samples)

    avg **= 0.5

    return asanyarray(N, avg)
82562ef562b2e7dfaeefee9de42224568900f5a1
3,656,481
import hashlib


def md5sum_fileobj(f, start=0, end=None):
    """Accepts a file object and returns the md5sum."""
    m = hashlib.md5()
    for block in file_reader(f, start, end):
        assert block != "", "Got an empty read"
        m.update(block)
    return m.hexdigest()
db1046c2466d408b0de9e402af31930b72ce9d76
3,656,482
import math


def get_localization_scores(predicted_start: int, predicted_end: int,
                            true_start: int, true_end: int):
    """
    exp(-abs(t_pred_start-t_start)/(t_end-t_start))
    exp(-abs(t_pred_end-t_end)/(t_end-t_start))

    :param predicted_start:
    :param predicted_end:
    :param true_start:
    :param true_end:
    """
    if true_end - true_start <= 0:
        return 0, 0
    base = math.exp(1 / (true_start - true_end))
    return base ** abs(predicted_start - true_start), base ** abs(predicted_end - true_end)
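# Example usage (illustrative): a perfect localization scores (1.0, 1.0); being
# 2 units off at each end of a 10-unit true span scores exp(-0.2) per side.
# get_localization_scores(10, 20, 10, 20)  # -> (1.0, 1.0)
# get_localization_scores(12, 18, 10, 20)  # -> (~0.8187, ~0.8187)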
dfcef55e0594507b48aa83027c5b55a2a6530717
3,656,483
def json_compatible_key(key: str) -> str:
    """As defined in :pep:`566#json-compatible-metadata`"""
    return key.lower().replace("-", "_")
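# Example usage (illustrative):
# json_compatible_key("Description-Content-Type")  # -> "description_content_type"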
b914ba17b3da5df84d72497048565a118fc4fb05
3,656,484
def _scale_func(k):
    """
    Return a lambda function that scales its input by k

    Parameters
    ----------
    k : float
        The scaling factor of the returned lambda function

    Returns
    -------
    Lambda function
    """
    return lambda y_values_input: k * y_values_input
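# Example usage (illustrative):
# double = _scale_func(2.0)
# double(3.5)  # -> 7.0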
65fd06bfb1a278b106eecc4974bc9317b1dea67f
3,656,486
import torch
import torch.nn.functional as f  # assumed: `f` is torch.nn.functional in the original module
from typing import Optional
from typing import Union
from typing import List


def erosion_dependent(input_tensor: torch.Tensor,
                      structuring_element: torch.Tensor,
                      origin: Optional[Union[tuple, List[int]]] = None,
                      border_value: Union[int, float, str] = 'geodesic'):
    """ This type of erosion is needed when you want a structuring element to vary along one axis.

    Parameters
    ----------
    :param input_tensor: torch.Tensor
        The input tensor that you want to erode. It should be a PyTorch tensor of 2 dimensions.
    :param structuring_element: torch.Tensor
        The structuring element to erode. The structuring element should be a PyTorch tensor of 3 dimensions;
        first dimension should coincide with first dimension of input_tensor and two other dimensions are
        the shape of the structuring element.
    :param origin: None, tuple, List[int]
        The origin of the structuring element. Default to center of the structuring element.
        Negative indexes are allowed. The origin will be the same for all the structuring elements.
    :param border_value: int, float, str
        The value used to pad the image in the border. Two options are allowed when a string is passed
        in parameter:
        - 'geodesic': only points within the input are considered when taking the minimum.
        - 'euclidean': extends naturally the image setting minus infinite value to the border.
        Default value is 'geodesic'.

    Outputs
    -------
    :return: torch.Tensor
        The erosion dependent of the first axis as a PyTorch tensor of the same shape than the original input.
    """
    # Check parameters
    check_parameters_dependent(input_tensor, structuring_element, origin, border_value)

    # Adapt origin
    if not origin:
        origin = (structuring_element.shape[1] // 2, structuring_element.shape[2] // 2)

    # Fill border value if needed
    border_value = fill_border(border_value, 'erosion')

    # Convert tensor to float if needed
    input_tensor = convert_float(input_tensor)

    # Pad input
    pad_list = [origin[1], structuring_element.shape[2] - origin[1] - 1,
                origin[0], structuring_element.shape[1] - origin[0] - 1]
    input_pad = f.pad(input_tensor, pad_list, mode='constant', value=border_value)

    # Compute erosion
    if str(input_tensor.device) == 'cpu':
        raise ValueError('Operation currently only implemented for GPU.')
    else:
        result = morphology_cuda.erosion_dependent(input_pad, structuring_element, BLOCK_SHAPE)

    return result
7646d56ceab9a7ec27182c485954f748cf4afd75
3,656,488
import numpy as np


def bin_barcodes(barcodes, binsize=1000):
    """Binning barcodes into chunks

    Parameters
    ----------
    barcodes : iterable
        Iterable of barcodes
    binsize : int
        Size of bin for grouping barcodes

    Returns
    -------
    list of barcode arrays (one per bin)
    """
    binsize = int(float(binsize))
    bins = np.digitize(np.arange(0, barcodes.shape[0]),
                       np.arange(0, barcodes.shape[0], binsize))
    return [barcodes[bins == x] for x in np.unique(bins)]
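# Example usage (illustrative; assumes `barcodes` is a 1-D numpy array):
# bin_barcodes(np.array(['A', 'B', 'C', 'D', 'E']), binsize=2)
# -> three arrays: ['A', 'B'], ['C', 'D'], ['E']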
3cc063f68a89a325a53def31bfe779d3aa8e62c6
3,656,489
def flash_regions(device, region_map):
    """divide the named memory into sized memory regions"""
    regions = []
    for x in region_map:
        if len(x) == 2:
            # no meta information: set it all to None
            (name, region_sizes) = x
            meta = (None,) * len(region_sizes)
        elif len(x) == 3:
            # provided meta information - make sure it's per region
            (name, region_sizes, meta) = x
            assert len(region_sizes) == len(meta), 'need meta information for each flash region'
        else:
            assert False, 'bad flash region specification'
        # the regions are based on the peripheral memory space
        base_adr = device.peripherals[name].address
        total_size = device.peripherals[name].size
        adr = base_adr
        for (s, m) in zip(region_sizes, meta):
            regions.append(region(name, adr, s, m))
            adr += s
        # make sure the regions cover the entire memory space of the peripheral
        assert base_adr + total_size == adr, "regions don't encompass all memory"
    return regions
43f444c1bdfee8441a8e6bf6c72dbc06dccb56df
3,656,490
import json

from buzz import Collection  # assumed: the buzz corpus Collection (typing.Collection, as originally imported, cannot be instantiated)


def _load_explorer_data(multiprocess=False):
    """
    Load in all available corpora and make their initial tables

    This is run when the app starts up
    """
    corpora = dict()
    tables = dict()
    for corpus in Corpus.objects.all():
        if corpus.disabled:
            print(f"Skipping corpus because it is disabled: {corpus.name}")
            continue

        buzz_collection = Collection(corpus.path)
        # a corpus must have a feather or conll to be explorable. prefer feather.
        buzz_corpus = buzz_collection.feather or buzz_collection.conllu

        if buzz_corpus is None:
            print(f"No parsed data found for {corpus.path}")
            continue

        corpora[corpus.slug] = buzz_corpus

        if corpus.load:
            print(f"Loading corpus into memory: {corpus.name} ...")
            opts = dict(add_governor=corpus.add_governor, multiprocess=multiprocess)
            buzz_corpus = buzz_corpus.load(**opts)
            buzz_corpus = _postprocess_corpus(buzz_corpus, corpus)
            corpora[corpus.slug] = buzz_corpus
        else:
            print(f"NOT loading corpus into memory: {corpus.name} ...")

        # what should be shown in the frequencies space to begin with?
        if getattr(corpus, "initial_table", False):
            display = json.loads(corpus.initial_table)
        else:
            display = dict(show="p", subcorpora="file")
        print(f"Generating an initial table for {corpus.name} using {display}")
        initial_table = buzz_corpus.table(**display)
        tables[corpus.slug] = initial_table
    return corpora, tables
f302b2529402b4ed75fa554ef915d7c117bca149
3,656,491
import numpy as np


def compute_CD_projected_psth(units, time_period=None):
    """
    Routine for Coding Direction computation on all the units in the specified unit_keys
    Coding Direction is calculated in the specified time_period

    :param: unit_keys - list of unit_keys
    :return: coding direction unit-vector,
             contra-trials CD projected trial-psth,
             ipsi-trials CD projected trial-psth,
             psth time-stamps
    """
    unit_hemi = (ephys.ProbeInsertion.InsertionLocation * experiment.BrainLocation
                 & units).fetch('hemisphere')
    if len(set(unit_hemi)) != 1:
        raise Exception('Units from both hemispheres found')
    else:
        unit_hemi = unit_hemi[0]

    session_key = experiment.Session & units
    if len(session_key) != 1:
        raise Exception('Units from multiple sessions found')

    # -- the computation part
    # get units and trials - ensuring they have trial-spikes
    contra_trials = (TrialCondition().get_trials(
        'good_noearlylick_right_hit' if unit_hemi == 'left' else 'good_noearlylick_left_hit')
        & session_key & ephys.Unit.TrialSpikes).fetch('KEY')
    ipsi_trials = (TrialCondition().get_trials(
        'good_noearlylick_left_hit' if unit_hemi == 'left' else 'good_noearlylick_right_hit')
        & session_key & ephys.Unit.TrialSpikes).fetch('KEY')

    # get per-trial unit psth for all units - unit# x (trial# x time)
    contra_trial_psths, contra_edges = zip(*(compute_unit_psth(unit, contra_trials, per_trial=True)
                                             for unit in units))
    ipsi_trial_psths, ipsi_edges = zip(*(compute_unit_psth(unit, ipsi_trials, per_trial=True)
                                         for unit in units))

    # compute trial-ave unit psth
    contra_psths = zip((p.mean(axis=0) for p in contra_trial_psths), contra_edges)
    ipsi_psths = zip((p.mean(axis=0) for p in ipsi_trial_psths), ipsi_edges)

    # compute coding direction
    cd_vec = compute_coding_direction(contra_psths, ipsi_psths, time_period=time_period)

    # get time vector, relying on all units PSTH shares the same time vector
    time_stamps = contra_edges[0]

    # get coding projection per trial - trial# x unit# x time
    contra_psth_per_trial = np.dstack(contra_trial_psths)
    ipsi_psth_per_trial = np.dstack(ipsi_trial_psths)

    proj_contra_trial = np.vstack(np.dot(tr_u, cd_vec) for tr_u in contra_psth_per_trial)  # trial# x time
    proj_ipsi_trial = np.vstack(np.dot(tr_u, cd_vec) for tr_u in ipsi_psth_per_trial)      # trial# x time

    return cd_vec, proj_contra_trial, proj_ipsi_trial, time_stamps, unit_hemi
44025b200855cb685efa052e106b4b5a1ed47b6e
3,656,492
import struct


def _tvos_extension_impl(ctx):
    """Implementation of the `tvos_extension` Skylark rule."""
    binary_artifact = binary_support.get_binary_provider(
        ctx.attr.deps, apple_common.AppleExecutableBinary).binary
    deps_objc_provider = binary_support.get_binary_provider(
        ctx.attr.deps, apple_common.AppleExecutableBinary).objc
    additional_providers, legacy_providers, additional_outputs = bundler.run(
        ctx,
        "TvosExtensionArchive", "tvOS extension",
        ctx.attr.bundle_id,
        binary_artifact=binary_artifact,
        deps_objc_providers=[deps_objc_provider],
    )

    return struct(
        files=additional_outputs,
        providers=[
            TvosExtensionBundleInfo(),
        ] + additional_providers,
        **legacy_providers
    )
e1bbd3711e7b449fdb23ebb6bbb755c4dbbe14c9
3,656,493
import copy


def simplify_graph(G):
    """remove the scores, so the cycle_exists() function can work"""
    graph = copy.deepcopy(G)
    simplified = dict((k, graph[k][0]) for k in graph)

    # add dummy edges, so the cycle_exists() function works
    # (iterate over a snapshot of the keys, since the dict is extended in the loop)
    for source in list(simplified.keys()):
        for target in simplified[source]:
            if target not in simplified:
                simplified[target] = []

    return simplified
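# Example usage (illustrative): values are (neighbour-list, score) pairs.
# simplify_graph({'a': (['b', 'c'], 0.5), 'b': (['c'], 0.2)})
# -> {'a': ['b', 'c'], 'b': ['c'], 'c': []}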
fc9b052c83ce500d20842367b3b6f011268a5a7d
3,656,494
def Run_INCR(num_vertices, edge_density, algorithm_name, k, init_tree=None):
    """
    Initialize and run the MVA algorithm
    """
    edges_bound = int(edge_density * (num_vertices * (num_vertices - 1) / 2))
    k = max(1, k * edges_bound)

    runner = runner_factory(num_vertices, algorithm_name, None,
                            edges_bound=edges_bound, edge_density=edge_density, k=k)

    randomizer = Randomizer(2 * num_vertices, runner["Parameters"]["seed"])
    with Timer("t_expand_cliques", runner["Times"]):
        if init_tree == "ktree":
            ktree_k = 1 / 2 * (2 * num_vertices - 1
                               - sqrt(((2 * num_vertices - 1) * (2 * num_vertices - 1)) - (8 * edges_bound)))
            ktree_k = int(floor(ktree_k))
            k_edges = (num_vertices - ktree_k - 1) * ktree_k + (ktree_k * (ktree_k + 1) / 2)
            p_mva = init_k_tree_incr(runner["Parameters"]["n"], ktree_k, randomizer)
            print("- Init with " + str(ktree_k) + "-tree:")
        elif init_tree == "tree":
            p_mva = expand_tree(runner["Parameters"]["n"], randomizer)
            print("- Expand tree:")
        else:
            p_mva = expand_cliques(runner["Parameters"]["n"], randomizer)
            print("- Expand cliques:")
        print(p_mva)

    with Timer("t_split_edges", runner["Times"]):
        loops = split_edges_k(p_mva, runner["Parameters"]["edges_bound"], randomizer, k)
        print("- Split edges:")

    runner["Stats"]["total"] = runner["Times"]["t_split_edges"] + runner["Times"]["t_expand_cliques"]
    runner["Stats"]["loops%"] = loops / edges_bound
    print("  loops:", runner["Stats"]["loops%"])
    print(p_mva)

    return calculate_mva_statistics(p_mva, runner, randomizer, num_vertices)
4b64891d773f8e5f43833984727d514e089937cb
3,656,495
def _two_point_interp(times, altitudes, horizon=0*u.deg):
    """
    Do linear interpolation between two ``altitudes`` at
    two ``times`` to determine the time where the altitude
    goes through zero.

    Parameters
    ----------
    times : `~astropy.time.Time`
        Two times for linear interpolation between

    altitudes : array of `~astropy.units.Quantity`
        Two altitudes for linear interpolation between

    horizon : `~astropy.units.Quantity`
        Solve for the time when the altitude is equal to
        reference_alt.

    Returns
    -------
    t : `~astropy.time.Time`
        Time when target crosses the horizon
    """
    if not isinstance(times, Time):
        return MAGIC_TIME
    else:
        slope = (altitudes[1] - altitudes[0]) / (times[1].jd - times[0].jd)
        return Time(times[1].jd - ((altitudes[1] - horizon) / slope).value,
                    format='jd')
b7b9bd53464d17c9e8fc51006a938b4c6b9cfac1
3,656,497
import string


def setup_sample_data(no_of_records):
    """Generate the given number of sample data with 'id', 'name', and 'dt'"""
    rows_in_database = [{'id': counter,
                         'name': get_random_string(string.ascii_lowercase, 20),
                         'dt': '2017-05-03'}
                        for counter in range(0, no_of_records)]
    return rows_in_database
65659f931a103ea80dce19eabe277bba88653279
3,656,498
from io import StringIO
import csv


def generate_csv_string(csv_data):
    """ Turn 2d string array into a string representing a csv file """
    output_buffer = StringIO()
    writer = csv.writer(output_buffer)

    csv_data = equalize_array(csv_data)
    csv_data = utf_8_encode_array(csv_data)

    for row in csv_data:
        writer.writerow(row)

    body = output_buffer.getvalue()
    output_buffer.close()
    return body
70861f363ed3d8445b38f448ffdea9ea1d479239
3,656,499
def build_params_comments(python_code, keyword, info):
    """Builds comments for parameters"""
    for arg, arg_info in zip(info.get('expected_url_params').keys(),
                             info.get('expected_url_params').values()):
        python_code += '\n' + 2*TAB_BASE*SPACE + ':param ' + score_to_underscore(arg) + ': '
        python_code += str(arg_info.get('description')) + ' ' + str(arg_info.get('possible_values'))
    return python_code
ce7446bb49ff25cbb2fb08ed8ca389dea16919bd
3,656,501
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Netatmo component."""
    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][DATA_PERSONS] = {}

    if DOMAIN not in config:
        return True

    config_flow.NetatmoFlowHandler.async_register_implementation(
        hass,
        config_entry_oauth2_flow.LocalOAuth2Implementation(
            hass,
            DOMAIN,
            config[DOMAIN][CONF_CLIENT_ID],
            config[DOMAIN][CONF_CLIENT_SECRET],
            OAUTH2_AUTHORIZE,
            OAUTH2_TOKEN,
        ),
    )

    return True
7913dd1b7eaa60e7bedfba2a9da199cb4045e7ba
3,656,502
def hotkey(x: int, y: int) -> bool:
    """Try to copy by dragging over the string, and then use hotkey."""
    gui.moveTo(x + 15, y, 0)
    gui.mouseDown()
    gui.move(70, 0)
    gui.hotkey("ctrl", "c")
    gui.mouseUp()
    return check_copied()
5cd789fd8e1b3ecf9dd1585a6831f6db92d4b6b0
3,656,504
import requests


def get_tv_imdbid_by_id(tv_id, verify=True):
    """
    Returns the IMDb_ ID for a TV show.

    :param int tv_id: the TMDB_ series ID for the TV show.
    :param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
    :returns: the IMDb_ ID for that TV show. Otherwise returns ``None`` if cannot be found.
    :rtype: str

    .. _IMDb: https://www.imdb.com
    """
    response = requests.get('https://api.themoviedb.org/3/tv/%d/external_ids' % tv_id,
                            params={'api_key': tmdb_apiKey}, verify=verify)
    if response.status_code != 200:
        print('problem here, %s.' % response.content)
        return None
    data = response.json()
    if 'imdb_id' not in data:
        return None
    return data['imdb_id']
363a2284d65fe1cfa2f3d2d07e3205de77bf67ef
3,656,505
def test_reading_cosmos_catalog():
    """Returns the cosmos catalog"""
    cosmos_catalog = CosmosCatalog.from_file(COSMOS_CATALOG_PATHS)
    return cosmos_catalog
1fc6f32cfc86ee28e114878d5ce7c13891e79ae1
3,656,506
def is_terminal(p):
    """
    Check if a given packet is a terminal element.

    :param p: element to check
    :type p: object
    :return: If ``p`` is a terminal element
    :rtype: bool
    """
    return isinstance(p, _TerminalPacket)
189da8342e61d112a7d56d778de7562f7b609b82
3,656,507
def vgg11_bn(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
    return model
3a8a03bd4a337143d56ed99ae89f2bbc3e312e63
3,656,508
def trf_input_method(config, patient_id="", key_namespace="", **_):
    """Streamlit GUI method to facilitate TRF data provision.

    Notes
    -----
    TRF files themselves have no innate patient alignment. An option
    for TRF collection is to use the CLI tool
    ``pymedphys trf orchestrate``. This connects to the SAMBA server
    hosted on the Elekta NSS and downloads the diagnostic backup zips.
    It then takes these TRF files and queries the Mosaiq database
    using time of delivery to identify these with a patient id
    (Ident.Pat_ID1) and name.

    As such, all references to patient ID and name within this
    ``trf_input_method`` are actually a reference to their Mosaiq
    database counterparts.
    """
    FILE_UPLOAD = "File upload"
    INDEXED_TRF_SEARCH = "Search indexed TRF directory"

    import_method = st.radio(
        "TRF import method",
        [FILE_UPLOAD, INDEXED_TRF_SEARCH],
        key=f"{key_namespace}_trf_file_import_method",
    )

    if import_method == FILE_UPLOAD:
        selected_files = st.file_uploader(
            "Upload TRF files",
            key=f"{key_namespace}_trf_file_uploader",
            accept_multiple_files=True,
        )

        if not selected_files:
            return {}

        data_paths = []
        individual_identifiers = ["Uploaded TRF file(s)"]

    if import_method == INDEXED_TRF_SEARCH:
        try:
            indexed_trf_directory = _config.get_indexed_trf_directory(config)
        except KeyError:
            st.write(
                _exceptions.ConfigMissing(
                    "No indexed TRF directory is configured. Please use "
                    f"'{FILE_UPLOAD}' instead."
                )
            )
            return {}

        patient_id = st.text_input(
            "Patient ID", patient_id, key=f"{key_namespace}_patient_id"
        )
        st.write(patient_id)

        filepaths = list(indexed_trf_directory.glob(f"*/{patient_id}_*/*/*/*/*.trf"))

        raw_timestamps = [
            "_".join(path.parent.name.split("_")[0:2]) for path in filepaths
        ]
        timestamps = list(
            pd.to_datetime(raw_timestamps, format="%Y-%m-%d_%H%M%S").astype(str)
        )

        timestamp_filepath_map = dict(zip(timestamps, filepaths))

        timestamps = sorted(timestamps, reverse=True)

        if len(timestamps) == 0:
            if patient_id != "":
                st.write(
                    _exceptions.NoRecordsFound(
                        f"No TRF log file found for patient ID {patient_id}"
                    )
                )
            return {"patient_id": patient_id}

        if len(timestamps) == 1:
            default_timestamp = timestamps[0]
        else:
            default_timestamp = []

        selected_trf_deliveries = st.multiselect(
            "Select TRF delivery timestamp(s)",
            timestamps,
            default=default_timestamp,
            key=f"{key_namespace}_trf_deliveries",
        )

        if not selected_trf_deliveries:
            return {}

        st.write(
            """
            #### TRF filepath(s)
            """
        )

        selected_files = [
            timestamp_filepath_map[timestamp] for timestamp in selected_trf_deliveries
        ]
        st.write([str(path.resolve()) for path in selected_files])

        individual_identifiers = [
            f"{path.parent.parent.parent.parent.name} {path.parent.name}"
            for path in selected_files
        ]

        data_paths = selected_files

    st.write(
        """
        #### Log file header(s)
        """
    )

    headers = []
    tables = []
    for path_or_binary in selected_files:
        try:
            path_or_binary.seek(0)
        except AttributeError:
            pass

        header, table = read_trf(path_or_binary)
        headers.append(header)
        tables.append(table)

    headers = pd.concat(headers)
    headers.reset_index(inplace=True)
    headers.drop("index", axis=1, inplace=True)

    st.write(headers)

    deliveries = _deliveries.cached_deliveries_loading(
        tables, _deliveries.delivery_from_trf
    )

    identifier = f"TRF ({individual_identifiers[0]})"

    patient_name = _attempt_patient_name_from_mosaiq(config, headers)

    return {
        "site": None,
        "patient_id": patient_id,
        "patient_name": patient_name,
        "data_paths": data_paths,
        "identifier": identifier,
        "deliveries": deliveries,
    }
710a3f47e58ea5ed879cba6e51624072340308cf
3,656,509
import datetime  # the body uses datetime.date and datetime.timedelta, so the module itself is needed


def plotter(fdict):
    """ Go """
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['station']
    network = ctx['network']
    year = ctx['year']
    season = ctx['season']
    nt = NetworkTable(network)
    table = "alldata_%s" % (station[:2],)
    pgconn = get_dbconn('coop')

    # Have to do a redundant query to get the running values
    obs = read_sql("""
    WITH trail as (
        SELECT day, year,
        avg((high+low)/2.) OVER (ORDER by day ASC ROWS 91 PRECEDING) as avgt
        from """ + table + """ WHERE station = %s)

    SELECT day, avgt from trail WHERE year between %s and %s ORDER by day ASC
    """, pgconn, params=(station, year, year + 2), index_col='day')

    df = read_sql("""
    WITH trail as (
        SELECT day, year,
        avg((high+low)/2.) OVER (ORDER by day ASC ROWS 91 PRECEDING) as avgt
        from """ + table + """ WHERE station = %s),
    extremes as (
        SELECT day, year, avgt,
        rank() OVER (PARTITION by year ORDER by avgt ASC) as minrank,
        rank() OVER (PARTITION by year ORDER by avgt DESC) as maxrank
        from trail),
    yearmax as (
        SELECT year, min(day) as summer_end, min(avgt) as summer
        from extremes where maxrank = 1 GROUP by year),
    yearmin as (
        SELECT year, min(day) as winter_end, min(avgt) as winter
        from extremes where minrank = 1 GROUP by year)

    SELECT x.year, winter_end, winter, summer_end, summer,
    extract(doy from winter_end)::int as winter_end_doy,
    extract(doy from summer_end)::int as summer_end_doy
    from yearmax x JOIN yearmin n on (x.year = n.year) ORDER by x.year ASC
    """, pgconn, params=(station, ), index_col='year')

    # Throw out spring of the first year
    for col in ['winter', 'winter_end_doy', 'winter_end']:
        df.at[df.index.min(), col] = None

    # Need to cull current year
    if datetime.date.today().month < 8:
        for col in ['summer', 'summer_end_doy', 'summer_end']:
            df.at[datetime.date.today().year, col] = None
    if datetime.date.today().month < 2:
        for col in ['winter', 'winter_end_doy', 'winter_end']:
            df.at[datetime.date.today().year, col] = None

    df['spring_length'] = df['summer_end_doy'] - 91 - df['winter_end_doy']
    # fall is a bit trickier
    df['fall_length'] = None
    df['fall_length'].values[:-1] = ((df['winter_end_doy'].values[1:] + 365) -
                                     91 - df['summer_end_doy'].values[:-1])
    df['fall_length'] = pd.to_numeric(df['fall_length'])

    (fig, ax) = plt.subplots(3, 1, figsize=(8, 9))

    ax[0].plot(obs.index.values, obs['avgt'].values)
    ax[0].set_ylim(obs['avgt'].min() - 8, obs['avgt'].max() + 8)
    ax[0].set_title(("%s-%s [%s] %s\n91 Day Average Temperatures"
                     ) % (nt.sts[station]['archive_begin'].year,
                          year + 3, station, nt.sts[station]['name']))
    ax[0].set_ylabel(r"Trailing 91 Day Avg T $^{\circ}$F")
    ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%b\n%Y'))
    ax[0].grid(True)

    # Label the maxes and mins
    for yr in range(year, year + 3):
        if yr not in df.index:
            continue
        date = df.at[yr, 'winter_end']
        val = df.at[yr, 'winter']
        if date is not None:
            ax[0].text(
                date, val - 1,
                r"%s %.1f$^\circ$F" % (date.strftime("%-d %b"), val),
                ha='center', va='top',
                bbox=dict(color='white', boxstyle='square,pad=0')
            )
        date = df.at[yr, 'summer_end']
        val = df.at[yr, 'summer']
        if date is not None:
            ax[0].text(
                date, val + 1,
                r"%s %.1f$^\circ$F" % (date.strftime("%-d %b"), val),
                ha='center', va='bottom',
                bbox=dict(color='white', boxstyle='square,pad=0')
            )

    df2 = df.dropna()
    p2col = 'winter_end_doy' if season == 'spring' else 'summer_end_doy'
    slp, intercept, r, _, _ = stats.linregress(df2.index.values,
                                               df2[p2col].values)
    ax[1].scatter(df.index.values, df[p2col].values)
    ax[1].grid(True)
    # Do labelling
    yticks = []
    yticklabels = []
    for doy in range(int(df[p2col].min()), int(df[p2col].max())):
        date = datetime.date(2000, 1, 1) + datetime.timedelta(days=(doy - 1))
        if date.day in [1, 15]:
            yticks.append(doy)
            yticklabels.append(date.strftime("%-d %b"))
    ax[1].set_yticks(yticks)
    ax[1].set_yticklabels(yticklabels)
    lbl = ("Date of Minimum (Spring Start)"
           if season == 'spring' else "Date of Maximum (Fall Start)")
    ax[1].set_ylabel(lbl)
    ax[1].set_xlim(df.index.min() - 1, df.index.max() + 1)
    avgv = df[p2col].mean()
    ax[1].axhline(avgv, color='r')
    ax[1].plot(df.index.values, intercept + (df.index.values * slp))
    d = (datetime.date(2000, 1, 1) +
         datetime.timedelta(days=int(avgv))).strftime("%-d %b")
    ax[1].text(0.02, 0.02,
               r"$\frac{\Delta days}{decade} = %.2f,R^2=%.2f, avg = %s$" % (
                   slp * 10.0, r ** 2, d),
               va='bottom', transform=ax[1].transAxes)
    ax[1].set_ylim(bottom=(ax[1].get_ylim()[0] - 10))

    p3col = 'spring_length' if season == 'spring' else 'fall_length'
    slp, intercept, r, _, _ = stats.linregress(df2.index.values, df2[p3col])
    ax[2].scatter(df.index.values, df[p3col])
    ax[2].set_xlim(df.index.min() - 1, df.index.max() + 1)
    ax[2].set_ylabel("Length of '%s' [days]" % (season.capitalize(),))
    ax[2].grid(True)
    avgv = df[p3col].mean()
    ax[2].axhline(avgv, color='r')
    ax[2].plot(df.index.values, intercept + (df.index.values * slp))
    ax[2].text(0.02, 0.02,
               r"$\frac{\Delta days}{decade} = %.2f,R^2=%.2f, avg = %.1fd$" % (
                   slp * 10.0, r ** 2, avgv),
               va='bottom', transform=ax[2].transAxes)
    ax[2].set_ylim(bottom=(ax[2].get_ylim()[0] - 15))

    return fig, df
ff07233d7c716715f1b4a414f0e2066222439925
3,656,510
from .. import sim


def connectCells(self):
    """
    Function for/to <short description of `netpyne.network.conn.connectCells`>

    Parameters
    ----------
    self : <type>
        <Short description of self>
        **Default:** *required*
    """
    # Instantiate network connections based on the connectivity rules defined in params
    sim.timing('start', 'connectTime')
    if sim.rank == 0:
        print('Making connections...')

    if sim.nhosts > 1:  # Gather tags from all cells
        allCellTags = sim._gatherAllCellTags()
    else:
        allCellTags = {cell.gid: cell.tags for cell in self.cells}

    allPopTags = {-i: pop.tags for i, pop in enumerate(self.pops.values())}  # gather tags from pops so can connect NetStim pops

    if self.params.subConnParams:  # do not create NEURON objs until synapses are distributed based on subConnParams
        origCreateNEURONObj = bool(sim.cfg.createNEURONObj)
        origAddSynMechs = bool(sim.cfg.addSynMechs)
        sim.cfg.createNEURONObj = False
        sim.cfg.addSynMechs = False

    gapJunctions = False  # assume no gap junctions by default

    for connParamLabel, connParamTemp in self.params.connParams.items():  # for each conn rule or parameter set
        connParam = connParamTemp.copy()
        connParam['label'] = connParamLabel

        # find pre and post cells that match conditions
        preCellsTags, postCellsTags = self._findPrePostCellsCondition(allCellTags, connParam['preConds'], connParam['postConds'])

        # if conn function not specified, select based on params
        if 'connFunc' not in connParam:
            if 'probability' in connParam:
                connParam['connFunc'] = 'probConn'  # probability based func
            elif 'convergence' in connParam:
                connParam['connFunc'] = 'convConn'  # convergence function
            elif 'divergence' in connParam:
                connParam['connFunc'] = 'divConn'  # divergence function
            elif 'connList' in connParam:
                connParam['connFunc'] = 'fromListConn'  # from list function
            else:
                connParam['connFunc'] = 'fullConn'  # convergence function

        connFunc = getattr(self, connParam['connFunc'])  # get function name from params

        # process string-based funcs and call conn function
        if preCellsTags and postCellsTags:
            # initialize randomizer in case used in string-based function (see issue #89 for more details)
            self.rand.Random123(sim.hashStr('conn_' + connParam['connFunc']),
                                sim.hashList(sorted(preCellsTags) + sorted(postCellsTags)),
                                sim.cfg.seeds['conn'])
            self._connStrToFunc(preCellsTags, postCellsTags, connParam)  # convert strings to functions (for the delay, and probability params)
            connFunc(preCellsTags, postCellsTags, connParam)  # call specific conn function

        # check if gap junctions in any of the conn rules
        if not gapJunctions and 'gapJunction' in connParam:
            gapJunctions = True

        if sim.cfg.printSynsAfterRule:
            nodeSynapses = sum([len(cell.conns) for cell in sim.net.cells])
            print(('  Number of synaptic contacts on node %i after conn rule %s: %i ' % (sim.rank, connParamLabel, nodeSynapses)))

    # add presynaptic gap junctions
    if gapJunctions:
        # distribute info on presyn gap junctions across nodes
        if not getattr(sim.net, 'preGapJunctions', False):
            sim.net.preGapJunctions = []  # if doesn't exist, create list to store presynaptic cell gap junctions
        data = [sim.net.preGapJunctions] * sim.nhosts  # send cells data to other nodes
        data[sim.rank] = None
        gather = sim.pc.py_alltoall(data)  # collect cells data from other nodes (required to generate connections)
        sim.pc.barrier()
        for dataNode in gather:
            if dataNode:
                sim.net.preGapJunctions.extend(dataNode)

        # add gap junctions of presynaptic cells (need to do separately because could be in different ranks)
        for preGapParams in getattr(sim.net, 'preGapJunctions', []):
            if preGapParams['gid'] in self.gid2lid:  # only cells in this rank
                cell = self.cells[self.gid2lid[preGapParams['gid']]]
                cell.addConn(preGapParams)

    # apply subcellular connectivity params (distribution of synapses)
    if self.params.subConnParams:
        self.subcellularConn(allCellTags, allPopTags)
        sim.cfg.createNEURONObj = origCreateNEURONObj  # set to original value
        sim.cfg.addSynMechs = origAddSynMechs  # set to original value
        cellsUpdate = [c for c in sim.net.cells if c.tags['cellModel'] not in ['NetStim', 'VecStim']]
        if sim.cfg.createNEURONObj:
            for cell in cellsUpdate:
                # Add synMechs, stim and conn NEURON objects
                cell.addStimsNEURONObj()
                # cell.addSynMechsNEURONObj()
                cell.addConnsNEURONObj()

    nodeSynapses = sum([len(cell.conns) for cell in sim.net.cells])
    if sim.cfg.createPyStruct:
        nodeConnections = sum([len(set([conn['preGid'] for conn in cell.conns])) for cell in sim.net.cells])
    else:
        nodeConnections = nodeSynapses

    print(('  Number of connections on node %i: %i ' % (sim.rank, nodeConnections)))
    if nodeSynapses != nodeConnections:
        print(('  Number of synaptic contacts on node %i: %i ' % (sim.rank, nodeSynapses)))

    sim.pc.barrier()
    sim.timing('stop', 'connectTime')
    if sim.rank == 0 and sim.cfg.timing:
        print(('  Done; cell connection time = %0.2f s.' % sim.timingData['connectTime']))

    return [cell.conns for cell in self.cells]
8f037f2ae6dbf8aab68c12fbbedcf71dd3ca6b31
3,656,511