content: string, lengths 22 to 815k
id: int64, values 0 to 4.91M
def func1():
    """Generic short description."""
    pass
5,354,400
def test_write(size, iterations, exclude_formats, test_compress):
    """ Test writing for one file

        Args:
            size:            size of the file to test (0: small, 1: medium, 2: big)
            iterations:      number of times to run the test
            exclude_formats: formats to exclude in this test
            test_compress:   if True it will try all compressions

        Returns:
            dictionary with out
    """

    out = {}
    df = pd.read_csv(f"{PATH_DATA}{FILES[size]}.csv")

    for extension, func in tqdm(FUNCS["write"].items(), desc=f"{'write':10}", leave=True):

        # Skip this extension
        if extension in exclude_formats:
            continue

        if not test_compress or extension not in COMPRESSIONS:
            args = [df, f"{PATH_DATA}data.{extension}"]
            out[extension] = iterate_one_test(iterations, extension, func, args, {})

        # Try all compressions
        else:
            if extension not in COMPRESSIONS:
                continue

            # Get name of compression parameter and list of extensions
            comp_list = COMPRESSIONS[extension]["list"]
            comp_param_name = COMPRESSIONS[extension]["param_name"]

            for comp in tqdm(comp_list, desc=f"{extension:10}", leave=True):
                name = f"{extension}_{str(comp)}"
                out[name] = iterate_one_test(
                    iterations,
                    extension=name,
                    func=func,
                    args=[df, f"{PATH_DATA}data.{extension}_{comp}"],
                    kwargs={comp_param_name: comp},
                )

    return out
5,354,401
def get_game_server_group(game_server_group_arn: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGameServerGroupResult: """ The AWS::GameLift::GameServerGroup resource creates an Amazon GameLift (GameLift) GameServerGroup. :param str game_server_group_arn: A generated unique ID for the game server group. """ __args__ = dict() __args__['gameServerGroupArn'] = game_server_group_arn if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:gamelift:getGameServerGroup', __args__, opts=opts, typ=GetGameServerGroupResult).value return AwaitableGetGameServerGroupResult( auto_scaling_group_arn=__ret__.auto_scaling_group_arn, auto_scaling_policy=__ret__.auto_scaling_policy, balancing_strategy=__ret__.balancing_strategy, game_server_group_arn=__ret__.game_server_group_arn, game_server_group_name=__ret__.game_server_group_name, game_server_protection_policy=__ret__.game_server_protection_policy, instance_definitions=__ret__.instance_definitions, launch_template=__ret__.launch_template, max_size=__ret__.max_size, min_size=__ret__.min_size, role_arn=__ret__.role_arn, tags=__ret__.tags, vpc_subnets=__ret__.vpc_subnets)
5,354,402
def create_wcscorr(descrip=False, numrows=1, padding=0): """ Return the basic definitions for a WCSCORR table. The dtype definitions for the string columns are set to the maximum allowed so that all new elements will have the same max size which will be automatically truncated to this limit upon updating (if needed). The table is initialized with rows corresponding to the OPUS solution for all the 'SCI' extensions. """ trows = numrows + padding c1 = pyfits.Column(name='WCS_ID',format='24A',array=np.array(['OPUS']*numrows+['']*padding,dtype="S24")) c2 = pyfits.Column(name='EXTVER',format='I',array=np.array(list(range(1,numrows+1)),dtype=np.int16)) c3 = pyfits.Column(name='CRVAL1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c4 = pyfits.Column(name='CRVAL2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c5 = pyfits.Column(name='CD1_1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c6 = pyfits.Column(name='CD1_2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c7 = pyfits.Column(name='CD2_1',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c8 = pyfits.Column(name='CD2_2',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c9 = pyfits.Column(name='ORIENTAT',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c10 = pyfits.Column(name='PA_V3',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c11 = pyfits.Column(name='Delta_RA',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c12 = pyfits.Column(name='Delta_Dec',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c13 = pyfits.Column(name='RMS_RA',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c14 = pyfits.Column(name='RMS_Dec',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c15 = pyfits.Column(name='Delta_Orientat',format='D',array=np.array([0.0]*trows,dtype=np.float64)) c16 = pyfits.Column(name='Delta_Scale',format='D',array=np.array([1.0]*trows,dtype=np.float64)) c17 = pyfits.Column(name='NMatch',format='J',array=np.array([0]*trows,dtype=np.int32)) c18 = pyfits.Column(name='Catalog',format='40A',array=np.array([''],dtype="S40")) if descrip: c19 = pyfits.Column(name='Descrip',format='128A',array=np.array(['Original WCS computed by OPUS']*numrows,dtype="S128")) cdefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18,c19]) else: cdefs = pyfits.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16,c17,c18]) return pyfits.new_table(cdefs,nrows=trows)
5,354,403
def get_token():
    """
    Acquire an OAuth token for Koha

    returns: OAuth token (string)
    """
    data = {
        "client_id": config['client_id'],
        "client_secret": config['client_secret'],
        "grant_type": "client_credentials",
    }
    response = requests.post(config['api_root'] + '/oauth/token', data=data, verify=False)
    token = str(response.json()['access_token'])
    return token
5,354,404
def build_step(incr: bool, cfg: RunConfig, repo: Repo, time_dict: dict) -> List[str]:
    """
    Build jar for multiple versions of a repo
    :param incr: incremental build or not
    :param cfg: all configurations read from *.toml
    :param repo: a GitPython git.Repo object
    :param time_dict: recording running time
    :return: the list of commits (sha1 strings)
    """
    outdirs: DirsTuple = cfg.out_dirs
    check_dir(str(outdirs.jar), make_if_not=True)
    commit_list: List[str] = interp_rev_range(repo, cfg.rev_range)
    commit_list_repr = '\n'.join(commit_list)
    logger.debug(f"Commits to be processed are:\n{commit_list_repr}")
    start_time: float = time.time()
    if incr:
        logger.debug("Build mode: Incremental build")
        step_compile(outdirs.jar, repo, commit_list, cfg.njobs, True)
    else:  # default
        logger.debug("Build mode: Non-incremental build")
        step_compile(outdirs.jar, repo, commit_list, cfg.njobs, False)
    time_dict["compile"] = time.time() - start_time
    return commit_list
5,354,405
def fetch_hs_races(race_store):
    """
    :param race_store: a store of races we currently have
    :return: void
    """
    parser = GopherStateHSRaceInfoParser()
    content = get_gopher_state_content(HS_RACE_INFO_PAGE)
    if not content:
        print("Warning: skipping fetch for all gopher state highschool events!")
        return
    parser.feed(content)
    for ix, event_info in enumerate(parser.get_race_infos()):
        process_hs_event(event_info, parser.get_post_ids()[ix])
5,354,406
def isfile(value):
    """Validate that the value is an existing file."""
    return vol.IsFile('not a file')(value)
5,354,407
def is_in_form(dg: "streamlit.delta_generator.DeltaGenerator") -> bool:
    """True if the DeltaGenerator is inside an st.form block."""
    return current_form_id(dg) != ""
5,354,408
def convert_rating() -> None:
    """
    Creates a file with ratings of type for each user and item from the user-item matrix.
    """
    logger.info('Reading user-item matrix.')

    # Load user-item matrix
    user_item_matrix = get_user_item_matrix()
    user_item_matrix_np = user_item_matrix.to_numpy()

    item_set = set(news_id2index.keys())
    user_pos_ratings = dict()

    for user_idx in range(user_item_matrix_np.shape[0]):
        rated_news_indices = np.where(user_item_matrix_np[user_idx] == 1)[0]
        if user_idx not in user_pos_ratings:
            user_pos_ratings[user_idx] = set()
        user_pos_ratings[user_idx].update(list(rated_news_indices))

    logger.info('Converting rating file.')
    writer = open(FILENAME_RATINGS_FINAL_TXT, 'w', encoding='utf-8')
    user_cnt = 0

    for user_idx, pos_item_set in user_pos_ratings.items():
        user_cnt += 1
        for item in pos_item_set:
            writer.write('%d\t%d\t1\n' % (user_idx, item))

        # There are no negative ratings, only unread news
        unread_set = item_set - pos_item_set
        for item in np.random.choice(list(unread_set), size=len(pos_item_set), replace=False):
            writer.write('%d\t%d\t0\n' % (user_idx, item))

    writer.close()

    logger.info(f'Number of users: {user_cnt}.')
    logger.info(f'Number of items: {len(item_set)}.\n')
5,354,409
def fetch(opts):
    """
    support fetching from scp sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        ``True`` if the fetch stage is completed; ``False`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    site = opts.site
    work_dir = opts.work_dir

    if not SCP.exists():
        err('unable to fetch package; scp is not installed')
        return None

    note('fetching {}...', name)
    sys.stdout.flush()

    if not SCP.execute(['-o', 'BatchMode yes', site, cache_file], cwd=work_dir):
        err('unable to secure-copy file from target')
        return None

    log('successfully secure-copied file from target')
    return cache_file
5,354,410
def install_from_deb(deb_path, additional_options):
    """
    Installs package with dpkg command using the -i option and some extra options, if needed
    Raises an exception on non-zero exit code
    Input: deb file path, additional options
    Output: Combined stdout and stderr
    """
    return run_shell_command("dpkg -i " + additional_options + " " + deb_path)
5,354,411
def associate_phone_number_with_user(AccountId=None, UserId=None, E164PhoneNumber=None): """ Associates a phone number with the specified Amazon Chime user. See also: AWS API Documentation Exceptions :example: response = client.associate_phone_number_with_user( AccountId='string', UserId='string', E164PhoneNumber='string' ) :type AccountId: string :param AccountId: [REQUIRED]\nThe Amazon Chime account ID.\n :type UserId: string :param UserId: [REQUIRED]\nThe user ID.\n :type E164PhoneNumber: string :param E164PhoneNumber: [REQUIRED]\nThe phone number, in E.164 format.\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Exceptions Chime.Client.exceptions.UnauthorizedClientException Chime.Client.exceptions.NotFoundException Chime.Client.exceptions.ForbiddenException Chime.Client.exceptions.BadRequestException Chime.Client.exceptions.AccessDeniedException Chime.Client.exceptions.ThrottledClientException Chime.Client.exceptions.ServiceUnavailableException Chime.Client.exceptions.ServiceFailureException :return: {} :returns: (dict) -- """ pass
5,354,412
def sign_award(award: Award) -> FlexSendMessage:
    """Sign Award Result

    Args:
        award (Award): Award Object

    Returns:
        FlexSendMessage: Flex Message
    """
    tz = pytz.timezone("Asia/Taipei")
    now = datetime.now(tz=tz)
    now_text = now.strftime("%Y/%m/%d %H:%M:%S")

    with open("line/flex_message_template/sign_award.json") as json_file:
        contents = json.load(json_file)

    contents["hero"]["url"] = award.icon
    contents["body"]["contents"][1]["contents"][1][
        "text"
    ] = f"{award.name} * {award.count}"
    contents["body"]["contents"][3]["contents"][1]["text"] = now_text

    message = FlexSendMessage(alt_text=f"簽到成功!", contents=contents)
    return message
5,354,413
def create_l5_block(block_id: str) -> l5_block_model.L5BlockModel:
    """ Creates unfinalized L5 block that needs confirmation """
    l5_block = l5_block_model.L5BlockModel(
        dc_id=keys.get_public_id(),
        current_ddss=party.get_address_ddss(ADDRESS),  # Get DDSS from party, cached hourly
        block_id=str(block_id),
        timestamp=str(math.floor(time.time())),
        prev_proof="",
        scheme=PROOF_SCHEME,
        l4_blocks=get_pending_l4_blocks(block_id),
    )
    return l5_block
5,354,414
def gridgen(xbry: List, ybry: List, beta: List, shape: Tuple, ul_idx=0,
            focus=None, proj=None, nnodes=14, precision=1.0e-12, nppe=3,
            newton=True, thin=True, checksimplepoly=True, verbose=False):
    """
    External wrapping function to call Gridgen grid builder.

    xbry, ybry - nodes coordinates of grid boundary
    beta       - vertex type
    shape      - tuple of grid shape (eta, xi)
    """

    # Prepare the Gridgen object.
    gn = Gridgen(xbry, ybry, beta, shape, ul_idx=ul_idx, focus=focus,
                 proj=None, nnodes=nnodes, precision=precision, nppe=nppe,
                 newton=newton, thin=thin, checksimplepoly=checksimplepoly,
                 verbose=verbose)

    # Generate the C-Grid.
    if proj is not None:
        lon_vert, lat_vert = proj(gn.x, gn.y, inverse=True)
        grd = CGridGeo(lon_vert, lat_vert, proj)
    else:
        grd = CGrid(gn.x, gn.y)

    # Attach the Gridgen object to grid.
    grd.Gridgen = gn

    print('Grid construction complete.')

    return grd
5,354,415
def get_output():
    """Gets the current global output stream"""
    global OUTPUT
    return OUTPUT
5,354,416
def load_pascal_annotation(index, pascal_root):
    """
    This code is borrowed from Ross Girshick's FAST-RCNN code
    (https://github.com/rbgirshick/fast-rcnn).
    It parses the PASCAL .xml metadata files.
    See publication for further details: (http://arxiv.org/abs/1504.08083).

    Thanks Ross!
    """
    classes = ('__background__',  # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor')
    class_to_ind = dict(zip(classes, xrange(21)))

    filename = osp.join(pascal_root, 'Annotations', index + '.xml')
    # print 'Loading: {}'.format(filename)

    def get_data_from_tag(node, tag):
        return node.getElementsByTagName(tag)[0].childNodes[0].data

    with open(filename) as f:
        data = minidom.parseString(f.read())

    objs = data.getElementsByTagName('object')
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, 21), dtype=np.float32)

    # Load object bounding boxes into a data frame.
    for ix, obj in enumerate(objs):
        # Make pixel indexes 0-based
        x1 = float(get_data_from_tag(obj, 'xmin')) - 1
        y1 = float(get_data_from_tag(obj, 'ymin')) - 1
        x2 = float(get_data_from_tag(obj, 'xmax')) - 1
        y2 = float(get_data_from_tag(obj, 'ymax')) - 1
        cls = class_to_ind[
            str(get_data_from_tag(obj, "name")).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0

    overlaps = scipy.sparse.csr_matrix(overlaps)

    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'index': index}
5,354,417
def run_pca( X_train, y_train, mean_widget, std_widget, x_widget, labels_map=labels_map, labels_inv_map=labels_inv_map, ): """Runs PCA on the passed data based on the defined parameters and returns a pandas Dataframe. Consider the PCA is always fitted on the whole dataset X_train and the returned Dataframe isdependable on the values from the x_widget object. Parameters ========== X_train : numpy.ndarray Data matrix to run PCA on it y_train : numpy.ndarray Ground truth vector with integer class labels mean_widget : ipywidgets.widgets.widget_bool.Checkbox Widgets that indicates to center the data before scaling std_widget : ipywidgets.widgets.widget_bool.Checkbox Widget that indicates to scale the data to unit variance x_widget : ipywidgets.widgets.widget_selection.SelectMultiple Widget that defines, which data observation is returned, based on the containing labels in the widget object labels_map : dict Dictionary that maps from plant species representation to integer class represention. labels_inv_map : dict Dictionary that maps from integer class represention to plant species representation. Returns ======= pc_df : pandas.DataFrame Data matrix with 4 PCA-Components and the regarding label entry as 'Species' in plant species representation . """ ss = StandardScaler(with_mean=mean_widget.value, with_std=std_widget.value) train_data = ss.fit_transform(X_train) pca = decomposition.PCA(n_components=4) _ = pca.fit_transform(train_data) chosen_labels = np.array([labels_map.get(name) for name in x_widget.value]) ix_true = np.argwhere(np.in1d(y_train, chosen_labels)).flatten() pc = pca.transform(X_train[ix_true, ...]) pc_df = pd.DataFrame(data=pc, columns=["PC1", "PC2", "PC3", "PC4"]) pc_df["Species"] = np.array( [labels_inv_map.get(label_nr) for label_nr in y_train[ix_true]] ) return pc_df
5,354,418
def record_nonempty(
    entries: MutableMapping, entry_lines: Sequence[str], moment: TimeSwitcher
):
    """
    insert timestamped sequence into dictionary iff the sequence has anything in it
    """
    if len(entry_lines) < 1:
        return
    entries[moment.times[-2]] = entry_lines
5,354,419
def cleanup_dir(dir_path=WORKING_DIR):
    """
    A function decorator that cleans up file directory before executing.
    """
    def rm_content(dir_path):
        rm_count = 0
        for filename in os.listdir(dir_path):
            filepath = os.path.join(dir_path, filename)
            if os.path.isfile(filepath) or os.path.islink(filepath):
                os.remove(filepath)
            else:
                shutil.rmtree(filepath)
            rm_count += 1
        logger.info(f'removed {rm_count} file/directories from {dir_path}')

    def inner(f):
        @wraps(f)
        def dir_cleanup_wrapper(*args, **kwargs):
            rm_content(dir_path)
            result = f(*args, **kwargs)
            return result
        return dir_cleanup_wrapper
    return inner
5,354,420
def test_table():
    """
    Tests creating table without parent.
    :return: None.
    """
    a = BbnNode(Variable(0, 'a', ['on', 'off']), [0.5, 0.5])
    table = Table(a)

    assert not table.has_parents()
    assert_almost_equal(table.probs, np.array([0.5, 1.0]))
    assert 'on' == table.get_value(0.4)
    assert 'off' == table.get_value(0.6)
5,354,421
def validate_url(url):
    """
    Validates the URL
    :param url:
    :return:
    """
    if validators.url(url):
        return url
    elif validators.domain(url):
        return "http://{}".format(url)
    return ""
5,354,422
def test_companies_details_unrelated(user: User, other_company: Company):
    """
    Company details can be viewed by an unrelated user (non-employee),
    but only basic information is returned.
    """
    client = APIClient()
    client.force_authenticate(user)
    resp = client.get(client.reverse('company-detail', pk=other_company.pk))
    validate_jsonapi_detail_response(
        resp,
        expected_attributes=ATTRIBUTES_PUBLIC,
        expected_relationships=RELATIONSHIPS_PUBLIC,
    )
5,354,423
def map_class_to_id(classes):
    """
    Get a 1-indexed id for each class given as an argument

    Note that for MASATI, len(classes) == 1 when only considering boats

    Args:
        classes (list): A list of classes present in the dataset

    Returns:
        dict[str, int]
    """
    class_ids = list(range(1, len(classes) + 1))
    return dict(zip(classes, class_ids))
5,354,424
def run_directory(tmp_path_factory):
    """
    Prepare mock directory structure for run directory of code.
    """
    tmp_path = tmp_path_factory.mktemp('output')
    for path in OUTPUT_PATHS:
        if os.path.dirname(path):
            os.makedirs(tmp_path / os.path.dirname(path), exist_ok=True)
        with open(tmp_path / path, 'w') as handle:
            handle.write("Test content")
    yield tmp_path
5,354,425
def setup_py_main():
    """Main function for setup script."""
    if sys.stdin.isatty() and sys.stdout.isatty() and sys.stderr.isatty():
        if os.getpgrp() == os.tcgetpgrp(sys.stdout.fileno()):
            if os.geteuid() == 0:
                main()
                terminate()
            else:
                print("[!] Run me as superuser (a.k.a. root)")
                sys.exit(2)
        else:
            sys.exit(2)
    else:
        sys.exit(2)
5,354,426
def vec2text(vector):
    """
    vector to captcha text
    :param vector: np array
    :return: text
    """
    if not isinstance(vector, np.ndarray):
        vector = np.asarray(vector)
    vector = np.reshape(vector, [CAPTCHA_LENGTH, -1])
    text = ''
    for item in vector:
        text += CAPTCHA_LIST[np.argmax(item)]
    return text
5,354,427
def get_data_nasdaq_fall(specified_value):
    """
    :param specified_value: the number of datapoints to fetch from the backend
    :param collection: specify which collection to be fetched
    :return: list of dictionaries
    """
    data_points = NasdaqAsc.objects.order_by('difference_close')
    data_points = data_points[:specified_value]
    return data_points
5,354,428
def download(ctx):
    """Download code of the current project."""
    user, project_name = get_project_or_local(ctx.obj.get('project'))
    try:
        PolyaxonClient().project.download_repo(user, project_name)
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not download code for project `{}`.'.format(project_name))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
    Printer.print_success('Files downloaded.')
5,354,429
def check_qe_completed(folder, prefix, output_file, calc_type='pw'):
    """
    Check if qe calculation has correctly completed.

    - folder: where the calculation has been run.
    - prefix: qe prefix
    - output_file: name of output file
    - calc_type: either 'pw' or 'ph' or 'gkkp'
    """
    status = True
    # If save folder does not exist, return False (= NOT completed) immediately
    if calc_type == 'pw' and not os.path.isdir('%s/%s.save' % (folder, prefix)):
        status = False
        return status
    elif calc_type == 'ph' and not os.path.isdir('%s/_ph0' % folder):
        status = False
        return status
    elif calc_type == 'gkkp' and not os.path.isdir('%s/elph_dir' % folder):
        status = False
        return status

    if calc_type != 'pw' and calc_type != 'ph' and calc_type != 'gkkp':
        raise ValueError("calc_type not recognised: it has to be either 'pw' or 'ph' or 'gkkp'.")

    # Next, check if output is correctly completed
    try:
        check = subprocess.check_output("grep JOB %s/%s*" % (folder, output_file),
                                        shell=True, stderr=subprocess.STDOUT)
        check = check.decode('utf-8')
        check = check.strip().split()[-1]
    except subprocess.CalledProcessError as e:
        check = ""
    if check != "DONE.":
        status = False

    return status
5,354,430
def require_openssl(required_version):
    """
    This function checks that the required version of OpenSSL is present, and skips
    the test if not.
    Use it as a test function decorator:

        @require_openssl("2.3.4")
        def test_something():
            ...

    :param required_version: minimal required version as a string: "1.2.3"
    """
    def versiontuple(v):
        clean_v = re.sub(r"[^\d\.]", "", v)
        return tuple(map(int, (clean_v.split("."))))

    try:
        command_output = subprocess.check_output(["openssl", "version"])
    except OSError:
        return pytest.mark.skip("openssl command is not available in test environment")
    else:
        if not command_output:
            raise Exception("Could not get openssl version")
        openssl_version = str(command_output.split()[1])
        return pytest.mark.skipif(
            versiontuple(openssl_version) < versiontuple(required_version),
            reason=f"openssl v{openssl_version} is less than required version {required_version}")
5,354,431
def login_required(f):
    """Decorator that requires login for a page view."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not g.signin:
            nu = get_redirect_url()
            if nu and (
                nu.startswith("/") or nu.startswith(request.url_root)
            ):
                return redirect(url_for('front.login', next=nu))
            else:
                return redirect(url_for('front.login'))
        return f(*args, **kwargs)
    return decorated_function
5,354,432
def get_agent_type(opt):
    """
    Returns the type of model agent, specified by --model and --model_file.
    """
    model_file = opt['model_file']
    optfile = model_file + '.opt'
    if isfile(optfile):
        new_opt = _load_opt_file(optfile)
        if 'batchindex' in new_opt:
            del new_opt['batchindex']
        if opt.get('override'):
            for k, v in opt['override'].items():
                if str(v) != str(new_opt.get(k, None)):
                    print(
                        "[ warning: overriding opt['{}'] to {} ("
                        "previously: {} )]".format(
                            k, v, new_opt.get(k, None)))
                new_opt[k] = v
        for k, v in opt.items():
            if k not in new_opt:
                new_opt[k] = v
        new_opt['model_file'] = model_file
        if (new_opt.get('dict_file') and not isfile(new_opt['dict_file'])):
            raise RuntimeError(
                'WARNING: Dict file does not exist, check '
                'to make sure it is correct: {}'.format(
                    new_opt['dict_file']))
        model_class = get_agent_module(new_opt['model'])
        return model_class
    else:
        return None
5,354,433
def update_col(col, collation=None, mssql_from=True, mssql_to=True):
    """Updates the default value, type and collation of the specified column."""
    # - Update the default value
    update_col_default(col, mssql_from=mssql_from, mssql_to=mssql_to)
    # - Update the type
    update_col_type(col, mssql_from=mssql_from, mssql_to=mssql_to)
    # - Update the collation
    update_col_collation(col, collation=collation)
5,354,434
def create_markup_map(name: str, df: pd.core.frame.DataFrame): """Place a markup for each point with a valid housing price evaluation and position """ map_ = folium.Map( location=france_location, zoom_start=3, control_scale=True, tiles="openstreetmap", ) mcg = folium.plugins.MarkerCluster(control=False) map_.add_child(mcg) houses = folium.plugins.FeatureGroupSubGroup(mcg, "houses") appartements = folium.plugins.FeatureGroupSubGroup(mcg, "appartements") others = folium.plugins.FeatureGroupSubGroup(mcg, "others") map_.add_child(houses) map_.add_child(appartements) map_.add_child(others) for _, row in df.iterrows(): housing_type = row["Type local"] if housing_type == "Maison": color = "darkgreen" icon = "home" context = houses elif housing_type == "Appartement": color = "red" icon = "pause" context = appartements else: color = "black" icon = "info-sign" context = others price = int(row["Valeur fonciere"]) address = get_address_from_row(row) context.add_child( folium.Marker( (row["lat"], row["lon"]), popup=folium.Popup( f"{housing_type}</br> {address} <b>{price}€</b>", # Not working properly max_width="400px", min_width="200px", ), tooltip=housing_type, icon=folium.Icon(color=color, icon=icon), ) ) map_.add_child( plugins.Fullscreen( position="topright", title="Expand me", title_cancel="Exit me", force_separate_button=True, ) ) map_.add_child(folium.LayerControl(collapsed=False)) map_.add_child(plugins.MeasureControl()) map_.add_child(plugins.MiniMap()) save_map(map_, name)
5,354,435
def write_sequential_results_to_csv(results, opts): """ :param results: SequentialResults :param opts: :return: """ prefix = opts.get_alad_metrics_name_prefix() num_seen_file = os.path.join(opts.resultsdir, "%s-num_seen.csv" % (prefix,)) baseline_file = os.path.join(opts.resultsdir, "%s-baseline.csv" % (prefix,)) stream_window_file = os.path.join(opts.resultsdir, "%s-window.csv" % (prefix,)) stream_window_baseline_file = os.path.join(opts.resultsdir, "%s-window-baseline.csv" % (prefix,)) queried_idxs_file = os.path.join(opts.resultsdir, "%s-queried.csv" % (prefix,)) queried_idxs_baseline_file = os.path.join(opts.resultsdir, "%s-queried-baseline.csv" % (prefix,)) aucs_file = os.path.join(opts.resultsdir, "%s-aucs.csv" % (prefix,)) if results.num_seen is not None: np.savetxt(num_seen_file, results.num_seen, fmt='%d', delimiter=',') if results.num_seen_baseline is not None: np.savetxt(baseline_file, results.num_seen_baseline, fmt='%d', delimiter=',') if results.true_queried_indexes is not None: np.savetxt(queried_idxs_file, results.true_queried_indexes, fmt='%d', delimiter=',') if results.true_queried_indexes_baseline is not None: np.savetxt(queried_idxs_baseline_file, results.true_queried_indexes_baseline, fmt='%d', delimiter=',') if results.stream_window is not None: np.savetxt(stream_window_file, results.stream_window, fmt='%d', delimiter=',') if results.stream_window_baseline is not None: np.savetxt(stream_window_baseline_file, results.stream_window_baseline, fmt='%d', delimiter=',') if results.aucs is not None: np.savetxt(aucs_file, results.aucs, fmt='%f', delimiter=',')
5,354,436
def is_sparse_or_ragged_tensor_value(tensor: Any) -> bool:
    """Returns true if sparse or ragged tensor."""
    return (isinstance(tensor, types.SparseTensorValue) or
            isinstance(tensor, types.RaggedTensorValue) or
            isinstance(tensor, tf.compat.v1.SparseTensorValue))
5,354,437
def _is_file_not_empty(file_path):
    """Return True when buildinfo file is not empty"""
    # NOTE: we can assume, that when file exists, all
    # content has been downloaded to the directory.
    return os.path.getsize(file_path) > 0
5,354,438
def arcsin(tensor):
    """Returns the element-wise inverse sine of the tensor"""
    return TensorBox(tensor).arcsin(wrap_output=False)
5,354,439
def get_one_pokemon(id: hug.types.number):
    """Display one pokemon from the database"""
    cursor.execute("""SELECT * FROM pokemon WHERE id=%s """, [id])
    row = cursor.fetchone()
    conn.commit()
    conn.close()
    return row
5,354,440
def check_file_exists(filename):
    """Try to open the file `filename` and return True if it's valid """
    return os.path.exists(filename)
5,354,441
def set_loop_header_loop(context: X12ParserContext, segment_data: Dict) -> None:
    """
    Resets the loop context to the Eligibility Loop 2120C/D (Subscriber and Dependent)

    The LS segment precedes the Subscriber/Dependent Benefit Related Entity Name

    :param context: The X12Parsing context which contains the current loop and transaction record.
    :param segment_data: The current segment's data
    """
    eligibility = _get_eligibility(context)

    if _is_subscriber_patient(context):
        loop_name = TransactionLoops.SUBSCRIBER_ELIGIBILITY
    else:
        loop_name = TransactionLoops.DEPENDENT_ELIGIBILITY

    context.set_loop_context(loop_name, eligibility)
5,354,442
def shift_fft(input_img, shift_val, method="fft"):
    """Do shift using FFTs

    Shift an array like scipy.ndimage.interpolation.shift(input, shift, mode="wrap", order="infinity") but faster
    :param input_img: 2d numpy array
    :param shift_val: 2-tuple of float
    :return: shifted image
    """
    if method == "fft":
        d0, d1 = input_img.shape
        v0, v1 = shift_val
        f0 = numpy.fft.ifftshift(numpy.arange(-d0 // 2, d0 // 2))
        f1 = numpy.fft.ifftshift(numpy.arange(-d1 // 2, d1 // 2))
        m1, m0 = numpy.meshgrid(f1, f0)
        e0 = numpy.exp(-2j * numpy.pi * v0 * m0 / float(d0))
        e1 = numpy.exp(-2j * numpy.pi * v1 * m1 / float(d1))
        e = e0 * e1
        out = abs(numpy.fft.ifft2(numpy.fft.fft2(input_img) * e))
    else:
        out = scipy.ndimage.interpolation.shift(input_img, shift_val, mode="wrap", order="infinity")
    return out
5,354,443
def get_output_detections_image_file_path(input_file_path, suffix="--detections"):
    """Get the appropriate output image path for a given image input.

    Effectively appends "--detections" to the original image file and
    places it within the same directory.

    Parameters
    -----------
    input_file_path: str
        Path to input image.
    suffix: str
        Suffix appended to the file. Default: "--detections"

    Returns
    -------
    str
        Full path for detections output image.
    """
    input_file_path = input_file_path.replace('--original.', '.')
    input_file_paths = input_file_path.split('.')
    input_file_paths[-2] = input_file_paths[-2] + suffix
    return '.'.join(input_file_paths)
5,354,444
def plot_weather(wr_date, full=False, fname=None):
    """Plot a weather radar image. """
    if isinstance(wr_date, str):
        wr_date = datetime.strptime(wr_date, '%Y%m%dT%H%M')

    # Load the weather radar image
    wr_before, wr_after = workflow.find_closest_weather_radar_files(wr_date)
    if wr_before != wr_after:
        logging.warning('Found two different radar images near %s. Interpolating', wr_date)

    wr_before = nimrod.Nimrod.from_netcdf(wr_before)
    wr_after = nimrod.Nimrod.from_netcdf(wr_after)
    # wr = nimrod.Nimrod.interp_radar(wr_before, wr_after, wr_date)
    wr = wr_after

    if not full:
        # Clip image to target region
        lon_bounds = (config.REGION['lon_min'], config.REGION['lon_max'])
        lat_bounds = (config.REGION['lat_min'], config.REGION['lat_max'])
        wr.clip(lon_bounds, lat_bounds)

    fig, _ = plot_wr(wr)
    if fname:
        fig.savefig(fname, bbox_inches='tight')
    else:
        plt.show()
    plt.close()
5,354,445
def split_errorSC(tr, t1, t2, q, Emat, maxdt, ddt, dphi): """ Calculate error bars based on a F-test and a given confidence interval q Parameters ---------- tr : :class:`~obspy.core.Trace` Seismogram t1 : :class:`~obspy.core.utcdatetime.UTCDateTime` Start time of picking window t2 : :class:`~obspy.core.utcdatetime.UTCDateTime` End time of picking window q : float Confidence level Emat : :class:`~numpy.ndarray` Energy minimization matrix Returns ------- err_dtt : float Error in dt estimate (sec) err_phi : float Error in phi estimate (degrees) err_contour : :class:`~numpy.ndarray` Error contour for plotting """ from scipy import stats # Bounds on search phi = np.arange(-90.0, 90.0, dphi)*np.pi/180. dtt = np.arange(0., maxdt, ddt) # Copy trace to avoid overriding tr_tmp = tr.copy() tr_tmp.trim(t1, t2) # Get degrees of freedom dof = split_dof(tr_tmp) if dof < 3: dof = 3 print( "Degrees of freedom < 3. Fixing to DOF = 3, which may " + "result in accurate errors") n_par = 2 # Error contour vmin = Emat.min() vmax = Emat.max() err_contour = vmin*(1. + n_par/(dof - n_par) * stats.f.ppf(1. - q, n_par, dof - n_par)) # Estimate uncertainty (q confidence interval) err = np.where(Emat < err_contour) if len(err) == 0: return False, False, False err_phi = max( 0.25*(phi[max(err[0])] - phi[min(err[0])])*180./np.pi, 0.25*dphi) err_dtt = max(0.25*(dtt[max(err[1])] - dtt[min(err[1])]), 0.25*ddt) return err_dtt, err_phi, err_contour
5,354,446
def main():
    """Build the same model using pybinding and kwant and verify that the results are identical"""
    width, length = 15, 15
    electron_energy = 0.25
    barrier_heights = np.linspace(0, 0.5, 100)

    with pb.utils.timed("pybinding:"):
        pb_transmission = measure_pybinding(width, length, electron_energy, barrier_heights)
    with pb.utils.timed("kwant:"):
        kwant_transmission = measure_kwant(width, length, electron_energy, barrier_heights)

    plt.plot(barrier_heights, pb_transmission, label="pybinding")
    plt.plot(barrier_heights, kwant_transmission, ls="--", label="kwant")
    plt.ylabel("transmission")
    plt.xlabel("barrier height (eV)")
    plt.axvline(electron_energy, 0, 0.5, color="gray", ls=":")
    plt.annotate("electron energy\n{} eV".format(electron_energy), (electron_energy, 0.52),
                 xycoords=("data", "axes fraction"), ha="center")
    pb.pltutils.despine()
    pb.pltutils.legend()
    plt.show()
5,354,447
def test_add_via_stack():
    """ Unit test definition """
    spec_file = 'bpg_test_suite/specs/add_via_stack.yaml'
    plm = BPG.PhotonicLayoutManager(spec_file)
    plm.generate_content()
    plm.generate_gds()
5,354,448
def test_ridges_at_region():
    """Test getting ridges that bound regions."""
    v = Voronoi(POINTS)
    converter = VoronoiConverter(v)
    ridges_at_region = converter.get_ridges_at_region()

    assert_tuple_equal(ridges_at_region.shape, (len(v.regions), 6))
    assert_is_instance(ridges_at_region[0, 0], np.int_)
    assert_array_equal(ridges_at_region[0], [-1] * 6)
5,354,449
def get_image():
    """
    Returns an image taken using raspberry pi camera.
    This image can be directly used with OpenCV library.
    """
    if DEBUG:
        print("\tTakes image using camera")
    camera = PiCamera()
    camera.resolution = (512, 512)
    raw_img = PiRGBArray(camera)
    time.sleep(0.1)  # Let camera warm up
    camera.capture(raw_img, format="bgr")
    camera.close()
    image = raw_img.array
    return image
5,354,450
def add(x, y):
    """Creates an SMTLIB addition statement formatted string

    Parameters
    ----------
    x, y: float
        First and second numerical arguments to include in the expression
    """
    return "(+ " + x + " " + y + ")"
5,354,451
def build_put_cat_request( **kwargs # type: Any ): # type: (...) -> HttpRequest """Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow. :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in our example to find the input shape. Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. :paramtype json: any :keyword content: Pass in binary content you want in the body of the request (typically bytes, a byte iterator, or stream input). Put a cat with name 'Boots' where likesMilk and hisses is false, meows is true. :paramtype content: any :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow. :rtype: ~azure.core.rest.HttpRequest Example: .. code-block:: python # JSON input template you can fill out and use as your body input. json = { "hisses": bool, # Optional. "likesMilk": bool, # Optional. "meows": bool, # Optional. "name": "str" # Required. } """ content_type = kwargs.pop('content_type', None) # type: Optional[str] accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/multipleInheritance/cat') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, headers=header_parameters, **kwargs )
5,354,452
def partial(fn: Callable, *args, **kwargs) -> Callable:
    """Takes a function and fewer than normal arguments, and returns a function
    that will consume the remaining arguments and call the function"""
    def partial_fn(*rem_args, **rem_kwargs):
        return fn(*args, *rem_args, **kwargs, **rem_kwargs)
    return partial_fn
5,354,453
def do_nothing(apps, schema_editor):
    """
    Do nothing since this is removing bad data
    """
5,354,454
def parse_lipid(name): """ parse_lipid description: parses a lipid name into lipid class and fatty acid composition, returning a dictionary with the information. Handles total fatty acid composition, as well as individual composition, examples: PC(38:3) --> class: PC, n_carbon: 38, n_unsat: 3 PC(18:1/20:2) --> class: PC, n_carbon: 38, n_unsat: 3, fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 20, n_unsat: 2)) Also, handles special fatty acid notations (modifiers) used for ceramides and plasmalogen lipids, examples: Cer(d36:2) --> class: Cer, n_carbon: 36, n_unsat: 2, fa_mod: d Cer(d18:1/18:1) --> class: PC, n_carbon: 38, n_unsat: 3, fa_mod: d, fa_comp: ((n_carbon: 18, n_unsat: 1), (n_carbon: 18, n_unsat: 1)) PE(p40:4) --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p PE(p20:2/20:2) --> class: PE, n_carbon: 40, n_unsat: 4, fa_mod: p, fa_comp: ((n_carbon: 20, n_unsat: 2), (n_carbon: 20, n_unsat: 2)) lipid name must conform to the general format: <lipid_class>([modifier]<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>[/<n_carbon>:<n_unsat>]]) parameters: name (str) -- lipid name to parse returns: (dict or None) -- parsed lipid information (always contains 'class', 'n_carbon', and 'n_unsat' attributes) or None if it cannot be parsed as a lipid """ parsed = {} # compile regex pattern l_pat = re.compile( r"^(?P<cls>[A-Za-z123]+)\((?P<mod>[pdoe]*)(?P<fc1>[0-9]+):(?P<fu1>[0-9]+)/*((?P<fc2>[0-9]+):(?P<fu2>[0-9]+))*/*((?P<fc3>[0-9]+):(?P<fu3>[0-9]+))*\)") # parse the name using regex l_res = l_pat.match(name) if l_res: # lipid class (required) if l_res.group('cls'): parsed["lipid_class"] = l_res.group('cls') else: # msg = "parse_lipid: failed to parse lipid class for: {}".format(name) # raise ValueError(msg) return None # value error due to failure to parse fatty acid composition # def raise_fatty_acid_value_error(): # msg = "parse_lipid: failed to parse fatty acid composition for: {}".format(name) # raise ValueError(msg) # fc1 and fu1 are always required if not l_res.group('fc1') or not l_res.group('fu1'): # raise_fatty_acid_value_error() return None # check if a second fatty acid composition is supplied, e.g. (18:1/16:0) # if so, need to compute total fatty acid composition and add individual # fatty acids to a list if l_res.group('fc2'): if not l_res.group('fu2'): # raise_fatty_acid_value_error() return None # add info from the first two fatty acid compositions fc1, fu1 = int(l_res.group('fc1')), int(l_res.group('fu1')) fc2, fu2 = int(l_res.group('fc2')), int(l_res.group('fu2')) parsed["fa_comp"] = [ {"n_carbon": fc1, "n_unsat": fu1}, {"n_carbon": fc2, "n_unsat": fu2} ] # check for 3rd FA composition fc3, fu3 = 0, 0 if l_res.group('fc3'): if not l_res.group('fu3'): # raise_fatty_acid_value_error() return None fc3, fu3 = int(l_res.group('fc3')), int(l_res.group('fu3')) parsed["fa_comp"].append({"n_carbon": fc3, "n_unsat": fu3}) # compute total fatty acid composition parsed["n_carbon"] = fc1 + fc2 + fc3 parsed["n_unsat"] = fu1 + fu2 + fc3 else: # fc1 and fu1 are the total fatty acid composition parsed["n_carbon"] = int(l_res.group('fc1')) parsed["n_unsat"] = int(l_res.group('fu1')) # add fatty acid modifier if present if l_res.group('mod'): parsed["fa_mod"] = l_res.group('mod') else: # could not parse name as a lipid parsed = None return parsed
5,354,455
def query(lon, lat, coordsys='gal', mode='full', limit=500000): """ Send a line-of-sight reddening query to the Argonaut web server. lon, lat: longitude and latitude, in degrees. coordsys: 'gal' for Galactic, 'equ' for Equatorial (J2000). mode: 'full', 'lite' or 'sfd' In 'full' mode, outputs a dictionary containing, among other things: - 'distmod': The distance moduli that define the distance bins. - 'best': The best-fit (maximum proability density) line-of-sight reddening, in units of SFD-equivalent E(B-V), to each distance modulus in 'distmod.' See Schlafly & Finkbeiner (2011) for a definition of the reddening vector (use R_V = 3.1). - 'samples': Samples of the line-of-sight reddening, drawn from the probability density on reddening profiles. - 'success': 1 if the query succeeded, and 0 otherwise. - 'converged': 1 if the line-of-sight reddening fit converged, and 0 otherwise. - 'n_stars': # of stars used to fit the line-of-sight reddening. - 'DM_reliable_min': Minimum reliable distance modulus in pixel. - 'DM_reliable_max': Maximum reliable distance modulus in pixel. Less information is returned in 'lite' mode, while in 'sfd' mode, the Schlegel, Finkbeiner & Davis (1998) E(B-V) is returned. """ # make sure we have list if type(lon) == float: lon, lat = [lon], [lat] # Make sure to have less than 500000 objects (the limit). # Cut the list in smaller pieces if that is the case. if len(lon) >= limit: dicts = [query(loni, lati, coordsys=coordsys, mode=mode) for loni, lati in zip(chunk(lon, limit - 1), chunk(lat, limit - 1))] for dic in dicts[1:]: for k in dic: dicts[0][k].extend(dic[k]) return dicts[0] if coordsys.lower() in ['gal', 'g']: payload = {'mode': mode, 'l': lon, 'b': lat} elif coordsys.lower() in ['equ', 'e']: payload = {'mode': mode, 'ra': lon, 'dec': lat} else: raise ValueError("coordsys '{0}' not understood.".format(coordsys)) req = requests.post('http://argonaut.skymaps.info/gal-lb-query-light', data=json.dumps(payload), headers={'content-type': 'application/json'}) try: req.raise_for_status() except requests.exceptions.HTTPError as excep: print('Response received from Argonaut:') print(req.text) raise excep return json.loads(req.text)
5,354,456
def CreatePreDefinedMapUnits(Map_Units, in_features, field_name=None): """ Intersects the Map Units feature class with the in_features feature class. A field name may be provided from the in_features to include in the output feature class as a label for the map unit, the field will be updated with 'N/A' for any map units that don't interstect the in_features. :param Map_Units: the Map Units feature class :param in_features: a feature class to create pre-defined map units from :param field_name: the name of a field in the in_features attribute table to preserve in the output. Will be updated with 'N/A' if no overlap. :return: None """ # Clip the provided features to the Map_Units layer clip_features = Map_Units out_feature_class = "in_memory/clip" arcpy.Clip_analysis(in_features, clip_features, out_feature_class) # Union the clipped features and the Map Units layer FCs = [Map_Units, out_feature_class] out_feature_class = "in_memory/Map_Units_Union" Map_Units_Union = arcpy.Union_analysis(FCs, out_feature_class) # Overwrite the existing Map_Units layer util.RenameFeatureClass(Map_Units_Union, Map_Units) # Populate blank fields with N/A if field_name: with arcpy.da.UpdateCursor(Map_Units, field_name) as cursor: for row in cursor: if row[0] is None or row[0] == "": row[0] = "N/A" cursor.updateRow(row) # # Add fields and populate with 'True' wherever a new map unit was created # if field_name: # fieldsToAdd = [field_name] # fieldTypes = ["TEXT"] # AddFields(Map_Units, fieldsToAdd, fieldTypes) # FID_field = "FID_clip" # with arcpy.da.UpdateCursor(Map_Units, # [FID_field, field_name]) as cursor: # for row in cursor: # if row[0] > -1: # row[1] = "True" # else: # row[1] = "N/A" # cursor.updateRow(row) # Clean up arcpy.Delete_management("in_memory")
5,354,457
def f(x0, x1, l, mig_spont, mig_ind, eps):
    """ function defining the model dx/dt=f(x)"""
    return [f0(x0, x1, l, mig_spont, mig_ind, eps), f1(x0, x1, l, mig_spont, mig_ind, eps)]
5,354,458
def transform(source, transforms, params=None, output=None): """ Convenience function for applying an XSLT transform. Returns a result object. source - XML source document in the form of a string (not Unicode object), file-like object (stream), file path, URI or amara.lib.inputsource instance. If string or stream it must be self-contained XML (i.e. not requiring access to any other resource such as external entities or includes) transforms - XSLT document (or list thereof) in the form of a string, stream, URL, file path or amara.lib.inputsource instance params - optional dictionary of stylesheet parameters, the keys of which may be given as unicode objects if they have no namespace, or as (uri, localname) tuples if they do. output - optional file-like object to which output is written (incrementally, as processed) """ #do the imports within the function: a tad bit less efficient, but #avoid circular crap from amara.lib import inputsource from amara.xpath.util import parameterize from amara.xslt.result import streamresult, stringresult from amara.xslt.processor import processor params = parameterize(params) if params else {} proc = processor() if isinstance(transforms, (list, tuple)): for transform in transforms: proc.append_transform(inputsource(transform)) else: proc.append_transform(inputsource(transforms)) if output is not None: result = streamresult(output) else: result = stringresult() return proc.run(inputsource(source), params, result)
5,354,459
def lookup_user_github_username(user_github_id: int) -> Optional[str]:
    """
    Given a user github ID, looks up the user's github login/username.
    :param user_github_id: the github id
    :return: the user's github login/username
    """
    try:
        headers = {
            'Authorization': 'Bearer {}'.format(cla.conf['GITHUB_OAUTH_TOKEN']),
            'Accept': 'application/json',
        }
        r = requests.get(f'https://api.github.com/user/{user_github_id}', headers=headers)
        r.raise_for_status()
    except requests.exceptions.HTTPError as err:
        msg = f'Could not get user github user from id: {user_github_id}: error: {err}'
        cla.log.warning(msg)
        return None

    github_user = r.json()
    if 'message' in github_user:
        cla.log.warning(f'Unable to lookup user from id: {user_github_id} '
                        f'- message: {github_user["message"]}')
        return None
    else:
        if 'login' in github_user:
            return github_user['login']
        else:
            cla.log.warning('Malformed HTTP response from GitHub - expecting "login" attribute '
                            f'- response: {github_user}')
            return None
5,354,460
def get_plot_values(radar):
    """ Return the values specific to a radar for plotting the radar fields. """
    return _DEFAULT_PLOT_VALUES[radar].copy()
5,354,461
def time_series_dict_to_list(dictionary, key=lambda x: time.mktime(x.timetuple()), value=identity):
    """
    Convert the incoming dictionary of keys to a list of sorted tuples.

    :param dictionary: dictionary to retrieve data from
    :param key: expression used to retrieve the time_series key from the key
    :param value: expression used to retrieve the time_series value from the value
    :return: list of tuples where index 0 is seconds since epoch, and index 1 is value
    """
    if key is None:
        key = identity
    if value is None:
        value = identity
    time_series = [[key(k), value(v)] for k, v in dictionary.iteritems()]
    return sorted(time_series, key=itemgetter(0))
5,354,462
def get_data_from_csv(csv_reader):
    """Creates a list of StatEntry objects based on data in CSV data.

    Input CSV data must be in the format:
        Description,timestamp,num_batches,time mean value,time sd

    Args:
        csv_reader: csv.reader instance.

    Returns:
        A tuple of datetime timestamp and list of benchmark_util.StatEntry objects.

    Raises:
        ValueError: if CSV is invalid.
    """
    timestamp = None
    stat_entries = []

    for row in csv_reader:
        if len(row) != 5:
            raise ValueError('Expected 5 entries per line in the input CSV file, '
                             'but found %d entries.' % len(row))
        if '' in row:
            raise ValueError('Found empty entries in row: %s' % row)

        # Set timestamp based on the first line in CSV file.
        if timestamp is None:
            # Example of time formatting: 2017-06-26 02:59:29.325579
            timestamp = datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S.%f")
        stat_entries.append(
            benchmark_util.StatEntry(row[0], float(row[3]), 1))

    return timestamp, stat_entries
5,354,463
def init_database(db: str):
    """
    Init database.
    :param db: Database file path.
    :return:
    """
    connection = sqlite3.connect(db)
    cursor = connection.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS user_traffic ('
                   'id INTEGER PRIMARY KEY AUTOINCREMENT,'
                   'name TEXT,'
                   'traffic INT,'
                   'type TEXT,'
                   'timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)')
    cursor.execute('CREATE TABLE IF NOT EXISTS system_traffic ('
                   'id INTEGER PRIMARY KEY AUTOINCREMENT,'
                   'name TEXT,'
                   'traffic INT,'
                   'type TEXT,'
                   'timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)')
    cursor.close()
    connection.commit()
    connection.close()
5,354,464
def rootUrlJoin(base, extend):
    """
    Adds a path element to the path within a ROOT url
    """
    if base:
        match = re.match("^root://([^/]+)/(.+)", base)
        if match:
            host = match.group(1)
            path = match.group(2)
            newpath = os.path.join(path, extend)
            newurl = "root://%s/%s" % (host, newpath)
            return newurl
    return None
5,354,465
def getChildren(SOUP, ADDRESS_SET, FOLDER_LIST, FOLDER_PTR, DEBUG, LEVEL=0): """ Loop interative call to move into soup Args: SOUP ie bs.BeautifulSoup( doc ) or a sub-portion there-of ADDRESS_SET list of address information FOLDER_LIST list of folders FOLDER_PTR integer pointer into FOLDER_LIST DEBUG boolean, if true print messages LEVEL integer counter that tracks recursive getChildren call only prints when DEBUG = True Returns: address set FOLDER_LIST FOLDER_PTR dev note: using SOUP.get_text() vs str(SOUP) solves some recursion issues except get_text() does not include html formatting, which breaks code that tries to match the formatting; therefore use SOUP.name for that note: str(SOUP) returns UTF-8 SOUP.decode() also returns str but in unicode, SOUP.decode_contents() returns str but without leading element SOUP.get_text() is only the human readable text per ref: https://stackoverflow.com/questions/31528600/beautifulsoup-runtimeerror-maximum-recursion-depth-exceeded """ if DEBUG: print(f'getChildren call level = {LEVEL}') LEVEL = LEVEL + 1 # - first handle if new folder or not soup_text = (SOUP.decode()).replace('\r', ' ').replace('\n', ' ') # was using SOUP.get_text() but it doesn't include html formatting # need html formatting for the next line to work # SOUP.name gives the current element so don't have to use a string if SOUP.name == 'dl': #if (re.search('^<dl>', stringNChar(soup_text, 10)) is not None): newFolder = True if (DEBUG): print('SOUPI' + str(len(SOUP)) + ':enter:' + stringNChar(soup_text, 100)) else: newFolder = False if (DEBUG): print('SOUPI' + str(len(SOUP)) + '::' + stringNChar(soup_text, 100)) # - now handle the sub elements of the passed SOUP tagNowI = -1 while (tagNowI < (len(SOUP)-1)): tagNowI = tagNowI + 1 # only process Tags if (re.search('Tag', str(type(SOUP.contents[tagNowI]))) is None): continue soupLength = len(SOUP.contents[tagNowI]) if (DEBUG): print('getChildren: ' + str(tagNowI) + '::' + str(soupLength)) if (soupLength == 0): continue if (soupLength == 1): if (DEBUG): if type(SOUP.contents[tagNowI]) is bs.element.NavigableString: print('found:: ' + (SOUP.contents[tagNowI].title())) else: print('found:: ' + (SOUP.contents[tagNowI].get_text())) (addr, FOLDER_LIST, elemType) = tagElement( SOUP.contents[tagNowI], FOLDER_LIST, DEBUG) if (DEBUG): print('element type: ' + str(elemType)) if (elemType == 0 and addr is not None): # append the dd information string to the last address ADDRESS_SET[len(ADDRESS_SET)-1].append(addr) elif (elemType == 1 and addr is not None): # append the latest address information to the ADDRESS_SET ADDRESS_SET.append(addr) elif (elemType == 2): # 2: increment the folder pointer; QQQ okay but how to leave folder? if (tagNowI < len(SOUP)-2): x=1 if (len(SOUP.contents[tagNowI+1]) == 1): # empty folder must leave (fixes Raspberry pi issue but not Entertainment and Lifestyle not-leaving folder issue) x = FOLDER_LIST.pop() if (DEBUG): print('Drop Bad folder:' + x) elif (elemType == 3 or elemType == 4): # 3: folder name new; QQQ: already appended at a lower level # 4: folder name new; QQQ: already appended at a lower level; parent folder # this doesn't do anything anymore except prevent no match message # script not optimized so don't remove; leave for documentation x = 1 else: # nothing happened; why? # <p> gets here; needs to be folder type or is it dl that marks folders? 
technically both # title gets here also # \n gets here if (DEBUG): print('no match by type:: ' + (SOUP.contents[tagNowI].get_text())) else: # pseudo-code if len > 1 then need to call getChildren # when exit after a call to getChildren then reduce FOLDER_PTR??? # problem decrementing FOLDER_PTR here is too overzealous if (re.search('empty_folder_auto_can_bus', stringNChar(SOUP.contents[tagNowI].get_text(), 100)) is not None): x = 1 if (DEBUG): print('Calling getChildren:' + str(tagNowI) + ': ' + stringNChar(SOUP.contents[tagNowI].get_text(), 100)) (ADDRESS_SET, FOLDER_LIST, FOLDER_PTR) = getChildren( SOUP.contents[tagNowI], ADDRESS_SET, FOLDER_LIST, FOLDER_PTR, DEBUG, LEVEL) if newFolder: pre_folder = FOLDER_LIST FOLDER_LIST.pop() if (DEBUG): print('Exit folder (' + str(FOLDER_PTR) + ') from' + ':'.join(pre_folder) + '\n\tnow' + ':'.join(FOLDER_LIST)) FOLDER_PTR = 0 # should it -1 instead if odd/even return(ADDRESS_SET, FOLDER_LIST, FOLDER_PTR)
5,354,466
def at_initial_setup():
    """
    Custom hook for users to overload some or all parts of the initial
    setup. Called very last in the sequence. It tries to import and
    run a module settings.AT_INITIAL_SETUP_HOOK_MODULE and will fail
    silently if this does not exist or fails to load.
    """
    modname = settings.AT_INITIAL_SETUP_HOOK_MODULE
    if not modname:
        return
    try:
        mod = __import__(modname, fromlist=[None])
    except (ImportError, ValueError):
        return
    print " Running at_initial_setup() hook."
    if mod.__dict__.get("at_initial_setup", None):
        mod.at_initial_setup()
5,354,467
def either(a, b):
    """
    :param a: Uncertain value (might be None).
    :param b: Default value.
    :return: Either the uncertain value if it is not None or the default value.
    """
    return b if a is None else a
5,354,468
def get_app_label_and_model_name(path):
    """Gets app_label and model_name from the path given.

    :param str path: Dotted path to the model (without ".model", as stored
        in the Django `ContentType` model.
    :return tuple: app_label, model_name
    """
    parts = path.split('.')
    return (''.join(parts[:-1]), parts[-1])
5,354,469
def remove_cache():
    """Util to remove the cache files, which can be located at two different places
    depending if pycee is running as an installed package or as a cloned repository"""
    installed_module_path = pathlib.Path(__file__).parent.absolute()
    package_cache = glob.glob(os.path.join(installed_module_path, "*.cache*"))
    local_cache = glob.glob("pycee/*.cache*")
    files = package_cache + local_cache

    print("Cache removed!\nPlease run pycee again without -rm or --remove-cache argument to get your answers")
    # execvp replaces the current process.
    # This is currently necessary because the filecache package
    # wouldn't let me delete all cache files on the main process.
    # -f so not found files won't pollute the terminal
    os.execvp("rm", ["rm", "-f"] + files)
    # after execvp finishes executing rm it exits
5,354,470
def get_item_editor(val):
    """
    (val: Any) -> Editor

    Returns customized View editor type for given attribute value.
    """
    if isinstance(val, list):  # later might need tuple with label case
        if isinstance(val[0], str):
            return CheckListEditor(values=val)
        else:
            return CheckListEditor(values=[str(item) for item in val])
    if isinstance(val, bool):
        return BooleanEditor()
    else:
        return TextEditor(auto_set=False, enter_set=True)
5,354,471
def index_js_to_enriched_function_blocks(index_js: str) -> List[EnrichedFunctionBlock]:
    """
    Main function of the file.
    Converts raw index.js file into the output dataclass.
    """
    trimmed_index_js = trim_index_js(index_js)
    index_json = json.loads(trimmed_index_js)
    rtn_blocks = []
    for package_name, list_of_scala_types in index_json.items():
        for scala_type in list_of_scala_types:
            enriched_blocks = extract_enriched_function_blocks(package_name, scala_type)
            rtn_blocks.extend(enriched_blocks)
    return rtn_blocks
5,354,472
def menu(dictionary: Dictionary):
    """
    Wrapper for using the dictionary.
    """
    option = None
    menu_options = {'read_file': 'Read File',
                    'add_word': 'Add Word',
                    'find_word': 'Find Word',
                    'delete_word': 'Delete Word',
                    'exit': 'Exit'}
    exit_option = list(menu_options.keys()).index('exit') + 1

    while option != exit_option:
        print('---------------------')
        opt = 1
        for menu_option in menu_options.values():
            print('{}. {}'.format(opt, menu_option))
            opt += 1
        print('---------------------')

        try:
            option = int(input("Enter option: "))
            if option < 1 or option > exit_option:
                raise ValueError('Option must be between 1 and ' + str(exit_option))
        except ValueError as e:
            print('[{}] {}'.format('menu', e))
        else:
            if option != exit_option:
                process_option(dictionary, list(menu_options.keys())[option - 1])
                print("---------------------")
5,354,473
def compute_single_results(base_path: str, file_name: str, selection_metric: str, selection_scheme: Union[None, str],
                           selection_mode: str, selection_domain: str, result_scheme: str,
                           result_mode: str, result_metric: str):
    """
    Parameters
    ----------
    base_path
    file_name
    selection_metric
    selection_mode
    selection_scheme
    selection_domain
    result_scheme
    result_mode
    result_metric
    """
    path = base_path + file_name
    csv_path = base_path + 'results.csv'

    # read the data from the tensorboard summary writer file
    iterator = summary_iterator(path)
    tag_dict = create_tag_dict(iterator)

    # create a csv file for storing the results
    create_csv(csv_path, tag_dict)

    # read the results
    data_frame = read_csv_file(csv_path)

    # get the desired results in columns
    column_indices, target_groups = get_metric_columns(data_frame, selection_metric, selection_scheme,
                                                       mode=selection_mode)

    # determine the time step of the best results of the desired result
    selection_col_index = target_groups.index(selection_domain)
    _, time_step = get_max_val(column_indices[selection_col_index], data_frame)

    # get the targets and columns of the metrics, which should be reported
    column_indices, target_groups = get_metric_columns(data_frame, result_metric, result_scheme, mode=result_mode)
    results = select_results_by_time_step(column_indices, data_frame, time_step)

    result_dict = {}
    for key, value in zip(target_groups, results):
        result_dict[key] = value

    return result_dict
5,354,474
def run(): """ Step through each row and every 3rd column to find collisions """ trees = 0 x = 0 width = len(rows[0]) for line in rows[1:]: x += 3 if x >= width: x -= width if line[x] == "#": trees += 1 return trees
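# A hedged sketch of driving run(): it reads a module-level `rows` grid.
# The grid below is a hypothetical stand-in, not the original puzzle input.
rows = [
    "..##.....",
    "#...#...#",
    ".#....#..",
]
assert run() == 1  # only the third row has a '#' at the slope position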
5,354,475
def div_col(*items, size=None, style=None, id=None, classes=None) -> HTML: """Generate a new div with a col class Parameters ---------- items: argument list DOM children of this div """ children = ''.join(items) attr = [] if style is not None: attr.append(f'style="{style}"') if id is not None: attr.append(f'id="{id}"') if classes is not None: attr.append(f'class="{classes}"') elif size is not None: attr.append(f'class="col-{size}"') else: attr.append(f'class="col"') attr = ' '.join(attr) return f'<div {attr}>{children}</div>'
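# Usage sketches showing how the class attribute is resolved: explicit classes
# win over size, size wins over the bare "col" default.
assert div_col("<p>hi</p>") == '<div class="col"><p>hi</p></div>'
assert div_col("<p>hi</p>", size=6) == '<div class="col-6"><p>hi</p></div>'
assert div_col("a", "b", classes="col-auto text-end") == '<div class="col-auto text-end">ab</div>'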
5,354,476
def test_variable_init_with_no_terms() -> None: """Test fuzzy variable creation""" universe_range: tuple[int, int] = (0, 1) terms: dict = {} fuzzyvar: FuzzyVariable = FuzzyVariable(universe_range=universe_range, terms=terms) assert fuzzyvar.universe_range == universe_range assert fuzzyvar.terms == terms assert (fuzzyvar.universe == np.linspace(start=0, stop=1, num=11)).all()
5,354,477
def predict(image: bytes) -> ndarray: """ Call the model returning the image with the faces blured :param image: the image to blur the faces from :return: the image with the faces blured """ import face_recognition sigma = 50 image = face_recognition.load_image_file(image) locations = face_recognition.face_locations(image) for location in locations: (startY, endY) = location[0:2] (startX, endX) = location[2:4] image = blur_image(image, startX, endX, startY, endY, sigma=sigma) is_successful, im_png = cv2.imencode(".png", image) if is_successful: return im_png raise Exception("Error encoding image")
5,354,478
def get_basic_details(args, item): """ :param args: { "item_code": "", "warehouse": None, "customer": "", "conversion_rate": 1.0, "selling_price_list": None, "price_list_currency": None, "price_list_uom_dependant": None, "plc_conversion_rate": 1.0, "doctype": "", "name": "", "supplier": None, "transaction_date": None, "conversion_rate": 1.0, "buying_price_list": None, "is_subcontracted": "Yes" / "No", "ignore_pricing_rule": 0/1 "project": "", barcode: "", serial_no: "", warehouse: "", currency: "", update_stock: "", price_list: "", company: "", order_type: "", is_pos: "", ignore_pricing_rule: "", project: "", qty: "", stock_qty: "", conversion_factor: "" } :param item: `item_code` of Item object :return: frappe._dict """ if not item: item = frappe.get_doc("Item", args.get("item_code")) if item.variant_of: item.update_template_tables() from frappe.defaults import get_user_default_as_list user_default_warehouse_list = get_user_default_as_list('Warehouse') user_default_warehouse = user_default_warehouse_list[0] \ if len(user_default_warehouse_list) == 1 else "" item_defaults = get_item_defaults(item.name, args.company) warehouse = user_default_warehouse or item_defaults.get("default_warehouse") or args.warehouse material_request_type = '' if args.get('doctype') == "Material Request" and not args.get('material_request_type'): args['material_request_type'] = frappe.db.get_value('Material Request', args.get('name'), 'material_request_type') #Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master if not args.uom: if args.get('doctype') in ['Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice']: args.uom = item.sales_uom if item.sales_uom else item.stock_uom elif (args.get('doctype') in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']) or \ (args.get('doctype') == 'Material Request' and args.get('material_request_type') == 'Purchase'): args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom else: args.uom = item.stock_uom out = frappe._dict({ "item_code": item.name, "item_name": item.item_name, "description": cstr(item.description).strip(), "image": cstr(item.image).strip(), "warehouse": warehouse, "income_account": get_default_income_account(args, item_defaults), "expense_account": get_default_expense_account(args, item_defaults), "cost_center": get_default_cost_center(args, item_defaults), 'has_serial_no': item.has_serial_no, 'has_batch_no': item.has_batch_no, "batch_no": None, "item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in item.get("taxes")))), "uom": args.uom, "min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "", "qty": args.qty or 1.0, "stock_qty": args.qty or 1.0, "price_list_rate": 0.0, "base_price_list_rate": 0.0, "rate": 0.0, "base_rate": 0.0, "amount": 0.0, "base_amount": 0.0, "net_rate": 0.0, "net_amount": 0.0, "discount_percentage": 0.0, "supplier": item_defaults.get("default_supplier"), "update_stock": args.get("update_stock") if args.get('doctype') in ['Sales Invoice', 'Purchase Invoice'] else 0, "delivered_by_supplier": item.delivered_by_supplier if args.get("doctype") in ["Sales Order", "Sales Invoice"] else 0, "is_fixed_asset": item.is_fixed_asset, "weight_per_unit":item.weight_per_unit, "weight_uom":item.weight_uom, "last_purchase_rate": item.last_purchase_rate if args.get("doctype") in ["Purchase Order"] else 0 }) if item.enable_deferred_revenue: service_end_date = add_months(args.transaction_date, item.no_of_months) out.update({ "enable_deferred_revenue": 
item.enable_deferred_revenue, "deferred_revenue_account": get_default_deferred_revenue_account(args, item), "service_start_date": args.transaction_date, "service_end_date": service_end_date }) # calculate conversion factor if item.stock_uom == args.uom: out.conversion_factor = 1.0 else: out.conversion_factor = args.conversion_factor or \ get_conversion_factor(item.item_code, args.uom).get("conversion_factor") or 1.0 args.conversion_factor = out.conversion_factor out.stock_qty = out.qty * out.conversion_factor # calculate last purchase rate from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate out.last_purchase_rate = item_last_purchase_rate(args.name, args.conversion_rate, item.item_code, out.conversion_factor) # if default specified in item is for another company, fetch from company for d in [ ["Account", "income_account", "default_income_account"], ["Account", "expense_account", "default_expense_account"], ["Cost Center", "cost_center", "cost_center"], ["Warehouse", "warehouse", ""]]: if not out[d[1]]: out[d[1]] = frappe.db.get_value("Company", args.company, d[2]) if d[2] else None for fieldname in ("item_name", "item_group", "barcodes", "brand", "stock_uom"): out[fieldname] = item.get(fieldname) return out
5,354,479
def vec_len(x): """ Length of the 2D vector""" length = math.sqrt(x[0]**2 + x[1]**2) return length
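# Quick usage sketch: the classic 3-4-5 right triangle.
assert vec_len((3, 4)) == 5.0
assert vec_len([0, 0]) == 0.0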
5,354,480
def part1_count_increases(measurements): """Count increases of a measure with the next.""" windows = zip(measurements[1:], measurements[:-1]) increases = filter(lambda w: w[0] > w[1], windows) return len(list(increases))
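# Usage sketch (hypothetical depth readings): three of the four steps increase.
assert part1_count_increases([199, 200, 208, 210, 200]) == 3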
5,354,481
def remove_empties(seq): """ Remove items of length 0 >>> remove_empties([1, 2, ('empty', np.nan), 4, 5]) [1, 2, 4, 5] >>> remove_empties([('empty', np.nan)]) [nan] >>> remove_empties([]) [] """ if not seq: return seq seq2 = [x for x in seq if not (isinstance(x, tuple) and x and x[0] == 'empty')] if seq2: return seq2 else: return [seq[0][1]]
5,354,482
def breadth_first_graph_search(problem):
    """Breadth-first graph search algorithm"""
    global frontier, node, explored, counter
    if counter == -1:
        node = Node(problem.initial)
        display_current(node)
        if problem.goal_test(node.state):
            return node

        frontier = deque([node])  # FIFO queue

        display_frontier(frontier)
        explored = set()
    if counter % 3 == 0 and counter >= 0:
        node = frontier.popleft()
        display_current(node)
        explored.add(node.state)
    if counter % 3 == 1 and counter >= 0:
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                if problem.goal_test(child.state):
                    return child
                frontier.append(child)
        display_frontier(frontier)
    if counter % 3 == 2 and counter >= 0:
        display_explored(node)
    return None
5,354,483
def test_copy_files_no_source_dir(): """ Test that copy_files throws FileNotFoundError when source_dir does not exist. """ with pytest.raises(FileNotFoundError): abcutils.copy_files("dirthatdoesnotexist", "destination")
5,354,484
def to_smiles(rdm): """ SMILES string from an rdkit molecule object """ smi = _rd_chem.MolToSmiles(rdm) return smi
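# A hedged round-trip sketch; assumes `_rd_chem` is the usual
# `from rdkit import Chem as _rd_chem` alias used by this module.
benzene = _rd_chem.MolFromSmiles('c1ccccc1')
assert to_smiles(benzene) == 'c1ccccc1'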
5,354,485
def merge_bins(adata, bin_size): """Merge bins.""" orig_bins = collections.defaultdict(list) for coor in adata.var_names: chrom, start, end = coor.split(':')[0], int( coor.split(':')[1].split('-')[0]), int( coor.split(':')[1].split('-')[1]) orig_bins[chrom].append((start, end)) logging.info('Done with counting the bins') resized_bins_index = [] resized_chrs = [] resized_bins_counts = [] for chrom, ranges in orig_bins.items(): curr_bin = 0 curr_acc = [] for (start, end) in sorted(ranges): if start // bin_size == curr_bin: curr_acc.append(f'{chrom}:{start}-{end}') else: if curr_acc: # For the empty initialisation at the beginning of the chr. resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1)) resized_bins_index.append( f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}') curr_acc = [f'{chrom}:{start}-{end}'] curr_bin = start // bin_size resized_bins_counts.append(adata[:, curr_acc].X.sum(axis=1)) resized_bins_index.append( f'{chrom}:{curr_bin*bin_size}-{(curr_bin+1)*bin_size}') resized_chrs.append(scipy.sparse.csr_matrix(np.hstack(resized_bins_counts))) resized_bins_counts = [] logging.info('Done with %s', chrom) new_adata = anndata.AnnData( scipy.sparse.csr_matrix( np.hstack([chrom.toarray() for chrom in resized_chrs]))) new_adata.var_names = resized_bins_index new_adata.obs = adata.obs return new_adata
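# A hedged toy example (hypothetical AnnData, not part of the original code):
# the two 5kb bins in the same 10kb window are summed, the third starts a new bin.
toy = anndata.AnnData(scipy.sparse.csr_matrix(np.array([[1, 2, 3], [4, 5, 6]])))
toy.var_names = ['chr1:0-5000', 'chr1:5000-10000', 'chr1:12000-15000']
merged = merge_bins(toy, bin_size=10000)
assert list(merged.var_names) == ['chr1:0-10000', 'chr1:10000-20000']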
5,354,486
def main(): """ Perform cost modeling for PV systems using SAM and PVRPM """ pass
5,354,487
def show_all_fruits(): """Show all fruits in the database.""" fruits = fruits_collection.find({}) for fruit in fruits: print(fruit) context = { 'list_of_fruits': fruits_collection.find({}) } return render_template('show_fruits.html', **context)
5,354,488
def load_fields(path: str = f'{DEFAULT_FIELD_PATH}{FIELD_FILENAME}') -> dict:
    """Load Fields.

    Parameters
    ----------
    :param path: string path to the fields file.

    Returns
    -------
    A dictionary of fields, with the following format:
    {
        "field_name": {
            "help_text": "",
            "type": ""
        }
    }
    """
    with open(path, 'r') as json_file:
        return json.load(json_file)
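# A self-contained usage sketch: a hypothetical field file is written to a temp
# path and then loaded, illustrating the returned structure.
import json, tempfile
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump({"title": {"help_text": "Record title", "type": "str"}}, tmp)
fields = load_fields(path=tmp.name)
assert fields["title"]["type"] == "str"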
5,354,489
def test_recall_at_k(): """Test Metric.recall_at_k """ scores = np.array([[4., 3., 2., 1., 0.]]) gt = np.array([[1., 1., 0., 0., 1.]]) gt_2 = np.array([[0, 0, 1., 1., 1.]]) assert Metrics.recall_at_k(scores, gt, 2) == np.array([1.]), "recall@2 should be 1." assert Metrics.recall_at_k(scores, gt_2, 2) == np.array([0.]), "recall@2 should be 0." eps = np.array([0.00001]) assert np.abs(Metrics.recall_at_k(scores, gt, 3) - np.array([0.6666666])) < eps,\ "recall@3 should be .66666666" assert np.abs(Metrics.recall_at_k(scores, gt_2, 3) - np.array([0.3333333])) < eps,\ "recall@3 should be 0.3333333"
5,354,490
def set_max_concurrency( uses: int, bucket: t.Type[buckets.Bucket] ) -> t.Callable[[commands.base.CommandLike], commands.base.CommandLike]: """ Second order decorator that defines the max concurrency limit for a command. Args: uses (:obj:`int`): The maximum number of uses of the command that can be executing concurrently before a :obj:`~.errors.MaxConcurrencyLimitReached` will be raised upon invocation. bucket (Type[:obj:`~.buckets.Bucket`]): Bucket that command max concurrency will be processed under. """ if uses < 1 or not isinstance(uses, int): raise ValueError("'uses' must be a positive integer") def decorate(c_like: commands.base.CommandLike) -> commands.base.CommandLike: if not isinstance(c_like, commands.base.CommandLike): raise SyntaxError("'set_max_concurrency' decorator must be above the 'command' decorator") c_like.max_concurrency = (uses, bucket) return c_like return decorate
5,354,491
def patch_jars(cluster, localSolrDir, n=None, jars='core solrj', vers='4.7.1'): """ Replaces Solr JAR files on remote servers with new ones built locally. This command helps you patch a running system with a quick fix w/o having to rebuild the AMI. """ localSolrDir = os.path.expanduser(localSolrDir) if os.path.isdir(localSolrDir) is False: _fatal('Local Solr directory %s not found!' % localSolrDir) # on first server, rm -rf cloud/tmp/jars/*; mkdir -p cloud/tmp/jars # upload jars to first server into cloud/tmp/jars # upload the ssh key to .ssh # scp jars from first server to others via fab run jarList = jars.split() filesToPatch = [] for jar in jarList: jarFile = '%s/build/solr-%s/solr-%s-%s.jar' % (localSolrDir, jar, jar, vers) if os.path.isfile(jarFile): filesToPatch.append(jarFile) else: _fatal('JAR %s not found on LOCAL FS!' % jarFile) # get list of hosts and verify SSH connectivity cloud = _provider_api(cluster) hosts = _cluster_hosts(cloud, cluster) # ability to patch a single server only if n is not None: hosts = [hosts[int(n)]] _verify_ssh_connectivity(hosts) remoteSolrDir = _env(cluster, 'solr_tip') # get num Solr nodes per host to determine which ports are active numNodes = _num_solr_nodes_per_host(cluster) activePorts = [] for n in range(0,numNodes): activePorts.append(str(84 + n)) solrHostsAndPortsToRestart = {} with settings(host_string=hosts[0]), hide('output', 'running', 'warnings'): host = hosts[0] solrHostsAndPortsToRestart[host] = set([]) # set is important remoteJarDir = '%s/cloud/tmp/jars' % user_home run('mkdir -p %s/.ssh' % user_home) run('rm -rf %s; mkdir -p %s' % (remoteJarDir,remoteJarDir)) put(_env(cluster,'ssh_keyfile_path_on_local'), '%s/.ssh' % user_home) run('chmod 600 '+_env(cluster,'ssh_keyfile_path_on_local')) for jarFile in filesToPatch: lastSlashAt = jarFile.rfind('/') remoteJarFile = '%s/%s' % (remoteJarDir, jarFile[lastSlashAt+1:]) _status('Uploading to %s on %s ... please be patient (the other hosts will go faster)' % (remoteJarFile, host)) put(jarFile, remoteJarDir) run('cp %s %s/dist' % (remoteJarFile, remoteSolrDir)) for port in activePorts: solrHostsAndPortsToRestart[host].add(port) run('cp %s %s/cloud%s/solr-webapp/webapp/WEB-INF/lib' % (remoteJarFile, remoteSolrDir, port)) # scp from the first host to the rest if len(hosts) > 1: for h in range(1,len(hosts)): host = hosts[h] solrHostsAndPortsToRestart[host] = set([]) # set is important run('scp -o StrictHostKeyChecking=no -i %s %s %s@%s:%s' % (_env(cluster,'ssh_keyfile_path_on_local'), remoteJarFile, ssh_user, host, remoteSolrDir+'/dist')) for port in activePorts: run('scp -o StrictHostKeyChecking=no -i %s %s %s@%s:%s/cloud%s/solr-webapp/webapp/WEB-INF/lib' % (_env(cluster,'ssh_keyfile_path_on_local'), remoteJarFile, ssh_user, host, remoteSolrDir, port)) solrHostsAndPortsToRestart[host].add(port) _info('JARs uploaded and patched successfully.') _rolling_restart_solr(cloud, cluster, solrHostsAndPortsToRestart, 0)
5,354,492
def three_to_one_protocol_bob(q1, q2, q3, bob, socket):
    """
    Implements Bob's side of the 3->1 distillation protocol.
    This function should perform the gates and measurements for 3->1 using
    qubits q1, q2 and q3, then send the measurement outcome to Alice and determine
    if the distillation was successful.
    :param q1: Bob's qubit from the first entangled pair
    :param q2: Bob's qubit from the second entangled pair
    :param q3: Bob's qubit from the third entangled pair
    :param bob: Bob's NetQASMConnection
    :param socket: Bob's classical communication socket to Alice
    :return: True/False indicating if protocol was successful
    """
    b1, b2 = three_to_one_gates_and_measurement_bob(q1, q2, q3)
    bob.flush()

    # Send measurement result to Alice, receive measurement result from Alice and check if protocol was successful
    b1 = int(b1)
    b2 = int(b2)
    socket.send_structured(StructuredMessage("The outcome is: ", (b1, b2)))
    a1, a2 = socket.recv_structured().payload
    if (a1, a2) == (b1, b2):
        return True
    else:
        return False
5,354,493
def test_run(cli_runner: CliRunner, servo_cli: Typer) -> None: """Run the servo"""
5,354,494
def test_tensorboard() -> None: """Test if tensorboard returns a decorator.""" # Prepare with TemporaryDirectory() as tmpdir: my_decorator = tensorboard(tmpdir) # Assert assert callable(my_decorator)
5,354,495
def read_code_blocks_from_md(md_path):
    """
    Read ```python annotated code blocks from a markdown file.

    Args:
        md_path (str): Path to the markdown file

    Returns:
        py_blocks ([str]): The blocks of python code.
    """
    with open(md_path, "r") as f:
        full_md = f.read()
    md_py_splits = full_md.split("```python")[1:]
    py_blocks = [split.split("```")[0] for split in md_py_splits]
    return py_blocks
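# A self-contained sketch (hypothetical temp file) showing the extraction:
import os, tempfile
md = "Intro\n```python\nprint('a')\n```\ntext\n```python\nprint('b')\n```\n"
with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as tmp:
    tmp.write(md)
blocks = read_code_blocks_from_md(tmp.name)
assert blocks == ["\nprint('a')\n", "\nprint('b')\n"]
os.remove(tmp.name)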
5,354,496
def CBOW(vocab_size, emb_size):
    """
    CBOW: Function to define the CBOW model
    parameters:
        vocab_size: the vocabulary size
        emb_size: dimension of the embedding vector

    return:
        List of theano variables [context, target], represents the model input,
        Theano function represents the loss (i.e. the cost or the objective) function,
        List of theano (shared) variable params, represents the parameters of the model.
    """
    context = T.imatrix(name='context')
    target = T.ivector('target')

    W_in_values = np.asarray(np.random.uniform(-1.0, 1.0, (vocab_size, emb_size)),
                             dtype=theano.config.floatX)
    W_out_values = np.asarray(np.random.normal(scale=1.0 / math.sqrt(emb_size),
                                               size=(emb_size, vocab_size)),
                              dtype=theano.config.floatX)

    W_in = theano.shared(
        value=W_in_values,
        name='W_in',
        borrow=True)
    W_out = theano.shared(
        value=W_out_values,
        name='W_out',
        borrow=True)

    h = T.mean(W_in[context], axis=1)           # compute the hidden (projection) layer output : input -> hidden (eq. 1)
    uj = T.dot(h, W_out)                        # hidden -> output (eq. 2)
    p_target_given_contex = T.nnet.softmax(uj)  # softmax activation (eq. 3)
    loss = -T.mean(T.log(p_target_given_contex)[T.arange(target.shape[0]), target])  # loss function (eq. 4)

    params = [W_in, W_out]
    return [context, target], loss, params
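# A hedged sketch of compiling a training step from CBOW's outputs (hypothetical
# sizes, learning rate and batch arrays; the original training loop is not shown):
# inputs, loss, params = CBOW(vocab_size=5000, emb_size=100)
# grads = T.grad(loss, params)
# updates = [(p, p - 0.1 * g) for p, g in zip(params, grads)]
# train_step = theano.function(inputs=inputs, outputs=loss, updates=updates)
# batch_loss = train_step(context_batch, target_batch)  # int32 arrays of shape (N, window) and (N,)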
5,354,497
def elina_tcons0_array_add_dimensions_with(tcons_array, dimchange): """ Add dimensions to an ElinaTcons0Array by following the semantics of an ElinaDimchange. Parameters ---------- tcons_array : ElinaTcons0ArrayPtr Pointer to the ElinaTcons0Array to which we want to add dimensions. dimchange : ElinaDimchangePtr Pointer to the ElinaDimchange which semantics we want to follow. Returns ------- None """ try: elina_tcons0_array_add_dimensions_with_c = elina_auxiliary_api.elina_tcons0_array_add_dimensions_with elina_tcons0_array_add_dimensions_with_c.restype = None elina_tcons0_array_add_dimensions_with_c.argtypes = [ElinaTcons0ArrayPtr, ElinaDimchangePtr] elina_tcons0_array_add_dimensions_with_c(tcons_array, dimchange) except: print('Problem with loading/calling "elina_tcons0_array_add_dimensions_with" from "libelinaux.so"') print('Make sure you are passing ElinaTcons0ArrayPtr, ElinaDimchangePtr to the function')
5,354,498
def subject(request, clas_slug, subject_slug, page=1):
    """List of GDZ solution books for a subject"""
    gdz_clas = get_object_or_404(GdzClas, slug=clas_slug)
    gdz_subject = get_object_or_404(GdzSubject, slug=subject_slug, gdz_clas=gdz_clas)

    book_list = GdzBook.published.filter(gdz_clas=gdz_clas,
                                         gdz_subject=gdz_subject).order_by('-public_time')

    paginator = Paginator(book_list, PAGE_ITEM)
    try:
        books = paginator.page(page)
    except EmptyPage:
        raise Http404

    h1 = "Гдз {subject_title} {clas_slug} клас".format(subject_title=gdz_subject.title,
                                                       clas_slug=gdz_clas.slug)
    page_title = "Гдз {subject_title} {clas_slug} клас".format(subject_title=gdz_subject.title,
                                                               clas_slug=gdz_clas.slug)

    return render(request, 'gdz/subject.html', {'books': books,
                                                'h1': h1,
                                                'page_title': page_title,
                                                'gdz_clas': gdz_clas,
                                                'gdz_subject': gdz_subject,
                                                'paginate_link': 'gdz:subject_paginate',
                                                'link': 'gdz:subject'})
5,354,499