content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def select_organization(cursor): """Fetch all organization records. Args: cursor (mysql.connector.cursor): cursor Returns: list: select result rows """ # run the select cursor.execute('SELECT * FROM organization ORDER BY organization_id') rows = cursor.fetchall() return rows
5,355,500
def _GetInstDisk(index, cb): """Build function for calling another function with an instance Disk. @type index: int @param index: Disk index @type cb: callable @param cb: Callback """ def fn(ctx, inst): """Call helper function with instance Disk. @type ctx: L{InstanceQueryData} @type inst: L{objects.Instance} @param inst: Instance object """ try: disk = inst.disks[index] except IndexError: return _FS_UNAVAIL return cb(ctx, index, disk) return fn
5,355,501
def Jphii_cal(L, W, q, xi_local): """Jacobian matrix of the task map""" return np.array([[1, 0, -sin(q[2, 0]) * xi_local[0, 0] - cos(q[2, 0]) * xi_local[1, 0]], [0, 1, cos(q[2, 0]) * xi_local[0, 0] - sin(q[2, 0]) * xi_local[1, 0]]], dtype = np.float32) #return np.array([[1, 0, -xi_local[1, 0]], # [0, 1, xi_local[0, 0]]], dtype = np.float32)
5,355,502
def model_evaluation(val_data): """ function to test the loss and accuracy on validation data """ for X_test, y_test in val_data: y_pred = model(X_test, training=False) val_acc_metrics.update_state(y_test, y_pred) accuracy = val_acc_metrics.result() return float(accuracy)
5,355,503
def main(): """ Main function :return: None """ # read the netlist net = kicad_netlist_reader.netlist(sys.argv[1]) # extract boms from netlist boms = extract_boms(net) # write boms to csv files for sup, comps in boms.items(): num_comps = len(comps) if num_comps > 0: write_csv(sup, comps) print("> Generated CSV BOM for supplier with key `{}` " "with a total of {} unique components.".format(sup, num_comps)) else: print("> Ignoring supplier with key `{}`, no components found.".format(sup)) # we got so far, nice sys.exit(0)
5,355,504
def make_python_script_from_list(list_optical_elements1,script_file=""): """ program to build automatically a python script to run shadow3 the system is read from a list of instances of Shadow.Source and Shadow.OE :argument list of optical_elements A python list with intances of Shadow.Source and Shadow.OE objects :param script_file: a string with the name of the output file (default="", no output file) :return: template with the script """ #make sure that the list does not contain lists haslist = sum([isinstance(i,list) for i in list_optical_elements1]) list_optical_elements = list_optical_elements1 if haslist: while(haslist > 0): newlist = [] for i in list_optical_elements: if isinstance(i,list): newlist.extend(i) else: newlist.append(i) list_optical_elements = newlist haslist = sum([isinstance(i,list) for i in list_optical_elements]) #make sure that the list does not contain compoundOE (developed) hascomp = sum([isinstance(i,(Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)) for i in list_optical_elements]) if hascomp: newlist = [] for i in list_optical_elements: if isinstance(i,(Shadow.CompoundOE,Shadow.ShadowLibExtensions.CompoundOE)): newlist.extend(i.list) else: newlist.append(i) list_optical_elements = newlist template = """# # Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list(). # import Shadow import numpy # write (1) or not (0) SHADOW files start.xx end.xx star.xx iwrite = 0 # # initialize shadow3 source (oe0) and beam # beam = Shadow.Beam() """ n_elements = len(list_optical_elements) for i,element in enumerate(list_optical_elements): if isinstance(element,Shadow.Source): template += "oe0 = Shadow.Source()\n" elif isinstance(element,Shadow.OE): template += "oe%d = Shadow.OE()\n"%(i) elif isinstance(element,Shadow.IdealLensOE): template += "oe%d = Shadow.IdealLensOE()\n"%(i) else: raise Exception("Error: Element not known") template += "\n#\n# Define variables. See meaning of variables in: \n" \ "# https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml \n" \ "# https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n#\n" for ioe,oe1B in enumerate(list_optical_elements): template += "\n" if isinstance(oe1B,Shadow.Source): oe1 = Shadow.Source() elif isinstance(element,Shadow.OE): oe1 = Shadow.OE() elif isinstance(element,Shadow.IdealLensOE): oe1 = Shadow.IdealLensOE() else: raise Exception("Error: Element not known") if isinstance(oe1B,Shadow.IdealLensOE): template += "oe"+str(ioe)+".T_SOURCE = "+str(oe1B.T_SOURCE).strip()+"\n" template += "oe"+str(ioe)+".T_IMAGE = "+str(oe1B.T_IMAGE).strip()+"\n" template += "oe"+str(ioe)+".focal_x = "+str(oe1B.focal_x).strip()+"\n" template += "oe"+str(ioe)+".focal_z = "+str(oe1B.focal_z).strip()+"\n" else: memB = inspect.getmembers(oe1B) mem = inspect.getmembers(oe1) for i,var in enumerate(memB): ivar = mem[i] ivarB = memB[i] if ivar[0].isupper(): if isinstance(ivar[1],numpy.ndarray): # print(" are ALL different ? ", (ivar[1] != ivarB[1]).all()) # print(" are the same ? ", (ivar[1] == ivarB[1]).all()) # print(" there is at least ONE diff ? 
", not((ivar[1] == ivarB[1]).all())) if not( (ivar[1] == ivarB[1]).all()) : line = "oe"+str(ioe)+"."+ivar[0]+" = numpy.array("+str(ivarB[1].tolist())+ ")\n" template += line # if (ivar[1] != ivarB[1]).all(): # line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1])+"\n" # if ("SPECIFIED" in line): # pass # else: # template += line else: if ivar[1] != ivarB[1]: if isinstance(ivar[1],(str,bytes)): line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1]).strip()+"\n" #line = re.sub('\s{2,}', ' ',line) if "SPECIFIED" in line: pass else: template += line else: line = "oe"+str(ioe)+"."+ivar[0]+" = "+str(ivarB[1])+"\n" template += line template += """\n\n #Run SHADOW to create the source if iwrite: oe0.write("start.00") beam.genSource(oe0) if iwrite: oe0.write("end.00") beam.write("begin.dat") """ template_oeA = """\n # #run optical element {0} # print(" Running optical element: %d"%({0})) if iwrite: oe{0}.write("start.{1}") """ template_oeB = """\n if iwrite: oe{0}.write("end.{1}") beam.write("star.{1}") """ for i in range(1,n_elements): template += template_oeA.format(i,"%02d"%(i)) if isinstance(list_optical_elements[i],Shadow.OE): template += "\nbeam.traceOE(oe%d,%d)"%(i,i) elif isinstance(list_optical_elements[i],Shadow.IdealLensOE): template += "\nbeam.traceIdealLensOE(oe%d,%d)"%(i,i) template += template_oeB.format(i,"%02d"%(i)) # # display results (using ShadowTools, matplotlib needed) # template += """\n Shadow.ShadowTools.plotxy(beam,1,3,nbins=101,nolost=1,title="Real space") # Shadow.ShadowTools.plotxy(beam,1,4,nbins=101,nolost=1,title="Phase space X") # Shadow.ShadowTools.plotxy(beam,3,6,nbins=101,nolost=1,title="Phase space Z") """ if script_file != "": open(script_file, "wt").write(template) print("File written to disk: %s"%(script_file)) return template
5,355,505
def flax_tag(arr): """Wraps a value in a flax module, to inspect intermediate values.""" return arr
5,355,506
def toEpoch( dateTimeObject = None ): """ Get seconds since epoch """ if dateTimeObject is None: dateTimeObject = dateTime() return nativetime.mktime( dateTimeObject.timetuple() )
5,355,507
def sendNotification(token, title, message, extraData=None, channelID=None): """ send Notification to Devices :param token: :param title: :param message: :param extraData: optional extra data payload to attach :param channelID: optional Android notification channel ID :return: HTTP status code of the push request """ url = 'https://exp.host/--/api/v2/push/send' headers = { "Content-Type": "application/json" } data = { "to": token, "title": title, "body": message } # Verify we have Additional data to append if extraData is not None: data["data"] = extraData # Android Only! Verify if we have a channel ID and append it if channelID is not None: data["channelId"] = channelID res = requests.post(url, data=json.dumps(data), headers=headers) return res.status_code
5,355,508
def make_phsfct_kernel(size_px, dpx, g_fac): """ Make a kernel for phase function convolution :param size_px: :param dpx: [deg/px] :param g_fac: :return: ph_ker [deg] """ ke = np.mgrid[:size_px, :size_px] half = (size_px - 1) / 2 ke[0] -= half ke[1] -= half dist = np.sqrt(ke[0] * ke[0] + ke[1] * ke[1]) dist_deg = dist * dpx ph_ker = phasefunc(g_fac, dist_deg) # Fill radially with phase function # ph_ker = ph_ker/np.sum(ph_ker) ph_ker = ph_ker / (2. * np.pi) return ph_ker
5,355,509
def sample_pts_ellipsoid_surface(mu, Q, NB_pts, random=True): """ Uniformly samples points on the surface of an ellipsoid, specified as (xi-mu)^T Q^{-1} (xi-mu) == 1 arguments: mu - mean [dim] Q - Q [dim x dim] NB_pts - nb of points random - True: Uniform sampling. False: Uniform deterministic grid output: ell_pts - points on the boundary of the ellipse [xdim x NB_pts] """ dim = mu.shape[0] if dim != Q.shape[0] or dim != Q.shape[1]: raise ValueError("mu (%d) and Q (%d,%d) must be the same size" %(mu.shape[0], Q.shape[0], Q.shape[1])) if (Q == np.zeros((dim,dim))).all(): return np.zeros((dim,NB_pts)) if random == False and dim > 2: raise ValueError("sample_pts_ellipsoid_surface: non random sampling not implemented") mut = np.array([mu]) pts = sample_pts_unit_sphere(dim, NB_pts, random=random).T E = np.linalg.cholesky(Q) ell_pts = (mut + pts @ E.T).T return ell_pts
5,355,510
def load_mzml_path(): """Return the path to the mzML toy file. Parameters ---------- None Returns ------- path_data : str The path to the mzML data. Examples -------- >>> from specio.datasets import load_mzml_path >>> load_mzml_path() # doctest: +ELLIPSIS '...spectra.mzml' """ module_path = dirname(__file__) return join(module_path, 'data', 'spectra.mzml')
5,355,511
def setStrictCheckingFlag( newValue=True ): """ Set the strict checking flag. """ global strictCheckingFlag strictCheckingFlag = newValue dPrint( 'Verbose', debuggingThisModule, ' strictCheckingFlag =', strictCheckingFlag )
5,355,512
def _czce_df_read(url, skip_rows, encoding='utf-8', header=0): """ Web page data from the Zhengzhou Commodity Exchange (CZCE) :param header: :type header: :param url: website URL, string :param skip_rows: number of leading rows to skip, int :param encoding: utf-8 or gbk or gb2312 :return: pd.DataFrame """ headers = { "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36", "Host": "www.czce.com.cn", "Cookie": "XquW6dFMPxV380S=CAaD3sMkdXv3fUoaJlICIEv0MVegGq5EoMyBcxkOjCgSjmpuovYFuTLtYFcxTZGw; XquW6dFMPxV380T=5QTTjUlA6f6WiDO7fMGmqNxHBWz.hKIc8lb_tc1o4nHrJM4nsXCAI9VHaKyV_jkHh4cIVvD25kGQAh.MvLL1SHRA20HCG9mVVHPhAzktNdPK3evjm0NYbTg2Gu_XGGtPhecxLvdFQ0.JlAxy_z0C15_KdO8kOI18i4K0rFERNPxjXq5qG1Gs.QiOm976wODY.pe8XCQtAsuLYJ.N4DpTgNfHJp04jhMl0SntHhr.jhh3dFjMXBx.JEHngXBzY6gQAhER7uSKAeSktruxFeuKlebse.vrPghHqWvJm4WPTEvDQ8q", } r = requests_link(url, encoding, headers=headers) data = pd.read_html(r.text, match='.+', flavor=None, header=header, index_col=0, skiprows=skip_rows, attrs=None, parse_dates=False, thousands=', ', encoding="gbk", decimal='.', converters=None, na_values=None, keep_default_na=True) return data
5,355,513
def select(weights): """ select a node with probability proportional to its "weight" """ r = random.random() * sum(weights) s = 0.0 for k,w in enumerate(weights): s += w if r <= s: return k raise RuntimeError("select WTF from %s" % weights)
5,355,514
def kexo(spacecraft_id, sensor_id, band_id): """Sun exo-atmospheric irradiance [W/m2/sr] This is used for processing surface reflectance. Spacecraft_id: Landsat7 Sensor_id: ETM+ band_id: band1, band2, band3, band4, band5, band7, band8 Spacecraft_id: Terra Sensor_id: Aster band_id: band1, band2, band3, band4, band5, band7, band8, band9 kexo(spacecraft_id, sensor_id, band_id) """ if(spacecraft_id == "Landsat7"): if (sensor_id == "ETM+"): if(band_id == "band1"): kexo = 1969.0 if(band_id == "band2"): kexo = 1840.0 if(band_id == "band3"): kexo = 1551.0 if(band_id == "band4"): kexo = 1044.0 if(band_id == "band5"): kexo = 225.7 if(band_id == "band7"): kexo = 82.07 if(band_id == "band8"): kexo = 1385.64 # Self calculated value... else: kexo = 0.0 if(spacecraft_id == "Terra"): if (sensor_id == "Aster"): if(band_id == "band1"): kexo = 1828.0 if(band_id == "band2"): kexo = 1559.0 if(band_id == "band3"): kexo = 1045.0 if(band_id == "band4"): kexo = 226.73 if(band_id == "band5"): kexo = 86.50 if(band_id == "band7"): kexo = 74.72 if(band_id == "band8"): kexo = 66.41 if(band_id == "band9"): kexo = 59.83 else: kexo = 0.0 else: kexo = 0.0 else: kexo = 0.0 return kexo
5,355,515
def gen_profile_id(profile_id): """ Generates the Elasticsearch document id for a profile Args: profile_id (str): The username of a Profile object Returns: str: The Elasticsearch document id for this object """ return "u_{}".format(profile_id)
5,355,516
def test_netconf_get_config_subtree(nornir): """Test filter subtree of get_config.""" nr = nornir.filter(name="netconf1") assert nr.inventory.hosts result = nr.run( netconf_get_config, source="startup", path="<keystore xmlns='urn:ietf:params:xml:ns:yang:ietf-keystore'><asymmetric-keys><asymmetric-key><name></name></asymmetric-key></asymmetric-keys></keystore>", filter_type="subtree", xmldict=True, ) assert result["netconf1"].result["ok"] assert "keystore" in result["netconf1"].result["xml_dict"]["data"].keys() assert "netconf-server" not in result["netconf1"].result["xml_dict"]["data"].keys()
5,355,517
def check_config(): """ Check required fields are present in config. """ sections = [{'name': 'assembly', 'keys': ['accession', 'prefix', 'alias', 'span'], 'defaults': {'accession': 'draft', 'alias': '==prefix'}}, {'name': 'busco', 'keys': ['lineage_dir', 'lineages'], 'defaults': {'lineage_dir': 'busco_lineages', 'lineages': []}}, {'name': 'reads', 'keys': ['paired', 'single'], 'defaults': {'paired': [], 'single': []}}, {'name': 'settings', 'keys': ['blast_chunk', 'blast_max_chunks', 'blast_overlap', 'blobtools2_path', 'chunk', 'taxonomy', 'tmp'], 'defaults': {'blast_chunk': 100000, 'blast_max_chunks': 10, 'blast_overlap': 500, 'chunk': 1000000, 'tmp': '/tmp'}}, {'name': 'similarity', 'keys': ['databases', 'taxrule'], 'defaults': {'taxrule': 'eachdistorder'}}, {'name': 'taxon', 'keys': ['name', 'taxid'], 'defaults': {}}] similarity_defaults = {'evalue': 1e-25, 'mask_ids': [], 'max_target_seqs': 10, 'root': 1} container_defaults = { 'busco': {'lineage_dir': '/blobtoolkit/databases/busco'}, 'settings': {'blobtools2_path': '/blobtoolkit/blobtools2', 'blobtools_viewer_path': '/blobtoolkit/viewer', 'taxonomy': '/blobtoolkit/databases/ncbi_taxdump'}, 'similarity': { 'databases': [ {'local': '/blobtoolkit/databases/ncbi_db', 'name': 'nt'}, {'local': '/blobtoolkit/databases/uniprot_db', 'name': 'reference_proteomes'} ] } } if pathlib.Path('/blobtoolkit/databases/ncbi_db').exists(): # set some container specific defaults for section, defaults in container_defaults.items(): if section not in config: config[section] = {} for key, value in defaults.items(): if key not in config[section]: config[section].update({key: value}) container_version = os.environ.get('CONTAINER_VERSION') if container_version: config.update({'container': {}}) config['container'].update({'version': container_version}) optional = ['busco', 'reads'] for section in sections: if section['name'] not in config: if section['name'] in optional: print("INFO: optional section '%s' is not present in config file" % section['name'], file=sys.stderr) config[section['name']] = {} else: raise ConfigurationError("ERROR: config file must contain a '%s' section with keys '%s'" % (section['name'], ', '.join(section['keys']))) for key in section['keys']: if key not in config[section['name']]: if key in section['defaults']: value = section['defaults'][key] if isinstance(value, str) and value.startswith('=='): value = config[section['name']][value.replace('==', '')] print("INFO: using default value for '%s.%s'" % (section['name'], key), file=sys.stderr) print(value, file=sys.stderr) config[section['name']][key] = value else: raise ConfigurationError("ERROR: config file section '%s' must contain '%s'" % (section['name'], key)) # fill in additional database info if 'defaults' not in config['similarity'] or not config['similarity']['defaults']: config['similarity']['defaults'] = {} for key, value in similarity_defaults.items(): if key not in config['similarity']['defaults']: config['similarity']['defaults'].update({key: value}) for db in config['similarity']['databases']: if 'name' not in db or 'local' not in db: quit("ERROR: 'name' and 'local' must be specified for all databases") if db['name'] == 'nt' or db['name'] == 'nt_v5': db.update({'source': 'ncbi', 'tool': 'blast', 'type': 'nucl'}) elif db['name'] == 'reference_proteomes': db.update({'source': 'uniprot', 'tool': 'diamond', 'type': 'prot'}) else: print("INFO: only 'nt' and 'reference_proteomes' databases are supported, ignoring '%s'" % db['name'], file=sys.stderr) if not re.match(r'^\w+$', 
config['assembly']['prefix']): raise ConfigurationError("ERROR: assembly prefix '%s' contains non-word characters. Please use only letters, numbers and underscores." % config['assembly']['prefix']) for readset in config['reads']['single'] + config['reads']['paired']: if not re.match(r'^[a-zA-Z0-9]+$', readset[0]): raise ConfigurationError("ERROR: read file basename '%s' contains non-word characters. Please use only letters and numbers." % readset[0]) if '--use-singularity' in sys.argv: return True return False
5,355,518
async def get_self_info(credential: Credential): """ Get the current user's own account information. Args: credential (Credential): Credential """ api = API["info"]["my_info"] credential.raise_for_no_sessdata() return await request("GET", api["url"], credential=credential)
5,355,519
def workflow_workflows(ctx, sessions, _filter, output_format, access_token, show_all, verbose): """List all workflows user has.""" logging.debug('command: {}'.format(ctx.command_path.replace(" ", "."))) for p in ctx.params: logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p])) type = 'interactive' if sessions else 'batch' try: _url = current_rs_api_client.swagger_spec.api_url except MissingAPIClientConfiguration as e: click.secho( 'REANA client is not connected to any REANA cluster.', fg='red', err=True ) sys.exit(1) if not access_token: click.echo( click.style(ERROR_MESSAGES['missing_access_token'], fg='red'), err=True) sys.exit(1) if _filter: parsed_filters = parse_parameters(_filter) try: response = get_workflows(access_token, type, bool(verbose)) verbose_headers = ['id', 'user', 'size'] headers = { 'batch': ['name', 'run_number', 'created', 'status'], 'interactive': ['name', 'run_number', 'created', 'session_type', 'session_uri'] } if verbose: headers[type] += verbose_headers data = [] for workflow in response: if workflow['status'] == 'deleted' and not show_all: continue name, run_number = get_workflow_name_and_run_number( workflow['name']) workflow['name'] = name workflow['run_number'] = run_number if type == 'interactive': workflow['session_uri'] = format_session_uri( reana_server_url=ctx.obj.reana_server_url, path=workflow['session_uri'], access_token=access_token) data.append([str(workflow[k]) for k in headers[type]]) data = sorted(data, key=lambda x: int(x[1])) workflow_ids = ['{0}.{1}'.format(w[0], w[1]) for w in data] if os.getenv('REANA_WORKON', '') in workflow_ids: active_workflow_idx = \ workflow_ids.index(os.getenv('REANA_WORKON', '')) for idx, row in enumerate(data): if idx == active_workflow_idx: data[idx][headers[type].index('run_number')] += ' *' tablib_data = tablib.Dataset() tablib_data.headers = headers[type] for row in data: tablib_data.append(row=row, tags=row) if _filter: tablib_data, filtered_headers = \ filter_data(parsed_filters, headers[type], tablib_data) if output_format: click.echo(json.dumps(tablib_data)) else: tablib_data = [list(item.values()) for item in tablib_data] click_table_printer(filtered_headers, filtered_headers, tablib_data) else: if output_format: click.echo(tablib_data.export(output_format)) else: click_table_printer(headers[type], _filter, data) except Exception as e: logging.debug(traceback.format_exc()) logging.debug(str(e)) click.echo( click.style('Workflow list could not be retrieved: \n{}' .format(str(e)), fg='red'), err=True)
5,355,520
def app(request): """Testable flask application""" _app.config.from_mapping( TESTING=True, SECRET_KEY=os.environ.get('SECRET_KEY'), SQLALCHEMY_DATABASE_URI=os.getenv('TEST_DATABASE_URL'), SQLALCHEMY_TRACK_MODIFICATIONS=False, WTF_CSRF_ENABLED=False ) ctx = _app.app_context() ctx.push() def teardown(): """Cleans up and closes out test session.""" ctx.pop() request.addfinalizer(teardown) return _app
5,355,521
def _wct_test(name, srcs, split_index, split_count): """Macro to define single WCT suite Defines a private macro for a portion of test files with split_index. The actual split happens in test/tests.js file Args: name: name of generated sh_test srcs: source files split_index: index WCT suite. Must be less than split_count split_count: total number of WCT suites """ str_index = str(split_index) config_json = struct(splitIndex = split_index, splitCount = split_count).to_json() native.sh_test( name = name, size = "enormous", srcs = ["wct_test.sh"], args = [ "$(location @ui_dev_npm//web-component-tester/bin:wct)", config_json, ], data = [ "@ui_dev_npm//web-component-tester/bin:wct", ] + srcs, # Should not run sandboxed. tags = [ "local", "manual", ], )
5,355,522
def mpl_event_handler(event_type: MplEvent): """Marks the decorated method as given matplotlib event handler .. note:: This decorator should be used only for methods of classes that inherited from :class:`MplEventDispatcher` class. This decorator can be used for reassignment event handlers in a dispatcher class. Examples -------- .. code-block:: python from mpl_events import MplEventDispatcher, mpl_event_handler, mpl class MyEventDispatcher(MplEventDispatcher): @mpl_event_handler(MplEvent.KEY_PRESS) def on_my_key_press(self, event: mpl.KeyPress): pass """ class HandlerDescriptor: """Adds handler method name to event handlers mapping """ def __init__(self, handler): self.handler = handler def __get__(self, obj, cls=None): return self.handler.__get__(obj, cls) def __set_name__(self, owner, name): if 'mpl_event_handlers' not in owner.__dict__: owner.mpl_event_handlers = getattr(owner, 'mpl_event_handlers', {}).copy() owner.mpl_event_handlers[event_type] = name return HandlerDescriptor
5,355,523
def get_date_pairs(in_dates, step): """ Entry/exit point date data :param in_dates: all entry dates :param step: step size :return: list of DatePair(in_date, out_date) """ DatePair = namedtuple('DatePair', ['in_date', 'out_date']) date_pairs = [] for in_date in in_dates: out_date = date_utility.date_cal(in_date, step) date_pairs.append(DatePair(in_date, out_date)) return date_pairs
5,355,524
def new_jitters(jitter): """ update jitter vector every 100 frames by setting ~half of noise vector units to lower sensitivity """ jitters=np.zeros(128) for j in range(128): if random.uniform(0,1)<0.5: jitters[j]=1 else: jitters[j]=1-jitter return jitters
5,355,525
def test_owe_groups(dev, apdev): """Opportunistic Wireless Encryption - DH groups""" if "OWE" not in dev[0].get_capability("key_mgmt"): raise HwsimSkip("OWE not supported") params = { "ssid": "owe", "wpa": "2", "wpa_key_mgmt": "OWE", "rsn_pairwise": "CCMP" } hapd = hostapd.add_ap(apdev[0], params) bssid = hapd.own_addr() dev[0].scan_for_bss(bssid, freq="2412") for group in [ 19, 20, 21 ]: dev[0].connect("owe", key_mgmt="OWE", owe_group=str(group)) hwsim_utils.test_connectivity(dev[0], hapd) dev[0].request("REMOVE_NETWORK all") dev[0].wait_disconnected() dev[0].dump_monitor()
5,355,526
def reduce_jscs(file_line_mapping, **extra): """ Runs JSHCS on the project with the default configured rules. The output is reduced to only contain entries from the Git change set. :param file_line_mapping: Mapping of files with changed lines (obtained `get_git_line_sets()`). :param extra: Optional keyword arguments: `norules`: If true, omit verbose output of violated rule identifier (default: `False` to include rules). :return: A tuple containing the formatted string suitable for output and an integer containing the number of failed rules. """ norules = extra['norules'] if 'norules' in extra else False # Get the JSCS output. logging.info('Obtaining JSCS output ...') os.chdir(PROJECT_PATH) rules = config.JSHINT_RULES if not norules else '' command = config.JSCS_COMMAND.format(binary=config.JSCS_BIN, rules=rules) output = None try: output = subprocess.check_output(command.split()) except subprocess.CalledProcessError as ex: # JSCS found something, so it has returned an error code. # But we still want the output in the same fashion. output = ex.output output = output.decode('utf8').split('\n\n') # Go through output and collect only relevant lines to the result. result = ['\nJSCS output:\n============'] lines_expression = re.compile(r'^ +(\d+) |.*(?:\n|\r\n?)-', re.MULTILINE) file_expression = re.compile(r'^[^\b].* (?:\./)?(.+) :$', re.MULTILINE) for item in output: # Do the processing for every block here. line_no_candidates = lines_expression.findall(item, re.MULTILINE) # Check if we've got a relevant block. if line_no_candidates and '' in line_no_candidates: line_no = int(line_no_candidates[line_no_candidates.index('') - 1]) file_name = file_expression.findall(item)[0] file_name = tuple(re.split(PATH_SPLITTER, file_name)) # Check if the line is part of our selection list. if line_no in file_line_mapping[file_name]: result.append(item) # Add the number of errors and return in a nicely formatted way. error_count = len(result) - 1 result.append('\n{} code style errors found.'.format(error_count)) return '\n\n'.join(result), error_count
5,355,527
def add_manuscript_urls_to_ci_params(ci_params): """ Return and edit in-place the ci_params dictionary to include 'manuscript_url'. This function assumes Travis CI is used to deploy to GitHub Pages, while AppVeyor is used for storing manuscript artifacts for pull request builds. """ if not ci_params: return ci_params assert isinstance(ci_params, dict) provider = ci_params.get('provider') if provider == 'travis': ci_params['manuscript_url'] = ( "https://{repo_owner}.github.io/{repo_name}/v/{commit}/" .format(**ci_params) ) if provider == 'appveyor': ci_params['manuscript_url'] = f"{ci_params['build_url']}/artifacts" return ci_params
5,355,528
def station_stats(df): """Displays statistics on the most popular stations and trip.""" print('\nCalculating The Most Popular Stations and Trip...\n') start_time = time.time() # display most commonly used start station start_station = get_most_common_idx_and_val(df['Start Station']) # display most commonly used end station end_station = get_most_common_idx_and_val(df['End Station']) # display most frequent combination of start station and end station trip combination = [ df.groupby(['Start Station', 'End Station']).size().idxmax(), df.groupby(['Start Station', 'End Station']).size().max() ] print( "The Most Commonly Used Stations -\n" " Start Station : {0[0]}, count {0[1]}\n" " End Station : {1[0]}, count {1[1]}\n" " Start/End Station Combo : {2[0][0]}/{2[0][1]}, count {2[1]}" .format( start_station, end_station, combination ) ) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40)
5,355,529
def count_sites(vcfpath): """Extract number of sites in VCF from its tabix index.""" cmd = ["bcftools","index","--nrecords", vcfpath] so, se, code = slurp_command(cmd) return int(so)
5,355,530
def test_check_compatibility(qtbot, setup_reports, monkeypatch): """Test state and message returned by check_compatibility.""" monkeypatch.setattr('spyder_reports.reportsplugin.PYQT4', True) reports = setup_reports valid, message = reports.check_compatibility() assert not valid assert 'qt4' in message.lower()
5,355,531
def model_contrast_score(overlays: torch.Tensor, masks: torch.Tensor, object_labels: torch.Tensor, scene_labels: torch.Tensor, object_model: Callable, scene_model: Callable, object_method: Callable, scene_method: Callable, device: str): """ Model contrast score: Difference of importance of object pixels for model trained on object labels (should be important) and model trained on scene labels (should not be important) """ overlays = overlays.to(device) object_labels = object_labels.to(device) scene_labels = scene_labels.to(device) masks = masks.squeeze().to(device) # We check if both the object model and the scene model make the correct classification with torch.no_grad(): y_pred_obj = torch.argmax(object_model(overlays), dim=1) y_pred_scene = torch.argmax(scene_model(overlays), dim=1) correctly_classified = ((y_pred_obj == object_labels) & (y_pred_scene == scene_labels)) object_model_attrs = object_method(overlays, object_labels) scene_model_attrs = scene_method(overlays, scene_labels) mask_sizes = torch.sum(masks.flatten(1), dim=1) diffs = (object_model_attrs - scene_model_attrs) / mask_sizes return diffs.cpu(), correctly_classified.cpu()
5,355,532
def get_network_insights_access_scope_analysis(network_insights_access_scope_analysis_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInsightsAccessScopeAnalysisResult: """ Resource schema for AWS::EC2::NetworkInsightsAccessScopeAnalysis """ __args__ = dict() __args__['networkInsightsAccessScopeAnalysisId'] = network_insights_access_scope_analysis_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:ec2:getNetworkInsightsAccessScopeAnalysis', __args__, opts=opts, typ=GetNetworkInsightsAccessScopeAnalysisResult).value return AwaitableGetNetworkInsightsAccessScopeAnalysisResult( analyzed_eni_count=__ret__.analyzed_eni_count, end_date=__ret__.end_date, findings_found=__ret__.findings_found, network_insights_access_scope_analysis_arn=__ret__.network_insights_access_scope_analysis_arn, network_insights_access_scope_analysis_id=__ret__.network_insights_access_scope_analysis_id, start_date=__ret__.start_date, status=__ret__.status, status_message=__ret__.status_message, tags=__ret__.tags)
5,355,533
def learnable_eval( cfg: OmegaConf, classifier, encoder: ContrastiveModel, training_data_loader: DataLoader, val_data_loader: DataLoader, top_k: int, ) -> tuple: """ :param cfg: Hydra's config instance. :param classifier: Instance of classifier with learnable parameters. :param encoder: feature extractor trained on self-supervised method. :param training_data_loader: Training data loader for a downstream task. :param val_data_loader: Validation data loader for a downstream task. :param top_k: The number of top-k for evaluation. :return: tuple of train acc, train top-k acc, train loss, val acc, val top-k acc, and val loss. """ local_rank = cfg["distributed"]["local_rank"] epochs = cfg["experiment"]["epochs"] normalized = cfg["experiment"]["normalize"] cross_entropy_loss = torch.nn.CrossEntropyLoss() optimizer = torch.optim.SGD( params=classifier.parameters(), lr=cfg["optimizer"]["lr"], momentum=cfg["optimizer"]["momentum"], nesterov=True, weight_decay=cfg["optimizer"]["decay"] ) cos_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs, eta_min=0.) train_accuracies = [] train_top_k_accuracies = [] val_accuracies = [] val_top_k_accuracies = [] train_losses = [] val_losses = [] num_train = len(training_data_loader.dataset) num_val = len(val_data_loader.dataset) highest_val_acc = 0. encoder.eval() for epoch in range(1, epochs + 1): classifier.train() training_data_loader.sampler.set_epoch(epoch) # to shuffle dataset for x, y in training_data_loader: optimizer.zero_grad() with torch.no_grad(): rep = encoder(x.to(local_rank)) if normalized: rep = torch.nn.functional.normalize(rep, p=2, dim=1) # t is not used outputs = classifier(rep) loss = cross_entropy_loss(outputs, y.to(local_rank)) loss.backward() optimizer.step() cos_lr_scheduler.step() # train and val metrics train_acc, train_top_k_acc, train_loss = calculate_accuracies_loss( classifier, encoder, training_data_loader, local_rank, top_k=top_k, normalized=normalized ) torch.distributed.barrier() torch.distributed.reduce(train_acc, dst=0) torch.distributed.reduce(train_top_k_acc, dst=0) torch.distributed.reduce(train_loss, dst=0) val_acc, val_top_k_acc, val_loss = calculate_accuracies_loss( classifier, encoder, val_data_loader, local_rank, top_k=top_k, normalized=normalized ) torch.distributed.barrier() torch.distributed.reduce(val_acc, dst=0) torch.distributed.reduce(val_top_k_acc, dst=0) torch.distributed.reduce(val_loss, dst=0) if local_rank == 0: # NOTE: since drop=True, num_train is not approximate value train_losses.append(train_loss.item() / num_train) train_acc = train_acc.item() / num_train train_accuracies.append(train_acc) train_top_k_accuracies.append(train_top_k_acc.item() / num_train) val_losses.append(val_loss.item() / num_val) val_acc = val_acc.item() / num_val val_accuracies.append(val_acc) val_top_k_accuracies.append(val_top_k_acc.item() / num_val) current_lr = optimizer.param_groups[0]["lr"] current_progress = epoch / epochs logging.info(f"Epoch:{epoch}/{epochs} progress:{current_progress:.2f}, train acc.:{train_acc * 100.:.1f} " f"val acc.:{val_acc * 100.:.1f} lr:{current_lr:.4f}") if highest_val_acc < val_acc and local_rank == 0: # save best linear classifier on validation dataset highest_val_acc = val_acc # delete old checkpoint file if "save_fname" in locals(): if os.path.exists(save_fname): os.remove(save_fname) save_fname = "epoch_{}-{}".format(epoch, cfg["experiment"]["output_model_name"]) torch.save(classifier.state_dict(), save_fname) return train_accuracies, train_top_k_accuracies, 
train_losses, val_accuracies, val_top_k_accuracies, val_losses
5,355,534
def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None, epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2): """Return the electromagnetic wavenumber-domain field. Calculate the electromagnetic wavenumber-domain field due to infinitesimal small electric or magnetic dipole source(s), measured by infinitesimal small electric or magnetic dipole receiver(s); sources and receivers are directed along the principal directions x, y, or z, and all sources are at the same depth, as well as all receivers are at the same depth. See Also -------- dipole : Electromagnetic field due to an electromagnetic source (dipoles). bipole : Electromagnetic field due to an electromagnetic source (bipoles). fem : Electromagnetic frequency-domain response. tem : Electromagnetic time-domain response. Parameters ---------- src, rec : list of floats or arrays Source and receiver coordinates (m): [x, y, z]. The x- and y-coordinates can be arrays, z is a single value. The x- and y-coordinates must have the same dimension. The x- and y-coordinates only matter for the angle-dependent factor. Sources or receivers placed on a layer interface are considered in the upper layer. depth : list Absolute layer interfaces z (m); #depth = #res - 1 (excluding +/- infinity). res : array_like Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1. freq : array_like Frequencies f (Hz), used to calculate etaH/V and zetaH/V. wavenumber : array Wavenumbers lambda (1/m) ab : int, optional Source-receiver configuration, defaults to 11. +---------------+-------+------+------+------+------+------+------+ | | electric source | magnetic source | +===============+=======+======+======+======+======+======+======+ | | **x**| **y**| **z**| **x**| **y**| **z**| +---------------+-------+------+------+------+------+------+------+ | | **x** | 11 | 12 | 13 | 14 | 15 | 16 | + **electric** +-------+------+------+------+------+------+------+ | | **y** | 21 | 22 | 23 | 24 | 25 | 26 | + **receiver** +-------+------+------+------+------+------+------+ | | **z** | 31 | 32 | 33 | 34 | 35 | 36 | +---------------+-------+------+------+------+------+------+------+ | | **x** | 41 | 42 | 43 | 44 | 45 | 46 | + **magnetic** +-------+------+------+------+------+------+------+ | | **y** | 51 | 52 | 53 | 54 | 55 | 56 | + **receiver** +-------+------+------+------+------+------+------+ | | **z** | 61 | 62 | 63 | 64 | 65 | 66 | +---------------+-------+------+------+------+------+------+------+ aniso : array_like, optional Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res. Defaults to ones. epermH, epermV : array_like, optional Relative horizontal/vertical electric permittivities epsilon_h/epsilon_v (-); #epermH = #epermV = #res. Default is ones. mpermH, mpermV : array_like, optional Relative horizontal/vertical magnetic permeabilities mu_h/mu_v (-); #mpermH = #mpermV = #res. Default is ones. verb : {0, 1, 2, 3, 4}, optional Level of verbosity, default is 2: - 0: Print nothing. - 1: Print warnings. - 2: Print additional runtime and kernel calls - 3: Print additional start/stop, condensed parameter information. - 4: Print additional full parameter information Returns ------- PJ0, PJ1 : array Wavenumber-domain EM responses: - PJ0: Wavenumber-domain solution for the kernel with a Bessel function of the first kind of order zero. - PJ1: Wavenumber-domain solution for the kernel with a Bessel function of the first kind of order one. 
Examples -------- >>> import numpy as np >>> from empymod.model import wavenumber >>> src = [0, 0, 100] >>> rec = [5000, 0, 200] >>> depth = [0, 300, 1000, 1050] >>> res = [1e20, .3, 1, 50, 1] >>> freq = 1 >>> wavenrs = np.logspace(-3.7, -3.6, 10) >>> PJ0, PJ1 = wavenumber(src, rec, depth, res, freq, wavenrs, verb=0) >>> print(PJ0) [ -1.02638329e-08 +4.91531529e-09j -1.05289724e-08 +5.04222413e-09j -1.08009148e-08 +5.17238608e-09j -1.10798310e-08 +5.30588284e-09j -1.13658957e-08 +5.44279805e-09j -1.16592877e-08 +5.58321732e-09j -1.19601897e-08 +5.72722830e-09j -1.22687889e-08 +5.87492067e-09j -1.25852765e-08 +6.02638626e-09j -1.29098481e-08 +6.18171904e-09j] >>> print(PJ1) [ 1.79483705e-10 -6.59235332e-10j 1.88672497e-10 -6.93749344e-10j 1.98325814e-10 -7.30068377e-10j 2.08466693e-10 -7.68286748e-10j 2.19119282e-10 -8.08503709e-10j 2.30308887e-10 -8.50823701e-10j 2.42062030e-10 -8.95356636e-10j 2.54406501e-10 -9.42218177e-10j 2.67371420e-10 -9.91530051e-10j 2.80987292e-10 -1.04342036e-09j] """ # === 1. LET'S START ============ t0 = printstartfinish(verb) # === 2. CHECK INPUT ============ # Check layer parameters (isfullspace not required) modl = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV, False, verb) depth, res, aniso, epermH, epermV, mpermH, mpermV, _ = modl # Check frequency => get etaH, etaV, zetaH, and zetaV f = check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV, verb) freq, etaH, etaV, zetaH, zetaV = f # Check src-rec configuration # => Get flags if src or rec or both are magnetic (msrc, mrec) ab_calc, msrc, mrec = check_ab(ab, verb) # Check src and rec src, nsrc = check_dipole(src, 'src', verb) rec, nrec = check_dipole(rec, 'rec', verb) # Get angle-dependent factor off, angle = get_off_ang(src, rec, nsrc, nrec, verb) factAng = kernel.angle_factor(angle, ab, msrc, mrec) # Get layer number in which src and rec reside (lsrc/lrec) lsrc, zsrc = get_layer_nr(src, depth) lrec, zrec = get_layer_nr(rec, depth) # === 3. EM-FIELD CALCULATION ============ # If <ab> = 36 (or 63), field is zero # In `bipole` and in `dipole`, this is taken care of in `fem`. Here we # have to take care of it separately if ab_calc in [36, ]: PJ0 = np.zeros((freq.size, off.size, wavenumber.size), dtype=complex) PJ1 = PJ0.copy() else: # Regular calculation # Calculate wavenumber response PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV, np.atleast_2d(wavenumber), ab_calc, False, msrc, mrec, False) # Collect output PJ1 = factAng[:, np.newaxis]*PJ1 if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Because of J2 # J2(kr) = 2/(kr)*J1(kr) - J0(kr) PJ1 /= off[:, None] PJ0 = PJ0 + factAng[:, np.newaxis]*PJ0b # === 4. FINISHED ============ printstartfinish(verb, t0, 1) return np.squeeze(PJ0), np.squeeze(PJ1)
5,355,535
def gumbel_softmax(logits, temperature, dtype=tf.float32, seed=0): """Gumbel Softmax Layer.""" log_alpha = tf.nn.log_softmax(logits) eps = 1e-7 gumbel = -tf.log(-tf.log( tf.random_uniform( tf.shape(logits), minval=0, maxval=1 - eps, dtype=dtype, seed=seed) + eps)) prob = tf.nn.softmax((log_alpha + gumbel) / temperature) return prob
5,355,536
def divide(num1, num2=1): """ Division :param num1: int :param num2: int :return: float """ # add a check and raise a custom exception on division by zero if num2 == 0: raise InvalidOpreation() val = num1 / num2 return val
5,355,537
def convert2int(image): """ Transform from float tensor ([-1.,1.]) to int image ([-1024,6500]) """ return tf.image.convert_image_dtype((image + 1) * 2036 - 1000, tf.float32)
5,355,538
def test_stationary_component(): """ Integrated test which fits a single component to a synthetic association. Runtime on my mac (single thread) is ~ 20 mins. Check logs/groupfitter.log and temp_plots/*.png for progress. Takes about 10 mins single thread with C implementation of overlap or ~40 mins with python implementation of overlap """ # log_filename = 'logs/groupfitter_stationary.log' # synth_data_savefile = 'temp_data/groupfitter_stationary_synthdata.fits' short_burnin_step = 200 true_comp_mean = np.zeros(6) true_comp_dx = 2. true_comp_dv = 2. true_comp_covmatrix = np.identity(6) true_comp_covmatrix[:3,:3] *= true_comp_dx**2 true_comp_covmatrix[3:,3:] *= true_comp_dv**2 true_comp_age = 1e-10 true_comp = SphereComponent(attributes={ 'mean':true_comp_mean, 'covmatrix':true_comp_covmatrix, 'age':true_comp_age, }) nstars = 100 measurement_error = 1e-10 best_comp, chain, lnprob = run_fit_helper( true_comp=true_comp, starcounts=nstars, measurement_error=measurement_error, run_name='stationary', burnin_step=short_burnin_step, trace_orbit_func=dummy_trace_orbit_func, ) np.save('temp_data/{}_groupfitter_stationary_' \ 'true_and_best_comp.npy'.format(PY_VERS), [true_comp, best_comp],) assert np.allclose(true_comp.get_mean(), best_comp.get_mean(), atol=1.0) assert np.allclose(true_comp.get_age(), best_comp.get_age(), atol=1.0) assert np.allclose(true_comp.get_covmatrix(), best_comp.get_covmatrix(), atol=2.0)
5,355,539
def main(args): """Main entry point""" args.archive = expand_path(args.archive) args.files = expand_path(args.files) def additions(search_path): """Generate a list of (lpath, arcname) for writing to zip-file""" aname = Path(args.archive).stem for root, _, files in os.walk(search_path): for fname in files: fpath = os.path.join(root, fname) arcname = fpath.replace( search_path, "/".join([aname, 'subprojects']) ) yield fpath, arcname with zipfile.ZipFile(args.archive, 'a') as zfile: listing = zfile.namelist() for lpath, arcname in additions(args.files): if arcname in listing: print(f"skipping: {lpath} {arcname}") continue zfile.write(lpath, arcname) return 0
5,355,540
def execution_duration(fun): """ Calculates the duration the function 'fun' takes to execute. execution_duration returns a wrapper function to which you pass your arguments. Example: execution_duration(my_function)(my_first_param, my_second_param) The result of the wrapper function will be a tuple, where the first value is the return value of your function and the second is the execution time in seconds expressed as a float. """ def wrapper(*args, **kwargs): t1 = time.time() result = fun(*args, **kwargs) exec_dur = time.time() - t1 return result, exec_dur return wrapper
5,355,541
def return_origin_and_destination(): """Return origin and destination from session's waypoints key.""" waypoints = session['waypoints'] if len(waypoints) <= 1: return 'Please enter at least 2 destinations for your trip.' else: origin = session['waypoints'][0] destination = session['waypoints'][-1] data = { "origin": origin, "destination": destination } return jsonify(data)
5,355,542
def attempt_move(piece): """ Attempts to make a move if the target coordinate is a legal move. Returns: True if the move is made, False otherwise """ x, y = pygame.mouse.get_pos() x = x // 100 y = y // 100 if (piece is not None) and (x, y) in piece.legal_moves: piece.move(the_board, x, y) initialize_moves() update_moves() return True return False
5,355,543
def _plot_thresholds(thresholds, ax): """ Plot horizontal lines with threshold levels """ if "WhoDaily" in thresholds: ax.axhline(y=thresholds["WhoDaily"], color='red', linestyle='--', label="WHO daily threshold", linewidth=1) if "WhoYearly" in thresholds: ax.axhline(y=thresholds["WhoYearly"], color='magenta', linestyle='--', label="WHO yearly threshold", linewidth=1) if "ItaDaily" in thresholds: ax.axhline(y=thresholds["ItaDaily"], color='red', linestyle=':', label="Italian daily threshold", linewidth=1) if "ItaYearly" in thresholds: ax.axhline(y=thresholds["ItaYearly"], color='magenta', linestyle=':', label="Italian yearly threshold", linewidth=1)
5,355,544
def give(user_id, text, group): """construct a message to be sent that mentions a user, which is surprisingly complicated with GroupMe""" nickname = group.members().filter(user_id=user_id).first.nickname mention = attachments.Mentions([user_id], [[0, len(nickname)+1]]).as_dict() message = '@{} {}'.format(nickname, text) return (message, mention)
5,355,545
def integrate_audio_feat(features, audio_h5, mxm2msd): """ """ # TODO: this part should be moved to MFCC feature extraction # and stored in the feature file for better integrity n_coeffs = 40 audio_feat_cols = ( ['mean_mfcc{:d}'.format(i) for i in range(n_coeffs)] + ['var_mfcc{:d}'.format(i) for i in range(n_coeffs)] + ['mean_dmfcc{:d}'.format(i) for i in range(n_coeffs)] + ['var_dmfcc{:d}'.format(i) for i in range(n_coeffs)] + ['mean_ddmfcc{:d}'.format(i) for i in range(n_coeffs)] + ['var_ddmfcc{:d}'.format(i) for i in range(n_coeffs)] ) with h5py.File(audio_h5, 'r') as hf: tid2row = {tid:i for i, tid in enumerate(hf['tids'][:])} feats = [] for mxmid in corpus.ids: tid = mxm2msd[mxmid] if tid in tid2row: feats.append(hf['feature'][tid2row[tid]][None]) else: feats.append(np.zeros((1, len(audio_feat_cols)))) audio_feat = np.concatenate(feats, axis=0) # idx = [tid2row[mxm2msd[mxmid]] for mxmid in corpus.ids] # audio_feat = hf['feature'][idx] features['audio'] = TextFeature( 'mfcc', corpus.ids, audio_feat, audio_feat_cols ) return features
5,355,546
def retrieve_submodule(): """Fixture to get all the CONP datasets before a test.""" pytest.datasets = {x.path for x in git.Repo(".").submodules} yield
5,355,547
def make_annotation_loader_factory(): """Generate a factory function for constructing annotation loaders. Invoke the returned factory function by passing the name of the annotation loader class you want to construct, followed by the parameters for the constructor as named arguments (e.g., factory('FourCornersCSV', annotations_file=...)) """ return AnnotationLoaderLoader().loader.make_object_factory()
5,355,548
def test_arma(): """arma, check that rho is correct (appendix 10.A )and reproduce figure 10.2""" a,b, rho = arma_estimate(marple_data, 20, 20, 40) psd = arma2psd(A=a,B=b, rho=rho, NFFT=None) psd = arma2psd(A=a,B=b, rho=rho) try: psd = arma2psd(A=None, B=None, rho=rho) assert False except: assert True return psd
5,355,549
def chunks_lists_to_tuples(level: Union[list, int, float]) -> Union[tuple, int, float]: """Convert a recursive list of lists of ints into a tuple of tuples of ints. This is a helper function needed because MongoDB automatically converts tuples to lists, but the dask constructor wants the chunks defined strictly as tuples. e.g. - input: ``[[1, 2], [3, 4]]`` - output: ``((1, 2), (3, 4))`` .. note:: float data type is supported to allow for NaN-sized dask chunks """ if isinstance(level, list): return tuple(chunks_lists_to_tuples(i) for i in level) if isinstance(level, (int, float)): return level raise TypeError(level)
5,355,550
def oid_pattern_specificity(pattern): # type: (str) -> Tuple[int, Tuple[int, ...]] """Return a measure of the specificity of an OID pattern. Suitable for use as a key function when sorting OID patterns. """ wildcard_key = -1 # Must be less than all digits, so that e.g. '1.*' is less specific than '1.n' for n = 0...9. parts = tuple(wildcard_key if digit == '*' else int(digit) for digit in pattern.lstrip('.').split('.')) return ( len(parts), # Shorter OIDs are less specific than longer OIDs, regardless of their contents. parts, # For same-length OIDs, compare their contents (integer parts). )
5,355,551
def test_pearl_ml10(): """Test pearl_ml10.py""" assert subprocess.run([ EXAMPLES_ROOT_DIR / 'torch/pearl_ml10.py', '--num_epochs', '1', '--num_train_tasks', '1', '--num_test_tasks', '1', '--encoder_hidden_size', '1', '--net_size', '2', '--num_steps_per_epoch', '2', '--num_initial_steps', '2', '--num_steps_prior', '1', '--num_extra_rl_steps_posterior', '1', '--batch_size', '2', '--embedding_batch_size', '1', '--embedding_mini_batch_size', '1', '--max_path_length', '1' ], check=False).returncode == 0
5,355,552
def plot_metrics(history, path): """ # Notes Plots the metrics of the history of a classifier. # Arguments - history: history created from calling clf.fit(X, Y) of a classifier. - path: string representing the path where to save this figure. # Source https://www.tensorflow.org/tutorials/structured_data/imbalanced_data """ colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] metrics = ['loss', 'auc', 'precision', 'recall'] for n, metric in enumerate(metrics): name = metric.replace("_", " ").capitalize() plt.subplot(2, 2, n + 1) plt.plot(history.epoch, history.history[metric], color=colors[0], label='Train') plt.plot(history.epoch, history.history['val_' + metric], color=colors[0], linestyle="--", label='Val') plt.xlabel('Epoch') plt.xticks(np.arange(0, 30, step=5)) plt.ylabel(name) if metric == 'loss': plt.ylim([0, plt.ylim()[1]]) elif metric == 'auc': plt.ylim([0.8, 1]) else: plt.ylim([0, 1]) plt.legend() plt.savefig(path + os.sep + 'images' + os.sep + metric + '.png')
5,355,553
def bucket_list(ctx, namespace, compartment_id, limit, page): """ Lists the `BucketSummary`s in a namespace. A `BucketSummary` contains only summary fields for the bucket and not fields such as the user-defined metadata. Example: bmcs os bucket list -ns mynamespace --compartment-id ocid1.compartment.oc1..aaaaaaaarhifmvrvuqtye5q65flzp3pp2jojdc6rck6copzqck3ukcypxfga """ client = build_client('os', ctx) kwargs = { 'opc_client_request_id': ctx.obj['request_id'], 'limit': limit } if page is not None: kwargs['page'] = page render_response(client.list_buckets(namespace, compartment_id, **kwargs))
5,355,554
def import_folder(): """ This test will build a H2O frame from importing the bigdata/laptop/parser/orc/airlines_05p_orc_csv from and build another H2O frame from the multi-file orc parser using multiple orc files that are saved in the directory bigdata/laptop/parser/orc/airlines_05p_orc. It will compare the two frames to make sure they are equal. :return: None if passed. Otherwise, an exception will be thrown. """ startcsv = time.time() multi_file_csv = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/parser/orc/pubdev_3200/air05_csv"), na_strings=['\\N']) endcsv = time.time() csv_type_dict = multi_file_csv.types multi_file_csv.summary() csv_summary = h2o.frame(multi_file_csv.frame_id)["frames"][0]["columns"] col_ind_name = dict() # change column types from real to enum according to multi_file_csv column types for key_name in list(csv_type_dict): col_ind = key_name.split('C') new_ind = int(str(col_ind[1]))-1 col_ind_name[new_ind] = key_name col_types = [] for ind in range(len(col_ind_name)): col_types.append(csv_type_dict[col_ind_name[ind]]) startorc1 = time.time() multi_file_orc1 = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/parser/orc/pubdev_3200/air05_orc")) endorc1 = time.time() h2o.remove(multi_file_orc1) startorc = time.time() multi_file_orc = h2o.import_file(path=pyunit_utils.locate("bigdata/laptop/parser/orc/pubdev_3200/air05_orc"), col_types=col_types) endorc = time.time() multi_file_orc.summary() orc_summary = h2o.frame(multi_file_orc.frame_id)["frames"][0]["columns"] print("************** CSV parse time is {0}".format(endcsv-startcsv)) print("************** ORC (without column type forcing) parse time is {0}".format(endorc1-startorc1)) print("************** ORC (with column type forcing) parse time is {0}".format(endorc-startorc)) # compare frame read by orc by forcing column type, pyunit_utils.compare_frame_summary(csv_summary, orc_summary)
5,355,555
def pronunciation_assessment_continuous_from_file(question_num): """performs continuous speech recognition asynchronously with input from an audio file""" import difflib import json # Creates an instance of a speech config with specified subscription key and service region. # Replace with your own subscription key and service region (e.g., "westus"). # Note: The pronunciation assessment feature is currently only available on en-US language. speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region) audio_config = speechsdk.audio.AudioConfig(filename=weatherfilename) if question_num == 1: reference_text = "I have a fox." elif question_num == 2: reference_text = "I have a box." elif question_num == 3: reference_text = "I have a vase." else: reference_text = "My voice is my passport verify me." # create pronunciation assessment config, set grading system, granularity and if enable miscue based on your requirement. enable_miscue = True pronunciation_config = speechsdk.PronunciationAssessmentConfig(reference_text=reference_text, grading_system=speechsdk.PronunciationAssessmentGradingSystem.HundredMark, granularity=speechsdk.PronunciationAssessmentGranularity.Phoneme, enable_miscue=enable_miscue) # Creates a speech recognizer using a file as audio input. # The default language is "en-us". speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config) # apply pronunciation assessment config to speech recognizer pronunciation_config.apply_to(speech_recognizer) done = False recognized_words = [] accuracy_scores = [] fluency_scores = [] durations = [] def stop_cb(evt): """callback that signals to stop continuous recognition upon receiving an event `evt`""" #print('CLOSING on {}'.format(evt)) nonlocal done done = True def recognized(evt): print('pronunciation assessment for: {}'.format(evt.result.text)) pronunciation_result = speechsdk.PronunciationAssessmentResult(evt.result) print(' Accuracy score: {}, pronunciation score: {}, completeness score : {}, fluency score: {}'.format( pronunciation_result.accuracy_score, pronunciation_result.pronunciation_score, pronunciation_result.completeness_score, pronunciation_result.fluency_score )) scores.append(pronunciation_result.accuracy_score) scores.append(pronunciation_result.pronunciation_score) scores.append(pronunciation_result.completeness_score) scores.append(pronunciation_result.fluency_score) nonlocal recognized_words, accuracy_scores, fluency_scores, durations recognized_words += pronunciation_result.words accuracy_scores.append(pronunciation_result.accuracy_score) fluency_scores.append(pronunciation_result.fluency_score) json_result = evt.result.properties.get(speechsdk.PropertyId.SpeechServiceResponse_JsonResult) jo = json.loads(json_result) nb = jo['NBest'][0] durations.append(sum([int(w['Duration']) for w in nb['Words']])) # Connect callbacks to the events fired by the speech recognizer speech_recognizer.recognized.connect(recognized) #speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt))) #speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt))) #speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt))) # stop continuous recognition on either session stopped or canceled events speech_recognizer.session_stopped.connect(stop_cb) speech_recognizer.canceled.connect(stop_cb) # Start continuous pronunciation assessment speech_recognizer.start_continuous_recognition() while not done: 
time.sleep(.5) speech_recognizer.stop_continuous_recognition() # We can calculate whole accuracy and fluency scores by duration weighted averaging accuracy_score = sum(i[0] * i[1] for i in zip(accuracy_scores, durations)) / sum(durations) fluency_score = sum(i[0] * i[1] for i in zip(fluency_scores, durations)) / sum(durations) # we need to convert the reference text to lower case, and split to words, then remove the punctuations. reference_words = [w.strip(string.punctuation) for w in reference_text.lower().split()] # For continuous pronunciation assessment mode, the service won't return the words with `Insertion` or `Omission` # even if miscue is enabled. # We need to compare with the reference text after received all recognized words to get these error words. if enable_miscue: diff = difflib.SequenceMatcher(None, reference_words, [x.word for x in recognized_words]) final_words = [] for tag, i1, i2, j1, j2 in diff.get_opcodes(): if tag == 'insert': for word in recognized_words[j1:j2]: if word.error_type == 'None': word._error_type = 'Insertion' final_words.append(word) elif tag == 'delete': for word_text in reference_words[i1:i2]: word = speechsdk.PronunciationAssessmentWordResult({ 'Word': word_text, 'PronunciationAssessment': { 'ErrorType': 'Omission', } }) final_words.append(word) else: final_words += recognized_words[j1:j2] else: final_words = recognized_words # Calculate whole completeness score completeness_score = len([w for w in final_words if w.error_type == 'None']) / len(reference_words) * 100 print(' Paragraph accuracy score: {}, completeness score: {}, fluency score: {}'.format( accuracy_score, completeness_score, fluency_score )) for idx, word in enumerate(final_words): print(' {}: word: {}\taccuracy score: {}\terror type: {};'.format( idx + 1, word.word, word.accuracy_score, word.error_type ))
5,355,556
def extract_entities(text, json_=None):
    """
    Extract entities from a given text using metamap and generate a json,
    preserving info regarding the sentence of each entity that was found.
    For the time being, we preserve both concepts and the entities related to them

    Input:
        - text: str,
        a piece of text or sentence
        - json_: dict,
        sometimes the json to be returned is given to us to be enriched
        Defaults to None, in which case an empty json is created
    Output:
        - json_: dict,
        json with fields text, sents, concepts and entities
        containing the final results
    """
    # Avoid a mutable default argument: a shared dict default would silently
    # accumulate entries across calls.
    if json_ is None:
        json_ = {}
    json_['text'] = text
    # Tokenize the text
    sents = sent_tokenize(text)
    json_['sents'] = [{'sent_id': i, 'sent_text': sent} for i, sent in enumerate(sents)]
    json_['concepts'], _ = mmap_extract(text)
    json_['entities'] = {}
    for i, sent in enumerate(json_['sents']):
        ents = metamap_ents(sent)
        json_['entities'][sent['sent_id']] = ents
    return json_
5,355,557
def gaul_as_df(gaul_path): """ Load the Gaussian list output by PyBDSF as a pd.DataFrame Args: gaul_path (`str`): Path to Gaussian list (.gaul file) """ gaul_df = pd.read_csv( gaul_path, skiprows=6, names=GAUL_COLUMNS, delim_whitespace=True, ) return gaul_df
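# Usage sketch for gaul_as_df, assuming pandas (pd) and GAUL_COLUMNS are
# available at module level as in the function above; the .gaul path is
# hypothetical.
gaul = gaul_as_df("my_image.pybdsf.gaul")
print(gaul.head())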
5,355,558
def dump(): """Displays the state of memory/CPU""" print(end='\n') for row in range(10): for col in range(10): address = str(row * 10 + col).rjust(2) numeric = '[' + str(readMem(int(address))).ljust(3) + ']' print(address + numeric, end=" ") print(end='\n') pc_str = 'PC['+ str(readPC()).ljust(2) +']' acc_str = 'ACC[' + str(readAccum()).ljust(3) + ']' instr_str = toAssembly(readMem(readPC())) print(' ' * 31, pc_str, acc_str, instr_str, end='\n\n') print('In box:', str(inbox)) print('Out box:', str(outbox))
5,355,559
def setup(bot): """Set up the Blood on the Clocktower extension.""" # set up persistent botc town square category settings bot.botc_townsquare_settings = DiscordIDSettings( bot, "botc_townsquare", BOTC_CATEGORY_DEFAULT_SETTINGS ) # set up town square object bot.botc_townsquare = BOTCTownSquare(bot) bot.add_cog(BOTCTownSquareSetup(bot)) bot.add_cog(BOTCTownSquareStorytellers(bot)) bot.add_cog(BOTCTownSquarePlayers(bot)) bot.add_cog(BOTCTownSquareManage(bot))
5,355,560
def psql(riemann_host, riemann_port, sqlquery): """monitor query from a postgresql database""" logging.basicConfig(level=logging.INFO) logger.info("version %s starting", util.get_version()) util.watch_report_loop( lambda: bernhard.Client(riemann_host, riemann_port), functools.partial(watch_psql, sqlquery), 10, )
5,355,561
def normalize_matrix(mat, dim=3, p=2): """Normalize matrix. Args: mat: matrix dim: dimension p: p value for norm Returns: normalized matrix """ mat_divided = F.normalize(mat, p=p, dim=dim) return mat_divided
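# Minimal usage sketch for normalize_matrix, assuming the module imports torch
# and torch.nn.functional as F (as the function body requires). With the
# default dim=3 the input is expected to have at least 4 dimensions.
import torch

batch = torch.rand(2, 3, 4, 8)           # e.g. (batch, heads, rows, features)
unit_rows = normalize_matrix(batch)      # L2-normalize along dim=3
print(unit_rows.norm(p=2, dim=3))        # each entry should be ~1.0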
5,355,562
def _AllowObjectAccess(sid, handle, object_type: int, access_permissions: int) -> None:
    """Allows access to an object by handle.

    Args:
        sid: A `PySID` representing the SID to grant access to.
        handle: A handle to an object.
        object_type: A `SE_OBJECT_TYPE` enum value.
        access_permissions: The permissions as a set of bitflags using the
            `ACCESS_MASK` format.
    """
    info = win32security.GetSecurityInfo(handle, object_type,
                                         win32security.DACL_SECURITY_INFORMATION)
    dacl = info.GetSecurityDescriptorDacl()
    _AddPermissionToDacl(dacl, sid, access_permissions)

    win32security.SetSecurityInfo(handle, object_type,
                                  win32security.DACL_SECURITY_INFORMATION, None,
                                  None, dacl, None)
5,355,563
def species_to_parameters(species_ids: List[str],
                          sbml_model: 'libsbml.Model') -> List[str]:
    """
    Turn SBML species into parameters and replace species references
    inside the model instance.

    :param species_ids:
        List of SBML species IDs to convert to parameters with the same ID
        as the replaced species.

    :param sbml_model:
        SBML model to modify

    :return:
        List of IDs of species which have been converted to parameters
    """
    transformables = []

    for species_id in species_ids:
        species = sbml_model.getSpecies(species_id)

        if species.getHasOnlySubstanceUnits():
            logger.warning(
                f"Ignoring {species.getId()} which has only substance units."
                " Conversion not yet implemented.")
            continue

        if math.isnan(species.getInitialConcentration()):
            logger.warning(
                f"Ignoring {species.getId()} which has no initial "
                "concentration. Amount conversion not yet implemented.")
            continue

        transformables.append(species_id)

    # Must not remove species while iterating over getListOfSpecies()
    for species_id in transformables:
        species = sbml_model.removeSpecies(species_id)
        par = sbml_model.createParameter()
        par.setId(species.getId())
        par.setName(species.getName())
        par.setConstant(True)
        par.setValue(species.getInitialConcentration())
        par.setUnits(species.getUnits())

    # Remove the former species from reactants, products and modifiers
    for reaction in sbml_model.getListOfReactions():
        for species_id in transformables:
            # loop, since removeX only removes one instance
            while reaction.removeReactant(species_id):
                # remove from reactants
                pass
            while reaction.removeProduct(species_id):
                # remove from products
                pass
            while reaction.removeModifier(species_id):
                # remove from modifiers
                pass

    return transformables
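# Hedged usage sketch for species_to_parameters: 'model.xml' and the species
# IDs are hypothetical; math and logger are assumed to be set up at module
# level as the function requires.
import libsbml

doc = libsbml.readSBML("model.xml")
model = doc.getModel()
converted = species_to_parameters(["substrate", "product"], model)
print("converted species:", converted)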
5,355,564
def test_build_artifacts_invokes_docker_commands(mocker):
    """
    Validate that the docker-compose commands are executed with the valid parameters.
    Since the docker-compose file was dynamically generated, we must pass the full
    path of that file to the docker-compose command. Also, set the context of the
    execution to the current path.
    """

    tmp_filename = '/var/folders/xw/yk2rrhks1w72y0zr_7t7b851qlt8b3/T/tmp52bd77s3'
    mock_builder = mocker.patch('juniper.actions.build_compose', return_value=tmp_filename)

    # Mocking the dependencies of this action. These three high level packages are
    # needed to invoke docker-compose in the right context!
    mocker.patch('juniper.actions.os')
    mocker.patch('juniper.actions.shutil')
    mock_subprocess_run = mocker.patch('juniper.actions.subprocess.run')

    compose_cmd_calls = [
        mocker.call(["docker-compose", "-f", tmp_filename, '--project-directory', '.', 'down']),
        mocker.call(["docker-compose", "-f", tmp_filename, '--project-directory', '.', 'up'])
    ]

    processor_ctx = reader('./tests/manifests/processor-test.yml')
    actions.build_artifacts(logger, processor_ctx)

    mock_subprocess_run.assert_has_calls(compose_cmd_calls)
    mock_builder.assert_called_once()
5,355,565
def test_fact_name(strings, test_fact_empty_fx): """ Test Fact.name getter/setter """ test_fact = test_fact_empty_fx test_fact.name = strings assert test_fact_empty_fx.name == strings
5,355,566
def gaussian_filter_cv(array: np.ndarray, sigma) -> np.ndarray: """ Apply a Gaussian filter to a raster that may contain NaNs, using OpenCV's implementation. Arguments are for now hard-coded to be identical to scipy. N.B: kernel_size is set automatically based on sigma :param array: the input array to be filtered. :param sigma: the sigma of the Gaussian kernel :returns: the filtered array (same shape as input) """ # Check that array dimension is 2, or can be squeezed to 2D orig_shape = array.shape if len(orig_shape) == 2: pass elif len(orig_shape) == 3: if orig_shape[0] == 1: array = array.squeeze() else: raise NotImplementedError("Case of array of dimension 3 not implemented") else: raise ValueError( f"Invalid array shape given: {orig_shape}. Expected 2D or 3D array" ) # In case array does not contain NaNs, use OpenCV's gaussian filter directly # With kernel size (0, 0), i.e. set to default, and borderType=BORDER_REFLECT, the output is equivalent to scipy if np.count_nonzero(np.isnan(array)) == 0: gauss = cv.GaussianBlur(array, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT) # If array contain NaNs, need a more sophisticated approach # Inspired by https://stackoverflow.com/a/36307291 else: # Run filter on a copy with NaNs set to 0 array_no_nan = array.copy() array_no_nan[np.isnan(array)] = 0 gauss_no_nan = cv.GaussianBlur(array_no_nan, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT) del array_no_nan # Mask of NaN values nan_mask = 0 * array.copy() + 1 nan_mask[np.isnan(array)] = 0 gauss_mask = cv.GaussianBlur(nan_mask, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT) del nan_mask with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="invalid value encountered") gauss = gauss_no_nan / gauss_mask return gauss.reshape(orig_shape)
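# Usage sketch for gaussian_filter_cv, assuming the module imports numpy as np,
# cv2 as cv and warnings, as the function body requires; the data is synthetic.
import numpy as np

dem = np.random.rand(100, 100).astype(np.float32)
dem[40:45, 40:45] = np.nan                 # introduce a NaN patch
smoothed = gaussian_filter_cv(dem, sigma=2)
print(smoothed.shape)                      # same shape as the input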
5,355,567
def plot_eigval_zero_crossings(data_path:str, normalize:bool = False): """Plots scatterplot of eigenvalue magnitude with the number of zero-crossings of the corresponding eigenmode, for each ReferenceModel. Args: - data_path: Diagnostic data path from which to draw the metrics. - normalize: Whether to normalize eigenvalues by the maximum eigenvalue in the ReferenceModel. """ config_names = dt.ncFetch(data_path, 'reference', 'config_name') d_csum = np.cumsum(dt.ncFetch(data_path, 'reference', 'config_pca_dim')) d_csum = np.insert(d_csum, 0, 0) gamma = dt.ncFetch(data_path, 'reference', 'Gamma') var_dof = dt.ncFetch(data_path, 'reference', 'var_dof') num_vars = dt.ncFetch(data_path, 'reference', 'num_vars') config_dofsum = np.cumsum(np.multiply(num_vars, var_dof)) config_dofsum = np.insert(config_dofsum, 0, 0) P_pca = dt.ncFetch(data_path, 'reference', 'P_pca') tab_colors = get_tab_colors() title = 'zero_crossings' if normalize: title = title+'_normalized' plt.figure(title) # Loop over configs for (c, (config_dofs, d_cs, config_name)) in enumerate( zip(config_dofsum, d_csum, config_names)): # PCA matrix for this configuration modes = P_pca[d_cs:d_csum[c+1], config_dofs:config_dofsum[c+1]] # Number of principal modes max_modes = d_csum[c+1] - d_cs zero_crossing_num = np.zeros(max_modes) for m in range(max_modes): # Leading eigenvectors are last in julia eig(), so reverse order mode = modes[max_modes - 1 - m, :] zero_crossings = np.where(np.diff(np.sign(mode)))[0] zero_crossing_num[m] = len(zero_crossings) # Get eigenvalues and order them eigvals_ = np.array(sorted(np.diagonal(gamma[d_cs:d_csum[c+1], d_cs:d_csum[c+1]]), reverse=True)) if normalize: den = eigvals_[0] else: den = 1.0 plt.plot(zero_crossing_num, eigvals_/den, color=tab_colors[c], label=config_name) plt.yscale('log') # Decorate plot plt.xlabel(r'Zero crossings') plt.ylabel(r'Eigenvalue') plt.legend(frameon=False) plt.tight_layout() # Save plot and update index plt.savefig(title+'.pdf', format='pdf') return
5,355,568
def save_controller(controller: Controller, filename: str) -> None:
    """Saves the controller in json format into the specified file

    Parameters
    ----------
    controller : Controller
        the controller to save into the file
    filename : str
        the path to the json file

    Returns
    -------
    None
    """
    with open(filename, 'w') as f:
        items_list = []
        for item in controller.items.values():
            item_dict = {
                "name": item.name,
                "id": item.id,
                "rarity": item.rarity}
            items_list.append(item_dict)
        banners_list = []
        for banner in controller.banners.values():
            banner_dict = {
                "name": banner.name,
                "id": banner.id,
                "items": [{"id": item.id} for item in banner.items],
                "price": banner.price,
                "key": banner.key}
            banners_list.append(banner_dict)
        players_list = []
        for player in controller.players.values():
            player_dict = {
                "name": player.name,
                "id": player.id,
                "money": player.money,
                "items": [{"id": item.id} for item in player.items]}
            players_list.append(player_dict)
        controller_dict = {
            "items": items_list, "banners": banners_list, "players": players_list}
        json.dump(controller_dict, f)
5,355,569
def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    # getfullargspec replaces inspect.getargspec (removed in Python 3.11) and
    # exposes the same .args attribute used below.
    argspec = inspect.getfullargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)

        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        update_instance_cache_with_nw_info(self, context, instance,
                                           nw_info=res)

        # return the original function's return value
        return res
    return wrapper
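# Illustrative sketch of how the refresh_cache decorator above is applied; the
# API class and its return value are hypothetical, but the (self, context,
# instance) argument layout matches what the wrapper expects.
class HypotheticalNetworkAPI(object):

    @refresh_cache
    def allocate_for_instance(self, context, instance, **kwargs):
        nw_info = []  # network info that the decorator writes to the cache
        return nw_info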
5,355,570
def get_timestamped_export_dir(export_dir_base): """Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name. """ attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: export_timestamp = int(time.time()) export_dir = os.path.join( compat.as_bytes(export_dir_base), compat.as_bytes(str(export_timestamp))) if not gfile.Exists(export_dir): # Collisions are still possible (though extremely unlikely): this # directory is not actually created yet, but it will be almost # instantly on return from this function. return export_dir time.sleep(1) attempts += 1 logging.warn( 'Export directory {} already exists; retrying (attempt {}/{})'.format( export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)) raise RuntimeError('Failed to obtain a unique export directory name after ' '{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
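# Usage sketch for get_timestamped_export_dir; the base directory is
# hypothetical, and compat/gfile from TensorFlow are assumed to be imported at
# module level as the function requires.
export_dir = get_timestamped_export_dir("/tmp/saved_models")
print(export_dir)  # e.g. b'/tmp/saved_models/1700000000' (bytes via compat.as_bytes)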
5,355,571
def calculate_density(temp, pressure): """Returns density in g/cm^3 """ if (temp < 161.40): raise ValueError("Solid phase!") if (temp < 289.7): VaporP_bar = pow(10, 4.0519 - 667.16 / temp) else: VaporP_bar = sys.float_info.max if (pressure < VaporP_bar): raise ValueError("Gas phase!") density = 2.9970938084691329e2 * np.exp(-8.2598864714323525e-2 * temp) - \ 1.8801286589442915e6 * np.exp( -((temp - 4.0820251276172212e2) / 2.7863170223154846e1)**2) - \ 5.4964506351743057e3 * np.exp( -((temp - 6.3688597345042672e2) / 1.1225818853661815e2)**2) + \ 8.3450538370682614e2 * np.exp( -((temp + 4.8840568924597342e1) / 7.3804147172071107e3)**2) \ - 8.3086310405942265e2 return density
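# Worked example for calculate_density: at roughly 170 K and 2 bar the vapor
# pressure computed above is ~1.3 bar, so the liquid branch is taken and no
# exception is raised.
rho = calculate_density(170.0, 2.0)   # temperature in K, pressure in bar
print(rho)                            # ~2.9 g/cm^3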
5,355,572
def load_scorers(scorers):
    """Loads modules and instantiates scorers."""
    for sid, sdef in scorers:
        module = None
        if os.path.isfile(sdef):
            try:
                logging.info('Loading additional feature definitions from file %s', sdef)
                prefix = os.path.dirname(sdef)
                sys.path.append(prefix)
                module = __import__(os.path.basename(sdef).replace('.py', ''))
                sys.path.remove(prefix)
            except:
                logging.error('Could not load feature definitions from file %s', sdef)
        else:
            try:
                logging.info('Loading additional feature definitions from module %s', sdef)
                module = importlib.import_module(sdef)
            except:
                logging.error('Could not load feature definitions from module %s', sdef)
        if module is None:
            raise NotImplementedError('Could not load module associated with %s: %s' % (sid, sdef))
        _SCORERS_.append(ScorerData(sid, module.get_instance(sid)))
5,355,573
def cross_validation(df, K, hyperparameters): """ Perform cross validation on a dataset. :param df: pandas.DataFrame :param K: int :param hyperparameters: dict """ train_indices = list(df.sample(frac=1).index) k_folds = np.array_split(train_indices, K) if K == 1: K = 2 rmse_list = [] for i in range(len(k_folds)): training_folds = [fold for j, fold in enumerate(k_folds) if j != i] training_indices = np.concatenate(training_folds) x_train, y_train = df.iloc[training_indices, 1:], df.iloc[training_indices, :1] x_validation, y_validation = df.iloc[k_folds[i], 1:], df.iloc[k_folds[i], :1] dtrain = xgb.DMatrix(data=x_train, label=y_train) dvalidation = xgb.DMatrix(data=x_validation, label=y_validation) model = xgb.train( params=hyperparameters, dtrain=dtrain, evals=[(dtrain, "train"), (dvalidation, "validation")], ) eval_results = model.eval(dvalidation) rmse_list.append(float(eval_results.split("eval-rmse:")[1])) return rmse_list, model
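# Minimal usage sketch for cross_validation, assuming numpy as np, pandas as pd
# and xgboost as xgb are imported at module level; the data and hyperparameters
# are made up, with the label expected in the first column.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(100, 4)), columns=["y", "x1", "x2", "x3"])
params = {"objective": "reg:squarederror", "max_depth": 3, "eta": 0.1}
rmse_per_fold, last_model = cross_validation(df, K=5, hyperparameters=params)
print(rmse_per_fold)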
5,355,574
def solve_cities(cities: List, gdps: List, sick: List, total_capacity: int,
                 value_r=0, weight_r=0, num_reads=1, verbose=False) -> Dict:
    """
    Solves problem: "Which cities should I shut down in order to stay within
    healthcare resource constraints while maximizing overall GDP"

    parameters:
        cities - list of city names
        gdps - corresponding list of GDP per city
        sick - corresponding number of sick people per city
        total_capacity - max capacity for sick people summed over all cities
        num_reads - number of samples to take
        verbose - whether to print out best result

    returns:
        (dict) - list of dictionaries with individual results and selected attributes
        sorted in order of least energy first
    """

    if sum(sick) < total_capacity:
        print("Warning in solve_cities: Total number of sick people is less " +
              "than total capacity. There's no knapsack problem to solve!")

    bqm = knapsack_bqm(cities, gdps, sick, total_capacity,
                       value_r=value_r, weight_r=weight_r)

    sampler = LeapHybridSampler()
    samplesets = [sampler.sample(bqm) for _ in range(num_reads)]

    df = pd.DataFrame({'city': cities, 'gdp': gdps, 'sick': sick})
    df = df.set_index('city')

    solution_set = []
    for sampleset in samplesets:
        open_cities = []
        closed_cities = []
        for k, v in sampleset.first.sample.items():
            if k in cities:
                if v == 1:
                    open_cities.append(k)
                else:
                    closed_cities.append(k)
        solution_set.append({
            'open_cities': open_cities,
            'closed_cities': closed_cities,
            'energy': sampleset.first.energy,
            'salvaged_gdp': sum(df.loc[open_cities]['gdp']) + sum(df.loc[closed_cities]['gdp']) * value_r,
            'used_capacity': int(round(sum(df.loc[open_cities]['sick'])))
        })

    # do sorting from lowest to highest energy
    if num_reads > 1:
        energies = [solution['energy'] for solution in solution_set]
        solution_set = [x for _, x in sorted(zip(energies, solution_set))]

    if verbose:
        print('BEST SOLUTION')
        print('Open cities')
        print(solution_set[0]['open_cities'])
        print('\n')
        print('Closed cities')
        print(solution_set[0]['closed_cities'])
        print('\n')
        total_gdp = sum(df['gdp'])
        salvaged_gdp = solution_set[0]['salvaged_gdp']
        print(
            f'Salvaged GDP: {salvaged_gdp} ({(100*salvaged_gdp/total_gdp):.1f}%)')
        used_capacity = solution_set[0]['used_capacity']
        print(
            f'Used up hospital capacity: {used_capacity:d} of {total_capacity} ({(100*used_capacity/total_capacity):.1f}%)')

    return solution_set
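# Usage sketch for solve_cities; the data is made up, and running it requires a
# D-Wave Leap account (LeapHybridSampler) plus the knapsack_bqm helper used above.
cities = ["A", "B", "C"]
gdps = [10.0, 7.0, 3.0]
sick = [500, 300, 200]
solutions = solve_cities(cities, gdps, sick, total_capacity=600,
                         num_reads=2, verbose=True)
print(solutions[0]["open_cities"])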
5,355,575
def catalog(): """Render the mapping catalog page.""" if request.args.get(EQUIVALENT_TO): mappings = current_app.manager.get_mappings_by_type(EQUIVALENT_TO) message = Markup("<h4>You are now visualizing the catalog of equivalent mappings</h4>") flash(message) elif request.args.get(IS_PART_OF): mappings = current_app.manager.get_mappings_by_type(IS_PART_OF) message = Markup("<h4>You are now visualizing the catalog of hierarchical mappings</h4>") flash(message) else: mappings = current_app.manager.get_all_mappings() return render_template( 'curation/catalog.html', STYLED_NAMES=STYLED_NAMES, mappings=mappings, all='all' )
5,355,576
def detect_counterexample(algorithm, test_epsilon, default_kwargs={}, event_search_space=None, databases=None, event_iterations=100000, detect_iterations=500000, cores=0, loglevel=logging.INFO): """ :param algorithm: The algorithm to test for. :param test_epsilon: The privacy budget to test for, can either be a number or a tuple/list. :param default_kwargs: The default arguments the algorithm needs except the first Queries argument. :param event_search_space: The search space for event selector to reduce search time, optional. :param databases: The databases to run for detection, optional. :param event_iterations: The iterations for event selector to run, default is 100000. :param detect_iterations: The iterations for detector to run, default is 500000. :param cores: The cores to utilize, 0 means auto-detection. :param loglevel: The loglevel for logging package. :return: [(epsilon, p, d1, d2, kwargs, event)] The epsilon-p pairs along with databases/arguments/selected event. """ logging.basicConfig(level=loglevel) logger.info('Starting to find counter example on algorithm {} with test epsilon {}\n' .format(algorithm.__name__, test_epsilon)) logger.info('\nExtra arguments:\n' 'default_kwargs: {}\n' 'event_search_space: {}\n' 'databases: {}\n' 'cores:{}\n'.format(default_kwargs, event_search_space, databases, cores)) if databases is not None: d1, d2 = databases kwargs = generate_arguments(algorithm, d1, d2, default_kwargs=default_kwargs) input_list = ((d1, d2, kwargs),) else: input_list = generate_databases(algorithm, 5, default_kwargs=default_kwargs) result = [] test_epsilon = (test_epsilon, ) if isinstance(test_epsilon, (int, float)) else test_epsilon pool = None if cores == 0: pool = mp.Pool(mp.cpu_count()) elif cores != 1: pool = mp.Pool(cores) try: for i, epsilon in enumerate(test_epsilon): d1, d2, kwargs, event = select_event(algorithm, input_list, epsilon, event_iterations, search_space=event_search_space, process_pool=pool) # fix the database and arguments if selected for performance input_list = ((d1, d2, kwargs),) if len(input_list) > 1 else input_list p1, _ = hypothesis_test(algorithm, d1, d2, kwargs, event, epsilon, detect_iterations, process_pool=pool) result.append((epsilon, p1, d1, d2, kwargs, event)) print('Epsilon: {} | p-value: {:5.3f} | Event: {} | {:5.1f}%' .format(epsilon, p1, event, float(i + 1) / len(test_epsilon) * 100)) logger.debug('D1: {} | D2: {} | kwargs: {}'.format(d1, d2, kwargs)) finally: if pool is not None: pool.close() else: pass return result
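# Hedged usage sketch for detect_counterexample: noisy_count is a hypothetical
# mechanism whose first argument is the queries list, and the epsilon values are
# arbitrary; the search/testing machinery is supplied by the helpers used above
# (generate_databases, select_event, hypothesis_test).
import numpy as np

def noisy_count(queries, epsilon=1.0):
    return len(queries) + np.random.laplace(scale=1.0 / epsilon)

results = detect_counterexample(noisy_count, test_epsilon=[0.5, 1.0],
                                default_kwargs={'epsilon': 1.0},
                                detect_iterations=100000)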
5,355,577
def produce_dataset_mce(mce, kafka_config): """ Produces a MetadataChangeEvent to Kafka """ conf = {'bootstrap.servers': kafka_config.bootstrap_server, 'on_delivery': delivery_report, 'schema.registry.url': kafka_config.schema_registry} key_schema = avro.loads('{"type": "string"}') record_schema = avro.load(kafka_config.avsc_path) producer = AvroProducer(conf, default_key_schema=key_schema, default_value_schema=record_schema) producer.produce(topic=kafka_config.kafka_topic, key=mce['proposedSnapshot'][1]['urn'], value=mce) producer.flush()
5,355,578
def test_simple(async_fxc, fxc_args): """ Testing basic async functionality """ async_fxc.loop.run_until_complete(simple_task(async_fxc, fxc_args['tutorial_endpoint']))
5,355,579
def test_enable_beacon_module(): """ Test enabling beacons """ comm1 = "Enabled beacons on minion." event_returns = [ { "complete": True, "tag": "/salt/minion/minion_beacon_enabled_complete", "beacons": { "enabled": True, "watch_salt_master": [ {"processes": {"salt-master": "stopped"}}, {"beacon_module": "ps"}, ], }, } ] mock = MagicMock(return_value=True) with patch.dict(beacons.__salt__, {"event.fire": mock}): with patch.object(SaltEvent, "get_event", side_effect=event_returns): assert beacons.enable() == {"comment": comm1, "result": True}
5,355,580
def convert_annotation(ann): """Converts an AST object into its lib2to3 equivalent.""" raise NotImplementedError(f"unknown AST node type: {ann!r}")
5,355,581
def reset_all(rewind_to=None): """Reset an instance of each of the registered projectors""" for name in _projectors: reset(name)
5,355,582
def test_maps():
    """This test just checks that nothing fails at the moment
    """
    df = pd.DataFrame([["99P", 0.3], ["13T", 1.2]], columns=["pct", "val"])
    plt = maps.ccg_map(df, title="foo", column="val")
    with tempfile.NamedTemporaryFile() as f:
        plt.savefig(f.name, format="png", dpi=300, bbox_inches="tight")
5,355,583
def apply_inverse_rot_to_vec(rot, vec): """Multiply the inverse of a rotation matrix by a vector.""" # Inverse rotation is just transpose return [rot[0][0] * vec[0] + rot[1][0] * vec[1] + rot[2][0] * vec[2], rot[0][1] * vec[0] + rot[1][1] * vec[1] + rot[2][1] * vec[2], rot[0][2] * vec[0] + rot[1][2] * vec[1] + rot[2][2] * vec[2]]
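# Worked example for apply_inverse_rot_to_vec: for a 90-degree rotation about z
# (which maps x to y), the inverse rotation sends [1, 0, 0] to [0, -1, 0].
rot_z_90 = [[0.0, -1.0, 0.0],
            [1.0,  0.0, 0.0],
            [0.0,  0.0, 1.0]]
print(apply_inverse_rot_to_vec(rot_z_90, [1.0, 0.0, 0.0]))  # [0.0, -1.0, 0.0]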
5,355,584
def make_pkr_plot(): """Does the work of making the real-space P(k) figure.""" zlist = [2.000,6.000] # b1lst = [0.900,2.750] # b2lst = [0.800,6.250] # alpha = [1.500,0.145] # b1lst = [0.920,2.750] b2lst = [-.125,5.788] bnlst = [3.713,1.00] alpha = [1.500,0.150] #bnlst = [3.613,0.650] #alpha = [1.500,0.145] # Now make the figure. fig,ax = plt.subplots(2,2,figsize=(6,4),sharex=True,\ gridspec_kw={'height_ratios':[3,1]}) for ii in range(ax.shape[1]): zz = zlist[ii] aa = 1.0/(1.0+zz) bb = b1lst[ii] + 1.0 b1 = b1lst[ii] b2 = b2lst[ii] # Compute knl pk = np.loadtxt("../../data/pklin_{:6.4f}.txt".format(aa)) knl = 1.0/np.sqrt(np.trapz(pk[:,1],x=pk[:,0])/6./np.pi**2) plh = pk[:,1] * (1+b1lst[ii])**2 plx = pk[:,1] * (1+b1lst[ii]) plm = pk[:,1] * 1.0 ax[0,ii].plot(pk[:,0],plm,'C0:', lw=1.5) ax[0,ii].plot(pk[:,0],plx,'C1:', lw=1.5) ax[0,ii].plot(pk[:,0],plh,'C2:', lw=1.5) # Plot the data, Pmm, Phm, Phh pkd = np.loadtxt(db+"HI_bias_{:06.4f}.txt".format(aa))[1:,:] pkm = pkd[:,3] pkh = pkd[:,3] * pkd[:,2]**2 pkx = pkd[:,3] * pkd[:,1] ax[0,ii].plot(pkd[:,0],pkm,'C0-',alpha=0.75, lw=1.2, label=r'$P_{\rm m-m}$') ax[0,ii].plot(pkd[:,0],pkx,'C1-',alpha=0.75, lw=1.2, label=r'$P_{\rm HI-m}$') ax[0,ii].plot(pkd[:,0],pkh,'C2-',alpha=0.75, lw=1.2, label=r'$P_{\rm HI-HI}$') # Now Zeldovich. #pkz = np.loadtxt("pkzel_{:6.4f}.txt".format(aa)) pkz = np.loadtxt(tb+"zeld_{:6.4f}.pkr".format(aa)) kk = pkz[:,0] ## pzh = (1+alpha[ii]*kk**2)*pkz[:,1]+b1*pkz[:,2]+b2*pkz[:,3]+\ ## b1**2*pkz[:,4]+b2**2*pkz[:,5]+b1*b2*pkz[:,6] ## pzx = (1+alpha[ii]*kk**2)*pkz[:,1]+0.5*b1*pkz[:,2]+0.5*b2*pkz[:,3] ## pzm = (1+alpha[ii]*kk**2)*pkz[:,1] alh = alpha[ii] + bnlst[ii] alx = alpha[ii] + 0.5*bnlst[ii] alm = alpha[ii] pzh = (1+alh*kk**2)*pkz[:,1]+b1*pkz[:,2]+b2*pkz[:,3]+\ b1**2*pkz[:,4]+b2**2*pkz[:,5]+b1*b2*pkz[:,6] pzx = (1+alx*kk**2)*pkz[:,1]+0.5*b1*pkz[:,2]+0.5*b2*pkz[:,3] pzm = (1+alm*kk**2)*pkz[:,1] ax[0,ii].plot(kk[kk<knl],pzm[kk<knl],'C0--', lw=2.2) ax[0,ii].plot(kk[kk<knl],pzx[kk<knl],'C1--', lw=2.2) ax[0,ii].plot(kk[kk<knl],pzh[kk<knl],'C2--', lw=2.2) # Now plot the ratios. ww = np.nonzero( pkd[:,0]<knl )[0] rh = np.interp(pkd[ww,0],kk,pzh)/pkh[ww] rx = np.interp(pkd[ww,0],kk,pzx)/pkx[ww] rm = np.interp(pkd[ww,0],kk,pzm)/pkm[ww] ax[1,ii].plot(pkd[ww,0],1/rm,'C0', lw=1.2) ax[1,ii].plot(pkd[ww,0],1/rx,'C1', lw=1.2) ax[1,ii].plot(pkd[ww,0],1/rh,'C2', lw=1.2) # Add a grey shaded region. ax[1,ii].fill_between([1e-5,3],[0.95,0.95],[1.05,1.05],\ color='lightgrey',alpha=0.25) ax[1,ii].fill_between([1e-5,3],[0.98,0.98],[1.02,1.02],\ color='darkgrey',alpha=0.5) # put on a line for knl. ax[0,ii].plot([knl,knl],[1e-10,1e10],'-.',color='k', alpha=0.2) ax[1,ii].plot([knl,knl],[1e-10,1e10],'-.',color='k', alpha=0.2) ax[0,ii].axvline(0.75*knl,ls=':',color='k',alpha=0.2) ax[1,ii].axvline(0.75*knl,ls=':',color='k',alpha=0.2) if ii == 0:ax[0,ii].text(1.1*knl,1e4,r'$k_{\rm nl}$',color='darkgrey',\ ha='left',va='center', fontdict=font) #cosmic variance ff = open(db + 'HI_pks_1d_{:06.4f}.txt'.format(aa)) tmp = ff.readline() sn = float(tmp.split('=')[1].split('.\n')[0]) print('shotnoise = ', sn) kk = pkd[:, 0]#np.logspace(-2, 0, 1000) Nk = 4*np.pi*kk[:-1]**2*np.diff(kk)*bs**3 / (2*np.pi)**3 std = (2/Nk *(pkh[:-1]+sn)**2)**0.5/pkh[:-1] #std = np.sqrt(2/Nk) #print(Nk) ax[1,ii].fill_between(kk[:-1], 1-std, 1+std, color='magenta',alpha=0.1) # Tidy up the plot. 
ax[0,ii].set_xlim(0.02,1.0) ax[0,ii].set_ylim(2.0,3e4) ax[0,ii].set_xscale('log') ax[0,ii].set_yscale('log') ax[0,ii].text(0.025,150.,"$z={:.1f}$".format(zz), fontdict=font) ax[1,ii].set_xlim(0.02,1.0) ax[1,ii].set_ylim(0.90,1.1) ax[1,ii].set_xscale('log') ax[1,ii].set_yscale('linear') # # Suppress the y-axis labels on not-column-0. for ii in range(1,ax.shape[1]): ax[0,ii].get_yaxis().set_visible(False) ax[1,ii].get_yaxis().set_visible(False) # Put on some more labels. ax[0,0].legend(prop=fontmanage) ax[1,0].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font) ax[1,1].set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font) ax[0,0].set_ylabel(r'$P(k)\quad [h^{-3}{\rm Mpc}^3]$', fontdict=font) ax[1,0].set_ylabel(r'$P_{N-body}/P_Z$', fontdict=font) for axis in ax.flatten(): for tick in axis.xaxis.get_major_ticks(): tick.label.set_fontproperties(fontmanage) for tick in axis.yaxis.get_major_ticks(): tick.label.set_fontproperties(fontmanage) # and finish up. plt.tight_layout() plt.savefig(figpath + '/zeld_pkr.pdf') #
5,355,585
def gsl_blas_dsdot(*args, **kwargs): """gsl_blas_dsdot(gsl_vector_float const * X, gsl_vector_float const * Y) -> int""" return _gslwrap.gsl_blas_dsdot(*args, **kwargs)
5,355,586
def apero_create_pol_product(product, p, loc): """ Create the p.fits product: Polarimetric products only processed in polarimetric mode, from the combination of 4 consecutive exposures. HDU # Name Type Description 1 Primary Header 2 Pol Image The polarized spectrum in the required Stokes configuration 3 PolErr Image The error on the polarized spectrum 4 StokesI Image The combined Stokes I (intensity) spectrum 5 StokesIErr Image The error on the Stokes I spectrum 6 Null1 Image One null spectrum used to check the polarized signal (see Donati et al 1997) 7 Null2 Image The other null spectrum used to check the polarized signal 8 WaveAB Image The wavelength vector for the AB science channel 9 BlazeAB Image The Blaze function for AB (useful for Stokes I) :param exposure: Exposure to create product for """ from apero import core # Get Logging function WLOG = core.wlog # load polardict from loc polardict = loc['POLARDICT'] # get base entry entry_base = polardict[loc['BASENAME']] def wipe_snr(header): for key in header: if key.startswith('EXTSN'): header[key] = 'Unknown' WLOG(p, 'info', 'Creating {}'.format(product)) try: # update loc to make all data 2D arrays uniform loc = make_2D_data_uniform(loc) # set key for e2ds fits files base_e2ds = entry_base["e2dsff_AB"] base_wave_e2ds = entry_base["WAVE_AB"] base_blaze_e2ds = entry_base["BLAZE_AB"] # open e2ds fits file of first image in the sequence hdu_list_base = fits.open(base_e2ds) hdr_base = hdu_list_base[0].header # open e2ds fits file of first image in the sequence hdu_list_wave = fits.open(base_wave_e2ds) hdr_wave_base = hdu_list_wave[0].header # open e2ds fits file of first image in the sequence hdu_list_blaze = fits.open(base_blaze_e2ds) hdr_blaze_base = hdu_list_blaze[0].header # same primary hdu as in first efits of sequence: primary_hdu = hdu_list_base[0] hdu_wave = fits.ImageHDU(data=loc['wave_data'], header=hdr_wave_base, name='WaveAB') hdu_wave.header = add_polar_keywords(p, loc, hdu_wave.header, apero=True) hdu_blaze = fits.ImageHDU(data=loc['blaze_data'], header=hdr_blaze_base, name='BlazeAB') hdu_blaze.header = add_polar_keywords(p, loc, hdu_blaze.header, apero=True) # same cal extensions as in first efits of sequence: cal_extensions = [ extension_from_hdu('WaveAB', hdu_wave), extension_from_hdu('BlazeAB', hdu_blaze) ] hdu_pol = fits.ImageHDU(data=loc['pol_data'], header=hdr_base, name='Pol') hdu_pol.header = add_polar_keywords(p, loc, hdu_pol.header, apero=True) hdu_polerr = fits.ImageHDU(data=loc['polerr_data'], name='PolErr') hdu_stokesI = fits.ImageHDU(data=loc['stokesI_data'], header=hdr_base, name='StokesI') hdu_stokesI.header = add_polar_keywords(p, loc, hdu_stokesI.header, apero=True) hdu_stokesIerr = fits.ImageHDU(data=loc['stokesIerr_data'], name='StokesIErr') hdu_null1 = fits.ImageHDU(data=loc['null1_data'], header=hdr_base, name='Null1') hdu_null1.header = add_polar_keywords(p, loc, hdu_null1.header, apero=True) hdu_null2 = fits.ImageHDU(data=loc['null2_data'], header=hdr_base, name='Null2') hdu_null2.header = add_polar_keywords(p, loc, hdu_null2.header, apero=True) pol_extensions = [ extension_from_hdu('Pol', hdu_pol), extension_from_hdu('PolErr', hdu_polerr), extension_from_hdu('StokesI', hdu_stokesI), extension_from_hdu('StokesIErr', hdu_stokesIerr), extension_from_hdu('Null1', hdu_null1), extension_from_hdu('Null2', hdu_null2), ] for ext in pol_extensions: wipe_snr(ext.header) hdu_list = create_hdu_list([primary_hdu, *pol_extensions, *cal_extensions]) product_header_update(hdu_list) # We copy input files to primary 
header after duplicate keys have been cleaned out primary_header = hdu_list[0].header pol_header = hdu_list[1].header in_file_cards = [card for card in pol_header.cards if card[0].startswith('FILENAM')] for card in in_file_cards: primary_header.insert('FILENAME', card) primary_header.remove('FILENAME', ignore_missing=True) hdu_list.writeto(product, overwrite=True, output_verify="fix+warn") except: WLOG(p,'error','Creation of {} failed'.format(product))
5,355,587
def generate_tumor_growth_trajectories_base(initialCondition, parameterValues, number_realizations, random_seed=None, output_directory_name='./'): """ Generate many time courses of tumor growth and save data """ prng = np.random.RandomState(random_seed) random_seed_array = prng.randint(0, 1000, number_realizations) number_of_states = int(parameterValues['number_of_states']) time_points = -np.ones((number_realizations, number_of_states), dtype=float) number_C_cells = -np.ones((number_realizations, number_of_states), dtype=int) number_Q_cells = -np.ones((number_realizations, number_of_states), dtype=int) from write import cleanUp_createFile, append_log_file flog = cleanUp_createFile(output_directory_name + 'tumor.log', 'a') for realization in range(number_realizations): tumor = Tumor(initialCondition, parameterValues, random_seed=random_seed_array[realization]) time_course = tumor.generate_time_course() time_points[realization, :] = time_course.time_points number_C_cells[realization, :] = time_course.number_C_cells number_Q_cells[realization, :] = time_course.number_Q_cells append_log_file(flog, realization, time_course) print('finished generating time courses\n') # type '!unzip -l data.npz' in the Python Console to see individual files in zip archive: np.savez(output_directory_name + parameterValues['data_file_name'], time_points=time_points, number_C_cells=number_C_cells, number_Q_cells=number_Q_cells)
5,355,588
async def vote_comment(session: AsyncSession, comment: Comment, user: User) -> Comment: """ Creates a vote on the given comment. """
5,355,589
def setup_module(mod):
    """
    Sets up the pytest environment

    * `mod`: module name
    """

    global ADDR_TYPES

    # Required linux kernel version for this suite to run.
    result = required_linux_kernel_version("4.16")
    if result is not True:
        pytest.skip("Kernel requirements are not met")

    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: {}".format(testsuite_run_time))
    logger.info("=" * 40)

    logger.info("Running setup_module to create topology")

    # This function initiates the topology build with Topogen...
    json_file = "{}/ibgp_gshut_topo1.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    global topo
    topo = tgen.json_topo
    # ... and here it calls Mininet initialization functions.

    # Starting topology, create tmp files which are loaded to routers
    # to start daemons and then start routers
    start_topology(tgen)

    # Creating configuration from JSON
    build_config_from_json(tgen, topo)

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # API call to verify whether BGP has converged
    ADDR_TYPES = check_address_types()
    BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
    assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
        BGP_CONVERGENCE
    )

    logger.info("Running setup_module() done")
5,355,590
def cost(weights): """Cost function which tends to zero when A |x> tends to |b>.""" p_global_ground = global_ground(weights) p_ancilla_ground = ancilla_ground(weights) p_cond = p_global_ground / p_ancilla_ground return 1 - p_cond
5,355,591
def test_check_urls(file): """ test check urls check function. """ urls = collect_links_from_file(file) checker = UrlCheckResult() assert str(checker) == "UrlCheckResult" # Checker should have passed, failed, and all for attribute in ["passed", "failed", "all"]: assert hasattr(checker, attribute) assert not checker.passed assert not checker.failed assert not checker.all assert not checker.excluded checker.check_urls(urls) # Ensure we have the correct sums for passing/failing assert len(checker.failed + checker.passed + checker.excluded) == checker.count assert len(checker.all) == len(urls) # Ensure one not excluded is failed assert "https://none.html" in checker.failed assert checker.print_all # Run again with excluded exact urls checker = UrlCheckResult(exclude_urls=["https://none.html"]) checker.check_urls(urls) assert "https://none.html" in checker.excluded # Run again with exclude of patterns checker = UrlCheckResult( exclude_patterns=["https://github.com/SuperKogito/URLs-checker/issues"] ) checker.check_urls(urls) for failed in checker.failed: assert not failed.startswith( "https://github.com/SuperKogito/URLs-checker/issues" )
5,355,592
def runOptimization( cfg, optimize_cfg, n_iter=20, split_runs=1, model_runs=1, filename="optimize_result", ): """Optimize the model parameter using hyperopt. The model parameters are optimized using the evaluations on validation dataset. Args: cfg(dict): configuration data optimize_cfg(dict): configuration for optimization n_iter(int): the number of iterations for sequential optimization split_runs(int): the number of runs for different dataset-split random seeds. model_runs(int): the number of runs for different model-initialization random seeds. filename(string): a file-name for logging """ def objective(space): print(space) newcfg = {**cfg} for k in space.keys(): if k in newcfg and type(newcfg[k]) == dict: newcfg[k] = {**space[k]} else: newcfg[k] = space[k] print(newcfg, cfg) result = runEvaluation( newcfg, split_runs=split_runs, model_runs=model_runs ) opt_result = { "loss": result["val_rmse"][0], "loss_variance": result["val_rmse"][1] ** 2, "true_loss": result["test_rmse"][0], "true_loss_variance": result["test_rmse"][1] ** 2, "status": STATUS_OK, "eval_time": time.time(), "data": result, "space": space, } return opt_result trials = Trials() best = fmin( objective, optimize_cfg, algo=tpe.suggest, max_evals=n_iter, trials=trials, ) valid_trial = [t for t in trials if t["result"]["status"] == STATUS_OK] losses_argmin = np.argmin( [float(trial["result"]["loss"]) for trial in valid_trial] ) print([float(trial["result"]["loss"]) for trial in valid_trial]) best_trial = valid_trial[losses_argmin] best_result = best_trial["result"]["data"] print(best, best_trial["result"]["space"], space_eval(optimize_cfg, best)) ret = { "best": best, "n_iter": n_iter, "split_runs": split_runs, "model_runs": model_runs, "result": best_result, "optimize_confg": optimize_cfg, "config": cfg, } ret_str = ConfigEncoder.dumps(ret) with open(f"{filename}.json", "w") as fp: fp.write(ret_str) print(ret) return ret
5,355,593
def __set_metadata(file_path: str, tags: dict): """Save given metadata in the file. This function uses pytaglib since pydub does not support some MP3 tags (e.g. "comment"). :param file_path: the path to the file to set tags in :param tags: the dictionary of tags to set """ song = taglib.File(file_path) for k, v in tags.items(): song.tags[k] = v song.save()
5,355,594
def _capabilities_for_entity(config, entity): """Return an _EntityCapabilities appropriate for given entity. raises _UnknownEntityDomainError if the given domain is unsupported. """ if entity.domain not in _CAPABILITIES_FOR_DOMAIN: raise _UnknownEntityDomainError() return _CAPABILITIES_FOR_DOMAIN[entity.domain](config, entity)
5,355,595
def check_missing_files(client): """Find missing files listed in datasets.""" missing = defaultdict(list) for path, dataset in client.datasets.items(): for file in dataset.files: filepath = (path.parent / file) if not filepath.exists(): missing[str( path.parent.relative_to(client.renku_datasets_path) )].append( os.path.normpath(str(filepath.relative_to(client.path))) ) if not missing: return True click.secho( WARNING + 'There are missing files in datasets.' # '\n (use "renku dataset clean <name>" to clean them)' ) for dataset, files in missing.items(): click.secho( '\n\t' + click.style(dataset, fg='yellow') + ':\n\t ' + '\n\t '.join(click.style(path, fg='red') for path in files) ) return False
5,355,596
def apply_torsion(nodes, suffix=""): """ Torsion energy in nodes. """ if ( "phases%s" % suffix in nodes.data and "periodicity%s" % suffix in nodes.data ): return { "u%s" % suffix: esp.mm.torsion.periodic_torsion( x=nodes.data["x"], k=nodes.data["k%s" % suffix], phases=nodes.data["phases%s" % suffix], periodicity=nodes.data["periodicity%s" % suffix], ) } else: return { "u%s" % suffix: esp.mm.torsion.periodic_torsion( x=nodes.data["x"], k=nodes.data["k%s" % suffix], ) }
5,355,597
def parse_summary_table(doc):
    """
    Parse the etree doc for summarytable, returns::

        [{'channel': unicode,
          'impressions': int,
          'clicks': int,
          'ctr': decimal.Decimal,
          'ecpm': decimal.Decimal,
          'earnings': decimal.Decimal}]
    """
    for t in doc.findall('.//table'):
        if t.attrib.get('id') == 'summarytable':
            break
    else:
        raise ValueError("summary table not found")

    res = []
    FIELDS = ['channel', 'requests', 'responses', 'impressions', 'clicks',
              'ctr', 'ecpm', 'earnings']
    for row in t.findall('.//tr'):
        celltext = []
        for c in row.findall('td'):
            tail = ''
            # adsense inserts an empty span if a row has a period in it, so
            # get the children and find the tail element to append to the text
            if c.find('a') and c.find('a').getchildren():
                tail = c.find('a').getchildren()[0].tail or ''
            celltext.append('%s%s' % ((c.text or c.findtext('a') or '').strip(), tail.strip()))

        if len(celltext) != 8:
            continue

        try:
            # materialize the map so bad values raise here, and so the list
            # concatenation below works under Python 3
            value_cols = list(map(parse_decimal, celltext[1:]))
        except decimal.InvalidOperation:
            continue

        res.append(dict(zip(FIELDS, [celltext[0]] + value_cols)))
    return res
5,355,598
def tileset_info(hitile_path): """ Get the tileset info for a hitile file. Parameters ---------- hitile_path: string The path to the hitile file Returns ------- tileset_info: {'min_pos': [], 'max_pos': [], 'tile_size': 1024, 'max_zoom': 7 } """ hdf_file = h5py.File(hitile_path, "r") d = hdf_file["meta"] if "min-pos" in d.attrs: min_pos = d.attrs["min-pos"] else: min_pos = 0 if "max-pos" in d.attrs: max_pos = d.attrs["max-pos"] else: max_pos = d.attrs["max-length"] return { "max_pos": [int(max_pos)], "min_pos": [int(min_pos)], "max_width": 2 ** math.ceil(math.log(max_pos - min_pos) / math.log(2)), "max_zoom": int(d.attrs["max-zoom"]), "tile_size": int(d.attrs["tile-size"]), }
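# Usage sketch for tileset_info; the .hitile path is hypothetical, and h5py and
# math are assumed to be imported at module level as the function requires.
info = tileset_info("data/sample.hitile")
print(info["max_zoom"], info["tile_size"], info["max_width"])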
5,355,599