Columns: content (string, lengths 22 to 815k), id (int64, values 0 to 4.91M)
def create_LED_indicator_rect(**kwargs) -> QPushButton:
    """
    Useful kwargs: text: str, icon: QIcon, checked: bool, parent
        checked=False -> LED red
        checked=True  -> LED green
    """
    button = QPushButton(checkable=True, enabled=False, **kwargs)
    button.setStyleSheet(SS_LED_INDICATOR_RECT)
    return button
5,356,900
def get_current_version_name():
    """Returns the version of the current instance.

    If this is version "v1" of module "module5" for app "my-app", this
    function will return "v1".
    """
    return os.environ['CURRENT_VERSION_ID'].split('.')[0]
5,356,901
def getAlignments(infile):
    """ read a PSL file and return a list of PslRow objects """
    psls = []
    with open(infile, 'r') as f:
        for psl in readPsls(f):
            psls.append(psl)
    return psls
5,356,902
def cleanup(config):
    """ This function performs cleanups:
        - Stops and removes ESP containers
        - Removes ESP from disk
        - Removes stage status files (e.g. .cloned_esp)
        Docker images & cache are left intact.
    """
    logging.info("Cleaning the provisioning environment")

    esp_path = pathlib.Path(config['esp']['dest_dir'])
    if esp_path.exists():
        if is_esp_running(esp_path, service='core'):
            # in the end, this runs docker-compose down, which stops and removes containers
            stop_esp(esp_path)
        shutil.rmtree(esp_path)

    for _, v in STAGES.items():
        status_file = pathlib.Path(v['status_file'])
        if status_file.exists():
            status_file.unlink()

    output_dir = pathlib.Path(config['usb_images']['output_path'])
    try:
        shutil.rmtree(output_dir)
    except FileNotFoundError as e:
        logging.debug("Path '%s' doesn't exist", e.filename)
5,356,903
def cd(editor, dir_path):
    """Override the cd command so that it stays in sync with the editor's directory.

    As an editor design decision, the command line and the directory pane on the
    left side of the screen should stay in sync. The cd command itself is
    overridden so that the left-hand directory pane moves accordingly whenever
    cd is used.
    """
    editor.update_dir(dir_path)
5,356,904
def get_ring_kernel(zs,Rs): """Represents the potential influence due to a line charge density a distance *delta_z* away, at which the azimuthally symmetric charge distribution has a radius *R*.""" Logger.write('Computing ring kernels over %i x %i points...'%((len(zs),)*2)) #Form index enumerations diag_inds=numpy.diag_indices(len(zs)) triud_inds=numpy.triu_indices(len(zs),k=0) triu_inds=numpy.triu_indices(len(zs),k=1) #upper triangle tril_inds=[triu_inds[1],triu_inds[0]] #lower triangle global den1,den2 K=numpy.zeros((len(zs),)*2,dtype=numpy.float) #position "2" corresponds to test charge (rows) #position "1" corresponds to origin of field (columns) zs2=zs.reshape((len(zs),1)); zs1=zs.reshape((1,len(zs))) Rs2=Rs.reshape((len(zs),1)); Rs1=Rs.reshape((1,len(zs))) dr2=(Rs1-Rs2)**2 dz2=(zs1-zs2)**2 rmod2=(Rs1+Rs2)**2 den1=numpy.sqrt(dz2+dr2) dzs=list(numpy.diff(zs)); dzs=numpy.array(dzs+[dzs[-1]]) dRs=list(numpy.diff(Rs)); dRs=numpy.array(dRs+[dRs[-1]]) #fill in diagonal with non-vanishing separation, #proportional to geometric mean of z-bins and local radial difference den1[diag_inds]=numpy.sqrt(dRs**2+dzs**2) arg1=-(4*Rs1*Rs2)/den1**2 den2=numpy.sqrt(dz2+rmod2) arg2=+(4*Rs1*Rs2)/den2**2 #Get elliptic function values ellipk_triud=interp_ellipk(arg1[triud_inds]) ellipk2_triud=interp_ellipk(arg2[triud_inds]) K[triud_inds]=(ellipk_triud/den1[triud_inds]+\ ellipk2_triud/den2[triud_inds])/numpy.pi K[tril_inds]=K[triu_inds] return K
5,356,905
async def test_async_with(client):
    """
    Test async with context manager (with backward compatibility).
    """
    mgr = client.connect(True)
    aexit = type(mgr).__aexit__
    aenter = type(mgr).__aenter__(mgr)

    conn = await aenter
    try:
        assert conn.closed == False
        _ = await conn.whoami()
    except:
        if not (await aexit(mgr, *sys.exc_info())):
            raise
    else:
        await aexit(mgr, None, None, None)

    assert conn.closed
5,356,906
def dsdh_h(P, h, region=0):
    """ Derivative of specific entropy [kJ kg / kg K kJ]
    w.r.t specific enthalpy at constant pressure"""
    if region == 0:
        region = idRegion_h(P, h)

    if region == 1:
        return region1.dsdh_h(P, h)
    elif region == 2:
        return region2.dsdh_h(P, h)
    elif region == 4:
        return region4.dsdh_h(P, h)
    else:
        return 0.000
5,356,907
def main(): """main() function """ global LOGGING global LOGFILE xprint("[+] fuzz_cli.py -- by Daniel Roberson @dmfroberson\n") args = parse_cli() # Make sure target exists and is executable progname = args.binary[0] if not os.path.isfile(progname) and not os.access(progname, os.X_OK): xprint("[-] Specified program \"%s\" is not executable." % progname) xprint("[-] Exiting.") exit(os.EX_USAGE) # Make sure script is readable scriptfile = args.script[0] if not os.access(scriptfile, os.R_OK): xprint("[-] Specified script \"%s\" is not readable." % scriptfile) xprint("[-] Exiting.") exit(os.EX_USAGE) # Make sure logfile is writable and set up logging if args.logfile: LOGFILE = args.logfile try: logfile = open(LOGFILE, "w+") except IOError, err: xprint("[-] Could not open logfile for writing: %s" % str(err)) xprint("[-] Exiting.") exit(os.EX_OSFILE) logfile.close() LOGGING = True xprint("[+] Fuzzing %s with tests defined in %s\n" % (progname, scriptfile)) # Rework this section into a function and combine relevant things from # fuzz_test() regarding parsing the lines. linecount = 0 for line in open(scriptfile, "r"): linecount += 1 line = line.rstrip() # Skip comments and blank lines if line[:1] == "#" or not line: continue # Make sure only one @@ per line varcount = 0 for var in fuzz_constants.FUZZ_VARS: varcount += line.count(var[0]) if varcount > 1: xprint("[-] Too many variables on line %d of %s -- Skipping." % \ (linecount, scriptfile)) xprint(" %s\n" % line) continue # Create argv[] for Popen() fuzz_args = shlex.split(line) fuzz_args.insert(0, progname) # Finally, fuzz the target xprint("[+] Fuzzing: %s" % " ".join(fuzz_args)) fuzz_test(fuzz_args, timeout=args.timeout, verbose=args.verbose) xprint("") # All done. xprint("[+] Pledge your allegiance to Shadaloo and I will let you live!") xprint("[+] Done")
5,356,908
def floor_datetime(dt, unit, n_units=1):
    """Floor a datetime to nearest n units.

    For example, if we want to floor to nearest three months, starting with
    2016-05-06-yadda, it will go to 2016-04-01. Or, if starting with
    2016-05-06-11:45:06 and rounding to nearest fifteen minutes, it will
    result in 2016-05-06-11:45:00.
    """
    if unit == "years":
        new_year = dt.year - (dt.year - 1) % n_units
        return datetime.datetime(new_year, 1, 1, 0, 0, 0)
    elif unit == "months":
        new_month = dt.month - (dt.month - 1) % n_units
        return datetime.datetime(dt.year, new_month, 1, 0, 0, 0)
    elif unit == "weeks":
        _, isoweek, _ = dt.isocalendar()
        new_week = isoweek - (isoweek - 1) % n_units
        return datetime.datetime.strptime(
            "%d %02d 1" % (dt.year, new_week), "%Y %W %w"
        )
    elif unit == "days":
        new_day = dt.day - dt.day % n_units
        return datetime.datetime(dt.year, dt.month, new_day, 0, 0, 0)
    elif unit == "hours":
        new_hour = dt.hour - dt.hour % n_units
        return datetime.datetime(dt.year, dt.month, dt.day, new_hour, 0, 0)
    elif unit == "minutes":
        new_minute = dt.minute - dt.minute % n_units
        return datetime.datetime(
            dt.year, dt.month, dt.day, dt.hour, new_minute, 0
        )
    elif unit == "seconds":
        new_second = dt.second - dt.second % n_units
        return datetime.datetime(
            dt.year, dt.month, dt.day, dt.hour, dt.minute, new_second
        )
    else:
        msg = "Unknown unit type {}".format(unit)
        raise ValueError(msg)
5,356,909
def get_dataset_descriptor(project_id, dataset_id):
    """Get the descriptor for the dataset with given identifier."""
    try:
        dataset = api.datasets.get_dataset_descriptor(
            project_id=project_id,
            dataset_id=dataset_id
        )
        if dataset is not None:
            return jsonify(dataset)
    except ValueError as ex:
        raise srv.InvalidRequest(str(ex))
    raise srv.ResourceNotFound(
        'unknown project \'' + project_id + '\' or dataset \'' + dataset_id + '\''
    )
5,356,910
def pe41():
    """
    >>> pe41()
    7652413
    """
    primes = Primes(1000000)
    for perm in permutations(range(7, 0, -1)):
        n = list_num(perm)
        if primes.is_prime(n):
            return n
    return -1
5,356,911
def transit_flag(body, time, nsigma=2.0): """Return a flag that indicates if times occured near transit of a celestial body. Parameters ---------- body : skyfield.starlib.Star Skyfield representation of a celestial body. time : np.ndarray[ntime,] Unix timestamps. nsigma : float Number of sigma to flag on either side of transit. Returns ------- flag : np.ndarray[ntime,] Boolean flag that is True if the times occur within nsigma of transit and False otherwise. """ time = np.atleast_1d(time) obs = ephemeris.chime # Create boolean flag flag = np.zeros(time.size, dtype=np.bool) # Find transit times transit_times = obs.transit_times( body, time[0] - 24.0 * 3600.0, time[-1] + 24.0 * 3600.0 ) # Loop over transit times for ttrans in transit_times: # Compute source coordinates sf_time = ephemeris.unix_to_skyfield_time(ttrans) pos = obs.skyfield_obs().at(sf_time).observe(body) alt = pos.apparent().altaz()[0] dec = pos.cirs_radec(sf_time)[1] # Make sure body is above horizon if alt.radians > 0.0: # Estimate the amount of time the body is in the primary beam # as +/- nsigma sigma, where sigma denotes the width of the # primary beam. We use the lowest frequency and E-W (or X) polarisation, # since this is the most conservative (largest sigma). window_deg = nsigma * cal_utils.guess_fwhm( 400.0, pol="X", dec=dec.radians, sigma=True ) window_sec = window_deg * 240.0 * ephemeris.SIDEREAL_S # Flag +/- window_sec around transit time begin = ttrans - window_sec end = ttrans + window_sec flag |= (time >= begin) & (time <= end) # Return boolean flag indicating times near transit return flag
5,356,912
def _parse_port_ranges(pool_str):
    """Given a 'N-P,X-Y' description of port ranges, return a set of ints."""
    ports = set()
    for range_str in pool_str.split(','):
        try:
            a, b = range_str.split('-', 1)
            start, end = int(a), int(b)
        except ValueError:
            log.error('Ignoring unparsable port range %r.', range_str)
            continue
        if start < 1 or end > 65535:
            log.error('Ignoring out of bounds port range %r.', range_str)
            continue
        ports.update(set(range(start, end + 1)))
    return ports
5,356,913
def get_flavors():
    """ Get Nectar vm flavors in a dict with openstack_id as key """
    fls = Flavor.query.all()
    results = []
    for fl in fls:
        results.append(repack(fl.json(), {"name": "flavor_name"}, ["id"]))
    return array_to_dict(results)
5,356,914
def _HasTrafficChanges(args):
    """True iff any of the traffic flags are set."""
    traffic_flags = ['to_revision', 'to_latest']
    return _HasChanges(args, traffic_flags)
5,356,915
def send_attachment(recipient_id, url, type=None):
    """
    Send an attachment by URL. If the URL has not been uploaded before, it will
    be uploaded and the attachment ID will be saved to the database. If the URL
    has been uploaded before, the ID is fetched from the database. Then, the
    attachment is sent by ID.

    :param recipient_id: The user ID of the recipient
    :param url: The URL of the attachment
    :param type: Type of the attachment. If not defined, guess_attachment_type is used
    """
    try:
        attachment = Attachment.objects.get(url=url)
        attachment_id = attachment.attachment_id
    except Attachment.DoesNotExist:
        attachment_id = upload_attachment(url, type)
        if attachment_id is None:
            raise ValueError('Uploading attachment with URL %s failed' % url)
        Attachment(url=url, attachment_id=attachment_id).save()

    send_attachment_by_id(recipient_id, attachment_id, type or guess_attachment_type(url))
5,356,916
def ad5940_switch_test(): """ Capture ECG for 10s turning off all the Switches, no data should be received, the Turn On the 5940 & 8233 switch the capture ECG data you should receive the data as expected with the default frequency :return: """ capture_time = 10 freq_hz = 0 common.dcb_cfg('d', 'ecg') common.watch_shell.do_disable_electrode_switch('3') common.watch_shell.do_disable_electrode_switch('2') common.watch_shell.do_disable_electrode_switch('1') common.watch_shell.quick_start('ecg', 'ecg') time.sleep(capture_time) common.watch_shell.quick_stop('ecg', 'ecg') f_path_sw_off = common.rename_stream_file(common.ecg_stream_file_name, '_ad5940_switch_off_test.csv') err_status, err_str, results_dict = qa_utils.check_stream_data(f_path_sw_off, 'ecg', 1, freq_hz) common.test_logger.info('ECG {}Hz Stream Test Results: {}'.format(freq_hz, results_dict)) if err_status: common.test_logger.error('*** ECG {}Hz SW OFF Test - FAIL ***'.format(freq_hz)) raise ConditionCheckFailure("\n\n" + '{}'.format(err_str)) freq_hz = 100 common.dcb_cfg('d', 'ecg') common.watch_shell.do_disable_electrode_switch('3') common.watch_shell.do_disable_electrode_switch('2') common.watch_shell.do_enable_electrode_switch('1') common.quick_start_ecg(freq_hz) time.sleep(capture_time) common.watch_shell.quick_stop('ecg', 'ecg') common.dcb_cfg('d', 'ecg') f_path_sw_on = common.rename_stream_file(common.ecg_stream_file_name, '_ad5940_switch_on_test.csv') err_status, err_str, results_dict = qa_utils.check_stream_data(f_path_sw_on, 'ecg', 1, freq_hz) common.test_logger.info('ECG {}Hz Stream Test Results: {}'.format(freq_hz, results_dict)) if err_status: common.test_logger.error('*** ECG {}Hz SW On Test - FAIL ***'.format(freq_hz)) raise ConditionCheckFailure("\n\n" + '{}'.format(err_str))
5,356,917
def find_credentials(account):
    """
    Function that checks whether credentials exist for that username and
    returns true or false.
    """
    return Credentials.find_credentialls(account)
5,356,918
def enumerate_imports(tokens):
    """
    Iterates over *tokens* and returns a list of all imported modules.

    .. note:: This ignores imports using the 'as' and 'from' keywords.
    """
    imported_modules = []
    import_line = False
    from_import = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            import_line = False
            from_import = False
        elif token_string == "import":
            import_line = True
        elif token_string == "from":
            from_import = True
        elif import_line:
            if token_type == tokenize.NAME and tokens[index+1][1] != 'as':
                if not from_import:
                    if token_string not in reserved_words:
                        if token_string not in imported_modules:
                            imported_modules.append(token_string)
    return imported_modules
5,356,919
def solved(maze):
    """Checks if the maze was solved. The maze is solved, if there is no 3 to be found.

    Returns:
        True if the maze has no 3.
    """
    # TODO: Extend this function to properly check for 3s inside the maze.
    return True
5,356,920
def safe_htcondor_attribute(attribute: str) -> str:
    """Convert input attribute name into a valid HTCondor attribute name

    HTCondor ClassAd attribute names consist only of alphanumeric characters or
    underscores. It is not clearly documented, but the alphanumeric characters
    are probably restricted to ASCII. Attribute names created from multiple
    words typically capitalize the first letter in each word for readability,
    although all comparisons are case-insensitive.

    e.g., "central-manager" -> "CentralManager"

    Args:
        attribute: a string representing the name of an attribute

    Returns:
        The attribute name stripped of invalid characters and re-capitalized in
        the manner typical of HTCondor ClassAd attributes.

    Raises:
        None
    """
    # splitting by invalid characters removes them from the resulting array
    split_attr = re.split(r"[^\w]", attribute, flags=re.ASCII)
    safe_attr = "".join([word.capitalize() for word in split_attr if word])
    return safe_attr
5,356,921
def store_booster(set_dict: Dict) -> None:
    """Fills boosters table from sets collection. """
    s = Set.objects.get(id=set_dict['code'])
    booster = Booster.objects.get_or_create(set=s)[0]
    booster_slots = []

    single_slots = [s for s in set_dict['booster'] if isinstance(s, str)]
    multi_slots = [s for s in set_dict['booster'] if isinstance(s, list)]

    for rarity, number in Counter(single_slots).items():
        slot = Slot.objects.get_or_create(number=number, booster=booster)[0]
        rarity = Rarity.objects.get_or_create(**parse_rarity(rarity))[0]
        slot.rarities.add(rarity)
        slot.save()
        booster_slots.append(slot)

    for multi_slot in multi_slots:
        slot = Slot.objects.get_or_create(number=1, booster=booster)[0]
        for rarity in multi_slot:
            rarity = Rarity.objects.get_or_create(**parse_rarity(rarity))[0]
            slot.rarities.add(rarity)
        slot.save()
        booster_slots.append(slot)
5,356,922
def test_save_layer_single_no_named_plugin(tmpdir, layer_data_and_types):
    """Test saving a single layer without naming plugin."""
    # make writer builtin plugins get called first
    from napari.plugins import plugin_manager
    plugin_manager.hooks.napari_write_image.bring_to_front(['builtins'])
    plugin_manager.hooks.napari_write_points.bring_to_front(['builtins'])

    layers, _, _, filenames = layer_data_and_types
    for layer, fn in zip(layers, filenames):
        path = os.path.join(tmpdir, fn)

        # Check file does not exist
        assert not os.path.isfile(path)

        # Write data
        save_layers(path, [layer])

        # Check file now exists
        assert os.path.isfile(path)
5,356,923
def assert_inheritance(obj, cls):
    """Asserts whether an object inherits from a particular class. Uses isinstance.

    Parameters
    ----------
    obj : object
        The object to test.
    cls : Class
        Class to check obj is an instance of.
    """
    check_is_class(cls)
    assert isinstance(
        obj, cls
    ), f"Incorrect inheritance - passed obj of class {obj.__class__.__name__} is not an instance of {cls}"
5,356,924
def main(): """Main function that reads command-line args and starts Tornado server""" args = parser.parse_args() app = Application(args) app.listen(args.local_port, '0.0.0.0') print(f"open http://127.0.0.1:{args.local_port} in your browser to view the application") if not args.websocket_server: raise RuntimeError("Please specify the server to connect to with --websocket-server") tornado.ioloop.IOLoop.current().spawn_callback(app.subscribe_frames) tornado.ioloop.IOLoop.current().spawn_callback(app.subscribe_twod) try: tornado.ioloop.IOLoop.current().start() except KeyboardInterrupt: tornado.ioloop.IOLoop.instance().stop()
5,356,925
def _permutations(objs: Iterable[object]) -> Iterable[Iterable[object]]:
    """Return a list of permutations, all of which are deep copied"""
    def deep_copy_iterator(perm: Iterable[object]) -> Iterable[object]:
        for o in perm:
            if inspect.ismodule(o) or isinstance(o, str):
                yield o
            else:
                yield copy.deepcopy(o)

    for perm in itertools.permutations(objs):
        yield deep_copy_iterator(perm)
5,356,926
def store_data(df: pd.DataFrame, path: str):
    """Store DataFrame to pickle compressed with gzip.

    Args:
        df (pd.DataFrame): DataFrame to be stored.
        path (str): Path where to store created file.
    """
    df.to_pickle(path="{}.pkl.gz".format(path), compression="gzip")
5,356,927
def focal_length_to_fov(focal_length, length):
    """Convert focal length to field-of-view (given length of screen)"""
    fov = 2 * np.arctan(length / (2 * focal_length))
    return fov
5,356,928
def create_generic_connection(connection, verbose: bool = False):
    """
    Generic Engine creation from connection object
    :param connection: JSON Schema connection model
    :param verbose: debugger or not
    :return: SQLAlchemy Engine
    """
    options = connection.connectionOptions
    if not options:
        options = ConnectionOptions()

    engine = create_engine(
        get_connection_url(connection),
        **options.dict(),
        connect_args=get_connection_args(connection),
        echo=verbose,
    )
    return engine
5,356,929
def main(): """Main part of script to execute. """ args = parse_command_line() # print('\n{0}\n\n'.format(args,)) # return # print('User VENV Location = {0}\nExists: {1}\n'.format(VENV_LOCATION, check_venv())) if 'language' in vars(args): if args.language == 'all': process_languages = LANGUAGES elif check_language(args.language): process_languages = [args.language] else: print('\nInvalid language selected: {0}'.format(args.language)) if not ('build_target' in vars(args) and (args.build_target == 'po')): print('\nPlease select from:') list_languages() exit_with_code(1) else: process_languages = [DEFAULT_LANGUAGE] if 'info_type' in vars(args): if args.info_type == 'about': print(ABOUT_TEXT) sys.exit(0) elif args.info_type == 'warranty': print(WARRANTY_TEXT) sys.exit(0) elif args.info_type == 'languages': print('\nSupported languages are:') list_languages() sys.exit(0) else: print("\nUnknown info type: '{0}'\n".format(args.info_type)) exit_with_code(1) elif 'build_target' in vars(args): if args.build_target in SPHINX_BUILD_TARGETS.keys(): for lang in process_languages: do_build(target=args.build_target, language=lang) elif args.build_target == 'po': build_pot() for lang in process_languages: if lang != DEFAULT_LANGUAGE: update_po(lang) elif args.build_target == 'pot': build_pot() check_sphinx_intl() print('\nUpdating PO files for other languages.') for lang in LANGUAGE_LIST.keys(): if lang != DEFAULT_LANGUAGE: print("\n\nUpdating the '{0}' ({1}) files.\n".format(lang, LANGUAGE_LIST[lang])) update_po(lang) elif args.build_target == 'clean': for target, target_dir in [('html', 'html'), ('pdf', 'latex')]: clean_dir = os.path.join(SPHINX_BUILD_DIR, target_dir) clean_directory(clean_dir, target) else: print("\nUnknown build target: '{0}'\n".format(args.build_target)) exit_with_code(1) elif 'clean_target' in vars(args): if args.clean_target in SPHINX_BUILD_TARGETS.keys(): clean_dir = os.path.join(SPHINX_BUILD_DIR, SPHINX_BUILD_TARGETS[args.clean_target]['dir']) clean_directory(clean_dir, args.clean_target) else: print("\nUnknown clean target: '{0}'\n".format(args.clean_target)) exit_with_code(1) elif 'test_target' in vars(args): if args.test_target == 'rst': run_lint(SPHINX_SOURCE_DIR, ignore_info=IGNORE_INFO_MESSAGES, fail_on_warnings=FAIL_ON_WARNINGS) elif args.test_target == 'html': print('\nThat function is still under development.\n') exit_with_code(1) elif args.test_target == 'pdf': print('\nThat function is still under development.\n') exit_with_code(1) else: print("\nUnknown test target: '{0}'\n".format(args.test_target)) exit_with_code(1) else: # show help information show_help() # sys.exit(1) # for arg in vars(args): # print("arg: {0} = {1}".format(arg, getattr(args, arg))) exit_with_code(0)
5,356,930
def make_csv(headers, data):
    """
    Creates a CSV given a set of headers and a list of database query results

    :param headers: A list containing the first row of the CSV
    :param data: The list of query results from the Database
    :returns: A str containing a csv of the query results
    """
    # Create a list where each entry is one row of the CSV file, starting
    # with the headers
    csvRows = [','.join(headers)]

    # Iterate through the provided data and create the rest of the CSV's rows
    for datum in data:
        currentRow = ''
        for header in headers:
            # Get this row's value for the given header
            val = getattr(datum, header)
            if type(val) is str:
                # Escape the strings
                currentRow += '"' + val + '",'
            elif type(val) is float:
                # Don't escape the floats
                currentRow += str(val) + ','
            else:
                # If it is empty and a place holder
                currentRow += ','
        csvRows.append(currentRow[:-1])

    # Combine all of the rows into a single string and return it.
    return "\n".join(csvRows)
5,356,931
def draw_color_rect(buf,ix,iy,size,wrect,color): """ draw a square centerd on x,y filled with color """ code = """ int nd = %d; int x, y, i, j; int ny = 1 + 2 * nd; int nx = ny; y = iy - nd; if (y < 0) { ny += y; y = 0; } else if ((y + ny) > dimy) ny -= y + ny - dimy; x = ix - nd; if (x < 0) { nx += x; x = 0; } else if ((x + nx) > dimx) nx -= x + nx - dimx; int k = y * dimx * 3 + 3 * x; int deltak = 3 * (dimx - nx); for (i = 0;i < ny;i++) { for (j = 0;j < nx;j++) { #if 1 *(buf+k++) = color[0]; *(buf+k++) = color[1]; *(buf+k++) = color[2]; #else *(buf+k) = (*(buf+k) / 2) + (color[0] / 2); k++; *(buf+k) = (*(buf+k) / 2) + (color[1] / 2); k++; *(buf+k) = (*(buf+k) / 2) + (color[2] / 2); k++; #endif } k += deltak; } """ %wrect (dimx,dimy) = (size[0],size[1]) #ll lqprint "XX %d %d" %(ix,iy) if(ix < 0 or iy < 0 or ix >= dimx or iy >= dimy): return() weave.inline(code,['buf' ,'ix','iy','dimx','dimy','color'])
5,356,932
def convert_fill_constant(g, op, block):
    """Operator converter for fill_constant."""
    value = op.attr('value')
    shape = block.var(op.output('Out')[0]).shape
    dtype = block.var(op.output('Out')[0]).dtype
    dtype = str(dtype).strip().split('.')[1]
    value = np.full(shape, value, dtype)
    out = _expr.const(value.astype(dtype)).astype(dtype)
    g.add_node(op.output('Out')[0], out)
5,356,933
def test_validate_python(mock_exit):
    """Test validate Python version method."""
    with patch('sys.version_info', new_callable=PropertyMock(return_value=(2, 7, 8))):
        main.validate_python()
        assert mock_exit.called is True

    mock_exit.reset_mock()

    with patch('sys.version_info', new_callable=PropertyMock(return_value=(3, 2, 0))):
        main.validate_python()
        assert mock_exit.called is True

    mock_exit.reset_mock()

    with patch('sys.version_info', new_callable=PropertyMock(return_value=(3, 4, 1))):
        main.validate_python()
        assert mock_exit.called is True

    mock_exit.reset_mock()

    with patch('sys.version_info', new_callable=PropertyMock(return_value=(3, 4, 2))):
        main.validate_python()
        assert mock_exit.called is False

    mock_exit.reset_mock()

    with patch('sys.version_info', new_callable=PropertyMock(return_value=(3, 5, 1))):
        main.validate_python()
        assert mock_exit.called is False
5,356,934
def get_template_versions(obj, pretty_print, beep, template_id, headers):
    """Returns the versions of a specified template.
    """
    spinner = init_spinner(beep=beep)
    start_spinner(spinner)
    try:
        if headers is not None:
            headers = json.loads(headers)
        result = obj.get_template_versions(
            template_id=template_id,
            headers=headers)
        stop_spinner(spinner)
        opprint(result, indent=pretty_print)
    except Exception as e:
        stop_spinner(spinner)
        tbprint()
        eprint('Error:', e)
        click.Context.exit(-1)
5,356,935
def _compute_focus_2d(image_2d, kernel_size):
    """Compute a pixel-wise focus metric for a 2-d image.

    Parameters
    ----------
    image_2d : np.ndarray, np.float
        A 2-d image with shape (y, x).
    kernel_size : int
        The size of the square used to define the neighborhood of each pixel.
        An odd value is preferred.

    Returns
    -------
    focus : np.ndarray, np.float64
        A 2-d tensor with the R(y, x) computed for each pixel of the original
        image.
    """
    # mean filtered image
    image_filtered_mean = mean_filter(image_2d, "square", kernel_size)

    # compute focus metric
    ratio_default = np.ones_like(image_2d, dtype=np.float64)
    ratio_1 = np.divide(image_2d, image_filtered_mean,
                        out=ratio_default,
                        where=image_filtered_mean > 0)
    ratio_2 = np.divide(image_filtered_mean, image_2d,
                        out=ratio_default,
                        where=image_2d > 0)
    focus = np.where(image_2d >= image_filtered_mean, ratio_1, ratio_2)

    return focus
5,356,936
def test_ap_wps_er_set_sel_reg_oom(dev, apdev):
    """WPS ER SetSelectedRegistrar OOM"""
    try:
        _test_ap_wps_er_set_sel_reg_oom(dev, apdev)
    finally:
        dev[0].request("WPS_ER_STOP")
5,356,937
def extract_value_from_config(
        config: dict,
        keys: Tuple[str, ...],
):
    """
    Traverse a config dictionary to get some hyper-parameter's value.

    Parameters
    ----------
    config
        A config dictionary.
    keys
        The possible names of a hyper-parameter.

    Returns
    -------
    The hyper-parameter value.
    """
    result = []
    for k, v in config.items():
        if k in keys:
            result.append(v)
        elif isinstance(v, dict):
            result += extract_value_from_config(v, keys)
        else:
            pass

    return result
5,356,938
def boundaryStats(a):
    """
    Returns the minimum and maximum values of a only on the boundaries of the array.
    """
    amin = numpy.amin(a[0,:])
    amin = min(amin, numpy.amin(a[1:,-1]))
    amin = min(amin, numpy.amin(a[-1,:-1]))
    amin = min(amin, numpy.amin(a[1:-1,0]))
    amax = numpy.amax(a[0,:])
    amax = max(amax, numpy.amax(a[1:,-1]))
    amax = max(amax, numpy.amax(a[-1,:-1]))
    amax = max(amax, numpy.amax(a[1:-1,0]))
    return amin, amax
5,356,939
def offset_func(func, offset, *args):
    """ Offsets inputs by offset

    >>> double = lambda x: x * 2
    >>> f = offset_func(double, (10,))
    >>> f(1)
    22
    >>> f(300)
    620
    """
    def _offset(*args):
        args2 = list(map(add, args, offset))
        return func(*args2)

    with ignoring(Exception):
        _offset.__name__ = 'offset_' + func.__name__

    return _offset
5,356,940
def add_summary_logger(experiment, original, value, *args, **kwargs): """ Note: auto_metric_logging controls summary metrics, and auto_metric_logging controls summary histograms Note: assumes "simple_value" is a metric """ try: LOGGER.debug("TENSORBOARD LOGGER CALLED") metrics, histograms, step = extract_from_add_summary(*args, **kwargs) if metrics and experiment.auto_metric_logging: if LOG_METRICS: experiment._log_metrics(metrics, step=step, framework="tensorboard") else: experiment._log_once_at_level( logging.INFO, "ignoring tensorflow summary log of metrics because of keras; set `comet_ml.loggers.tensorboard_logger.LOG_METRICS = True` to override", ) if histograms and experiment.auto_histogram_tensorboard_logging: if LOG_HISTOGRAMS: for histo in histograms: experiment.log_histogram_3d(convert_histograms(histo), step=step) else: experiment._log_once_at_level( logging.INFO, "ignoring tensorflow summary log of histograms because of keras; set `comet_ml.loggers.tensorboard_logger.LOG_HISTOGRAMS = True` to override", ) except Exception: LOGGER.error( "Failed to extract metrics/histograms from add_summary()", exc_info=True )
5,356,941
def get_redshift_schemas(cursor, user):
    """
    Get all the Amazon Redshift schemas on which the user has create permissions
    """
    get_schemas_sql = "SELECT s.schemaname " \
                      "FROM pg_user u " \
                      "CROSS JOIN " \
                      "(SELECT DISTINCT schemaname FROM pg_tables) s " \
                      "WHERE has_schema_privilege(u.usename,s.schemaname,'create') = true " \
                      "AND u.usename = '" + user + "' " \
                      "AND s.schemaname NOT LIKE '%pg_%' " \
                      "AND s.schemaname NOT LIKE '%information_schema%' ;"

    try:
        cursor.execute(get_schemas_sql)
        schemas = cursor.fetchall()
    except Exception as e:
        logger.error('Error in executing SQL: {}'.format(get_schemas_sql))
        raise e

    return convert_to_list(schemas)
5,356,942
def window_features(idx, window_size=100, overlap=10):
    """
    Generate indexes for a sliding window with overlap

    :param array idx: The indexes that need to be windowed.
    :param int window_size: The size of the window.
    :param int overlap: How much should each window overlap.
    :return array view: The indexes for the windows with overlap.
    """
    overlap = window_size - overlap
    sh = (idx.size - window_size + 1, window_size)
    st = idx.strides * 2
    view = np.lib.stride_tricks.as_strided(idx, strides=st, shape=sh)[0::overlap]
    return view
5,356,943
def DetectVisualStudioPath(version_as_year): """Return path to the version_as_year of Visual Studio. """ year_to_version = { '2013': '12.0', '2015': '14.0', '2017': '15.0', '2019': '16.0', } if version_as_year not in year_to_version: raise Exception(('Visual Studio version %s (from version_as_year)' ' not supported. Supported versions are: %s') % ( version_as_year, ', '.join(year_to_version.keys()))) if version_as_year in ('2017', '2019'): # The VC++ 2017+ install location needs to be located using COM instead of # the registry. For details see: # https://blogs.msdn.microsoft.com/heaths/2016/09/15/changes-to-visual-studio-15-setup/ vswhere_path = os.path.expandvars(_VSWHERE_PATH) if os.path.exists(vswhere_path): version = year_to_version[version_as_year] try: out = json.loads(subprocess.check_output([ vswhere_path, '-version', '[{},{})'.format(float(version), float(version) + 1), '-legacy', '-format', 'json', '-utf8', ])) if out: return out[0]['installationPath'] except subprocess.CalledProcessError: pass root_path = r'C:\Program Files (x86)\Microsoft Visual Studio\\' + version_as_year for edition in ['Professional', 'Community', 'Enterprise', 'BuildTools']: path = os.environ.get('vs{}_install'.format(version_as_year), os.path.join(root_path, edition)) if os.path.exists(path): return path else: version = year_to_version[version_as_year] keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version, r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version] for key in keys: path = _RegistryGetValue(key, 'InstallDir') if not path: continue path = os.path.normpath(os.path.join(path, '..', '..')) return path raise Exception(('Visual Studio Version %s (from version_as_year)' ' not found.') % (version_as_year))
5,356,944
def extract_pages(f, filter_namespaces=False): """ Extract pages from a MediaWiki database dump = open file-like object `f`. Return an iterable over (str, str, str) which generates (title, content, pageid) triplets. """ elems = (elem for _, elem in iterparse(f, events=("end",))) # We can't rely on the namespace for database dumps, since it's changed # it every time a small modification to the format is made. So, determine # those from the first element we find, which will be part of the metadata, # and construct element paths. elem = next(elems) namespace = get_namespace(elem.tag) ns_mapping = {"ns": namespace} page_tag = "{%(ns)s}page" % ns_mapping text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping title_path = "./{%(ns)s}title" % ns_mapping ns_path = "./{%(ns)s}ns" % ns_mapping pageid_path = "./{%(ns)s}id" % ns_mapping for elem in elems: if elem.tag == page_tag: title = elem.find(title_path).text text = elem.find(text_path).text if filter_namespaces: ns = elem.find(ns_path).text if ns not in filter_namespaces: text = None pageid = elem.find(pageid_path).text yield title, text or "", pageid # empty page will yield None # Prune the element tree, as per # http://www.ibm.com/developerworks/xml/library/x-hiperfparse/ # except that we don't need to prune backlinks from the parent # because we don't use LXML. # We do this only for <page>s, since we need to inspect the # ./revision/text element. The pages comprise the bulk of the # file, so in practice we prune away enough. elem.clear()
5,356,945
def create_suburbans_answer(from_code, to_code, for_date, limit=3): """ Creates yandex suburbans answer for date by stations codes :param from_code: `from` yandex station code :type from_code: str :param to_code: `to` yandex station code :type to_code: str :param for_date: date for which data should be received :type for_date: date :param limit: limit of segments in answer :type limit: int :return: tuple with `answer`, `is_tomorrow` and `is_error` data :rtype: tuple """ code, data = get_yandex_raw_data(from_code, to_code, for_date) if code != 200: return yandex_error_answer, False, True from_title = data["search"]["from"]["title"] to_title = data["search"]["to"]["title"] answer = "" for segment in data["segments"]: if len(answer.split("\n\n")) > limit: break if datetime_from_string(segment["departure"]) >= datetime.now(): answer += parse_yandex_segment(segment) if answer: answer = "<b>{0}</b> => <b>{1}</b>\n\n".format( from_title, to_title ) + answer is_tomorrow = False else: for_date = date.today() + timedelta(days=1) answer += create_suburbans_answer( from_code, to_code, for_date, limit=5 )[0] is_tomorrow = True return answer, is_tomorrow, False
5,356,946
def update_user(usr):
    """
    Update user and return new data
    :param usr:
    :return object:
    """
    user = session.query(User).filter_by(id=usr['uid']).first()
    user.username = usr['username']
    user.first_name = usr['first_name']
    user.last_name = usr['last_name']
    user.email = usr['email']
    session.commit()
    return user
5,356,947
def get_data(date_from=None, date_to=None, location=None): """Get covid data Retrieve covid data in pandas dataframe format with the time periods and countries provided. Parameters ---------- date_from : str, optional Start date of the data range with format 'YYYY-MM-DD'. By default 'None' is used to represent 7 days prior to today's date date_to : str, optional End date of data range with format 'YYYY-MM-DD'. By default 'None' is used to represent today's date location : list, optional List of target country names. By default 'None' is used for all countries. Returns ------- pandas.DataFrame Pandas dataframe of the selected covid data. Examples -------- >>> get_data(date_from="2022-01-01", date_to="2022-01-07", location=["Canada", "China"]) """ query = "@date_from <= date <= @date_to" url = "https://covid.ourworldindata.org/data/owid-covid-data.csv" if date_from is None: date_from = ( pd.to_datetime("today").normalize() - pd.to_timedelta(7, unit="d") ).strftime("%Y-%m-%d") if date_to is None: date_to = pd.to_datetime("today").normalize().strftime("%Y-%m-%d") try: date_from != datetime.strptime(date_from, "%Y-%m-%d").strftime("%Y-%m-%d") # raise ValueError except ValueError: raise ValueError( "Invalid argument value: date_from must be in format of YYYY-MM-DD. Also check if it is a valid date." ) except TypeError: raise TypeError( "Invalid argument type: date_from must be in string format of YYYY-MM-DD." ) try: date_to != datetime.strptime(date_to, "%Y-%m-%d").strftime("%Y-%m-%d") # raise ValueError except ValueError: raise ValueError( "Invalid argument value: date_to must be in format of YYYY-MM-DD. Also check if it is a valid date." ) except TypeError: raise TypeError( "Invalid argument type: date_to must be in string format of YYYY-MM-DD." ) error_msg = ( "Invalid values: date_from should be smaller or equal" " to date_to (or today's date if date_to is not specified)." ) if pd.to_datetime(date_to) < pd.to_datetime(date_from): raise ValueError( error_msg, ) if pd.to_datetime(date_to) > pd.to_datetime("today").normalize(): raise ValueError("Invalid values: date_to should be smaller or equal to today.") if location is not None: if not (isinstance(location, list)): raise TypeError( "Invalid argument type: location must be a list of strings." ) for item in location: if not (isinstance(item, str)): raise TypeError( "Invalid argument type: values inside location list must be a strings." ) query += " and location in @location" try: covid_df = pd.read_csv(url, parse_dates=["date"]) except BaseException: return "The link to the data is broken." covid_df = covid_df.query(query) covid_df = covid_df[~covid_df["iso_code"].str.startswith("OWID")] return covid_df
5,356,948
def group_by_lambda(array: List[dict], func: GroupFunc) -> Dict[Any, List[dict]]: """ Convert list of objects to dict of list of object when key of dict is generated by func. Example:: grouped = group_by_lambda(detections, lambda x: x.get(DEVICE_ID)) :param array: list of objects to group :param func: give object as param and return key or None, when key is None then object will be excluded The ``func(obj, ret)`` callback provided as arg: Args: * ``obj``: next element from ``array`` * ``ret``: dictionary of just grouped objects Return effect: * ``None``: object will not be added anywhere * *some value* : object will be append to array in *some value* key Note: there are some wrappers for this functions like ``group_by_device_id()``, ``group_by_timestamp_division()``, ``group_by_timestamp_division()``, ``group_by_resolution()``. :return: dict of list of object """ ret = {} for o in array: key = func(o, ret) if key is None: continue os = get_and_set(ret, key, []) os.append(o) return ret
5,356,949
def update(isamAppliance, is_primary, interface, remote, port, health_check_interval, health_check_timeout, check_mode=False, force=False): """ Updating HA configuration """ # Call to check function to see if configuration already exist update_required = _check_enable(isamAppliance, is_primary=is_primary, interface=interface, remote=remote, port=port, health_check_interval=health_check_interval, health_check_timeout=health_check_timeout) if force is True or update_required is True: if check_mode is True: return isamAppliance.create_return_object(changed=True) else: return isamAppliance.invoke_put("Updating HA configuration", module_uri, { "is_primary": is_primary, "interface": interface, "remote": remote, "port": port, "health_check_interval": health_check_interval, "health_check_timeout": health_check_timeout }, requires_modules=requires_module, requires_version=requires_version) else: return isamAppliance.create_return_object()
5,356,950
def compare_images(image_file_name1, image_file_name2, no_print=True): """ Compare two images by calculating Manhattan and Zero norms """ # Source: http://stackoverflow.com/questions/189943/ # how-can-i-quantify-difference-between-two-images img1 = imread(image_file_name1).astype(float) img2 = imread(image_file_name2).astype(float) if img1.size != img2.size: m_norm, z_norm = 2*[2*IMGTOL] else: # Element-wise for Scipy arrays diff = img1-img2 # Manhattan norm m_norm = scipy.sum(numpy.abs(diff)) # Zero norm z_norm = scipy.linalg.norm(diff.ravel(), 0) result = bool((m_norm < IMGTOL) and (z_norm < IMGTOL)) if not no_print: print( 'Image 1: {0}, Image 2: {1} -> ({2}, {3}) [{4}]'.format( image_file_name1, image_file_name2, m_norm, z_norm, result ) ) return result
5,356,951
def prepare_update_mutation_classes(): """ Here it's preparing actual mutation classes for each model. :return: A tuple of all mutation classes """ _models = get_enabled_app_models() _classes = [] for m in _models: _attrs = prepare_update_mutation_class_attributes(model=m) # Creating a fake base class for making mutate properly. _base_class = class_factory(__class_name='Update' + m.__name__, base_classes=(Mutation,), **_attrs) _attrs.update(mutate=prepare_update_mutate(model=m, _mutation_class=_base_class)) _class = class_factory(__class_name='Update' + m.__name__, base_classes=(_base_class,), **_attrs) _classes.append(_class) return tuple(_classes)
5,356,952
def makePacket(ID, instr, reg=None, params=None): """ This makes a generic packet. TODO: look a struct ... does that add value using it? 0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INST, PARAM 1, PARAM 2, ..., PARAM N, CRC_L, CRC_H] in: ID - servo id instr - instruction reg - register params - instruction parameter values out: packet """ pkt = [] pkt += HEADER # header and reserved byte pkt += [ID] pkt += [0x00, 0x00] # length placeholder pkt += [instr] # instruction if reg: pkt += le(reg) # not everything has a register if params: pkt += params # not everything has parameters length = le(len(pkt) - 5) # length = len(packet) - (header(3), reserve(1), id(1)) pkt[5] = length[0] # L pkt[6] = length[1] # H crc = crc16(pkt) pkt += le(crc) print(pkt) return pkt
5,356,953
def vision_matched_template_get_pose(template_match):
    """
    Get the pose of a previously detected template match. Use list operations
    to get specific entries, otherwise returns value of first entry.

    Parameters:
    template_match (List[MatchedTemplate3D] or MatchedTemplate3D): The template match(s)

    Return (Pose): The pose of the template match
    """
    if isinstance(template_match, list):
        template_match = template_match[0]

    return template_match.pose.pose.pose
5,356,954
def create_rotor(model, ring_setting=0):
    """Factory function to create and return a rotor of the given model name."""
    if model in ROTORS:
        data = ROTORS[model]
        return Rotor(model, data['wiring'], ring_setting, data['stepping'])

    raise RotorError("Unknown rotor type: %s" % model)
5,356,955
def prog_parts(fig, ax, z, peaks, title='', vmin=-2.5, vmax=-1.3, sigma=4, lmap= None, ax_label=True): """A plot to show how contours encapsulate the massive progenitors and how the absorption peaks are proxy to the center of mass of these protoclusters""" """ if z is not None : zs, ze = z, z+1 xs, xe = 0, 205 zs, ze = 0, 205 if x is not None : xs, xe = x. x+1 ys, ye = 0, 205 zs, ze = 0, 205 if y is not None: ys, ye = y, y+1 xs, xe = 0, 205 ys, ye = 0,205 """ DM_prog_all = h5py.File('./progenitors/Full_prog_map.hdf5','r')['DM'][:] #num_prog_parts = h5py.File('./spectra/maps/map_PC_prog_R200_all_clusters.hdf5','r')['num_parts'][()] # Limit the range of variation for plotting purposses DM_prog_all = gf(DM_prog_all, sigma=4 , mode='wrap') ind = DM_prog_all == 0 DM_prog_all[ind] = 1e-10 DM_prog_all = np.log10(DM_prog_all) all_clusters = h5py.File('clusters_TNG300-1.hdf5','r') fcofm = h5py.File('cofm_progenitors.hdf5','r') cmap = plt.get_cmap('jet') # 0, 2 im = ax.imshow(DM_prog_all[:,:,z], extent=[0,DM_prog_all.shape[0],0,DM_prog_all.shape[1]], origin='lower', cmap=cmap, interpolation='bilinear', vmin=vmin, vmax=vmax) if lmap is not None : ax.contour(lmap[:,:,z], levels=np.arange(.1, np.unique(lmap).size), origin='lower', colors='w', linewidths=1.8) # Draw the absorption peaks mask = np.ones((peaks['z'][:].shape[0],), dtype=bool) mask *= peaks['z'][:] >= z-np.around(sigma) mask *= peaks['z'][:] <= z+np.around(sigma) ax.scatter(peaks['y'][mask], peaks['x'][mask], marker='*', s=150, color='lime', label='Absorption peaks') # Draw the Center of Mass of the proto-clusters/gropus mask = np.ones((fcofm['z'][:].shape[0],), dtype=bool) mask *= fcofm['z'][:] >= z-np.around(sigma) mask *= fcofm['z'][:] <= z+np.around(sigma) masses = all_clusters['Mass'][:][fcofm['cluster_ind'][:][mask].astype(int)] ind1, ind2, ind3 = np.where(masses<10**4.0), np.where((masses < 10**4.5)*(masses > 10**4.0)), np.where(masses > 10**4.5) ax.scatter(fcofm['y'][mask][ind1], fcofm['x'][mask][ind1], marker='D', s=150, c='orchid', label=r'$ 10^{13.5} < M < 10^{14} $') ax.scatter(fcofm['y'][mask][ind2], fcofm['x'][mask][ind2], marker='D', s=150, c='cyan', label=r'$ 10^{14} < M < 10^{14.5} $') ax.scatter(fcofm['y'][mask][ind3], fcofm['x'][mask][ind3], marker='D', s=150, c='w', label=r'$ 10^{14.5} < M$') ax.set_title(title, fontsize=50) if ax_label: ax.set_xlabel('cMpc/h', fontsize=50) ax.set_ylabel('cMpc/h', fontsize=50) ax.tick_params(labelsize=50, width=5, length=10) ax.set_xticks(np.arange(0,DM_prog_all.shape[0],200)) ax.set_yticks(np.arange(0,DM_prog_all.shape[0],200)) else : ax.set_xticks([]) ax.set_yticks([]) cb = fig.colorbar(im , ax=ax, orientation='horizontal', fraction=0.047, pad=0.01) cb.ax.tick_params(labelsize=30, width=5, length=10) cb.set_label(r'$log_{10} (\rho_m / \bar{\rho}_m)$', size=45) plt.legend(loc=(1.005,0), fontsize=25)
5,356,956
def get_description(sequence, xrefs, taxid=None): """ Compute a description for the given sequence and optional taxon id. This function will use the rule scoring if possible, otherwise it will fall back to the previous scoring method. In addition, if the rule method cannot produce a name it also falls back to the previous method. Providing a taxon id means to create a species name that is specific for the sequence in the given organism, otherwise one is created that is general for all species that this sequence is found in. Parameters ---------- sequence : Rna The sequence to generate a name for. taxid : int, None The taxon id to use Returns ------- description : str The description of this sequence. """ logger.debug("Computing get_description for %s (%s)", sequence.upi, taxid) name = _rm.get_description(sequence, xrefs, taxid=taxid) if not name: logger.debug("New style method failed, using score") return name or _sm.get_description(sequence, taxid=taxid)
5,356,957
def compute_ess(samples):
    """Compute an estimate of the effective sample size (ESS).

    See the [Stan manual](https://mc-stan.org/docs/2_18/reference-manual/effective-sample-size-section.html)
    for a definition of the effective sample size in the context of MCMC.

    Args:
        samples: Tensor, vector (n,), float32 of n sequential observations.

    Returns:
        ess: float, effective sample size, >= 1, <= n.
        efficiency: float, >= 0.0, the relative efficiency obtained compared to
            the naive Monte Carlo estimate which has an efficiency of one.
    """
    ess, efficiency = compute_ess_multidimensional(
        tf.reshape(samples, (1, tf.size(samples))))

    ess = ess[0]
    efficiency = efficiency[0]

    return ess, efficiency
5,356,958
def uptime():
    """Returns uptime in milliseconds, starting at first call"""
    if not hasattr(uptime, "t0"):
        uptime.t0 = time.time()
    return int((time.time() - uptime.t0) * 1000)
5,356,959
def load_plot(axis, plot, x_vals, y1=None, y2=None, y3=None, y4=None, title="", xlab="", ylab="", ltype=[1, 1, 1, 1], marker=['g-', 'r-', 'b-', 'k--']): """ Function to load the matplotlib plots. :param matplotlib.Axis axis: the matplotlib axis object. :param matplotlib.FigureCanvas plot: the matplotlib plot object. :param list x_vals: list of the x values to plot. :keyword float y1: list of the first data set y values to plot. :keyword float y2: list of the second data set y values to plot. :keyword float y3: list of the third data set y values to plot. :keyword float y4: list of the fourth data set y values to plot. :keyword str title: the title for the plot. :keyword str xlab: the x axis label for the plot. :keyword str ylab: the y axis label for the plot. :keyword int ltype: list of the type of line to plot. Options are: 1 = step 2 = plot 3 = histogram 4 = date plot :keyword str marker: list of the markers to use on the plot. Defaults are: g- = green solid line r- = red solid line b- = blue solid line k- = black dashed line :return: False if successful or True if an error is encountered. :rtype: bool """ # WARNING: Refactor load_plot; current McCabe Complexity metric=23. axis.cla() axis.grid(True, which='both') _x_min = min(x_vals) _x_max = max(x_vals) _y_min = 0.0 _lst_min = [0.0] _lst_max = [] if y1 is not None: if ltype[0] == 1: line, = axis.step(x_vals, y1, marker[0], where='mid') line.set_ydata(y1) _lst_min.append(min(y1)) _lst_max.append(max(y1)) elif ltype[0] == 2: line, = axis.plot(x_vals, y1, marker[0], linewidth=2) line.set_ydata(y1) _lst_min.append(min(y1)) _lst_max.append(max(y1)) elif ltype[0] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=y1, color=marker[0]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[0] == 4: line, = axis.plot_date(x_vals, y1, marker[0], xdate=True, linewidth=2) _lst_min.append(min(y1)) _lst_max.append(max(y1)) _y_min = min(y1) if y2 is not None: if ltype[1] == 1: line2, = axis.step(x_vals, y2, marker[1], where='mid') line2.set_ydata(y2) _lst_min.append(min(y2)) _lst_max.append(max(y2)) elif ltype[1] == 2: line2, = axis.plot(x_vals, y2, marker[1], linewidth=2) line2.set_ydata(y2) _lst_min.append(min(y2)) _lst_max.append(max(y2)) elif ltype[1] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=len(y2), color=marker[1]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[1] == 4: line2, = axis.plot_date(x_vals, y2, marker[1], xdate=True, linewidth=2) _lst_min.append(min(y2)) _lst_max.append(max(y2)) _y_min = min(y2) if y3 is not None: if ltype[2] == 1: line3, = axis.step(x_vals, y3, marker[2], where='mid') line3.set_ydata(y3) _lst_min.append(min(y3)) _lst_max.append(max(y3)) elif ltype[2] == 2: line3, = axis.plot(x_vals, y3, marker[2], linewidth=2) line3.set_ydata(y3) _lst_min.append(min(y3)) _lst_max.append(max(y3)) elif ltype[2] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=len(y3), color=marker[2]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[2] == 4: line3, = axis.plot_date(x_vals, y3, marker[2], xdate=True, linewidth=2) _lst_min.append(min(y3)) _lst_max.append(max(y3)) _y_min = min(y3) if y4 is not None: if ltype[3] == 1: line4, = axis.step(x_vals, y4, marker[3], where='mid') line4.set_ydata(y4) _lst_min.append(min(y4)) 
_lst_max.append(max(y4)) elif ltype[3] == 2: line4, = axis.plot(x_vals, y4, marker[3], linewidth=2) line4.set_ydata(y4) _lst_min.append(min(y4)) _lst_max.append(max(y4)) elif ltype[3] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=len(y4), color=marker[3]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[3] == 4: line4, = axis.plot_date(x_vals, y4, marker[3], xdate=True, linewidth=2) _lst_min.append(min(y4)) _lst_max.append(max(y4)) _y_min = min(y4) axis.set_title(title, {'fontsize': 16, 'fontweight': 'bold', 'verticalalignment': 'baseline', 'horizontalalignment': 'center'}) # Set the x-axis label. _x_pos = (_x_max - _x_min) / 2.0 _y_pos = _y_min - 0.65 axis.set_xlabel(xlab, {'fontsize': 14, 'fontweight': 'bold', 'verticalalignment': 'center', 'horizontalalignment': 'center', 'x': _x_pos, 'y': _y_pos}) # Set the y-axis label. axis.set_ylabel(ylab, {'fontsize': 14, 'fontweight': 'bold', 'verticalalignment': 'center', 'horizontalalignment': 'center', 'rotation': 'vertical'}) # Get the minimum and maximum y-values to set the axis bounds. If the # maximum value is infinity, use the next largest value and so forth. _min = min(_lst_min) _max = _lst_max[0] for i in range(1, len(_lst_max)): if _max < _lst_max[i] and _lst_max[i] != float('inf'): _max = _lst_max[i] axis.set_ybound(_min, _max) plot.draw() return False
5,356,960
def configure(output_path, install_dir=UNDEFINED, wrap_info=UNDEFINED, verbose_level=UNDEFINED, debug=UNDEFINED, freeze=UNDEFINED, update_shebang=UNDEFINED): """Change the STATE and update the file if necessary""" state = STATE.copy() if install_dir is not UNDEFINED: if install_dir is not None: install_dir = Path(install_dir).resolve() state['install_dir'] = install_dir if wrap_info is not UNDEFINED: check_wrap_info(wrap_info) state['wrap_mode'] = wrap_info.wrap_mode state['wraps'] = wrap_info.wraps if verbose_level is not UNDEFINED: state['verbose_level'] = verbose_level if debug is not UNDEFINED: state['debug'] = debug if freeze is not UNDEFINED: state['freeze'] = freeze if update_shebang is not UNDEFINED: state['update_shebang'] = update_shebang replace_state(output_path, state)
5,356,961
def _optimize_loop_axis(dim):
    """
    Chooses kernel parameters including CUDA block size, grid size, and number
    of elements to compute per thread for the loop axis. The loop axis is the
    axis of the tensor for which a thread can compute multiple outputs. Uses a
    simple heuristic which tries to get at least 4 warps per block and 8 items
    per thread to hide latencies. Prefers a higher item-per-thread to launching
    many blocks for very large axes since blocks are serialized by the GPU
    after all SMs are filled.

    Arguments:
        dim (int): Size of the tensor on the loop axis.

    Returns: tuple of grid dimension, block dimension, and items per thread
    """
    sm_count = _get_sm_count()

    griddim = min(sm_count, -((-dim) // 32))
    items_per_block = -((-dim) // griddim)
    items_per_thread = 1
    warps = -((-items_per_block) // (32 * items_per_thread))

    while (warps > 4 and items_per_thread < 8) or (warps > 32):
        items_per_thread = items_per_thread + 1
        warps = -((-items_per_block) // (32 * items_per_thread))

    blockdim = warps * 32

    return (griddim, blockdim, items_per_thread)
5,356,962
def no_zero(t):
    """
    This function replaces all zeros in a tensor with ones. This allows us to
    take the logarithm and then sum over all values in the matrix.

    Args:
        t: tensor to be replaced
    returns:
        t: tensor with ones instead of zeros.
    """
    t[t == 0] = 1.
    return t
5,356,963
def php_implode(*args):
    """
    >>> array = Array('lastname', 'email', 'phone')
    >>> php_implode(",", array)
    'lastname,email,phone'

    >>> php_implode('hello', Array())
    ''
    """
    if len(args) == 1:
        assert isinstance(args, list)
        return "".join(args)
    assert len(args) == 2
    assert (isinstance(args[0], str) and isinstance(args[1], Array)) or \
           (isinstance(args[1], str) and isinstance(args[0], Array))
    _glue = args[0] if isinstance(args[0], str) else args[1]
    _array = args[1] if isinstance(args[1], Array) else args[0]
    return _glue.join([str(x) for x in _array.values()])
5,356,964
def RemoveInflectionalParticle(context):
    """
    Remove Inflectional particle (lah|kah|tah|pun).

    Asian J. (2007) "Effective Techniques for Indonesian Text Retrieval". page 60
    @link http://researchbank.rmit.edu.au/eserv/rmit:6312/Asian.pdf
    """
    result = re.sub(r'-*(lah|kah|tah|pun)$', '', context.current_word, 1)
    if result != context.current_word:
        removed_part = re.sub(result, '', context.current_word, 1)
        removal = Removal(context.current_word, result, removed_part, 'P')
        context.add_removal(removal)
        context.current_word = result
5,356,965
def import_murals(infile): """ Imports murals from Montréal Open Data's Geo-JSON export. """ json_data = json.loads(infile.read()) # Imports each mural using the Geo-JSON export. murals_count, murals_errored_count = 0, 0 for feature in json_data.get('features'): try: import_id = feature['properties']['id'] click.echo(click.style( 'Start importing the mural with identifier "{}"'.format(import_id), fg='magenta'), nl=False) _import_mural(feature) click.echo(click.style(' [OK]', fg='green', bold=True)) except Exception as e: murals_errored_count += 1 msg = 'Unable to import the mural with identifier "{0}": {1}'.format(import_id, e) click.echo(click.style(msg, fg='red')) logger.error(msg, exc_info=True) else: murals_count += 1 click.echo(click.style( '\nMurals imported: {0} / Murals errored: {1}'.format(murals_count, murals_errored_count), bg='blue', fg='white', bold=True))
5,356,966
def make_incompressible(velocity: Grid, domain: Domain, obstacles: tuple or list = (), solve_params: math.LinearSolve = math.LinearSolve(None, 1e-3), pressure_guess: CenteredGrid = None): """ Projects the given velocity field by solving for the pressure and subtracting its spatial_gradient. This method is similar to :func:`field.divergence_free()` but differs in how the boundary conditions are specified. Args: velocity: Vector field sampled on a grid domain: Used to specify boundary conditions obstacles: List of Obstacles to specify boundary conditions inside the domain (Default value = ()) pressure_guess: Initial guess for the pressure solve solve_params: Parameters for the pressure solve Returns: velocity: divergence-free velocity of type `type(velocity)` pressure: solved pressure field, `CenteredGrid` iterations: Number of iterations required to solve for the pressure divergence: divergence field of input velocity, `CenteredGrid` """ input_velocity = velocity active = domain.grid(HardGeometryMask(~union(*[obstacle.geometry for obstacle in obstacles])), extrapolation=domain.boundaries['active_extrapolation']) accessible = domain.grid(active, extrapolation=domain.boundaries['accessible_extrapolation']) hard_bcs = field.stagger(accessible, math.minimum, domain.boundaries['accessible_extrapolation'], type=type(velocity)) velocity = layer_obstacle_velocities(velocity * hard_bcs, obstacles).with_(extrapolation=domain.boundaries['near_vector_extrapolation']) div = divergence(velocity) if domain.boundaries['near_vector_extrapolation'] == math.extrapolation.BOUNDARY: div -= field.mean(div) # Solve pressure def laplace(p): grad = spatial_gradient(p, type(velocity)) grad *= hard_bcs grad = grad.with_(extrapolation=domain.boundaries['near_vector_extrapolation']) div = divergence(grad) lap = where(active, div, p) return lap pressure_guess = pressure_guess if pressure_guess is not None else domain.scalar_grid(0) converged, pressure, iterations = field.solve(laplace, y=div, x0=pressure_guess, solve_params=solve_params, constants=[active, hard_bcs]) if math.all_available(converged) and not math.all(converged): raise AssertionError(f"pressure solve did not converge after {iterations} iterations\nResult: {pressure.values}") # Subtract grad pressure gradp = field.spatial_gradient(pressure, type=type(velocity)) * hard_bcs velocity = (velocity - gradp).with_(extrapolation=input_velocity.extrapolation) return velocity, pressure, iterations, div
5,356,967
def create_and_empty_dir(tdir, label, suffix=datetime.now().strftime("%Y%m%d%H%M%S"), sep='__', simulation=False):
    """
    Tests if directory exists, if not creates it.
    If yes, tests if readable/writable.
    Returns True if new directory created,
    False if already existed and emptied (keeping directory, not contents)

    :param tdir: path to directory to be tested/created
    :type tdir: str
    :param label: label for type of directory being tested/created
    :type label: str
    :param suffix: string appended (after sep) to the renamed old directory name
    :type suffix: str
    :param sep: string that separates the suffix from the original directory name
    :type sep: str
    :param simulation: True if simulation only (no changes to be made), False if commands should be executed
    :type simulation: bool
    :rtype: bool
    """
    if os.path.isdir(tdir):
        if not os.access(tdir, os.W_OK):
            msg = "Cannot write to pipeline {l} directory '{d}'".format(l=label, d=tdir)
            log.critical(msg)
            raise ONEFluxPipelineError(msg)
        if not os.access(tdir, os.R_OK):
            msg = "Cannot read from pipeline {l} directory '{d}'".format(l=label, d=tdir)
            log.critical(msg)
            raise ONEFluxPipelineError(msg)
        new_tdir = tdir + sep + suffix
        if not simulation:
            shutil.rmtree(path=tdir, ignore_errors=False, onerror=None)
            os.makedirs(new_tdir)
            os.makedirs(tdir)
            log.debug("Pipeline {l} moved EMPTY directory '{o}' to '{n}'".format(l=label, o=tdir, n=new_tdir))
            log.debug("Created '{d}'".format(d=tdir))
        return False
    else:
        if not simulation:
            os.makedirs(tdir)
            log.debug("Created '{d}'".format(d=tdir))
        return True
5,356,968
def no_source( time: datetime, glat: float, glon: float, Nbins: int, Talt: float, Thot: float ) -> xarray.Dataset: """testing only, may give non-physical results""" idate, utsec = glowdate(time) ip = gi.get_indices([time - timedelta(days=1), time], 81) cmd = [ str(get_exe()), idate, utsec, str(glat), str(glon), str(ip["f107s"][1]), str(ip["f107"][1]), str(ip["f107"][0]), str(ip["Ap"][1]), "-nosource", str(Nbins), str(Talt), str(Thot), ] dat = subprocess.check_output(cmd, timeout=15, stderr=subprocess.DEVNULL, text=True) return glowread(dat, time, ip, glat, glon)
5,356,969
def reduce_to_1D(ds, latitude_range, latitude_name='Latitude', time_mean=True, time_name='Time'):
    """
    Reduce a dataset to one dimension.

    Selects the latitudes that fall within latitude_range, collapses the
    latitude dimension to a binary indicator (1 where the sum over latitude
    is positive, 0 otherwise) and, if time_mean is True, averages the result
    over the time dimension.
    """
    if isinstance(latitude_range, (int, float)):
        latitude_range = [latitude_range, latitude_range]
    elif len(latitude_range) == 1:
        latitude_range = [latitude_range[0], latitude_range[0]]
    elif len(latitude_range) != 2:
        errmsg = ' '.join(['latitude_range has to be float or list of',
                           'one or two floats and not {}']).format(
                               latitude_range)
        raise ValueError(errmsg)

    lats = ds[latitude_name].data
    lats = lats[(lats >= latitude_range[0]) & (lats <= latitude_range[1])]
    ds = ds.sel(**{latitude_name: lats})
    ds = (ds.sum(latitude_name) > 0).astype(int)
    if time_mean:
        ds = ds.mean(time_name)
    return ds
5,356,970
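A small usage sketch, assuming xarray is available; the toy dataset below (variable "flag" on Time/Latitude dimensions) is hypothetical.

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"flag": (("Time", "Latitude"), np.array([[0, 1, 0], [0, 0, 0]]))},
    coords={"Time": [0, 1], "Latitude": [-10.0, 0.0, 10.0]},
)
out = reduce_to_1D(ds, latitude_range=[-5.0, 15.0])
print(float(out["flag"]))  # 0.5: the band is "active" at one of the two time steps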
def get_y_generator_method(x_axis, y_axis):
    """Return the y-value generator method for the given x-axis.

    Arguments:
    x_axis -- an instance of an XAxis class
    y_axis -- an instance of a YAxis class

    Returns:
    A reference to the y-value generator. Raises ValueError if the axes
    cannot be paired or if the y-axis does not define the generator method.
    """
    try:
        method_name = AXIS_PAIRS[x_axis.slug][y_axis.slug]
    except KeyError:
        raise ValueError("A %(x)s x-axis cannot be paired with a %(y)s y-axis" % {
            'x': x_axis.__class__.name,
            'y': y_axis.__class__.name
        })

    y_method = getattr(y_axis, method_name, None)
    if not y_method:
        raise ValueError("No method named '%(method)s' exists for the %(axis)s y-axis" % {
            'method': method_name,
            'axis': y_axis.__class__.name
        })

    return y_method
5,356,971
def generate_filename(table_type, table_format): """Generate the table's filename given its type and file format.""" ext = TABLE_FORMATS[table_format] return f'EIA_MER_{table_type}.{ext}'
5,356,972
def rescale_coords(df,session_epochs,maze_size_cm):
    """
    rescale xy coordinates of each epoch into cm
    note: a linear track is detected automatically from the x to y range ratio

    input:
        df: [ts,x,y] pandas data frame
        session_epochs: nelpy epoch class with epoch times
        maze_size_cm: list with size of maze in cm for each epoch
    output:
        df: rescaled df
    """
    for i,val in enumerate(session_epochs.data):
        temp_df = df[df['ts'].between(val[0],val[1])]

        x_range = np.nanmax(temp_df.x) - np.nanmin(temp_df.x)
        y_range = np.nanmax(temp_df.y) - np.nanmin(temp_df.y)

        x_y_ratio = x_range/y_range

        # if the ratio of x to y is > 5, it is probably a linear track
        if x_y_ratio > 5:
            df.loc[df['ts'].between(val[0],val[1]),'x'] = rescale(temp_df.x,0,maze_size_cm[i])
            df.loc[df['ts'].between(val[0],val[1]),'y'] = rescale(temp_df.y,0,maze_size_cm[i]/x_y_ratio)
        else:
            df.loc[df['ts'].between(val[0],val[1]),'x'] = rescale(temp_df.x,0,maze_size_cm[i])
            df.loc[df['ts'].between(val[0],val[1]),'y'] = rescale(temp_df.y,0,maze_size_cm[i])

    return df
5,356,973
def newsreader_debug():
    """Given a query, return the news check result in debug mode."""
    query = request.args.get('query')
    if query is None:
        return 'No query provided.', 400
    result = SL.news_check(query, debug=True)
    if result is None:
        return 'Not found: %s' % query, 400
    return result, 200
5,356,974
def test_make_resource(pool):
    """
    Test that the resource object returned from _make_resource is the proper
    class instance.
    """
    r, _ = pool._make_resource()
    assert pool.size == 1
    assert isinstance(r, _ResourceTracker)
5,356,975
def test_modify_stats(m_app, m_get_filters): """ Test the modify_stats method in controller """ handle = RandomizerHandler(m_app) with mock.patch("controller.create_stat_modifier"): handle._modify_stats()
5,356,976
def get_vocabulary(query_tree): """Extracts the normalized search terms from the leaf nodes of a parsed query to construct the vocabulary for the text vectorization. Arguments --------- query_tree: pythonds.trees.BinaryTree The binary tree object representing a parsed search query. Each leaf node is a search term and internal nodes represent boolean operations. See parse_query() for details. Returns ------- vocabulary: list List of strings representing unique normalized search terms. """ def _getleafnodes(node): terms = [] if node.isLeaf(): return terms + [node.normedterm] elif node.leftChild and not node.rightChild: return terms + _getleafnodes(node.getLeftChild()) elif node.rightChild and not node.leftChild: return terms + _getleafnodes(node.getRightChild()) else: # has two children return terms + _getleafnodes(node.getLeftChild()) \ + _getleafnodes(node.getRightChild()) # extract terms from the leaf nodes of the query object. terms = _getleafnodes(query_tree) # remove duplicates. vocabulary = list(set(terms)) return vocabulary
5,356,977
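A brief usage sketch with a hypothetical stand-in node class that mimics the pythonds BinaryTree interface used above (isLeaf, getLeftChild, getRightChild, and the custom normedterm attribute); the real query trees come from parse_query().

class _Node:
    def __init__(self, normedterm=None, left=None, right=None):
        self.normedterm = normedterm
        self.leftChild = left
        self.rightChild = right

    def isLeaf(self):
        return self.leftChild is None and self.rightChild is None

    def getLeftChild(self):
        return self.leftChild

    def getRightChild(self):
        return self.rightChild

# Internal node representing (cat AND dog); both children are term leaves.
tree = _Node(left=_Node("cat"), right=_Node("dog"))
print(sorted(get_vocabulary(tree)))  # ['cat', 'dog']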
def pubkey_to_address(pubkey): """Convert a public key (in hex) to a Bitcoin address""" return bin_to_b58check(hash_160(changebase(pubkey, 16, 256)))
5,356,978
def test_inplace_add_model_parallel(): """ Feature: test InplaceAdd model parallel Description: model parallel Expectation: compile success """ context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0) strategy = ((1, 4, 2), (1, 4, 2)) net = InplaceAddNet(indices_, strategy) compile_net(net, x_, input_v_)
5,356,979
def transpose(A):
    """
    Matrix transposition

    :type A: list
    :param A: a list of lists representing a matrix A

    :rtype: list
    :return: a list of lists representing the transpose of matrix A

    Example:
    --------
    >>> A = [[0, -4, 4], [-3, -2, 0]]
    >>> print(transpose(A))
    [[0.0, -3.0], [-4.0, -2.0], [4.0, 0.0]]
    """
    # TODO: write exceptions to help user with errors from the backend
    return Clinear_algebra.transpose(A)
5,356,980
def find_point_in_section_list(point, section_list): """Returns the start of the section the given point belongs to. The given list is assumed to contain start points of consecutive sections, except for the final point, assumed to be the end point of the last section. For example, the list [5, 8, 30, 31] is interpreted as the following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5, 32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for them the function returns 5, and 30, 30.7 and 31 all match [30-31]. Parameters --------- point : float The point for which to match a section. section_list : sortedcontainers.SortedList A list of start points of consecutive sections. Returns ------- float The start of the section the given point belongs to. None if no match was found. Example ------- >>> from sortedcontainers import SortedList >>> seclist = SortedList([5, 8, 30, 31]) >>> find_point_in_section_list(4, seclist) >>> find_point_in_section_list(5, seclist) 5 >>> find_point_in_section_list(27, seclist) 8 >>> find_point_in_section_list(31, seclist) 30 """ if point < section_list[0] or point > section_list[-1]: return None if point in section_list: if point == section_list[-1]: return section_list[-2] ind = section_list.bisect(point)-1 if ind == 0: return section_list[0] return section_list[ind] try: ind = section_list.bisect(point) return section_list[ind-1] except IndexError: return None
5,356,981
def mk_metrics_api(tm_env):
    """Factory to create metrics api.
    """

    class _MetricsAPI(object):
        """Access to the locally gathered metrics.
        """

        def __init__(self):

            def _get(rsrc_id, timeframe, as_json=False):
                """Return the rrd metrics.
                """
                with lc.LogContext(_LOGGER, rsrc_id):
                    _LOGGER.info('Get metrics')
                    id_ = self._unpack_id(rsrc_id)

                    file_ = self._get_rrd_file(**id_)
                    if as_json:
                        return rrdutils.get_json_metrics(file_, timeframe)

                    return file_

            def _file_path(rsrc_id):
                """Return the rrd metrics file path.
                """
                id_ = self._unpack_id(rsrc_id)

                return self._abs_met_path(**id_)

            self.file_path = _file_path
            self.get = _get

        def _remove_ext(self, fname, extension='.rrd'):
            """Returns the basename of a file and removes the extension as well.
            """
            res = os.path.basename(fname)
            res = res[:-len(extension)]

            return res

        def _unpack_id(self, rsrc_id):
            """Decompose resource_id to a dictionary.

            Unpack the (core) service or the application name and "uniq name"
            from rsrc_id to a dictionary.
            """
            if '/' in rsrc_id:
                app, uniq = rsrc_id.split('/')
                return {'app': app, 'uniq': uniq}

            return {'service': rsrc_id}

        def _get_rrd_file(self, service=None, app=None, uniq=None,
                          arch_extract=True):
            """Return the rrd file path of an app or a core service."""
            if uniq is None:
                return self._core_rrd_file(service)

            if uniq == 'running':
                arch_extract = False
                # find out uniq ...
                state_json = os.path.join(tm_env().running_dir, app, 'data',
                                          'state.json')
                with io.open(state_json) as f:
                    uniq = json.load(f)['uniqueid']

            return self._app_rrd_file(app, uniq, arch_extract)

        def _app_rrd_file(self, app, uniq, arch_extract=True):
            """Return an application's rrd file."""
            return _get_file(
                self._abs_met_path(app=app, uniq=uniq),
                arch_extract=arch_extract,
                arch=_archive_path(tm_env, 'sys', app, uniq),
                arch_extract_filter=functools.partial(_arch_file_filter,
                                                      fname='metrics.rrd'))

        def _core_rrd_file(self, service):
            """Return the given service's rrd file."""
            return _get_file(self._abs_met_path(service), arch_extract=False)

        def _abs_met_path(self, service=None, app=None, uniq=None):
            """Return the rrd metrics file's full path."""
            if service is not None:
                return os.path.join(tm_env().metrics_dir, 'core',
                                    service + '.rrd')

            return os.path.join(tm_env().metrics_dir, 'apps',
                                '%s-%s.rrd' % (app.replace('#', '-'), uniq))

    return _MetricsAPI
5,356,982
def create_base_args(parser: argparse.ArgumentParser, model_types=None): """Add base arguments for Transformers based models """ # Required parameters if model_types is not None and len(model_types) > 1: parser.add_argument("--model_type", default=None, type=str, choices=model_types, required=True, help="Model type selected in the list: " + ", ".join(model_types)) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints " "will be written.") # Other parameters parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. " "Sequences longer than this will be truncated, sequences shorter " "will be padded.") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded " "from s3") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets")
5,356,983
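A brief usage sketch; the model type names and output path below are placeholders, not values required by the function.

import argparse

parser = argparse.ArgumentParser()
create_base_args(parser, model_types=["bert", "roberta"])
args = parser.parse_args(["--model_type", "bert", "--output_dir", "/tmp/out"])
print(args.model_type, args.max_seq_length, args.do_lower_case)  # bert 128 False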
def generate_module(file_allocator, name): """ Generate an in-memory module from a generated Python implementation. """ assert name in file_allocator.allocated_files f = file_allocator.allocated_files[name] f.seek(0) data = f.read() modname, _ = os.path.splitext(name) d = {} eval(compile(data, name, "exec"), d, d) m = types.ModuleType(modname) vars(m).update(d) return m
5,356,984
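A quick sketch of generate_module with a hypothetical in-memory allocator stand-in; the only requirement the function places on it is an allocated_files mapping of name to a seekable file-like object.

import io

class _FakeAllocator:
    def __init__(self):
        self.allocated_files = {}

alloc = _FakeAllocator()
alloc.allocated_files["generated.py"] = io.StringIO("ANSWER = 42\n")

mod = generate_module(alloc, "generated.py")
print(mod.__name__, mod.ANSWER)  # generated 42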
def all_reduce_sum(t, dim): """Like reduce_sum, but broadcasts sum out to every entry in reduced dim.""" t_shape = t.get_shape() rank = t.get_shape().ndims return tf.tile( tf.expand_dims(tf.reduce_sum(t, dim), dim), [1] * dim + [t_shape[dim].value] + [1] * (rank - dim - 1))
5,356,985
def pprint(obj): """ Pretty Prints the object in json format using the following pprint options: pprint.PrettyPrinter(indent=2, width=120, sort_dicts=False) """ _pprint_object.pprint(obj)
5,356,986
def get_deltas_from_bboxes_and_landmarks(prior_boxes, bboxes_and_landmarks): """Calculating bounding box and landmark deltas for given ground truth boxes and landmarks. inputs: prior_boxes = (total_bboxes, [center_x, center_y, width, height]) bboxes_and_landmarks = (batch_size, total_bboxes, [y1, x1, y2, x2, landmark_x0, landmark_y0, ..., landmark_xN, landmark_yN]) outputs: deltas = (batch_size, total_bboxes, [delta_bbox_y, delta_bbox_x, delta_bbox_h, delta_bbox_w, delta_landmark_x0, delta_landmark_y0, ..., delta_landmark_xN, delta_landmark_yN]) """ # gt_width = bboxes_and_landmarks[..., 3] - bboxes_and_landmarks[..., 1] gt_height = bboxes_and_landmarks[..., 2] - bboxes_and_landmarks[..., 0] gt_ctr_x = bboxes_and_landmarks[..., 1] + 0.5 * gt_width gt_ctr_y = bboxes_and_landmarks[..., 0] + 0.5 * gt_height # delta_x = (gt_ctr_x - prior_boxes[..., 0]) / prior_boxes[..., 2] delta_y = (gt_ctr_y - prior_boxes[..., 1]) / prior_boxes[..., 3] delta_w = gt_width / prior_boxes[..., 2] delta_h = gt_height / prior_boxes[..., 3] # total_landmarks = tf.shape(bboxes_and_landmarks[..., 4:])[-1] // 2 xy_pairs = tf.tile(prior_boxes[..., 0:2], (1, total_landmarks)) wh_pairs = tf.tile(prior_boxes[..., 2:4], (1, total_landmarks)) landmark_deltas = (bboxes_and_landmarks[..., 4:] - xy_pairs) / wh_pairs # return tf.concat([tf.stack([delta_y, delta_x, delta_h, delta_w], -1), landmark_deltas], -1)
5,356,987
def choose(population, sample):
    """
    Returns ``population`` choose ``sample``, given by: n! / k!(n-k)!, where
    n == ``population`` and k == ``sample``.
    """
    if sample > population:
        return 0
    s = max(sample, population - sample)
    assert s <= population
    assert population > -1
    if s == population:
        return 1
    numerator = 1
    denominator = 1
    for i in range(s+1, population + 1):
        numerator *= i
        denominator *= (i - s)
    # The quotient is always exact, so use integer division to avoid
    # floating-point rounding for large inputs.
    return numerator // denominator
5,356,988
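A short sanity check against the standard library (math.comb requires Python 3.8+).

import math

for n, k in [(5, 2), (10, 0), (6, 6)]:
    assert choose(n, k) == math.comb(n, k)
assert choose(4, 7) == 0  # sample larger than population
print(choose(52, 5))  # 2598960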
def _where_cross(data,threshold): """return a list of Is where the data first crosses above threshold.""" Is=np.where(data>threshold)[0] Is=np.concatenate(([0],Is)) Ds=Is[:-1]-Is[1:]+1 return Is[np.where(Ds)[0]+1]
5,356,989
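A small illustration with a synthetic signal: the function returns the first sample index of each excursion above the threshold.

import numpy as np

signal = np.array([0, 1, 5, 6, 2, 0, 7, 8, 1])
print(_where_cross(signal, 4))  # [2 6]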
def TVD_to_MD(well,TVD):
    """Returns the measured depth position for a well based on a true vertical depth

    Parameters
    ----------
    well : str
      Selected well
    TVD : float
      Desired true vertical depth

    Returns
    -------
    float
      MD : measured depth

    Attention
    ---------
    The input information comes from the files input/ubication.csv and input/survey/{well}_MD.dat.

    Note
    ----
    Linear interpolation between survey points is used.

    Examples
    --------
    >>> TVD_to_MD('WELL-1',-100)
    """
    file="../input/survey/%s_MD.dat"%well
    MD,DeltaY,DeltaX=np.loadtxt(file,skiprows=1,unpack=True,delimiter=',')

    reader = csv.DictReader(open("../input/ubication.csv", 'r')) #'rb'
    dict_ubication={}
    for line in reader:
        dict_ubication[line['well']]=line

    z_0=float(dict_ubication[well]['masl'])
    x_0=float(dict_ubication[well]['east'])
    y_0=float(dict_ubication[well]['north'])

    #Initialize the delta z values
    z_delta=[0 for i in MD]
    x=[0 for i in MD]
    y=[0 for i in MD]
    z=[0 for i in MD]

    #Assuming straight line between points
    for j in range(len(MD)):
        if j==0:
            z_delta[j]=0
        else:
            z_delta[j]=((MD[j]-MD[j-1])**2-(DeltaX[j]-DeltaX[j-1])**2-(DeltaY[j]-DeltaY[j-1])**2)**0.5+z_delta[j-1]

    #Conversion from delta to absolute depth
    for j in range(len(MD)):
        z[j]=z_0-z_delta[j]

    #Function of X-Y-Z with MD
    funzmd=interpolate.interp1d(z,MD)

    try:
        MD=funzmd(TVD)
    except ValueError:
        MD=np.nan

    return MD
5,356,990
def get_table_b_2_b(): """表 B.2 居住人数 2 人における照明設備の使用時間率 (b) 休日在宅 Args: Returns: list: 表 B.2 居住人数 2 人における照明設備の使用時間率 (b) 休日在宅 """ table_b_2_b = [ (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.25, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.25, 0.00, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00), (0.50, 0.25, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00, 0.25, 0.00, 0.25, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00), (0.75, 0.25, 0.25, 0.00, 0.25, 0.00, 0.00, 0.75, 0.25, 0.25, 0.25, 0.25, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00), (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.50, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25), (1.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.50, 0.50, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.75, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.50, 0.25, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00), (0.50, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.25, 0.00, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00, 0.00), ] return table_b_2_b
5,356,991
def eea(m, n): """ Compute numbers a, b such that a*m + b*n = gcd(m, n) using the Extended Euclidean algorithm. """ p, q, r, s = 1, 0, 0, 1 while n != 0: k = m // n m, n, p, q, r, s = n, m - k*n, q, p - k*q, s, r - k*s return (p, r)
5,356,992
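A quick check of the Bézout identity the function returns: a*m + b*n equals gcd(m, n).

a, b = eea(240, 46)
print(a, b, a * 240 + b * 46)  # -9 47 2  (and gcd(240, 46) == 2)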
def build_pot(): """Build the current 'gettext' language translation files and updates the *.po files for the supported languages. """ check_sphinx_build() command = ' '.join([SPHINX_BUILD, '-M', 'gettext', '"' + SPHINX_SOURCE_DIR + '"', '"' + SPHINX_LOCALE_DIR + '"', '-c', '.', '-D', 'language={0}'.format(DEFAULT_LANGUAGE)]) print('\nExtracting POT files with command: {0}\n'.format(command)) exit_code = subprocess.call(command, timeout=SPHINX_BUILD_TIMEOUT) if exit_code: exit_with_code(exit_code) check_sphinx_intl() print('\nUpdating PO files for other languages.') for lang in LANGUAGE_LIST.keys(): if lang != DEFAULT_LANGUAGE: print("\n\nUpdating the '{0}' ({1}) files.\n".format(lang, LANGUAGE_LIST[lang])) update_po(lang)
5,356,993
def string_quote(s): """ TODO(ssx): quick way to quote string """ return '"' + s + '"'
5,356,994
def nextjs_build(name, **kwargs): """Wrapper macro around nextjs cli Args: name: name **kwargs: **kwargs """ args = kwargs.pop("args", []) args = [ "build", # --outDir is parsed out by custom tool entry point & not forwarded to the underlying tool cli "--outDir", "%s/$(@D)" % path_to_workspace_root(), ] + args env = kwargs.pop("env", {}) env["NEXT_TELEMETRY_DISABLED"] = "1" # https://nextjs.org/telemetry npm_package_bin( name = name, args = args, env = env, chdir = native.package_name(), output_dir = True, # Use a custom tool binary which uses a customized entry point # tool = ":%s_entry_bin" % name, tool = "//bazel/rules:nextjs-bazel-entry", **kwargs )
5,356,995
def arcball_constrain_to_axis(point, axis): """Return sphere point perpendicular to axis.""" v = np.array(point, dtype=np.float64, copy=True) a = np.array(axis, dtype=np.float64, copy=True) v -= a * np.dot(a, v) # on plane n = vector_norm(v) if n > _EPS: if v[2] < 0.0: v *= -1.0 v /= n return v if a[2] == 1.0: return np.array([1, 0, 0], dtype=np.float64) return unit_vector([-a[1], a[0], 0])
5,356,996
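A numeric sketch; it assumes the surrounding transformations helpers (vector_norm, unit_vector, _EPS) are importable alongside the function.

import numpy as np

v = arcball_constrain_to_axis([0.3, 0.4, 0.5], [0.0, 0.0, 1.0])
print(v)                           # [0.6 0.8 0. ]
print(np.dot(v, [0.0, 0.0, 1.0]))  # 0.0 -- perpendicular to the axis
print(np.linalg.norm(v))           # 1.0 -- unit length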
def text( node: "RenderTreeNode", renderer_funcs: Mapping[str, RendererFunc], options: Mapping[str, Any], env: MutableMapping, ) -> str: """Process a text token. Text should always be a child of an inline token. An inline token should always be enclosed by a heading or a paragraph. """ text = node.content if is_text_inside_autolink(node): return text # Escape backslash to prevent it from making unintended escapes. # This escape has to be first, else we start multiplying backslashes. text = text.replace("\\", "\\\\") text = escape_asterisk_emphasis(text) # Escape emphasis/strong marker. text = escape_underscore_emphasis(text) # Escape emphasis/strong marker. text = text.replace("[", "\\[") # Escape link label enclosure text = text.replace("]", "\\]") # Escape link label enclosure text = text.replace("<", "\\<") # Escape URI enclosure text = text.replace("`", "\\`") # Escape code span marker # Escape "&" if it starts a sequence that can be interpreted as # a character reference. for char_refs_found, char_ref in enumerate(RE_CHAR_REFERENCE.finditer(text)): start = char_ref.start() + char_refs_found text = text[:start] + "\\" + text[start:] # The parser can give us consecutive newlines which can break # the markdown structure. Replace two or more consecutive newlines # with newline character's decimal reference. text = text.replace("\n\n", "&#10;&#10;") # If the last character is a "!" and the token next up is a link, we # have to escape the "!" or else the link will be interpreted as image. next_sibling = node.next_sibling if text.endswith("!") and next_sibling and next_sibling.type == "link": text = text[:-1] + "\\!" return text
5,356,997
def patch_importlib_util_find_spec(name,package=None): """ function used to temporarily redirect search for loaders to hickle_loader directory in test directory for testing loading of new loaders """ return find_spec("hickle.tests." + name.replace('.','_',1),package)
5,356,998
def format_msg_controller(data):
    """Return a formatted message from a controller

    :param data: The bytes from the controller message
    :type data: bytes
    """
    return format_message(data, 13, "Controller")
5,356,999