content: string (length 35 – 762k)
sha1: string (length 40)
id: int64 (0 – 3.66M)
def tau_for_x(x, beta):
    """Rescale the x axis (-1 ... 1) to the tau axis (0 ... beta)."""
    if x.min() < -1 or x.max() > 1:
        raise ValueError("x must lie within [-1, 1]")
    return .5 * beta * (x + 1)
1d7b868dfadb65e6f98654276763fd4bff2c20ff
3,653,700
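A minimal usage sketch for the `tau_for_x` record above, assuming the function is in scope and that NumPy arrays are the intended input (the array type is an assumption; the original only requires `.min()`/`.max()`):

import numpy as np

# Map five evenly spaced points on [-1, 1] onto the tau axis [0, beta].
x = np.linspace(-1, 1, 5)
tau = tau_for_x(x, beta=10.0)  # -> [0.0, 2.5, 5.0, 7.5, 10.0]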
from typing import Optional
from typing import Dict

# Assumes an ElementTree-compatible binding; the original snippet does not show
# which `etree` it uses (xml.etree.ElementTree and lxml.etree both fit this API).
from xml.etree import ElementTree as etree


def _generate_element(name: str, text: Optional[str] = None,
                      attributes: Optional[Dict] = None) -> etree.Element:
    """
    generate an ElementTree.Element object

    :param name: namespace+tag_name of the element
    :param text: Text of the element. Default is None
    :param attributes: Attributes of the elements in form of a dict
        {"attribute_name": "attribute_content"}
    :return: ElementTree.Element object
    """
    et_element = etree.Element(name)

    if text:
        et_element.text = text

    if attributes:
        for key, value in attributes.items():
            et_element.set(key, value)

    return et_element
d7d8f7d174f207d64993aca54803af6600c3ddb6
3,653,701
def CoA_Cropland_URL_helper(*, build_url, config, **_): """ This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for data imports that requires parts of the url text string to be replaced with info specific to the data year. This function does not parse the data, only modifies the urls from which data is obtained. :param build_url: string, base url :param config: dictionary, items in FBA method yaml :return: list, urls to call, concat, parse, format into Flow-By-Activity format """ # initiate url list for coa cropland data urls = [] # call on state acronyms from common.py (and remove entry for DC) state_abbrevs = abbrev_us_state state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"} # replace "__aggLevel__" in build_url to create three urls for x in config['agg_levels']: for y in config['sector_levels']: # at national level, remove the text string calling for # state acronyms if x == 'NATIONAL': url = build_url url = url.replace("__aggLevel__", x) url = url.replace("__secLevel__", y) url = url.replace("&state_alpha=__stateAlpha__", "") if y == "ECONOMICS": url = url.replace( "AREA%20HARVESTED&statisticcat_desc=AREA%20IN%20" "PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=" "AREA%20BEARING%20%26%20NON-BEARING", "AREA&statisticcat_desc=AREA%20OPERATED") else: url = url.replace("&commodity_desc=AG%20LAND&" "commodity_desc=FARM%20OPERATIONS", "") urls.append(url) else: # substitute in state acronyms for state and county url calls for z in state_abbrevs: url = build_url url = url.replace("__aggLevel__", x) url = url.replace("__secLevel__", y) url = url.replace("__stateAlpha__", z) if y == "ECONOMICS": url = url.replace( "AREA%20HARVESTED&statisticcat_desc=AREA%20IN%20" "PRODUCTION&statisticcat_desc=TOTAL&" "statisticcat_desc=AREA%20BEARING%20%26%20NON-BEARING", "AREA&statisticcat_desc=AREA%20OPERATED") else: url = url.replace("&commodity_desc=AG%20LAND&commodity_" "desc=FARM%20OPERATIONS", "") urls.append(url) return urls
5cd08b8c4198428e45267f33d35d98b63df4fd17
3,653,702
def _centered_bias(logits_dimension, head_name=None):
    """Creates a centered-bias `Variable` and adds per-dimension summaries.

    Args:
      logits_dimension: Last dimension of `logits`. Must be >= 1.
      head_name: Optional name of the head.

    Returns:
      Centered bias `Variable`.

    Raises:
      ValueError: if `logits_dimension` is invalid.
    """
    if (logits_dimension is None) or (logits_dimension < 1):
        raise ValueError("Invalid logits_dimension %s." % logits_dimension)
    centered_bias = variable_scope.get_variable(
        name="centered_bias_weight",
        shape=(logits_dimension,),
        initializer=init_ops.zeros_initializer(),
        trainable=True)
    for dim in range(logits_dimension):
        if head_name:
            summary.scalar("centered_bias/bias_%d/%s" % (dim, head_name),
                           centered_bias[dim])
        else:
            summary.scalar("centered_bias/bias_%d" % dim, centered_bias[dim])
    return centered_bias
868fc2681ee1177932b77bdfe9ce9eefc3c5fde1
3,653,703
from typing import Union
from typing import List

import pandas as pd


def get_columns(dataframe: pd.DataFrame,
                columns: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]:
    """Return a copy of the selected column(s) of the dataframe.

    A single column name yields a Series; a list of names yields a DataFrame.
    """
    if isinstance(columns, str):
        return dataframe[columns].copy(True)
    return dataframe[list(columns)].copy(True)
e624233a3aca3f71f203bf7acca700722819b237
3,653,704
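A small usage sketch, assuming the `get_columns` helper from the record above (as cleaned up here) is in scope:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
single = get_columns(df, "a")         # pandas Series copy of column "a"
subset = get_columns(df, ["a", "c"])  # DataFrame copy with columns "a" and "c"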
import math

import pandas
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from sqlalchemy import func

# `Vote`, `get_magnitude` and `image_from_figure` are project-level names that
# the original snippet does not show.


def get_vote_activity(session):
    """Create a plot showing the vote statistics over time."""
    creation_date = func.date_trunc("day", Vote.created_at).label("creation_date")
    votes = (
        session.query(creation_date, func.count(Vote.id).label("count"))
        .group_by(creation_date)
        .order_by(creation_date)
        .all()
    )
    total_votes = [("Total votes", q[0], q[1]) for q in votes]

    # Grid style
    plt.style.use("seaborn-whitegrid")

    # Combine the results in a single dataframe and name the columns
    dataframe = pandas.DataFrame(total_votes, columns=["type", "date", "votes"])

    months = mdates.MonthLocator()  # every month
    months_fmt = mdates.DateFormatter("%Y-%m")

    max_value = max([vote[2] for vote in total_votes])
    magnitude = get_magnitude(max_value)

    # Plot each result set
    fig, ax = plt.subplots(figsize=(30, 15), dpi=120)
    for key, group in dataframe.groupby(["type"]):
        ax = group.plot(ax=ax, kind="bar", x="date", y="votes", label=key)

    ax.xaxis.set_major_locator(months)
    ax.xaxis.set_major_formatter(months_fmt)
    ax.yaxis.set_ticks(np.arange(0, max_value, math.pow(10, magnitude - 1)))

    image = image_from_figure(fig)
    image.name = "vote_statistics.png"
    return image
9b59ad083147d7e21d8d32e730b235a23b187c0f
3,653,705
import networkx as nx


def viz_graph(obj):
    """
    Generate the visualization of the graph in JupyterLab.

    Arguments
    ---------
    obj: list
        a list of Python objects that defines the nodes

    Returns
    -------
    nx.DiGraph
    """
    G = nx.DiGraph()
    # instantiate objects
    for o in obj:
        for i in o['inputs']:
            G.add_edge(i, o['id'])
    return G
a826438b3e207f88a7bddddcd4fc02a4ad9c753d
3,653,706
def zrand_convolve(labelgrid, neighbors='edges'):
    """
    Calculates the avg and std z-Rand index using a kernel over `labelgrid`

    The kernel is determined by `neighbors`, which can include all entries with
    touching edges (i.e., 4 neighbors) or corners (i.e., 8 neighbors).

    Parameters
    ----------
    labelgrid : (S, K, N) array_like
        Array containing cluster labels for each of the `N` samples at every
        (S, K) position of the parameter grid.
    neighbors : str, optional
        How many neighbors to consider when calculating the z-Rand kernel.
        Must be in ['edges', 'corners']. Default: 'edges'

    Returns
    -------
    zrand_avg : (S, K) np.ndarray
        Array containing the average of the z-Rand index calculated using the
        provided neighbor kernel
    zrand_std : (S, K) np.ndarray
        Array containing the standard deviation of the z-Rand index
    """
    inds = cartesian([range(labelgrid.shape[0]), range(labelgrid.shape[1])])
    zrand = np.empty(shape=labelgrid.shape[:-1] + (2,))
    for x, y in inds:
        ninds = get_neighbors(x, y, neighbors=neighbors, shape=labelgrid.shape)
        zrand[x, y] = zrand_partitions(labelgrid[ninds].T)
    return zrand[..., 0], zrand[..., 1]
4b3950239886cb7e41fb2a7105c2413234dcdb30
3,653,707
def msg_receiver():
    """
    Page listing received system messages.
    :return: rendered template
    """
    return render_template('sysadmin/sysmsg/sys_msg_received.html', **locals())
0902f9eb4ad75802d7f858f4474c5e587082403f
3,653,708
from datetime import datetime


def abs_timedelta(delta):
    """Returns an "absolute" value for a timedelta, always representing a
    time distance."""
    if delta.days < 0:
        now = datetime.now()
        return now - (now + delta)
    return delta
81018ea9c54585a8c24e52cc48c21fcb2d73e9b3
3,653,709
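A tiny usage sketch, assuming the `abs_timedelta` function from the record above is in scope:

from datetime import timedelta

# A negative delta comes back as its positive distance.
print(abs_timedelta(timedelta(days=-2)))  # 2 days, 0:00:00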
import subprocess


def is_word_file(file):
    """
    Check to see if the given file is a Word file.

    @param file (str) The path of the file to check.

    @return (bool) True if the file is a Word file, False if not.
    """
    typ = subprocess.check_output(["file", file])
    return ((b"Microsoft Office Word" in typ) or
            (b"Word 2007+" in typ) or
            (b"Microsoft OOXML" in typ))
cb297e9cf8ed709e9802f1d3d48bc7d1271eac26
3,653,710
def make_new_get_user_response(row):
    """
    Returns an object containing only what needs to be sent back to the user.
    """
    return {
        'userName': row['userName'],
        'categories': row['categories'],
        'imageName': row['imageName'],
        'refToImage': row['refToImage'],
        'imgDictByTag': row['imgDictByTag'],
        'canView': row['canView']
    }
e13d8d297bd1401752ce07d93a68e765ed1113e8
3,653,711
def upload_bus_data(number: int) -> dict:
    """Loads the dam (mother) profile data from the database."""
    logger.debug("upload_bus_data started")
    try:
        query = ProfilsCows.select().where(ProfilsCows.number == number)
        if query.exists():
            bus = ProfilsCows.select().where(ProfilsCows.number == number).get()
            res = {
                "BM1818_mutter": bus.BM1818,
                "BM1824_mutter": bus.BM1824,
                "BM2113_mutter": bus.BM2113,
                "CSRM60_mutter": bus.CSRM60,
                "CSSM66_mutter": bus.CSSM66,
                "CYP21_mutter": bus.CYP21,
                "ETH10_mutter": bus.ETH10,
                "ETH225_mutter": bus.ETH225,
                "ETH3_mutter": bus.ETH3,
                "ILSTS6_mutter": bus.ILSTS6,
                "INRA023_mutter": bus.INRA023,
                "RM067_mutter": bus.RM067,
                "SPS115_mutter": bus.SPS115,
                "TGLA122_mutter": bus.TGLA122,
                "TGLA126_mutter": bus.TGLA126,
                "TGLA227_mutter": bus.TGLA227,
                "TGLA53_mutter": bus.TGLA53,
                "MGTG4B_mutter": bus.MGTG4B,
                "SPS113_mutter": bus.SPS113,
            }
        else:
            # No profile for this number: return '-' for every marker.
            res = {key: '-' for key in (
                "BM1818_mutter", "BM1824_mutter", "BM2113_mutter", "CSRM60_mutter",
                "CSSM66_mutter", "CYP21_mutter", "ETH10_mutter", "ETH225_mutter",
                "ETH3_mutter", "ILSTS6_mutter", "INRA023_mutter", "RM067_mutter",
                "SPS115_mutter", "TGLA122_mutter", "TGLA126_mutter", "TGLA227_mutter",
                "TGLA53_mutter", "MGTG4B_mutter", "SPS113_mutter",
            )}
        logger.debug("upload_bus_data finished")
        return res
    except Exception as e:
        logger.error(e)
        name = '\njob_db.py\nupload_bus_data\n'
        QMessageBox.critical(
            None, 'Input error',
            f'{answer_error()}{name}Details:\n {e}'
        )
        logger.debug("upload_bus_data finished")
fa1697cb1e67410d7c8da74f299fe2ed38990257
3,653,712
def is_feature_enabled():
    """
    Helper to check Site Configuration for ENABLE_COURSE_ACCESS_GROUPS.

    :return: bool
    """
    is_enabled = bool(configuration_helpers.get_value('ENABLE_COURSE_ACCESS_GROUPS', default=False))

    if is_enabled:
        # Keep the line below in sync with `util.organizations_helpers.organizations_enabled`
        if not settings.FEATURES.get('ORGANIZATIONS_APP', False):
            raise ConfigurationError(
                'The Course Access Groups feature is enabled but the Organizations App is not. '
                'Please enable the feature flag `ORGANIZATIONS_APP` to fix this exception.'
            )

    return is_enabled
57f0b94409d9332f8846d64a6a30518b6dcc8173
3,653,713
def solve_disp_eq(betbn, betbt, bet, Znak, c, It, Ia, nb, var):
    """
    Solves the dispersion equation.
    Znak = -1 for refraction
    Znak = 1 for reflection
    """
    betb = sqrt(betbn ** 2. + betbt ** 2.)
    gamb = 1. / sqrt(1. - betb ** 2.)
    d = c * It / Ia

    Ab = 1. + (nb ** 2. - 1.) * gamb ** 2. * (1. - betbn ** 2.)
    Bb = d ** 2. * (1. - bet ** 2. - (nb ** 2. - 1.) * (gamb * (bet - betbn)) ** 2.)
    Cb = (nb ** 2. - 1.) * gamb ** 2. * d * betbt * (2. - 2. * bet * betbn - d * betbt * (1. - bet ** 2.))
    Qb = Ab - Bb - Cb
    CHb = bet + (nb ** 2. - 1.) * gamb ** 2. * (bet - betbn) * (1. - betbt * d)
    ZNb = 1. - bet ** 2. - (nb ** 2. - 1.) * (gamb * (bet - betbn)) ** 2.

    kbna = Ia * (CHb + Znak * sqrt(Qb)) / (c * ZNb)  # normal projection of the wave vector
    kbt = It  # tangential projection of the wave vector
    iQb = arctan(abs(kbt / kbna))
    wi = kbna * bet * c + Ia  # frequency of the refracted wave
    ci = wi * cos(iQb) / abs(kbna)  # speed of light in the medium
    if var < 0:
        iQb = -iQb

    # k = kbna / cos(arctan(abs(kbt / kbna)))
    #
    # ui=betb*c
    # uit = betbt * c
    # uin = betbn * c
    # V = bet * c
    # Ai = -1 / pow(c, 2.) - (pow(nb, 2.) - 1.) * pow(1. - uin / V, 2.) / pow(c, 2.) / (1. - pow(betb, 2.))
    # Bi = -2 * (pow(nb, 2.) - 1.) * (-kbt * uit + Ia * uin / V) * (1. - uin / V) / (pow(c, 2.) * (1. - pow(betb, 2.)))
    # Ci = pow(k, 2.) - (pow(nb, 2.) - 1.) * pow(-kbt * uit + Ia * uin / V, 2.) / pow(c, 2.) / (1. - pow(betb, 2.))
    # '''print "Ai = %s"%Ai
    # print "Bi = %s"%Bi
    # print "Ci = %s"%Ci'''
    #
    # wi=(-Bi-sqrt(pow(Bi,2)-4*Ai*Ci))/(2*Ai)
    # dispeq = Ai * wi * wi + Bi * wi + Ci
    # '''print "dispeq = %s"%(dispeq,)
    # print "wi= %s"%wi'''

    return (kbna, kbt, iQb, wi, ci)
b67b41cdccf37a14fda103b6f05263c7cbb4514e
3,653,714
import numpy def phistogram(view, a, bins=10, rng=None, normed=False): """Compute the histogram of a remote array a. Parameters ---------- view IPython DirectView instance a : str String name of the remote array bins : int Number of histogram bins rng : (float, float) Tuple of min, max of the range to histogram normed : boolean Should the histogram counts be normalized to 1 """ nengines = len(view.targets) # view.push(dict(bins=bins, rng=rng)) with view.sync_imports(): rets = view.apply_sync(lambda a, b, rng: numpy.histogram(a,b,rng), Reference(a), bins, rng) hists = [ r[0] for r in rets ] lower_edges = [ r[1] for r in rets ] # view.execute('hist, lower_edges = numpy.histogram(%s, bins, rng)' % a) lower_edges = view.pull('lower_edges', targets=0) hist_array = numpy.array(hists).reshape(nengines, -1) # hist_array.shape = (nengines,-1) total_hist = numpy.sum(hist_array, 0) if normed: total_hist = total_hist/numpy.sum(total_hist,dtype=float) return total_hist, lower_edges
3c4633891b495a5cad867c945a8f8cc1c6b3c14f
3,653,715
from itertools import islice
from itertools import tee
from typing import Iterable
from typing import Iterator
from typing import TypeVar

_T = TypeVar("_T")


def windowed(it: Iterable[_T], size: int) -> Iterator[tuple[_T, ...]]:
    """Retrieve overlapped windows from iterable.

    >>> [*windowed(range(5), 3)]
    [(0, 1, 2), (1, 2, 3), (2, 3, 4)]
    """
    return zip(*(islice(it_, start, None)
                 for start, it_ in enumerate(tee(it, size))))
6e3b29b67f9eb323d00065fa58ccd916c7c49640
3,653,716
def minmax(data):
    """Solution to exercise R-1.3.

    Takes a sequence of one or more numbers, and returns the smallest and
    largest numbers, in the form of a tuple of length two. Do not use the
    built-in functions min or max in implementing the solution.
    """
    min_idx = 0
    max_idx = 0
    for idx, num in enumerate(data):
        if num > data[max_idx]:
            max_idx = idx
        if num < data[min_idx]:
            min_idx = idx
    return (data[min_idx], data[max_idx])
9715bef69c120f6d1afb933bd9030240f556eb20
3,653,717
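A one-line usage sketch, assuming the `minmax` function from the record above is in scope:

print(minmax([3, 1, 4, 1, 5, 9, 2, 6]))  # (1, 9)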
import numpy as np


def sample_discreate(prob, n_samples):
    """Sample class labels according to a prior class distribution.

    M = sample_discreate(prob, n_samples)

    Input:
        prob: class prior distribution, shape=(n_classes,)
        n_samples: number of samples to draw, shape=(n_samples,)
    Output:
        M: sampled class labels, shape=(n_samples,)

    Example:
        sample_discreate([0.8, 0.2], n_samples) draws n_samples samples from
        the classes [0, 1], where class 0 is drawn with probability 0.8 and
        class 1 with probability 0.2.
    """
    np.random.seed(1)  # make every run produce the same random numbers
    n = prob.size  # number of classes
    R = np.random.rand(n_samples)  # uniformly distributed random numbers
    M = np.zeros(n_samples)  # initialize the result
    cumprob = np.cumsum(prob)  # cumulative probability distribution
    if n < n_samples:
        # more samples requested than there are classes
        for i in range(n - 1):
            M = M + np.array(R > cumprob[i])
    else:
        # fewer samples requested than there are classes
        cumprob2 = cumprob[:-1]
        for i in range(n_samples):
            M[i] = np.sum(R[i] > cumprob2)
    return M
34c19c2dcbad652bdae8f2c829f42934c2176e84
3,653,718
from xml.dom.minidom import parseString  # tools for handling XML in python


def get_catalyst_pmids(first, middle, last, email, affiliation=None):
    """
    Given an author's identifiers and affiliation information, call the
    catalyst service to retrieve PMIDs for the author and return them as a
    list.

    :param first: author first name
    :param middle: author middle name
    :param last: author last name
    :param email: author email(s) as a list
    :param affiliation: author affiliation as a list
    :return: list of pmids identified by the catalyst service that have a
        high probability of being written by the author
    """
    result = get_catalyst_pmids_xml(first, middle, last, email, affiliation)
    # create a Document Object Model (DOM) from the Harvard Catalyst result
    dom = parseString(result)
    return [node.childNodes[0].data for node in dom.getElementsByTagName('PMID')]
d0cb5560ec8e6f80627b40c4623683732c84dc7c
3,653,719
def validar(request, op):
    """Checks consistency based on a log file."""
    lista_datas = []
    # log file used for the consistency check
    with open(settings.BASE_DIR + "/log.txt", "r+") as fileobj:
        for line in fileobj:  # read each line of the file
            if "inicio" in line:
                lista_datas.append(line[:8])  # add to the list of problem dates
            elif "fim" in line:
                lista_datas.remove(line[:8])  # remove from the list of problem dates

    if len(lista_datas) != 0:
        # message to be shown on screen
        messages.error(request, " inconsistent operations, redo day or days -- " + str(lista_datas))
    else:
        # message to be shown on screen
        messages.success(request, " consistent operations")

    if op == 2:  # handling the return value for the execute/generate button
        if len(lista_datas) != 0:
            return False
        return True
7a86d9c15ae632912039c26e74eb677f2ffd7257
3,653,720
from typing import List from typing import Dict def upload_categories_to_fyle(workspace_id): """ Upload categories to Fyle """ try: fyle_credentials: FyleCredential = FyleCredential.objects.get(workspace_id=workspace_id) xero_credentials: XeroCredentials = XeroCredentials.objects.get(workspace_id=workspace_id) fyle_connection = FyleConnector( refresh_token=fyle_credentials.refresh_token ) platform = PlatformConnector(fyle_credentials) xero_connection = XeroConnector( credentials_object=xero_credentials, workspace_id=workspace_id ) platform.categories.sync() xero_connection.sync_accounts() xero_attributes = DestinationAttribute.objects.filter(attribute_type='ACCOUNT', workspace_id=workspace_id) xero_attributes = remove_duplicates(xero_attributes) fyle_payload: List[Dict] = create_fyle_categories_payload(xero_attributes, workspace_id) if fyle_payload: fyle_connection.connection.Categories.post(fyle_payload) platform.categories.sync() return xero_attributes except XeroCredentials.DoesNotExist: logger.error( 'Xero Credentials not found for workspace_id %s', workspace_id, )
0efcdc205a3aaa33acd88a231984ab9407d994ac
3,653,721
def georegister_px_df(df, im_fname=None, affine_obj=None, crs=None, geom_col='geometry', precision=None): """Convert a dataframe of geometries in pixel coordinates to a geo CRS. Arguments --------- df : :class:`pandas.DataFrame` A :class:`pandas.DataFrame` with polygons in a column named ``"geometry"``. im_fname : str, optional A filename or :class:`rasterio.DatasetReader` object containing an image that has the same bounds as the pixel coordinates in `df`. If not provided, `affine_obj` and `crs` must both be provided. affine_obj : `list` or :class:`affine.Affine`, optional An affine transformation to apply to `geom` in the form of an ``[a, b, d, e, xoff, yoff]`` list or an :class:`affine.Affine` object. Required if not using `raster_src`. crs : dict, optional The coordinate reference system for the output GeoDataFrame. Required if not providing a raster image to extract the information from. Format should be ``{'init': 'epsgxxxx'}``, replacing xxxx with the EPSG code. geom_col : str, optional The column containing geometry in `df`. If not provided, defaults to ``"geometry"``. precision : int, optional The decimal precision for output geometries. If not provided, the vertex locations won't be rounded. """ if im_fname is not None: affine_obj = rasterio.open(im_fname).transform crs = rasterio.open(im_fname).crs else: if not affine_obj or not crs: raise ValueError( 'If an image path is not provided, ' + 'affine_obj and crs must be.') tmp_df = affine_transform_gdf(df, affine_obj, geom_col=geom_col, precision=precision) return gpd.GeoDataFrame(tmp_df, crs=crs)
e310fee04d214186f60965e68fb2b896b8ad0004
3,653,722
from io import StringIO
from xml.etree import ElementTree as xml  # assumed binding for `xml.parse` below

# `uic` and `QtWidgets` come from the Qt binding in use (e.g. pysideuic /
# PySide); the original snippet does not show these imports.


def load_ui_type(ui_file):
    """
    PySide lacks a "load_ui_type" command like PyQt4 has, so we have to convert
    the ui file to py code in-memory first and then execute it in a special
    frame to retrieve the form_class.
    """
    parsed = xml.parse(ui_file)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text

    with open(ui_file, 'r') as f:
        o = StringIO()
        frame = {}

        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc, frame)

        # Fetch the base_class and form class based on their type
        # in the xml from designer
        form_class = frame['Ui_%s' % form_class]
        base_class = eval('QtWidgets.%s' % widget_class)
    return base_class, form_class
1f9bfc05d52fd8f25d63104c93f675cc8e978501
3,653,723
def how_did_I_do(MLP, df, samples, expected):
    """Simple report of expected inputs versus actual outputs."""
    predictions = MLP.predict(df[samples].to_list())
    _df = pd.DataFrame({"Expected": df[expected], "Predicted": predictions})
    _df["Correct"] = _df["Expected"] == _df["Predicted"]
    print(f'The network got {sum(_df["Correct"])} out of {len(_df)} correct.')
    return _df
5fbebeac01dad933c20b3faf3f8682ae59d173ba
3,653,724
def all_bootstrap_os():
    """Return a list of all the OS that can be used to bootstrap Spack"""
    return list(data()['images'])
b7a58aabe17ee28ed783a9d43d1d8db5d0b85db3
3,653,725
def coords_to_volume(coords: np.ndarray, v_size: int,
                     noise_treatment: bool = False) -> np.ndarray:
    """Converts coordinates to binary voxels."""
    # Input is centered on [0, 0, 0].
    return weights_to_volume(coords=coords, weights=1, v_size=v_size,
                             noise_treatment=noise_treatment)
62e2ba5549faff51e4da68f6bc9521ff2f9ce9cb
3,653,726
def logo(symbol, external=False, vprint=False):
    """
    :param symbol: The ticker or symbol of the stock you would like to request.
    :type symbol: string, required
    :return: Google APIs link to the logo for the requested ticker.
    """
    instance = iexCommon('stock', symbol, 'logo', external=external)
    return instance.execute()
320755632f81686ceb35a75b44c5176893ea37e2
3,653,727
def get_dependency_node(element):
    """
    Returns a Maya MFnDependencyNode from the given element

    :param element: Maya node to return a dependency node class object
    :type element: string
    """
    # add the element to a Maya selection list
    m_selectin_list = OpenMaya.MSelectionList()
    m_selectin_list.add(element)

    # creates an MObject
    m_object = OpenMaya.MObject()

    # gets the MObject from the list
    m_selectin_list.getDependNode(0, m_object)

    return OpenMaya.MFnDependencyNode(m_object)
d573b14cf7ba54fd07f135d37c90cfe75e74992a
3,653,728
import numpy as np


def create_lineal_data(slope=1, bias=0, spread=0.25, data_size=50):
    """
    Helper function to create linear data.

    :param slope: slope of the linear function.
    :param bias: bias of the linear function.
    :param spread: spread of the normal distribution.
    :param data_size: number of samples to generate.
    :return x, y: data and labels
    """
    x = np.linspace(0, 1, data_size)
    y = x * slope + bias + np.random.normal(scale=spread, size=x.shape)
    return x, y
fa735416a1f23a5aa29f66e353d187a5a896df7a
3,653,729
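A short usage sketch for the helper above; the least-squares fit with `numpy.polyfit` is added here purely as an illustration and is not part of the original record:

import numpy as np

np.random.seed(0)
x, y = create_lineal_data(slope=2.0, bias=1.0, spread=0.1, data_size=100)
# Recover the slope/bias from the noisy samples with a degree-1 least-squares fit.
fitted_slope, fitted_bias = np.polyfit(x, y, deg=1)
print(round(fitted_slope, 1), round(fitted_bias, 1))  # roughly 2.0 and 1.0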
def parse_station_resp(fid): """ Gather information from a single station IRIS response file *fid*. Return the information as a :class:`RespMap`. """ resp_map = RespMap() # sanity check initialization network = None stn = None location = None # skip initial header comment block skip_block_header_comments(fid) while True: block_header, block, eof = parse_block(fid) # sanity check (same network, station, and location across recorded blocks) network = check(block_header, 'Network', network) stn = check(block_header, 'Station', stn) location = check(block_header, 'Location', location) # store block information interval = DateTimeInterval.closed_open(block_header['Start_date'], block_header['End_date']) resp_map.setdefault(interval, {})[block_header['Channel']] = block if eof: break resp_map.network = network resp_map.stn = stn resp_map.location = location return resp_map
9d61b2c033008fc594b230aad83378a442cb748b
3,653,730
def plot_pol(image, figsize=(8,8), print_stats=True, scaled=True, evpa_ticks=True): """Mimics the plot_pol.py script in ipole/scripts""" fig, ax = plt.subplots(2, 2, figsize=figsize) # Total intensity plot_I(ax[0,0], image, xlabel=False) # Quiver on intensity if evpa_ticks: plot_evpa_ticks(ax[0,0], image, n_evpa=30) # Linear polarization fraction plot_lpfrac(ax[0,1], image, xlabel=False, ylabel=False) # evpa plot_evpa_rainbow(ax[1,0], image) # circular polarization fraction plot_cpfrac(ax[1,1], image, ylabel=False) if print_stats: # print image-average quantities to command line print("Flux [Jy]: {0:g} ({1:g} unpol)".format(image.flux(), image.flux_unpol())) print("I,Q,U,V [Jy]: {0:g} {1:g} {2:g} {3:g}".format(image.Itot(), image.Qtot(), image.Utot(), image.Vtot())) print("LP [%]: {0:g}".format(100.*image.lpfrac_int())) print("CP [%]: {0:g}".format(100.*image.cpfrac_int())) print("EVPA [deg]: {0:g}".format(image.evpa_int())) return fig
dc3741703435bb95b7ea511460d9feda39ea11f3
3,653,731
def dbrg(ds, T, r): """ Segmentation by density-based region growing (DBRG). Parameters ---------- ds : np.ndarray The mask image. T : float Initial mask threshold. r : int Density connectivity search radius. """ M = _generate_init_mask(ds, T) D = _density_distribution(len(ds), M, r) S = _generate_seeds(D) # make sure at least one seed exists assert(S.any()) # unlabeled R = np.zeros_like(M, dtype=np.uint32) V = np.full_like(M, np.NINF, dtype=np.float32) logger.debug("initial labeling by density") # label by density map for i, d in enumerate(D): R[(d > V) & S] = i+1 V[(d > V) & S] = d[(d > V) & S] logger.debug("density conncetivity") # label by density connectivity n, m, l = M.shape v = np.empty(len(D)+1, dtype=np.float32) ps = [] # reset pixel coordinates for z in range(0, n): for y in range(0, m): for x in range(0, l): if R[z, y, x] > 0: continue pu = min(x+1, l-1) pd = max(x-1, 0) pr = min(y+1, m-1) pl = max(y-1, 0) pt = min(z+1, n-1) pb = max(z-1, 0) v.fill(0) for zz in range(pb, pt+1): for yy in range(pl, pr+1): for xx in range(pd, pu+1): if ((xx-x)**2 + (yy-y)**2 + (zz-z)*2 <= r*r): v[R[zz, yy, xx]] += 1 R[z, y, x] = v.argmax() if R[z, y, x] == 0: ps.append((z, y, x)) logger.debug("nearest neighbor") # label by nearest neighbor psv = [] # filled result for z, y, x in ps: r = 1 while True: pu = min(x+1, l-1) pd = max(x-1, 0) pr = min(y+1, m-1) pl = max(y-1, 0) pt = min(z+1, n-1) pb = max(z-1, 0) v = [] for zz in range(pb, pt+1): for yy in range(pl, pr+1): for xx in range(pd, pu+1): v.append((R[zz, yy, xx], (xx-x)**2 + (yy-y)**2 + (zz-z)**2)) if len(v) == 0: r += 1 else: v.sort(key=lambda p: p[1]) psv.append(v[0][0]) break for (z, y, x), v in zip(ps, psv): R[z, y, x] = v # make sure each position is assigned a mask value assert(np.all(R != 0)) return R
8b83c1335080ebda6087489d76db7bbbcc5d3b29
3,653,732
def _ensure_dtype_type(value, dtype: DtypeObj):
    """
    Ensure that the given value is an instance of the given dtype.

    e.g. if out dtype is np.complex64_, we should have an instance of that
    as opposed to a python complex object.

    Parameters
    ----------
    value : object
    dtype : np.dtype or ExtensionDtype

    Returns
    -------
    object
    """
    # Start with exceptions in which we do _not_ cast to numpy types
    if is_extension_array_dtype(dtype):
        return value
    elif dtype == np.object_:
        return value
    elif isna(value):
        # e.g. keep np.nan rather than try to cast to np.float32(np.nan)
        return value

    return dtype.type(value)
36de4b993e2da0bacf3228d46e13332f89346210
3,653,733
from typing import List


def format_batch_request_last_fm(listens: List[Listen]) -> Request:
    """
    Format a POST request to scrobble the given listens to Last.fm.
    """
    assert len(listens) <= 50, 'Last.fm allows at most 50 scrobbles per batch.'

    params = {
        'method': 'track.scrobble',
        'sk': LAST_FM_SESSION_KEY,
    }

    for i, listen in enumerate(listens):
        params.update(listen.format_lastfm_scrobble(i))

    return format_signed_request(http_method='POST', data=params)
8f7b36b6880ecd91e19282b80975cccc999014b6
3,653,734
import os import logging import time def train_eval( root_dir, gpu=0, env_load_fn=None, model_ids=None, reload_interval=None, eval_env_mode='headless', num_iterations=1000000, conv_1d_layer_params=None, conv_2d_layer_params=None, encoder_fc_layers=[256], actor_fc_layers=[256, 256], critic_obs_fc_layers=None, critic_action_fc_layers=None, critic_joint_fc_layers=[256, 256], # Params for collect initial_collect_steps=10000, collect_steps_per_iteration=1, num_parallel_environments=1, replay_buffer_capacity=1000000, # Params for target update target_update_tau=0.005, target_update_period=1, # Params for train train_steps_per_iteration=1, batch_size=256, actor_learning_rate=3e-4, critic_learning_rate=3e-4, alpha_learning_rate=3e-4, td_errors_loss_fn=tf.compat.v1.losses.mean_squared_error, gamma=0.99, reward_scale_factor=1.0, gradient_clipping=None, # Params for eval num_eval_episodes=30, eval_interval=10000, eval_only=False, eval_deterministic=False, num_parallel_environments_eval=1, model_ids_eval=None, # Params for summaries and logging train_checkpoint_interval=10000, policy_checkpoint_interval=10000, rb_checkpoint_interval=50000, log_interval=100, summary_interval=1000, summaries_flush_secs=10, debug_summaries=False, summarize_grads_and_vars=False, eval_metrics_callback=None): """A simple train and eval for SAC.""" root_dir = os.path.expanduser(root_dir) train_dir = os.path.join(root_dir, 'train') eval_dir = os.path.join(root_dir, 'eval') train_summary_writer = tf.compat.v2.summary.create_file_writer( train_dir, flush_millis=summaries_flush_secs * 1000) train_summary_writer.set_as_default() eval_summary_writer = tf.compat.v2.summary.create_file_writer( eval_dir, flush_millis=summaries_flush_secs * 1000) eval_metrics = [ batched_py_metric.BatchedPyMetric( py_metrics.AverageReturnMetric, metric_args={'buffer_size': num_eval_episodes}, batch_size=num_parallel_environments_eval), batched_py_metric.BatchedPyMetric( py_metrics.AverageEpisodeLengthMetric, metric_args={'buffer_size': num_eval_episodes}, batch_size=num_parallel_environments_eval), ] eval_summary_flush_op = eval_summary_writer.flush() global_step = tf.compat.v1.train.get_or_create_global_step() with tf.compat.v2.summary.record_if( lambda: tf.math.equal(global_step % summary_interval, 0)): if reload_interval is None: if model_ids is None: model_ids = [None] * num_parallel_environments else: assert len(model_ids) == num_parallel_environments, \ 'model ids provided, but length not equal to num_parallel_environments' else: train_model_ids = [model['id'] for model in suite_gibson.get_train_models()] model_ids = np.random.choice(train_model_ids, num_parallel_environments).tolist() if model_ids_eval is None: model_ids_eval = [None] * num_parallel_environments_eval else: assert len(model_ids_eval) == num_parallel_environments_eval, \ 'model ids eval provided, but length not equal to num_parallel_environments_eval' tf_py_env = [lambda model_id=model_ids[i]: env_load_fn(model_id, 'headless', gpu) for i in range(num_parallel_environments)] tf_env = tf_py_environment.TFPyEnvironment(parallel_py_environment.ParallelPyEnvironment(tf_py_env)) if eval_env_mode == 'gui': assert num_parallel_environments_eval == 1, 'only one GUI env is allowed' eval_py_env = [lambda model_id=model_ids_eval[i]: env_load_fn(model_id, eval_env_mode, gpu) for i in range(num_parallel_environments_eval)] eval_py_env = parallel_py_environment.ParallelPyEnvironment(eval_py_env) # Get the data specs from the environment time_step_spec = tf_env.time_step_spec() observation_spec 
= time_step_spec.observation action_spec = tf_env.action_spec() print('observation_spec', observation_spec) print('action_spec', action_spec) glorot_uniform_initializer = tf.compat.v1.keras.initializers.glorot_uniform() preprocessing_layers = {} if 'rgb' in observation_spec: preprocessing_layers['rgb'] = tf.keras.Sequential(mlp_layers( conv_1d_layer_params=None, conv_2d_layer_params=conv_2d_layer_params, fc_layer_params=encoder_fc_layers, kernel_initializer=glorot_uniform_initializer, )) if 'depth' in observation_spec: preprocessing_layers['depth'] = tf.keras.Sequential(mlp_layers( conv_1d_layer_params=None, conv_2d_layer_params=conv_2d_layer_params, fc_layer_params=encoder_fc_layers, kernel_initializer=glorot_uniform_initializer, )) if 'sensor' in observation_spec: preprocessing_layers['sensor'] = tf.keras.Sequential(mlp_layers( conv_1d_layer_params=None, conv_2d_layer_params=None, fc_layer_params=encoder_fc_layers, kernel_initializer=glorot_uniform_initializer, )) if len(preprocessing_layers) <= 1: preprocessing_combiner = None else: preprocessing_combiner = tf.keras.layers.Concatenate(axis=-1) actor_net = actor_distribution_network.ActorDistributionNetwork( observation_spec, action_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, fc_layer_params=actor_fc_layers, continuous_projection_net=normal_projection_net, kernel_initializer=glorot_uniform_initializer, ) critic_net = critic_network.CriticNetwork( (observation_spec, action_spec), preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, observation_fc_layer_params=critic_obs_fc_layers, action_fc_layer_params=critic_action_fc_layers, joint_fc_layer_params=critic_joint_fc_layers, kernel_initializer=glorot_uniform_initializer, ) tf_agent = sac_agent.SacAgent( time_step_spec, action_spec, actor_network=actor_net, critic_network=critic_net, actor_optimizer=tf.compat.v1.train.AdamOptimizer( learning_rate=actor_learning_rate), critic_optimizer=tf.compat.v1.train.AdamOptimizer( learning_rate=critic_learning_rate), alpha_optimizer=tf.compat.v1.train.AdamOptimizer( learning_rate=alpha_learning_rate), target_update_tau=target_update_tau, target_update_period=target_update_period, td_errors_loss_fn=td_errors_loss_fn, gamma=gamma, reward_scale_factor=reward_scale_factor, gradient_clipping=gradient_clipping, debug_summaries=debug_summaries, summarize_grads_and_vars=summarize_grads_and_vars, train_step_counter=global_step) config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True sess = tf.compat.v1.Session(config=config) # Make the replay buffer. 
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer( data_spec=tf_agent.collect_data_spec, batch_size=tf_env.batch_size, max_length=replay_buffer_capacity) replay_observer = [replay_buffer.add_batch] if eval_deterministic: eval_py_policy = py_tf_policy.PyTFPolicy(greedy_policy.GreedyPolicy(tf_agent.policy)) else: eval_py_policy = py_tf_policy.PyTFPolicy(tf_agent.policy) step_metrics = [ tf_metrics.NumberOfEpisodes(), tf_metrics.EnvironmentSteps(), ] train_metrics = step_metrics + [ tf_metrics.AverageReturnMetric( buffer_size=100, batch_size=num_parallel_environments), tf_metrics.AverageEpisodeLengthMetric( buffer_size=100, batch_size=num_parallel_environments), ] collect_policy = tf_agent.collect_policy initial_collect_policy = random_tf_policy.RandomTFPolicy(time_step_spec, action_spec) initial_collect_op = dynamic_step_driver.DynamicStepDriver( tf_env, initial_collect_policy, observers=replay_observer + train_metrics, num_steps=initial_collect_steps * num_parallel_environments).run() collect_op = dynamic_step_driver.DynamicStepDriver( tf_env, collect_policy, observers=replay_observer + train_metrics, num_steps=collect_steps_per_iteration * num_parallel_environments).run() # Prepare replay buffer as dataset with invalid transitions filtered. def _filter_invalid_transition(trajectories, unused_arg1): return ~trajectories.is_boundary()[0] dataset = replay_buffer.as_dataset( num_parallel_calls=5, sample_batch_size=5 * batch_size, num_steps=2).apply(tf.data.experimental.unbatch()).filter( _filter_invalid_transition).batch(batch_size).prefetch(5) dataset_iterator = tf.compat.v1.data.make_initializable_iterator(dataset) trajectories, unused_info = dataset_iterator.get_next() train_op = tf_agent.train(trajectories) summary_ops = [] for train_metric in train_metrics: summary_ops.append(train_metric.tf_summaries( train_step=global_step, step_metrics=step_metrics)) with eval_summary_writer.as_default(), tf.compat.v2.summary.record_if(True): for eval_metric in eval_metrics: eval_metric.tf_summaries( train_step=global_step, step_metrics=step_metrics) train_checkpointer = common.Checkpointer( ckpt_dir=train_dir, agent=tf_agent, global_step=global_step, metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics')) policy_checkpointer = common.Checkpointer( ckpt_dir=os.path.join(train_dir, 'policy'), policy=tf_agent.policy, global_step=global_step) rb_checkpointer = common.Checkpointer( ckpt_dir=os.path.join(train_dir, 'replay_buffer'), max_to_keep=1, replay_buffer=replay_buffer) init_agent_op = tf_agent.initialize() with sess.as_default(): # Initialize the graph. train_checkpointer.initialize_or_restore(sess) if eval_only: metric_utils.compute_summaries( eval_metrics, eval_py_env, eval_py_policy, num_episodes=num_eval_episodes, global_step=0, callback=eval_metrics_callback, tf_summaries=False, log=True, ) print('EVAL DONE') return # Initialize training. rb_checkpointer.initialize_or_restore(sess) sess.run(dataset_iterator.initializer) common.initialize_uninitialized_variables(sess) sess.run(init_agent_op) sess.run(train_summary_writer.init()) sess.run(eval_summary_writer.init()) global_step_val = sess.run(global_step) if global_step_val == 0: # Initial eval of randomly initialized policy metric_utils.compute_summaries( eval_metrics, eval_py_env, eval_py_policy, num_episodes=num_eval_episodes, global_step=0, callback=eval_metrics_callback, tf_summaries=True, log=True, ) # Run initial collect. 
logging.info('Global step %d: Running initial collect op.', global_step_val) sess.run(initial_collect_op) # Checkpoint the initial replay buffer contents. rb_checkpointer.save(global_step=global_step_val) logging.info('Finished initial collect.') else: logging.info('Global step %d: Skipping initial collect op.', global_step_val) collect_call = sess.make_callable(collect_op) train_step_call = sess.make_callable([train_op, summary_ops]) global_step_call = sess.make_callable(global_step) timed_at_step = global_step_call() time_acc = 0 steps_per_second_ph = tf.compat.v1.placeholder( tf.float32, shape=(), name='steps_per_sec_ph') steps_per_second_summary = tf.compat.v2.summary.scalar( name='global_steps_per_sec', data=steps_per_second_ph, step=global_step) for _ in range(num_iterations): start_time = time.time() collect_call() for _ in range(train_steps_per_iteration): total_loss, _ = train_step_call() time_acc += time.time() - start_time global_step_val = global_step_call() if global_step_val % log_interval == 0: logging.info('step = %d, loss = %f', global_step_val, total_loss.loss) steps_per_sec = (global_step_val - timed_at_step) / time_acc logging.info('%.3f steps/sec', steps_per_sec) sess.run( steps_per_second_summary, feed_dict={steps_per_second_ph: steps_per_sec}) timed_at_step = global_step_val time_acc = 0 if global_step_val % train_checkpoint_interval == 0: train_checkpointer.save(global_step=global_step_val) if global_step_val % policy_checkpoint_interval == 0: policy_checkpointer.save(global_step=global_step_val) if global_step_val % rb_checkpoint_interval == 0: rb_checkpointer.save(global_step=global_step_val) if global_step_val % eval_interval == 0: metric_utils.compute_summaries( eval_metrics, eval_py_env, eval_py_policy, num_episodes=num_eval_episodes, global_step=global_step_val, callback=eval_metrics_callback, tf_summaries=True, log=True, ) if reload_interval is not None and global_step_val % reload_interval == 0: model_ids = np.random.choice(train_model_ids, num_parallel_environments).tolist() tf_env.reload_model(model_ids) sess.close()
5fe4a1798b8d521af8cfc70b04af177203aa1aa0
3,653,735
import os


def file_mtime_ns(file):
    """Get the ``os.stat(file).st_mtime_ns`` value."""
    return os.stat(file).st_mtime_ns
20b384549dae19e35d02b85b20dd62271352f08d
3,653,736
def get_entry_for_file_attachment(item_id, attachment):
    """
    Creates a file entry for an attachment
    :param item_id: item_id of the attachment
    :param attachment: attachment dict
    :return: file entry dict for attachment
    """
    entry = fileResult(get_attachment_name(attachment.name), attachment.content)
    entry["EntryContext"] = {
        CONTEXT_UPDATE_EWS_ITEM_FOR_ATTACHMENT + CONTEXT_UPDATE_FILE_ATTACHMENT:
            parse_attachment_as_dict(item_id, attachment)
    }
    return entry
c3d10402da0ada14289a7807ef1a57f97c6a22ba
3,653,737
def check_all_particles_present(partlist, gambit_pdg_codes):
    """
    Checks all particles exist in the particle_database.yaml.
    """
    absent = []
    for i in range(len(partlist)):
        if not partlist[i].pdg() in list(gambit_pdg_codes.values()):
            absent.append(partlist[i])
    absent_by_pdg = [x.pdg() for x in absent]

    if len(absent) == 0:
        print("All particles are in the GAMBIT database.")
    else:
        print(("\nThe following particles (by PDG code) are missing from the "
               "particle database: {0}. GUM is now adding them to "
               "../config/particle_database.yaml.\n").format(absent_by_pdg))

    return absent
eab49388d472934a61900d8e972c0f2ef01ae1fb
3,653,738
def binarize_tree(t):
    """Convert all n-ary nodes into left-branching subtrees.

    Returns a new tree. The original tree is left intact.
    """
    def recurs_binarize_tree(t):
        if t.height() <= 2:
            return t[0]
        if len(t) == 1:
            return recurs_binarize_tree(t[0])
        elif len(t) == 2:
            new_children = []
            for i, child in enumerate(t):
                new_children.append(recurs_binarize_tree(child))
            return Tree(t.node, new_children)
            # return Tree(-1, new_children)
        else:
            # left_child = recurs_binarize_tree(Tree(-1, t[0:-1]))
            if t.node[-1] != '_':
                new_node_name = t.node + '_'
            else:
                new_node_name = t.node
            left_child = recurs_binarize_tree(Tree(new_node_name, t[0:-1]))
            right_child = recurs_binarize_tree(t[-1])
            # return Tree(-1, [left_child, right_child])
            return Tree(t.node, [left_child, right_child])

    return recurs_binarize_tree(t)
5f9bc8ab7a0c1ab862b7366b188072006a80ff51
3,653,739
def calculate_prfs_using_rdd(y_actual, y_predicted, average='macro'):
    """
    Determines the precision, recall, fscore, and support of the predictions.

    With an average of 'macro', the algorithm calculates metrics for each
    label and finds their unweighted mean. See
    http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html
    for details.

    A better metric for recommender systems is precision at N (also in this package).

    Args:
        y_actual: actual ratings in the format of an RDD of [ (userId, itemId, actualRating) ]
        y_predicted: predicted ratings in the format of an RDD of [ (userId, itemId, predictedRating) ]

    Returns:
        precision, recall, fbeta_score, and support values
    """
    # ((user, item), (prediction, rating)) -> (user, item, prediction, rating)
    prediction_rating_pairs = y_predicted.map(lambda x: ((x[0], x[1]), x[2]))\
        .join(y_actual.map(lambda x: ((x[0], x[1]), x[2])))\
        .map(lambda kv: (kv[0][0], kv[0][1], kv[1][0], kv[1][1]))

    true_vals = np.array(prediction_rating_pairs.map(lambda row: row[3]).collect())
    pred_vals = np.array(prediction_rating_pairs.map(lambda row: row[2]).collect())

    return precision_recall_fscore_support(
        [int(np.round(x)) for x in true_vals],
        [int(np.round(x)) for x in pred_vals],
        average=average)
01fadc6a03f6ce24e736da9d1cfd088b490aa482
3,653,740
def translation_from_matrix(M):
    """Returns the 3 values of translation from the matrix M.

    Parameters
    ----------
    M : list[list[float]]
        A 4-by-4 transformation matrix.

    Returns
    -------
    [float, float, float]
        The translation vector.
    """
    return [M[0][3], M[1][3], M[2][3]]
2b3bddd08772b2480a923a778d962f8e94f4b78a
3,653,741
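A small usage sketch, assuming the `translation_from_matrix` function from the record above is in scope:

# A 4-by-4 transform that translates by (2, 3, 4); rows are nested lists.
M = [[1, 0, 0, 2],
     [0, 1, 0, 3],
     [0, 0, 1, 4],
     [0, 0, 0, 1]]
print(translation_from_matrix(M))  # [2, 3, 4]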
def saving_filename_boundary(save_location, close_up, beafort, wave_roughness):
    """Setting the filename of the figure"""
    if close_up is None:
        return save_location + 'Boundary_comparison_Bft={}_roughness={}.png'.format(beafort, wave_roughness)
    else:
        ymax, ymin = close_up
        return save_location + 'Boundary_comparison_Bft={}_max={}_min={}_roughness={}.png'.format(beafort, ymax, ymin,
                                                                                                  wave_roughness)
c0357a211adc95c35873a0f3b0c900f6b5fe42d0
3,653,742
def get_library() -> CDLL:
    """Return the CDLL instance, loading it if necessary."""
    global LIB
    if LIB is None:
        LIB = _load_library("aries_askar")
        _init_logger()
    return LIB
64183953e7ab3f4e617b050fbf985d79aebc9b95
3,653,743
def childs_page_return_right_login(response_page, smarsy_login):
    """
    Receive the HTML page from the login function and check that it contains
    the expected login.
    """
    if smarsy_login in response_page:
        return True
    else:
        raise ValueError('Invalid Smarsy Login')
e7cb9b8d9df8bd5345f308e78cec28a20919370e
3,653,744
def merge_files(intakes, outcomes): """ Merges intakes and outcomes datasets to create unique line for each animal in the shelter to capture full stories for each animal takes intakes file then outcomes file as arguments returns merged dataset """ # Merge intakes and outcomes on animal id and year animal_shelter_df = pd.merge(intakes, outcomes, on=['animal_id', 'year'], how='left', suffixes=('_intake', '_outcome')) # Filters out animals who have yet to have outcomes and keeps animals where outcome data is later than intake date animal_shelter_df = animal_shelter_df[(~animal_shelter_df['date_o'].isna()) & (animal_shelter_df['date_o'] > animal_shelter_df['date_i'])] # Creates new days_in_shelter variable animal_shelter_df['days_in_shelter'] = (animal_shelter_df['date_o'] - animal_shelter_df['date_i']).dt.days # Sorts the column names to be alphabetical animal_shelter_df = animal_shelter_df[animal_shelter_df.columns.sort_values()] return animal_shelter_df
c7110cf1b5fe7fad52c3e331c8d6840de83891b3
3,653,745
def _ssepdpsolve_single_trajectory(data, Heff, dt, times, N_store, N_substeps, psi_t, dims, c_ops, e_ops): """ Internal function. See ssepdpsolve. """ states_list = [] phi_t = np.copy(psi_t) prng = RandomState() # todo: seed it r_jump, r_op = prng.rand(2) jump_times = [] jump_op_idx = [] for t_idx, t in enumerate(times): if e_ops: for e_idx, e in enumerate(e_ops): s = cy_expect_psi_csr( e.data.data, e.data.indices, e.data.indptr, psi_t, 0) data.expect[e_idx, t_idx] += s data.ss[e_idx, t_idx] += s ** 2 else: states_list.append(Qobj(psi_t, dims=dims)) for j in range(N_substeps): if norm(phi_t) ** 2 < r_jump: # jump occurs p = np.array([norm(c.data * psi_t) ** 2 for c in c_ops]) p = np.cumsum(p / np.sum(p)) n = np.where(p >= r_op)[0][0] # apply jump psi_t = c_ops[n].data * psi_t psi_t /= norm(psi_t) phi_t = np.copy(psi_t) # store info about jump jump_times.append(times[t_idx] + dt * j) jump_op_idx.append(n) # get new random numbers for next jump r_jump, r_op = prng.rand(2) # deterministic evolution wihtout correction for norm decay dphi_t = (-1.0j * dt) * (Heff.data * phi_t) # deterministic evolution with correction for norm decay dpsi_t = (-1.0j * dt) * (Heff.data * psi_t) A = 0.5 * np.sum([norm(c.data * psi_t) ** 2 for c in c_ops]) dpsi_t += dt * A * psi_t # increment wavefunctions phi_t += dphi_t psi_t += dpsi_t # ensure that normalized wavefunction remains normalized # this allows larger time step than otherwise would be possible psi_t /= norm(psi_t) return states_list, jump_times, jump_op_idx
0d83b67049d2e48ec3c887339b7d1c935cd897c7
3,653,746
def construct_features_MH_1(data):
    """
    Processes the provided pandas dataframe object by deleting the original
    METER_ID, LOCATION_HASH, BILLING_CYCLE, COMMENTS, and DAYS_FROM_BILLDT
    columns.
    """
    try:
        del data['METER_ID']
        del data['LOCATION_HASH']
        del data['BILLING_CYCLE']
        del data['COMMENTS']
        del data['DAYS_FROM_BILLDT']
        return data
    except Exception as e:
        logger.info(
            'There was a problem constructing the feature vector for the provided data set: {}'.format(str(e)))
32f238ee730e84c0c699759913ffd2f6a2fc6fbf
3,653,747
from functools import cmp_to_key


def sort_observations(observations):
    """
    Method to sort observations to make sure that the "winner" is at index 0
    """
    return sorted(observations, key=cmp_to_key(cmp_observation), reverse=True)
183b044a48b4a7ea5093efaa92bd0977b085d949
3,653,748
import numpy as np


def coor_trans(point, theta):
    """
    Coordinate transformation.
    theta direction: clockwise rotation is positive.
    """
    point = np.transpose(point)
    k = np.array([[np.cos(theta), np.sin(theta)],
                  [-np.sin(theta), np.cos(theta)]])
    print(point)
    # return np.dot(k, point)
    return np.round(np.dot(k, point), 6)
aa3b1532c629011e6f0ce72dc80eb1eebfc43765
3,653,749
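A brief usage sketch, assuming the `coor_trans` function from the record above is in scope:

import numpy as np

# Rotate the unit x-vector by 90 degrees (clockwise is positive here).
rotated = coor_trans([1.0, 0.0], np.pi / 2)
print(rotated)  # approximately [0., -1.]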
import torch import time def ppo( env_fn, actor_critic=core.MLPActorCritic2Heads, ac_kwargs=dict(), seed=0, steps_per_epoch=4000, epochs=100, epochs_rnd_warmup=1, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4, vf_lr=1e-3, rnd_lr=1e-3, train_pi_iters=80, train_v_iters=80, train_rnd_iters=80, lam=0.97, max_ep_len=200, target_kl=0.01, logger_kwargs=dict(), save_freq=10, scale_reward=100, only_intr=False, norm_intr=False, alpha_std_est=0.05, single_head=False, ): """ Proximal Policy Optimization (by clipping), with early stopping based on approximate KL Args: env_fn : A function which creates a copy of the environment. The environment must satisfy the OpenAI Gym API. actor_critic: The constructor method for a PyTorch Module with a ``step`` method, an ``act`` method, a ``pi`` module, and a ``v`` module. The ``step`` method should accept a batch of observations and return: =========== ================ ====================================== Symbol Shape Description =========== ================ ====================================== ``a`` (batch, act_dim) | Numpy array of actions for each | observation. ``v`` (batch,) | Numpy array of value estimates | for the provided observations. ``logp_a`` (batch,) | Numpy array of log probs for the | actions in ``a``. =========== ================ ====================================== The ``act`` method behaves the same as ``step`` but only returns ``a``. The ``pi`` module's forward call should accept a batch of observations and optionally a batch of actions, and return: =========== ================ ====================================== Symbol Shape Description =========== ================ ====================================== ``pi`` N/A | Torch Distribution object, containing | a batch of distributions describing | the policy for the provided observations. ``logp_a`` (batch,) | Optional (only returned if batch of | actions is given). Tensor containing | the log probability, according to | the policy, of the provided actions. | If actions not given, will contain | ``None``. =========== ================ ====================================== The ``v`` module's forward call should accept a batch of observations and return: =========== ================ ====================================== Symbol Shape Description =========== ================ ====================================== ``v`` (batch,) | Tensor containing the value estimates | for the provided observations. (Critical: | make sure to flatten this!) =========== ================ ====================================== ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object you provided to PPO. seed (int): Seed for random number generators. steps_per_epoch (int): Number of steps of interaction (state-action pairs) for the agent and the environment in each epoch. epochs (int): Number of epochs of interaction (equivalent to number of policy updates) to perform. epochs_rnd_warmup (int): Number of epochs of training RND before starting training agent. gamma (float): Discount factor. (Always between 0 and 1.) clip_ratio (float): Hyperparameter for clipping in the policy objective. Roughly: how far can the new policy go from the old policy while still profiting (improving the objective function)? The new policy can still go farther than the clip_ratio says, but it doesn't help on the objective anymore. (Usually small, 0.1 to 0.3.) Typically denoted by :math:`\\epsilon`. pi_lr (float): Learning rate for policy optimizer. vf_lr (float): Learning rate for value function optimizer. 
rnd_lr (float): Learning rate for RND optimizer. train_pi_iters (int): Maximum number of gradient descent steps to take on policy loss per epoch. (Early stopping may cause optimizer to take fewer than this.) train_v_iters (int): Number of gradient descent steps to take on value function per epoch. train_rnd_iters (int): Number of gradient descent steps to take on RND per epoch. lam (float): Lambda for GAE-Lambda. (Always between 0 and 1, close to 1.) max_ep_len (int): Maximum length of trajectory / episode / rollout. target_kl (float): Roughly what KL divergence we think is appropriate between new and old policies after an update. This will get used for early stopping. (Usually small, 0.01 or 0.05.) logger_kwargs (dict): Keyword args for EpochLogger. save_freq (int): How often (in terms of gap between epochs) to save the current policy and value function. scale_reward (float): total_reward = extr_reward + scale_reward*intr_reward """ # Special function to avoid certain slowdowns from PyTorch + MPI combo. setup_pytorch_for_mpi() # Set up logger and save configuration logger = EpochLogger(**logger_kwargs) logger.save_config(locals()) # Random seed seed += 10000 * proc_id() torch.manual_seed(seed) np.random.seed(seed) # Instantiate environment env = env_fn() obs_dim = env.observation_space.shape act_dim = env.action_space.shape # Create actor-critic module ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs) # Sync params across processes sync_params(ac) # Create RND module and optimizer rnd = RND(obs_dim[0], (32, 32), nn.Sigmoid) sync_params(rnd) rnd_optimizer = Adam(rnd.predictor_network.parameters(), lr=rnd_lr) # Create running estimator for reward normalization reward_std_estimator = core.running_exp_estimator(alpha_std_est) # Count variables var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v_extr, ac.v_intr, rnd.predictor_network]) logger.log("\nNumber of parameters: \t pi: %d, \t v_extr: %d, \t v_intr: %d, \t rnd: %d\n" % var_counts) local_steps_per_epoch = int(steps_per_epoch / num_procs()) o = env.reset() # Train RND on random agent for 'epochs_rnd_warmup' epochs for epoch in range(epochs_rnd_warmup): for t in range(local_steps_per_epoch): a, _, _, _ = ac.step(torch.as_tensor(o, dtype=torch.float32)) next_o, r, d, _ = env.step(a) rnd_loss = rnd.loss(torch.as_tensor(next_o, dtype=torch.float32)) reward_std_estimator.update(rnd_loss.item()) rnd_optimizer.zero_grad() rnd_loss.backward() mpi_avg_grads(rnd.predictor_network) # average grads across MPI processes rnd_optimizer.step() # Set up experience buffer buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam) # Set up function for computing PPO policy loss def compute_loss_pi(data): obs, act, adv, logp_old = data["obs"], data["act"], data["adv"], data["logp"] # Policy loss pi, logp = ac.pi(obs, act) ratio = torch.exp(logp - logp_old) clip_adv = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv loss_pi = -(torch.min(ratio * adv, clip_adv)).mean() # Useful extra info approx_kl = (logp_old - logp).mean().item() ent = pi.entropy().mean().item() clipped = ratio.gt(1 + clip_ratio) | ratio.lt(1 - clip_ratio) clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item() pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac) return loss_pi, pi_info # Set up functions for computing value loss def compute_loss_v_extr(data): obs, ret = data["obs"], data["ret_extr"] return ((ac.v_extr(obs) - ret) ** 2).mean() def compute_loss_v_intr(data): obs, ret = data["obs"], data["ret_intr"] return 
((ac.v_intr(obs) - ret) ** 2).mean() # Set up optimizers for policy and value function pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr) vf_extr_optimizer = Adam(ac.v_extr.parameters(), lr=vf_lr) if not single_head: vf_intr_optimizer = Adam(ac.v_intr.parameters(), lr=vf_lr) # Set up model saving logger.setup_pytorch_saver(ac) def update(epoch): data = buf.get() pi_l_old, pi_info_old = compute_loss_pi(data) pi_l_old = pi_l_old.item() v_extr_l_old = compute_loss_v_extr(data).item() if not single_head: v_intr_l_old = compute_loss_v_intr(data).item() loss_rnd_old = rnd.loss(data["obs"]).item() # Train policy with multiple steps of gradient descent for i in range(train_pi_iters): pi_optimizer.zero_grad() loss_pi, pi_info = compute_loss_pi(data) kl = mpi_avg(pi_info["kl"]) if kl > 1.5 * target_kl: logger.log("Early stopping at step %d due to reaching max kl." % i) break loss_pi.backward() mpi_avg_grads(ac.pi) # average grads across MPI processes pi_optimizer.step() logger.store(StopIter=i) # Value function learning for i in range(train_v_iters): vf_extr_optimizer.zero_grad() loss_v_extr = compute_loss_v_extr(data) loss_v_extr.backward() mpi_avg_grads(ac.v_extr) # average grads across MPI processes vf_extr_optimizer.step() if not single_head: for i in range(train_v_iters): vf_intr_optimizer.zero_grad() loss_v_intr = compute_loss_v_intr(data) loss_v_intr.backward() mpi_avg_grads(ac.v_intr) # average grads across MPI processes vf_intr_optimizer.step() for i in range(train_rnd_iters): rnd_optimizer.zero_grad() loss_rnd = rnd.loss(data["obs"]) loss_rnd.backward() mpi_avg_grads(rnd.predictor_network) # average grads across MPI processes rnd_optimizer.step() # Log changes from update kl, ent, cf = pi_info["kl"], pi_info_old["ent"], pi_info["cf"] logger.store( LossPi=pi_l_old, LossV_extr=v_extr_l_old, LossRND=loss_rnd_old, KL=kl, Entropy=ent, ClipFrac=cf, DeltaLossPi=(loss_pi.item() - pi_l_old), DeltaLossV_extr=(loss_v_extr.item() - v_extr_l_old), DeltaLossRND=(loss_rnd.item() - loss_rnd_old), ) if not single_head: logger.store(LossV_intr=v_intr_l_old, DeltaLossV_intr=(loss_v_intr.item() - v_intr_l_old)) # Prepare for interaction with environment start_time = time.time() o, ep_ret_extr, ep_ret_intr, ep_len = env.reset(), 0, 0, 0 # Main loop: collect experience in env and update/log each epoch for epoch in range(epochs): for t in range(local_steps_per_epoch): a, v_extr, v_intr, logp = ac.step(torch.as_tensor(o, dtype=torch.float32)) next_o, r_extr, d, _ = env.step(a) rnd_reward = rnd.reward(torch.as_tensor(next_o, dtype=torch.float32)) if norm_intr: reward_std_estimator.update(rnd_reward) r_intr = rnd_reward / reward_std_estimator.get_std() logger.store(EpRet_exp_std=reward_std_estimator.get_std()) else: r_intr = rnd_reward # save and log ep_ret_extr += r_extr ep_ret_intr += r_intr ep_len += 1 if only_intr: r_extr = 0 if single_head: buf.store(o, a, r_extr + scale_reward * r_intr, 0, v_extr, 0, logp) else: buf.store(o, a, r_extr, scale_reward * r_intr, v_extr, v_intr, logp) logger.store(VVals_extr=v_extr, VVals_intr=v_intr) # Update obs (critical!) o = next_o timeout = ep_len == max_ep_len terminal = d or timeout epoch_ended = t == local_steps_per_epoch - 1 if terminal or epoch_ended: # if epoch_ended and not(terminal): # print('Warning: trajectory cut off by epoch at %d steps.' % ep_len, flush=True) # logger.log('Warning: trajectory cut off by epoch at %d steps.' 
% ep_len) _, v_extr, v_intr, _ = ac.step(torch.as_tensor(o, dtype=torch.float32)) # if trajectory reached terminal state, value_extr target is zero, else bootstrap value target if not (timeout or epoch_ended): v_extr = 0 if single_head: buf.finish_path(v_extr + v_intr, 0) else: buf.finish_path(v_extr, v_intr) if terminal: # only save EpRet / EpLen if trajectory finished logger.store(EpRet_extr=ep_ret_extr, EpLen=ep_len, EpRet_intr=ep_ret_intr) o, ep_ret_extr, ep_ret_intr, ep_len = env.reset(), 0, 0, 0 # Save model if (epoch % save_freq == 0) or (epoch == epochs - 1): logger.save_state({"env": env}, None) # Perform PPO update! update(epoch) # Log info about epoch logger.log_tabular("Epoch", epoch) logger.log_tabular("EpRet_extr", with_min_and_max=True) logger.log_tabular("EpRet_intr", average_only=True) if norm_intr: logger.log_tabular("EpRet_exp_std", average_only=True) logger.log_tabular("EpLen", average_only=True) logger.log_tabular("VVals_extr", average_only=True) if not single_head: logger.log_tabular("VVals_intr", average_only=True) logger.log_tabular("LossPi", average_only=True) logger.log_tabular("LossV_extr", average_only=True) if not single_head: logger.log_tabular("LossV_intr", average_only=True) logger.log_tabular("LossRND", average_only=True) logger.log_tabular("DeltaLossPi", average_only=True) logger.log_tabular("DeltaLossV_extr", average_only=True) if not single_head: logger.log_tabular("DeltaLossV_intr", average_only=True) logger.log_tabular("TotalEnvInteracts", (epoch + 1) * steps_per_epoch) logger.log_tabular("Entropy", average_only=True) logger.log_tabular("KL", average_only=True) logger.log_tabular("ClipFrac", average_only=True) logger.log_tabular("StopIter", average_only=True) logger.log_tabular("Time", time.time() - start_time) logger.dump_tabular()
481da1fc7cc0677e02009d983345a15fbca23159
3,653,750
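# The PPO+RND routine above normalizes intrinsic rewards with core.running_exp_estimator,
# which is not shown in this snippet. The sketch below is a minimal, assumed implementation
# of such an exponentially weighted mean/std estimator; the class name, alpha default and
# exact update rule are illustrative, not the original code.
class RunningExpEstimator:
    """Exponentially weighted running estimate of mean and standard deviation."""

    def __init__(self, alpha=0.01):
        self.alpha = alpha          # smoothing factor of the exponential average
        self.mean = 0.0
        self.var = 0.0
        self.initialized = False

    def update(self, x):
        if not self.initialized:
            self.mean, self.var, self.initialized = x, 0.0, True
            return
        delta = x - self.mean
        self.mean += self.alpha * delta
        self.var = (1 - self.alpha) * (self.var + self.alpha * delta * delta)

    def get_std(self):
        return max(self.var ** 0.5, 1e-8)   # floor to avoid division by zero


# e.g. est = RunningExpEstimator(alpha=0.05); est.update(2.0); r_intr = 2.0 / est.get_std()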
import heapq


def ltopk(k, seq, key=None):
    """
    >>> ltopk(2, [1, 100, 10, 1000])
    [1000, 100]

    >>> ltopk(2, ['Alice', 'Bob', 'Charlie', 'Dan'], key=len)
    ['Charlie', 'Alice']
    """
    if key is not None and not callable(key):
        key = getter(key)   # getter() is a helper from the surrounding module
    return list(heapq.nlargest(k, seq, key=key))
3d41f8576ca6b2741d12ca8b80c8fb220166b85b
3,653,751
def index():
    """ Root URL response """
    return (
        jsonify(
            name="Promotion REST API Service",
            version="1.0",
        ),
        status.HTTP_200_OK,
    )
7c45e54c3500f638291c85d38d27976952d0a6e3
3,653,752
def add_cameras_default(scene):
    """ Make two camera (main/top) default setup for demo images."""
    cam_main = create_camera_perspective(
        location=(-33.3056, 24.1123, 26.0909),
        rotation_quat=(0.42119, 0.21272, -0.39741, -0.78703),
    )
    scene.collection.objects.link(cam_main)

    cam_top = create_camera_top_view_ortho()
    scene.collection.objects.link(cam_top)

    # make this the main scene camera
    scene.camera = cam_main
    return cam_main, cam_top
50428d5f3c79c4581e397af1411a5a92055fe695
3,653,753
def distr_mean_stde(distribution: np.ndarray) -> tuple:
    """
    Purpose:
        Compute the mean and standard deviation for a distribution.

    Args:
        distribution (np.ndarray): distribution

    Returns:
        tuple (ie. distribution mean and standard deviation)
    """
    # Compute and print the mean, stdev of the resample distribution of means
    distribution_mean = np.mean(distribution)
    standard_error = np.std(distribution)
    print('Bootstrap Distribution: center={:0.2f}, spread={:0.2f}'.format(distribution_mean, standard_error))
    print()

    return distribution_mean, standard_error
9232587e2c1e71a8f7c672cb962961cab7ad8d85
3,653,754
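# Hedged usage sketch for distr_mean_stde above: build a bootstrap distribution of sample
# means and summarize it. The data, seed and resample count are made up for illustration,
# and the function above is assumed to be in scope.
import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(loc=10.0, scale=2.0, size=500)

# resample the data with replacement and collect the mean of each resample
boot_means = np.array([rng.choice(sample, size=sample.size, replace=True).mean()
                       for _ in range(1000)])

center, spread = distr_mean_stde(boot_means)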
from operator import and_ def release_waiting_requests_grouped_fifo(rse_id, count=None, direction='destination', deadline=1, volume=0, session=None): """ Release waiting requests. Transfer requests that were requested first, get released first (FIFO). Also all requests to DIDs that are attached to the same dataset get released, if one children of the dataset is choosed to be released (Grouped FIFO). :param rse_id: The RSE id. :param count: The count to be released. If None, release all waiting requests. :param direction: Direction if requests are grouped by source RSE or destination RSE. :param deadline: Maximal waiting time in hours until a dataset gets released. :param volume: The maximum volume in bytes that should be transfered. :param session: The database session. """ amount_updated_requests = 0 # Release requests that exceeded waiting time if deadline: amount_updated_requests = release_waiting_requests_per_deadline(rse_id=rse_id, deadline=deadline, session=session) count = count - amount_updated_requests grouped_requests_subquery, filtered_requests_subquery = create_base_query_grouped_fifo(rse_id=rse_id, filter_by_rse=direction, session=session) # cumulate amount of children per dataset and combine with each request and only keep requests that dont exceed the limit cumulated_children_subquery = session.query(grouped_requests_subquery.c.name, grouped_requests_subquery.c.scope, grouped_requests_subquery.c.amount_childs, grouped_requests_subquery.c.oldest_requested_at, func.sum(grouped_requests_subquery.c.amount_childs).over(order_by=(grouped_requests_subquery.c.oldest_requested_at)).label('cum_amount_childs'))\ .subquery() cumulated_children_subquery = session.query(filtered_requests_subquery.c.id)\ .join(cumulated_children_subquery, and_(filtered_requests_subquery.c.dataset_name == cumulated_children_subquery.c.name, filtered_requests_subquery.c.dataset_scope == cumulated_children_subquery.c.scope))\ .filter(cumulated_children_subquery.c.cum_amount_childs - cumulated_children_subquery.c.amount_childs < count)\ .subquery() # needed for mysql to update and select from the same table cumulated_children_subquery = session.query(cumulated_children_subquery.c.id).subquery() statement = update(models.Request).where(models.Request.id.in_(cumulated_children_subquery)).values(state=RequestState.QUEUED) amount_updated_requests += session.execute(statement).rowcount # release requests where the whole datasets volume fits in the available volume space if volume: amount_updated_requests += release_waiting_requests_per_free_volume(rse_id=rse_id, volume=volume, session=session) return amount_updated_requests
9a52a28fe06634de73a0436721aa97e590612e17
3,653,755
def _get_gap_memory_pool_size_MB():
    """
    Return the gap memory pool size suitable for usage on the GAP
    command line.

    The GAP 4.5.6 command line parser had issues with large numbers, so
    we return it in megabytes.

    OUTPUT:

    String.

    EXAMPLES:

        sage: from sage.interfaces.gap import \
        ...     _get_gap_memory_pool_size_MB
        sage: _get_gap_memory_pool_size_MB()   # random output
        '1467m'
    """
    pool = get_gap_memory_pool_size()
    pool = (pool // (1024**2)) + 1
    return str(pool) + 'm'
035072ff6fff18859717b131cdd660f252ac6262
3,653,756
async def order_book_l2(symbol: str) -> dict:
    """Fetch the order book (L2)."""
    async with pybotters.Client(base_url=base_url, apis=apis) as client:
        r = await client.get("/orderBook/L2", params={"symbol": symbol})
        data = await r.json()
        return data
4c9b8e067874871cda8b9a9f113f8ff6e4529c02
3,653,757
import json
import sys


def _load_json(json_path):
    """Load JSON from a file with a given path."""
    # Note: Binary so load can detect encoding (as in Section 3 of RFC 4627)
    with open(json_path, 'rb') as json_file:
        try:
            return json.load(json_file)
        except Exception as ex:
            if sys.version_info[0] >= 3:
                ex2 = Exception('Error loading ' + json_path)
                exec('raise ex2 from ex')  # nosec
            else:
                ex2 = Exception('Error loading ' + json_path + ': ' + str(ex))
                ex2.__cause__ = ex
                raise ex2
86a6ab7c509c24a50c248134e01a7d61d1499adb
3,653,758
async def create_comment_in_post(*, post: models.Post = Depends(resolve_post), created_comment: CreateComment, current_user: models.User = Depends(resolve_current_user), db: Session = Depends(get_db)): """Create a comment in a post.""" return crud.create_comment(db, author_id=current_user.id, parent_resub_id=post.parent_resub_id, parent_post_id=post.id, parent_comment_id=None, content=created_comment.content)
90e4a8628d631bcb33eb5462e0e8001f90fb5c86
3,653,759
def sigma_bot(sigma_lc_bot, sigma_hc_bot, x_aver_bot_mass):
    """
    Calculates the surface tension at the bottom of the column.

    Parameters
    ----------
    sigma_lc_bot : float
        The surface tension of the low-boiling component at the bottom of the column, [N / m]
    sigma_hc_bot : float
        The surface tension of the high-boiling component at the bottom of the column, [N / m]
    x_aver_bot_mass : float
        The average mass concentration at the bottom of the column, [kg/kg]

    Returns
    -------
    sigma_bot : float
        The surface tension at the bottom of the column, [N / m]

    References
    ----------
    &&&&&
    """
    return (sigma_lc_bot * x_aver_bot_mass + (1 - x_aver_bot_mass) * sigma_hc_bot)
5105e5592556cab14cb62ab61b4f242499b33e1d
3,653,760
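# A small numeric check of sigma_bot above: the result is a mass-fraction weighted average
# of the two pure-component surface tensions, so it must lie between them. The values below
# are placeholders, not data for any particular mixture; sigma_bot is assumed to be in scope.
sigma_lc = 0.017   # N/m, low-boiling component (assumed)
sigma_hc = 0.058   # N/m, high-boiling component (assumed)
x_bot = 0.05       # kg/kg, average mass fraction of the low-boiling component at the bottom

sigma = sigma_bot(sigma_lc, sigma_hc, x_bot)
# sigma == 0.017*0.05 + 0.95*0.058 ~= 0.056 N/m, between the two pure values as expected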
def _normalize_zonal_lat_lon(ds: xr.Dataset) -> xr.Dataset: """ In case that the dataset only contains lat_centers and is a zonal mean dataset, the longitude dimension created and filled with the variable value of certain latitude. :param ds: some xarray dataset :return: a normalized xarray dataset """ if 'latitude_centers' not in ds.coords or 'lon' in ds.coords: return ds ds_zonal = ds.copy() resolution = (ds.latitude_centers[1].values - ds.latitude_centers[0].values) ds_zonal = ds_zonal.assign_coords( lon=[i + (resolution / 2) for i in np.arange(-180.0, 180.0, resolution)]) for var in ds_zonal.data_vars: if 'latitude_centers' in ds_zonal[var].dims: ds_zonal[var] = xr.concat([ds_zonal[var] for _ in ds_zonal.lon], 'lon') ds_zonal[var]['lon'] = ds_zonal.lon var_dims = ds_zonal[var].attrs.get('dimensions', []) lat_center_index = var_dims.index('latitude_centers') var_dims.remove('latitude_centers') var_dims.append('lat') var_dims.append('lon') var_chunk_sizes = ds_zonal[var].attrs.get('chunk_sizes', []) lat_chunk_size = var_chunk_sizes[lat_center_index] del var_chunk_sizes[lat_center_index] var_chunk_sizes.append(lat_chunk_size) var_chunk_sizes.append(ds_zonal.lon.size) ds_zonal = ds_zonal.rename_dims({'latitude_centers': 'lat'}) ds_zonal = ds_zonal.assign_coords(lat=ds.latitude_centers.values) ds_zonal = ds_zonal.drop_vars('latitude_centers') ds_zonal = ds_zonal.transpose(..., 'lat', 'lon') has_lon_bnds = 'lon_bnds' in ds_zonal.coords or 'lon_bnds' in ds_zonal if not has_lon_bnds: lon_values = [[i - (resolution / 2), i + (resolution / 2)] for i in ds_zonal.lon.values] ds_zonal = ds_zonal.assign_coords(lon_bnds=xr.DataArray(lon_values, dims=['lon', 'bnds'])) has_lat_bnds = 'lat_bnds' in ds_zonal.coords or 'lat_bnds' in ds_zonal if not has_lat_bnds: lat_values = [[i - (resolution / 2), i + (resolution / 2)] for i in ds_zonal.lat.values] ds_zonal = ds_zonal.assign_coords(lat_bnds=xr.DataArray(lat_values, dims=['lat', 'bnds'])) ds_zonal.lon.attrs['bounds'] = 'lon_bnds' ds_zonal.lon.attrs['long_name'] = 'longitude' ds_zonal.lon.attrs['standard_name'] = 'longitude' ds_zonal.lon.attrs['units'] = 'degrees_east' ds_zonal.lat.attrs['bounds'] = 'lat_bnds' ds_zonal.lat.attrs['long_name'] = 'latitude' ds_zonal.lat.attrs['standard_name'] = 'latitude' ds_zonal.lat.attrs['units'] = 'degrees_north' return ds_zonal
0a6021cc22271d6489a1a946e5ff38a6019ae3e8
3,653,761
def setup_audio(song_filename): """Setup audio file and setup setup the output device.output is a lambda that will send data to fm process or to the specified ALSA sound card :param song_filename: path / filename to music file :type song_filename: str :return: output, fm_process, fft_calc, music_file :rtype tuple: lambda, subprocess, fft.FFT, decoder """ # Set up audio force_header = False if any([ax for ax in [".mp4", ".m4a", ".m4b"] if ax in song_filename]): force_header = True music_file = decoder.open(song_filename, force_header) sample_rate = music_file.getframerate() num_channels = music_file.getnchannels() fft_calc = fft.FFT(CHUNK_SIZE, sample_rate, hc.GPIOLEN, cm.audio_processing.min_frequency, cm.audio_processing.max_frequency, cm.audio_processing.custom_channel_mapping, cm.audio_processing.custom_channel_frequencies) # setup output device output = set_audio_device(sample_rate, num_channels) chunks_per_sec = ((16 * num_channels * sample_rate) / 8) / CHUNK_SIZE light_delay = int(cm.audio_processing.light_delay * chunks_per_sec) # Output a bit about what we're about to play to the logs nframes = str(music_file.getnframes() / sample_rate) log.info("Playing: " + song_filename + " (" + nframes + " sec)") return output, fft_calc, music_file, light_delay
63ca73faf6511047d273e3b36d3ef450dc073a2f
3,653,762
def _collect_package_prefixes(package_dir, packages): """ Collect the list of prefixes for all packages The list is used to match paths in the install manifest to packages specified in the setup.py script. The list is sorted in decreasing order of prefix length so that paths are matched with their immediate parent package, instead of any of that package's ancestors. For example, consider the project structure below. Assume that the setup call was made with a package list featuring "top" and "top.bar", but not "top.not_a_subpackage". :: top/ -> top/ __init__.py -> top/__init__.py (parent: top) foo.py -> top/foo.py (parent: top) bar/ -> top/bar/ (parent: top) __init__.py -> top/bar/__init__.py (parent: top.bar) not_a_subpackage/ -> top/not_a_subpackage/ (parent: top) data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top) data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top) The paths in the generated install manifest are matched to packages according to the parents indicated on the right. Only packages that are specified in the setup() call are considered. Because of the sort order, the data files on the bottom would have been mapped to "top.not_a_subpackage" instead of "top", proper -- had such a package been specified. """ return list( sorted( ((package_dir[package].replace(".", "/"), package) for package in packages), key=lambda tup: len(tup[0]), reverse=True, ) )
6c497725e8a441f93f55084ef42489f97e35acf8
3,653,763
def _grae_ymin_(graph):
    """Get minimal y for the points

    >>> graph = ...
    >>> ymin  = graph.ymin()
    """
    ymn = None
    np = len(graph)
    for ip in range(np):
        x, exl, exh, y, eyl, eyh = graph[ip]
        y = y - abs(eyl)
        if ymn is None or y <= ymn:
            ymn = y
    return ymn
99efb6f6466e56b350da02963e442ac2b991ecf5
3,653,764
def get_working_id(id_: str, entry_id: str) -> str:
    """Sometimes new scanned files ID will be only a number. Should connect them with base64(MD5:_id).
    Fixes bug in VirusTotal API.

    Args:
        entry_id: the entry id connected to the file
        id_: id given from the API

    Returns:
        A working ID that we can use in other commands.
    """
    if isinstance(id_, str) and id_.isnumeric() or (isinstance(id_, int)):
        demisto.debug(f'Got an integer id from file-scan. {id_=}, {entry_id=}\n')
        raise DemistoException(
            f'Got an int {id_=} as analysis report. This is a bug in VirusTotal v3 API.\n'
            f'While VirusTotal team is fixing the problem, try to resend the file.'
        )
    return id_
d55fd0855fc954db0ff9a678de3c71529e0946ae
3,653,765
from keras import models
from keras.layers import Dense
from keras.layers import Dropout
from keras.regularizers import l2


def merck_net(input_shape=(128,)):  # input_shape must be a tuple, e.g. (128,), not the int (128)
    """
    The recommended network presented in the paper: Junshui Ma et. al., Deep Neural Nets as a
    Method for Quantitative Structure Activity Relationships
    URL: http://www.cs.toronto.edu/~gdahl/papers/deepQSARJChemInfModel2015.pdf

    :param input_shape: dim of input features
    :return: a keras model
    """
    # TODO: is kernel_regularizer=l2(0.0001) the best way to add weight cost strength?
    model = models.Sequential()
    model.add(Dense(4000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.25))
    model.add(Dense(2000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.25))
    model.add(Dense(1000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.25))
    model.add(Dense(1000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.10))
    model.add(Dense(1, activation=None, use_bias=True, kernel_regularizer=l2(0.0001)))
    # model.summary()
    return model
d1c8b92c702752c42bf3c2780969ce0c3d64ff4c
3,653,766
def vec_sum(a, b):
    """Compute the elementwise sum of two vectors given as lists."""
    return [va + vb for va, vb in zip(a, b)]
d85f55e22a60af66a85eb6c8cd180007351bf5d9
3,653,767
import time def one_v_one_classifiers(x,y,lambd,max_iters,eps=.0001): """ Function for running a 1v1 classifier on many classes using the linearsvm function. Inputs: x: numpy matrix a matrix of size nxd y: numpy matrix a matrix of size nx1 lambd: float lambda, the penalization constant. Default = -1 max_iters: int maximum number of iterations. Default: 100 eps: float the stopping criteria for the normalized gradient. Default: .001 Returns: vals: numpy matrix beta values for each pair of classes i_vals: numpy matrix matrix of first class tested for 1v1 comparison of class i vs class j j_vals: numpy matrix matrix of second class tested for 1v1 comparison of class i vs class j """ classified_vals = [] i_vals = [] j_vals = [] classes = len(np.unique(y)) t_init = 10**-1 t0 = time.time() vals_to_run = [] k=3 # 3 fold CV num_lambdas = 3 # num lambdas to try in CV vals = [] vals_to_run = [] # group for i in range(classes): for j in range(i+1,classes): features_to_test = x[(y==i)|(y==j)] scaler = preprocessing.StandardScaler() features_to_test = scaler.fit_transform(features_to_test) labels_to_test = y[(y==i)|(y==j)] labels_to_test = ((labels_to_test - min(labels_to_test)) / (max(labels_to_test)-min(labels_to_test)))*2-1 # save a list of parameters to call run_svm as a list vals_to_run.append( (features_to_test, labels_to_test, k, max_iters, num_lambdas , t_init, lambd , eps) ) #classified_vals.append(betas[-1]) i_vals.append(i) j_vals.append(j) print("setup complete. Time :",time.time()-t0, " " , time.strftime('%X %x %Z')) t0 = time.time() #do computation pool = ThreadPool(35) vals_temp = pool.starmap(run_svm,vals_to_run) objs = np.asarray(vals_temp)[:,1] vals_temp = np.asarray(vals_temp)[:,0] vals = vals + list(vals_temp) return np.asarray(vals), np.asarray(i_vals) , np.asarray(j_vals), objs
3cf564039c78363021cb65650dd50db9536922bb
3,653,768
def rlsp(mdp, s_current, p_0, horizon, temp=1, epochs=1, learning_rate=0.2, r_prior=None, r_vec=None, threshold=1e-3, check_grad_flag=False): """The RLSP algorithm""" def compute_grad(r_vec): # Compute the Boltzmann rational policy \pi_{s,a} = \exp(Q_{s,a} - V_s) policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp) d_last_step, d_last_step_list = compute_d_last_step( mdp, policy, p_0, horizon, return_all=True) if d_last_step[s_current] == 0: print('Error in om_method: No feasible trajectories!') return r_vec expected_features, expected_features_list = compute_feature_expectations( mdp, policy, p_0, horizon) G = compute_g(mdp, policy, p_0, horizon, d_last_step_list, expected_features_list) # Compute the gradient dL_dr_vec = G[s_current] / d_last_step[s_current] # Gradient of the prior if r_prior!= None: dL_dr_vec += r_prior.logdistr_grad(r_vec) return dL_dr_vec def compute_log_likelihood(r_vec): policy = value_iter(mdp, 1, mdp.f_matrix @ r_vec, horizon, temp) d_last_step = compute_d_last_step(mdp, policy, p_0, horizon) log_likelihood = np.log(d_last_step[s_current]) if r_prior!= None: log_likelihood += np.sum(r_prior.logpdf(r_vec)) return log_likelihood def get_grad(_): """dummy function for use with check_grad()""" return dL_dr_vec if r_vec is None: r_vec = 0.01*np.random.randn(mdp.f_matrix.shape[1]) print('Initial reward vector: {}'.format(r_vec)) if check_grad_flag: grad_error_list=[] for i in range(epochs): dL_dr_vec = compute_grad(r_vec) if check_grad_flag: grad_error_list.append(check_grad(compute_log_likelihood, get_grad, r_vec)) # Gradient ascent r_vec = r_vec + learning_rate * dL_dr_vec # with printoptions(precision=4, suppress=True): # print('Epoch {}; Reward vector: {}'.format(i, r_vec)) # if check_grad_flag: print('grad error: {}'.format(grad_error_list[-1])) if np.linalg.norm(dL_dr_vec) < threshold: if check_grad_flag: print() print('Max grad error: {}'.format(np.amax(np.asarray(grad_error_list)))) print('Median grad error: {}'.format(np.median(np.asarray(grad_error_list)))) break return r_vec
d389363929f4e7261d72b0d9d83a806fae10b8ab
3,653,769
import numbers


def rotate(img, angle=0, order=1):
    """Rotate image by a certain angle around its center.

    Parameters
    ----------
    img : ndarray(uint16 or uint8)
        Input image.
    angle : integer
        Rotation angle in degrees in counter-clockwise direction.

    Returns
    -------
    rotated : ndarray(uint16 or uint8)
        Rotated version of the input.

    Examples
    --------
    rotate(image, 30)
    rotate(image, 180)
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))
    if not (isinstance(angle, numbers.Number)):
        raise TypeError('Angle should be integer. Got {}'.format(type(angle)))

    img_new = transform.rotate(img, angle, order=order, preserve_range=True)
    img_new = img_new.astype(img.dtype)

    return img_new
8b55fe060ff6b8eb0c7137dc38a72531c24c7534
3,653,770
def activate(request, uidb64, token):
    """Function that activates the user account."""
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user)
        # "The account has been activated successfully. You can now log in."
        messages.success(request, 'Konto zostało pomyślnie aktywowane. Możesz się zalogować.')
        return redirect('login')
    else:
        # "The activation link is invalid or the account has already been activated."
        messages.warning(request, 'Link aktywacyjny jest nieprawidłowy lub konto zostało już aktywowane.')
        return redirect('login')
8538fc17e37b2743a7145286720ba5e8d653c790
3,653,771
def dfn(*args, **kwargs):
    """
    The HTML Definition Element (<dfn>) represents the defining
    instance of a term.
    """
    return el('dfn', *args, **kwargs)
798fb57360aca6f035ad993998c622eb6fff4e82
3,653,772
def _compile_rds_files_TRHP(array_codes, years_processed, filetypes_to_check, extensions_to_check, subfolder_filestypes): """ Get indexed information from server for Hydrothermal Vent Fluid Temperature and Resistivity (RS03INT1-MJ03C-10-TRHPHA301) Example where dat exists: https://rawdata.oceanobservatories.org/files/RS03INT1/MJ03C/TRHPHA301/2017/08/ ['TRHPHA301_10.31.8.10_2101_20171202T0211_UTC.dat', 'TRHPHA301_10.31.8.10_2101_20171203T0000_UTC.dat', . . .] Cache build out with actual reference designators: { "rds_TRHP": { "RS03INT1-MJ03C-10-TRHPHA301": { "2017": { "08": { "22": [ { "date": "2017-08-22", "datetime": "20170822T075800.000Z", "ext": ".dat", "filename": "TRHPHA301_10.31.8.10_2101_20170822T0758_UTC.dat", "rd": "RS03INT1-MJ03C-10-TRHPHA301", "url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/" }, { "date": "2017-08-22", "datetime": "20170822T103300.000Z", "ext": ".dat", "filename": "TRHPHA301_10.31.8.10_2101_20170822T1033_UTC.dat", "rd": "RS03INT1-MJ03C-10-TRHPHA301", "url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/" }, { "date": "2017-08-22", "datetime": "20170822T104900.000Z", "ext": ".dat", "filename": "TRHPHA301_10.31.8.10_2101_20170822T1049_UTC.dat", "rd": "RS03INT1-MJ03C-10-TRHPHA301", "url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/" }, { "date": "2017-08-22", "datetime": "20170822T105200.000Z", "ext": ".dat", "filename": "TRHPHA301_10.31.8.10_2101_20170822T1052_UTC.dat", "rd": "RS03INT1-MJ03C-10-TRHPHA301", "url": "/RS03INT1/MJ03C/TRHPHA301/2017/08/" } ], """ debug = False debug_trace = False debug_details = False time = True try: # Local variables. rds_base_url = get_rds_base_url() base_url = rds_base_url + '/' timeout, timeout_read = get_uframe_timeout_info() # Specific instruments processed. actual_reference_designator = 'RS03INT1-MJ03C-10-TRHPHA301' # Create rds navigation urls. work_nav_urls = {} work_nav_urls[actual_reference_designator] = None # Verify sensor type requested in processed in this function, else return {}. sensor_type = filetypes_to_check if sensor_type != ['TRHP']: return {} # Determine cache destination. cache_destination = get_target_cache_by_sensor_type(sensor_type) if debug: print '\n debug -- Entered _compile_rds_files_TRHP...' if time: print '\n Compiling cache for ', filetypes_to_check[0].replace('-','') print '\t-- Arrays processed: ', array_codes print '\t-- Years processed: ', years_processed print '\t-- Sensor types processed: ', filetypes_to_check print '\t-- Extensions checked: ', extensions_to_check print '\t-- Subfolder filetypes to check: ', subfolder_filestypes # Time start = datetime.now() if time: print '\t-- Start time: ', start # Get and process returned content for links. r = requests.get(base_url, timeout=(timeout, timeout_read)) soup = BeautifulSoup(r.content, "html.parser") ss = soup.findAll('a') data_dict = {} # Get root entry (either subsite or subsite-node). ss_reduced = [] for s in ss: if 'href' in s.attrs: len_href = len(s.attrs['href']) if len_href == 9 or len_href == 15 or len_href == 28: ss_reduced.append(s) if debug_trace: print '\n debug -- The root folder items: ', len(ss_reduced) for s in ss_reduced: # Limit to those arrays identified in array_codes, not processing platforms at this time # Lock down to specific subsite for this sensor type. 
rd = s.attrs['href'] if rd and len(rd) == 9 or len(rd) == 15: if len(rd) == 9: subsite = rd.rstrip('/') if subsite != 'RS03INT1': continue else: continue #----------------------------------------------- # Level 1 - subsite processing d_url = base_url+s.attrs['href'] subfolders, file_list = _get_subfolder_list(d_url, filetypes=subfolder_filestypes, extensions=extensions_to_check) if not subfolders or subfolders is None: continue # Level 2 - node processing if debug_details: print '\n debug -- Now walking subfolders...' for item in subfolders: if len(item) != 6: continue # Determine if item is a folder link or file if '/' in item: subfolder_url = base_url + rd + item node_subfolders, node_file_list = _get_subfolder_list(subfolder_url, filetypes=subfolder_filestypes, extensions=extensions_to_check) if not node_subfolders or node_subfolders is None: continue # Level 3 - processing sensor information if node_subfolders: for node_item in node_subfolders: #================== ok_to_go = False for check in filetypes_to_check: if check in node_item: ok_to_go = True break if not ok_to_go: continue #================ node_folder_url = subfolder_url + node_item nav_url = '/' + node_folder_url.replace(base_url, '') detail_subfolders, detail_file_list = _get_subfolder_list(node_folder_url, filetypes=subfolder_filestypes, extensions=extensions_to_check) if detail_subfolders: # Process years for year in detail_subfolders: #======================================= # Remove to process all years folder_year = year year_tmp = folder_year.rstrip('/') if year_tmp not in years_processed: continue #======================================= year_url = node_folder_url + year months_subfolders, months_file_list = _get_subfolder_list(year_url, None) if months_subfolders: for month in months_subfolders: month_url = year_url + month days_subfolders, days_file_list = \ _get_subfolder_list(month_url, filetypes=filetypes_to_check, extensions=extensions_to_check) if not days_file_list: continue date_part = None for filename in days_file_list: if debug: print '\n debug ------------ Processing filename: ', filename if '_UTC.dat' in filename: tmp_filename = filename.replace('_UTC.dat', '') junk_part, date_part = tmp_filename.rsplit('_', 1) # Process date_part (i.e. 20170815T1927) if date_part is None: continue _dt = date_part # Process file datetime based on extension. ext = None for extension in extensions_to_check: if extension in filename: ext = extension break dt = _dt + '00.000Z' _year = _dt[0:4] _month = _dt[4:6] _day = _dt[6:8] _url = urllib.unquote(month_url).decode('utf8') if rds_base_url in _url: _url = _url.replace(rds_base_url, '') _url = _url.replace(filename, '') tmp_item = {'url': _url, 'filename': filename, 'datetime': dt, 'ext': ext} # Update rds_nav_urls for sensor. work_nav_urls[actual_reference_designator] = rds_base_url + nav_url # Custom for instrument ref_des = actual_reference_designator # Build cache dictionary entry if ref_des not in data_dict: data_dict[str(ref_des)] = {} if _year not in data_dict[ref_des]: data_dict[ref_des][_year] = {} if _month not in data_dict[ref_des][_year]: data_dict[ref_des][_year][_month] = {} if _day not in data_dict[ref_des][_year][_month]: data_dict[ref_des][_year][_month][_day] = [] # Add date to item _year = _year.rstrip('/') _month = _month.rstrip('/') _day = _day.rstrip('/') tmp_item['date'] = '-'.join([_year, _month, _day]) tmp_item['rd'] = ref_des # Add item for cache dictionary. 
data_dict[ref_des][_year][_month][_day].append(tmp_item) else: # Item is not a folder continue end = datetime.now() if time: print '\t-- End time: ', end print '\t-- Time to compile information for cache: %s' % str(end - start) # Process navigation urls. add_nav_urls_to_cache(work_nav_urls, 'TRHP') # Populate cache for sensor type. if data_dict and data_dict is not None: cache.delete(cache_destination) cache.set(cache_destination, data_dict, timeout=get_cache_timeout()) result_keys = data_dict.keys() result_keys.sort() print '\n\t-- Number of items in %s cache(%d): %s' % (cache_destination, len(result_keys), result_keys) return data_dict except Exception as err: message = str(err) current_app.logger.info(message) raise Exception(message)
0d9d490f23afce4d9c5ed47725658127475d9231
3,653,773
def handle_post_runs(project_id, deployment_id):
    """Handles POST requests to /."""
    is_experiment_deployment = False
    experiment_deployment = request.args.get('experimentDeploy')
    if experiment_deployment and experiment_deployment == 'true':
        is_experiment_deployment = True
    run_id = create_deployment_run(project_id, deployment_id, is_experiment_deployment)
    return jsonify({"message": "Pipeline running.", "runId": run_id})
5684a2b1f82981d4a3d5d7b870485b01201fdd2e
3,653,774
def get_in_reply_to_user_id(tweet):
    """
    Get the user id of the user whose Tweet is being replied to, and None
    if this Tweet is not a reply. \n
    Note that this is unavailable in activity-streams format

    Args:
        tweet (Tweet): A Tweet object (or a dictionary)

    Returns:
        str: the user id of the user whose Tweet is being replied to,
        None (if not a reply), or for activity-streams raise a NotAvailableError

    Example:
        >>> from tweet_parser.getter_methods.tweet_reply import *
        >>> original_format_dict = {
        ...     "created_at": "Wed May 24 20:17:19 +0000 2017",
        ...     "in_reply_to_user_id_str": "2382763597"
        ... }
        >>> get_in_reply_to_user_id(original_format_dict)
        '2382763597'
    """
    if is_original_format(tweet):
        return tweet["in_reply_to_user_id_str"]
    else:
        raise NotAvailableError("Gnip activity-streams format does not" +
                                " return the replied to user's id")
74bbfa224f15781f769bf52bb470e23e9c93a95a
3,653,775
import os


def cl35():
    """Cl35 ENDF data (contains RML resonance range)"""
    endf_data = os.environ['OPENMC_ENDF_DATA']
    filename = os.path.join(endf_data, 'neutrons', 'n-017_Cl_035.endf')
    return openmc.data.IncidentNeutron.from_endf(filename)
b202bfcd4f842a71429d374bcc4f65bcbf62b56b
3,653,776
def release_definition_show(definition_id=None, name=None, open_browser=False, team_instance=None, project=None, detect=None): """Get the details of a release definition. :param definition_id: ID of the definition. :type definition_id: int :param name: Name of the definition. Ignored if --id is supplied. :type name: str :param open_browser: Open the definition summary page in your web browser. :type open_browser: bool :param team_instance: VSTS account or TFS collection URL. Example: https://myaccount.visualstudio.com :type team_instance: str :param project: Name or ID of the team project. :type project: str :param detect: Automatically detect values for instance and project. Default is "on". :type detect: str :rtype: ReleaseDefinitionReference """ team_instance, project = resolve_instance_and_project(detect=detect, team_instance=team_instance, project=project) client = get_release_client(team_instance) if definition_id is None: if name is not None: definition_id = get_definition_id_from_name(name, client, project) else: raise ValueError("Either the --id argument or the --name argument must be supplied for this command.") release_definition = client.get_release_definition(definition_id=definition_id, project=project) if open_browser: _open_definition(release_definition) return release_definition
3a4f13a1dfb7f1bd95bf8eae52d41f14566eb5fb
3,653,777
def GKtoUTM(ea, no=None, zone=32, gk=None, gkzone=None):
    """Transform any Gauss-Krueger to UTM autodetect GK zone from offset."""
    if gk is None and gkzone is None:
        if no is None:
            rr = ea[0][0]
        else:
            if isinstance(ea, list) or isinstance(ea, tuple):
                rr = ea[0]
            else:
                rr = ea

        gkzone = int(floor(rr * 1e-6))
        print(gkzone)

        if gkzone <= 0 or gkzone >= 5:
            print("cannot detect valid GK zone")

    pyproj = opt_import('pyproj', 'coordinate transformations')
    if pyproj is None:
        return None

    gk = pyproj.Proj(init="epsg:" + str(31464 + gkzone))
    wgs84 = pyproj.Proj(init="epsg:4326")  # pure ellipsoid to double transform
    utm = pyproj.Proj(proj='utm', zone=zone, ellps='WGS84')  # UTM

    if no is None:  # two-column matrix
        lon, lat = pyproj.transform(gk, wgs84, ea[0], ea[1])
    else:
        lon, lat = pyproj.transform(gk, wgs84, ea, no)

    return utm(lon, lat)
330804d9bfe4785d867755b58355b0633d1fe7c8
3,653,778
def robots(req):
    """
    .. seealso:: http://www.sitemaps.org/protocol.html#submit_robots
    """
    return Response(
        "Sitemap: %s\n" % req.route_url('sitemapindex'),
        content_type="text/plain")
42e21c5968d7e6d02049a0539d5b115aa596292e
3,653,779
import numpy as np


def bolling(asset: list, samples: int = 20, alpha: float = 0, width: float = 2):
    """
    According to MATLAB:

    BOLLING(ASSET,SAMPLES,ALPHA,WIDTH) plots Bollinger bands for given ASSET
    data vector. SAMPLES specifies the number of samples to use in computing
    the moving average. ALPHA is an optional input that specifies the exponent
    used to compute the element weights of the moving average. The default
    ALPHA is 0 (simple moving average). WIDTH is an optional input that
    specifies the number of standard deviations to include in the envelope.
    It is a multiplicative factor specifying how tight the bounds should be
    made around the simple moving average. The default WIDTH is 2. This
    calling syntax plots the data only and does not return the data.

    Note: The standard deviations are normalized by (N-1) where N is the
    sequence length.
    """
    # build weight vector
    # main computation
    r = len(asset)
    i = np.arange(1, samples + 1) ** alpha
    w = i / sum(i)

    # build moving average vectors with for loops
    a = np.zeros((r - samples, 1))
    b = a.copy()
    for i in range(samples, r):
        a[i - samples] = np.sum(asset[i - samples:i] * w)
        b[i - samples] = width * np.sum(np.std(asset[i - samples:i] * w))

    return a, a + b, a - b
90c06bb45f30713a05cde865e23c0f9e317b0887
3,653,780
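# Hedged usage sketch for bolling above: compute the three band vectors for a synthetic
# random walk and check their shapes. The function returns (middle, upper, lower) bands for
# the last len(asset) - samples points; plotting is left to the caller. The series below is
# made up, and bolling is assumed to be in scope.
import numpy as np

rng = np.random.default_rng(1)
prices = 100 + np.cumsum(rng.normal(0, 1, size=250))   # synthetic price series

mid, upper, lower = bolling(prices, samples=20, alpha=0, width=2)
assert mid.shape == (230, 1) and np.all(upper >= mid) and np.all(lower <= mid)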
def metrics():
    """ Expose metrics for the Prometheus collector """
    collector = SensorsDataCollector(sensors_data=list(sensors.values()), prefix='airrohr_')
    return Response(generate_latest(registry=collector), mimetype='text/plain')
93a3de3fbddaeeeaafd182824559003701b718bc
3,653,781
def solar_energy_striking_earth_today() -> dict:
    """Get the amount of solar energy striking the earth today."""
    return get_metric_of(label='solar_energy_striking_earth_today')
a53c6e45f568d5b4245bbc993547b28f5414ca47
3,653,782
def write_data_str(geoms, grads, hessians): """ Writes a string containing the geometry, gradient, and Hessian for either a single species or points along a reaction path that is formatted appropriately for the ProjRot input file. :param geoms: geometries :type geoms: list :param grads: gradients :type grads: list :param hessians: Hessians :type hessians: list :rtype: str """ # if not isinstance(geoms, list): # geoms = [geoms] # if not isinstance(grads, list): # grads = [grads] # if not isinstance(hessians, list): # hessians = [hessians] nsteps = len(geoms) data_str = '' for i, (geo, grad, hess) in enumerate(zip(geoms, grads, hessians)): data_str += 'Step {0}\n'.format(str(i+1)) data_str += 'geometry\n' data_str += _format_geom_str(geo) data_str += 'gradient\n' data_str += _format_grad_str(geo, grad) data_str += 'Hessian\n' data_str += _format_hessian_str(hess) if i != nsteps-1: data_str += '\n' return remove_trail_whitespace(data_str)
34c1148f820396bf4619ace2d13fb517e4f6f16d
3,653,783
import types from typing import Dict from typing import Any from typing import List def gen_chart_name(data: types.ChartAxis, formatter: Dict[str, Any], device: device_info.DrawerBackendInfo ) -> List[drawings.TextData]: """Generate the name of chart. Stylesheets: - The `axis_label` style is applied. Args: data: Chart axis data to draw. formatter: Dictionary of stylesheet settings. device: Backend configuration. Returns: List of `TextData` drawings. """ style = {'zorder': formatter['layer.axis_label'], 'color': formatter['color.axis_label'], 'size': formatter['text_size.axis_label'], 'va': 'center', 'ha': 'right'} text = drawings.TextData(data_type=types.LabelType.CH_NAME, channels=data.channels, xvals=[types.AbstractCoordinate.LEFT], yvals=[0], text=data.name, ignore_scaling=True, styles=style) return [text]
032abcb5e6fca1920965fdd20203614dd750c9c0
3,653,784
import re def _parse_whois_response(response): """ Dealing with the many many different interpretations of the whois response format. If an empty line is encountered, start a new record If a line with a semicolon is encountered, treat everything before first : as key and start a value If a line without semicolon is encountered when value is started, add it to current value. If a line without semicolon is encountered before value is started, skip it. :param response: the raw response to parse :return:a list of records containg (key, value) tuples """ newkvre = re.compile("^(\s*)([^\>\%\s][^:]+):(\s*(.*))?$") commre = re.compile("^\s*[\%\>\@\;].*$") records = [] currecord, curkey = {}, None comment = False for line in response.splitlines(): if line.strip() is "": comment = False if len(currecord): records.append(currecord) currecord, curkey = {}, None continue if comment: continue match = newkvre.match(line) matchcomm = commre.match(line) if match and matchcomm is None: curkey = match.group(2) val = match.group(4) if match.group(4) else "" if curkey in currecord: currecord[curkey] += "\n" + val else: currecord[curkey] = val elif matchcomm: # part of comments comment = True continue elif match is None and curkey: # this is likely part of multiline value currecord[curkey] += "\n" + line.strip() else: comment = True continue # this is likely start of comments if len(currecord): records.append(currecord) _log.debug("Response parsed succesfully. %d records", len(records)) return records
32978d5965c794b6f388d1e490bfaddd5c58d41f
3,653,785
from typing import Sequence


def vector_cosine_similarity(docs: Sequence[spacy.tokens.Doc]) -> np.ndarray:
    """
    Get the pairwise cosine similarity between each document in docs.
    """
    vectors = np.vstack([doc.vector for doc in docs])
    return pairwise.cosine_similarity(vectors)
14456abcbb038dd2a4c617690d7f68dfc7a7bcb8
3,653,786
def create_test_validation(): """ Returns a constructor function for creating a Validation object. """ def _create_test_validation(db_session, resource, success=None, started_at=None, secret=None): create_kwargs = {"resource": resource} for kwarg in ['success', 'started_at', 'secret']: if locals()[kwarg] is not None: create_kwargs[kwarg] = locals()[kwarg] (validation, _) = get_one_or_create(db_session, Validation, **create_kwargs) return validation return _create_test_validation
7d78ae1c999cb79151e7527fd5bad448946aaccc
3,653,787
def nrmse(img, ref, axes=(0, 1)):
    """ Compute the normalized root mean squared error (nrmse)

    :param img: input image (np.array)
    :param ref: reference image (np.array)
    :param axes: tuple of axes over which the nrmse is computed
    :return: (mean) nrmse
    """
    nominator = np.real(np.sum((img - ref) * np.conj(img - ref), axis=axes))
    denominator = np.real(np.sum(ref * np.conj(ref), axis=axes))
    nrmse = np.sqrt(nominator / denominator)
    return np.mean(nrmse)
ab040a2dd88acb2ce1e7df3b37215c5a40092f8a
3,653,788
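# Quick sanity check for nrmse above: identical images give 0, and moving the input away
# from the reference increases the error. Shapes are arbitrary for illustration, and nrmse
# is assumed to be in scope.
import numpy as np

rng = np.random.default_rng(2)
ref = rng.normal(size=(4, 64, 64))                    # e.g. 4 slices of a 64x64 image
assert nrmse(ref.copy(), ref, axes=(1, 2)) == 0.0
assert nrmse(1.1 * ref, ref, axes=(1, 2)) > 0.0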
def pairwise_comparison(column1, var1, column2, var2):
    """
    Args:
        column1 --> column name 1 in df
        column2 --> column name 2 in df
        var1 ---> 3 cases:
                  abbreviation for column 1 (seeking the better model)
                  abbreviation for column 1 (seeking the lesser value in column1 in comparison to column2)
                  empty string (seeking the greater value in column2 in comparison to column1)
        var2 ---> 3 cases:
                  abbreviation for column 2 (seeking the better model)
                  abbreviation for column 2 (seeking the greater value in column2 in comparison to column1)
                  empty string (seeking the lesser value in column1 in comparison to column2)

    Return:
        per row, the abbreviation of the column that is smaller/greater, depending on how the function is used

    Function:
        list comprehension; zips the two columns together and is used to find
        which dataset has the smaller/greater value
    """
    return [var1 if r < c else var2 for r, c in zip(column1, column2)]
a67ef991dcad4816e9b15c1f352079ce14d7d823
3,653,789
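# Hedged usage sketch for pairwise_comparison above, e.g. picking the model with the lower
# error per row of a results table. Column names, labels and values are made up, and the
# function above is assumed to be in scope.
import pandas as pd

df = pd.DataFrame({"rf_rmse": [0.31, 0.28, 0.40], "xgb_rmse": [0.29, 0.33, 0.41]})
winners = pairwise_comparison(df["rf_rmse"], "RF", df["xgb_rmse"], "XGB")
# winners == ['XGB', 'RF', 'RF']  -> label of the column with the smaller value per row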
def prep_data_CNN(documents): """ Prepare the padded docs and vocab_size for CNN training """ t = Tokenizer() docs = list(filter(None, documents)) print("Size of the documents in prep_data {}".format(len(documents))) t.fit_on_texts(docs) vocab_size = len(t.word_counts) print("Vocab size {}".format(vocab_size)) encoded_docs = t.texts_to_sequences(docs) print("Size of the encoded documents {}".format(len(encoded_docs))) e_lens = [] for i in range(len(encoded_docs)): e_lens.append(len(encoded_docs[i])) lens_edocs = list(map(size, encoded_docs)) max_length = np.average(lens_edocs) sequence_length = 1500 # Can use this instead of the above average max_length value max_length = sequence_length padded_docs = pad_sequences( encoded_docs, maxlen=int(max_length), padding='post') print("Length of a padded row {}".format(padded_docs.shape)) print("max_length {} and min_length {} and average {}".format( max_length, min(lens_edocs), np.average(lens_edocs))) return padded_docs, max_length, vocab_size, t.word_index
a568942bdedbea99d6abf2bd5b8fc8c7912e4271
3,653,790
def gc2gd_lat(gc_lat):
    """Convert geocentric latitude to geodetic latitude using WGS84.

    Parameters
    ----------
    gc_lat : (array_like or float)
        Geocentric latitude in degrees N

    Returns
    -------
    gd_lat : (same as input)
        Geodetic latitude in degrees N
    """
    wgs84_e2 = 0.006694379990141317 - 1.0
    gd_lat = np.rad2deg(-np.arctan(np.tan(np.deg2rad(gc_lat)) / wgs84_e2))
    return gd_lat
e019a5a122266eb98dba830283091bcbf42f873f
3,653,791
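# Hedged sanity check for gc2gd_lat above: geodetic latitude should slightly exceed the
# geocentric latitude at mid-latitudes and agree at the equator. The reference values are
# computed from tan(gd) = tan(gc) / (1 - e^2), and gc2gd_lat is assumed to be in scope.
import numpy as np

assert gc2gd_lat(0.0) == 0.0
assert abs(gc2gd_lat(45.0) - 45.19) < 0.01            # ~45.19 deg N for 45 deg geocentric
assert np.allclose(gc2gd_lat(np.array([-30.0, 30.0])),
                   [-30.167, 30.167], atol=1e-2)      # symmetric about the equator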
import os import linecache import traceback def _coroutine_format_stack(coro, complete=False): """Formats a traceback from a stack of coroutines/generators. """ dirname = os.path.dirname(__file__) extracted_list = [] checked = set() for f in _get_coroutine_stack(coro): lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name if not complete and os.path.dirname(filename) == dirname: continue if filename not in checked: checked.add(filename) linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) extracted_list.append((filename, lineno, name, line)) if not extracted_list: resp = 'No stack for %r' % coro else: resp = 'Stack for %r (most recent call last):\n' % coro resp += ''.join(traceback.format_list(extracted_list)) return resp
d9828a311150368a958691e286435a6c76b18078
3,653,792
def polynomial_kernel(X, Y, c, p):
    """
    Compute the polynomial kernel between two matrices X and Y::

        K(x, y) = (<x, y> + c)^p

    for each pair of rows x in X and y in Y.

    Args:
        X - (n, d) NumPy array (n datapoints each with d features)
        Y - (m, d) NumPy array (m datapoints each with d features)
        c - a coefficient to trade off high-order and low-order terms (scalar)
        p - the degree of the polynomial kernel

    Returns:
        kernel_matrix - (n, m) Numpy array containing the kernel matrix
    """
    # YOUR CODE HERE
    # raise NotImplementedError
    kernel_matrix = (np.matmul(X, Y.T) + c) ** p
    return kernel_matrix
5532692b0a8411560f56033bcf6ad27b3c8e41a1
3,653,793
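# Hedged check for polynomial_kernel above: with c=0 and p=1 it reduces to the plain Gram
# matrix X @ Y.T, and the output shape is (n, m). Sizes are arbitrary, and the function
# above is assumed to be in scope.
import numpy as np

rng = np.random.default_rng(3)
X, Y = rng.normal(size=(5, 3)), rng.normal(size=(7, 3))

K = polynomial_kernel(X, Y, c=1.0, p=3)
assert K.shape == (5, 7)
assert np.allclose(polynomial_kernel(X, Y, 0.0, 1), X @ Y.T)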
def slug_from_iter(it, max_len=128, delim='-'):
    """Produce a slug (short URI-friendly string) from an iterable (list, tuple, dict)

    >>> slug_from_iter(['.a.', '=b=', '--alpha--'])
    'a-b-alpha'
    """
    nonnull_values = [str(v) for v in it
                      if v or ((isinstance(v, (int, float, Decimal)) and str(v)))]
    return slugify(delim.join(shorten(v, max_len=int(float(max_len) / len(nonnull_values)))
                              for v in nonnull_values), word_boundary=True)
0da42aa5c56d3012e5caf4a5ead37632d5d21ab0
3,653,794
def modulusOfRigidity(find="G", printEqs=True, **kwargs):
    """
    Defines the slope of the stress-strain curve up to the elastic limit of the material.
    For most ductile materials it is the same in compression as in tension.
    Not true for cast irons, other brittle materials, or magnesium.

    Where:
        E = modulus of elasticity
        v = poisson's ratio

        Material      v
        Aluminum      0.34
        Copper        0.35
        Iron          0.28
        Steel         0.28
        Magnesium     0.33
        Titanium      0.34
    """
    eq = list()
    eq.append("Eq(G, E / (2*(1+v)))")  # closing parenthesis added so the expression parses
    return solveEqs(eq, find=find, printEq=printEqs, **kwargs)
cb849755799d85b9d4d0671f6656de748ab38f7c
3,653,795
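# The relation encoded above is G = E / (2 * (1 + v)). A quick numeric example for steel
# (E ~ 207 GPa, v ~ 0.28 from the table in the docstring); this bypasses the solveEqs
# helper, which is not shown in the snippet.
E = 207e9        # Pa, modulus of elasticity for steel (approximate)
v = 0.28         # Poisson's ratio for steel
G = E / (2 * (1 + v))
# G ~ 80.9e9 Pa, i.e. roughly 81 GPa, a typical shear modulus for steel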
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigType) -> bool:
    """Unload FRITZ!Box Tools config entry."""
    hass.services.async_remove(DOMAIN, SERVICE_RECONNECT)
    for domain in SUPPORTED_DOMAINS:
        await hass.config_entries.async_forward_entry_unload(entry, domain)
    del hass.data[DOMAIN]
    return True
e934ec21be451cc1084bd293dbce5495f6b4915c
3,653,796
def queryMaxTransferOutAmount(asset, isolatedSymbol="", recvWindow=""): """# Query Max Transfer-Out Amount (USER_DATA) #### `GET /sapi/v1/margin/maxTransferable (HMAC SHA256)` ### Weight: 5 ### Parameters: Name |Type |Mandatory |Description --------|--------|--------|-------- asset |STRING |YES | isolatedSymbol |STRING |NO |isolated symbol recvWindow |LONG |NO |The value cannot be greater than <code>60000</code> timestamp |LONG |YES | """ endpoint = '/sapi/v1/margin/maxTransferable' params = { "asset": asset } if isolatedSymbol: params["isolatedSymbol"] = isolatedSymbol if recvWindow: params["recvWindow"] = recvWindow return getbinancedata_sig(endpoint, params)
f9e178d18eea969e5aabc0efa3aee938ad730752
3,653,797
def IteratePriorityQueueEntry(root, element_type, field_name): """ iterate over a priority queue as defined with struct priority_queue from osfmk/kern/priority_queue.h root - value : Value object for the priority queue element_type - str : Type of the link element field_name - str : Name of the field in link element's structure returns: A generator does not return. It is used for iterating value : an object thats of type (element_type). Always a pointer object """ def _make_pqe(addr): return value(root.GetSBValue().CreateValueFromExpression(None,'(struct priority_queue_entry *)'+str(addr))) queue = [unsigned(root.pq_root_packed) & ~3] while len(queue): elt = _make_pqe(queue.pop()) while elt: yield containerof(elt, element_type, field_name) addr = unsigned(elt.child) if addr: queue.append(addr) elt = elt.next
db0da178b5fef292267f0c2a2d2120833970e4bb
3,653,798
def remove_layer(nn, del_idx, additional_edges, new_strides=None): """ Deletes the layer indicated in del_idx and adds additional_edges specified in additional_edges. """ layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes = \ get_copies_from_old_nn(nn) # First add new edges to conn_mat and remove edges to and from del_idx for add_edge in additional_edges: conn_mat[add_edge[0], add_edge[1]] = 1 conn_mat[del_idx, :] = 0 conn_mat[:, del_idx] = 0 # Now reorder everything so that del_idx is at the end all_idxs = list(range(len(layer_labels))) new_order = all_idxs[:del_idx] + all_idxs[del_idx+1:] + [del_idx] # Now reorder everything so that the layer to be remove is at the end layer_labels = reorder_list_or_array(layer_labels, new_order) num_units_in_each_layer = reorder_list_or_array(num_units_in_each_layer, new_order) conn_mat = reorder_rows_and_cols_in_matrix(conn_mat, new_order) # remove layer layer_labels = layer_labels[:-1] num_units_in_each_layer = num_units_in_each_layer[:-1] conn_mat = conn_mat[:-1, :-1] # Strides for a convolutional network if nn.nn_class == 'cnn': new_strides = new_strides if new_strides is not None else \ mandatory_child_attributes.strides mandatory_child_attributes.strides = reorder_list_or_array( new_strides, new_order) mandatory_child_attributes.strides = mandatory_child_attributes.strides[:-1] return get_new_nn(nn, layer_labels, num_units_in_each_layer, conn_mat, mandatory_child_attributes)
33d4a2e6ba05000f160b0d0cc603c568f68790d7
3,653,799