Columns:
  content — string, lengths 35 to 762k
  sha1 — string, fixed length 40
  id — int64, range 0 to 3.66M
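A minimal sketch for working with these records, assuming (as the column names suggest, though the dump itself does not state it) that each row's sha1 is the hex SHA-1 digest of its content field:

import hashlib

def verify_record(content: str, sha1: str) -> bool:
    """Return True if the sha1 column matches the digest of the content column."""
    return hashlib.sha1(content.encode("utf-8")).hexdigest() == sha1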
def get_input_fn_common(pattern, batch_size, mode, hparams: SmartComposeArg):
    """ Returns the common input function used in Smart Compose training and evaluation"""
    return _get_input_fn_common(
        pattern, batch_size, mode,
        **_get_func_param_from_hparams(
            _get_input_fn_common, hparams, ('pattern', 'batch_size', 'mode')))
414a2281807f5ccba5534f4000a4837409dc0f1f
3,651,500
def text_to_int(sentence, map_dict, max_length=20, is_target=False):
    """
    Encode a text sentence as a sequence of integers.

    @param sentence: a complete sentence, str
    @param map_dict: word-to-integer mapping, dict
    @param max_length: maximum sentence length
    @param is_target: whether this is a target sentence. Target sentences
        (i.e. translated sentences) are distinguished from source sentences
        because they need an <EOS> appended at the end.
    """
    # Pad the whole sequence with <PAD>
    text_to_idx = []
    # special token indices
    unk_idx = map_dict.get("<UNK>")
    pad_idx = map_dict.get("<PAD>")
    eos_idx = map_dict.get("<EOS>")

    # Source text
    if not is_target:
        for word in sentence.lower().split():
            text_to_idx.append(map_dict.get(word, unk_idx))
    # Target text: append <EOS> at the end
    else:
        for word in sentence.lower().split():
            text_to_idx.append(map_dict.get(word, unk_idx))
        text_to_idx.append(eos_idx)

    # Truncate if too long
    if len(text_to_idx) > max_length:
        return text_to_idx[:max_length]
    # Otherwise pad with <PAD>
    else:
        text_to_idx = text_to_idx + [pad_idx] * (max_length - len(text_to_idx))
        return text_to_idx
9ac1928ff0a71e653c999a173ee4ea9127b29913
3,651,501
from datetime import datetime


def map_to_udm_users(users_df: DataFrame) -> DataFrame:
    """
    Maps a DataFrame containing Canvas users into the Ed-Fi LMS Unified Data
    Model (UDM) format.

    Parameters
    ----------
    users_df: DataFrame
        Pandas DataFrame containing all Canvas users

    Returns
    -------
    DataFrame
        A LMSUsers-formatted DataFrame

    DataFrame columns are:
        EmailAddress: The primary e-mail address for the user
        LocalUserIdentifier: The user identifier assigned by a school or district
        Name: The full name of the user
        SISUserIdentifier: The user identifier defined in the Student Information System (SIS)
        SourceSystem: The system code or name providing the user data
        SourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source system
        CreateDate: datetime at which the record was first retrieved
        LastModifiedDate: datetime when the record was modified, or when first retrieved
        SourceCreateDate: Date this record was created in the LMS
        SourceLastModifiedDate: Date this record was last updated in the LMS
    """
    if users_df.empty:
        return users_df

    df: DataFrame = users_df[
        [
            "id",
            "sis_user_id",
            "created_at",
            "name",
            "email",
            "login_id",
            "CreateDate",
            "LastModifiedDate",
        ]
    ].copy()

    df["SourceSystem"] = constants.SOURCE_SYSTEM

    df.rename(
        columns={
            "id": "SourceSystemIdentifier",
            "sis_user_id": "SISUserIdentifier",
            "login_id": "LocalUserIdentifier",
            "email": "EmailAddress",
            "name": "Name",
            "created_at": "SourceCreateDate",
        },
        inplace=True,
    )

    df["SourceCreateDate"] = df["SourceCreateDate"].apply(
        lambda x: datetime.strftime(
            datetime.strptime(x, "%Y-%m-%dT%H:%M:%S%z"), "%Y/%m/%d %H:%M:%S"
        )
    )

    df["UserRole"] = constants.ROLES.STUDENT
    df["SourceLastModifiedDate"] = ""

    return df
96cd04c425d3a4747a29d0297a8d97451fc18a6e
3,651,502
def custom_shibboleth_institution_login(
    selenium, config, user_handle, user_pwd, user_name
):
    """Custom Login on Shibboleth institution page."""
    wait = WebDriverWait(selenium, config.MAX_WAIT_TIME)

    input_user_id = wait.until(
        EC.element_to_be_clickable((By.XPATH, "//input[@id='userid']"))
    )
    input_user_id.send_keys(user_handle)

    input_user_pwd = wait.until(
        EC.element_to_be_clickable((By.XPATH, "//input[@id='password']"))
    )
    input_user_pwd.send_keys(user_pwd)

    btn_login = wait.until(
        EC.element_to_be_clickable((By.XPATH, "//button[@name='_eventId_proceed']"))
    )
    btn_login.click()
    sleep(3)

    if selenium.title == config.SHIBBOLETH_LOGIN_PAGE_TITLE:
        btn_tou = wait.until(
            EC.element_to_be_clickable(
                (By.XPATH, "//button[@id='_shib_idp_accept_TOU']")
            )
        )
        btn_tou.click()

        btn_next = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//button[@id='_eventId_proceed']"))
        )
        btn_next.click()

    navbar_user = wait.until(
        EC.element_to_be_clickable((By.XPATH, "//span[@id='userDisplayInfoTitle']"))
    )
    assert navbar_user.text == user_name
    return selenium
c830180b6fad4d454a0ffae76d42015adca5b909
3,651,503
from numpy import array


def beamcenter_mask():
    """Returns beamcenter mask as an array.

    Given the PSF and the dimensions of the beamstop, the minimum intensity
    around beamcenter occurs at a radius of 3 pixels, hence a 7x7 mask."""
    return array([[0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 1, 1, 1, 0, 0],
                  [0, 0, 1, 1, 1, 0, 0],
                  [0, 0, 1, 1, 1, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0]])
6efb592aa88c3da57010ab4a70144d645ae916ea
3,651,504
def physical_conversion_actionAngle(quantity, pop=False):
    """Decorator to convert to physical coordinates for the actionAngle methods:
    quantity= call, actionsFreqs, or actionsFreqsAngles (or EccZmaxRperiRap
    for actionAngleStaeckel)"""
    def wrapper(method):
        @wraps(method)
        def wrapped(*args, **kwargs):
            use_physical = kwargs.get('use_physical', True)
            ro = kwargs.get('ro', None)
            if ro is None and hasattr(args[0], '_roSet') and args[0]._roSet:
                ro = args[0]._ro
            if _APY_LOADED and isinstance(ro, units.Quantity):
                ro = ro.to(units.kpc).value
            vo = kwargs.get('vo', None)
            if vo is None and hasattr(args[0], '_voSet') and args[0]._voSet:
                vo = args[0]._vo
            if _APY_LOADED and isinstance(vo, units.Quantity):
                vo = vo.to(units.km / units.s).value
            # Remove ro and vo kwargs if necessary
            if pop and 'use_physical' in kwargs:
                kwargs.pop('use_physical')
            if pop and 'ro' in kwargs:
                kwargs.pop('ro')
            if pop and 'vo' in kwargs:
                kwargs.pop('vo')
            if use_physical and vo is not None and ro is not None:
                out = method(*args, **kwargs)
                if 'call' in quantity or 'actions' in quantity:
                    if 'actions' in quantity and len(out) < 4:  # 1D system
                        fac = [ro * vo]
                        if _APY_UNITS:
                            u = [units.kpc * units.km / units.s]
                    else:
                        fac = [ro * vo, ro * vo, ro * vo]
                        if _APY_UNITS:
                            u = [units.kpc * units.km / units.s,
                                 units.kpc * units.km / units.s,
                                 units.kpc * units.km / units.s]
                if 'Freqs' in quantity:
                    FreqsFac = freq_in_Gyr(vo, ro)
                    if len(out) < 4:  # 1D system
                        fac.append(FreqsFac)
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.
                            u.append(Freqsu)
                    else:
                        fac.extend([FreqsFac, FreqsFac, FreqsFac])
                        if _APY_UNITS:
                            Freqsu = units.Gyr**-1.
                            u.extend([Freqsu, Freqsu, Freqsu])
                if 'Angles' in quantity:
                    if len(out) < 4:  # 1D system
                        fac.append(1.)
                        if _APY_UNITS:
                            u.append(units.rad)
                    else:
                        fac.extend([1., 1., 1.])
                        if _APY_UNITS:
                            u.extend([units.rad, units.rad, units.rad])
                if 'EccZmaxRperiRap' in quantity:
                    fac = [1., ro, ro, ro]
                    if _APY_UNITS:
                        u = [1., units.kpc, units.kpc, units.kpc]
                if _APY_UNITS:
                    newOut = ()
                    try:
                        for ii in range(len(out)):
                            newOut = newOut + (units.Quantity(out[ii] * fac[ii],
                                                              unit=u[ii]),)
                    except TypeError:  # happens if out = scalar
                        newOut = units.Quantity(out * fac[0], unit=u[0])
                else:
                    newOut = ()
                    try:
                        for ii in range(len(out)):
                            newOut = newOut + (out[ii] * fac[ii],)
                    except TypeError:  # happens if out = scalar
                        newOut = out * fac[0]
                return newOut
            else:
                return method(*args, **kwargs)
        return wrapped
    return wrapper
36501fc563a1de71320b205ef1795ea369cc578a
3,651,505
import functools

import click


def pass_api_client(function):
    """Create API client from API key and pass it to subcommand.

    :param function: Subcommand that returns a result from the API.
    :type function: callable
    :returns: Wrapped function that prints subcommand results
    :rtype: callable
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        context = click.get_current_context()
        api_key = context.params.get("api_key")
        offering = context.params.get("offering")
        config = load_config()

        if api_key is None:
            if not config["api_key"]:
                prog_name = context.parent.info_name
                click.echo(
                    "\nError: API key not found.\n\n"
                    "To fix this problem, please use any of the following methods "
                    "(in order of precedence):\n"
                    "- Pass it using the -k/--api-key option.\n"
                    "- Set it in the GREYNOISE_API_KEY environment variable.\n"
                    "- Run {!r} to save it to the configuration file.\n".format(
                        "{} setup".format(prog_name)
                    )
                )
                context.exit(-1)
            api_key = config["api_key"]

        if offering is None:
            if not config["offering"]:
                offering = "enterprise"
            else:
                offering = config["offering"]

        api_client = GreyNoise(
            api_key=api_key,
            offering=offering,
            timeout=config["timeout"],
            integration_name="cli",
        )
        return function(api_client, *args, **kwargs)

    return wrapper
af806b8420cfb50b00ed313c5ae35ac847059af4
3,651,506
import torch


def vecs_Xg_ig(x):
    """ Vi = vec(dg/dxi * inv(g)), where g = exp(x)
        (== [Ad(exp(x))] * vecs_ig_Xg(x))
    """
    t = x.view(-1, 3).norm(p=2, dim=1).view(-1, 1, 1)
    X = mat(x)
    S = X.bmm(X)
    # B = x.view(-1, 3, 1).bmm(x.view(-1, 1, 3))  # B = x*x'
    I = torch.eye(3).to(X)
    # V = sinc1(t)*eye(3) + sinc2(t)*X + sinc3(t)*B
    # V = eye(3) + sinc2(t)*X + sinc3(t)*S
    V = I + sinc2(t) * X + sinc3(t) * S
    return V.view(*(x.size()[0:-1]), 3, 3)
dcd7276fbb1aa59128f7c321b36e561e3f90f3f2
3,651,507
def wide_factorial(x):
    """wide_factorial returns x! = x * (x-1) * (x-2) * ...

    Args:
        x: bytes to evaluate as an integer

    Returns:
        bytes representing the integer that is the result of the factorial
        applied on the argument passed
    """
    return If(
        BitLen(x) == Int(1),
        x,
        BytesMul(x, wide_factorial(BytesMinus(x, Itob(Int(1)))))
    )
c6a7b01ec5f140c6bcfad45ae78879c210dd1f33
3,651,508
import pathlib


def spring_outpath(filepath: pathlib.Path) -> pathlib.Path:
    """Build a spring path based on a fastq file path"""
    LOG.info("Create spring path from %s", filepath)
    file_name = filepath.name
    file_parent = filepath.parent

    splitted = file_name.split("_")
    spring_base = pathlib.Path("_".join(splitted[:-2]))

    spring_path = pathlib.Path(file_parent).joinpath(spring_base).with_suffix(".spring")
    LOG.info("Creates spring path %s", spring_path)
    return spring_path
dfe9d7d0fb592c8bdbf8f2074e9316e8e1e7fc31
3,651,509
def expanded_bb(final_points):
    """computation of coordinates and distance"""
    left, right = final_points
    left_x, left_y = left
    right_x, right_y = right

    base_center_x = (left_x + right_x) / 2
    base_center_y = (left_y + right_y) / 2
    dist_base = abs(complex(left_x, left_y) - complex(right_x, right_y))

    return (int(base_center_x), int(base_center_y)), dist_base
c033130b0d43ccf9cea3e075305cf464f958c62f
3,651,511
def gen_file_get_url(token, filename):
    """
    Generate httpserver file url.
    Format: http://<domain:port>/files/<token>/<filename>
    """
    return '%s/files/%s/%s' % (get_httpserver_root(), token, urlquote(filename))
5e8f3367d5872457edc5a8808c3aabb57a8a2748
3,651,512
def count_items():
    """
    Get number of items in the DB

    Per the AWS documentation: DynamoDB updates this value approximately every
    six hours. Recent changes might not be reflected in this value.
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.describe_table
    """
    return dynamo_client.alerts_table.item_count
ac580e172ef2571a4a154af4460cdc1598832ab7
3,651,513
def extract_uris(data):
    """Convert a text/uri-list to a python list of (still escaped) URIs"""
    lines = data.split('\r\n')
    out = []
    for l in lines:
        if l == chr(0):
            continue  # (gmc adds a '\0' line)
        if l and l[0] != '#':
            out.append(l)
    return out
9f6ce28ecf94e07e03afca9852dd9952ed2a2488
3,651,514
def createConnection(ps, graph, e, q, maxIter):
    """ Try to build a path along a transition from a given configuration """
    for i in range(maxIter):
        q_rand = shootConfig(ps.robot, q, i)
        res, q1, err = graph.generateTargetConfig(e, q, q_rand)
        if not res:
            continue
        res, p, msg = ps.directPath(q, q1, True)
        if not res:
            continue
        ps.addConfigToRoadmap(q1)
        ps.addEdgeToRoadmap(q, q1, p, True)
        print("Success (i={0})".format(i))
        return p, q1
    print("Failed (maxIter={0})".format(maxIter))
    return None, None
62d9c3a3bb5e90cfba5df86d9dbbab5cd3f7a8ea
3,651,515
import re


def extract_info(filepath, pdbid, info_id_list):
    """Returns a dictionary where the key is pocket ID (starting at zero) and
    the value is a dictionary of information points."""
    pockets_info = {}
    pocket_file = open(filepath + pdbid + '_out/' + pdbid + '_info.txt')
    pocket_lines = pocket_file.readlines()
    pocket_file.close()

    # create inner dictionaries
    counter = 0
    for line in pocket_lines:
        if line[:6] == 'Pocket':
            pockets_info[counter] = {}
            counter += 1

    # populate inner dictionaries
    for info_id in info_id_list:
        counter = 0
        for line in pocket_lines:
            if line.lstrip()[:len(info_id)] == info_id:
                split = re.split(r'\s+', line.rstrip())
                pockets_info[counter][info_id] = float(split[-1])
                counter += 1

    return pockets_info
aca4074bc1c48add487268641a66c6e80aa7dafb
3,651,517
from math import factorial

import numpy as np


def eval_shape_fcn(w, x, N1, N2, yte):
    """
    compute class and shape function

    :param w:
    :param x:
    :param N1:
    :param N2:
    :param yte: trailing edge y coordinate
    :return:
    """
    C = x**N1 * (1 - x)**N2
    n = len(w) - 1  # degree of Bernstein polynomials

    S = np.zeros_like(x)
    for j in range(0, n + 1):
        K = factorial(n) / (factorial(j) * (factorial(n - j)))
        S += w[j] * K * x**j * ((1 - x)**(n - j))

    return C * S + x * yte
c1047f6a586f51b4fd82423429b087ca28d87510
3,651,518
def _pickle_path(file_name):
    """Returns an absolute path to the specified pickle file."""
    return project_root_path('pickles', file_name)
18aef638bf3b06eb33b638e7c2038cf07cbd0d7d
3,651,519
def streamentry(parser, token):
    """
    streamentry <entry_var>
    """
    bits = token.split_contents()
    bits.reverse()
    tag_name = bits.pop()
    try:
        entry_var = bits.pop()
    except IndexError:
        raise template.TemplateSyntaxError("%r is missing entry argument" % tag_name)
    if bits:
        raise template.TemplateSyntaxError("%r has unexpected arguments" % tag_name)
    return StreamItemNode(entry_var)
88e6abc56f817f0d4a0c814a672bf0173342347d
3,651,520
def mult_int_list_int():
    """
    >>> mult_int_list_int()
    [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]
    """
    return 3 * [1, 2] * 2
cd34fa521ae3985f7770f96a1a8985e9473ee2b3
3,651,521
def _gcd_tf(a, b, dtype=tf.int64):
    """Calculates the greatest common divisor of 2 numbers.

    Assumes that a and b are tf.Tensor of shape () and performs the extended
    Euclidean algorithm to find the gcd and the coefficients of Bézout's
    identity (https://en.wikipedia.org/wiki/B%C3%A9zout%27s_identity)

    Args:
        a: A scalar `tf.Tensor`.
        b: A scalar `tf.Tensor`.
        dtype: Data type to perform operations in. `a` and `b` are casted to
            this dtype.

    Returns:
        A tuple of `tf.Tensor`s `(g, x, y)` such that `a*x + b*y = g = gcd(a, b)`.
    """
    a = tf.cast(a, dtype=dtype)
    b = tf.cast(b, dtype=dtype)
    x0, x1, y0, y1 = (tf.constant(0, dtype=dtype),
                      tf.constant(1, dtype=dtype),
                      tf.constant(1, dtype=dtype),
                      tf.constant(0, dtype=dtype))

    def cond(a, b, x0, x1, y0, y1):
        del b, x0, x1, y0, y1
        return tf.math.not_equal(a, tf.constant(0, dtype=dtype))

    def body(a, b, x0, x1, y0, y1):
        (q, a), b = (tf.cast(b / a, dtype=dtype), b % a), a
        y0, y1 = y1, y0 - q * y1
        x0, x1 = x1, x0 - q * x1
        return a, b, x0, x1, y0, y1

    a, b, x0, x1, y0, y1 = tf.while_loop(
        cond, body, loop_vars=(a, b, x0, x1, y0, y1))
    return b, x0, y0
e012ceb40fe778c23687a118ed139f1ba4ea4527
3,651,522
import numpy as np


def compute_running_mean(x, kernel_size):
    """ Fast analogue of scipy.signal.convolve2d with gaussian filter. """
    k = kernel_size // 2
    padded_x = np.pad(x, (k, k), mode='symmetric')
    cumsum = np.cumsum(padded_x, axis=1)
    cumsum = np.cumsum(cumsum, axis=0)
    return _compute_running_mean_jit(x, kernel_size, cumsum)
8d687c246b584dc43ce80cdfeb585c0f503be37f
3,651,523
from datetime import date


def _historicDataUrll(symbol, sDate=(1990, 1, 1), eDate=date.today().timetuple()[0:3]):
    """
    generate url

    symbol: Yahoo Finance symbol
    sDate: start date (y,m,d)
    eDate: end date (y,m,d)
    """
    urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
        format(symbol.upper(), sDate[1] - 1, sDate[2], sDate[0],
               eDate[1] - 1, eDate[2], eDate[0])
    return urlStr
433c345ae9a55cd628f4232a4dd80507f675b30e
3,651,524
def to_dict(eds, properties=True, lnk=True):
    """
    Encode the EDS as a dictionary suitable for JSON serialization.
    """
    nodes = {}
    for node in eds.nodes:
        nd = {
            'label': node.predicate,
            'edges': node.edges
        }
        if lnk and node.lnk is not None:
            nd['lnk'] = {'from': node.cfrom, 'to': node.cto}
        if node.type is not None:
            nd['type'] = node.type
        if properties:
            props = node.properties
            if props:
                nd['properties'] = props
        if node.carg is not None:
            nd['carg'] = node.carg
        nodes[node.id] = nd
    return {'top': eds.top, 'nodes': nodes}
c1a777a0a81ad2e3b9197b3df5e0d35a5174d61f
3,651,525
def find_lineage(tax_id: int) -> Lineage:
    """Finds lineage for a single tax id"""
    if tax_id % 50000 == 0:
        _LOGGER.info("working on tax_id: %d", tax_id)
    lineage = []
    while True:
        record = TAXONOMY_DICT[tax_id]
        lineage.append((record["tax_id"], record["rank"], record["rank_name"]))
        tax_id = record["parent_tax_id"]
        # every tax can be traced back to tax_id == 1, the root
        if tax_id == ROOT_TAX_ID:
            break
    # reverse results in lineage of Kingdom => species, this is helpful for
    # to_dict when there are multiple "no rank"s
    lineage.reverse()
    return Lineage(lineage)
75aeb2a0e222f44e72ba315134278ec9e73de706
3,651,526
from datetime import datetime
import pytz
import json
import traceback


def modify(request):
    """
    [Method summary]
      Group DB update processing
    """
    logger.logic_log('LOSI00001', 'None', request=request)

    msg = ''
    error_msg = {}
    now = datetime.now(pytz.timezone('UTC'))

    try:
        with transaction.atomic():
            json_str = json.loads(request.POST.get('json_str', '{}'))
            if 'json_str' not in json_str:
                msg = get_message('MOSJA23019', request.user.get_lang_mode(), showMsgId=False)
                logger.user_log('LOSM04000', 'json_str', request=request)
                raise Exception()

            # Lock the records before updating
            group_update_list = [
                rq['group_id']
                for rq in json_str['json_str']
                if int(rq['ope']) in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE)
            ]
            Group.objects.select_for_update().filter(pk__in=group_update_list)

            error_flag, error_msg = _validate(json_str['json_str'], request)
            if error_flag:
                raise Exception('validation error.')

            # Build the update data
            group_id_list_reg = []
            group_id_list_mod = []
            sorted_data = sorted(json_str['json_str'], key=lambda x: x['group_id'])
            upd_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_UPDATE, sorted_data))
            del_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_DELETE, sorted_data))
            ins_data = list(filter(lambda x: int(x['ope']) == defs.DABASE_OPECODE.OPE_INSERT, sorted_data))

            for rq in upd_data:
                group_id_list_mod = Group.objects.filter(group_id=rq['group_id'])
                if len(group_id_list_mod) <= 0:
                    logger.logic_log('LOSI04000', rq['group_name'], request=request)
                    continue
                # Group names of system-administration groups cannot be updated
                if int(rq['group_id']) not in defs.GROUP_DEFINE.PROTECTED_GROUP_IDS:
                    group_id_list_mod[0].group_name = rq['group_name']
                group_id_list_mod[0].summary = rq['summary']
                group_id_list_mod[0].last_update_user = request.user.user_name
                group_id_list_mod[0].last_update_timestamp = now
                group_id_list_mod[0].save(force_update=True)

            group_id_list_del = [
                rq['group_id']
                for rq in del_data
                if int(rq['group_id']) not in defs.GROUP_DEFINE.PROTECTED_GROUP_IDS
            ]

            for rq in ins_data:
                group_info = Group(
                    group_name=rq['group_name'],
                    summary=rq['summary'],
                    last_update_user=request.user.user_name,
                    last_update_timestamp=now
                )
                group_id_list_reg.append(group_info)

            # Insert the new groups
            Group.objects.bulk_create(group_id_list_reg)

            # Add access permissions
            _bulk_create_access_permission(
                request.user.user_name,
                [i.group_name for i in group_id_list_reg],
                now,
            )

            # Delete the groups marked for deletion
            Group.objects.filter(pk__in=group_id_list_del).delete()

            # Collect the user IDs belonging to the user groups being deleted
            before_user_list = list(
                UserGroup.objects.filter(group_id__in=group_id_list_del)
                .values_list('user_id', flat=True).distinct()
            )

            # Delete the user groups
            UserGroup.objects.filter(group_id__in=group_id_list_del).delete()

            # Find users that no longer belong to any group
            after_user_list = list(
                UserGroup.objects.filter(user_id__in=before_user_list)
                .values_list('user_id', flat=True).distinct()
            )
            delete_user_list = list(set(before_user_list) ^ set(after_user_list))

            # Delete users, password histories, and access permissions
            User.objects.filter(pk__in=delete_user_list).delete()
            PasswordHistory.objects.filter(user_id__in=delete_user_list).delete()
            AccessPermission.objects.filter(group_id__in=group_id_list_del).delete()

    except Exception as e:
        logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
        msg = get_message('MOSJA23021', request.user.get_lang_mode()) + '\\n' + str(e.args)
        response = {}
        response['status'] = 'failure'
        response['msg'] = msg
        response['error_msg'] = error_msg
        response_json = json.dumps(response)
        return HttpResponse(response_json, content_type="application/json")

    redirect_url = '/oase_web/system/group'
    response_json = '{"status": "success", "redirect_url": "%s"}' % redirect_url

    logger.logic_log('LOSI00002', 'None', request=request)
    return HttpResponse(response_json, content_type="application/json")
d596f0e239d2017f61a9747e2a5ed9731ff9308d
3,651,527
import inspect


def _function_args_doc(functions):
    """
    Create documentation of a list of functions.
    Return: usage dict (usage[funcname] = list of arguments, incl. default values),
    doc dict (doc[funcname] = docstring (or None)).
    Called by function_UI.
    """
    usage = {}
    doc = {}
    for f in functions:
        args = inspect.getargspec(f)
        if args.defaults is None:
            # Only positional arguments
            usage[f.__name__] = args.args
        else:
            # Keyword arguments too, build complete list
            usage[f.__name__] = args.args[:-len(args.defaults)] + \
                ['%s=%s' % (a, d) for a, d in
                 zip(args.args[-len(args.defaults):], args.defaults)]
        doc[f.__name__] = inspect.getdoc(f)
    return usage, doc
848fb1c7629d8e4feb848293cd965da6edc2ff4a
3,651,528
def mock_modules_list():
    """Standard module list without any issues"""
    return [
        {"name": "foo", "module_type": "app", "supported_platforms": ["macos"]},
        {"name": "bar", "module_type": "app"},
    ]
c4f20e95e87950a414b0ac156e6a07ac79dcdf19
3,651,529
import numpy as np


def cal_iou(box1, box1_area, boxes2, boxes2_area):
    """
    box1 [x1,y1,x2,y2]
    boxes2 [Msample,x1,y1,x2,y2]
    """
    x1 = np.maximum(box1[0], boxes2[:, 0])
    x2 = np.minimum(box1[2], boxes2[:, 2])
    y1 = np.maximum(box1[1], boxes2[:, 1])
    y2 = np.minimum(box1[3], boxes2[:, 3])

    intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
    iou = intersection / (box1_area + boxes2_area[:] - intersection[:])
    return iou
e27d942730cfe043034ec3f063934d94907314cf
3,651,530
def hbonds_single_c(snap, id1, id2, cut1, cut2, angle, names=False):
    """
    Binding of C++ routines in :mod:`.hbonds_c` for counting of hydrogen bonds
    in a single snapshot.

    Args:
        snap (:class:`.Snap`): single snapshot containing the atomic information
        id1 (str): identifier for oxygen atoms (e.g. 'O\_')
        id2 (str): identifier for hydrogen atoms (e.g. 'H\_')
        cut1 (float): maximum distance between two oxygen atoms
        cut2 (float): maximum distance between an oxygen and a hydrogen atom
        angle (float): minimum O-H-O angle in degree
        names (list[str], optional): names of oxygen atoms used as search centers

    Returns:
        float: number of hydrogen bonds found for this snapshot
    """
    atoms1 = snap.atoms[snap.atoms['id'] == id1]['pos'].values
    atoms1 = atoms1.reshape(len(atoms1) * 3)
    atoms2 = snap.atoms[snap.atoms['id'] == id2]['pos'].values
    atoms2 = atoms2.reshape(len(atoms2) * 3)
    cell = snap.cell.reshape(9)
    if names:
        center = snap.atoms.loc[snap.atoms['name'].isin(names)]
        center = center['pos'].values
        center = center.reshape(len(center) * 3)
        number = hbonds_c.hbonds(atoms1, atoms2, center, cut1, cut2, angle, cell)
    else:
        number = hbonds_c.hbonds(atoms1, atoms2, atoms1, cut1, cut2, angle, cell)
    return number
f4d7c73b631225505f8140e67da950979159e6c6
3,651,531
def _find_event_times(raw, event_id, mask):
    """Given the event_id and mask, find the event times."""
    stim_ch = find_stim_channel(raw)
    sfreq = raw.info['sfreq']
    events = find_events(raw, stim_ch, mask, event_id)
    times = [(event[0] - raw.first_samp) / sfreq for event in events]
    return times
1ade6a18567767db64ed57880b2b0837feade5d4
3,651,532
def get_parameters():
    """Load parameter values from AWS Systems Manager (SSM) Parameter Store"""
    parameters = {
        "kafka_servers": ssm_client.get_parameter(
            Name="/kafka_spark_demo/kafka_servers")["Parameter"]["Value"],
        "kafka_demo_bucket": ssm_client.get_parameter(
            Name="/kafka_spark_demo/kafka_demo_bucket")["Parameter"]["Value"],
        "schema_registry_url": ssm_client.get_parameter(
            Name="/kafka_spark_demo/schema_registry_url_int")["Parameter"]["Value"],
    }
    return parameters
0dbd8c505c5bf404d612bc83fb119f1291f5cbad
3,651,533
async def get_accounts(context, names, observer=None):
    """Find and return lite accounts by `names`.

    Observer: will include `followed` context.
    """
    assert isinstance(names, list), 'names must be a list'
    assert names, 'names cannot be blank'
    assert len(names) < 100, 'too many accounts requested'
    return await accounts_by_name(context['db'], names, observer, lite=True)
9e088f691cb92cf495b238d20902b276943b6044
3,651,534
def softmax_crossentropy_logits(p, q):
    """see sparse cross entropy"""
    return -(p * log_softmax(q)).sum(-1)
aa50eb4c7de8060a1ce9f9e7c879970db6d9b505
3,651,535
def SieveOfEratosthenes(limit=10**6):
    """Returns all primes not greater than limit."""
    isPrime = [True] * (limit + 1)
    isPrime[0] = isPrime[1] = False
    primes = []
    for i in range(2, limit + 1):
        if not isPrime[i]:
            continue
        primes += [i]
        for j in range(i * i, limit + 1, i):
            isPrime[j] = False
    return primes
6d1e12d289c9bfcdfadf64f764deba077a09ffd1
3,651,536
def generate_chromosome(constraint=False, constraint_levers=[], constraint_values=[],
                        threshold=False, threshold_names=[], thresholds=[]):
    """
    Initialises a chromosome and returns its corresponding lever values, and
    temperature and cost.

    **Args**:
    - constraint (*boolean*): Flag to select whether any inputs have been fixed.
    - constraint_levers (*list of strings*): Contains the name of levers to be fixed.
    - constraint_values (*list of floats*): Contains the values to fix the selected levers to.
    - threshold (*boolean*): Flag to select whether any inputs have to be bounded within a range.
    - threshold_names (*list of strings*): Contains the name of the levers to be bounded within a range.
    - thresholds (*list of lists of floats*): Contains the upper and lower bound for each specified lever.

    **Returns**:
    Lever values corresponding to generated chromosome and cost values
    corresponding to the current chromosome.
    """
    lever_names = list(dfs_3.iloc[:, 0].to_numpy())  # Create list with all lever names

    # Generate random lever combination
    random_lever_values = new_lever_combination(threshold=threshold,
                                                threshold_names=threshold_names,
                                                thresholds=thresholds)

    # Fix specified input levers
    if constraint == True:
        lever_names, random_lever_values = overwrite_lever_values(
            lever_names, random_lever_values, constraint_levers, constraint_values)

    # Move lever accordingly and read temperature and cost values
    result = move_lever(lever_names, random_lever_values, costs=True,
                        constraint=constraint,
                        constraint_levers=constraint_levers,
                        constraint_values=constraint_values)

    return random_lever_values, result
02fe7b4f34064410f635b68f2764fb50451e7cf0
3,651,538
def make_link_request(data: dict, user_token: str):
    """
    https://yandex.ru/dev/disk/api/reference/response-objects-docpage/#link

    - it will not raise in case of error HTTP code.
    - see `api/request.py` documentation for more.

    :param data: Data of link to handle.
    :param user_token: User OAuth token to access the API.

    :raises NotImplementedError: If link requires templating.
    """
    if (data["templated"]):
        raise NotImplementedError("Templating not implemented")

    url = data["href"]
    method = data["method"].upper()
    timeout = current_app.config["YANDEX_DISK_API_TIMEOUT"]

    return request(
        raise_for_status=False,
        content_type="json",
        method=method,
        url=url,
        timeout=timeout,
        auth=HTTPOAuthAuth(user_token),
        allow_redirects=False,
        verify=True
    )
4c3c183b7c8bd713594ee42623f5db0a43e98ffd
3,651,539
import warnings

import pandas as pd


def load_sample_bathymetry(**kwargs):
    """
    (Deprecated) Load a table of ship observations of bathymetry off Baja
    California as a pandas.DataFrame.

    .. warning:: Deprecated since v0.6.0. This function has been replaced with
       ``load_sample_data(name="bathymetry")`` and will be removed in v0.9.0.

    This is the ``@tut_ship.xyz`` dataset used in the GMT tutorials.

    The data are downloaded to a cache directory (usually ``~/.gmt/cache``)
    the first time you invoke this function. Afterwards, it will load the data
    from the cache. So you'll need an internet connection the first time
    around.

    Returns
    -------
    data : pandas.DataFrame
        The data table. Columns are longitude, latitude, and bathymetry.
    """
    if "suppress_warning" not in kwargs:
        warnings.warn(
            "This function has been deprecated since v0.6.0 and will be "
            "removed in v0.9.0. Please use "
            "load_sample_data(name='bathymetry') instead.",
            category=FutureWarning,
            stacklevel=2,
        )
    fname = which("@tut_ship.xyz", download="c")
    data = pd.read_csv(
        fname, sep="\t", header=None, names=["longitude", "latitude", "bathymetry"]
    )
    return data
085e2795f9f59a4222bdca5a97e8d1818aa11d75
3,651,540
async def ticket_channel_embed(
    _: hikari.InteractionCreateEvent, bot: hikari.GatewayBot
) -> hikari.Embed:
    """Provides an embed for individual ticket channels."""
    description = (
        "Thanks for submitting a ticket! We take all tickets "
        "very seriously. Please provide a full explanation in this "
        "channel. You can include text, images, files, video, or "
        "documents. \n\nPlease do not ping the Mods or Staff unless "
        "there is a life or death situation. Someone will address it "
        "when available."
    )
    embed = hikari.Embed(title="", description=description, color=8454399)
    embed.set_thumbnail(
        "https://cdn.discordapp.com/attachments/733789542884048906/900079323279663175/85d744c5310511ecb705f23c91500735.png"
    )
    embed.set_author(name=bot.get_me().username, icon=bot.get_me().avatar_url)
    return embed
1c45535c8a7b606ac80a8a2fefd7e78079ed25f6
3,651,542
from typing import List


def count_branching_factor(strips_ops: List[STRIPSOperator],
                           segments: List[Segment]) -> int:
    """Returns the total branching factor for all states in the segments."""
    total_branching_factor = 0
    for segment in segments:
        atoms = segment.init_atoms
        objects = set(segment.states[0])
        ground_ops = {
            ground_op
            for op in strips_ops
            for ground_op in all_ground_operators(op, objects)
        }
        for _ in get_applicable_operators(ground_ops, atoms):
            total_branching_factor += 1
    return total_branching_factor
155b7258f320a95ca56736331686470bc8c5a5f7
3,651,543
import torch


def iou_overlaps(b1, b2):
    """
    Arguments:
        b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
        b2: gts, [n, >=4] (x1, y1, x2, y2, ...)

    Returns:
        intersection-over-union pair-wise, generalized iou.
    """
    area1 = (b1[:, 2] - b1[:, 0] + 1) * (b1[:, 3] - b1[:, 1] + 1)
    area2 = (b2[:, 2] - b2[:, 0] + 1) * (b2[:, 3] - b2[:, 1] + 1)
    # only for giou loss
    lt1 = torch.max(b1[:, :2], b2[:, :2])
    rb1 = torch.max(b1[:, 2:4], b2[:, 2:4])
    lt2 = torch.min(b1[:, :2], b2[:, :2])
    rb2 = torch.min(b1[:, 2:4], b2[:, 2:4])
    wh1 = (rb2 - lt1 + 1).clamp(min=0)
    wh2 = (rb1 - lt2 + 1).clamp(min=0)
    inter_area = wh1[:, 0] * wh1[:, 1]
    union_area = area1 + area2 - inter_area
    iou = inter_area / torch.clamp(union_area, min=1)
    ac_union = wh2[:, 0] * wh2[:, 1] + 1e-7
    giou = iou - (ac_union - union_area) / ac_union
    return iou, giou
ba9b445223fea5ea8332a189b297c8c40205a4e5
3,651,544
def aggregate(data):
    """Aggregate the data."""
    return NotImplemented
2d7fd424d70858e6065dca34991308f0ed6c945c
3,651,545
import numpy as np


def get_valid_columns(solution):
    """Get a list of column indices for which the column has more than one
    class.

    This is necessary when computing BAC or AUC which involves true positive
    and true negative in the denominator. When some class is missing, these
    scores don't make sense (or you have to add an epsilon to remedy the
    situation).

    Args:
        solution: array, a matrix of binary entries, of shape
            (num_examples, num_features)

    Returns:
        valid_columns: a list of indices for which the column has more than
            one class.
    """
    num_examples = solution.shape[0]
    col_sum = np.sum(solution, axis=0)
    valid_columns = np.where(1 - np.isclose(col_sum, 0) -
                             np.isclose(col_sum, num_examples))[0]
    return valid_columns
b5aeb01f3362dc8ab1ed22cd86ad7d6995e36a3e
3,651,546
def fourier_transform(data, proc_parameters):
    """Perform Fourier Transform down dim dimension given in proc_parameters

    .. Note::
        Assumes dt = t[1] - t[0]

    Args:
        data (nddata): Data container
        proc_parameters (dict, procParam): Processing parameters

    Returns:
        nddata: Fourier Transformed data

    Example:

    .. code-block:: python

        proc_parameters['dim'] = 't'
        proc_parameters['zero_fill_factor'] = 2
        proc_parameters['shift'] = True
        proc_parameters['convert_to_ppm'] = True

        all_data = dnplab.dnpNMR.fourier_transform(all_data, proc_parameters)
    """
    required_parameters = defaults._fourier_transform

    # Add required parameters to proc_parameters
    print(required_parameters)
    for key in required_parameters:
        if key not in proc_parameters:
            proc_parameters[key] = required_parameters[key]

    dim = proc_parameters["dim"]
    zero_fill_factor = proc_parameters["zero_fill_factor"]
    shift = proc_parameters["shift"]
    convert_to_ppm = proc_parameters["convert_to_ppm"]

    index = data.dims.index(dim)
    dt = data.coords[index][1] - data.coords[index][0]
    n_pts = zero_fill_factor * len(data.coords[index])
    f = (1.0 / (n_pts * dt)) * np.r_[0:n_pts]
    if shift == True:
        f -= 1.0 / (2 * dt)

    data.values = np.fft.fft(data.values, n=n_pts, axis=index)
    if shift:
        data.values = np.fft.fftshift(data.values, axes=index)
    data.coords[index] = f

    return data
e3a9aafdb2661d112f1e02885477711c2c6d3d22
3,651,547
import copy


def iupac_fasta_converter(header, sequence):
    """
    Given a sequence (header and sequence itself) containing iupac characters,
    return a dictionary with all possible sequences converted to ATCG.
    """
    iupac_dict = {"R": "AG", "Y": "CT", "S": "GC", "W": "AT", "K": "GT",
                  "M": "AC", "B": "CGT", "D": "AGT", "H": "ACT", "V": "ACG",
                  "N": "ACGT"}
    iupac_dict = {k: list(iupac_dict[k]) for k in list(iupac_dict.keys())}
    if sequence.upper().count("N") >= 10:
        return {header: sequence}
    sequence = list(sequence.upper())
    result_list = []

    def iupac_recurse(seq):
        for i in range(len(seq)):
            if seq[i] in list(iupac_dict.keys()):
                iup = iupac_dict[seq[i]]
                for i_seq in iup:
                    new_seq = copy.deepcopy(seq)
                    new_seq[i] = i_seq
                    iupac_recurse(new_seq)
                break
        else:
            result_list.append("".join(seq))

    iupac_recurse(sequence)
    if len(result_list) == 1:
        return {header: result_list[0]}
    else:
        return {header + "-" + str(i): result_list[i]
                for i in range(len(result_list))}
95a713e87564c4d8e807e1d476439568a562731b
3,651,548
from collections import namedtuple


def integer_list_to_named_tuple(list_of_integers):
    """
    Converts a list of integers read from the ultrak498 into a named tuple
    based upon the type. The type is determined by the first integer in the
    list. Since all tuples contain five fields, the list of integers must have
    a length of five.

    Returns a named tuple based on the type.
    """
    # Dictionary mapping type id to record named tuples.
    valid_types = {
        0: namedtuple("RaceHeader", "type year month day id"),
        1: namedtuple("RaceHeader", "type year month day id"),
        2: namedtuple("RaceHeader", "type year month day id"),
        3: namedtuple("RaceHeader", "type year month day id"),
        4: namedtuple("RaceHeader", "type year month day id"),
        5: namedtuple("RaceHeader", "type year month day id"),
        6: namedtuple("RaceHeader", "type year month day id"),
        7: namedtuple("RaceHeader", "type year month day id"),
        8: namedtuple("RaceHeader", "type year month day id"),
        9: namedtuple("RaceHeader", "type year month day id"),
        10: namedtuple("LapTime", "type minutes seconds hundreths lap"),
        20: namedtuple("AbsTime", "type minutes seconds hundreths lap"),
        30: namedtuple("Type30", "type a b c laps"),
        40: namedtuple("Type40", "type a b c laps"),
        50: namedtuple("RaceEnd", "type minutes seconds hundreths laps"),
    }

    # List of integers must be length of five.
    if len(list_of_integers) != 5:
        raise ValueError("Unable to convert list of integers to tuple; incorrect number of integers.")

    # First byte is the type; type must be known.
    tuple_type = list_of_integers[0]
    if tuple_type not in valid_types:
        raise ValueError("Unable to convert list of integers to tuple; unknown record type [%d]." % tuple_type)

    # Create a namedtuple based upon the tuple_type.
    named_tuple = valid_types[tuple_type]._make(list_of_integers)
    return named_tuple
50aed101577c263f213c3487dc56d9d0886c6530
3,651,549
def get_final_shape(data_array, out_dims, direction_to_names):
    """
    Determine the final shape that data_array must be reshaped to in order to
    have one axis for each of the out_dims (for instance, combining all axes
    collected by the '*' direction).
    """
    final_shape = []
    for direction in out_dims:
        if len(direction_to_names[direction]) == 0:
            final_shape.append(1)
        else:
            # determine shape once dimensions for direction (usually '*') are combined
            final_shape.append(
                np.product([len(data_array.coords[name])
                            for name in direction_to_names[direction]]))
    return final_shape
f1407936f9e1e7bebe55461abe4999a4fdf9636d
3,651,551
import pytz


def create_assignment_payload(subsection_block):
    """
    Create a Canvas assignment dict matching a subsection block on edX

    Args:
        subsection_block (openedx.core.djangoapps.content.block_structure.block_structure.BlockData):
            The block data for the graded assignment/exam (in the structure of
            a course, this unit is a subsection)

    Returns:
        dict: Assignment payload to be sent to Canvas to create or update the
            assignment
    """
    return {
        "assignment": {
            "name": subsection_block.display_name,
            "integration_id": str(subsection_block.location),
            "grading_type": "percent",
            "points_possible": DEFAULT_ASSIGNMENT_POINTS,
            "due_at": (
                None
                if not subsection_block.fields.get("due")
                # The internal API gives us a TZ-naive datetime for the due date,
                # but Studio indicates that the user should enter a UTC datetime
                # for the due date. Coerce this to UTC before creating the
                # string representation.
                else subsection_block.fields["due"].astimezone(pytz.UTC).isoformat()
            ),
            "submission_types": ["none"],
            "published": False,
        }
    }
5c8327d0731aaae16769429833d80b87bf39fb9d
3,651,552
def return_random_initial_muscle_lengths_and_activations(InitialTension, X_o, **kwargs):
    """
    This function returns initial muscle lengths and muscle activations for a
    given pretensioning level, as derived from (***insert file_name here for
    scratchwork***) for the system that starts from rest.
    (Ex. pendulum_eqns.reference_trajectories._01).

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    **kwargs
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    1) Seed - Can seed the random tension generated. When FixedInitialTension
        is provided, this seed will apply only to the initial conditions for
        activation and muscle length.

    2) PlotBool - Must be either True or False. Default is False. Will plot
        all possible initial muscle lengths and activations for a given
        pretensioning level.

    3) InitialTensionAcceleration - must be a numpy array of shape (2,).
        Default is set to the value generated from zero IC's. If using a
        different reference trajectory, set InitialAngularAcceleration to
        d2r(0) (See below).

    4) InitialAngularAcceleration - must be either a numpy.float64, float, or
        int. Default is set to 0 to simulate starting from rest. Choice of
        reference trajectory *should* not matter as it is either 0 or d2r(0)
        (either by convention or by choice).

    5) InitialAngularSnap - must be either a numpy.float64, float, or int.
        Default is set to 0 to simulate starting from rest. Choice of
        reference trajectory *should* not matter as it is either 0 or d4r(0)
        (either by convention or by choice).

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    """
    PlotBool = kwargs.get("PlotBool", False)
    assert type(PlotBool) == bool, "PlotBool must be a boolean. Default is False."

    InitialAngularAcceleration = kwargs.get(
        "InitialAngularAcceleration",
        0
    )  # 0 or d2r(0)
    assert str(type(InitialAngularAcceleration)) in [
        "<class 'float'>", "<class 'int'>", "<class 'numpy.float64'>"], \
        "InitialAngularAcceleration must be either a float or an int."

    InitialAngularSnap = kwargs.get(
        "InitialAngularSnap",
        0
    )  # 0 or d4r(0)
    assert str(type(InitialAngularSnap)) in [
        "<class 'float'>", "<class 'int'>", "<class 'numpy.float64'>"], \
        "InitialAngularSnap must be either a float or an int."

    InitialTensionAcceleration = kwargs.get(
        "InitialTensionAcceleration",
        return_initial_tension_acceleration(
            InitialTension,
            X_o,
            InitialAngularAcceleration=InitialAngularAcceleration,
            InitialAngularSnap=InitialAngularSnap
        )
    )
    assert np.shape(InitialTensionAcceleration) == (2,) \
        and str(type(InitialTensionAcceleration)) == "<class 'numpy.ndarray'>", \
        "InitialTensionAcceleration must be a numpy array of shape (2,)"

    a_MTU1_o = np.sign(-r1(X_o[0]))*(
        InitialAngularAcceleration
        * np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)
        + X_o[1]**2
        * dr1_dθ(X_o[0])
        * (d2r1_dθ2(X_o[0]) + r1(X_o[0]))
        / np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)
    )
    a_MTU2_o = np.sign(-r2(X_o[0]))*(
        InitialAngularAcceleration
        * np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)
        + X_o[1]**2
        * dr2_dθ(X_o[0])
        * (d2r2_dθ2(X_o[0]) + r2(X_o[0]))
        / np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)
    )

    L1_UB = lo1*L_CE_max_1*(
        k_1*np.log(
            np.exp(
                (m1*InitialTensionAcceleration[0]
                 + (F_MAX1*cT/lTo1)
                 * (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
                 * (c3*InitialTension[0] - m1*a_MTU1_o)
                 )
                / (F_MAX1*c3**2
                   * c_1*k_1
                   * (F_MAX1*cT/lTo1)
                   * (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
                   )
            ) - 1
        ) + Lr1
    )

    L2_UB = lo2*L_CE_max_2*(
        k_1*np.log(
            np.exp(
                (m2*InitialTensionAcceleration[1]
                 + (F_MAX2*cT/lTo2)
                 * (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
                 * (c4*InitialTension[1] - m2*a_MTU2_o)
                 )
                / (F_MAX2*c4**2
                   * c_1*k_1
                   * (F_MAX2*cT/lTo2)
                   * (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
                   )
            ) - 1
        ) + Lr1
    )

    L1_LB = 0.5*lo1
    if L1_UB > 1.5*lo1:
        L1_UB = 1.5*lo1
    L1 = np.linspace(L1_LB, L1_UB, 1001)
    # mu1, sigma1 = lo1, 0.1*lo1
    # L1 = np.array(list(sorted(np.random.normal(mu1, sigma1, 1001))))
    U1 = (m1*InitialTensionAcceleration[0]
          + (F_MAX1*cT/lTo1)
          * (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
          * (c3*InitialTension[0]
             - m1*a_MTU1_o
             - F_MAX1*c3**3
             * c_1*k_1
             * np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)
             )
          ) \
        / (
            F_MAX1*c3**2
            * (F_MAX1*cT/lTo1)
            * (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))
            * np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ))
        )
    # U1 = (
    #     InitialTension[0][0]/(F_MAX1*np.cos(α1))
    #     - c_1*k_1*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)
    # ) / (np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ)))

    L2_LB = 0.5*lo2
    if L2_UB > 1.5*lo2:
        L2_UB = 1.5*lo2
    L2 = np.linspace(L2_LB, L2_UB, 1001)
    # mu2, sigma2 = lo2, 0.1*lo2
    # L2 = np.array(list(sorted(np.random.normal(mu2, sigma2, 1001))))
    U2 = (m2*InitialTensionAcceleration[1]
          + (F_MAX2*cT/lTo2)
          * (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
          * (c4*InitialTension[1]
             - m2*a_MTU2_o
             - F_MAX2*c4**3
             * c_1*k_1
             * np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)
             )
          ) \
        / (
            F_MAX2*c4**2
            * (F_MAX2*cT/lTo2)
            * (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))
            * np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ))
        )
    # U2 = (
    #     InitialTension[1][0]/(F_MAX2*np.cos(α2))
    #     - c_1*k_1*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)
    # ) / (np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ)))

    if PlotBool == True:
        plt.figure(figsize=(10, 8))
        plt.title(r"Viable Initial $l_{m,1}$ and $u_{1}$ Values")
        plt.xlabel(r"$l_{m,1}$ (m)", fontsize=14)
        plt.ylabel(r"$u_{1}$", fontsize=14)
        plt.scatter(L1, U1)
        plt.plot([lo1, lo1], [0, 1], '0.70', linestyle='--')
        plt.gca().set_ylim((0, 1))
        plt.gca().set_xticks(
            [0.25*lo1, 0.5*lo1, 0.75*lo1, lo1, 1.25*lo1, 1.5*lo1, 1.75*lo1]
        )
        plt.gca().set_xticklabels(
            ["", r"$\frac{1}{2}$ $l_{o,2}$", "", r"$l_{o,2}$", "",
             r"$\frac{3}{2}$ $l_{o,2}$", ""],
            fontsize=12)

        plt.figure(figsize=(10, 8))
        plt.title(r"Viable Initial $l_{m,2}$ and $u_{2}$ Values")
        plt.xlabel(r"$l_{m,2}$ (m)", fontsize=14)
        plt.ylabel(r"$u_{2}$", fontsize=14)
        plt.scatter(L2, U2)
        plt.plot([lo2, lo2], [0, 1], '0.70', linestyle='--')
        plt.gca().set_ylim((0, 1))
        plt.gca().set_xticks(
            [0.25*lo2, 0.5*lo2, 0.75*lo2, lo2, 1.25*lo2, 1.5*lo2, 1.75*lo2]
        )
        plt.gca().set_xticklabels(
            ["", r"$\frac{1}{2}$ $l_{o,2}$", "", r"$l_{o,2}$", "",
             r"$\frac{3}{2}$ $l_{o,2}$", ""],
            fontsize=12)

        plt.show()

    return (L1, U1, L2, U2)
afaa5905e3ae978217ac7f7e2b677af62bb33dd9
3,651,553
def add_top_features(df, vocab, n=10):
    """
    INPUT: PySpark DataFrame, List, Int
    RETURN: PySpark DataFrame

    Take in DataFrame with TFIDF vectors, list of vocabulary words, and number
    of features to extract. Map top features from TFIDF vectors to vocabulary
    terms. Return new DataFrame with terms.
    """
    # Create udf function to extract top n features
    extract_features_udf = udf(lambda x: extract_top_features(x, vocab, n))

    # Apply udf, create new df with features column
    df_features = df.withColumn("top_features",
                                extract_features_udf(df["tfidf_vectors_sum"]))
    return df_features
741bcbb2fea0894f5218871e3f72360bf6f2caab
3,651,554
from typing import List, Tuple, Union


def java_solvability(level: MarioLevel, time_per_episode=20, verbose=False,
                     return_trajectories=False
                     ) -> Union[bool, Tuple[bool, List[Tuple[float, float]]]]:
    """Returns a boolean indicating if this level is solvable.

    Args:
        level (MarioLevel): The level
        time_per_episode (int, optional): How many seconds per episode.
            Defaults to 20.
        verbose (bool, optional): Should this print verbose info. Defaults to
            False.
        return_trajectories (bool, optional): If this is true, then we are by
            default verbose and we return trajectories.

    Returns:
        Union[
            bool: Is this solvable
            Tuple[bool, List[Tuple[float, float]]]: solvable, trajectory if
                return_trajectories = True
        ]
    """
    filename = write_level_to_file(level)
    verbose = verbose or return_trajectories
    args = ["Astar_Solvability", filename, str(time_per_episode), str(1),
            str(verbose).lower()]
    s = timer()
    string = run_java_task(args)
    e = timer()
    lines = string.split("\n")
    result_line = [l for l in lines if 'Result' in l]
    if len(result_line) == 0:
        raise Exception("Java didn't print out result properly: " + string +
                        "args = " + ' '.join(args))

    if return_trajectories:
        traj_line = [l for l in lines if 'Trajectories' in l]
        if len(traj_line) == 0:
            raise Exception("Java didn't print out trajectory properly: " +
                            string + "args = " + ' '.join(args))
        vals = [s.strip() for s in traj_line[0].split(":")[1].split(" ")]
        vals = [s for s in vals if s != '']
        vals = [tuple(map(float, s.split(','))) for s in vals]
        return 'WIN' in result_line[0], vals

    return 'WIN' in result_line[0]
8f4b282a8ae0b217ca12828cb20724e943de35b2
3,651,555
def get_trait_value(traitspec, value_name, default=None):
    """
    Return the attribute `value_name` from traitspec if it is defined.
    If not, return the value of `default`.

    Parameters
    ----------
    traitspec: TraitedSpec

    value_name: str
        Name of the `traitspec` attribute.

    default: any
        A default value in case the attribute does not exist or is not defined.

    Returns
    -------
    trait_value: any
    """
    val = getattr(traitspec, value_name, default)
    return default if not isdefined(val) else val
5bc4d23b326b59e0a542a5b3113f8906e9a88c49
3,651,556
import collections


def check_if_blank(cell_image: Image) -> bool:
    """Check if image is blank

    Sample the color of the black and white content - if it is white enough,
    assume no text and skip. The function crops to a smaller, more centered
    section before OCR to avoid edge lines.

    :param cell_image: Image to OCR
    :return: True or None
    """
    w, h = cell_image.size
    crop = cell_image.crop((w * 0.1, h * 0.1, w * 0.8, h * 0.8))
    data = crop.getdata()
    counts = collections.Counter(data)
    # This threshold needs to fluctuate, or a way to derive it in code is
    # needed. One idea is to sample a predictable, known-white slice of the
    # page and use that as the threshold, so that empty sections are dropped
    # accurately and gibberish is removed.
    if len(counts) < 50:
        return True
    return False
6cb3be0da1d15e1ba4fb2ccc7199709058792d5c
3,651,557
def get_tpr_from_threshold(scores, labels, threshold_list):
    """Calculate the recall (true positive rate) list from the threshold list.

    Args:
        scores: list of scores
        labels: list of labels corresponding to the scores
        threshold_list: list, the threshold list

    Returns:
        tpr_list: list, each element is the recall score calculated at the
            corresponding threshold
    """
    tpr_list = []
    hack_scores = []
    for score, label in zip(scores, labels):
        if label == 1:
            hack_scores.append(float(score))
    hack_scores.sort(reverse=True)
    hack_nums = len(hack_scores)
    for threshold in threshold_list:
        hack_index = 0
        while hack_index < hack_nums:
            if hack_scores[hack_index] <= threshold:
                break
            else:
                hack_index += 1
        if hack_nums != 0:
            tpr = hack_index * 1.0 / hack_nums
        else:
            tpr = 0
        tpr_list.append(tpr)
    return tpr_list
97796fb0f1ba9d41cf6e9c4bb21d1ca8f94499e3
3,651,558
def updating_node_validation_error(address=False, port=False, id=False, weight=False):
    """
    Verified 2015-06-16:
    - when trying to update a CLB node's address/port/id, which are immutable.
    - when trying to update a CLB node's weight to be < 1 or > 100

    At least one of address, port, id, and weight should be `True` for this
    error to apply.

    :param bool address: Whether the address was passed to update
    :param bool port: Whether the port was passed to update
    :param bool id: Whether the ID was passed to update
    :param bool weight: Whether the weight was passed to update and wrong

    :return: a `tuple` of (dict body message, 400 http status code)
    """
    messages = []
    if address:
        messages.append("Node ip field cannot be modified.")
    if port:
        messages.append("Port field cannot be modified.")
    if weight:
        messages.append("Node weight is invalid. Range is 1-100. "
                        "Please specify a valid weight.")
    if id:
        messages.append("Node id field cannot be modified.")

    return (
        {
            "validationErrors": {
                "messages": messages
            },
            "message": "Validation Failure",
            "code": 400,
            "details": "The object is not valid"
        },
        400
    )
68c5fdda121950c679afe446bfd7fb19331deb40
3,651,559
import numpy as np


def gaussianDerivative(x):
    """This function returns the gaussian derivative of x
    (Note: Not Real Derivative)
    """
    return -2.0 * x * (np.sqrt(-np.log(x)))
6b8312b399f627708007e80e5c72cedde4e944fc
3,651,560
def parse_numbers(numbers):
    """Return list of numbers."""
    return [int(number) for number in numbers]
ee79d4e15cbfb269f7307710d9ad4735687f7128
3,651,561
import json


def add_server():
    """ Adds a server to database if not exists """
    data = json.loads(request.data)
    ip_addr = IPModel.get_or_create(address=data["ip_addr"])[0]
    ServerModel.create(ip=ip_addr, port=data["port"])
    return 'OK'
31ed6860fb311e00e9ee266a121cb44c256723a6
3,651,562
def get_continuum_extrapolation(  # pylint: disable=C0103
    df: pd.DataFrame,
    n_poly_max: int = 4,
    delta_x: float = 1.25e-13,
    include_statistics: bool = True,
    odd_poly: bool = False,
) -> pd.DataFrame:
    """Takes a data frame read in by read tables and runs a continuum
    extrapolation for the spectrum.

    The continuum extrapolation is executed by an even polynomial fit up to
    order `n_poly_max`.

    **Arguments**
        df: pd.DataFrame
            DataFrame returned by `read_table`.

        n_poly_max: int = 4
            Maximal order of the polynomial used for the spectrum
            extrapolation. The fitter runs fits from 1 to `n_poly_max` even
            polynomials and picks the best one defined by the maximum of the
            logGBF.

        delta_x: float = 1.25e-13
            Approximate error for the x-values.

        include_statistics: bool = True
            Includes fit statistics like chi2/dof or logGBF.

        odd_poly: bool = False
            Allow fits of odd polynomials.
    """
    if lsqfit is None or gv is None:
        raise ImportError(
            "Cannot load `lsqfit` and `gvar`. Thus fitting is not possible."
        )

    group = df.groupby(["L", "nstep", "nlevel"])[["epsilon", "x"]]
    fit_df = group.apply(
        _group_wise_fit,
        n_poly_max=n_poly_max,
        delta_x=delta_x,
        include_statistics=include_statistics,
        odd_poly=odd_poly,
    ).reset_index()
    fit_df["epsilon"] = 0

    if "level_3" in fit_df.columns:
        fit_df = fit_df.drop(columns=["level_3"])

    return fit_df
7c4ce775b064142647259cf25c8f323c08fc99d0
3,651,565
def listvalues(d):
    """Return `d` value list"""
    return list(itervalues(d))
2c0bcbc112e10afac3d6d958c6a494bdd19dea6c
3,651,566
def _non_blank_line_count(string):
    """
    Parameters
    ----------
    string : str or unicode
        String (potentially multi-line) to search in.

    Returns
    -------
    int
        Number of non-blank lines in string.
    """
    non_blank_counter = 0
    for line in string.splitlines():
        if line.strip():
            non_blank_counter += 1
    return non_blank_counter
dfa6f43af95c898b1f4763573e8bf32ddf659520
3,651,567
def load(map_name, batch_size):
    """Load CarlaEnvironment

    Args:
        map_name (str): name of the map. Currently available maps are:
            'Town01', 'Town02', 'Town03', 'Town04', 'Town05', 'Town06',
            'Town07', and 'Town10HD'
        batch_size (int): the number of vehicles in the simulation.
    """
    return CarlaEnvironment(batch_size, map_name)
4433ad4fc4985a9ceaabd8e7ce3d8d3b0d419c80
3,651,568
from base64 import b64decode


def decrypt_password(encrypted_password: str) -> str:
    """
    b64 decoding

    :param encrypted_password: encrypted password with b64
    :return: password in plain text
    """
    return b64decode(encrypted_password).decode("UTF-8")
e501a3da671f28f6f751ed289da961f30377d248
3,651,569
def walk(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
    """Returns the Walk task."""
    # physics = Physics.from_xml_string(*get_model_and_assets())
    physics = SuperballContactSimulation("tt_ntrt_on_ground.xml")
    task = PlanarSuperball(move_speed=_WALK_SPEED, random=random)
    environment_kwargs = environment_kwargs or {}
    return control.Environment(
        physics, task, time_limit=time_limit,
        control_timestep=_CONTROL_TIMESTEP,
        **environment_kwargs)
2b4de77661a7f0dd235c2f1d258e627ff110f3c3
3,651,571
def encode_direct(list_a: list):
    """Problem 13: Run-length encoding of a list (direct solution).

    Parameters
    ----------
    list_a : list
        The input list

    Returns
    -------
    list of list
        A run-length-encoded list

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')

    if len(list_a) <= 1:
        # In case of empty or one-element list return.
        return list_a

    encoded, current, count = [], list_a[0], 1
    for element in list_a[1:]:
        if current != element:
            # If current element does not match the recorded current,
            # append the count to the list
            encoded.append(current if count == 1 else [count, current])
            current, count = element, 1
        else:
            # If another same element is found, increase counter
            count += 1
    encoded.append(current if count == 1 else [count, current])

    return encoded
9a20ffd2051003d5350f7e059d98c35310bc9bbe
3,651,573
def handler500(request):
    """
    HTTP Error 500 Internal Server Error
    """
    return HttpResponse('<h1>HTTP Error 500 Internal server error</h1>', {})
92dc4cb815d34425e9c4f49ab878f6c57838d7b8
3,651,574
def increase_line_complexity(linestring, n_points):
    """
    linestring (shapely.geometry.linestring.LineString):
    n_points (int): target number of points
    """
    # or to get the distances closest to the desired one:
    # n = round(line.length / desired_distance_delta)
    distances = np.linspace(0, linestring.length, n_points)
    points = [linestring.interpolate(distance) for distance in distances]
    return shapely.geometry.linestring.LineString(points)
9747a6277a6333b6f1e92e479e0f286a01c8ae4e
3,651,575
def get_topic_prevelance(doc_topic_matrix, num_topics, total_num_docs):
    """Input: doc_topic_matrix, a numpy nd array where each row represents a
    doc, and each column is the association of the doc with a topic.
    num_topics, an integer holding the number of topics. total_num_docs, an
    int holding the number of docs in the corpus.

    Output: a list where index i represents the prevalence of topic i within
    the corpus."""
    topic_prev = [0] * num_topics
    for i in range(0, num_topics):
        topic_doc = doc_topic_matrix[:, i]
        for j in range(0, len(topic_doc)):
            if topic_doc[j] > TOPIC_PRESSENCE_THRESHOLD:
                topic_prev[i] += 1
        topic_prev[i] = topic_prev[i] / total_num_docs
    return topic_prev
752214cba87b8d1766ceba139b029197c4f51df2
3,651,576
async def drones_byDroneId_delete(request, droneId):
    """
    Remove a drone from the fleet

    It is handler for DELETE /drones/<droneId>
    """
    return handlers.drones_byDroneId_deleteHandler(request, droneId)
28900c7df711fde5833b50683a738fe5567202ff
3,651,577
import six


def generate_sql_integration_data(sql_test_backends):
    """Populate test data for SQL backends for integration testing."""
    sql_schema_info = get_sqlalchemy_schema_info()
    vertex_values, edge_values, uuid_to_class_name = get_integration_data()

    # Represent all edges as foreign keys
    uuid_to_foreign_key_values = {}
    for edge_name, edge_values in six.iteritems(edge_values):
        for edge_value in edge_values:
            from_classname = uuid_to_class_name[edge_value["from_uuid"]]
            edge_field_name = "out_{}".format(edge_name)
            join_descriptor = sql_schema_info.join_descriptors[from_classname][edge_field_name]
            is_from_uuid = join_descriptor.from_column == "uuid"
            is_to_uuid = join_descriptor.to_column == "uuid"
            if is_from_uuid == is_to_uuid:
                raise NotImplementedError(
                    "Exactly one of the join columns was expected to "
                    "be uuid. found {}".format(join_descriptor)
                )

            if is_from_uuid:
                existing_foreign_key_values = uuid_to_foreign_key_values.setdefault(
                    edge_value["to_uuid"], {}
                )
                if join_descriptor.to_column in existing_foreign_key_values:
                    raise NotImplementedError(
                        "The SQL backend does not support many-to-many "
                        "edges. Found multiple edges of class {} from "
                        "vertex {}.".format(edge_name, edge_value["to_uuid"])
                    )
                existing_foreign_key_values[join_descriptor.to_column] = edge_value["from_uuid"]
            elif is_to_uuid:
                existing_foreign_key_values = uuid_to_foreign_key_values.setdefault(
                    edge_value["from_uuid"], {}
                )
                if join_descriptor.from_column in existing_foreign_key_values:
                    raise NotImplementedError(
                        "The SQL backend does not support many-to-many "
                        "edges. Found multiple edges of class {} to "
                        "vertex {}.".format(edge_name, edge_value["to_uuid"])
                    )
                existing_foreign_key_values[join_descriptor.from_column] = edge_value["to_uuid"]

    # Insert all the prepared data into the test database
    for sql_test_backend in six.itervalues(sql_test_backends):
        for vertex_name, insert_values in six.iteritems(vertex_values):
            table = sql_schema_info.vertex_name_to_table[vertex_name]
            table.delete(bind=sql_test_backend.engine)
            table.create(bind=sql_test_backend.engine)
            for insert_value in insert_values:
                foreign_key_values = uuid_to_foreign_key_values.get(insert_value["uuid"], {})
                all_values = merge_non_overlapping_dicts(insert_value, foreign_key_values)
                sql_test_backend.engine.execute(table.insert().values(**all_values))

    return sql_schema_info
1f8fe9550b069a942a900d547874c787d27576c3
3,651,578
from django.shortcuts import render


def software_detail(request, context, task_id, vm_id):
    """ Render the detail page for a user's VM: VM stats, software, and runs. """
    softwares = model.get_software(task_id, vm_id)
    runs = model.get_vm_runs_by_task(task_id, vm_id)
    datasets = model.get_datasets_by_task(task_id)

    # Construct a dictionary that has the software as a key and as value a list of runs with that software.
    # Note that we order the list in such a way that evaluations of a run are right behind that run in the
    # list (based on the input_run).
    runs_with_input = {}  # the runs which have an input_run_id
    for r in runs:
        # if we loop once, might as well get the review-info here.
        r['review'] = model.get_run_review(r.get("dataset"), vm_id, r.get("run_id"))
        if r.get("input_run_id") == 'none':
            continue
        runs_with_input.setdefault(r.get("input_run_id"), []).append(r)

    runs_without_input = [r for r in runs if r.get("input_run_id") == "none"]

    runs_by_software = {}
    for r in runs_without_input:
        runs_by_software.setdefault(r.get("software"), []).append(r)
        runs_by_software.setdefault(r.get("software"), []).extend(runs_with_input.pop(r.get("run_id"), []))

    # left-over runs_with_input, where the input-run does not exist anymore
    for v in runs_with_input.values():
        for r in v:
            runs_by_software.setdefault(r.get("software"), []).append(r)

    software = [{
        "software": sw,
        "runs": runs_by_software.get(sw["id"])
    } for sw in softwares]

    vm = model.get_vm(vm_id)
    context["task"] = model.get_task(task_id)
    context["vm_id"] = vm_id
    context["vm"] = {"host": vm.host, "user": vm.userName, "password": vm.userPw,
                     "ssh": vm.portSsh, "rdp": vm.portRdp}
    context["software"] = software
    context["datasets"] = datasets

    return render(request, 'tira/software.html', context)
2e740426bc4f86d1b3d5dd2ddbaa4bdd5f6ae772
3,651,581
import numpy as np
import pandas as pd


def compile_error_curves(dfs, window_size=60):
    """
    Takes a dict of timeseries dfs (keyed by track id) and returns a DataFrame
    in which each column is the monotonically decreasing version of % error
    for one of the dfs. Useful for summarizing how a bunch of timeseries
    converge on some value after a certain point.

    params
    -----
    dfs: (dict of pd.DataFrames)
        each value should be a track timeseries, keyed by track id
    window_size: (int or float)
        size of bins (in seconds)
    """
    error_series = []
    for t in dfs:
        df = dfs[t]
        df_window = df[df['t'] <= window_size].copy()
        if df_window.empty:
            continue
        if len(df_window) < 0.8 * window_size:
            continue
        end_time = df_window.iloc[len(df_window) - 1]['t']
        # print(t, len(df_window) / 60., end_time)
        d = calculate_error_window(df_window).set_index('t')['error_window']
        d = d.reindex(np.arange(0, window_size + 1))
        d = d.fillna(method='bfill')
        d = d.fillna(method='ffill')
        d.name = t
        error_series.append(d)
    return pd.concat(error_series, axis=1)
602ec4563e2aa368db42b762db7f91c3f868fb73
3,651,582
import numpy as np


def _get_cluster_medoids(idx_interval: np.ndarray, labels: np.ndarray,
                         pdist: np.ndarray, order_map: np.ndarray) \
        -> np.ndarray:
    """
    Get the indexes of the cluster medoids.

    Parameters
    ----------
    idx_interval : np.ndarray
        Embedding indexes.
    labels : np.ndarray
        Cluster labels.
    pdist : np.ndarray
        Condensed pairwise distance matrix.
    order_map : np.ndarray
        Map to convert label indexes to pairwise distance matrix indexes.

    Returns
    -------
    np.ndarray
        Array with the index of the medoid for each cluster.
    """
    medoids, m = [], len(idx_interval)
    for start_i, stop_i in _get_cluster_group_idx(labels):
        if stop_i - start_i > 1:
            row_sum = np.zeros(stop_i - start_i, np.float32)
            for row in range(stop_i - start_i):
                for col in range(row + 1, stop_i - start_i):
                    i, j = order_map[start_i + row], order_map[start_i + col]
                    if i > j:
                        i, j = j, i
                    pdist_ij = pdist[m * i + j - ((i + 2) * (i + 1)) // 2]
                    row_sum[row] += pdist_ij
                    row_sum[col] += pdist_ij
            medoids.append(idx_interval[start_i + np.argmin(row_sum)])
    return np.asarray(medoids, dtype=np.int32)
88739d625b5a58d41d9103824f5c733d6e2fcbf9
3,651,583
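The pdist indexing in _get_cluster_medoids relies on the standard condensed distance matrix layout; a quick sanity check of that index formula against scipy (assumed available):

import numpy as np
from scipy.spatial.distance import pdist, squareform

points = np.random.default_rng(0).random((5, 2))
condensed = pdist(points)
square = squareform(condensed)
m = len(points)
i, j = 1, 3  # any pair with i < j
# Same index arithmetic as in _get_cluster_medoids above.
assert condensed[m * i + j - ((i + 2) * (i + 1)) // 2] == square[i, j]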
import webbrowser


def perform_authorization_code_flow():
    """
    Performs Spotify's Authorization Code Flow to retrieve an API token.
    This uses the OAuth 2.0 protocol, which requires user input and consent.

    Output
    ______
    access_token: str
        a user's API token with the prompted permissions
    refresh_token: str
        a refresh token used to retrieve future API tokens
    expires_in: int
        the time (in seconds) until the token expires
    """
    # create a server that runs at the redirect URI. This is used to catch the
    # redirect response sent back by the OAuth authorization server
    server = OAuthServer(("127.0.0.1", 8080))

    # generate a URI with the required OAuth parameters and open it in a web browser
    auth_uri, code_verifier, state_token = generate_client_PKCE()
    webbrowser.open_new_tab(auth_uri)

    # parse the Spotify API's http response for the user's token
    raw_http_response = server.handle_auth().decode("utf-8")
    http_headers = parse_spotify_http_response(raw_http_response)

    # verify that state tokens match to prevent CSRF
    if state_token != http_headers["state"]:
        raise StateTokenException

    # exchange the code for an access token. The refresh token is automatically cached
    access_token, refresh_token, expires_in = exchange_auth_code(
        http_headers["code"], code_verifier
    )

    return access_token, refresh_token, expires_in
6939a4414be28f40d712cc1d54f994b02ce9a688
3,651,584
import numpy as np


def calculate_empirical_cdf(variable_values):
    """Calculate the empirical cumulative distribution function.

    The output tuple can be used to plot the empirical cdf of the input
    variable.

    Parameters
    ----------
    variable_values : numpy array
        Values of a given variable.

    Returns
    -------
    numpy array
        Ordered variable values.
    numpy array
        Cumulative proportions (0 to 1) corresponding to the ordered values.
    """
    # Sort array and calculate the cumulative proportions.
    values = np.sort(variable_values)
    accum_percentages = np.arange(1, len(values) + 1) / float(len(values))
    return values, accum_percentages
4c55f7b230318f212088a7218bac9929a9df01e5
3,651,585
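A short sketch of plotting the result as an ECDF staircase with matplotlib (assumed available; the sample data is illustrative):

import numpy as np
import matplotlib.pyplot as plt

samples = np.random.default_rng(0).normal(size=200)
values, accum = calculate_empirical_cdf(samples)
plt.step(values, accum, where='post')  # classic ECDF staircase
plt.xlabel('value')
plt.ylabel('cumulative proportion')
plt.show()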
import maya.cmds


def import_reference(filename):
    """
    Imports the contents of the given reference file into the current scene.

    :param filename: str
    """
    return maya.cmds.file(filename, importReference=True)
07747a3ceea95f222b81e7e3b938b758f30937b0
3,651,586
import numpy as np


def remove_persons_with_few_joints(all_keypoints, min_total_joints=10, min_leg_joints=2, include_head=False):
    """Remove bad skeletons before sending to the tracker"""
    good_keypoints = []
    for keypoints in all_keypoints:
        # include the head points (first 5 keypoints) or not
        total_keypoints = keypoints[5:, 1:] if not include_head else keypoints[:, 1:]
        num_valid_joints = sum(total_keypoints != 0)[0]  # number of valid joints
        num_leg_joints = sum(total_keypoints[-7:-1] != 0)[0]  # number of joints for legs
        if num_valid_joints >= min_total_joints and num_leg_joints >= min_leg_joints:
            good_keypoints.append(keypoints)
    return np.array(good_keypoints)
773e9317df75f5d4de12c574a3c599e2729bd427
3,651,588
def message_has_races(message): """ Checks to see if a message has a race kwarg. """ races = get_races_from_message(message) return len(races) > 0 and races[0] != ""
e2f01498f8783d2c311e1e6e06f1e9cac3fe36a6
3,651,589
import re


def _find_word(input):
    """
    _find_word - function to find words in the input sentence

    Inputs:
        - input : string
            Input sentence
    Outputs:
        - outputs : list
            List of words
    """
    # lower case
    input = input.lower()
    # split by whitespace
    input = re.split(pattern=r'[\s]+', string=input)
    # keep tokens matching the WORD_POS pattern
    valid_word = lambda x: True if re.findall(pattern=r'[a-z]*_[a-z]*', string=x) else False
    outputs = []
    for token in input:
        if valid_word(token):
            outputs.append(token.split('_')[0])
    return outputs
c2e4aa6b5c127bf03593a9aa2c1ae035e83f5a64
3,651,590
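For example, on a WORD_POS-tagged sentence (the input string is illustrative):

print(_find_word("The_DT cat_NN sat_VBD on the mat_NN"))
# Expected: ['the', 'cat', 'sat', 'mat']
# Tokens without an underscore ('on', 'the') are dropped.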
import tensorflow as tf
import tensorflow.experimental.numpy as tnp  # tnp alias assumed from usage


def logp1_r_squared_linreg(y_true, y_pred):
    """Compute a custom log1p r squared (follows the scipy linear regression
    implementation of R2).

    Parameters
    ----------
    y_true
        y_true.
    y_pred
        y_pred.

    Returns
    -------
    r2
    """
    y_pred, _ = tf.split(y_pred, num_or_size_splits=2, axis=2)
    x = tf.math.log(y_true + 1.0)
    y = tf.math.log(y_pred + 1.0)

    # means
    xmean = tnp.mean(x)
    ymean = tnp.mean(y)

    ssxm = tnp.mean(tnp.square(x - xmean))
    ssym = tnp.mean(tnp.square(y - ymean))
    ssxym = tnp.mean((x - xmean) * (y - ymean))

    # R-value
    r = ssxym / tnp.sqrt(ssxm * ssym)

    return r ** 2
ea33ff1f16e9dcfd8ea4bdc27ca8388bd5086b1d
3,651,591
from typing import Union import json def to_legacy_data_type(data_type: Union[JsonDict, dt.DataType]) -> JsonDict: """ Convert to simple datatypes ("String", "Long", etc) instead of JSON objects, if possible. The frontend expects the "type" field for enums and arrays to be lowercase. """ if not isinstance(data_type, dt.DataType): return json.loads(data_type) if data_type.is_simple: return data_type.into_simple() data = data_type.to_dict() if data["type"] == "Enum": data["type"] = "enum" if data["type"] == "Array": data["type"] = "array" return data
913c5e523ee74d86c3a64b98b291fb213513ae84
3,651,592
import numpy as np
import matplotlib.pyplot as plt


def display_dictionary(dictionary, renormalize=False, reshaping=None,
                       groupings=None, label_inds=False, highlighting=None,
                       plot_title=""):
    """
    Plot each of the dictionary elements side by side

    Parameters
    ----------
    dictionary : ndarray(float32, size=(s, n) OR (s, c, kh, kw))
        If the size of dictionary is (s, n), this is a 'fully-connected'
        dictionary where each basis element has the same dimensionality as the
        image it is trying to represent. n is the size of the image and s the
        number of basis functions. If the size of dictionary is (s, c, kh, kw),
        this is a 'convolutional' dictionary where each basis element is
        (potentially much) smaller than the image it is trying to represent. c
        is the number of channels in the input space, kh is the dictionary
        kernel height, and kw is the dictionary kernel width.
    renormalize : bool, optional
        If present, display basis functions on their own color scale, using
        standardize_for_imshow() to put values in the range [0, 1]. Will
        accentuate the largest-magnitude values in the dictionary element.
        Default False.
    reshaping : tuple(int, int), optional
        Should only be specified for a fully-connected dictionary (where
        dictionary.ndim==2). The dimension of each patch before vectorization
        to size n. We reshape the dictionary elements based on this.
        Default None
    groupings : optional
        Grouping specification used to arrange dictionary elements by group
        (passed through to get_dictionary_tile_imgs_arr_by_group).
        Default None.
    label_inds : bool, optional
        Superimpose the index into the dictionary of each element in the
        displayed grid--helps with quick lookup/selection of individual
        dictionary elements. Default False.
    highlighting : dictionary, optional
        This is used to re-sort and color code the dictionary elements
        according to scalar weights. Has three keys:
        'weights' : ndarray(float, size=(s,))
          The weights for each dictionary element
        'color_range': tuple(float, float)
          Values less than or equal to highlighting['color_range'][0] get
          mapped to dark blue, and values greater than or equal to
          highlighting['color_range'][1] get mapped to dark red.
        'reorder' : bool
          Use the highlighting weights to reorder the dictionary.
        Default None.
    plot_title : str, optional
        The title of the plot. Default ""

    Returns
    -------
    dictionary_figs : list
        A list containing pyplot figures. Can be saved separately, or
        whatever, from the calling function
    """
    if groupings is None:
        t_ims, raw_val_mapping, lab_w_pix_coords = get_dictionary_tile_imgs(
            dictionary, reshape_to_these_dims=reshaping, indv_renorm=renormalize,
            highlights=highlighting)
    else:
        t_ims = get_dictionary_tile_imgs_arr_by_group(
            dictionary, groupings, indv_renorm=renormalize,
            reshape_to_these_dims=reshaping, highlights=highlighting)
    fig_refs = []
    for fig_idx in range(len(t_ims)):
        fig = plt.figure(figsize=(10, 10))
        ax = plt.axes([0.075, 0.075, 0.85, 0.85])  # [left, bottom, width, height]
        fig.suptitle(plot_title + ', fig {} of {}'.format(
            fig_idx + 1, len(t_ims)), fontsize=20)
        im_ref = ax.imshow(t_ims[fig_idx], interpolation='None')
        if label_inds and groupings is None:
            for lab_and_coord in lab_w_pix_coords[fig_idx]:
                ax.text(lab_and_coord[2], lab_and_coord[1], lab_and_coord[0],
                        fontsize=6, verticalalignment='top',
                        horizontalalignment='left', color='w')
        ax.axis('off')
        if not renormalize and groupings is None:
            # add a luminance colorbar. Because there isn't good rgb colorbar
            # support in pyplot I hack this by adding another image subplot
            cbar_ax = plt.axes([0.945, 0.4, 0.01, 0.2])
            gradient = np.linspace(1.0, 0.0, 256)[:, None]
            cbar_ax.imshow(gradient, cmap='gray')
            cbar_ax.set_aspect('auto')
            cbar_ax.yaxis.tick_right()
            cbar_ax.xaxis.set_ticks([])
            cbar_ax.yaxis.set_ticks([255, 128, 0])
            cbar_ax.yaxis.set_ticklabels(['{:.2f}'.format(x)
                                          for x in raw_val_mapping], fontsize=8)
        fig_refs.append(fig)
    return fig_refs
58e363f7f14ec9bc8b88613777ff446ae63feb85
3,651,593
#: sentinel returned when the target is not found (see docstring)
UNSUCCESSFUL = -1


def rbinary_search(arr, target, left=0, right=None):
    """Recursive implementation of binary search.

    :param arr: input (sorted) list
    :param target: search item
    :param left: left most item in the search sub-array
    :param right: right most item in the search sub-array
    :return: index of item if found, `-1` otherwise
    """
    right = len(arr) - 1 if right is None else right
    #: base condition (search space is exhausted)
    if left > right:
        return UNSUCCESSFUL
    mid = left + (right - left) // 2
    if arr[mid] < target:
        #: focus on the right subarray
        result = rbinary_search(arr, target, mid + 1, right)
    elif arr[mid] > target:
        #: focus on the left subarray
        result = rbinary_search(arr, target, left, mid - 1)
    else:
        result = mid
    return result
23da6b29c122efe77c0dc592d2bfc42f324b1799
3,651,594
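Example calls on a sorted list:

data = [2, 3, 5, 7, 11, 13]
print(rbinary_search(data, 7))   # 3
print(rbinary_search(data, 4))   # -1 (not present)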
from typing import List, Tuple


def get_redis_posts(author: str) -> Tuple[str, List[str]]:
    """Return user's first and other post IDs

    Retrieve the user's first and other post IDs from Redis, then return
    them as a tuple in the form (first, extra)

    :param author: The username to get posts for
    :return: Tuple of the first post ID and a list of the remaining post IDs
    """
    return r.lindex(author, 0), r.lrange(author, 1, -1)
3653a1bdbc3cde8614098a705ae7f11de850165f
3,651,595
from datetime import datetime

from django.conf import settings
from django.utils.timezone import is_naive, localtime


def template_localtime(value, use_tz=None):
    """
    Checks if value is a datetime and converts it to local time if necessary.

    If use_tz is provided and is not None, that will force the value to
    be converted (or not), overriding the value of settings.USE_TZ.

    This function is designed for use by the template engine.
    """
    should_convert = (isinstance(value, datetime)
        and (settings.USE_TZ if use_tz is None else use_tz)
        and not is_naive(value)
        and getattr(value, 'convert_to_local_time', True))
    return localtime(value) if should_convert else value
7042696ae5291248ee2a2d56dcc5e943ccec92d8
3,651,596
def FilesBrowse(button_text='Browse', target=(ThisRow, -1), file_types=(("ALL Files", "*.*"),), disabled=False, initial_folder=None, tooltip=None, size=(None, None), auto_size_button=None, button_color=None, change_submits=False, enable_events=False, font=None, pad=None, key=None): """ :param button_text: text in the button (Default value = 'Browse') :param target: key or (row,col) target for the button (Default value = (ThisRow, -1)) :param file_types: (Default value = (("ALL Files", "*.*"))) :param disabled: set disable state for element (Default = False) :param initial_folder: starting path for folders and files :param tooltip: (str) text, that will appear when mouse hovers over the element :param size: (w,h) w=characters-wide, h=rows-high :param auto_size_button: True if button size is determined by button text :param button_color: button color (foreground, background) :param change_submits: If True, pressing Enter key submits window (Default = False) :param enable_events: Turns on the element specific events.(Default = False) :param font: Union[str, Tuple[str, int]] specifies the font family, size, etc :param pad: Amount of padding to put around element :param key: Used with window.FindElement and with return values to uniquely identify this element """ return Button(button_text=button_text, button_type=BUTTON_TYPE_BROWSE_FILES, target=target, file_types=file_types, initial_folder=initial_folder, change_submits=change_submits, enable_events=enable_events, tooltip=tooltip, size=size, auto_size_button=auto_size_button, disabled=disabled, button_color=button_color, font=font, pad=pad, key=key)
d712e5e41afa1d09482971864ce1b9af66332394
3,651,597
def f2p(phrase, max_word_size=15, cutoff=3): """Convert a Finglish phrase to the most probable Persian phrase. """ results = f2p_list(phrase, max_word_size, cutoff) return ' '.join(i[0][0] for i in results)
51a6f518481097bbba49685f32fb87ed65cc19ec
3,651,599
import pandas as pd


def read_sj_out_tab(filename):
    """Read an SJ.out.tab file as produced by the RNA-STAR aligner into a
    pandas DataFrame.

    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the SJ.out.tab file you want to read in

    Returns
    -------
    sj : pandas.DataFrame
        Dataframe of splice junctions

    """
    def int_to_intron_motif(n):
        if n == 0:
            return 'non-canonical'
        if n == 1:
            return 'GT/AG'
        if n == 2:
            return 'CT/AC'
        if n == 3:
            return 'GC/AG'
        if n == 4:
            return 'CT/GC'
        if n == 5:
            return 'AT/AC'
        if n == 6:
            return 'GT/AT'

    sj = pd.read_table(filename, header=None, names=COLUMN_NAMES,
                       low_memory=False)
    sj.intron_motif = sj.intron_motif.map(int_to_intron_motif)
    sj.annotated = sj.annotated.map(bool)
    sj.strand = sj.strand.astype('object')
    sj.strand = sj.strand.apply(lambda x: ['unk', '+', '-'][x])
    # See https://groups.google.com/d/msg/rna-star/B0Y4oH8ZSOY/NO4OJbbUU4cJ for
    # definition of strand in SJout files.
    sj = sj.sort_values(by=['chrom', 'start', 'end'])
    return sj
bc96813e1e69c8017f7ad0e5c945d4bf8c17e645
3,651,600
def gc_subseq(seq, k=2000):
    """
    Returns GC content of non-overlapping sub-sequences of size k.
    The result is a list.
    """
    res = []
    for i in range(0, len(seq) - k + 1, k):
        subseq = seq[i:i + k]
        gc = calculate_gc(subseq)
        res.append(gc)
    return res
9c2208f9dad291689ef97556e8aaa69213be6470
3,651,601
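A usage sketch for gc_subseq. calculate_gc is defined elsewhere in the source, so a minimal stand-in is assumed here:

def calculate_gc(subseq):
    # stand-in: fraction of G/C bases, assumed to mirror the real helper
    return (subseq.count('G') + subseq.count('C')) / len(subseq)

print(gc_subseq("ATGCGCATATGCGCGC", k=4))
# Windows: ATGC, GCAT, ATGC, GCGC -> [0.5, 0.5, 0.5, 1.0]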
def pcursor(): """Database cursor.""" dbconn = get_dbconn("portfolio") return dbconn.cursor()
50a19e3837a3846f10c44bcbb61933786d5bf84b
3,651,603
import math


def truncate(f, n):
    """ Floors a float to n digits after the decimal point. """
    return math.floor(f * 10 ** n) / 10 ** n
ae7e935a7424a15c02f7cebfb7de6ca9b4c715c0
3,651,605
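Worked examples; because truncate floors rather than truncating toward zero, negative values move away from zero:

print(truncate(3.14159, 2))    # 3.14
print(truncate(-3.14159, 2))   # -3.15 (floor, not trunc)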
import math

import numpy as np


def rotY(theta):
    """
    Returns the rotation matrix R such that R*v rotates v about the
    y axis through the angle theta (in radians).
    """
    st = math.sin(theta)
    ct = math.cos(theta)
    return np.matrix([[ ct, 0., st ],
                      [ 0., 1., 0. ],
                      [ -st, 0., ct ]])
1ed327485f9861eb8cf045a60f0a7352de1b4b25
3,651,607
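A quick sanity check: rotating the x unit vector by 90 degrees about the y axis sends it to -z:

import math
import numpy as np

v = rotY(math.pi / 2) * np.matrix([[1.0], [0.0], [0.0]])
print(v.round(6).T)  # [[ 0.  0. -1.]]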
def get_core_blockdata(core_index, spltcore_index, core_bases):
    """
    Get Core Offset and Length

    :param core_index: Index of the Core
    :param spltcore_index: Index of last core before split
    :param core_bases: Array with the base offset and core length, and
        optionally (at index 4) the offset after the split
    :return: Array with core offset and core length
    """
    core_base = int(core_bases[0])
    core_len = int(core_bases[1])
    core_split = 0
    if len(core_bases) > 4:
        core_split = int(core_bases[4])
    core_offset = core_base + core_index * core_len
    if core_split and core_index + 2 > spltcore_index:
        core_offset = core_split + (core_index - spltcore_index + 1) * core_len
    return [core_offset, core_len]
85efb96fa45ecfa3f526374c677e57c70e3dc617
3,651,608
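A worked example of get_core_blockdata under assumed offsets (all values are illustrative):

# No split info: cores are laid out contiguously from the base offset.
print(get_core_blockdata(2, 4, ['4096', '1024']))
# -> [6144, 1024]  (4096 + 2 * 1024)

# With a split offset at index 4: cores past the split index relocate.
print(get_core_blockdata(4, 4, ['4096', '1024', '0', '0', '65536']))
# -> [66560, 1024]  (65536 + (4 - 4 + 1) * 1024)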
from datetime import timedelta

import python_jwt as jwt  # generate_jwt/verify_jwt match the python_jwt API


def make_bench_verify_token(alg):
    """ Return a function which will verify a token generated for a particular algorithm """
    # priv_keys, pub_keys, priv_key, pub_key and payload are module-level fixtures
    privk = priv_keys[alg].get('default', priv_key)
    token = jwt.generate_jwt(payload, privk, alg, timedelta(days=1))

    def f(_):
        """ Verify token """
        pubk = pub_keys[alg].get('default', pub_key)
        jwt.verify_jwt(token, pubk, [alg])

    return f
4e7da537ab7027711d338d6d3155c198c371391b
3,651,609
from flask import jsonify


def status():
    """ Status of the API """
    return jsonify({'status': 'OK'})
579c265c88ac8e2c3b5d19000564e90f106be3f5
3,651,610
def calc_median(input_list):
    """Sort the list and return the median."""
    new_list = sorted(input_list)
    len_list = len(new_list)
    if len_list % 2 == 0:
        return (new_list[len_list // 2 - 1] + new_list[len_list // 2]) / 2
    else:
        return new_list[len_list // 2]
28c0331d1f2dab56d50d63fa59d4dda79a177057
3,651,611
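Quick checks for both parities:

print(calc_median([3, 1, 2]))      # 2
print(calc_median([4, 1, 3, 2]))   # 2.5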
def _load_eigenvalue(h5_result, log):
    """Loads a RealEigenvalues, ComplexEigenvalues, or BucklingEigenvalues result"""
    class_name = _cast(h5_result.get('class_name'))
    table_name = '???'
    title = ''
    nmodes = _cast(h5_result.get('nmodes'))
    if class_name == 'RealEigenvalues':
        obj = RealEigenvalues(title, table_name, nmodes=nmodes)
    elif class_name == 'ComplexEigenvalues':
        obj = ComplexEigenvalues(title, table_name, nmodes)
    elif class_name == 'BucklingEigenvalues':
        obj = BucklingEigenvalues(title, table_name, nmodes=nmodes)
    else:
        log.warning('  %r is not supported...skipping' % class_name)
        return None

    assert obj.class_name == class_name, 'class_name=%r selected; should be %r' % (obj.class_name, class_name)
    keys_to_skip = ['class_name', 'is_complex', 'is_real', 'table_name_str']
    for key in h5_result.keys():
        if key in keys_to_skip:
            continue
        else:
            datai = _cast(h5_result.get(key))
            if isinstance(datai, bytes):
                pass
            elif isinstance(datai, str):
                datai = datai.encode('latin1')
            else:
                assert not isinstance(datai, bytes), key
            setattr(obj, key, datai)
    return obj
f27d65d84481e1bb91a0d2282945da0944de1190
3,651,612
def _GenerateBaseResourcesAllowList(base_module_rtxt_path,
                                    base_allowlist_rtxt_path):
    """Generate an allowlist of base master resource ids.

    Args:
      base_module_rtxt_path: Path to base module R.txt file.
      base_allowlist_rtxt_path: Path to base allowlist R.txt file.
    Returns:
      The resource ids (a dict keys view).
    """
    ids_map = resource_utils.GenerateStringResourcesAllowList(
        base_module_rtxt_path, base_allowlist_rtxt_path)
    return ids_map.keys()
b6b3ef988b343115e4e1b2950667f07fd3771b19
3,651,613
import numpy as np


def finite_min_max(array_like):
    """
    Obtain the finite (non-NaN, non-Inf) minimum and maximum of an array.

    Parameters
    ----------
    array_like : array_like
        A numeric array of some kind, possibly containing NaN or Inf values.

    Returns
    -------
    tuple
        Two-valued tuple containing the finite minimum and maximum of
        *array_like*.
    """
    array_like = np.asanyarray(array_like)
    finite_values = array_like[np.isfinite(array_like)]
    return finite_values.min(), finite_values.max()
c300b55d2e53685fb0ade9809e13af4cfae4b1a8
3,651,614
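For example:

import numpy as np

arr = np.array([np.nan, -np.inf, 2.0, 5.0, np.inf])
print(finite_min_max(arr))  # (2.0, 5.0)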
import numpy as np


def list_extend1(n):
    """ Build the data up in a list, then convert it to a numpy array. """
    l = []
    num_to_extend = 100
    data = range(num_to_extend)
    for i in range(n // num_to_extend):
        l.extend(data)
    return np.array(l)
7a2240a397e32fc438f4245b92f97f103752b60c
3,651,615