Dataset columns:
  content : string, length 35 to 762k
  sha1    : string, length 40
  id      : int64, 0 to 3.66M
from functools import partial def create_partial_image_rdd_decoder(key_type): """Creates a partial, tuple decoder function. Args: key_type (str): The type of the key in the tuple. Returns: A partial :meth:`~geopyspark.protobufregistry.ProtoBufRegistry.image_rdd_decoder` function that requires ``proto_bytes`` to execute. """ return partial(image_rdd_decoder, key_decoder=key_type)
2df5c506cf8603e9e4acbb4ccb77f8f5d830fe82
3,654,000
import json def to_json(simple_object): """ Serializes the ``simple_object`` to JSON using the EnhancedJSONEncoder above. """ return json.dumps(simple_object, cls=EnhancedJSONEncoder)
c9f8c9474210661a7b63924a72442014c831e170
3,654,001
def bootstrap_metadata(): """ Provides cluster metadata which includes security modes """ return _metadata_helper('bootstrap-config.json')
ec2294606446a9b78a603826fca6f447ed2d9bb9
3,654,002
def unique_slug(*, title: str, new_slug: str = None) -> str: """Create unique slug. Args: title: The text from which the slug will be generated. new_slug: Custom slug to hard-code. Returns: The created slug or the hard-coded slug """ if new_slug is None: slug = slugify(title) new_slug = f"{slug}-{random_string()}" return new_slug
b4e119502edf144f8393b38a47e3fbeb25335aff
3,654,003
import math def dcg(r, k=None): """The Burges et al. (2005) version of DCG. This is what everyone uses (except trec_eval) :param r: results :param k: cut-off :return: sum over i of (2^rel_i - 1) / log2(i + 2) """ result = sum([(pow(2, rel) - 1) / math.log(rank + 2, 2) for rank, rel in enumerate(r[:k])]) return result
d93c500ba55411807570c8efebdeaa49ce7fe288
3,654,004
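A quick worked check of the `dcg` formula above (standard library only; the relevance values are made-up illustration data):

import math

# Burges DCG for r = [3, 2, 3]:
#   rank 0: (2**3 - 1) / log2(0 + 2) = 7 / 1.0    = 7.0
#   rank 1: (2**2 - 1) / log2(1 + 2) = 3 / 1.585 ~= 1.893
#   rank 2: (2**3 - 1) / log2(2 + 2) = 7 / 2.0    = 3.5
r = [3, 2, 3]
result = sum((2 ** rel - 1) / math.log(rank + 2, 2) for rank, rel in enumerate(r))
print(round(result, 3))  # ~12.393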
async def detect_objects(computervision_client, image_url): """Detect objects from a remote image""" detect_objects_results = computervision_client.detect_objects(image_url) return detect_objects_results.objects
9adb2a3b2c08f99187159ad6a22047bbf3d4c30a
3,654,005
def rgb(r=None, g=None, b=None, smooth=True, force=True): """ Set RGB values with PWM signal :param r: red value 0-1000 :param g: green value 0-1000 :param b: blue value 0-1000 :param smooth: runs colors change with smooth effect :param force: clean fade generators and set color :return: verdict string """ def __buttery(r_from, g_from, b_from, r_to, g_to, b_to): step_ms = 2 interval_sec = 0.3 if Data.RGB_CACHE[3] == 0: # Turn from OFF to on (to colors) r_from, g_from, b_from = 0, 0, 0 Data.RGB_CACHE[3] = 1 r_gen = transition(from_val=r_from, to_val=r_to, step_ms=step_ms, interval_sec=interval_sec) g_gen = transition(from_val=g_from, to_val=g_to, step_ms=step_ms, interval_sec=interval_sec) b_gen = transition(from_val=b_from, to_val=b_to, step_ms=step_ms, interval_sec=interval_sec) for _r in r_gen: Data.RGB_OBJS[0].duty(_r) Data.RGB_OBJS[1].duty(g_gen.__next__()) Data.RGB_OBJS[2].duty(b_gen.__next__()) sleep_ms(step_ms) __RGB_init() if force and Data.FADE_OBJS[0]: Data.FADE_OBJS = (None, None, None) # Dynamic input handling: user/cache r = Data.RGB_CACHE[0] if r is None else r g = Data.RGB_CACHE[1] if g is None else g b = Data.RGB_CACHE[2] if b is None else b # Set RGB channels if smooth: __buttery(r_from=Data.RGB_CACHE[0], g_from=Data.RGB_CACHE[1], b_from=Data.RGB_CACHE[2], r_to=r, g_to=g, b_to=b) else: Data.RGB_OBJS[0].duty(int(r)) Data.RGB_OBJS[1].duty(int(g)) Data.RGB_OBJS[2].duty(int(b)) # Save channel duties if LED on if r > 0 or g > 0 or b > 0: Data.RGB_CACHE = [r, g, b, 1] else: Data.RGB_CACHE[3] = 0 # Save state machine (cache) __persistent_cache_manager('s') return status()
5d5455a785a719e5252c3333e45c06352e8769ed
3,654,006
from .core.configs import get_configs from .core.configs import set_configs from .core.configs import del_configs def handle_config(args, configs): """Handle `config` subcommand :param args: parsed arguments :type args: `argparse.Namespace` :param configs: configurations object :type configs: ``sfftk.core.configs.Configs`` :return int status: status """ if args.config_subcommand == "get": return get_configs(args, configs) elif args.config_subcommand == "set": return set_configs(args, configs) elif args.config_subcommand == "del": return del_configs(args, configs)
60fb9b289e99369a9f83bdf675a793fe85191257
3,654,007
from typing import Dict from typing import List def filter_safe_actions( action_shield: Dict[int, Dict[ActionData, int]], energy: int, bel_supp_state: int ) -> List[ActionData]: """Utility function to filter actions according to the energy they require, with a given action shield. Parameters ---------- action_shield : Dict[int, Dict[ActionData, int]] Mapping from belief support state to actions and the minimum energy each action requires. energy : int Available energy. bel_supp_state : int State in belief support cmdp to filter actions by. Returns ------- List[ActionData] List of available actions for given energy and given belief support cmdp state. """ return [ action for action, min_energy in action_shield[bel_supp_state].items() if min_energy <= energy ]
e62b24233792d4decca1bd853b6344a8541882be
3,654,008
def get_routes_by_tunnel(compute, project, region, tunnel, restore, debug): """ Filters all routes to a specific project, region and tunnel.""" match = '%s/regions/%s/vpnTunnels/%s' % (project, region, tunnel) routes_all = list_routes(compute, project, debug) routes = [] for route in routes_all: if 'nextHopVpnTunnel' in route: token = '/'.join(route['nextHopVpnTunnel'].split('/')[-5:]) if token == match and restore == is_route_we_created(route): routes.append(route) return routes
97500f97b75a225cf08329a12d841e0c92fe3c66
3,654,009
def fcard(card): """Create format string for card display""" return f"{card[0]} {card[1]}"
ca3866011b418bf35e1b076afd7134926a9382f9
3,654,010
def resetChapterProgress(chapterProgressDict, chapter, initRepeatLevel): """This method resets chapter progress and sets initial level for repeat routine. Args: chapterProgressDict (dict): Chapter progress data. chapter (int): Number of the chapter. initRepeatLevel (int): Initial level for repeat routine. Returns: dictionary: The reset chapter progress dictionary with the initial level set. """ chapterProgressDict[chapter]["status"] = "Not started" chapterProgressDict[chapter]["progress"]["current"] = 0 chapterProgressDict[chapter]["correct"] = {"correct":0, "subtotal":0, "rate":''} chapterProgressDict[chapter]["repeatLevel"] = initRepeatLevel return chapterProgressDict
e02d6e97f556a2c080c2bc273255aacedf7bb086
3,654,011
def aStarSearch(problem, heuristic=nullHeuristic): """Search the node that has the lowest combined cost and heuristic first.""" "*** YOUR CODE HERE ***" priorityqueue = util.PriorityQueue() priorityqueue.push( (problem.getStartState(), [], 0), heuristic(problem.getStartState(), problem) ) checkedstates = [] while not priorityqueue.isEmpty(): state, actions, curCost = priorityqueue.pop() if problem.isGoalState(state): return actions if(not state in checkedstates): checkedstates.append(state) for child in problem.getSuccessors(state): point = child[0] direction = child[1] cost = child[2] g = curCost + cost heuristic_cost = heuristic(point, problem) sum_g_heuristic = g + heuristic_cost priorityqueue.push((point, actions + [direction], g), sum_g_heuristic) # util.raiseNotDefined()
0d84de971424d82020b48e35443bfe92cc2665d0
3,654,012
import numpy as np def gen_flag(p1=0.5, **_args): """ Generates a flag. :param p1: probability of flag = 1 :param _args: :return: flag """ return 1 if np.random.random() <= p1 else 0
0accef3f2fd03c4918b52db2f6c72d405243af87
3,654,013
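A minimal sanity check of the corrected draw in `gen_flag` above (assumes NumPy is installed; the seed is arbitrary):

import numpy as np

np.random.seed(0)
# With p1 = 0.3, roughly 30% of draws should come out as 1.
flags = [1 if np.random.random() <= 0.3 else 0 for _ in range(10000)]
print(sum(flags) / len(flags))  # ~0.3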
from collections import OrderedDict import re def load_migrations(path): """ Given a path, load all migrations in that path. :param path: path to look for migrations in :type path: pathlib.Path or str """ migrations = OrderedDict() r = re.compile(r'^[0-9]+_.+\.py$') filtered = filter( lambda x: r.search(x) is not None, [x.name for x in path.iterdir()] ) for migration in sorted(filtered): migration_id = migration[:-3] migrations[migration_id] = { 'id': migration_id, 'commit_time': None, 'status': 0, 'module': load_module(migration_id, path / migration), 'table': None } return migrations
5bd761e6a9ffab4aa08a473e8d2c667e7fb87813
3,654,014
def filterkey(e, key, ns=False): """ Returns a list built from the list C{e} using the attribute C{key}. B{Example 1}: extracting the SRS from a list containing C{dict}s. >>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}] >>> key = "SRS" >>> filterkey(e, key) ['12345', '54321'] B{Example 2}: extracting the name from a list containing C{dict}s. >>> e = [{'SRS':'12345', 'Name':'WGS-1'}, {'SRS':'54321', 'Name':'WGS-2'}] >>> key = "Name" >>> filterkey(e, key) ['WGS-1', 'WGS-2'] @param e: list @type e: list @param key: key @type key: str @param ns: whether L{_helpers.ns} should additionally be applied @type ns: bool @return: list of the matched attributes C{key} @rtype: list """ l = [] key_split = key.split("=") if isinstance(e, list): for i in e: if len(key_split)>1: if i[key_split[0]] == key_split[1]: if ns: l.append(_ns(i[key_split[0]])) else: l.append(i[key_split[0]]) else: if ns: l.append(_ns(i[key])) else: l.append(i[key]) return l
f56967e9623622d2dffdb9fe6f128893df9ad798
3,654,015
def get_runtime_preinstalls(internal_storage, runtime): """ Download runtime information from storage at deserialize """ if runtime in default_preinstalls.modules: logger.debug("Using serialize/default_preinstalls") runtime_meta = default_preinstalls.modules[runtime] preinstalls = runtime_meta['preinstalls'] else: logger.debug("Downloading runtime pre-installed modules from COS") runtime_meta = internal_storage.get_runtime_info(runtime) preinstalls = runtime_meta['preinstalls'] if not runtime_valid(runtime_meta): raise Exception(("The indicated runtime: {} " "is not appropriate for this Python version.") .format(runtime)) return preinstalls
3e9eda4f116f0f5baf38a2fea827c575e814deaa
3,654,016
import os def update_graph(slider_value): """ Update the graph depending on which value of the slider is selected Parameters ---------- slider_value: the current slider value, None when starting the app Returns ------- dcc.Graph object holding the new Plotly Figure as given by the plot_slice function """ value = slider_value if slider_value is not None else 0 fname = os.path.join(solution_dir, f"{solution_prefix}_{value}.csv") return plot_slice(fname)
4a7e8939c1a86239a08f2fa460f7bf407fa5a686
3,654,017
def on_coordinator(f): """A decorator that, when applied to a function, makes a spawn of that function happen on the coordinator.""" f.on_coordinator = True return f
d9c97c47255d165c67a4eb67a18cc85c3c9b9386
3,654,018
def redirect_subfeed(match): """ URL migration: my site used to have per-category/subcategory RSS feeds as /category/path/rss.php. Many of the categories have Path-Aliases in their respective .cat files, but some of the old subcategories no longer apply, so now I just bulk-redirect all unaccounted-for subcategories to the top-level category's feed. """ return flask.url_for( 'category', category=match.group(1), template='feed'), True
101509cbf75ce9be5307edd265988cca662a7880
3,654,019
from typing import List from typing import Tuple import random def _train_test_split_dataframe_strafified(df:pd.DataFrame, split_cols:List[str], test_ratio:float=0.2, verbose:int=0, **kwargs) -> Tuple[pd.DataFrame, pd.DataFrame]: """ ref. the function `train_test_split_dataframe` """ df_inspection = df[split_cols].copy() for item in split_cols: all_entities = df_inspection[item].unique().tolist() entities_dict = {e: str(i) for i, e in enumerate(all_entities)} df_inspection[item] = df_inspection[item].apply(lambda e:entities_dict[e]) inspection_col_name = "Inspection" * (max([len(c) for c in split_cols])//10+1) df_inspection[inspection_col_name] = df_inspection.apply( func=lambda row: "-".join(row.values.tolist()), axis=1 ) item_names = df_inspection[inspection_col_name].unique().tolist() item_indices = { n: df_inspection.index[df_inspection[inspection_col_name]==n].tolist() for n in item_names } if verbose >= 1: print("item_names = {}".format(item_names)) for n in item_names: random.shuffle(item_indices[n]) test_indices = [] for n in item_names: item_test_indices = item_indices[n][:int(round(test_ratio*len(item_indices[n])))] test_indices += item_test_indices if verbose >= 2: print("for the item `{}`, len(item_test_indices) = {}".format(n, len(item_test_indices))) df_test = df.loc[df.index.isin(test_indices)].reset_index(drop=True) df_train = df.loc[~df.index.isin(test_indices)].reset_index(drop=True) return df_train, df_test
82c4f2a1da3d7e7a4a854c037ab209dffe01f5b2
3,654,020
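A usage sketch for the stratified split above, assuming pandas is installed and the function is in scope; the tiny frame below is illustrative only:

import pandas as pd

df = pd.DataFrame({
    "label": ["a"] * 8 + ["b"] * 8,
    "value": range(16),
})
# Hold out ~25% of the rows within each "label" group.
train, test = _train_test_split_dataframe_strafified(df, split_cols=["label"], test_ratio=0.25)
print(len(train), len(test))  # 12 4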
def index(): """ This route will render a template. If a query string comes into the URL, it will return a parsed dictionary of the query string keys & values, using request.args """ args = None if request.args: args = request.args return render_template("public/index.html", args=args)
ae76de55fb9263264d87447fc2fe173ad62e3245
3,654,021
from typing import Type from typing import List def multi_from_dict(cls: Type[T_FromDict], data: dict, key: str) -> List[T_FromDict]: """ Converts {"foo": [{"bar": ...}, {"bar": ...}]} into list of objects using the cls.from_dict method. """ return [cls.from_dict(raw) for raw in data.get(key, [])]
44b9aa28d93f24cc76cddf3cba9fbcbcde937d3d
3,654,022
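A usage sketch for `multi_from_dict` above (function assumed in scope); `Bar` is a hypothetical class supplying the `from_dict` classmethod the helper expects:

from dataclasses import dataclass

@dataclass
class Bar:
    bar: int

    @classmethod
    def from_dict(cls, data: dict) -> "Bar":
        return cls(bar=data["bar"])

data = {"foo": [{"bar": 1}, {"bar": 2}]}
print(multi_from_dict(Bar, data, "foo"))  # [Bar(bar=1), Bar(bar=2)]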
import inspect def infer_signature(func, class_name=''): """Decorator that infers the signature of a function.""" # infer_method_signature should be idempotent if hasattr(func, '__is_inferring_sig'): return func assert func.__module__ != infer_method_signature.__module__ try: funcfile = get_defining_file(func) funcsource, sourceline = inspect.getsourcelines(func) sourceline -= 1 # getsourcelines is apparently 1-indexed except: return func funcid = (class_name, func.__name__, funcfile, sourceline) func_source_db[funcid] = ''.join(funcsource) try: func_argid_db[funcid] = getfullargspec(func) vargs_name, kwargs_name = func_argid_db[funcid][1], func_argid_db[funcid][2] except TypeError: # Not supported. return func def wrapper(*args, **kwargs): global is_performing_inference # If we're already doing inference, we should be in our own code, not code we're checking. # Not doing this check sometimes results in infinite recursion. if is_performing_inference: return func(*args, **kwargs) expecting_type_error, got_type_error, got_exception = False, False, False is_performing_inference = True try: callargs = getcallargs(func, *args, **kwargs) # we have to handle *args and **kwargs separately if vargs_name: va = callargs.pop(vargs_name) if kwargs_name: kw = callargs.pop(kwargs_name) arg_db = {arg: infer_value_type(value) for arg, value in callargs.items()} # *args and **kwargs need to merge the types of all their values if vargs_name: arg_db[vargs_name] = union_many_types(*[infer_value_type(v) for v in va]) if kwargs_name: arg_db[kwargs_name] = union_many_types(*[infer_value_type(v) for v in kw.values()]) except TypeError: got_exception = expecting_type_error = True except: got_exception = True finally: is_performing_inference = False try: ret = func(*args, **kwargs) except TypeError: got_type_error = got_exception = True raise except: got_exception = True raise finally: if not got_exception: assert not expecting_type_error # if we didn't get a TypeError, update the actual database for arg, t in arg_db.items(): update_db(func_arg_db, (funcid, arg), t) # if we got an exception, we don't have a ret if not got_exception: is_performing_inference = True try: type = infer_value_type(ret) update_db(func_return_db, funcid, type) except: pass finally: is_performing_inference = False return ret if hasattr(func, '__name__'): wrapper.__name__ = func.__name__ wrapper.__is_inferring_sig = True return wrapper
e1ab5d9850b4a3026ecd563324026c9cd1675d31
3,654,023
def field_wrapper(col): """Helper function to dynamically create list display method for :class:`ViewProfilerAdmin` to control value formatting and sort order. :type col: :data:`settings.ReportColumnFormat` :rtype: function """ def field_format(obj): return col.format.format(getattr(obj, col.attr_name)) field_format.short_description = col.name field_format.admin_order_field = col.attr_name return field_format
0a41b5462a6905af5d1f0cc8b9f2bdd00206e6bd
3,654,024
def is_isotropic(value): """ Determine whether all elements of a value are equal """ if hasattr(value, '__iter__'): return np.all(value[1:] == value[:-1]) else: return True
9c99855d53cf129931c9a9cb51fc491d2fe0df21
3,654,025
from itertools import zip_longest def grouper(iterable, n, fillvalue=None): """Iterate over a given iterable in n-size groups.""" args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue)
26adffa4a3c748defe3732d5b94358dda20d095c
3,654,026
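`grouper` is the classic itertools recipe; a self-contained demonstration:

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    """Iterate over a given iterable in n-size groups."""
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

print(list(grouper("ABCDEFG", 3, fillvalue="x")))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]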
def format_gro_coord(resid, resname, aname, seqno, xyz): """ Print a line in accordance with .gro file format, with nine decimal places of precision. Nine decimal places of precision are necessary to get forces below 1e-3 kJ/mol/nm. @param[in] resid The number of the residue that the atom belongs to @param[in] resname The name of the residue that the atom belongs to @param[in] aname The name of the atom @param[in] seqno The sequential number of the atom @param[in] xyz A 3-element array containing x, y, z coordinates of that atom """ return "%5i%-5s%5s%5i % 13.9f % 13.9f % 13.9f" % (resid,resname,aname,seqno,xyz[0],xyz[1],xyz[2])
ceeeeeafe4f7484fa17ee4ebd79363209c8f7391
3,654,027
def return_as_list(ignore_nulls: bool = False): """ Enables you to write a list-returning functions using a decorator. Example: >>> def make_a_list(lst): >>> output = [] >>> for item in lst: >>> output.append(item) >>> return output Is equivalent to: >>> @return_as_list() >>> def make_a_list(lst): >>> for item in lst: >>> yield item Essentially a syntactic sugar for @for_argument(returns=list) :param ignore_nulls: if True, then if your function yields None, it won't be appended. """ def outer(fun): @wraps(fun) def inner(*args, **kwargs): output = [] for item in fun(*args, **kwargs): if item is None and ignore_nulls: continue output.append(item) return output return inner return outer
5f5f089e5664ffbbd5d78a71bf984909e677bcc5
3,654,028
from urllib.request import urlopen from bs4 import BeautifulSoup as bs def get_pingback_url(target_url): """ Grabs a page, and reads the pingback url for it. """ logger.debug("get_pingback_url called...") logger.debug("grabbing " + str(target_url)) html = urlopen(target_url).read() logger.info( "Got %d bytes" % len(html)) soup = bs(html, 'html.parser') # check for link tags... pbaddress = None for l in soup.findAll('link'): if 'pingback' in (l.get('rel') or []): pbaddress = str(l.get('href')) logger.debug("Got: %s" % pbaddress) logger.debug("get_pingback_url completed") return pbaddress
cfca2962cf7fee480bae1c0654358dec70906fdd
3,654,029
def assemble_insert_msg_content(row, column, digit): """Assemble a digit insertion message.""" return str(row) + CONTENT_SEPARATOR + str(column) + CONTENT_SEPARATOR + str(digit)
5c4a40aedf4569a8f12793356c2cbedecf32d839
3,654,030
from typing import Union def get_difference_of_means(uni_ts: Union[pd.Series, np.ndarray]) -> np.float64: """ :return: The absolute difference between the means of the first and the second halves of a given univariate time series. """ mid = int(len(uni_ts) / 2) return np.abs(get_mean(uni_ts[:mid]) - get_mean(uni_ts[mid:]))
5e7e709afffde843f3f1be7941c620d4f248e8b4
3,654,031
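A worked check of `get_difference_of_means` above, assuming the module-level `get_mean` is a plain arithmetic mean:

import numpy as np

ts = np.array([1.0, 1.0, 1.0, 5.0, 5.0, 5.0])
mid = len(ts) // 2
# First half averages 1.0, second half averages 5.0:
print(np.abs(np.mean(ts[:mid]) - np.mean(ts[mid:])))  # 4.0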
def user_collection(): """ User's collection page 1. Get parameters - current page 2. Return data - current page - total pages - items on each page :return: """ user = g.user if not user: return "Please log in first" # Get the current page page = request.args.get('p', 1) page_show = constants.USER_COLLECTION_MAX_NEWS # Validate parameters try: page = int(page) except Exception as e: current_app.logger.error(e) # return jsonify(errno=RET.PARAMERR, errmsg="invalid parameter") abort(404) try: user_collection = user.collection_news.paginate(page, page_show) currentPage = user_collection.page totalPage = user_collection.pages items = user_collection.items except Exception as e: current_app.logger.error(e) # return jsonify(errno=RET.DBERR, errmsg="database query error") abort(404) user_collection_list = [] for item in items: user_collection_list.append(item.to_review_dict()) data = { 'currentPage': currentPage, 'totalPage': totalPage, 'user_collection_list': user_collection_list } return render_template("news/user_collection.html", data=data)
d5ad3a3121dfc169952cad514b3fa930662dd2af
3,654,032
def activity(*, domain, name, version): """Decorator that registers a function to `ACTIVITY_FUNCTIONS` """ def function_wrapper(func): identifier = '{}:{}:{}'.format(name, version, domain) ACTIVITY_FUNCTIONS[identifier] = func return func return function_wrapper
1529c19b8d1d5be02c45e87a446abf62aafc143a
3,654,033
def OEDParser(soup, key): """ The parser of Oxford Learner's Dictionary. """ rep = DicResult(key) rep.defs = parseDefs(soup) rep.examples = parseExample(soup) return rep
21b4670047e06f1251e70e09aed5da4dba0449ec
3,654,034
def X_SIDE_GAP_THREE_METHODS(data): """ Upside/Downside Gap Three Methods :param pd.DataFrame data: pandas DataFrame with open, high, low, close data :return pd.Series: with indicator data calculation results """ fn = Function('CDLXSIDEGAP3METHODS') return fn(data)
87d3ca966d13756c3cd2bf15647a4d696f1e1f02
3,654,035
def fix_legacy_database_uri(uri): """ Fixes legacy Database uris, like postgres:// which is provided by Heroku but no longer supported by SqlAlchemy """ if uri.startswith('postgres://'): uri = uri.replace('postgres://', 'postgresql://', 1) return uri
aa3aa20110b7575abf77534d08a35dccb04b731d
3,654,036
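A self-contained demonstration of the URI fix above (the credentials are placeholders):

def fix_legacy_database_uri(uri):
    """Rewrite Heroku-style postgres:// URIs for SQLAlchemy."""
    if uri.startswith('postgres://'):
        uri = uri.replace('postgres://', 'postgresql://', 1)
    return uri

print(fix_legacy_database_uri('postgres://user:secret@host:5432/db'))
# postgresql://user:secret@host:5432/db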
def create_need(): """ Creates the need in the database :return: """ student = Student.query.filter_by(id_user=session['uid']).first() title = request.form.get('title') description = request.form.get('description') speaker_id = request.form.get('speaker_id') estimated_tokens = int(request.form.get('estimated_tokens')) if estimated_tokens < 0: estimated_tokens = 0 need = Need( title=title, description=description, estimated_tokens=estimated_tokens, status='En cours', id_assigned_team=student.team.id, id_assigned_speaker=speaker_id ) db.session.add(need) try: db.session.commit() except: abort(500) return redirect(url_for('get_student_dashboard'))
d8af541668818e1ed40382b4ed457ac819ab3ce6
3,654,037
from sklearn.preprocessing import MinMaxScaler def insert_rolling_mean_columns(data, column_list, window): """This function selects the columns of a dataframe according to a provided list of strings, re-scales its values and inserts a new column in the dataframe with the rolling mean of each variable in the column list and the provided window length. Params: data: original dataframe column_list: list of columns to select window: window length to calculate rolling mean """ scaler = MinMaxScaler() data[column_list] = scaler.fit_transform(data[column_list]) for var in column_list: data[var + "_RollingMean"] = data[var].rolling(window).mean() return data
fd37ca307aaa0d755cd59aa69320003d02cb677a
3,654,038
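A usage sketch for `insert_rolling_mean_columns` above, assuming pandas and scikit-learn are installed and the function is in scope:

import pandas as pd

df = pd.DataFrame({"price": [1.0, 2.0, 3.0, 4.0]})
out = insert_rolling_mean_columns(df, ["price"], window=2)
print(out.columns.tolist())  # ['price', 'price_RollingMean']
print(out["price_RollingMean"].tolist())  # [nan, 0.1666..., 0.5, 0.8333...]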
def get_url_names(): """ Get links to the content Returns: Here, a list of files in *.srt format """ files = ['srts/Iron Man02x26.srt', 'srts/Iron1and8.srt'] return files
4ee8fdd5ab9efc04eda4bfe1205e073064030520
3,654,039
from datetime import datetime def unix_utc_now() -> int: """ Return the number of seconds passed from January 1, 1970 UTC. """ delta = datetime.utcnow() - datetime(1970, 1, 1) return int(delta.total_seconds())
b9768b60cf6f49a7cccedd88482d7a2b21cf05a2
3,654,040
from os import environ from typing import Set def _preload_specific_vars(env_keys: Set[str]) -> Store: """Preloads env vars from environ in the given set.""" specified = {} for env_name, env_value in environ.items(): if env_name not in env_keys: # Skip vars that have not been requested. continue specified[env_name] = env_value return specified
6eb6c09f56235b024f15749d8ec65e8801991b43
3,654,041
import re import urllib.parse as urlparse def _format_workflow_id(id): """ Add workflow prefix to and quote a tool ID. Args: id (str): ... """ id = urlparse.unquote(id) if not re.search('^#workflow', id): return urlparse.quote_plus('#workflow/{}'.format(id)) else: return urlparse.quote_plus(id)
ea6b6f83ef430128c8a876c9758ce3d70b1bef63
3,654,042
def calc_log_sum(Vals, sigma): """ Returns the optimal value given the choice specific value functions Vals. Parameters ---------- Vals : [numpy.array] A numpy.array that holds choice specific values at common grid points. sigma : float A number that controls the variance of the taste shocks Returns ------- V : [numpy.array] A numpy.array that holds the integrated value function. """ # Assumes that NaNs have been replaced by -numpy.inf or similar if sigma == 0.0: # We could construct a linear index here and use unravel_index. V = np.amax(Vals, axis=0) return V # else we have a taste shock maxV = np.max(Vals, axis=0) # calculate maxV+sigma*log(sum_i=1^J exp((V[i]-maxV))/sigma) sumexp = np.sum(np.exp((Vals - maxV) / sigma), axis=0) LogSumV = np.log(sumexp) LogSumV = maxV + sigma * LogSumV return LogSumV
18f71725ea4ced0ea5243fb201f25ae636547947
3,654,043
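A numeric check of `calc_log_sum` above: with sigma = 0 it reduces to a plain max over choices, and with sigma > 0 it computes the smoothed maxV + sigma * log(sum_i exp((V_i - maxV) / sigma)) (NumPy assumed):

import numpy as np

vals = np.array([[1.0, 2.0], [0.5, 3.0]])  # two choices at two grid points
print(np.amax(vals, axis=0))  # sigma = 0 case: [1. 3.]
sigma = 0.5
max_v = np.max(vals, axis=0)
print(max_v + sigma * np.log(np.sum(np.exp((vals - max_v) / sigma), axis=0)))
# slightly above the max, e.g. ~[1.157 3.063]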
def comment_create(request, post_pk): """Create a comment on a post""" post = get_object_or_404(Post, pk=post_pk) form = CommentForm(request.POST or None) if request.method == 'POST' and form.is_valid(): comment = form.save(commit=False) comment.post = post comment.save() return redirect('blog:post_detail', pk=post.pk) context = { 'form': form, 'post': post } return render(request, 'blog/comment_form.html', context)
fe6357cfcff1a522064ad9f49b030cf63a02b575
3,654,044
import torch def tensor_from_var_2d_list(target, padding=0.0, max_len=None, requires_grad=True): """Convert a variable 2 level nested list to a tensor. e.g. target = [[1, 2, 3], [4, 5, 6, 7, 8]] """ max_len_calc = max([len(batch) for batch in target]) if max_len is None: max_len = max_len_calc if max_len_calc > max_len: print("Maximum length exceeded: {}>{}".format(max_len_calc, max_len)) target = [batch[:max_len] for batch in target] padded = [batch + (max_len - len(batch)) * [padding] for batch in target] return torch.tensor(padded, requires_grad=requires_grad)
2aa5fcc5b2be683c64026126da55330937cd8242
3,654,045
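A usage sketch for `tensor_from_var_2d_list` above (PyTorch assumed installed, function in scope):

target = [[1, 2, 3], [4, 5, 6, 7, 8]]
# The shorter row is padded with 0.0 up to the longest row's length.
print(tensor_from_var_2d_list(target, requires_grad=False))
# tensor([[1., 2., 3., 0., 0.],
#         [4., 5., 6., 7., 8.]])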
from web3.middleware import geth_poa_middleware def make_w3(gateway_config=None): """ Create a Web3 instance configured and ready-to-use gateway to the blockchain. :param gateway_config: Blockchain gateway configuration. :type gateway_config: dict :return: Configured Web3 instance. :rtype: :class:`web3.Web3` """ if gateway_config is None or gateway_config['type'] == 'auto': w3 = web3.Web3() elif gateway_config['type'] == 'user': request_kwargs = gateway_config.get('http_options', {}) w3 = web3.Web3(web3.Web3.HTTPProvider(gateway_config['http'], request_kwargs=request_kwargs)) elif gateway_config['type'] == 'infura': request_kwargs = gateway_config.get('http_options', {}) project_id = gateway_config['key'] # project_secret = gateway_config['secret'] http_url = 'https://{}.infura.io/v3/{}'.format(gateway_config['network'], project_id) w3 = web3.Web3(web3.Web3.HTTPProvider(http_url, request_kwargs=request_kwargs)) # https://web3py.readthedocs.io/en/stable/middleware.html#geth-style-proof-of-authority if gateway_config.get('network', None) == 'rinkeby': # This middleware is required to connect to geth --dev or the Rinkeby public network. # inject the poa compatibility middleware to the innermost layer w3.middleware_onion.inject(geth_poa_middleware, layer=0) else: raise RuntimeError('invalid blockchain gateway type "{}"'.format(gateway_config['type'])) return w3
85119161c7842319718e7075192b277f810b4328
3,654,046
def _log2_ratio_to_absolute(log2_ratio, ref_copies, expect_copies, purity=None): """Transform a log2 ratio to absolute linear scale (for an impure sample). Does not round to an integer absolute value here. Math:: log2_ratio = log2(ncopies / ploidy) 2^log2_ratio = ncopies / ploidy ncopies = ploidy * 2^log2_ratio With rescaling for purity:: let v = log2 ratio value, p = tumor purity, r = reference ploidy, x = expected ploidy, n = tumor ploidy ("ncopies" above); v = log_2(p*n/r + (1-p)*x/r) 2^v = p*n/r + (1-p)*x/r n*p/r = 2^v - (1-p)*x/r n = (r*2^v - x*(1-p)) / p If purity adjustment is skipped (p=1; e.g. if germline or if scaling for heterogeneity was done beforehand):: n = r*2^v """ if purity and purity < 1.0: ncopies = (ref_copies * 2**log2_ratio - expect_copies * (1 - purity) ) / purity else: ncopies = _log2_ratio_to_absolute_pure(log2_ratio, ref_copies) return ncopies
939a9e4ccb0a1fe9c8c2e6f369bb23c556f04a14
3,654,047
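A worked check of the purity rescaling in `_log2_ratio_to_absolute` above, n = (r * 2**v - x * (1 - p)) / p, with illustrative values:

v, r, x, p = 1.0, 2, 2, 0.5  # log2 ratio, reference ploidy, expected ploidy, purity
n = (r * 2 ** v - x * (1 - p)) / p
print(n)  # 6.0 -- a pure sample (p = 1) would instead give r * 2**v = 4.0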
def fixt(): """ Create an Exchange object that will be re-used during testing. """ mesh = df.UnitCubeMesh(10, 10, 10) S3 = df.VectorFunctionSpace(mesh, "DG", 0) Ms = 1 A = 1 m = df.Function(S3) exch = ExchangeDG(A) exch.setup(S3, m, Ms) return {"exch": exch, "m": m, "A": A, "S3": S3, "Ms": Ms}
4e46550f1ef9e821e459612b82c0410b7459b09b
3,654,048
def touch_emulator(ev, x, y): """ This emulates a touch-screen device, like a tablet or smartphone. """ if ev.type == pygame.MOUSEBUTTONDOWN: if ev.button != 1: return None, x, y elif ev.type == pygame.MOUSEBUTTONUP: if ev.button != 1: return None, x, y move = pygame.event.Event(pygame.MOUSEMOTION, { "pos" : (0, 0), "rel" : (0, 0), "buttons" : (0, 0, 0) }) renpy.display.interface.pushed_event = move elif ev.type == pygame.MOUSEMOTION: if not ev.buttons[0]: x = 0 y = 0 elif ev.type == pygame.KEYDOWN: if not ev.key in TOUCH_KEYS: return None, x, y elif ev.type == pygame.KEYUP: if not ev.key in TOUCH_KEYS: return None, x, y return ev, x, y
826b17c7bc9089acebdf3e1ea64fa0613e13e8ea
3,654,049
import sys def check_python_version() -> bool: """Check minimum Python version is being used. Returns: True if version is OK. """ if sys.version_info.major == 3 and sys.version_info.minor >= 6: return True logger.error( "Aborting... Python version 3.6 or greater is required.\n" f"Current Python version is {sys.version_info.major}.{sys.version_info.minor}." ) return False
3ca8296e72a380f86744460ec88cccae1fd58be4
3,654,050
def invert_dict(d): """ Invert dictionary by switching keys and values. Parameters ---------- d : dict python dictionary Returns ------- dict Inverted python dictionary """ return dict((v, k) for k, v in d.items())
c70bfdb5ffa96cf07b1a4627aa484e3d5d0f4fea
3,654,051
def atom_present_in_geom(geom, b, tol=DEFAULT_SYM_TOL): """Function used by set_full_point_group() to scan a given geometry and determine if an atom is present at a given location. """ for i in range(len(geom)): a = [geom[i][0], geom[i][1], geom[i][2]] if distance(b, a) < tol: return True return False
ad4afd6ca3d419b69ef502d64e3e66635485d340
3,654,052
import locale def atof(value): """ locale.atof() on unicode string fails in some environments, like Czech. """ if isinstance(value, unicode): value = value.encode("utf-8") return locale.atof(value)
b0b2d2ea70c5e631ad2a1d25eb5c55d06cbdac1e
3,654,053
def r_group_list(mol, core_mol): """ This takes a mol and the common core and finds all the R-groups by replacing the atoms in the ligand (which make up the common core) with nothing. This fragments the ligand and from those fragments we are able to determine what our R-groups are. for any common core atom which touched the fragment a * will replace that atom in the fragments. Inputs: :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule :param rdkit.Chem.rdchem.Mol core_mol: an rdkit molecule for the shared common core Returns: :returns: rdkit.Chem.rdchem.Mol replace_core_mol: an rdkit molecule with the common core removed from a ligand fragments the mol which can be used to make lists of R-groups """ # This returns all the mol frags for a particular compound against the # core molecule replace_core_mol = Chem.ReplaceCore( mol, core_mol, labelByIndex=True, replaceDummies=True, requireDummyMatch=False ) if len(replace_core_mol.GetAtoms()) == 0: # This means that the mol either did not contain the core_mol or the # core_mol is the same mol as the mol. ie) if mol_string # ="[10000N-]=[10001N+]=[10002N][10003CH]1[10004O][10005CH]([10006CH2][10007OH])[10008CH]([10013OH])[10009CH]([10012OH])[10010CH]1[10011OH]" # and core_string # ="[10000NH]=[10001N+]=[10002N][10003CH]1[10004O][10005CH]([10006CH2][10007OH])[10008CH]([10013OH])[10009CH]([10012OH])[10010CH]1[10011OH]" # the only difference is the H's which means it can be replaced within # because its the same mol This is rare but does occur. return None return replace_core_mol
16df0f54a5bf374bd1e77d3443baa42aab2dd231
3,654,054
def set_execution_target(backend_id='simulator'): """ Used to run jobs on a real hardware :param backend_id: device name. List of available devices depends on the provider example usage. set_execution_target(backend_id='arn:aws:braket:::device/quantum-simulator/amazon/sv1') """ global device if backend_id == None or backend_id == "": device = None elif backend_id == "simulator": device = LocalSimulator() else: device = AwsDevice(backend_id) if verbose: print(f"... using Braket device = {device}") # create an informative device name device_name = device.name device_str = str(device) if device_str.find(":device/") > 0: idx = device_str.rindex(":device/") device_name = device_str[idx+8:-1] metrics.set_plot_subtitle(f"Device = {device_name}") return device
2efb44723a804e39d998ddc3e7a7c3bdaa0440db
3,654,055
def segregate(str): """3.1 Basic code point segregation""" base = bytearray() extended = set() for c in str: if ord(c) < 128: base.append(ord(c)) else: extended.add(c) extended = sorted(extended) return bytes(base), extended
e274393735bf4f1d51a75c73351848cbfdd5f81f
3,654,056
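A self-contained demonstration of `segregate` (the basic code point segregation step of punycode encoding):

def segregate(s):
    """Split a string into its ASCII bytes and sorted non-ASCII code points."""
    base = bytearray()
    extended = set()
    for c in s:
        if ord(c) < 128:
            base.append(ord(c))
        else:
            extended.add(c)
    return bytes(base), sorted(extended)

print(segregate("bücher"))  # (b'bcher', ['ü'])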
def count_tetrasomic_indivs(lis) -> dict: """ Count number of times that a chromosome is tetrasomic (present in four copies) :returns counts_of_tetrasomic_chromosomes""" counts_of_tetrasomic_chromosomes = {k: 0 for k in chr_range} for kary_group in lis: for index, chr_type in enumerate(chr_range): if kary_group[index // 2].count(chr_type) == 4: counts_of_tetrasomic_chromosomes[chr_type] += 1 return counts_of_tetrasomic_chromosomes
598cd10bdbaeea061be0e259de756beba9d248b7
3,654,057
def make_image_grid(images: np.ndarray, nrow: int = 1) -> np.ndarray: """Concatenate multiple images into a single image. Args: images (np.array): Images can be: - A 4D mini-batch image of shape [B, C, H, W]. - A 3D RGB image of shape [C, H, W]. - A 2D grayscale image of shape [H, W]. nrow (int): Number of images in each row of the grid. Final grid size is `[B / nrow, nrow]`. Default: `1`. Returns: cat_image (np.ndarray): Concatenated image. """ # NOTE: Type checking if images.ndim == 3: return images # NOTE: Conversion (just for sure) if is_channel_first(images): images = to_channel_last(images) # NOTE: Images are channel-last from here on b, h, w, c = images.shape ncols = nrow nrows = (b // nrow) if (b // nrow) > 0 else 1 cat_image = np.zeros((int(h * nrows), w * ncols, c)) for idx, im in enumerate(images): j = idx // ncols i = idx % ncols cat_image[j * h: j * h + h, i * w: i * w + w, :] = im return cat_image
1b367392c275a44e5c23ce19c96f5727015285f5
3,654,058
def drive_time_shapes(drive_time): """Simplify JSON response into a dictionary of point lists.""" isochrones = {} try: for shape in drive_time['response']['isoline']: uid = str(int(shape['range'] / 60)) + ' minutes' points = shape['component'][0]['shape'] point_list = array_to_points(points) isochrones[uid] = point_list except KeyError: print(drive_time) return isochrones
f8f5074d5326ba598c083fdcc228bdfb69f427a5
3,654,059
def compute_transformation_sequence_case_1(cumprod, local_shape, ind, sharded_leg_pos, pgrid): """ Helper function for `pravel`, see `pravel` for more details. """ ops = [] ndev = np.prod(pgrid) if ndev % cumprod[ind - 1] != 0: raise ValueError("reshaping not possible") remainder = ndev // cumprod[ind - 1] # the local leg has to be divisible by the remainder, # otherwise we can't place the sharded legs that need to be # localized at their respective positions if local_shape[sharded_leg_pos] % remainder != 0: raise ValueError( f"tensor.shape[{sharded_leg_pos}] = {local_shape[sharded_leg_pos]}" f" is not divisible by a local remainder of {remainder}. " f"Try using a different shape for the input tensor") if np.prod(local_shape[sharded_leg_pos:]) % remainder != 0: raise ValueError("reshaping not possible 2") # the first index group contains all legs that are going to be sharded # the second index group contain is swapped with the currently sharded legs # the third group remains unchanged orig_left_shape = tuple(local_shape[:sharded_leg_pos],) + (remainder,) orig_right_shape = (local_shape[sharded_leg_pos] // remainder,) + tuple( local_shape[sharded_leg_pos + 1:]) shape_1 = orig_left_shape + (ndev,) + (np.prod(orig_right_shape) // ndev,) ops.append(('reshape', [local_shape, shape_1])) ops.append(('pswapaxes', { 'axis_name': AXIS_NAME, 'axis': sharded_leg_pos + 1 })) # the previously sharded legs are now localized at position # sharded_leg_pos + 1 we now split off the legs that need # to be distributed again and move them to the right of their # corresponding local legs shape_2 = orig_left_shape + tuple(pgrid) + ( np.prod(orig_right_shape) // ndev,) l = list(range(len(shape_2))) left = l[:len(orig_left_shape)] right = l[len(orig_left_shape):] perm_1 = misc.flatten([[r, l] for l, r in zip(left, right[:len(left)]) ]) + right[len(left):] shape_3 = (np.prod(shape_2[:2 * len(orig_left_shape)]),) + tuple( shape_2[2 * len(orig_left_shape):]) ops.append(('reshape', [shape_1, shape_2])) ops.append(('transpose', perm_1)) perm_shape_2 = [shape_2[p] for p in perm_1] ops.append(('reshape', [perm_shape_2, shape_3])) ops.append(('pswapaxes', {'axis_name': AXIS_NAME, 'axis': 0})) # swap the first local axis with the sharded one # now we have the harded legs in the right order # next we need to fix the order of the localized legs perm_2 = list(range(1, len( pgrid[sharded_leg_pos:]))) + [0] + [len(pgrid[sharded_leg_pos:])] shape_4 = tuple(pgrid[sharded_leg_pos + 1:]) + orig_right_shape ops.append(('transpose', perm_2)) perm_shape_3 = [shape_3[p] for p in perm_2] ops.append(('reshape', [perm_shape_3, shape_4])) p = len(pgrid[sharded_leg_pos + 1:]) left = list(range(p)) right = list(range(p + 1, len(shape_4))) perm_3 = [p] + misc.flatten([[l, r] for l, r in zip(left, right[:len(left)]) ]) + right[len(left):] ops.append(('transpose', perm_3)) perm_shape_4 = [shape_4[p] for p in perm_3] shape_5 = misc.maybe_ravel_shape(perm_shape_4) ops.append(('reshape', [perm_shape_4, shape_5])) return ops
0f88967a2fcc132af753a2c5dfbf2a9b8087877a
3,654,060
def fix_join_words(text: str) -> str: """ Replace all join ``urdu`` words with separate words Args: text (str): raw ``urdu`` text Returns: str: returns a ``str`` object containing normalized text. """ for key, value in WORDS_SPACE.items(): text = text.replace(key, value) return text
a13040b420db5b0daf27f3a2f6a1e93a9188c312
3,654,061
def degree_of_appropriateness(attr_list,data_list,summarizers,summarizer_type,t3,letter_map_list,alpha_sizes,age,activity_level,flag=None,goals=None): """ Inputs: - data: the database - summarizer: the conclusive phrase of the summary - summarizer_type: the type of summarizer - t3: the degree of covering - letter_map: a mapping from letters to integers - alpha_size: the alphabet size - age: the user's age - activity_level: the user's activity level Outputs: the degree of appropriateness Purpose: to calculate the degree of appropriateness. The degree of appropriateness describes how characteristic for the particular database the summary found is. This helps avoid the output of trivial summaries """ # TODO: Factor this in once we have multiple attributes #t4_list = [] # Day count r_list = [] #print(data_list) #input([summarizers,attr_list]) #print("degree") #print(summarizers) try: n = len(data_list[0]) except TypeError: data_list = [data_list] n = len(data_list) #if "Pattern Recognition" in summarizer_type: #n = len(data_list) #print(flag) for j in range(len(summarizers)): t_k = [] summarizer = summarizers[j] for i in range(n): #print(j,i) if flag == "compare": if (i == 0 and "Pattern Recognition" not in summarizer_type): #or ("Pattern Recognition" in summarizer_type and ((i+1)%2==0 or i == len(data_list[j])-1)): continue #print(i) prev_letter = data_list[j][i-1] curr_letter = data_list[j][i] if "Pattern Recognition" in summarizer_type: #print(data_list,j,i) prev_letter = data_list[j][i][0] curr_letter = data_list[j][i][1] #print(prev_letter,curr_letter) goal_list, conclusion_list = compare_SAX(attr_list[j],prev_letter,curr_letter,summarizer_type,letter_map_list[j]) conclusion = conclusion_list[0] if "Pattern Recognition" in summarizer_type: summarizer_map = { "better" : "rose", "about the same" : "stayed the same", "not do as well" : "dropped"} conclusion = summarizer_map[conclusion_list[0]] #print(prev_letter,curr_letter,conclusion,summarizer) if conclusion == summarizer: t_k.append(1) else: t_k.append(0) elif flag == "compareHR": #if i%7 != 0 or i < 7: #continue #curr_tw = data_list[j][i:i+7] #last_tw = data_list[j][i-7:i] if "Pattern Recognition" in summarizer_type: last_tw = [data_list[j][i][0]] curr_tw = [data_list[j][i][1]] else: curr_tw = [data_list[j][i]] last_tw = [data_list[j][i-1]] #print(curr_tw,last_tw,flag) summary, conclusion, goal_list = comparison_TW_SAX_summary(summarizer_type,attr_list,last_tw,curr_tw,"weeks",letter_map_list,i-1,i,flag="eval") #conclusion = compare_HR_TW(last_tw,curr_tw,age,activity_level) conclusion_map = { 'lower' : 'dropped', 'higher' : 'rose', 'about the same' : 'stayed the same'} #print(conclusion,summarizers) #print(conclusion_map[conclusion[j]],summarizers[j]) if conclusion_map[conclusion[j]] == summarizer: t_k.append(1) else: t_k.append(0) elif flag == "compareACT": if i==0: continue conclusion, pair_word = compareACT(data_list[j][i-1],data_list[j][i]) if summarizer == conclusion: t_k.append(1) else: t_k.append(0) elif flag == "HR" or (attr_list[j] == "Heart Rate" and "Trends" not in summarizer_type): #print(hr_evaluation(data_list[j][i],age,activity_level),summarizer) if hr_evaluation(data_list[j][i],age,activity_level) == summarizer: t_k.append(1) else: t_k.append(0) else: curr_data = data_list[j][i] if "If-then pattern" in summarizer_type: start_index = 1 skip_index = 1 valid = True extra = len(summarizer) for k in range(start_index,len(summarizer)): if i+k >= len(data_list[j]): valid = False break #print(flag,summarizer,k,weekday_map) if flag != None and summarizer[k] in weekday_map.keys(): #input(weekday_map.keys()) curr_data += str(weekday_map[flag[i+k]]) else: curr_data += data_list[j][i+k] if not valid or len(summarizer)==0: continue goal = None if goals != None and None not in goals: try: goal = goals[j][i] except IndexError: #goal = goals[j] try: goal = goals[j] except IndexError: goals = goals[0] try: goal = goals[j][i] except IndexError: goal = goals[j] #print(curr_data) #print(summarizer_type) #print(summarizer_type,summarizer,flag,goal,curr_data) #input(get_muS(attr_list[j],summarizer_type,summarizer,curr_data,letter_map_list[j],alpha_sizes[j],flag=flag,goal_=goal)) if get_muS(attr_list[j],summarizer_type,summarizer,curr_data,letter_map_list[j],alpha_sizes[j],flag=flag,goal_=goal): t_k.append(1) else: t_k.append(0) #print(sum(t_k),len(t_k)) if sum(t_k)==0: r_k = 1 else: r_k = sum(t_k)/float(len(t_k)) #print(len(t_k),len(data_list[0]), t_k) r_list.append(r_k) #print(r_list,t3) r = 1 for i in range(len(r_list)): r *= r_list[i] #print(r,t3) return abs(r - t3)
60e34efcd3ca45471acc53202d243f6e55813179
3,654,062
import statistics def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db): """normalize expression for raw expression data (only for non-baseline data)""" #avg_true_const_exp_db[affygene] = [avg_const_exp] temp_avg_const_exp_db={} for probeset in array_raw_group_values: conditions = len(array_raw_group_values[probeset][y]); break #number of raw expression values to normalize for affygene in gene_db: ###This is blank when there are no constitutive or the above condition is implemented if affygene in constitutive_gene_db: probeset_list = constitutive_gene_db[affygene] z = 1 else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present probeset_list = gene_db[affygene] z = 0 x = 0 while x < conditions: ### average all exp values for constitutive probesets for each condition exp_list=[] for probeset in probeset_list: try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis except KeyError: continue exp_list.append(exp_val) try: avg_const_exp = statistics.avg(exp_list) except Exception: avg_const_exp = 'null' if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null': if z == 1: try: avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: avg_const_exp_db[affygene] = [avg_const_exp] try: temp_avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp] elif avg_const_exp != 'null': ###*** try: avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: avg_const_exp_db[affygene] = [avg_const_exp] try: temp_avg_const_exp_db[affygene].append(avg_const_exp) except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp] x += 1 if analysis_method == 'ANOVA': global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {} for affygene in gene_db: probeset_list = gene_db[affygene] for probeset in probeset_list: x = 0 while x < group_size: new_ratios = [] ### Calculate expression ratios relative to constitutive expression exp_val = array_raw_group_values[probeset][y][x] const_exp_val = temp_avg_const_exp_db[affygene][x] ###Since the above dictionary is agglomerating all constitutive expression values for permutation, ###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary) #non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val) #non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val) #non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val log_exp_ratio = exp_val - const_exp_val try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio) except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio] x += 1 return avg_const_exp_db
38d5d39cb6a5532b84f08ddd0fbb27335e45897b
3,654,063
def parse_rummager_topics(results): """ Parse topics from rummager results """ pages = [] for result in results: pages.append( Topic( name=result['title'], base_path=result['slug'], document_type=DocumentType[result['format']] ) ) return pages
d88355014c4a74e1ca7ca2ca1389850cba550612
3,654,064
def format_last_online(last_online): """ Return the upper limit in seconds that a profile may have been online. If last_online is an int, return that int. Otherwise if last_online is a str, convert the string into an int. Returns ---------- int """ if isinstance(last_online, str): if last_online.lower() in ('day', 'today'): last_online_int = 86400 # 3600 * 24 elif last_online.lower() == 'week': last_online_int = 604800 # 3600 * 24 * 7 elif last_online.lower() == 'month': last_online_int = 2678400 # 3600 * 24 * 31 elif last_online.lower() == 'year': last_online_int = 31536000 # 3600 * 24 * 365 elif last_online.lower() == 'decade': last_online_int = 315360000 # 3600 * 24 * 365 * 10 else: # Defaults any other strings to last hour last_online_int = 3600 else: last_online_int = last_online return last_online_int
335ed9a37062964b785c75246c9f23f678b4a90e
3,654,065
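A few illustrative calls to `format_last_online` above (function assumed in scope):

print(format_last_online('week'))     # 604800
print(format_last_online(7200))       # 7200
print(format_last_online('unknown'))  # 3600, the last-hour fallback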
from datetime import date, datetime import pkg_resources import requests import json def get_currency_cross_historical_data(currency_cross, from_date, to_date, as_json=False, order='ascending', interval='Daily'): """ This function retrieves recent historical data from the introduced `currency_cross` from Investing via Web Scraping. The resulting data can either be stored in a :obj:`pandas.DataFrame` or in a :obj:`json` file, with `ascending` or `descending` order. Args: currency_cross (:obj:`str`): name of the currency cross to retrieve recent historical data from. from_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, from where data is going to be retrieved. to_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, until where data is going to be retrieved. as_json (:obj:`bool`, optional): optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`). order (:obj:`str`, optional): optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`). interval (:obj:`str`, optional): value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`. Returns: :obj:`pandas.DataFrame` or :obj:`json`: The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved recent data from the specified currency_cross via argument. The dataset contains the open, high, low, close and volume values for the selected currency_cross on market days. The returned data in case we use default arguments will look like:: Date || Open | High | Low | Close | Currency -----||------|------|-----|-------|--------- xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxxxx but if we define `as_json=True`, then the output will be:: { name: name, recent: [ dd/mm/yyyy: { 'open': x, 'high': x, 'low': x, 'close': x, 'currency' : x }, ... ] } Raises: ValueError: argument error. IOError: currency_crosses object/file not found or unable to retrieve. RuntimeError: introduced currency_cross does not match any of the indexed ones. ConnectionError: if the GET request does not return a 200 status code. IndexError: if currency_cross information was unavailable or not found. Examples: >>> investpy.get_currency_cross_historical_data(currency_cross='EUR/USD', from_date='01/01/2018', to_date='01/01/2019') Open High Low Close Currency Date 2018-01-01 1.2003 1.2014 1.1995 1.2010 USD 2018-01-02 1.2013 1.2084 1.2003 1.2059 USD 2018-01-03 1.2058 1.2070 1.2001 1.2014 USD 2018-01-04 1.2015 1.2090 1.2004 1.2068 USD 2018-01-05 1.2068 1.2085 1.2021 1.2030 USD """ if not currency_cross: raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.") if not isinstance(currency_cross, str): raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.") try: datetime.strptime(from_date, '%d/%m/%Y') except ValueError: raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.") try: datetime.strptime(to_date, '%d/%m/%Y') except ValueError: raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.") start_date = datetime.strptime(from_date, '%d/%m/%Y') end_date = datetime.strptime(to_date, '%d/%m/%Y') if start_date >= end_date: raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.") if not isinstance(as_json, bool): raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.") if order not in ['ascending', 'asc', 'descending', 'desc']: raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.") if not interval: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") if not isinstance(interval, str): raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") if interval not in ['Daily', 'Weekly', 'Monthly']: raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.") date_interval = { 'intervals': [], } flag = True while flag is True: diff = end_date.year - start_date.year if diff > 20: obj = { 'start': start_date.strftime('%m/%d/%Y'), 'end': start_date.replace(year=start_date.year + 20).strftime('%m/%d/%Y'), } date_interval['intervals'].append(obj) start_date = start_date.replace(year=start_date.year + 20) else: obj = { 'start': start_date.strftime('%m/%d/%Y'), 'end': end_date.strftime('%m/%d/%Y'), } date_interval['intervals'].append(obj) flag = False interval_limit = len(date_interval['intervals']) interval_counter = 0 data_flag = False resource_package = 'investpy' resource_path = '/'.join(('resources', 'currency_crosses', 'currency_crosses.csv')) if pkg_resources.resource_exists(resource_package, resource_path): currency_crosses = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path)) else: raise FileNotFoundError("ERR#0060: currency_crosses file not found or errored.") if currency_crosses is None: raise IOError("ERR#0050: currency_crosses not found or unable to retrieve.") currency_cross = currency_cross.strip() currency_cross = currency_cross.lower() if unidecode.unidecode(currency_cross) not in [unidecode.unidecode(value.lower()) for value in currency_crosses['name'].tolist()]: raise RuntimeError("ERR#0054: the introduced currency_cross " + str(currency_cross) + " does not exists.") id_ = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'id'] name = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'name'] currency = currency_crosses.loc[(currency_crosses['name'].str.lower() == currency_cross).idxmax(), 'second'] final = list() header = name + ' Historical Data' for index in range(len(date_interval['intervals'])): interval_counter += 1 params = { "curr_id": id_, "smlID": str(randint(1000000, 99999999)), "header": header, "st_date": date_interval['intervals'][index]['start'], "end_date": date_interval['intervals'][index]['end'], "interval_sec": interval, "sort_col": "date", "sort_ord": "DESC", "action": "historical_data" } head = { "User-Agent": get_random(), "X-Requested-With": "XMLHttpRequest", "Accept": "text/html", "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", } url = "https://www.investing.com/instruments/HistoricalDataAjax" req = requests.post(url, headers=head, data=params) if req.status_code != 200: raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.") root_ = fromstring(req.text) path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr") result = list() if path_: for elements_ in path_: info = [] for nested_ in elements_.xpath(".//td"): info.append(nested_.get('data-real-value')) if elements_.xpath(".//td")[0].text_content() == 'No results found': if interval_counter < interval_limit: data_flag = False else: raise IndexError("ERR#0055: currency_cross information unavailable or not found.") else: data_flag = True if data_flag is True: currency_cross_date = datetime.fromtimestamp(int(info[0])) currency_cross_date = date(currency_cross_date.year, currency_cross_date.month, currency_cross_date.day) currency_cross_close = float(info[1].replace(',', '')) currency_cross_open = float(info[2].replace(',', '')) currency_cross_high = float(info[3].replace(',', '')) currency_cross_low = float(info[4].replace(',', '')) result.insert(len(result), Data(currency_cross_date, currency_cross_open, currency_cross_high, currency_cross_low, currency_cross_close, None, currency)) if data_flag is True: if order in ['ascending', 'asc']: result = result[::-1] elif order in ['descending', 'desc']: result = result if as_json is True: json_ = {'name': name, 'historical': [value.currency_cross_as_json() for value in result] } final.append(json_) elif as_json is False: df = pd.DataFrame.from_records([value.currency_cross_to_dict() for value in result]) df.set_index('Date', inplace=True) final.append(df) else: raise RuntimeError("ERR#0004: data retrieval error while scraping.") if as_json is True: return json.dumps(final[0], sort_keys=False) elif as_json is False: return pd.concat(final)
6a01f89b128842497e76d0a3497b204ac6641080
3,654,066
def load_target(target_name, base_dir, cloud=False): """load_target load target from local or cloud Parameters ---------- target_name : str target name base_dir : str project base directory cloud : bool, optional load from GCS, by default False Returns ------- y_train: pd.DataFrame target data """ if cloud: y_train = load_cloud_target(target_name, base_dir) else: y_train = load_local_target(target_name, base_dir) return y_train
2ea76be87afdf524b45f26e9f8271ec973e0951a
3,654,067
def gradient(pixmap, ca, cb, eff, ncols): """ Returns a gradient with the start and end colors. eff should be Gradient.Vertical or Gradient.Horizontal """ x=0 y=0 rca = ca.red() rDiff = cb.red() - rca gca = ca.green() gDiff = cb.green() - gca bca = ca.blue() bDiff = cb.blue() - bca rl = rca << 16 gl = gca << 16 bl = bca << 16 if eff == Gradient.Vertical: rcdelta = (1<<16) / (pixmap.height() * rDiff) gcdelta = (1<<16) / (pixmap.height() * gDiff) bcdelta = (1<<16) / (pixmap.height() * bDiff) else: rcdelta = (1<<16) / (pixmap.width() * rDiff) gcdelta = (1<<16) / (pixmap.width() * gDiff) bcdelta = (1<<16) / (pixmap.width() * bDiff) p = QPainter(pixmap) # these for-loops could be merged, but the if's in the inner loop # would make it slow if eff == Gradient.Vertical: for y in range(pixmap.height()): rl += rcdelta gl += gcdelta bl += bcdelta p.setPen(QColor(rl>>16, gl>>16, bl>>16)) p.drawLine(0, y, pixmap.width()-1, y) else: for x in range(pixmap.width()): rl += rcdelta gl += gcdelta bl += bcdelta p.setPen(QColor(rl>>16, gl>>16, bl>>16)) p.drawLine(x, 0, x, pixmap.height()-1) return pixmap
63406959617a7192c35e05b8efc81dcedfa7d54a
3,654,068
def option_to_text(option):
    """Converts, for example, 'no_override' to 'no override'."""
    return option.replace('_', ' ')
4b7febe0c4500aa23c368f83bbb18902057dc378
3,654,069
def login(email, password):
    """
    :desc: Logs a user in.
    :param: email - Email of the user - required
            password - Password of the user - required
    :return: `dict`
    """
    if email == '' or password == '':
        return {'success': False, 'message': 'Email/Password field left blank.'}

    resp = {'success': False}
    data = {'email': email, 'password': password}

    session = get_session()
    session.cookies = LWPCookieJar(filename=COOKIES_FILE_PATH)
    resp_obj = session.post(LOGIN_URL, data=data)

    if resp_obj.status_code == 200:
        if resp_obj.url == BASE_URL:
            session.cookies.save(ignore_expires=True, ignore_discard=True)
            resp['success'] = True
            resp['message'] = 'Successfully Logged In!'
        else:
            resp['message'] = 'Incorrect credentials'
    else:
        resp['message'] = 'Stackoverflow is probably down. Please try again.'

    return resp
3ea350984d2c4206d66136e283b4784e08606352
3,654,070
def _cons8_89(m8, L88, L89, d_gap, k, Cp, h_gap):
    """dz constraint for edge gap sc touching edge, corner gap sc"""
    term1 = 2 * h_gap * L88 / m8 / Cp      # conv to inner/outer ducts
    term2 = k * d_gap / m8 / Cp / L88      # cond to adj bypass edge
    term3 = k * d_gap / m8 / Cp / L89      # cond to adj bypass corner
    return 1 / (term1 + term2 + term3)
b6e8b6331be394e9a10659029143997b097fae86
3,654,071
def categories_split(df):
    """
    Separate the categories in their own columns.
    """
    ohe_categories = pd.DataFrame(
        df.categories.str.split(';').apply(
            lambda x: {e.split('-')[0]: int(e.split('-')[1]) for e in x}).tolist())
    return df.join(ohe_categories).drop('categories', axis=1)
93e6b1dc384162b63fbf5775d168c0e693829f97
3,654,072
def build_received_request(qparams, variant_id=None, individual_id=None, biosample_id=None):
    """Fills the `receivedRequest` part with the request data"""

    request = {
        'meta': {
            'requestedSchemas': build_requested_schemas(qparams),
            'apiVersion': qparams.apiVersion,
        },
        'query': build_received_query(qparams, variant_id, individual_id, biosample_id),
    }

    return request
bfb0131f3ead563ffd1840119b6f7297a466d4dc
3,654,073
def is_router_bgp_configured_with_four_octet(
    device, neighbor_address, vrf, max_time=35, check_interval=10
):
    """ Verifies that router bgp has been enabled with four
        octet capability and is in the established state

        Args:
            device('obj'): device to check
            vrf('vrf'): vrf to check under
            neighbor_address('str'): neighbor address to match
            max_time('int'): maximum time to wait
            check_interval('int'): how often to check

        Returns:
            True
            False

        Raise:
            None
    """
    log.info(
        "Verifying {} has bgp configured with four octet capability".format(
            device.hostname
        )
    )
    timeout = Timeout(max_time, check_interval)
    while timeout.iterate():
        out = device.parse("show ip bgp neighbors")
        if out:
            if vrf in out.get("vrf", {}):
                for neighbor in out["vrf"][vrf].get("neighbor", {}):
                    if neighbor_address in neighbor:
                        neighbor_dict = out["vrf"][vrf]["neighbor"][neighbor]
                        if (
                            "established"
                            in neighbor_dict.get("session_state", "").lower()
                        ):
                            if "bgp_negotiated_capabilities" in neighbor_dict \
                                    and "advertised and received" in neighbor_dict[
                                        "bgp_negotiated_capabilities"
                                    ].get("four_octets_asn", ""):
                                return True
        timeout.sleep()
    return False
870600a1a5c68d5a4080d8a18966ddc107ae8a72
3,654,074
import os
import pickle

def validate(i):
    """
    Input:  {
              model_name        - model name:
                                    earth
                                    lm
                                    nnet
                                    party
                                    randomforest
                                    rpart
                                    svm
              model_file        - file with model (object) code
              features_table    - features table (in experiment module format)
              (keep_temp_files) - if 'yes', keep temp files
            }

    Output: {
              return           - return code =  0, if successful
                                             >  0, if error
              (error)          - error text if return > 0
              prediction_table - experiment table with predictions
            }
    """
    mn = i['model_name']
    mf = i['model_file']
    mf1 = i['model_file'] + '.model.obj'
    mf7 = i['model_file'] + '.model.decision_tree.json'
    ftable = i['features_table']
    ktf = i.get('keep_temp_files', '')

    lftable = len(ftable)

    # Convert categorical features to floats
    r = convert_categories_to_floats({'table': ftable})
    if r['return'] > 0:
        return r

    fconv = r['conv']
    fconv1 = r['conv1']
    ftable1 = r['table']

    lt = []

    # Load model object
    f = open(mf1, 'rb')
    clf = pickle.load(f)
    f.close()

    sx = ''

    #############################################################
    if mn == 'dtc' or mn == 'dtr':
        pr = clf.predict(ftable1)

        # Check if CK decision tree file exists
        if os.path.isfile(mf7):
            r = ck.load_json_file({'json_file': mf7})
            if r['return'] > 0:
                return r
            labels = r['dict']

            prx = []

            q = -1
            for ft in ftable1:
                q += 1

                found = False
                value = False
                for label in labels:
                    p = labels[label]

                    dd = p['decision']
                    dv = p['value']

                    skip = False
                    for k in range(0, len(dd), 2):
                        x = dd[k]
                        y = dd[k + 1]

                        yc = y['comparison']
                        yf = int(y['feature'])
                        yv = float(y['value'])

                        if yc != '<=':
                            return {'return': 1, 'error': 'not yet supported condition ' + yc + ' in decision tree'}

                        if x == '':
                            if not ft[yf] <= yv:
                                skip = True
                        else:
                            if ft[yf] <= yv:
                                skip = True

                        if skip:
                            break

                    if not skip:
                        found = True
                        if dv == 'true':
                            value = True
                        else:
                            value = False
                        break

                if not found:
                    return {'return': 1, 'error': 'decision tree is incomplete'}

                lt.append(label)

                # print '**********'
                # for z in range(0, len(ftable1[q])):
                #     zx = ftable1[q][z]
                #     print 'X['+str(z)+']='+str(zx)
    else:
        return {'return': 1, 'error': 'model name ' + mn + ' is not found in module model.sklearn'}

    pr1 = []
    for q in pr:
        pr1.append([q])

    lt1 = []
    for q in lt:
        lt1.append([q])

    return {'return': 0, 'prediction_table': pr1, 'label_table': lt1}
733de640d864d1484753e459eb3c352201546dd4
3,654,075
import six

def cluster_absent(
        name='localhost',
        quiet=None):
    """
    Machine is not running as a cluster node

    quiet:
        execute the command in quiet mode (no output)
    """
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if __salt__['crm.status']():
        ret['result'] = True
        ret['comment'] = 'Cluster is already not running'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Cluster node {} would be removed'.format(name)
        ret['changes']['name'] = name
        return ret

    try:
        # Here starts the actual process
        result = __salt__['crm.cluster_remove'](
            host=name, force=True, quiet=quiet)
        if result:
            ret['changes']['name'] = name
            ret['comment'] = 'Error removing cluster node'
            ret['result'] = False
            return ret

        ret['changes']['name'] = name
        ret['comment'] = 'Cluster node removed'
        ret['result'] = True
        return ret
    except exceptions.CommandExecutionError as err:
        ret['comment'] = six.text_type(err)
        return ret
f19c4c18cd0812ee2f4426a458cfb49e4faba5e0
3,654,076
def link_symbols_in_code_blocks(path, blocks, symbols):
    """Link symbols appearing in a sequence of blocks."""
    return [link_symbols_in_code_block(path, block, symbols)
            for block in blocks]
4185e9a1c9b0c8ff2748e80390763b089e9f8831
3,654,077
def cem_model_factory(
        env, network=mlp, network_params={},
        input_shape=None,
        min_std=1e-6, init_std=1.0, adaptive_std=False,
        model_file_path=None, name='cem'):
    """
    Model for gradient method
    """

    def build_graph(model, network=network,
                    input_shape=input_shape,
                    network_params=network_params):
        policy = make_policy(
            env, 'pi', model, network_params=network_params,
            input_shape=input_shape,
            init_std=init_std, adaptive_std=adaptive_std,
            min_std=min_std, network=network)
        model['policy'] = policy
        model.add_output_node(policy.distribution.output_node)

        var_list = policy.get_trainable_variables()
        shapes = map(tf_utils.var_shape, var_list)
        total_size = sum(np.prod(shape) for shape in shapes)
        model['theta'] = tf.placeholder(tf.float32, [total_size])

        var_list = policy.get_trainable_variables()
        model['gf'] = tf_utils.flatten_vars(var_list)
        model['sff'] = tf_utils.setfromflat(var_list, model['theta'])

    if model_file_path is not None:
        return Model.load(model_file_path, name)
    return Model(env, build_graph, empty_feed_dict, name=name)
e9327a4f3711e19e71cc16658d6e93acba29da47
3,654,078
def get_job(job_id: UUID) -> Job:
    """
    Get job by ID.

    Args:
        job_id (UUID): ID of the job to be returned.

    Returns:
        Job
    """
    return JobRepository.get_one_by_id(model_id=job_id)
53e70843ce18e77b17e79bac83ba0225d6087e23
3,654,079
import os

def set_job_dirs():
    """Sets job directories based on env variables set by Vertex AI."""

    model_dir = os.getenv('AIP_MODEL_DIR', LOCAL_MODEL_DIR)
    if model_dir.startswith('gs://'):
        model_dir = model_dir.replace('gs://', '/gcs/')
    checkpoint_dir = os.getenv('AIP_CHECKPOINT_DIR', LOCAL_CHECKPOINT_DIR)
    if checkpoint_dir.startswith('gs://'):
        checkpoint_dir = checkpoint_dir.replace('gs://', '/gcs/')

    return model_dir, checkpoint_dir
93d9a648c7cc89cb5bc316e0e798e8f09ea3c8b9
3,654,080
import pytz

def localize_datetime(input_df, timezone=DEFAULT_TIMEZONE,
                      tms_gmt_col=DEFAULT_TMS_GMT_COL):
    """
    Convert datetime column from UTC to another timezone.
    """
    tmz = pytz.timezone(timezone)
    df = input_df.copy()
    return (df.set_index(tms_gmt_col)
              .tz_localize(pytz.utc)    # UTC time
              .tz_convert(tmz))
0d6f8638199f0ccfcf61e025b38dbe84d9eab8ff
3,654,081
import contextlib
import socket

def get_available_port() -> int:
    """Finds and returns an available port on the system."""

    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(('', 0))
        _, port = sock.getsockname()
    return int(port)
c86de127fb237662052b8ce010e99d271836e1ef
3,654,082
def prettyprint_float(val, digits):
    """Print a floating-point value in a nice way."""
    format_string = "%." + f"{digits:d}" + "f"
    return (format_string % val).rstrip("0").rstrip(".")
ba62671d9cb8061744fbf1e070e76c31d0ba185d
3,654,083
from typing import Any
from typing import Dict

def year_page(archive_id: str, year: int) -> Any:
    """
    Get year page for archive.

    Parameters
    ----------
    archive : str
        Must be an arXiv archive identifier.
    year: int
        Must be a two or four digit year.

    Returns
    -------
    dict
        Search result response data.
    int
        HTTP status code.
    dict
        Headers to add to the response.
    """
    thisYear = date.today().year

    if year is None:
        year = thisYear

    if year > thisYear:
        # 307 because year might be valid in the future
        return {}, status.HTTP_307_TEMPORARY_REDIRECT, {'Location': '/'}

    if year < 100:
        if year >= 91:
            year = 1900 + year
        else:
            year = 2000 + year

    if archive_id not in taxonomy.ARCHIVES:
        raise BadRequest("Unknown archive.")
    else:
        archive = taxonomy.ARCHIVES[archive_id]

    listing_service = get_listing_service()
    month_listing = listing_service.monthly_counts(archive_id, year)

    for month in month_listing['month_counts']:
        month['art'] = ascii_art_month(archive_id, month)  # type: ignore
        month['yymm'] = f"{month['year']}-{month['month']:02}"  # type: ignore
        month['url'] = url_for('browse.list_articles',  # type: ignore
                               context=archive_id,
                               subcontext=f"{month['year']}{month['month']:02}")

    response_data: Dict[str, Any] = {
        'archive_id': archive_id,
        'archive': archive,
        'months': month_listing['month_counts'],
        'listing': month_listing,
        'year': str(year),
        'stats_by_year': stats_by_year(archive_id, archive, years_operating(archive), year)
    }
    response_headers: Dict[str, Any] = {}

    response_status = status.HTTP_200_OK

    return response_data, response_status, response_headers
9bd609718d782d3cca185929ebacebd0e353bb10
3,654,084
import scipy

def vert_polyFit2(data, z, bin0, step=1, deg=2):
    """
    Trying to use the vertical polynomial fit to clean up the data

    not really sure about what I'm doing though
    """
    data = np.squeeze(data)
    z = np.squeeze(z)

    dz = np.nanmean(np.gradient(np.squeeze(z)))
    bin1 = int(np.ceil(bin0 / dz))

    fits = []
    zFits = []
    bins = []
    for i in range(len(z)):
        if 2 * i + bin1 < len(z):
            bins.append(np.arange(i, 2 * i + bin1 + 1))
            mask = np.isfinite(data[i:2 * i + bin1])
            dataIn = data[i:2 * i + bin1]
            zIn = z[i:2 * i + bin1]
            dataIn = dataIn[mask]
            if dataIn.size == 0:
                fits.append(np.nan)
                zFits.append(np.nan)
            else:
                zIn = zIn[mask]
                zFits.append(np.nanmean(z[i:2 * i + bin1]))
                p = scipy.polyfit(zIn, dataIn, deg)
                fits.append(np.nanmean(scipy.polyval(p, z[i:2 * i + bin1][mask])))

    fits = np.hstack(fits)
    zFits = np.hstack(zFits)
    mask2 = np.isfinite(fits)
    P = scipy.interpolate.interp1d(zFits[mask2], fits[mask2], fill_value='extrapolate')
    fitrev = P(z)

    return fitrev
ceeeac26b9eba625164a055deb96741c6d99702e
3,654,085
import requests

def is_downloadable(url):
    """
    Does the url contain a downloadable resource
    """
    h = requests.head(url, allow_redirects=True)
    header = h.headers
    content_type = header.get('content-type')
    print(content_type)
    if 'text' in content_type.lower():
        return False
    if 'html' in content_type.lower():
        return False
    return True
74ccff9d967a3763c852a23d8775970ac9ff9e10
3,654,086
def dataframe_is_one_query_target_pair(dataframe):
    """
    Make sure there is only one query sequence and reference sequence in the
    given dataframe.  Used to check that we aren't aggregating % identity
    numbers across bin alignment pairs.

    :param dataframe:
    :return:
    """
    num_query_bins = len(dataframe['query bin'].unique())
    num_ref_bins = len(dataframe['ref bin'].unique())
    if not num_query_bins == 1:
        # The original built these messages without emitting them; print them
        # so the check actually reports what went wrong.
        print("Dataframe has a mix of {} query bins: {}".format(
            num_query_bins, dataframe['query bin'].unique()))
    if not num_ref_bins == 1:
        print("Dataframe has a mix of {} reference bins: {}".format(
            num_ref_bins, dataframe['ref bin'].unique()))
    if (num_query_bins == 1) & (num_ref_bins == 1):
        return True
    else:
        return False
8a8aba9f4b2eaaca6971bf5c158d043a033d0ec8
3,654,087
def update_api_key(
    self,
    name: str,
    permission: str,
    expiration: int,
    active: bool,
    key: str = None,
    description: str = None,
    ip_list: str = None,
) -> bool:
    """Update existing API key on Orchestrator

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - apiKey
          - PUT
          - /apiKey/{name}

    :param name: API Key Name
    :type name: str
    :param permission: API Key privileges. Allowed values are
        'net_read_write' for RW and 'net_read' for RO
    :type permission: str
    :param expiration: API Key expiration in UNIX timestamp. Key will
        automatically become inactive on expiration date.
    :type expiration: int
    :param active: API Key state is active (True) or inactive (False).
        Inactive keys cannot be used to make requests.
    :type active: bool
    :param key: API Key value, defaults to None
    :type key: str, optional
    :param description: API Key description, defaults to None
    :type description: str, optional
    :param ip_list: List of allowed IP's to make requests with this API
        Key. Leave blank to allow all IP's, defaults to None
    :type ip_list: str, optional
    :return: Returns True/False based on successful call
    :rtype: bool
    """
    api_key_entry = {
        "name": name,
        "permission": permission,
        "expiration": expiration,
        "active": active,
    }

    if key is not None:
        api_key_entry["key"] = key
    if description is not None:
        api_key_entry["description"] = description
    if ip_list is not None:
        api_key_entry["ip_list"] = ip_list

    return self._put(
        "/apiKey/{}".format(name),
        data=api_key_entry,
        expected_status=[204],
        return_type="bool",
    )
9e37062475c3b83ab86c51355442cf6de0df1c34
3,654,088
def cleanGender(x):
    """
    This is a helper function that will help clean up the gender variable.
    """
    if x in ['female', 'mostly_female']:
        return 'female'
    if x in ['male', 'mostly_male']:
        return 'male'
    if x in ['couple']:
        return 'couple'
    else:
        return 'unknownGender'
23d71f2307aa829312f4a1d2a002ae2b55556050
3,654,089
def get_servers():
    """
    Retrieve all the discord servers in the database

    :return: List of servers
    """
    session = Session()
    servers = session.query(Server).all()
    return servers
3953867d18c2e282ee11190a3ee1303126b2394e
3,654,090
def wait_till_postgres_responsive(url):
    """Check if something responds to ``url``"""
    engine = sa.create_engine(url)
    conn = engine.connect()
    conn.close()
    return True
645c98799fa7d0347fc52850b7f3813fec74968c
3,654,091
def get_string_display(attr1, attr2, helper1, helper2, attribute_mode):
    """
    get the attribute mode for string

    attribute mode can be: 'base', 'full', 'partial', 'masked'
    Note that some attribute does not have partial mode, in this case,
    partial mode will return masked mode
    Remember to call has_partial_mode function to check if it actually has
    partial mode!

    Example:
    Input:
        attr1: '1000151475'
        attr2: '1000151575'
        helper1: '*******4**'
        helper2: '*******5**'
        attribute_mode: 'partial'
    Output:
        ['*******<span style="color:red">4</span>**',
         '*******<span style="color:red">5</span>**']
    """
    if attribute_mode == 'base':
        attr1_display = attr1
        attr2_display = attr2
        return [attr1_display, attr2_display]
    elif attribute_mode == 'full':
        if not attr1 or not attr2:
            if not attr1:
                attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
            else:
                attr1_display = attr1
            if not attr2:
                attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
            else:
                attr2_display = attr2
        else:
            if '*' not in helper1 and '*' not in helper2:
                attr1_display = attr1
                attr2_display = attr2
            else:
                attr1_display = ''
                attr2_display = ''
                i = 0
                j = 0
                k = 0
                while k < len(helper1):
                    if helper1[k] == '*':
                        attr1_display += attr1[i]
                        attr2_display += attr2[j]
                        k += 1
                        i += 1
                        j += 1
                    elif k + 1 < len(helper1) and i + 1 < len(attr1) and j + 1 < len(attr2) and \
                            helper1[k] not in ['*', '_', '?'] and helper1[k + 1] not in ['*', '_', '?'] and \
                            attr1[i] == attr2[j + 1] and attr1[i + 1] == attr2[j]:
                        attr1_display += '<span class="transpose_text">' + attr1[i] + attr1[i + 1] + '</span>'
                        attr2_display += '<span class="transpose_text">' + attr2[j] + attr2[j + 1] + '</span>'
                        k += 2
                        i += 2
                        j += 2
                    elif helper1[k] == '_' or helper1[k] == '?':
                        attr2_display += '<span class="indel_text">' + attr2[j] + '</span>'
                        k += 1
                        j += 1
                    elif helper2[k] == '_' or helper2[k] == '?':
                        attr1_display += '<span class="indel_text">' + attr1[i] + '</span>'
                        k += 1
                        i += 1
                    else:
                        attr1_display += '<span class="replace_text">' + attr1[i] + '</span>'
                        attr2_display += '<span class="replace_text">' + attr2[j] + '</span>'
                        k += 1
                        i += 1
                        j += 1
        return [attr1_display, attr2_display]
    elif attribute_mode == 'partial':
        if not attr1 or not attr2:
            if not attr1:
                attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
            else:
                attr1_display = '*' * len(attr1)
            if not attr2:
                attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
            else:
                attr2_display = '*' * len(attr2)
        else:
            if '*' not in helper1 and '*' not in helper2:
                attr1_display = len(attr1) * '@'
                attr2_display = len(attr2) * '&'
            elif helper1 == helper2:
                attr1_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
                attr2_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
            else:
                attr1_display = ''
                attr2_display = ''
                i = 0
                j = 0
                k = 0
                while k < len(helper1):
                    if helper1[k] == '*':
                        attr1_display += '*'
                        attr2_display += '*'
                        k += 1
                        i += 1
                        j += 1
                    elif k + 1 < len(helper1) and i + 1 < len(attr1) and j + 1 < len(attr2) and \
                            helper1[k] not in ['*', '_', '?'] and helper1[k + 1] not in ['*', '_', '?'] and \
                            attr1[i] == attr2[j + 1] and attr1[i + 1] == attr2[j]:
                        attr1_display += '<span class="transpose_text">' + attr1[i] + attr1[i + 1] + '</span>'
                        attr2_display += '<span class="transpose_text">' + attr2[j] + attr2[j + 1] + '</span>'
                        k += 2
                        i += 2
                        j += 2
                    elif helper1[k] == '_' or helper1[k] == '?':
                        attr2_display += '<span class="indel_text">' + attr2[j] + '</span>'
                        k += 1
                        j += 1
                    elif helper2[k] == '_' or helper2[k] == '?':
                        attr1_display += '<span class="indel_text">' + attr1[i] + '</span>'
                        k += 1
                        i += 1
                    else:
                        attr1_display += '<span class="replace_text">' + attr1[i] + '</span>'
                        attr2_display += '<span class="replace_text">' + attr2[j] + '</span>'
                        k += 1
                        i += 1
                        j += 1
        return [attr1_display, attr2_display]
    elif attribute_mode == 'masked':
        if not attr1 or not attr2:
            if not attr1:
                attr1_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
            else:
                attr1_display = '*' * len(attr1)
            if not attr2:
                attr2_display = '<img src="../static/images/site/missing.png" alt="missing" class="missing_icon">'
            else:
                attr2_display = '*' * len(attr2)
        else:
            if '*' not in helper1 and '*' not in helper2:
                attr1_display = len(attr1) * '@'
                attr2_display = len(attr2) * '&'
            elif helper1 == helper2:
                attr1_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
                attr2_display = '<img src="../static/images/site/checkmark.png" alt="checkmark" class="freq_icon">'
            else:
                attr1_display = ''
                attr2_display = ''
                i = 0
                j = 0
                k = 0
                while k < len(helper1):
                    if helper1[k] == '*':
                        attr1_display += '*'
                        attr2_display += '*'
                        k += 1
                        i += 1
                        j += 1
                    elif k + 1 < len(helper1) and i + 1 < len(attr1) and j + 1 < len(attr2) and \
                            helper1[k] not in ['*', '_', '?'] and helper1[k + 1] not in ['*', '_', '?'] and \
                            attr1[i] == attr2[j + 1] and attr1[i + 1] == attr2[j]:
                        attr1_display += '<span class="transpose_text">' + '@&' + '</span>'
                        attr2_display += '<span class="transpose_text">' + '&@' + '</span>'
                        k += 2
                        i += 2
                        j += 2
                    elif helper1[k] == '_' or helper1[k] == '?':
                        attr2_display += '<span class="indel_text">' + '&' + '</span>'
                        k += 1
                        j += 1
                    elif helper2[k] == '_' or helper2[k] == '?':
                        attr1_display += '<span class="indel_text">' + '@' + '</span>'
                        k += 1
                        i += 1
                    else:
                        attr1_display += '<span class="replace_text">' + '@' + '</span>'
                        attr2_display += '<span class="replace_text">' + '&' + '</span>'
                        k += 1
                        i += 1
                        j += 1
        return [attr1_display, attr2_display]
fa61332f82310ece349309f378126a4b3179483f
3,654,092
import re

def is_doi(identifier: str) -> bool:
    """Validates if identifier is a valid DOI

    Args:
        identifier (str): potential doi string

    Returns:
        bool: true if identifier is a valid DOI
    """
    doi_patterns = [
        r"(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?![\"&\'])\S)+)",
        r"(10.\d{4,9}/[-._;()/:A-Z0-9]+)",
        r"(10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d)",
        r"(10.1021/\w\w\d+)",
        r"(10.1207/[\w\d]+\&\d+_\d+)",
    ]
    for pattern in doi_patterns:
        match = bool(re.match(pattern, identifier))
        if match:
            return True
    return False
5c0bfe0527adbf53e89d302ee05feb80d285db64
3,654,093
def meshgrid_flatten(*X):
    """
    Functionally same as numpy.meshgrid() with different output format.
    Function np.meshgrid() takes n 1d ndarrays of size N_1,...,N_n, and
    returns X_1,...,X_n n-dimensional arrays of shape (N_1, N_2,... N_n).
    This returns instead a 2d array of shape (N_1*...*N_n, n).
    """
    if len(X) == 1:  # Because np.meshgrid() can't handle
        return np.array([X[0]]).T  # less than 2 arguments

    return np.vstack(
        map(lambda x: x.flatten(),
            mylib_meshgrid.meshgrid(*X, indexing='ij'))
    ).T
a7136a7a4dadb6449fd5079c78f15b13da3721dd
3,654,094
from typing import Union
import copy

def transform_scale(
    features,
    factor: float,
    origin: Union[str, list] = "centroid",
    mutate: bool = False,
):
    """
    Scale a GeoJSON from a given point by a factor of scaling
    (ex: factor=2 would make the GeoJSON 200% larger).
    If a FeatureCollection is provided, the origin point will be
    calculated based on each individual Feature.

    :param features: GeoJSON to be scaled
    :param factor: of scaling, positive or negative values greater than 0
    :param origin: Point from which the scaling will occur
        (string options: sw/se/nw/ne/center/centroid)
    :param mutate: allows GeoJSON input to be mutated
        (significant performance increase if true)
    :return: Scaled Geojson

    Example :-

    >>> from turfpy.transformation import transform_scale
    >>> from geojson import Polygon, Feature
    >>> f = Feature(geometry=Polygon([[[0,29],[3.5,29],[2.5,32],[0,29]]]))
    >>> transform_scale(f, 3, origin=[0, 29])
    """
    if not features:
        raise Exception("geojson is required")

    if not factor:
        raise Exception("invalid factor")

    if not mutate:
        features = copy.deepcopy(features)

    if features["type"] == "FeatureCollection":

        def _callback_feature_each(feature, feature_index):
            nonlocal factor, origin, features
            features["features"][feature_index] = scale(feature, factor, origin)

        feature_each(features, _callback_feature_each)

        return features

    return scale(features, factor, origin)
bacc6a365dbed0531d4516a736dd9ca2937b8cad
3,654,095
import importlib

def create_agent(opt):
    """Create an agent from the options model, model_params and model_file.
    The input is either of the form
    "parlai.agents.ir_baseline.agents/IrBaselineAgent" (i.e. the path
    followed by the class name) or else just 'IrBaseline' which assumes the
    path above, and a class name suffixed with 'Agent'
    """
    dir_name = opt['model']
    if ':' in dir_name:
        s = dir_name.split(':')
        module_name = s[0]
        class_name = s[1]
    else:
        module_name = "parlai.agents.%s.agents" % (dir_name)
        words = opt['model'].split('_')
        class_name = ''
        for w in words:
            class_name += (w[0].upper() + w[1:])
        class_name += 'Agent'
    print(class_name)
    my_module = importlib.import_module(module_name)
    model_class = getattr(my_module, class_name)
    return model_class(opt)
6f5793ee0af7ed677f47c27ba5b94ad6f80ea957
3,654,096
from distutils.version import LooseVersion

def check_version(actver, version, cmp_op):
    """
    Check version string of an active module against a required version.

    If dev/prerelease tags result in TypeError for string-number comparison,
    it is assumed that the dependency is satisfied.
    Users on dev branches are responsible for keeping their own packages up
    to date.

    Copyright (C) 2013 The IPython Development Team

    Distributed under the terms of the BSD License.
    """
    if isinstance(actver, tuple):
        actver = '.'.join([str(i) for i in actver])

    # Hacks needed so that LooseVersion understands that (for example)
    # version = '3.0.0' is in fact bigger than actver = '3.0.0rc1'
    if is_stable_version(version) and not is_stable_version(actver) and \
            actver.startswith(version) and version != actver:
        version = version + 'zz'
    elif is_stable_version(actver) and not is_stable_version(version) and \
            version.startswith(actver) and version != actver:
        actver = actver + 'zz'

    try:
        if cmp_op == '>':
            return LooseVersion(actver) > LooseVersion(version)
        elif cmp_op == '>=':
            return LooseVersion(actver) >= LooseVersion(version)
        elif cmp_op == '=':
            return LooseVersion(actver) == LooseVersion(version)
        elif cmp_op == '<':
            return LooseVersion(actver) < LooseVersion(version)
        elif cmp_op == '<=':
            return LooseVersion(actver) <= LooseVersion(version)
        else:
            return False
    except TypeError:
        return True
4d2cf92c412659044ad226aeeadb9145ceb75241
3,654,097
def dfa_intersection(dfa_1: dict, dfa_2: dict) -> dict:
    """ Returns a DFA accepting the intersection of the DFAs in input.

    Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and
    :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two DFAs.
    Then there is a DFA :math:`A_∧` that runs simultaneously both
    :math:`A_1` and :math:`A_2` on the input word and accepts when both
    accept.
    It is defined as:

    :math:`A_∧ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, F_1 × F_2 )`

    where

    :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff
    :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2} = ρ_2 (s_2 , a)`

    Implementation proposed guarantees the resulting DFA has only
    **reachable** states.

    :param dict dfa_1: first input DFA;
    :param dict dfa_2: second input DFA.
    :return: *(dict)* representing the intersected DFA.
    """
    intersection = {
        'alphabet': dfa_1['alphabet'].intersection(dfa_2['alphabet']),
        'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
        'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
        'accepting_states': set(),
        'transitions': dict()
    }

    boundary = set()
    boundary.add(intersection['initial_state'])
    while boundary:
        (state_dfa_1, state_dfa_2) = boundary.pop()
        if state_dfa_1 in dfa_1['accepting_states'] \
                and state_dfa_2 in dfa_2['accepting_states']:
            intersection['accepting_states'].add((state_dfa_1, state_dfa_2))
        for a in intersection['alphabet']:
            if (state_dfa_1, a) in dfa_1['transitions'] \
                    and (state_dfa_2, a) in dfa_2['transitions']:
                next_state_1 = dfa_1['transitions'][state_dfa_1, a]
                next_state_2 = dfa_2['transitions'][state_dfa_2, a]
                if (next_state_1, next_state_2) not in intersection['states']:
                    intersection['states'].add((next_state_1, next_state_2))
                    boundary.add((next_state_1, next_state_2))
                intersection['transitions'][(state_dfa_1, state_dfa_2), a] = \
                    (next_state_1, next_state_2)
    return intersection
ea69f3cda2bd28f5b70d1724ffdd628daf1beffa
3,654,098
def change_status(request, page_id):
    """
    Switch the status of a page.
    """
    perm = PagePermission(request.user).check('change', method='POST')
    if perm and request.method == 'POST':
        page = Page.objects.get(pk=page_id)
        page.status = int(request.POST['status'])
        page.invalidate()
        page.save()
        return HttpResponse(str(page.status))
    raise Http404
b65775d91c69cf4ac4d5a59d128581011986f1e7
3,654,099