content: string (lengths 35 – 762k)
sha1: string (length 40)
id: int64 (0 – 3.66M)
import pandas as pd


def hot(df, hot_maps, drop_cold=True, ret_hots_only=False, verbose=False):
    """
    df: pd.DataFrame
    hot_maps: dict
        key: str, column in df
        value: hot_map dict mapping each unique row value to its one_hot vector
    ---
    returns dataframe
    """
    if verbose:
        print(f"hot_df cols: {df.columns}")
    ret = []
    for col_name, hot_map in hot_maps.items():
        # hot_col is assumed to be defined elsewhere in the module
        ret.append(hot_col(df[col_name], hot_map))
    if ret_hots_only:
        return ret
    ret = pd.concat([df] + ret, axis=1)
    if drop_cold:
        ret = ret.drop(list(hot_maps.keys()), axis=1)
    return ret
b0912ae22aa3ee34acde76e89c5f926c9d309492
4,418
def menu(function_text):
    """
    Decorator for a plain-text handler.
    :param function_text: function set as a handler in the bot class
    :return: wrapped handler
    """
    def wrapper(self, bot, update):
        self.text_menu(bot, update)
        function_text(self, bot, update)
    return wrapper
dc68a46aaf402cd5ce3bd832a0f2a661b5cbc71b
4,419
def create_delete_classes(system_id_or_identifier, **kwargs):
    """Create or delete the classes of a classification system.

    :param system_id_or_identifier: The id or identifier of a classification system
    """
    if request.method == "DELETE":
        data.delete_classes(system_id_or_identifier)
        return {'message': f'Classes of {system_id_or_identifier} deleted'}, 204

    if request.method == "POST":
        args = request.get_json()
        errors = ClassMetadataForm().validate(args)
        if errors:
            return abort(400, str(errors))

        classes = data.insert_classes(system_id_or_identifier=system_id_or_identifier,
                                      classes_files_json=args['classes'])
        result = ClassesSchema(exclude=['classification_system_id']).dump(classes, many=True)
        return jsonify(result), 201
4f48ebb7fe80854d255f47fb795e83b54f9f60b3
4,420
import numpy as np


def add_latents_to_dataset_using_tensors(args, sess, tensors, data):
    """Get latent representations from model.

    Args:
        args: Arguments from parser in train_grocerystore.py.
        sess: Tensorflow session.
        tensors: Tensors used for extracting latent representations.
        data: Data used during epoch.

    Returns:
        Data dictionary filled with latent representations.
    """
    latents = sess.run(tensors['latents'],
                       feed_dict={tensors['x']: data['features']})
    data['latents'] = latents

    if args.use_private:
        latents_ux = sess.run(tensors['latents_ux'],
                              feed_dict={tensors['x']: data['features']})
        data['latents_ux'] = latents_ux

    if args.use_text:
        all_captions = load_captions(data['captions'], data['labels'])
        latents_uw = sess.run(tensors['latents_uw'],
                              feed_dict={tensors['captions']: all_captions})
        data['latents_uw'] = latents_uw

    if args.use_iconic:
        batch_size = args.batch_size
        n_examples = len(data['iconic_image_paths'])
        n_batches = int(np.ceil(n_examples / batch_size))
        latents_ui = np.zeros([n_examples, args.z_dim])
        for i in range(n_batches):
            start = i * batch_size
            end = min(start + batch_size, n_examples)
            iconic_images = load_iconic_images(data['iconic_image_paths'][start:end])
            latents_ui[start:end] = sess.run(tensors['latents_ui'],
                                             feed_dict={tensors['iconic_images']: iconic_images})
        data['latents_ui'] = latents_ui

    return data
550c7c878b43737b5fcabbb007062774f404b0b3
4,422
import numpy as np


def normal_distribution_parameter_estimation(data):
    """Estimate normal distribution parameters. Notice: unbiased estimation adopted.

    :param data: a list, each element is a real number, the value of some attribute
        e.g. [0.46, 0.376, 0.264, 0.318, 0.215, 0.237, 0.149, 0.211]
    :return miu: the estimated mean of the normal distribution based on 'data'
        e.g. 0.27875
    :return sigma: the estimated standard deviation of the normal distribution based on 'data'
        e.g. 0.10092394590553255
    """
    miu = np.mean(data)   # estimate the mean of the normal distribution
    sigma = 0             # initial sigma
    data_num = len(data)  # the number of data points
    # estimate sigma of the normal distribution
    for each_data in data:
        sigma = sigma + (each_data - miu) ** 2
    sigma = sigma / (data_num - 1)  # unbiased estimation adopted!!
    sigma = sigma ** 0.5
    return miu, sigma
ce8ca8010f98fdd2067285fea4779507fe7e958b
4,423
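A quick check of the estimator above, using the example data from its own docstring (a minimal sketch; assumes the function and numpy are in scope):

data = [0.46, 0.376, 0.264, 0.318, 0.215, 0.237, 0.149, 0.211]
miu, sigma = normal_distribution_parameter_estimation(data)
print(miu, sigma)  # 0.27875 0.10092394590553255
# np.std(data, ddof=1) returns the same unbiased sigma in one call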
def compose(chosung, joongsung, jongsung=u''):
    """Return a Hangul letter composed of the specified chosung, joongsung,
    and jongsung.

    @param chosung
    @param joongsung
    @param jongsung the terminal consonant of the Hangul letter. This is
        optional if you do not need a jongsung.
    """
    if jongsung is None:
        jongsung = u''
    try:
        chosung_index = CHOSUNGS.index(chosung)
        joongsung_index = JOONGSUNGS.index(joongsung)
        jongsung_index = JONGSUNGS.index(jongsung)
    except Exception:
        raise NotHangulException(
            'No valid Hangul character can be generated using the given '
            'combination of chosung, joongsung, and jongsung.')
    return chr(0xAC00 +
               chosung_index * NUM_JOONGSUNGS * NUM_JONGSUNGS +
               joongsung_index * NUM_JONGSUNGS +
               jongsung_index)
047d0cf68a558d795a5bf71b0ebe686a41208af7
4,425
from typing import Dict
from typing import List


def generate_markdown_metadata(metadata_obj: Dict[str, str]) -> List[str]:
    """generate_markdown_metadata

    Add some basic metadata to the top of the file in HTML tags.
    """
    metadata: List[str] = ["<!---"]
    passed_metadata: List[str] = [
        f" {key}: {value}" for key, value in metadata_obj.items()
    ]
    metadata.extend(passed_metadata)
    metadata.append(" Tags:")
    metadata.append("--->")
    metadata.append(f"# Diary for {metadata_obj['Date']}")
    metadata.append("")
    return metadata
02ef3952c265276f4e666f060be6cb1d4a150cca
4,426
import numpy as np


def fftshift(x: np.ndarray):
    """Shift the FFT spectrum.

    By default the FFT spectrum is not symmetric about zero frequency;
    fftshift swaps the left and right halves of the spectrum.

    :Parameters:
        - x: spectrum sequence
    :Returns: the shifted spectrum
    """
    N = x.size
    return np.append(x[N // 2:], x[:N // 2])
beaf2dcd0d5c9ff0b9bd83d326b3d3a9f6471968
4,427
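For even-length inputs the half-swap above matches numpy's built-in; a small sketch of the equivalence (and of the odd-length caveat):

import numpy as np

x = np.arange(8)
assert np.array_equal(fftshift(x), np.fft.fftshift(x))  # holds for even N
# for odd N the two differ: np.fft.fftshift rolls by N//2 so the zero bin
# lands in the centre, while the N//2 split above is off by one element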
from typing import List
from typing import Tuple


def get_20newsgroups_data(
    train_test,
    categories=None,
    max_text_len: int = None,
    min_num_tokens=0,
    random_state=42,
) -> List[Tuple[str, str]]:
    """
    'alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc',
    'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x',
    'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball',
    'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med',
    'sci.space', 'soc.religion.christian', 'talk.politics.guns',
    'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc'
    """
    data = fetch_20newsgroups(
        subset=train_test,
        shuffle=True,
        remove=("headers", "footers", "quotes"),
        categories=categories,
        random_state=random_state,
    )
    target_names = data.target_names

    def truncate_to_maxlen(text):
        if max_text_len is not None:
            return text[0:min(len(text), max_text_len)]
        else:
            return text

    text_target_tuples = [
        (truncate_to_maxlen(d), target_names[target])
        for d, target in zip(data.data, data.target)
        if len(d.split(" ")) > min_num_tokens
    ]
    return text_target_tuples
6053f967ac1fb782cab28fe401e384940703e384
4,428
def crossdomain(allowed_origins=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True, credentials=False):
    """
    http://flask.pocoo.org/snippets/56/
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    if isinstance(allowed_origins, str):
        # always have allowed_origins as a list of strings.
        allowed_origins = [allowed_origins]
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            # Get a hold of the request origin
            origin = request.environ.get('HTTP_ORIGIN')
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers
            # if the origin matches any of our allowed origins set the
            # access control header appropriately
            allow_origin = (origin if origin is not None and
                            allowed_origins is not None and
                            origin in allowed_origins else None)
            h['Access-Control-Allow-Origin'] = allow_origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if credentials:
                h['Access-Control-Allow-Credentials'] = 'true'
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)

    return decorator
9d352718406f62eaeb184a081b4223b67f6200f3
4,429
from typing import Callable


def make_vector_gradient(bcs: Boundaries) -> Callable:
    """make a discretized vector gradient operator for a cylindrical grid

    |Description_cylindrical|

    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            |Arg_boundary_conditions|

    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(1)

    # calculate preliminary quantities
    gradient_r = make_gradient(bcs.extract_component(0))
    gradient_z = make_gradient(bcs.extract_component(1))
    gradient_phi = make_gradient(bcs.extract_component(2))

    @jit_allocate_out(out_shape=(3, 3) + bcs.grid.shape)
    def vector_gradient(arr, out=None):
        """apply gradient operator to array `arr`"""
        gradient_r(arr[0], out=out[:, 0])
        gradient_z(arr[1], out=out[:, 1])
        gradient_phi(arr[2], out=out[:, 2])
        return out

    return vector_gradient
61d4f1a29d4a81e57ad37a6b963e201e5deabc06
4,430
def exec_in_terminal(command):
    """Run a command in the terminal and get the output, stripping the
    last newline.

    Args:
        command: a string or list of strings
    """
    return check_output(command).strip().decode("utf8")
1186649cebbd20559f7de0ba8aa743d70f35c924
4,431
def replace_string(original, start, end, replacement):
    """Replaces the specified range of |original| with |replacement|"""
    return original[0:start] + replacement + original[end:]
c71badb26287d340170cecdbae8d913f4bdc14c6
4,432
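A short usage sketch of the range-splice helper above:

s = replace_string("hello world", 6, 11, "there")
assert s == "hello there"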
def edit_mod():
    """ Admin endpoint used for sub transfers. """
    if not current_user.is_admin():
        abort(403)
    form = EditModForm()

    try:
        sub = Sub.get(fn.Lower(Sub.name) == form.sub.data.lower())
    except Sub.DoesNotExist:
        return jsonify(status='error', error=[_("Sub does not exist")])

    try:
        user = User.get(fn.Lower(User.name) == form.user.data.lower())
    except User.DoesNotExist:
        return jsonify(status='error', error=[_("User does not exist")])

    if form.validate():
        try:
            sm = SubMod.get((SubMod.sid == sub.sid) & (SubMod.uid == user.uid))
            sm.power_level = 0
            sm.invite = False
            sm.save()
        except SubMod.DoesNotExist:
            SubMod.create(sid=sub.sid, uid=user.uid, power_level=0)
        misc.create_sublog(misc.LOG_TYPE_SUB_TRANSFER, current_user.uid, sub.sid,
                           comment=user.name, admin=True)
        return jsonify(status='ok')
    return jsonify(status="error", error=get_errors(form))
debab16603e2cfe412eb0f819753fb7571a9c803
4,433
def get_current_info(symbol_list, columns='*'):
    """Retrieves the latest data (15 minute delay) for the provided symbols."""
    columns = ','.join(columns)
    symbols = __format_symbol_list(symbol_list)
    yql = ('select %s from %s where symbol in (%s)'
           % (columns, FINANCE_TABLES['quotes'], symbols))
    response = execute_yql_query(yql)
    return __validate_response(response, 'quote')
a13df0f44b31ac091a5283958cdb1aa675fe9bdc
4,434
def dictionarify_recpat_data(recpat_data):
    """
    Convert a list of flat dictionaries (single-record dicts) into a dictionary.
    If the given data structure is already a dictionary, it is left unchanged.
    """
    if isinstance(recpat_data, dict):
        return recpat_data
    return {track_id[0]: patterns[0]
            for track_id, patterns in
            [zip(*item.items()) for item in recpat_data]}
d1cdab68ab7445aebe1bbcce2f220c73d6db308f
4,435
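An illustrative input/output pair for the converter above (hypothetical data, since the record layout is only implied by the code):

recpat_data = [{1: "pat_a"}, {2: "pat_b"}]
assert dictionarify_recpat_data(recpat_data) == {1: "pat_a", 2: "pat_b"}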
def _get_qualified_name(workflow_name, job_name):
    """Construct a qualified name from workflow name and job name."""
    return workflow_name + _NAME_DELIMITER + job_name
29881480a9db33f18ff4b01abcdd1aaf39781f36
4,436
import numpy as np


def normalize_each_time_frame(input_array):
    """
    Normalize each time frame
    - Input: 3D numpy array
    - Output: 3D numpy array
    """
    for i in range(input_array.shape[0]):
        max_value = np.amax(input_array[i, :, :])
        if max_value != 0:
            input_array[i, :, :] = input_array[i, :, :] / max_value
    return input_array
bee7f41f17e4e24a654426f65c6a73c518abafca
4,437
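A minimal check of the per-frame normalization above; note that it modifies the input array in place:

import numpy as np

frames = np.array([[[1.0, 2.0]], [[0.0, 0.0]], [[4.0, 8.0]]])
normalize_each_time_frame(frames)
# each nonzero frame now has max 1.0; the all-zero frame is left untouched
assert frames.max() == 1.0 and frames[1].sum() == 0.0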
def pre_process_data(full_data):
    """Pre-process data: drop invalid values."""
    clean_data = full_data[(full_data["Temp"] > -10)]
    return clean_data
6172d4a77f5805c60ae9e4f146da2bd8283beef0
4,438
def invalid_grant(_):
    """Handles the Invalid Grant error when doing OAuth."""
    del current_app.blueprints['google'].token
    flash(("InvalidGrant Error"), category="danger")
    return redirect(url_for('index'))
95b8b20d3d96b46387c6dd23ede9b54c6b056da1
4,439
import difflib


def diff_text(a, b):
    """
    Performs a diffing algorithm on two pieces of text. Returns a string of HTML
    containing the content of both texts with <span> tags inserted indicating where
    the differences are.
    """
    def tokenise(text):
        """
        Tokenises a string by splitting it into individual characters and
        grouping the alphanumeric ones together.

        This means that punctuation, whitespace, CJK characters, etc become
        separate tokens and words/numbers are merged together to form bigger
        tokens. This makes the output of the diff easier to read as words are
        not broken up.
        """
        tokens = []
        current_token = ""

        for c in text:
            if c.isalnum():
                current_token += c
            else:
                if current_token:
                    tokens.append(current_token)
                    current_token = ""
                tokens.append(c)

        if current_token:
            tokens.append(current_token)

        return tokens

    a_tok = tokenise(a)
    b_tok = tokenise(b)
    sm = difflib.SequenceMatcher(lambda t: len(t) <= 4, a_tok, b_tok)

    changes = []

    for op, i1, i2, j1, j2 in sm.get_opcodes():
        if op == 'replace':
            for token in a_tok[i1:i2]:
                changes.append(('deletion', token))
            for token in b_tok[j1:j2]:
                changes.append(('addition', token))
        elif op == 'delete':
            for token in a_tok[i1:i2]:
                changes.append(('deletion', token))
        elif op == 'insert':
            for token in b_tok[j1:j2]:
                changes.append(('addition', token))
        elif op == 'equal':
            for token in a_tok[i1:i2]:
                changes.append(('equal', token))

    # Merge adjacent changes which have the same type. This just cleans up the HTML a bit
    merged_changes = []
    current_value = []
    current_change_type = None
    for change_type, value in changes:
        if change_type != current_change_type:
            if current_change_type is not None:
                merged_changes.append((current_change_type, ''.join(current_value)))
                current_value = []
            current_change_type = change_type
        current_value.append(value)

    if current_value:
        merged_changes.append((current_change_type, ''.join(current_value)))

    return TextDiff(merged_changes)
e15348e942ac3e6936872ec61f123a9241f49eba
4,440
from common.aes import encrypt
from common.datatypes import PasswordResetToken
from config import AUTH_TOKEN, RESET_PASSWORD_EXPIRE_SECONDS
from time import time
from urllib.parse import quote_plus
from utils import send_mail
import traceback
from operator import or_


def require_reset_password():
    """
    Request a password reset.
    Parameters:
    {
        "identifier": "user identifier"
    }
    Returns:
    {
        "code": 0,       // nonzero means the call failed
        "message": "qwq" // error message when code is nonzero
    }
    """
    if config.USE_PHONE_WHEN_REGISTER_AND_RESETPASSWD:
        return make_response(-1, message="Email verification is not used for password resets at the moment")
    if db.session.query(User).filter(User.email == request.form["identifier"]).count() > 1:
        return make_response(-1, message="This email corresponds to more than one user; please use the username instead")
    query = db.session.query(User).filter(or_(
        User.email == request.form["identifier"],
        User.username == request.form["identifier"]))
    if query.count() == 0:
        return make_response(-1, message="Wrong username or email")
    user: User = query.one()
    raw_json = PasswordResetToken(
        user.id, int(time()) + RESET_PASSWORD_EXPIRE_SECONDS, AUTH_TOKEN).as_json()
    # print(raw_json)
    to_send_token = encrypt(config.AUTH_PASSWORD, raw_json)
    # print("raw token", to_send_token)
    to_send_token = quote_plus(quote_plus(to_send_token))
    # print(to_send_token)
    # user.reset_token = str(uuid.uuid1())
    try:
        send_mail(config.RESET_PASSWORD_EMAIL.format(
            reset_token=to_send_token), "Reset password", user.email)
    except Exception:
        return make_response(-1, message=traceback.format_exc())
    return make_response(0, message="The password-reset email has been sent to your mailbox; please also check the spam folder")
aa1c14755485fe3ac5fc294e43fa1d4e610e0a83
4,441
def coerce_affine(affine, *, ndim, name=None):
    """Coerce a user input into an affine transform object.

    If the input is already an affine transform object, that same object is
    returned with a name change if the given name is not None. If the input
    is None, an identity affine transform object of the given dimensionality
    is returned.

    Parameters
    ----------
    affine : array-like or napari.utils.transforms.Affine
        An existing affine transform object or an array-like that is its
        transform matrix.
    ndim : int
        The desired dimensionality of the transform. Ignored if affine is an
        Affine transform object.
    name : str
        The desired name of the transform.

    Returns
    -------
    napari.utils.transforms.Affine
        The input coerced into an affine transform object.
    """
    if affine is None:
        affine = Affine(affine_matrix=np.eye(ndim + 1), ndim=ndim)
    elif isinstance(affine, np.ndarray):
        affine = Affine(affine_matrix=affine, ndim=ndim)
    elif isinstance(affine, list):
        affine = Affine(affine_matrix=np.array(affine), ndim=ndim)
    elif not isinstance(affine, Affine):
        raise TypeError(
            trans._(
                'affine input not recognized. must be either napari.utils.transforms.Affine or ndarray. Got {dtype}',
                deferred=True,
                dtype=type(affine),
            )
        )
    if name is not None:
        affine.name = name
    return affine
66900e32b83100004d2ea62a742fc0afe8a26cbb
4,442
def known_peaks():
    """Return a list of Peak instances with data (identified)."""
    peak1 = Peak(
        name="Test1Known",
        r_time=5.00,
        mz=867.1391,
        charge="+",
        inchi_key="IRPOHFRNKHKIQA-UHFFFAOYSA-N",
    )
    peak2 = Peak(
        name="Test2Known",
        r_time=8.00,
        mz=260.0297,
        charge="-",
        inchi_key="HXXFSFRBOHSIMQ-FPRJBGLDSA-N",
    )
    return [peak1, peak2]
3f7d5eb5b16d61f09c0c10f32e9d8d40324e2d5d
4,444
def explode_sheet_music(sheet_music):
    """
    Splits unformatted sheet music into formatted lines of LINE_LENGTH_LIM
    and returns a list of such lines.
    """
    split_music = sheet_music.split(',')
    split_music = list(map(lambda note: note + ',', split_music))
    split_list = []
    counter = 0
    line_counter = 1
    for note in split_music:
        if line_counter > LINES_LIMIT - 1:
            break
        if counter + len(note) > LINE_LENGTH_LIM - 2:
            split_list[-1] = split_list[-1].rstrip(',')
            split_list[-1] += END_OF_LINE_CHAR
            counter = 0
            line_counter += 1
        split_list.append(note)
        counter += len(note)
    return split_list
f89ae58a0deb315c61419bd381cd0bf84f079c3e
4,445
import numpy as np


def norm_coefficient(m, n):
    """
    Calculate the normalization coefficient for the (m, n) Zernike mode.

    Parameters
    ----------
    m : int
        m-th azimuthal Zernike index
    n : int
        n-th radial Zernike index

    Returns
    -------
    norm_coeff : float
        Noll normalization coefficient
    """
    norm_coeff = np.sqrt(2 * (n + 1) / (1 + (m == 0)))
    return norm_coeff
1632ffac5e771e4ab16b3f7918d9543ffd67171e
4,446
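A worked value for the Noll coefficient above: for m = 0, n = 2 it is sqrt(2*(2+1)/2) = sqrt(3), while m != 0 drops the factor 1/2:

import numpy as np

assert np.isclose(norm_coefficient(0, 2), np.sqrt(3.0))
assert np.isclose(norm_coefficient(1, 1), 2.0)  # sqrt(2*(1+1)/1)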
def get_waveglow(ckpt_url):
    """
    Init WaveGlow vocoder model with weights.
    Used to generate realistic audio from mel-spectrogram.
    """
    wn_config = {
        'n_layers': hp.wg_n_layers,
        'n_channels': hp.wg_n_channels,
        'kernel_size': hp.wg_kernel_size
    }
    audio_config = {
        'wav_value': hp.wg_wav_value,
        'sampling_rate': hp.wg_sampling_rate
    }
    model = WaveGlow(
        n_mel_channels=hp.wg_n_mel_channels,
        n_flows=hp.wg_n_flows,
        n_group=hp.wg_n_group,
        n_early_every=hp.wg_n_early_every,
        n_early_size=hp.wg_n_early_size,
        wn_config=wn_config
    )
    load_checkpoint(ckpt_url, model)
    model.set_train(False)
    return model, audio_config
a5b494299fae98be2bb5f764ed7a53fc42d36eff
4,447
def user_exists(keystone, user):
    """Return True if user already exists."""
    return user in [x.name for x in keystone.users.list()]
17d99e12c0fc128607a815f0b4ab9897c5d45578
4,448
from typing import List
from typing import Dict
import itertools


def gen_cartesian_product(*args: List[Dict]) -> List[Dict]:
    """Generate the cartesian product of lists (typically used for parametrization).

    Args:
        args (list of list): lists to be combined via cartesian product

    Returns:
        list: cartesian product in list

    Examples:
        >>> arg1 = [{"a": 1}, {"a": 2}]
        >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}]
        >>> args = [arg1, arg2]
        >>> gen_cartesian_product(*args)
        >>> # same as below
        >>> gen_cartesian_product(arg1, arg2)
        [
            {'a': 1, 'x': 111, 'y': 112},
            {'a': 1, 'x': 121, 'y': 122},
            {'a': 2, 'x': 111, 'y': 112},
            {'a': 2, 'x': 121, 'y': 122}
        ]
    """
    if not args:
        return []
    elif len(args) == 1:
        return args[0]

    # past the checks above there are at least two argument lists
    product_list = []
    # itertools.product(*args) yields the cartesian product, equivalent to
    # nested for loops, e.g.:
    #   ({'a': 1}, {'x': 111, 'y': 112})
    #   ({'a': 1}, {'x': 121, 'y': 122})
    #   ({'a': 2}, {'x': 111, 'y': 112})
    #   ({'a': 2}, {'x': 121, 'y': 122})
    for product_item_tuple in itertools.product(*args):
        # merge the dicts of each tuple into one dict
        product_item_dict = {}
        for item in product_item_tuple:
            product_item_dict.update(item)
        product_list.append(product_item_dict)

    # e.g. [{'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122},
    #       {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122}]
    return product_list
cbe85f440f399b523aa70bc10733ea175dc93f7a
4,449
import pandas as pd


def get_234_df(x):
    """Get the dataframe for models 2.1, 2.2 and 2.3.

    input: x, the subCategory value we want
    output: the dataframe filtered to x
    """
    styles = pd.read_csv("styles.csv", error_bad_lines=False)
    styles = styles.drop(["productDisplayName"], axis=1)
    styles = styles.drop(["year"], axis=1)
    styles = styles[(styles.masterCategory == 'Apparel') |
                    (styles.masterCategory == 'Footwear')]
    styles = styles.drop(styles[styles["subCategory"] == "Innerwear"].index)
    styles = styles.dropna()
    styles = df_drop(styles, "subCategory",
                     ["Apparel Set", "Dress", "Loungewear and Nightwear",
                      "Saree", "Socks"])
    styles["subCategory"] = styles["subCategory"].transform(
        lambda x: "Footwear" if (x in ["Shoes", "Flip Flops", "Sandal"]) else x)
    styles = styles.drop(labels=[6695, 16194, 32309, 36381, 40000], axis=0)
    styles = styles[styles.subCategory == x]
    group_color(styles)
    styles.baseColour = styles.colorgroup
    return styles
c9d456ae058492e5e242bbde2288885158681f98
4,450
def appropriate_bond_orders(params, smrts_mol, smrts):
    """Checks if a SMARTS substring specification has appropriate bond orders
    given the user-specified mode.

    :param params: A dictionary of the user parameters and filters.
    :type params: dict
    :param smrts_mol: RDKit mol object of the SMARTS string.
    :type smrts_mol: RDKit mol object.
    :param smrts: The SMARTS string.
    :type smrts: str
    :return: 'True' if it validates, 'False' otherwise.
    :rtype: bool
    """
    # Test if double bonds are inappropriately specified.
    if params["mode"] == "NONE" and (
        ".pdb" in params["ligand_exts"] or ".pdbqt" in params["ligand_exts"]
    ):
        bond_orders = [b.GetBondTypeAsDouble() for b in smrts_mol.GetBonds()]
        bond_orders = [o for o in bond_orders if o != 1.0]
        if len(bond_orders) > 0:
            # So it has bonds with orders greater than 1
            output.error(
                "When processing PDB- and PDBQT-formatted ligands in NONE "
                + "mode, LigGrep ignores bond orders and simply "
                + "assumes that all appropriately juxtaposed atoms are "
                + "connected by single bonds. But one (or more) of your "
                + "filters describes a substructure with bonds of higher "
                + "orders: " + smrts,
                params,
            )
            return False
    return True
045abda277716812694cc1093256742e1d67a016
4,451
def train(model, train_path, val_path, steps_per_epoch, batch_size, records_path):
    """Train the Keras graph model.

    Parameters:
        model (keras Model): The Model defined in build_model
        train_path (str): Path to training data
        val_path (str): Path to validation data
        steps_per_epoch (int): Len(training_data)/batch_size
        batch_size (int): Size of mini-batches used during training
        records_path (str): Path + prefix to output directory

    Returns:
        loss (ndarray): An array with the validation loss at each epoch
    """
    adam = Adam(lr=0.001)
    model.compile(loss='binary_crossentropy', optimizer=adam)
    train_generator = data_generator(train_path, batch_size, seqlen=500)
    val_generator = data_generator(val_path, 200000, seqlen=500)
    validation_data = next(val_generator)
    precision_recall_history = PrecisionRecall(validation_data)
    # adding check-pointing
    checkpointer = ModelCheckpoint(records_path + 'model_epoch{epoch}.hdf5',
                                   verbose=1, save_best_only=False)
    # defining parameters for early stopping
    # earlystop = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
    #                           patience=5)
    # training the model..
    hist = model.fit_generator(epochs=15, steps_per_epoch=steps_per_epoch,
                               generator=train_generator,
                               validation_data=validation_data,
                               callbacks=[precision_recall_history, checkpointer])
    loss, val_pr = save_metrics(hist, precision_recall_history,
                                records_path=records_path)
    return loss, val_pr
24a8080a8b4738f7eb32846729b006ca2237a576
4,452
def Mcnu_to_m1m2(Mc, nu):
    """Convert chirp mass, symmetric mass ratio pair to m1, m2"""
    q = nu_to_q(nu)
    M = Mcq_to_M(Mc, q)
    return Mq_to_m1m2(M, q)
8b4eb6e49549607bda0ea9a17baec8c4d0b38cb6
4,453
import functools


def _AccumulateActions(args):
    """Given program arguments, determines what actions we want to run.

    Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct
    a ResultsReport, and the str is the file extension for the given report.
    """
    results = []
    # The order of these is arbitrary.
    if args.json:
        results.append((JSONResultsReport, 'json'))
    if args.text:
        results.append((TextResultsReport, 'txt'))
    if args.email:
        email_ctor = functools.partial(TextResultsReport, email=True)
        results.append((email_ctor, 'email'))
    # We emit HTML if nothing else was specified.
    if args.html or not results:
        results.append((HTMLResultsReport, 'html'))
    return results
73925fe55e6986e1222a5e88f804caaa9793044a
4,454
def build_predictions_dictionary(data, class_label_map):
    """Builds a predictions dictionary from predictions data in CSV file.

    Args:
        data: Pandas DataFrame with the predictions data for a single image.
        class_label_map: Class labelmap from string label name to an integer.

    Returns:
        Dictionary with keys suitable for passing to
        OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info:
            standard_fields.DetectionResultFields.detection_boxes: float32 numpy
                array of shape [num_boxes, 4] containing `num_boxes` detection
                boxes of the format [ymin, xmin, ymax, xmax] in absolute image
                coordinates.
            standard_fields.DetectionResultFields.detection_scores: float32 numpy
                array of shape [num_boxes] containing detection scores for the
                boxes.
            standard_fields.DetectionResultFields.detection_classes: integer numpy
                array of shape [num_boxes] containing 1-indexed detection classes
                for the boxes.
    """
    dictionary = {
        standard_fields.DetectionResultFields.detection_classes:
            data['LabelName'].map(lambda x: class_label_map[x]).to_numpy(),
        standard_fields.DetectionResultFields.detection_scores:
            data['Score'].to_numpy().astype(float)
    }
    if 'Mask' in data:
        segments, boxes = _decode_raw_data_into_masks_and_boxes(
            data['Mask'], data['ImageWidth'], data['ImageHeight'])
        dictionary[standard_fields.DetectionResultFields.detection_masks] = segments
        dictionary[standard_fields.DetectionResultFields.detection_boxes] = boxes
    else:
        dictionary[standard_fields.DetectionResultFields.detection_boxes] = data[[
            'YMin', 'XMin', 'YMax', 'XMax'
        ]].to_numpy().astype(float)
    return dictionary
738e1c9c4568bc689ecda9765c3412d36f2d73ec
4,455
def create_file_link(link_id, file_id, parent_share_id, parent_datastore_id):
    """
    DB wrapper to create a link between a file and a datastore or a share.

    Takes care of "degenerated" tree structures (e.g. a child has two parents).
    In addition, checks if the link already exists, as this is a crucial part
    of the access rights system.

    :param link_id:
    :param file_id:
    :param parent_share_id:
    :param parent_datastore_id:
    :return:
    """
    try:
        File_Link.objects.create(
            link_id=link_id,
            file_id=file_id,
            parent_datastore_id=parent_datastore_id,
            parent_share_id=parent_share_id
        )
    except Exception:
        return False
    return True
0c4abe1d5aa4bce8bd489f8bec1ae900a9194631
4,456
def deptree(lines):
    """Build a tree of what step depends on what other step(s).

    Test input becomes
    {'A': set(['C']), 'C': set([]), 'B': set(['A']),
     'E': set(['B', 'D', 'F']), 'D': set(['A']), 'F': set(['C'])}

    A depends on C
    B depends on A
    C depends on nothing (starting point)
    D depends on A
    E depends on B, D, F
    F depends on C
    """
    coll = defaultdict(set)
    for line in lines:
        parts = line.split()
        coll[parts[7]].add(parts[1])
        if parts[1] not in coll:
            coll[parts[1]] = set()
    return dict(coll)
9a435a0e78dd3a68c97df4bcd2e03583841de216
4,457
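Fed with the Advent-of-Code-style lines implied by the docstring's example, the parser above yields exactly the mapping shown there:

lines = [
    "Step C must be finished before step A can begin.",
    "Step C must be finished before step F can begin.",
    "Step A must be finished before step B can begin.",
    "Step A must be finished before step D can begin.",
    "Step B must be finished before step E can begin.",
    "Step D must be finished before step E can begin.",
    "Step F must be finished before step E can begin.",
]
tree = deptree(lines)
assert tree['E'] == {'B', 'D', 'F'} and tree['C'] == set()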
from datetime import datetime


def get_datetime(time_str, model="0"):
    """Format a time string, e.g. '20200120.110227' -> '2020-01-20 11:02:27'.

    Returns a datetime object.
    """
    if model == "0":
        time_str = get_time(time_str)
    # the original called datetime.datetime.strptime, which fails under
    # "from datetime import datetime"
    time = datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")
    return time
568c46efab9366f64fc0e286cf9876cd48e7a9bb
4,458
def gather_parent_cnvs(vcf, fa, mo):
    """
    Create BEDTools corresponding to parent CNVs for coverage-based inheritance
    """
    cnv_format = '{0}\t{1}\t{2}\t{3}\t{4}\n'
    fa_cnvs = ''
    mo_cnvs = ''

    for record in vcf:
        # Do not include variants from sex chromosomes
        if record.chrom in sex_chroms:
            continue

        # Process biallelic CNVs
        if record.info['SVTYPE'] in 'DEL DUP'.split() \
                and 'MULTIALLELIC' not in record.filter:
            # Father
            fa_ac = get_AC(get_GT(record, fa))
            if fa_ac != 'NA':
                if int(fa_ac) > 0:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop),
                                                record.info['SVTYPE'], fa_ac)
                    fa_cnvs = fa_cnvs + new_cnv
            # Mother
            mo_ac = get_AC(get_GT(record, mo))
            if mo_ac != 'NA':
                if int(mo_ac) > 0:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop),
                                                record.info['SVTYPE'], mo_ac)
                    mo_cnvs = mo_cnvs + new_cnv

        # Process multiallelic CNVs
        if record.info['SVTYPE'] == 'MCNV' and 'MULTIALLELIC' in record.filter:
            # Father
            fa_ac = get_GT(record, fa).split('/')[1]
            if fa_ac != 'None':
                fa_ac = int(fa_ac)
                if fa_ac < 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DEL',
                                                str(2 - fa_ac))
                    fa_cnvs = fa_cnvs + new_cnv
                elif fa_ac > 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DUP',
                                                str(fa_ac - 2))
                    fa_cnvs = fa_cnvs + new_cnv
            # Mother
            mo_ac = get_GT(record, mo).split('/')[1]
            if mo_ac != 'None':
                mo_ac = int(mo_ac)
                if mo_ac < 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DEL',
                                                str(2 - mo_ac))
                    mo_cnvs = mo_cnvs + new_cnv
                elif mo_ac > 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DUP',
                                                str(mo_ac - 2))
                    mo_cnvs = mo_cnvs + new_cnv

    fa_cnvs = pbt.BedTool(fa_cnvs, from_string=True)
    mo_cnvs = pbt.BedTool(mo_cnvs, from_string=True)

    return fa_cnvs, mo_cnvs
2d685586a917bbd94f87758221b4d06c2d6ad7c1
4,459
import json


def create_images():
    """
    Create new images.

    Internal Parameters:
        image (FileStorage): Image

    Returns:
        success (boolean)
        image (list)
    """
    image_file = request.files.get('image')
    validate_image_data({"image": image_file})
    image_url_set = create_img_set(image_file)

    # create image
    image = Image(**{
        "user_id": auth_user_id(),
        "url": json.dumps(image_url_set)
    })

    try:
        image.insert()
        # return the result
        return jsonify({
            'success': True,
            'image': image.format()
        })
    except Exception:
        abort(400)
682bb2f6044265fbd6c29c4ff6581d6bc2469edb
4,460
from typing import Union
from pathlib import Path
from typing import Optional


def docx2python(
    docx_filename: Union[str, Path],
    image_folder: Optional[str] = None,
    html: bool = False,
    paragraph_styles: bool = False,
    extract_image: bool = None,
) -> DocxContent:
    """
    Unzip a docx file and extract contents.

    :param docx_filename: path to a docx file
    :param image_folder: optionally specify an image folder
        (images in docx will be copied to this folder)
    :param html: bool, extract some formatting as html
    :param paragraph_styles: prepend the paragraph's style (if any, else "") to each
        paragraph. This will only be useful with ``*_runs`` attributes.
    :param extract_image: bool, extract images from document (default True)
    :return: DocxContent object
    """
    if extract_image is not None:
        warn(
            "'extract_image' is no longer a valid argument for docx2python. If an "
            "image_folder is given as an argument to docx2python, images will be "
            "written to that folder. A folder can be provided later with "
            "``docx2python(filename).write_images(image_folder)``. Images files are "
            "available as before with ``docx2text(filename).images`` attribute."
        )
    docx_context = DocxReader(docx_filename, html, paragraph_styles)
    docx_content = DocxContent(docx_context, locals())
    if image_folder:
        _ = docx_content.images
    return docx_content
557ba8502b62ffc771a7d3b6f88a8b769dd55d68
4,461
def parcel_analysis(con_imgs, parcel_img, msk_img=None, vcon_imgs=None,
                    design_matrix=None, cvect=None, fwhm=8,
                    smooth_method='default', res_path=None):
    """
    Helper function for Bayesian parcel-based analysis.

    Given a sequence of independent images registered to a common space (for
    instance, a set of contrast images from a first-level fMRI analysis),
    perform a second-level analysis assuming constant effects throughout
    parcels defined from a given label image in reference space. Specifically,
    a model of the following form is assumed:

        Y = X * beta + variability,

    where Y denotes the input image sequence, X is a design matrix, and beta
    are parcel-wise parameter vectors. The algorithm computes the Bayesian
    posterior probability of cvect'*beta, where cvect is a given contrast
    vector, in each parcel using an expectation propagation scheme.

    Parameters
    ----------
    con_imgs: sequence of nipy-like images
        Images input to the group analysis.
    parcel_img: nipy-like image
        Label image where each label codes for a parcel.
    msk_img: nipy-like image, optional
        Binary mask to restrict analysis. By default, analysis is carried out
        on all parcels with nonzero value.
    vcon_imgs: sequence of nipy-like images, optional
        First-level variance estimates corresponding to `con_imgs`. This is
        useful if the input images are "noisy". By default, first-level
        variances are assumed to be zero.
    design_matrix: array, optional
        If None, a one-sample analysis model is used. Otherwise, an array with
        shape (n, p) where `n` matches the number of input scans, and `p` is
        the number of regressors.
    cvect: array, optional
        Contrast vector of interest. The method makes an inference on the
        contrast defined as the dot product cvect'*beta, where beta are the
        unknown parcel-wise effects. If None, `cvect` is assumed to be
        np.array((1,)). However, the `cvect` argument is mandatory if
        `design_matrix` is provided.
    fwhm: float, optional
        A parameter that represents the localization uncertainty in reference
        space in terms of the full width at half maximum of an isotropic
        Gaussian kernel.
    smooth_method: str, optional
        One of 'default' and 'spm'. Setting `smooth_method=spm` results in
        simply smoothing the input images using a Gaussian kernel, while the
        default method involves more complex smoothing in order to propagate
        spatial uncertainty into the inference process.
    res_path: str, optional
        An existing path to write output images. If None, no output is
        written.

    Returns
    -------
    pmap_mu_img: nipy image
        Image of posterior contrast means for each parcel.
    pmap_prob_img: nipy image
        Corresponding image of posterior probabilities of positive contrast.
    """
    p = ParcelAnalysis(con_imgs, parcel_img, parcel_info=None,
                       msk_img=msk_img, vcon_imgs=vcon_imgs,
                       design_matrix=design_matrix, cvect=cvect,
                       fwhm=fwhm, smooth_method=smooth_method,
                       res_path=res_path)
    return p.parcel_maps()
8abface7ad72f5ca2679dc9a8ea6cedd93f681a5
4,462
from functools import wraps


def memoize(fn):
    """Simple memoization decorator for functions and methods; assumes that
    all arguments to the function can be hashed and compared.
    """
    memoized_values = {}

    @wraps(fn)
    def wrapped_fn(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        try:
            return memoized_values[key]
        except KeyError:
            memoized_values[key] = fn(*args, **kwargs)
        return memoized_values[key]

    return wrapped_fn
2a48fad065e04a7eed9b9865adc0640f2a7cff9f
4,463
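Typical use of the decorator above; repeated calls with the same arguments hit the cache instead of recomputing:

@memoize
def slow_square(x):
    print(f"computing {x}")
    return x * x

slow_square(4)  # prints "computing 4", returns 16
slow_square(4)  # cache hit, returns 16 silently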
from argparse import Namespace

import pandas as pd
from pydantic import BaseModel  # noqa: E0611
import torch


def validate(
    args: Namespace,
    model: BaseModel
) -> pd.DataFrame:
    """Perform the validation.

    Parameters
    ----------
    args : Namespace
        Arguments to configure the model and the validation.
    model : BaseModel
        The model to be used for validation.

    Returns
    -------
    pd.DataFrame
        A DataFrame with the metric results.

    See Also
    --------
    ptlflow.models.base_model.base_model.BaseModel : The parent class of the available models.
    """
    model.eval()
    if torch.cuda.is_available():
        model = model.cuda()

    dataloaders = model.val_dataloader()
    dataloaders = {model.val_dataloader_names[i]: dataloaders[i]
                   for i in range(len(dataloaders))}

    metrics_df = pd.DataFrame()
    metrics_df['model'] = [args.model]
    metrics_df['checkpoint'] = [args.pretrained_ckpt]

    for dataset_name, dl in dataloaders.items():
        metrics_mean = validate_one_dataloader(args, model, dl, dataset_name)
        metrics_df[[f'{dataset_name}-{k}' for k in metrics_mean.keys()]] = list(metrics_mean.values())

    args.output_path.mkdir(parents=True, exist_ok=True)
    metrics_df.T.to_csv(args.output_path / 'metrics.csv', header=False)
    metrics_df = metrics_df.round(3)
    return metrics_df
456ec24e1639970db285e260028e9ba3bd4d2e31
4,464
def stats(last_day=None, timeframe=None, dates_sources=None):
    """See :class:`bgpranking.api.get_stats`"""
    query = {'method': 'stats'}
    query.update({'last_day': last_day, 'timeframe': timeframe,
                  'dates_sources': dates_sources})
    return __prepare_request(query)
5da42848926372fa5fe90338529ab47396203fd8
4,465
def restore_purchases() -> None:
    """restore_purchases() -> None

    (internal)
    """
    return None
7f047cdfe892bd724c2203d846762c3b3786d7c2
4,466
import time

import numpy as np
from scipy import spatial


def sim_mat(fc7_feats):
    """
    Given a matrix of features, generate the similarity matrix S and sparsify it.
    :param fc7_feats: the fc7 features
    :return: matrix_S - the sparsified matrix S
    """
    print("Something")
    t = time.time()
    pdist_ = spatial.distance.pdist(fc7_feats)
    print('Created distance matrix' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    dist_mat = spatial.distance.squareform(pdist_)
    print('Created square distance matrix' + ' ' + str(time.time() - t) + ' sec')
    del pdist_

    t = time.time()
    sigmas = np.sort(dist_mat, axis=1)[:, 7] + 1e-16
    matrice_prodotti_sigma = np.dot(sigmas[:, np.newaxis], sigmas[np.newaxis, :])
    print('Generated Sigmas' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    dist_mat /= -matrice_prodotti_sigma
    print('Computed dists/-sigmas' + ' ' + str(time.time() - t) + ' sec')
    del matrice_prodotti_sigma

    t = time.time()
    # in-place exponential: same as W = np.exp(-(dist_mat / matrice_prodotti_sigma))
    W = np.exp(dist_mat, dist_mat)
    np.fill_diagonal(W, 0.)

    # sparsify the matrix
    k = int(np.floor(np.log2(fc7_feats.shape[0])) + 1)
    n = W.shape[0]
    print('Created inplace similarity matrix' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    for x in W:
        x[np.argpartition(x, n - k)[:(n - k)]] = 0.0
    print('Sparsify the matrix' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    m1 = W[np.triu_indices(n, k=1)]
    m2 = W.T[np.triu_indices(n, k=1)]
    W = spatial.distance.squareform(np.maximum(m1, m2))
    print('Symmetrized the similarity matrix' + ' ' + str(time.time() - t) + ' sec')

    return W
6b896a4912f4a9a8fc765674ad47e17aea73bfa0
4,467
def text_split(in_text, insert_points, char_set):
    """
    Returns: Input Text Split into Text and Nonce Strings.
    """
    nonce_key = []
    encrypted_nonce = ""
    in_list = list(in_text)
    for pos in range(3967):
        if insert_points[pos] >= len(in_list) - 1:
            point = len(in_list) - 2
        else:
            point = insert_points[pos]
        char = in_list[point]
        in_list.pop(point)
        nonce_key.append(char)
        if char != char_set[-1]:  # was "is not", which tests identity, not equality
            break
    length = ((len(nonce_key) - 1) * (len(char_set) - 2)) \
        + char_set.index(nonce_key[len(nonce_key) - 1])
    for pos in range(length):
        if insert_points[pos + len(nonce_key)] >= len(in_list) - 1:
            point = len(in_list) - 2
        else:
            point = insert_points[pos + len(nonce_key)]
        char = in_list[point]
        in_list.pop(point)
        encrypted_nonce = encrypted_nonce + char
    return "".join(in_list), encrypted_nonce
15f496513e63236b0df7e2d8a8949a8b2e632af4
4,468
import decimal

import numpy as np


def f_approximation(g_matrix, coeficients_array):
    """Return a vector with the approximated value of f, given the
    coefficients ak.
    """
    decimal.getcontext().prec = PRECSION
    decimal.getcontext().rounding = ROUNDING_MODE
    num_of_xs = len(g_matrix[0])
    num_of_coeficients = len(g_matrix)
    f_approx_array = np.full(num_of_xs, decimal.Decimal('0'))
    for i in range(0, num_of_xs):
        approx_sum = 0
        for k in range(0, num_of_coeficients):
            approx_sum += coeficients_array[k] * g_matrix[k][i]
        f_approx_array[i] = approx_sum
    return f_approx_array
f5f8ce78b07e877c521a6374548e21273c61dcee
4,469
def _module_exists(module_name):
    """
    Checks if a module exists.

    :param str module_name: module to check existence of

    :returns: **True** if module exists and **False** otherwise
    """
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
8f3ed2e97ee6dbb41d6e84e9e5595ec8b6f9b339
4,470
def users(request):
    """Show a list of users and their puzzles."""
    context = {'user_list': []}
    for user in User.objects.all().order_by('username'):
        objs = Puzzle.objects.filter(
            user=user, pub_date__lte=timezone.now()).order_by('-number')
        if objs:
            puzzle_list = []
            for puz in objs:
                puzzle_list.append({'number': puz.number, 'date': get_date_string(puz)})
            context['user_list'].append({'name': user.username, 'puzzles': puzzle_list})
    return render(request, 'puzzle/users.html', context)
abf953394af6baff08bedf252796eb0d89cba3f4
4,471
from xidplus.stan_fit import get_stancode


def MIPS_SPIRE_gen(phot_priors, sed_prior_model, chains=4, seed=5363, iter=1000,
                   max_treedepth=10, adapt_delta=0.8):
    """Fit the MIPS 24 band and the three SPIRE bands.

    :param phot_priors: list of xidplus.prior class objects.
        Order (MIPS24, SPIRE250, SPIRE350, SPIRE500)
    :param sed_prior_model: xidplus.sed.sed_prior class
    :param chains: number of chains
    :param iter: number of iterations
    :return: pystan fit object
    """
    prior24 = phot_priors[0]
    prior250 = phot_priors[1]
    prior350 = phot_priors[2]
    prior500 = phot_priors[3]

    # input data into a dictionary
    XID_data = {
        'nsrc': prior250.nsrc,
        'bkg_prior': [prior24.bkg[0], prior250.bkg[0], prior350.bkg[0], prior500.bkg[0]],
        'bkg_prior_sig': [prior24.bkg[1], prior250.bkg[1], prior350.bkg[1], prior500.bkg[1]],
        'conf_prior_sig': [0.0001, 0.1, 0.1, 0.1],
        'z_median': prior24.z_median,
        'z_sig': prior24.z_sig,
        'npix_psw': prior250.snpix,
        'nnz_psw': prior250.amat_data.size,
        'db_psw': prior250.sim,
        'sigma_psw': prior250.snim,
        'Val_psw': prior250.amat_data,
        'Row_psw': prior250.amat_row.astype(np.long),
        'Col_psw': prior250.amat_col.astype(np.long),
        'npix_pmw': prior350.snpix,
        'nnz_pmw': prior350.amat_data.size,
        'db_pmw': prior350.sim,
        'sigma_pmw': prior350.snim,
        'Val_pmw': prior350.amat_data,
        'Row_pmw': prior350.amat_row.astype(np.long),
        'Col_pmw': prior350.amat_col.astype(np.long),
        'npix_plw': prior500.snpix,
        'nnz_plw': prior500.amat_data.size,
        'db_plw': prior500.sim,
        'sigma_plw': prior500.snim,
        'Val_plw': prior500.amat_data,
        'Row_plw': prior500.amat_row.astype(np.long),
        'Col_plw': prior500.amat_col.astype(np.long),
        'npix_mips24': prior24.snpix,
        'nnz_mips24': prior24.amat_data.size,
        'db_mips24': prior24.sim,
        'sigma_mips24': prior24.snim,
        'Val_mips24': prior24.amat_data,
        'Row_mips24': prior24.amat_row.astype(np.long),
        'Col_mips24': prior24.amat_col.astype(np.long),
        'nTemp': sed_prior_model.shape[0],
        'nz': sed_prior_model.shape[2],
        'nband': sed_prior_model.shape[1],
        'SEDs': sed_prior_model,
    }

    # see if the model has already been compiled. If not, compile and save it
    model_file = '/XID+MIPS_SPIRE_SED_gen'
    sm = get_stancode(model_file)
    fit = sm.sampling(data=XID_data, iter=iter, chains=chains, seed=seed, verbose=True,
                      control=dict(max_treedepth=max_treedepth, adapt_delta=adapt_delta))
    # return fit data
    return fit
be3d168e6a7a5a8159e83371059cfcbd1f0c187e
4,472
def check_cstr(solver, indiv):
    """Check the number of constraint violations of the individual.

    Parameters
    ----------
    solver : Solver
        Global optimization problem solver
    indiv : individual
        Individual of the population

    Returns
    -------
    is_feasible : bool
        Individual feasibility
    """
    # An invalid simulation violates every constraint
    if not indiv.is_simu_valid:
        indiv.cstr_viol = len(solver.problem.constraint)
        return True  # To not add errors to infeasible

    # Browse constraints
    for constraint in solver.problem.constraint:
        # Compute the value to compare
        var_val = constraint.get_variable(indiv.output)

        # Compare the value with the constraint
        type_const = constraint.type_const
        if type_const == "<=":
            if var_val > constraint.value:
                indiv.cstr_viol += 1
        elif type_const in ["==", "="]:
            if var_val != constraint.value:
                indiv.cstr_viol += 1
        elif type_const == ">=":
            if var_val < constraint.value:
                indiv.cstr_viol += 1
        elif type_const == "<":
            if var_val >= constraint.value:
                indiv.cstr_viol += 1
        elif type_const == ">":
            if var_val <= constraint.value:
                indiv.cstr_viol += 1
        else:
            raise ValueError("Wrong type of constraint")

    return indiv.cstr_viol == 0
2aa52d2badfb45d8e289f8314700648ddc621252
4,473
def _FixFsSelectionBit(key, expected):
    """Write a repair script to fix a bad fsSelection bit.

    Args:
        key: The name of an fsSelection flag, eg 'ITALIC' or 'BOLD'.
        expected: Expected value, true/false, of the flag.
    Returns:
        A python script to fix the problem.
    """
    if not _ShouldFix('fsSelection'):
        return None
    op = '|='
    verb = 'set'
    mask = bin(fonts.FsSelectionMask(key))
    if not expected:
        op = '&='
        verb = 'unset'
        mask = '~' + mask
    return 'ttf[\'OS/2\'].fsSelection %s %s  # %s %s' % (op, mask, verb, key)
6dda9ccbb565857c4187afc4dada6dd84653b427
4,474
import numpy as np
# binary_dilation is assumed to come from scipy.ndimage; the original record
# does not show its import
from scipy.ndimage import binary_dilation


def dilation_dist(path_dilation, n_dilate=None):
    """Compute a surface of distances with dilation.

    :param path_dilation: binary array with zeros everywhere except for paths
    :param n_dilate: how often to dilate --> defines the radius of the corridor
    :returns: 2dim array of the same shape as path_dilation, with values
        0 = infinite distance from path
        n_dilate = path location
    """
    saved_arrs = [path_dilation]
    if n_dilate is None:
        # compute number of iterations: maximum distance of pixel to line
        x_coords, y_coords = np.where(path_dilation)
        x_len, y_len = path_dilation.shape
        # dilate as much as the largest distance from the sides
        n_dilate = max([
            np.min(x_coords), x_len - np.max(x_coords),
            np.min(y_coords), y_len - np.max(y_coords)
        ])

    # dilate
    for _ in range(n_dilate):
        path_dilation = binary_dilation(path_dilation)
        saved_arrs.append(path_dilation)
    saved_arrs = np.sum(np.array(saved_arrs), axis=0)
    return saved_arrs
0d35ec5a0a14b026f0df228ae752f104502b82ba
4,475
import numpy as np


def plot_rgb_phases(absolute, phase):
    """
    Calculates a visualization of an inverse Fourier transform, where the
    absolute value is plotted as brightness and the phase is plotted as color.

    :param absolute: 2D numpy array containing the absolute value
    :param phase: 2D numpy array containing phase information in units of pi
        (should range from -1 to +1!)
    :return: numpy array containing red, green and blue values
    """
    red = 0.5 * (np.sin(phase * np.pi) + 1) * absolute / absolute.max()
    green = 0.5 * (np.sin(phase * np.pi + 2 / 3 * np.pi) + 1) * absolute / absolute.max()
    blue = 0.5 * (np.sin(phase * np.pi + 4 / 3 * np.pi) + 1) * absolute / absolute.max()
    return np.dstack([red, green, blue])
d2f12df2af25925ae9607ef102f4b0fc1cb01373
4,476
def normal_shock_pressure_ratio(M, gamma):
    """Gives the normal shock static pressure ratio as a function of upstream Mach number."""
    return 1.0 + 2.0 * gamma / (gamma + 1.0) * (M**2.0 - 1.0)
30d0a339b17bab2b662fecd5b19073ec6478a1ec
4,479
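A worked number for the relation above: for M = 2 and gamma = 1.4, p2/p1 = 1 + (2*1.4/2.4)*(4 - 1) = 4.5:

assert abs(normal_shock_pressure_ratio(2.0, 1.4) - 4.5) < 1e-9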
from typing import Tuple

import numpy


def _lorentz_berthelot(
    epsilon_1: float, epsilon_2: float, sigma_1: float, sigma_2: float
) -> Tuple[float, float]:
    """Apply Lorentz-Berthelot mixing rules to a pair of LJ parameters."""
    return numpy.sqrt(epsilon_1 * epsilon_2), 0.5 * (sigma_1 + sigma_2)
b27c282cec9f880442be4e83f4965c0ad79dfb1e
4,480
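Usage of the mixing rule above: geometric mean for epsilon, arithmetic mean for sigma:

eps, sig = _lorentz_berthelot(epsilon_1=0.2, epsilon_2=0.8, sigma_1=3.0, sigma_2=4.0)
# eps == sqrt(0.2 * 0.8) == 0.4, sig == 3.5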
def verify_password_str(password, password_db_str):
    """Verify password matches database string."""
    split_password_db = password_db_str.split('$')
    algorithm = split_password_db[0]
    salt = split_password_db[1]
    return password_db_str == generate_password_str(algorithm, salt, password)
467dcbfa1dbf1af0d7cd343f00149fc8322053e5
4,481
def get_ical_file_name(zip_file):
    """Gets the name of the ical file within the zip file."""
    ical_file_names = zip_file.namelist()
    if len(ical_file_names) != 1:
        raise Exception(
            "ZIP archive had %i files; expected 1." % len(ical_file_names)
        )
    return ical_file_names[0]
7013840891844358f0b4a16c7cefd31a602d9eae
4,482
def unquoted_str(draw):
    """Generate strings compatible with our definition of an unquoted string."""
    start = draw(st.text(alphabet=(ascii_letters + "_"), min_size=1))
    body = draw(st.text(alphabet=(ascii_letters + digits + "_")))
    return start + body
7927e828a82786f45749e25e25376f48479c0662
4,483
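In Hypothesis, draw-style generators like the one above are normally registered as composite strategies; a sketch of how it could be wired up (the decorator and imports below are assumptions, not part of the record):

from string import ascii_letters, digits

from hypothesis import given, strategies as st

unquoted = st.composite(unquoted_str)  # equivalent to decorating the def with @st.composite

@given(unquoted())
def test_first_char_is_not_a_digit(s):
    assert not s[0].isdigit()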
from typing import List
from typing import Optional
from typing import Any
from typing import Callable


def _reduce_attribute(states: List[State],
                      key: str,
                      default: Optional[Any] = None,
                      reduce: Callable[..., Any] = _mean) -> Any:
    """Find the first attribute matching key from states.

    If none are found, return default.
    """
    attrs = list(_find_state_attributes(states, key))
    if not attrs:
        return default
    if len(attrs) == 1:
        return attrs[0]
    return reduce(*attrs)
bfc4ca6826e05b04ae9e1af6d3c167935bceda6f
4,484
def sync_garmin(fit_file):
    """Sync generated fit file to Garmin Connect."""
    garmin = GarminConnect()
    session = garmin.login(ARGS.garmin_username, ARGS.garmin_password)
    return garmin.upload_file(fit_file.getvalue(), session)
8e604a0461f503d83b5a304081020d54acd7577c
4,485
from typing import Callable
from typing import List


def get_paths(graph: Graph, filter: Callable) -> List:
    """
    Collect all the paths consisting of valid vertices.
    Return one path at a time because the vertex index may be modified.
    """
    result = []
    if filter is None:
        return result
    visited = set()
    vs = graph.topological_sorting()
    for vertex in vs:
        if not filter(vertex, graph) or vertex in visited:
            continue
        visited.add(vertex)
        path = [vertex]
        slist = graph.successors(vertex)
        while len(set(slist)) == 1 and filter(slist[0], graph) and slist[0] not in visited:
            cur = slist[0]
            path.append(cur)
            visited.add(cur)
            slist = graph.successors(cur)
        if len(path) > 0:
            result.append(path)
    return result
f7e1679bae48781010257b4fe8e980964dee80ce
4,486
def create_app(app_name=PKG_NAME):
    """Initialize the core application."""
    app = Flask(app_name)
    CORS(app)
    with app.app_context():
        # Register Restx Api
        api.init_app(app)
        return app
1773b0a84253aa6a1bca5c6f6aec6cd6d59b74fa
4,487
from typing import Dict
from typing import Callable
import sympy
import warnings


def smtlib_to_sympy_constraint(
        smtlib_input: str,
        interpreted_constants: Dict[str, Callable] = default_interpreted_constants,
        interpreted_unary_functions: Dict[str, Callable] = default_interpreted_unary_functions):
    """Convert SMTLIB(v2) constraints into sympy constraints analyzable via SYMPAIS.

    This function is experimental and introduced as an example. It is
    implemented on top of PySMT (https://github.com/pysmt/pysmt). Additional
    features can be added by extending the `SMTToSympyWalker` class.

    Args:
        smtlib_input: SMT constraint as a string in SMTLIB(v2) format, as
            accepted by PySMT
        interpreted_constants: predefined interpreted constants to be declared
            in the SMT problem. Default: E (Euler), PI
        interpreted_unary_functions: predefined interpreted functions
            Real -> Real. Default: sin, cos, tan, asin, acos, atan, log, exp, sqrt

    Returns:
        The equivalent sympy constraint.
    """
    interpreted_symbols_declarations = '\n'.join(
        [f'(declare-const {cname} Real)' for cname in interpreted_constants.keys()])
    # separate the constant and function declaration blocks with a newline
    interpreted_symbols_declarations += '\n' + '\n'.join([
        f'(declare-fun {fname} (Real) Real)'
        for fname in interpreted_unary_functions.keys()
    ])
    smtlib_with_interpreted_symbols = (
        interpreted_symbols_declarations + '\n' + smtlib_input)

    reset_env()
    parser = SmtLibParser()
    script = parser.get_script(cStringIO(smtlib_with_interpreted_symbols))
    f = script.get_last_formula()

    converter = SMTToSympyWalker(get_env(), interpreted_constants,
                                 interpreted_unary_functions)
    f_sympy = converter.walk(f)
    f_sympy = sympy.logic.simplify_logic(f_sympy)
    f_sympy = sympy.simplify(f_sympy)
    if f_sympy.atoms(sympy.logic.Or):
        warnings.warn(
            'Disjunctive constraints are not supported by RealPaver. Consider '
            'replacing it with an adequate interval constraint propagation tool '
            'to benefit from all the features of SYMPAIS')
    return f_sympy
d04782272cd13fcb7eafdb4b8f9cb7b1fd857dcc
4,488
async def revert(app, change_id: str) -> dict:
    """
    Revert a history change given by the passed ``change_id``.

    :param app: the application object
    :param change_id: a unique id for the change
    :return: the updated OTU
    """
    db = app["db"]

    change = await db.history.find_one({"_id": change_id}, ["index"])

    if change["index"]["id"] != "unbuilt" or change["index"]["version"] != "unbuilt":
        raise virtool.errors.DatabaseError(
            "Change is included in a build and not revertible"
        )

    otu_id, otu_version = change_id.split(".")

    if otu_version != "removed":
        otu_version = int(otu_version)

    _, patched, history_to_delete = await patch_to_version(app, otu_id, otu_version - 1)

    # Remove the old sequences from the collection.
    await db.sequences.delete_many({"otu_id": otu_id})

    if patched is not None:
        patched_otu, sequences = virtool.otus.utils.split(patched)

        # Add the reverted sequences to the collection.
        for sequence in sequences:
            await db.sequences.insert_one(sequence)

        # Replace the existing otu with the patched one. If it doesn't exist, insert it.
        await db.otus.replace_one({"_id": otu_id}, patched_otu, upsert=True)
    else:
        await db.otus.delete_one({"_id": otu_id})

    await db.history.delete_many({"_id": {"$in": history_to_delete}})

    return patched
ad5484639f0a70913b17799534fa52b8531b3356
4,489
def days_away(date):
    """Takes in the string form of a date and returns the number of days until date."""
    mod_date = string_to_date(date)
    return abs((current_date() - mod_date).days)
f76b10d9e72d8db9e42d7aba7481e63cf1382502
4,490
def node_constraints_transmission(model):
    """
    Constrains e_cap symmetrically for transmission nodes.
    """
    m = model.m

    # Constraint rules
    def c_trans_rule(m, y, x):
        y_remote, x_remote = transmission.get_remotes(y, x)
        if y_remote in m.y_trans:
            return m.e_cap[y, x] == m.e_cap[y_remote, x_remote]
        else:
            return po.Constraint.NoConstraint

    # Constraints
    m.c_transmission_capacity = po.Constraint(m.y_trans, m.x,
                                              rule=c_trans_rule)
0fc51f39b63324c73503b349cfd38da4c9816c50
4,491
def plot_mtf(faxis, MTF, labels=None):
    """Plot the MTF. Return the figure reference."""
    fig_lineplot = plt.figure()
    plt.rc('axes', prop_cycle=PLOT_STYLES)

    for i in range(0, MTF.shape[0]):
        plt.plot(faxis, MTF[i, :])

    plt.xlabel('spatial frequency [cycles/length]')
    plt.ylabel('Radial MTF')
    plt.gca().set_ylim([0, 1])
    if labels is not None:
        plt.legend([str(n) for n in labels])
    plt.title("Modulation Transfer Function for various angles")
    return fig_lineplot
dac09628a72666a4f4e3e8aae4263cb9f2688fa2
4,492
import enum


def forward_ref_structure_hook(context, converter, data, forward_ref):
    """Applied to ForwardRef model and enum annotations.

    - Map reserved words in json keys to appropriate (safe) names in model.
    - Handle ForwardRef types until github.com/Tinche/cattrs/pull/42/ is fixed

    Note: this is the reason we need a "context" param and have to use a
    partial func to register the hook. Once the issue is resolved we can
    remove "context" and the partial.
    """
    data = hooks.tr_data_keys(data)
    actual_type = eval(forward_ref.__forward_arg__, context, locals())
    if issubclass(actual_type, enum.Enum):
        instance = converter.structure(data, actual_type)
    elif issubclass(actual_type, model.Model):
        # cannot use converter.structure - recursion error
        instance = converter.structure_attrs_fromdict(data, actual_type)
    else:
        raise DeserializeError(f"Unknown type to deserialize: {actual_type}")
    return instance
acbbf365c7a80c7a9f5230bcd038c2c286ae58c5
4,493
def cross(x: VariableLike, y: VariableLike) -> VariableLike:
    """Element-wise cross product.

    Parameters
    ----------
    x:
        Left hand side operand.
    y:
        Right hand side operand.

    Raises
    ------
    scipp.DTypeError
        If the dtype of the input is not vector3.

    Returns
    -------
    :
        The cross product of the input vectors.
    """
    return _call_cpp_func(_cpp.cross, x, y)
372156ba869e3dabb2421e1ea947fdc710c316eb
4,494
def delete(service, name, parent_id=None,
           appProperties=defaults.GDRIVE_USE_APPPROPERTIES):
    """Delete a file/folder on Google Drive.

    Parameters
    ----------
    service : googleapiclient.discovery.Resource
        Google API resource for GDrive v3
    name : str
        Name of file/folder
    parent_id : str, optional
        Parent ID of folder containing file (to narrow search)
    appProperties : bool
        Search for application-specific files using ``appProperties``

    Returns
    -------
    str
        ID of deleted file/folder
    """
    name_id = exists(service, name, parent_id=parent_id)
    resp = service.files().delete(fileId=name_id).execute()
    return name_id
e2653005d8d0e53df80119869586542b08405c55
4,495
def _is_LoginForm_in_this_frame(driver, frame):
    """Check whether the given frame contains a login form."""
    driver.switch_to.frame(frame)  # switch into this frame
    if _is_LoginForm_in_this_page(driver):
        return True
    else:
        driver.switch_to.parent_frame()  # switch back if nothing was found
        return False
16a4b3af1d5cf9abe2efee6856a37b520fc2a1fc
4,496
def parse_range_header(specifier, len_content):
    """Parses a range header into a list of pairs (start, stop)"""
    if not specifier or '=' not in specifier:
        return []

    ranges = []
    unit, byte_set = specifier.split('=', 1)
    unit = unit.strip().lower()

    if unit != "bytes":
        return []

    for val in byte_set.split(","):
        val = val.strip()
        if '-' not in val:
            return []

        if val.startswith("-"):
            # suffix-byte-range-spec: this form specifies the last N
            # bytes of an entity-body
            start = len_content + int(val)
            if start < 0:
                start = 0
            stop = len_content
        else:
            # byte-range-spec: first-byte-pos "-" [last-byte-pos]
            start, stop = val.split("-", 1)
            start = int(start)
            # Add 1 to make stop exclusive (HTTP spec is inclusive)
            stop = int(stop) + 1 if stop else len_content
        if start >= stop:
            return []
        ranges.append((start, stop))

    return ranges
2a408abe816684bf42b2253495088338bf4cac2b
4,497
import numpy as np


def acf(x, lags=None):
    """
    Computes the empirical autocorrelation function.

    :param x: array (n,), sequence of data points
    :param lags: int, maximum lag to compute the ACF for. If None, this is
        set to n-1. Default is None.
    :return gamma: array (lags+1,), values of the ACF at lags 0 to lags
    """
    gamma = np.correlate(x, x, mode='full')  # size here is always 2*len(x)-1
    gamma = gamma[int((gamma.size - 1) / 2):]  # keep only the second half
    if lags is not None and lags < len(gamma):
        gamma = gamma[0:lags + 1]
    return gamma / gamma[0]
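# Quick check: the normalized ACF is exactly 1 at lag 0, and `lags=20`
# yields 21 values (lags 0 through 20):
x = np.random.default_rng(0).standard_normal(1000)
gamma = acf(x, lags=20)
assert gamma.shape == (21,) and gamma[0] == 1.0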
9d47df88255ec8c9ae373c501f8b70af8f3c4ebc
4,498
def _login(client, user, users):
    """Log the user in via the session and return the user."""
    login_user_via_session(client, user=User.query.get(user.id))
    return user
079136eb777957caf09c51c75ae5148ab2eea836
4,500
def search(request):
    """Render the search page with filtered listings."""
    queryset_list = Listing.objects.order_by('-list_date')

    if 'keywords' in request.GET:
        keywords = request.GET['keywords']
        # Skip the filter if the value is empty
        if keywords:
            queryset_list = queryset_list.filter(
                description__icontains=keywords)

    if 'city' in request.GET:
        city = request.GET['city']
        if city:
            queryset_list = queryset_list.filter(
                city__iexact=city)

    if 'state' in request.GET:
        state = request.GET['state']
        if state:
            queryset_list = queryset_list.filter(
                state__iexact=state)

    if 'bedrooms' in request.GET:
        bedrooms = request.GET['bedrooms']
        # lte: less than or equal
        if bedrooms:
            queryset_list = queryset_list.filter(
                bedrooms__lte=bedrooms)

    if 'price' in request.GET:
        price = request.GET['price']
        if price:
            queryset_list = queryset_list.filter(
                price__lte=price)

    context = {
        "price_choices": price_choices,
        "bedroom_choices": bedroom_choices,
        "state_choices": state_choices,
        "listings": queryset_list,
        "values": request.GET
    }

    return render(request, 'listings/search.html', context)
a25d6e112d4054dfaf505aff5c4c36f07a95d989
4,501
import numpy as np
import astropy.units as u
import lsst.geom as geom
import lsst.afw.image as afwImage


def generate_cutout(butler, skymap, ra, dec, band='N708', data_type='deepCoadd',
                    half_size=10.0 * u.arcsec, psf=True, verbose=False):
    """Generate a single cutout image."""
    if not isinstance(half_size, u.Quantity):
        # Assume that this is in pixel
        half_size_pix = int(half_size)
    else:
        half_size_pix = int(half_size.to('arcsec').value / PIXEL_SCALE)

    if isinstance(ra, u.Quantity):
        ra = ra.value
    if isinstance(dec, u.Quantity):
        dec = dec.value

    # Width and height of the postage stamp
    stamp_shape = (half_size_pix * 2 + 1, half_size_pix * 2 + 1)

    # Make a list of (RA, Dec) that covers the cutout region
    radec_list = np.array(
        sky_cone(ra, dec, half_size_pix * PIXEL_SCALE * u.Unit('arcsec'), steps=50)).T

    # Retrieve the Patches that cover the cutout region
    img_patches = _get_patches(butler, skymap, radec_list, band, data_type=data_type)

    if img_patches is None:
        if verbose:
            print('***** No data at {:.5f} {:.5f} *****'.format(ra, dec))
        return None

    # Coordinate of the image center
    coord = geom.SpherePoint(ra * geom.degrees, dec * geom.degrees)

    # Making the stacked cutout
    cutouts = []
    idx, bbox_sizes, bbox_origins = [], [], []

    for img_p in img_patches:
        # Generate cutout
        cut, x0, y0 = _get_single_cutout(img_p, coord, half_size_pix)
        cutouts.append(cut)

        # Original lower corner pixel coordinate
        bbox_origins.append([x0, y0])

        # New lower corner pixel coordinate
        xnew, ynew = cut.getBBox().getBeginX() - x0, cut.getBBox().getBeginY() - y0
        idx.append([xnew, xnew + cut.getBBox().getWidth(),
                    ynew, ynew + cut.getBBox().getHeight()])

        # Area of the cutout region on this patch in unit of pixels;
        # overlapping cutouts are ranked by this area below
        bbox_sizes.append(cut.getBBox().getWidth() * cut.getBBox().getHeight())

    # Stitch cutouts together with the largest bboxes inserted last
    stamp = afwImage.MaskedImageF(
        geom.BoxI(geom.Point2I(0, 0), geom.Extent2I(*stamp_shape)))
    bbox_sorted_ind = np.argsort(bbox_sizes)

    for i in bbox_sorted_ind:
        masked_img = cutouts[i].getMaskedImage()
        stamp[idx[i][0]: idx[i][1], idx[i][2]: idx[i][3]] = masked_img

    # Build the new WCS of the cutout
    stamp_wcs = _build_cutout_wcs(coord, cutouts, bbox_sorted_ind[-1], bbox_origins)

    cutout = afwImage.ExposureF(stamp, stamp_wcs)

    if bbox_sizes[bbox_sorted_ind[-1]] < (half_size_pix * 2 + 1) ** 2:
        flag = 1
    else:
        flag = 2

    # The final product of the cutout
    if psf:
        psf = _get_psf(cutouts[bbox_sorted_ind[-1]], coord)
        return cutout, psf, flag

    return cutout, flag
fdc42ad0dd0f357d53804a1f6fa43c93e86d2c0e
4,502
def get_arraytypes ():
    """pygame.sndarray.get_arraytypes (): return tuple

    Gets the array system types currently supported.

    Checks which array system types are available and returns them as a
    tuple of strings. The values of the tuple can be used directly in
    the use_arraytype () method. If no supported array system could be
    found, None will be returned.
    """
    vals = []
    if __hasnumeric:
        vals.append ("numeric")
    if __hasnumpy:
        vals.append ("numpy")
    if len (vals) == 0:
        return None
    return tuple (vals)
192cb215fdc651543ac6ed4ce2f9cac2b0d3b4f4
4,503
def is_request_authentic(request, secret_token: bytes = conf.WEBHOOK_SECRET_TOKEN):
    """
    Examine the given request object to determine if it was sent by an
    authorized source.

    :param request: Request object to examine for authenticity
    :type request: :class:`~chalice.app.Request`
    :param secret_token: Shared secret token used to create payload hash
    :type secret_token: :class:`~bytes`
    :return: Response object indicating whether or not the request is authentic
    :rtype: :class:`~lopper.response.Response`
    """
    signature = request.headers.get('X-Hub-Signature')
    if not signature:
        return response.unauthorized('Missing "X-Hub-Signature" header')
    return auth.is_authentic(signature, request.raw_body, secret_token)
1ffceea3aebc0c038384c003edc93358e6faa9ed
4,504
def circular_mask_string(centre_ra_dec_posns, aperture_radius="1arcmin"):
    """Get a mask string representing circular apertures about (RA, Dec) tuples"""
    mask = ''
    if centre_ra_dec_posns is None:
        return mask
    for coords in centre_ra_dec_posns:
        mask += 'circle [ [ {x} , {y}] , {r} ]\n'.format(
            x=coords[0], y=coords[1], r=aperture_radius)
    return mask
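# Example output (the coordinate strings are placeholders in whatever sky
# format the downstream mask consumer expects):
mask = circular_mask_string([('18:00:00', '-22:59:59')], aperture_radius='30arcsec')
assert mask == 'circle [ [ 18:00:00 , -22:59:59] , 30arcsec ]\n'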
04e66d160eb908f543990adf896e494226674c71
4,505
def dataset_hdf5(dataset, tmp_path):
    """Make an HDF5 dataset and write it to disk."""
    path = str(tmp_path / 'test.h5')
    dataset.write_hdf5(path, object_id_itemsize=10)
    return path
4a7920adf7715797561513fbb87593abf95f0bca
4,506
import numpy as np
import scipy.sparse as sp


def _make_indexable(iterable):
    """Ensure iterable supports indexing or convert to an indexable variant.

    Convert sparse matrices to csr and other non-indexable iterable to arrays.
    Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.

    Parameters
    ----------
    iterable : {list, dataframe, array, sparse} or None
        Object to be converted to an indexable iterable.
    """
    if sp.issparse(iterable):
        return iterable.tocsr()
    elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
        return iterable
    elif iterable is None:
        return iterable
    return np.array(iterable)
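# Behaviour sketch (uses the numpy/scipy modules the helper already relies on):
assert isinstance(_make_indexable([1, 2, 3]), np.ndarray)       # plain list -> ndarray
assert _make_indexable(None) is None                            # None passes through
assert _make_indexable(sp.coo_matrix(np.eye(3))).format == 'csr'  # sparse -> CSR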
94be904009adfd3bf15de0f258b94a196a9612df
4,507
def fib_for(n):
    """
    Compute the Fibonacci sequence using a for loop

    Parameters
    ----------
    n : integer
        the nth Fibonacci number in the sequence

    Returns
    -------
    the nth Fibonacci number in the sequence
    """
    res = [0, 1]
    for i in range(n - 1):
        res.append(res[i] + res[i + 1])
    return res[n]
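# The sequence is 0-indexed, so fib_for(0) == 0 and fib_for(7) == 13:
assert [fib_for(n) for n in range(8)] == [0, 1, 1, 2, 3, 5, 8, 13]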
1609a2d52f5308a6a9d496f13c1de3f7eee6332d
4,509
import pickle


def command_factory(command):
    """A factory which returns functions for direct daemon communication.

    This factory will create a function which sends a payload to the daemon
    and returns the unpickled object which is returned by the daemon.

    Args:
        command (string): The type of payload this should be. This determines
            as what kind of instruction this will be interpreted by the daemon.

    Returns:
        function: The created function.
    """
    def communicate(body=None, root_dir=None):
        """Communicate with the daemon.

        This function sends a payload to the daemon and returns the
        unpickled object sent by the daemon.

        Args:
            body (dict): Any other arguments that should be put into the payload.
            root_dir (str): The root directory in which we expect the daemon.
                We need this to connect to the daemon's socket.

        Returns:
            The unpickled payload returned by the daemon.
        """
        # Use a fresh dict per call; a mutable default argument would be
        # shared (and mutated) across calls.
        if body is None:
            body = {}

        client = connect_socket(root_dir)
        body['mode'] = command

        # Delete the `func` entry we use to call the correct function with
        # argparse, as functions can't be pickled and this shouldn't be sent
        # to the daemon.
        if 'func' in body:
            del body['func']
        data_string = pickle.dumps(body, -1)
        client.send(data_string)

        # Receive the message, unpickle and return it
        response = receive_data(client)
        return response

    return communicate
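# Hedged usage sketch: 'status' as a command name and the daemon root directory
# are assumptions; connect_socket/receive_data come from the surrounding project.
status = command_factory('status')
response = status(root_dir='/tmp/daemon')  # unpickled reply from the daemon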
ec84d6ab611d4edaf55ba0c365ed8526250c7ce1
4,510
import pandas as pd


def load_prepare_saif_data(threshold=0.25):
    """
    Loads and prepares saif's data.

    Parameters
    ----------
    threshold : float
        Only data with intensities equal to or above this threshold will be
        kept (range 0-1).

    Returns
    -------
    DataFrame : pd.DataFrame
        Concatenated tweets with labels as a pandas DataFrame.
    """
    files = get_saif_files()
    df = pd.concat([pd.read_csv(f, sep='\t', index_col=0,
                                names=['tweet', 'emotion', 'intensity'])
                    for f in files], axis=0)
    df = df[df['intensity'] >= threshold]
    df.drop('intensity', axis=1, inplace=True)
    return df
b2087d0558473069cf5985bd7e2b063162157df5
4,511
import numpy as np


def nonmax_suppression(harris_resp, halfwidth=2):
    """
    Takes a Harris response from an image, performs nonmax suppression, and
    outputs the x,y values of the corners in the image.

    :param harris_resp: Harris response for an image which is an array of the
        same shape as the original image.
    :param halfwidth: The size of the padding to use in building the window
        (matrix) for nonmax suppression. The window will have a total shape of
        (2*halfwidth+1, 2*halfwidth+1).
    :return: Tuple of x and y coordinates for the corners that were found from
        the Harris response after nonmax suppression.
    """
    cornersx = []
    cornersy = []
    h, w = harris_resp.shape[:2]
    # Rows (first index) range over h, columns (second index) over w, so that
    # every (2*halfwidth+1)-sized window stays inside the image.
    for i in range(halfwidth, h - halfwidth):
        for j in range(halfwidth, w - halfwidth):
            window = harris_resp[i - halfwidth:i + halfwidth + 1,
                                 j - halfwidth:j + halfwidth + 1]
            center = window[halfwidth, halfwidth]
            # Keep (i, j) only if it is a nonzero local maximum of its window
            if center != 0 and center >= np.amax(window):
                cornersx.append(j)
                cornersy.append(i)
    return cornersx, cornersy
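# A single isolated peak should survive suppression and be reported as (x, y):
resp = np.zeros((9, 9))
resp[4, 4] = 1.0
assert nonmax_suppression(resp, halfwidth=2) == ([4], [4])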
b980ac9045728c8231749e7a43aa2f06d958d80c
4,512
import datetime
import uuid

import pytz


def create_credit_request(course_key, provider_id, username):
    """
    Initiate a request for credit from a credit provider.

    This will return the parameters that the user's browser will need to POST
    to the credit provider. It does NOT calculate the signature.

    Only users who are eligible for credit (have satisfied all credit
    requirements) are allowed to make requests.

    A provider can be configured either with *integration enabled* or not.
    If automatic integration is disabled, this method will simply return
    a URL to the credit provider and method set to "GET", so the student can
    visit the URL and request credit directly. No database record will be
    created to track these requests.

    If automatic integration *is* enabled, then this will also return the
    parameters that the user's browser will need to POST to the credit
    provider. These parameters will be digitally signed using a secret key
    shared with the credit provider.

    A database record will be created to track the request with a 32-character
    UUID. The returned dictionary can be used by the user's browser to send a
    POST request to the credit provider.

    If a pending request already exists, this function should return a request
    description with the same UUID. (Other parameters, such as the user's full
    name may be different than the original request).

    If a completed request (either accepted or rejected) already exists, this
    function will raise an exception. Users are not allowed to make additional
    requests once a request has been completed.

    Arguments:
        course_key (CourseKey): The identifier for the course.
        provider_id (str): The identifier of the credit provider.
        username (str): The user initiating the request.

    Returns: dict

    Raises:
        UserIsNotEligible: The user has not satisfied eligibility requirements
            for credit.
        CreditProviderNotConfigured: The credit provider has not been
            configured for this course.
        RequestAlreadyCompleted: The user has already submitted a request and
            received a response from the credit provider.

    Example Usage:
        >>> create_credit_request(course.id, "hogwarts", "ron")
        {
            "url": "https://credit.example.com/request",
            "method": "POST",
            "parameters": {
                "request_uuid": "557168d0f7664fe59097106c67c3f847",
                "timestamp": 1434631630,
                "course_org": "HogwartsX",
                "course_num": "Potions101",
                "course_run": "1T2015",
                "final_grade": "0.95",
                "user_username": "ron",
                "user_email": "[email protected]",
                "user_full_name": "Ron Weasley",
                "user_mailing_address": "",
                "user_country": "US",
                "signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
            }
        }

    """
    try:
        user_eligibility = CreditEligibility.objects.select_related('course').get(
            username=username,
            course__course_key=course_key
        )
        credit_course = user_eligibility.course
        credit_provider = CreditProvider.objects.get(provider_id=provider_id)
    except CreditEligibility.DoesNotExist:
        log.warning(
            'User "%s" tried to initiate a request for credit in course "%s", '
            'but the user is not eligible for credit',
            username, course_key
        )
        raise UserIsNotEligible  # lint-amnesty, pylint: disable=raise-missing-from
    except CreditProvider.DoesNotExist:
        log.error('Credit provider with ID "%s" has not been configured.', provider_id)
        raise CreditProviderNotConfigured  # lint-amnesty, pylint: disable=raise-missing-from

    # Check if we've enabled automatic integration with the credit
    # provider. If not, we'll show the user a link to a URL
    # where the user can request credit directly from the provider.
    # Note that we do NOT track these requests in our database,
    # since the state would always be "pending" (we never hear back).
    if not credit_provider.enable_integration:
        return {
            "url": credit_provider.provider_url,
            "method": "GET",
            "parameters": {}
        }
    else:
        # If automatic credit integration is enabled, then try
        # to retrieve the shared signature *before* creating the request.
        # That way, if there's a misconfiguration, we won't have requests
        # in our system that we know weren't sent to the provider.
        shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
        check_keys_exist(shared_secret_key, credit_provider.provider_id)

        if isinstance(shared_secret_key, list):
            # if keys exist, and keys are stored as a list
            # then we know at least 1 is available for [0]
            shared_secret_key = [key for key in shared_secret_key if key][0]

    # Initiate a new request if one has not already been created
    credit_request, created = CreditRequest.objects.get_or_create(
        course=credit_course,
        provider=credit_provider,
        username=username,
    )

    # Check whether we've already gotten a response for a request,
    # If so, we're not allowed to issue any further requests.
    # Skip checking the status if we know that we just created this record.
    if not created and credit_request.status != "pending":
        log.warning(
            (
                'Cannot initiate credit request because the request with UUID "%s" '
                'exists with status "%s"'
            ), credit_request.uuid, credit_request.status
        )
        raise RequestAlreadyCompleted

    if created:
        credit_request.uuid = uuid.uuid4().hex

    # Retrieve user account and profile info
    user = User.objects.select_related('profile').get(username=username)

    # Retrieve the final grade from the eligibility table
    try:
        final_grade = CreditRequirementStatus.objects.get(
            username=username,
            requirement__namespace="grade",
            requirement__name="grade",
            requirement__course__course_key=course_key,
            status="satisfied"
        ).reason["final_grade"]

        # NOTE (CCB): Limiting the grade to seven characters is a hack for ASU.
        if len(str(final_grade)) > 7:
            final_grade = f'{final_grade:.5f}'
        else:
            final_grade = str(final_grade)
    except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
        msg = 'Could not retrieve final grade from the credit eligibility table for ' \
              'user [{user_id}] in course [{course_key}].'.format(user_id=user.id, course_key=course_key)
        log.exception(msg)
        raise UserIsNotEligible(msg)  # lint-amnesty, pylint: disable=raise-missing-from

    # Get the student's enrollment date
    course_enrollment = CourseEnrollment.get_enrollment(user, course_key)
    enrollment_date = course_enrollment.created if course_enrollment else ""

    # Get the student's course completion date
    completion_date = get_last_exam_completion_date(course_key, username)

    parameters = {
        "request_uuid": credit_request.uuid,
        "timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
        "course_org": course_key.org,
        "course_num": course_key.course,
        "course_run": course_key.run,
        "enrollment_timestamp": to_timestamp(enrollment_date) if enrollment_date else "",
        "course_completion_timestamp": to_timestamp(completion_date) if completion_date else "",
        "final_grade": final_grade,
        "user_username": user.username,
        "user_email": user.email,
        "user_full_name": user.profile.name,
        "user_mailing_address": "",
        "user_country": (
            user.profile.country.code
            if user.profile.country.code is not None
            else ""
        ),
    }

    credit_request.parameters = parameters
    credit_request.save()

    if created:
        log.info('Created new request for credit with UUID "%s"', credit_request.uuid)
    else:
        log.info(
            'Updated request for credit with UUID "%s" so the user can re-issue the request',
            credit_request.uuid
        )

    # Sign the parameters using a secret key we share with the credit provider.
    parameters["signature"] = signature(parameters, shared_secret_key)

    return {
        "url": credit_provider.provider_url,
        "method": "POST",
        "parameters": parameters
    }
8c9e763d1f10f9187c102746911dc242385100e8
4,513
import pathlib


def is_valid_project_root(project_root: pathlib.Path) -> bool:
    """Check if the project root is a valid trestle project root."""
    if project_root is None or project_root == '' or len(project_root.parts) <= 0:
        return False

    trestle_dir = pathlib.Path.joinpath(project_root, const.TRESTLE_CONFIG_DIR)
    if trestle_dir.exists() and trestle_dir.is_dir():
        return True

    return False
f35d63373d96ee34592e84f21296eadb3ebc6c98
4,514
def make_2D_predictions_into_one_hot_4D(prediction_2D, dim):
    """
    Takes a 2D prediction of shape (#batch, #kpts) and returns 4D one-hot
    maps of shape (#batch, #kpts, #dim, #dim).
    """
    # getting one-hot maps of predicted locations;
    # one_hot_Maps is of shape (#batch, #kpts, #dim * #dim)
    one_hot_Maps = get_one_hot_map(prediction_2D, dim)
    num_batch, num_kpt = prediction_2D.shape
    one_hot_Maps_4D = one_hot_Maps.reshape(num_batch, num_kpt, dim, dim)
    return one_hot_Maps_4D
507d2fa9c52d5f8a1674e695f55928783a179082
4,515
from math import sqrt


def distance(a, b):
    """Euclidean distance between points a and b (sequences of coordinates)."""
    dimensions = len(a)

    _sum = 0
    for dimension in range(dimensions):
        difference_sq = (a[dimension] - b[dimension]) ** 2
        _sum += difference_sq
    return sqrt(_sum)
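# Works for any shared dimensionality:
assert distance((0, 0), (3, 4)) == 5.0
assert distance((1, 2, 2), (0, 0, 0)) == 3.0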
20acd50d7e3ab7f512f3e9ab9920f76b805043a9
4,517
def is_block(modules):
    """Check if `modules` is a ResNet building block."""
    if isinstance(modules, (BasicBlock, Bottleneck)):
        return True
    return False
8c6b5f59797646b27301a25a40d753b6c404b418
4,518
def playlist_500_fixture():
    """Load payload for playlist 500 and return it."""
    return load_fixture("plex/playlist_500.xml")
834efe057419f56b626c40430b68860fd5e0db1e
4,519
def strip_output(nb):
    """strip the outputs from a notebook object"""
    nb.metadata.pop('signature', None)
    for cell in nb.cells:
        if 'outputs' in cell:
            cell['outputs'] = []
        if 'prompt_number' in cell:
            cell['prompt_number'] = None
    return nb
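# Typical round-trip with nbformat (the file names are placeholders):
import nbformat

nb = nbformat.read('analysis.ipynb', as_version=4)
nbformat.write(strip_output(nb), 'analysis.stripped.ipynb')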
6339100f6897951bad4f91f8b8d86d1e5a68f459
4,520
def get_neighbors_general(status: CachingDataStructure, key: tuple) -> list:
    """
    Returns a list of tuples of all coordinates that are direct neighbors,
    meaning the index is valid and they are not KNOWN
    """
    coords = []
    # Use a distinct loop variable so the `key` argument is not shadowed
    for neighbor_key in get_direct_neighbour_coords_general(key):
        if status.valid_index(*neighbor_key) and not status[neighbor_key]:  # not known
            coords.append(neighbor_key)
    return coords
46a2b3aa91e424122982011ccaa684c2d9cf83f2
4,521
import batman
import astropy.units as q


def transit_params(time):
    """
    Dummy transit parameters for time series simulations

    Parameters
    ----------
    time: sequence
        The time axis of the transit observation

    Returns
    -------
    batman.transitmodel.TransitModel
        The transit model
    """
    params = batman.TransitParams()
    params.t0 = 0.                               # time of inferior conjunction
    params.per = 5.7214742                       # orbital period (days)
    params.a = 0.0558 * q.AU.to(q.R_sun) * 0.66  # semi-major axis (in units of stellar radii)
    params.inc = 89.8                            # orbital inclination (in degrees)
    params.ecc = 0.                              # eccentricity
    params.w = 90.                               # longitude of periastron (in degrees)
    params.limb_dark = 'quadratic'               # limb darkening profile to use
    params.u = [0.1, 0.1]                        # limb darkening coefficients
    params.rp = 0.                               # planet radius (placeholder)
    tmodel = batman.TransitModel(params, time)
    tmodel.teff = 3500                           # effective temperature of the host star
    tmodel.logg = 5                              # log surface gravity of the host star
    tmodel.feh = 0                               # metallicity of the host star
    return tmodel
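# Usage sketch (requires the batman-package; since rp is a placeholder of 0,
# the resulting model describes a flat light curve):
import numpy as np

time = np.linspace(-0.25, 0.25, 1000)  # days around mid-transit
tmodel = transit_params(time)
print(tmodel.teff, tmodel.logg, tmodel.feh)  # host-star attributes: 3500 5 0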
5e74a32ef4077a990d44edb15d66e56e00925666
4,522
def actions(__INPUT):
    """Return a list of the possible moves (successor states) of the matrix."""
    MOVIMIENTOS = []
    m = eval(__INPUT)
    i = 0
    while 0 not in m[i]:
        i += 1

    # Blank tile (the 0)
    j = m[i].index(0)

    if i > 0:
        # MOVE UP action
        m[i][j], m[i-1][j] = m[i-1][j], m[i][j]
        MOVIMIENTOS.append(str(m))
        m[i][j], m[i-1][j] = m[i-1][j], m[i][j]
    if i < 3:
        # MOVE DOWN action
        m[i][j], m[i+1][j] = m[i+1][j], m[i][j]
        MOVIMIENTOS.append(str(m))
        m[i][j], m[i+1][j] = m[i+1][j], m[i][j]
    if j > 0:
        # MOVE LEFT action
        m[i][j], m[i][j-1] = m[i][j-1], m[i][j]
        MOVIMIENTOS.append(str(m))
        m[i][j], m[i][j-1] = m[i][j-1], m[i][j]
    if j < 3:
        # MOVE RIGHT action
        m[i][j], m[i][j+1] = m[i][j+1], m[i][j]
        MOVIMIENTOS.append(str(m))
        m[i][j], m[i][j+1] = m[i][j+1], m[i][j]

    return MOVIMIENTOS
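# With the blank tile in the interior of the 4x4 board, all four moves are legal:
state = str([[1, 2, 3, 4], [5, 6, 0, 8], [9, 10, 7, 11], [13, 14, 15, 12]])
assert len(actions(state)) == 4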
46875f83d7f50bbd107be8ad5d926397960ca513
4,523
import geojson
from pkg_resources import resource_stream
from geoalchemy2 import WKBElement
from geoalchemy2.shape import from_shape
from shapely.geometry import shape


def get_massif_geom(massif: str) -> WKBElement:
    """Process used to get the massif geometries:

    * go on the meteofrance bra website
    * then get the html "area" element
    * then convert it to fake GeoJSON (wrong coordinates)
    * then open it in qgis.
    * Select *all* the geom of the layer.
    * rotate -90°
    * swap X and Y coordinates (with plugin)
    * use grass v.transform with various x, y scale and rotation until you
      get what you want.
    """
    with resource_stream("nivo_api", "cli/data/all_massifs.geojson") as fp:
        gj = geojson.load(fp)
    for obj in gj.features:
        if obj.properties["label"].upper() == massif.upper():
            return from_shape(shape(obj.geometry), 4326)
    else:
        raise ValueError(f"Massif {massif} geometry cannot be found.")
194ef4274dfd240af65b61781f39464e0cde4b3d
4,524
import cudf
import pyarrow as pa


def _to_arrow(x):
    """Move data to arrow format"""
    if isinstance(x, cudf.DataFrame):
        return x.to_arrow()
    else:
        return pa.Table.from_pandas(x, preserve_index=False)
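# Pandas input becomes a pyarrow.Table with the index dropped (cudf is only
# needed when passing GPU dataframes, but the module import above requires it):
import pandas as pd

tbl = _to_arrow(pd.DataFrame({'a': [1, 2, 3]}))
assert tbl.num_rows == 3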
c88c40d2d35f681ff268347c36e2cae4a52576d0
4,525