Dataset columns:
    content : string (lengths 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
def _key_chord_transition_distribution(
        key_chord_distribution, key_change_prob, chord_change_prob):
    """Transition distribution between key-chord pairs."""
    mat = np.zeros([len(_KEY_CHORDS), len(_KEY_CHORDS)])

    for i, key_chord_1 in enumerate(_KEY_CHORDS):
        key_1, chord_1 = key_chord_1
        chord_index_1 = i % len(_CHORDS)

        for j, key_chord_2 in enumerate(_KEY_CHORDS):
            key_2, chord_2 = key_chord_2
            chord_index_2 = j % len(_CHORDS)

            if key_1 != key_2:
                # Key change. Chord probability depends only on key and not previous
                # chord.
                mat[i, j] = (key_change_prob / 11)
                mat[i, j] *= key_chord_distribution[key_2, chord_index_2]
            else:
                # No key change.
                mat[i, j] = 1 - key_change_prob
                if chord_1 != chord_2:
                    # Chord probability depends on key, but we have to redistribute the
                    # probability mass on the previous chord since we know the chord
                    # changed.
                    mat[i, j] *= (
                        chord_change_prob * (
                            key_chord_distribution[key_2, chord_index_2] +
                            key_chord_distribution[key_2, chord_index_1] /
                            (len(_CHORDS) - 1)))
                else:
                    # No chord change.
                    mat[i, j] *= 1 - chord_change_prob

    return mat
0e89b1e11494237c526170f25286c3ad098a1023
3,658,300
def level_set(
        current_price,
        standard_deviation,
        cloud,
        stop_mod,
        take_profit_mod,
):
    """
    Calculates risk and reward levels.
    Should return a stop loss and take profit levels.
    For opening a new position.
    Returns a stop (in the format (StopType, offset)) and a take profit level.
    """
    stop = None
    take_profit = None
    cloud_color = cloud.status[0]
    cloud_location = cloud.status[1]

    direction_mod = 1
    if cloud_color == CloudColor.RED:
        direction_mod = -1

    take_profit_mod = take_profit_mod * direction_mod
    stop_mod = stop_mod * direction_mod

    if cloud_location == CloudPriceLocation.INSIDE:  # ie passing through long ema
        stop = (StopType.EMA_LONG, (standard_deviation * stop_mod * -1))

    # If price passes through short EMA from either color cloud.
    if cloud_location in (CloudPriceLocation.ABOVE, CloudPriceLocation.BELOW):
        stop = (StopType.EMA_LONG, 0)

        # Or in case the long EMA is very far away:
        if abs(cloud.long_ema - current_price) > abs(
                current_price - (cloud.short_ema - (direction_mod * 2 * standard_deviation * -1))):
            stop = (
                StopType.EMA_SHORT,
                (direction_mod * 2 * standard_deviation * -1))

        # Or if the long EMA is too close:
        elif abs(cloud.long_ema - current_price) < abs(
                current_price - (cloud.short_ema - (direction_mod * 0.5 * standard_deviation * -1))):
            stop = (
                StopType.EMA_SHORT,
                (direction_mod * 0.5 * standard_deviation * -1))

    take_profit = cloud.short_ema + (standard_deviation * take_profit_mod)

    risk_loss = abs(current_price - StopType.stop_tuple_to_level(stop, cloud))

    # Enforce max_ratio:1 reward:risk if take_profit is very far away.
    max_ratio = 1.5
    min_ratio = 1.0
    potential_profit = abs(current_price - take_profit)
    if potential_profit > max_ratio * risk_loss:
        take_profit = current_price + (direction_mod * max_ratio * risk_loss)
    if potential_profit < max_ratio * risk_loss:
        stop = (current_price, potential_profit * direction_mod * -.95)

    return stop, take_profit
541a15b22bc830db530658c10515a15def196516
3,658,301
def back(deque):
    """Return the last element in the deque, or None if it is empty."""
    if length(deque) > 0:
        return deque[-1]
    else:
        return None
810d2135cf39af7959f6142be4b2b3abee8d6185
3,658,302
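A minimal usage sketch for the `back` helper above. `length` is assumed to be that module's own size helper; here it is stubbed with the built-in `len` so the snippet runs standalone:

    length = len  # stand-in for the module's own length() helper (assumption)

    d = [1, 2, 3]
    assert back(d) == 3       # last element
    assert back([]) is None   # empty container yields None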
import json
import operator


def my_subs_helper(s):
    """Helper function to handle badly formed JSON stored in the database"""
    try:
        return {'time_created': s.time_created,
                'json_obj': sorted(json.loads(s.json_data).iteritems(),
                                   key=operator.itemgetter(0)),
                'plain_json_obj': json.dumps(json.loads(s.json_data)),
                'id': s.id,
                'json_score_data': json.dumps(s.json_score_data)}
    except ValueError:
        return {'time_created': s.time_created,
                'json_obj': "__ERROR__",
                'plain_json_obj': "__ERROR__",
                'id': s.id}
4b649d865c3a99f89111baa694df4902e65243e6
3,658,303
def dynamic_features(data_dir, year, data_source, voronoi, radar_buffers, **kwargs): """ Load all dynamic features, including bird densities and velocities, environmental data, and derived features such as estimated accumulation of bird on the ground due to adverse weather. Missing data is interpolated, but marked as missing. :param data_dir: directory containing all relevant data :param year: year of interest :param data_source: 'radar' or 'abm' (simulated data) :param voronoi: Voronoi tessellation (geopandas dataframe) :param radar_buffers: radar buffers with static features (geopandas dataframe) :return: dynamic features (pandas dataframe) """ env_points = kwargs.get('env_points', 100) season = kwargs.get('season', 'fall') random_seed = kwargs.get('seed', 1234) pref_dirs = kwargs.get('pref_dirs', {'spring': 58, 'fall': 223}) pref_dir = pref_dirs[season] wp_threshold = kwargs.get('wp_threshold', -0.5) edge_type = kwargs.get('edge_type', 'voronoi') t_unit = kwargs.get('t_unit', '1H') print(f'##### load data for {season} {year} #####') if data_source in ['radar', 'nexrad']: print(f'load radar data') radar_dir = osp.join(data_dir, data_source) voronoi_radars = voronoi.query('observed == True') birds_km2, _, t_range = datahandling.load_season(radar_dir, season, year, ['vid'], t_unit=t_unit, mask_days=False, radar_names=voronoi_radars.radar, interpolate_nans=False) radar_data, _, t_range = datahandling.load_season(radar_dir, season, year, ['ff', 'dd', 'u', 'v'], t_unit=t_unit, mask_days=False, radar_names=voronoi_radars.radar, interpolate_nans=True) bird_speed = radar_data[:, 0, :] bird_direction = radar_data[:, 1, :] bird_u = radar_data[:, 2, :] bird_v = radar_data[:, 3, :] # rescale according to voronoi cell size data = birds_km2 * voronoi_radars.area_km2.to_numpy()[:, None] t_range = t_range.tz_localize('UTC') elif data_source == 'abm': print(f'load abm data') abm_dir = osp.join(data_dir, 'abm') voronoi_radars = voronoi.query('observed == True') radar_buffers_radars = radar_buffers.query('observed == True') data, t_range, bird_u, bird_v = abm.load_season(abm_dir, season, year, voronoi_radars) buffer_data = abm.load_season(abm_dir, season, year, radar_buffers_radars, uv=False)[0] # rescale to birds per km^2 birds_km2 = data / voronoi_radars.area_km2.to_numpy()[:, None] birds_km2_from_buffer = buffer_data / radar_buffers_radars.area_km2.to_numpy()[:, None] # rescale to birds per voronoi cell birds_from_buffer = birds_km2_from_buffer * voronoi_radars.area_km2.to_numpy()[:, None] # time range for solar positions to be able to infer dusk and dawn solar_t_range = t_range.insert(-1, t_range[-1] + pd.Timedelta(t_range.freq)) print('load env data') env_vars = kwargs.get('env_vars', ['u', 'v', 'u10', 'v10', 'cc', 'tp', 'sp', 't2m', 'sshf']) env_vars = [v for v in env_vars if not v in ['night', 'dusk', 'dawn', 'dayofyear', 'solarpos', 'solarpos_dt']] if len(env_vars) > 0: if edge_type == 'voronoi': env_areas = voronoi.geometry else: env_areas = radar_buffers.geometry env_850 = era5interface.compute_cell_avg(osp.join(data_dir, 'env', season, year, 'pressure_level_850.nc'), env_areas, env_points, t_range.tz_localize(None), vars=env_vars, seed=random_seed) env_surface = era5interface.compute_cell_avg(osp.join(data_dir, 'env', season, year, 'surface.nc'), env_areas, env_points, t_range.tz_localize(None), vars=env_vars, seed=random_seed) dfs = [] for ridx, row in voronoi.iterrows(): df = {} df['radar'] = [row.radar] * len(t_range) print(f'preprocess radar {row.radar}') # time related variables for radar 
ridx solarpos = np.array(solarposition.get_solarposition(solar_t_range, row.lat, row.lon).elevation) night = np.logical_or(solarpos[:-1] < -6, solarpos[1:] < -6) df['solarpos_dt'] = solarpos[:-1] - solarpos[1:] df['solarpos'] = solarpos[:-1] df['night'] = night df['dusk'] = np.logical_and(solarpos[:-1] >=6, solarpos[1:] < 6) # switching from day to night df['dawn'] = np.logical_and(solarpos[:-1] < 6, solarpos[1:] >=6) # switching from night to day df['datetime'] = t_range df['dayofyear'] = pd.DatetimeIndex(t_range).dayofyear df['tidx'] = np.arange(t_range.size) # bird measurements for radar ridx df['birds'] = data[ridx] if row.observed else [np.nan] * len(t_range) df['birds_km2'] = birds_km2[ridx] if row.observed else [np.nan] * len(t_range) cols = ['birds', 'birds_km2', 'birds_from_buffer', 'birds_km2_from_buffer', 'bird_u', 'bird_v'] df['bird_u'] = bird_u[ridx] if row.observed else [np.nan] * len(t_range) df['bird_v'] = bird_v[ridx] if row.observed else [np.nan] * len(t_range) if data_source == 'abm': df['birds_from_buffer'] = birds_from_buffer[ridx] if row.observed else [np.nan] * len(t_range) df['birds_km2_from_buffer'] = birds_km2_from_buffer[ridx] if row.observed else [np.nan] * len(t_range) else: df['birds_from_buffer'] = data[ridx] if row.observed else [np.nan] * len(t_range) df['birds_km2_from_buffer'] = birds_km2[ridx] if row.observed else [np.nan] * len(t_range) df['bird_speed'] = bird_speed[ridx] if row.observed else [np.nan] * len(t_range) df['bird_direction'] = bird_direction[ridx] if row.observed else [np.nan] * len(t_range) cols.extend(['bird_speed', 'bird_direction']) if len(env_vars) > 0: # environmental variables for radar ridx for var in env_vars: if var in env_850: print(f'found {var} in env_850 dataset') df[var] = env_850[var][ridx] elif var in env_surface: print(f'found {var} in surface dataset') df[var] = env_surface[var][ridx] df['wind_speed'] = np.sqrt(np.square(df['u']) + np.square(df['v'])) # Note that here wind direction is the direction into which the wind is blowing, # which is the opposite of the standard meteorological wind direction df['wind_dir'] = (abm.uv2deg(df['u'], df['v']) + 360) % 360 # compute accumulation variables (for baseline models) groups = [list(g) for k, g in it.groupby(enumerate(df['night']), key=lambda x: x[-1])] nights = [[item[0] for item in g] for g in groups if g[0][1]] df['nightID'] = np.zeros(t_range.size) df['acc_rain'] = np.zeros(t_range.size) df['acc_wind'] = np.zeros(t_range.size) df['wind_profit'] = np.zeros(t_range.size) acc_rain = 0 u_rain = 0 acc_wind = 0 u_wind = 0 for nidx, night in enumerate(nights): df['nightID'][night] = np.ones(len(night)) * (nidx + 1) # accumulation due to rain in the past nights acc_rain = acc_rain/3 + u_rain * 2/3 df['acc_rain'][night] = np.ones(len(night)) * acc_rain # compute proportion of hours with rain during the night u_rain = np.mean(df['tp'][night] > 0.01) # accumulation due to unfavourable wind in the past nights acc_wind = acc_wind/3 + u_wind * 2/3 df['acc_wind'][night] = np.ones(len(night)) * acc_wind # compute wind profit for bird with speed 12 m/s and flight direction 223 degree north v_air = np.ones(len(night)) * 12 alpha = np.ones(len(night)) * pref_dir df['wind_profit'][night] = v_air - np.sqrt(v_air**2 + df['wind_speed'][night]**2 - 2 * v_air * df['wind_speed'][night] * np.cos(np.deg2rad(alpha-df['wind_dir'][night]))) u_wind = np.mean(df['wind_profit'][night]) < wp_threshold radar_df = pd.DataFrame(df) radar_df['missing'] = 0 for col in cols: if data_source == 'radar': # radar 
quantities being exactly 0 during the night are missing, # radar quantities during the day are set to 0 print(f'check missing data for column {col}') radar_df[col] = radar_df.apply(lambda row: np.nan if (row.night and not row[col]) else (0 if not row.night else row[col]), axis=1) # remember missing radar observations radar_df['missing'] = radar_df['missing'] | radar_df[col].isna() # fill missing bird measurements by interpolation if col == 'bird_direction': # use "nearest", to avoid artifacts of interpolating between e.g. 350 and 2 degree radar_df[col].interpolate(method='nearest', inplace=True) else: # for all other quantities simply interpolate linearly radar_df[col].interpolate(method='linear', inplace=True) else: radar_df[col] = radar_df.apply(lambda row: np.nan if (row.night and np.isnan(row[col])) else (0 if not row.night else row[col]), axis=1) radar_df['missing'] = radar_df['missing'] | radar_df[col].isna() # fill missing bird measurements with 0 radar_df[col].fillna(0, inplace=True) dfs.append(radar_df) print(f'found {radar_df.missing.sum()} misssing time points') dynamic_feature_df = pd.concat(dfs, ignore_index=True) print(f'columns: {dynamic_feature_df.columns}') return dynamic_feature_df
fd5675b127d6a20f930d8ee88366e7426c5a09b9
3,658,304
def __normalize_allele_strand(snp_dfm):
    """
    Keep all the alleles on FWD strand.
    If `strand` is "-", flip every base in `alleles`; otherwise do not change `alleles`.
    """
    on_rev = (snp_dfm.loc[:, "strand"] == "-")
    has_alleles = (snp_dfm.loc[:, "alleles"].str.len() > 0)
    condition = (on_rev & has_alleles)

    if not snp_dfm.loc[condition, :].empty:
        snp_dfm.loc[condition, "alleles"] = snp_dfm.loc[condition, "alleles"].apply(flip_allele)

    return snp_dfm
1ebe00294eb55de96d68fc214bd5051d40a2dfa5
3,658,305
def add_to_codetree(tword, codetree, freq=1):
    """
    Adds one tuple-word to tree structure - one node per symbol
    word end in the tree characterized by node[0]>0
    """
    unique = 0
    for pos in range(len(tword)):
        s = tword[pos]
        if s not in codetree:
            codetree[s] = [0, {}]
            unique += 1
        codetree[s][0] += freq
        codetree = codetree[s][1]
    return unique
e92a48f112e7a774bed3b125509f7f64dce0a7ec
3,658,306
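A short, hedged example of how `add_to_codetree` above could be driven: each word is passed as a tuple of symbols, the tree is a nested dict of [count, children] nodes, and the return value counts newly created nodes:

    codetree = {}
    new_nodes = add_to_codetree(('a', 'b'), codetree)   # creates nodes 'a' and 'b'
    new_nodes += add_to_codetree(('a', 'c'), codetree)  # reuses 'a', creates 'c'
    print(new_nodes)         # 3
    print(codetree['a'][0])  # 2 -> 'a' was traversed twice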
def TA_ADXm(data, period=10, smooth=10, limit=18):
    """
    Moving Average ADX
    ADX Smoothing Trend Color Change on Moving Average and ADX Cross.
    Use on Hourly Charts - Green UpTrend - Red DownTrend - Black Choppy No Trend

    Source: https://www.tradingview.com/script/owwws7dM-Moving-Average-ADX/

    Parameters
    ----------
    data : (N,) array_like
        The OHLC Kline.
    period : int or None, optional
        DI Length period. Default value is 10.
    smooth : int or None, optional
        ADX smoothing length period. Default value is 10.
    limit : int or None, optional
        ADX MA Active limit threshold. Default value is 18.

    Returns
    -------
    adx, ADXm : ndarray
        ADXm indicator and trend direction sequence. (-1, 0, 1) means
        (Negative, No Trend, Positive).
    """
    up = data.high.pct_change()
    down = data.low.pct_change() * -1

    trur = TA_HMA(talib.TRANGE(data.high.values, data.low.values, data.close.values), period)
    plus = 100 * TA_HMA(np.where(((up > down) & (up > 0)), up, 0), period) / trur
    minus = 100 * TA_HMA(np.where(((down > up) & (down > 0)), down, 0), period) / trur

    # Zero-fill instead of dropna: dropping entries from the ndarray could leave
    # it misaligned with data.index, so the leading values are padded with zeros.
    plus = np.r_[np.zeros(period + 2), plus[(period + 2):]]
    minus = np.r_[np.zeros(period + 2), minus[(period + 2):]]

    sum = plus + minus
    adx = 100 * TA_HMA(abs(plus - minus) / (np.where((sum == 0), 1, sum)), smooth)
    adx = np.r_[np.zeros(smooth + 2), adx[(smooth + 2):]]
    ADXm = np.where(((adx > limit) & (plus > minus)), 1,
                    np.where(((adx > limit) & (plus < minus)), -1, 0))
    return adx, ADXm
40f41b013127b122bddf66e3dfe53f746c89b3c7
3,658,307
def remove_from_dict(obj, keys=list(), keep_keys=True):
    """
    Prune a class or dictionary of all but keys (keep_keys=True).
    Prune a class or dictionary of specified keys (keep_keys=False).
    """
    if type(obj) == dict:
        items = list(obj.items())
    elif isinstance(obj, dict):
        items = list(obj.items())
    else:
        items = list(obj.__dict__.items())

    if keep_keys:
        return {k: v for k, v in items if k in keys}
    else:
        return {k: v for k, v in items if k not in keys}
b1d9a2bd17269e079ce136cc464060fc47fe5906
3,658,308
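A brief usage sketch for `remove_from_dict` above, showing both the keep and drop modes on a plain dict:

    d = {'a': 1, 'b': 2, 'c': 3}
    print(remove_from_dict(d, keys=['a', 'c'], keep_keys=True))   # {'a': 1, 'c': 3}
    print(remove_from_dict(d, keys=['a', 'c'], keep_keys=False))  # {'b': 2}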
def unify_qso_catalog_uvqs_old(qsos):
    """Unifies the name of columns that are relevant for the analysis"""
    qsos.rename_column('RA', 'ra')
    qsos.rename_column('DEC', 'dec')
    qsos.rename_column('FUV', 'mag_fuv')
    qsos.rename_column('Z', 'redshift')
    qsos.add_column(Column(name='id', data=np.arange(len(qsos)) + 1))
    return qsos
8fe561e7d6e99c93d08efe5ff16d6e37ed66ab4e
3,658,309
def get_hash_key_name(value):
    """Returns a valid entity key_name that's a hash of the supplied value."""
    return 'hash_' + sha1_hash(value)
b2bba3031efccb5dab1781695fc39c993f735e71
3,658,310
import yaml


def yaml_dumps(value, indent=2):
    """
    YAML dumps that supports Unicode and the ``as_raw`` property of objects
    if available.
    """
    return yaml.dump(value, indent=indent, allow_unicode=True, Dumper=YamlAsRawDumper)
ed368fb84967190e460c1bcf51bd573323ff4f46
3,658,311
def poi_remove(poi_id: int):
    """Removes POI record

    Args:
        poi_id: ID of the POI to be removed
    """
    poi = POI.get_by_id(poi_id)
    if not poi:
        abort(404)

    poi.delete()
    db.session.commit()

    return redirect_return()
26c1cb2524c6a19d9382e9e0d27947a0d2b2a98c
3,658,312
def stringToTupleOfFloats(s):
    """
    Converts s to a tuple
    @param s: string
    @return: tuple represented by s
    """
    ans = []
    for i in s.strip("()").split(","):
        if i.strip() != "":
            if i == "null":
                ans.append(None)
            else:
                ans.append(float(i))
    return tuple(ans)
7eec23232f884035b12c6498f1e68616e4580878
3,658,313
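A quick check of `stringToTupleOfFloats` above on a couple of representative inputs (note that only a bare "null" token, with no surrounding spaces, maps to None):

    print(stringToTupleOfFloats("(1.5,2,null)"))  # (1.5, 2.0, None)
    print(stringToTupleOfFloats("()"))            # ()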
import json

import requests


def create_training(training: TrainingSchema):
    """
    Create a training with a TrainingSchema

    :param training: training data as TrainingSchema
    :return: http response
    """
    endpoint_url = Config.get_api_url() + "training"
    job_token = Config.get_job_token()
    headers = {'content-type': 'application/json', 'jobtoken': job_token}
    data = json.dumps(training.get_dict())
    response = requests.post(endpoint_url, data=data, headers=headers)
    return response
c0ce20fc68cbb3d46b00e451b85bf01991579bcc
3,658,314
def respects_language(fun):
    """Decorator for tasks with respect to site's current language.
    You can use this decorator on your tasks together with default @task
    decorator (remember that the task decorator must be applied last).

    See also the with-statement alternative :func:`respect_language`.

    **Example**:

    .. code-block:: python

        @task
        @respects_language
        def my_task():
            # localize something.

    The task will then accept a ``language`` argument that will be
    used to set the language in the task, and the task can thus be
    called like:

    .. code-block:: python

        from django.utils import translation
        from myapp.tasks import my_task

        # Pass the current language on to the task
        my_task.delay(language=translation.get_language())

        # or set the language explicitly
        my_task.delay(language='no.no')

    """
    @wraps(fun)
    def _inner(*args, **kwargs):
        with respect_language(kwargs.pop('language', None)):
            return fun(*args, **kwargs)
    return _inner
547629321d649a102a0c082b1eddcac32334432c
3,658,315
def zero_one_window(data, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1, floor=0, ceiling=1,
                    channels_axis=None):
    """
    :param data: Numpy ndarray.
    :param axis:
    :param ceiling_percentile: Percentile value of the foreground to set to the ceiling.
    :param floor_percentile: Percentile value of the image to set to the floor.
    :param floor: New minimum value.
    :param ceiling: New maximum value.
    :param channels_axis:
    :return:
    """
    data = np.copy(data)
    if len(axis) != data.ndim:
        floor_threshold = np.percentile(data, floor_percentile, axis=axis)
        if channels_axis is None:
            channels_axis = find_channel_axis(data.ndim, axis=axis)
        data = np.moveaxis(data, channels_axis, 0)
        for channel in range(data.shape[0]):
            channel_data = data[channel]
            # find the background
            bg_mask = channel_data <= floor_threshold[channel]
            # use background to find foreground
            fg = channel_data[bg_mask == False]
            # find threshold based on foreground percentile
            ceiling_threshold = np.percentile(fg, ceiling_percentile)
            # normalize the data for this channel
            data[channel] = window_data(channel_data, floor_threshold=floor_threshold[channel],
                                        ceiling_threshold=ceiling_threshold, floor=floor, ceiling=ceiling)
        data = np.moveaxis(data, 0, channels_axis)
    else:
        floor_threshold = np.percentile(data, floor_percentile)
        fg_mask = data > floor_threshold
        fg = data[fg_mask]
        ceiling_threshold = np.percentile(fg, ceiling_percentile)
        data = window_data(data, floor_threshold=floor_threshold, ceiling_threshold=ceiling_threshold,
                           floor=floor, ceiling=ceiling)
    return data
4056433a9f3984bebc1c99f30be4f8e9ddc31026
3,658,316
import sys


def factorial(x):
    """factorial(x) -> Integral

    Find x!. Raise a ValueError if x is negative or non-integral."""
    if isinstance(x, float):
        fl = int(x)
        if fl != x:
            raise ValueError("float arguments must be integral")
        x = fl

    if x > sys.maxsize:
        raise OverflowError("Too large for a factorial")

    if x <= 100:
        if x < 0:
            raise ValueError("x must be >= 0")
        res = 1
        for i in range(2, x + 1):
            res *= i
        return res

    # Experimentally this gap seems good
    gap = max(100, x >> 7)

    def _fac_odd(low, high):
        if low + gap >= high:
            t = 1
            for i in range(low, high, 2):
                t *= i
            return t

        mid = ((low + high) >> 1) | 1
        return _fac_odd(low, mid) * _fac_odd(mid, high)

    def _fac1(x):
        if x <= 2:
            return 1, 1, x - 1
        x2 = x >> 1
        f, g, shift = _fac1(x2)
        g *= _fac_odd((x2 + 1) | 1, x + 1)
        return (f * g, g, shift + x2)

    res, _, shift = _fac1(x)
    return res << shift
664cc8e0e215f089bbc57fec68553d788305e4c0
3,658,317
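A small sanity check for the `factorial` implementation above; inputs up to 100 take the simple loop, larger inputs take the odd-product/shift decomposition, so both paths can be compared against `math.factorial`:

    import math

    assert factorial(10) == math.factorial(10)    # small path (simple loop)
    assert factorial(150) == math.factorial(150)  # large path (_fac1/_fac_odd)
    assert factorial(12.0) == math.factorial(12)  # integral floats are accepted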
def get_event_stderr(e):
    """Return the stderr field (if any) associated with the event."""
    if _API_VERSION == google_v2_versions.V2ALPHA1:
        return e.get('details', {}).get('stderr')

    elif _API_VERSION == google_v2_versions.V2BETA:
        for event_type in ['containerStopped']:
            if event_type in e:
                return e[event_type].get('stderr')

    else:
        assert False, 'Unexpected version: {}'.format(_API_VERSION)
89a32228d3ad0ecb92c6c0b45664903d6f4b507d
3,658,318
def xA(alpha, gamma, lsa, lsd, y, xp, nv):
    """Calculate position where the beam hits the analyzer crystal.

    :param alpha: the divergence angle of the neutron
    :param gamma: the tilt angle of the deflector
    :param lsa: the sample-analyzer distance
    :param lsd: the sample deflector distance
    :param y: the translation of the analyser crystal
    :param xp: the point at the sample where the neutron is scattered
    :param nv: neutron path: transmitted(0), reflected at the first deflector(1),
        reflected at the second deflector(2),
    """
    if nv == 0:
        return xp + (lsa - y) * tan(radians(alpha))
    return xp + lsd * tan(radians(alpha)) + \
        (lsa - lsd - y) * tan(radians(2 * gamma - alpha))
0dfb9bd7b761fa0669893c692f3adb2a5cb079c4
3,658,319
from typing import Optional
from datetime import datetime


def find_recent_login(user_id: UserID) -> Optional[datetime]:
    """Return the time of the user's most recent login, if found."""
    recent_login = db.session \
        .query(DbRecentLogin) \
        .filter_by(user_id=user_id) \
        .one_or_none()

    if recent_login is None:
        return None

    return recent_login.occurred_at
153dc509e2382e8f9eb18917d9be04d171ffdee9
3,658,320
async def async_remove_config_entry_device(
    hass: HomeAssistant, config_entry: ConfigEntry, device_entry: dr.DeviceEntry
) -> bool:
    """Remove ufp config entry from a device."""
    unifi_macs = {
        _async_unifi_mac_from_hass(connection[1])
        for connection in device_entry.connections
        if connection[0] == dr.CONNECTION_NETWORK_MAC
    }

    api = async_ufp_instance_for_config_entry_ids(hass, {config_entry.entry_id})
    assert api is not None

    if api.bootstrap.nvr.mac in unifi_macs:
        return False

    for device in async_get_devices(api.bootstrap, DEVICES_THAT_ADOPT):
        if device.is_adopted_by_us and device.mac in unifi_macs:
            return False

    return True
f8ae37a454f5c5e3314676162ff48e1e05530396
3,658,321
def batch_unsrt_segment_sum(data, segment_ids, num_segments):
    """Performs the `tf.unsorted_segment_sum` operation batch-wise"""
    # create distinct segments per batch
    num_batches = tf.shape(segment_ids, out_type=tf.int64)[0]
    batch_indices = tf.range(num_batches)
    segment_ids_per_batch = segment_ids + num_segments * tf.expand_dims(batch_indices, axis=1)

    # do the normal unsegment sum and reshape to original shape
    seg_sums = tf.unsorted_segment_sum(data, segment_ids_per_batch, num_segments * num_batches)
    return tf.reshape(seg_sums, tf.stack((-1, num_segments)))
299a514e926c43564960288c706c1d535620144b
3,658,322
import json


def read_json(file_name):
    """Read json from file."""
    with open(file_name) as f:
        return json.load(f)
2eccab7dddb1c1038de737879c465f293a00e5de
3,658,323
def get_role(request):
    """Look up the "role" query parameter in the URL."""
    query = request.ws_resource.split('?', 1)
    if len(query) == 1:
        raise LookupError('No query string found in URL')

    param = parse.parse_qs(query[1])
    if 'role' not in param:
        raise LookupError('No role parameter found in the query string')

    return param['role'][0]
87cc8f15a3d0aeb45a8d7ea67fb34573e41b7df7
3,658,324
def login(username: str, password: str) -> Person:
    """Log in to Zhixue (zhixue.com) with a username and password.

    Args:
        username (str): Username; may be an exam registration number or a phone number.
        password (str): Password.

    Raises:
        ArgError: Invalid arguments.
        UserOrPassError: Wrong username or password.
        UserNotFoundError: User not found.
        LoginError: Login error.
        RoleError: Unknown account role.

    Returns:
        Person
    """
    session = get_session(username, password)
    if check_is_student(session):
        return StudentAccount(session).set_base_info()
    return TeacherAccount(session).set_base_info()
a982cddb107cc8ccf8c9d1868e91299cd6ac07f3
3,658,325
def _decode_end(_fp):
    """Decode the end tag, which has no data in the file, returning 0.

    :type _fp: A binary `file object`
    :rtype: int
    """
    return 0
5e8da3585dda0b9c3c7cd428b7e1606e585e15c6
3,658,326
def make_dqn_agent(q_agent_type,
                   arch,
                   n_actions,
                   lr=2.5e-4,
                   noisy_net_sigma=None,
                   buffer_length=10 ** 6,
                   final_epsilon=0.01,
                   final_exploration_frames=10 ** 6,
                   use_gpu=0,
                   replay_start_size=5 * 10 ** 4,
                   target_update_interval=3 * 10 ** 4,
                   update_interval=4,
                   ):
    """
    given an architecture and a specific dqn, return the agent

    args:
        q_agent_type: choices=["DQN", "DoubleDQN", "PAL"]
        arch: choices=["nature", "nips", "dueling", "doubledqn"]
        final_epsilon: Final value of epsilon during training
        final_exploration_frames: Timesteps after which we stop annealing exploration rate
        replay_start_size: Minimum replay buffer size before performing gradient updates.
        target_update_interval: Frequency (in timesteps) at which the target network is updated
        update_interval: Frequency (in timesteps) of network updates.
    """
    # q function
    q_func = parse_arch(arch, n_actions)

    # explorer
    if noisy_net_sigma is not None:
        pnn.to_factorized_noisy(q_func, sigma_scale=noisy_net_sigma)
        # turn off explorer
        explorer = explorers.Greedy()
    else:
        # default option
        explorer = explorers.LinearDecayEpsilonGreedy(
            1.0,
            final_epsilon,
            final_exploration_frames,
            lambda: np.random.randint(n_actions),
        )

    # optimizer
    # Use the Nature paper's hyperparameters
    opt = pfrl.optimizers.RMSpropEpsInsideSqrt(
        q_func.parameters(),
        lr=lr,
        alpha=0.95,
        momentum=0.0,
        eps=1e-2,
        centered=True,
    )

    # replay_buffer
    rbuf = replay_buffers.ReplayBuffer(buffer_length)

    # Feature extractor
    def phi(x):
        return np.asarray(x, dtype=np.float32) / 255

    Agent = parse_agent(q_agent_type)
    agent = Agent(
        q_func,
        opt,
        rbuf,
        gpu=use_gpu,  # 0 or -1
        gamma=0.99,
        explorer=explorer,
        replay_start_size=replay_start_size,
        target_update_interval=target_update_interval,
        clip_delta=True,
        update_interval=update_interval,
        batch_accumulator="sum",
        phi=phi,
    )

    return agent
0b5974e30a12ef760a424d8d229319ccfee3119a
3,658,327
def build_consensus_from_haplotypes(haplotypes):
    """
    # ========================================================================
    BUILD CONSENSUS FROM HAPLOTYPES

    PURPOSE
    -------
    Builds a consensus from a list of Haplotype objects.

    INPUT
    -----
    [HAPLOTYPE LIST] [haplotypes]
        The list of haplotypes.

    RETURN
    ------
    [String] consensus
        The consensus sequence.
    # ========================================================================
    """
    pileup = build_pileup_from_haplotypes(haplotypes)
    consensus = pileup.build_consensus()

    return consensus
977e59e77e45cb4ccce95875f4802a43028af060
3,658,328
from typing import List
from typing import Dict
from typing import Tuple


def convert_data_for_rotation_averaging(
    wTi_list: List[Pose3], i2Ti1_dict: Dict[Tuple[int, int], Pose3]
) -> Tuple[Dict[Tuple[int, int], Rot3], List[Rot3]]:
    """Converts the poses to inputs and expected outputs for a rotation averaging algorithm.

    Args:
        wTi_list: List of global poses.
        i2Ti1_dict: Dictionary of (i1, i2) -> i2Ti1 relative poses.

    Returns:
        i2Ti1_dict's values mapped to relative rotations i2Ri1.
        wTi_list mapped to global rotations.
    """
    wRi_list = [x.rotation() for x in wTi_list]
    i2Ri1_dict = {k: v.rotation() for k, v in i2Ti1_dict.items()}

    return i2Ri1_dict, wRi_list
1f058ae1925f5392416ec4711b55e849e277a24c
3,658,329
def all_arrays_to_gpu(f):
    """Decorator to copy all the numpy arrays to the gpu before function invocation"""
    def inner(*args, **kwargs):
        args = list(args)
        for i in range(len(args)):
            if isinstance(args[i], np.ndarray):
                args[i] = to_gpu(args[i])

        return f(*args, **kwargs)

    return inner
25ea43a611ac8a63aa1246aaaf810cec71be4c3f
3,658,330
def create_intersect_mask(num_v, max_v):
    """
    Creates intersect mask as needed by polygon_intersection_new
    in batch_poly_utils (for a single example)
    """
    intersect_mask = np.zeros((max_v, max_v), dtype=np.float32)

    for i in range(num_v - 2):
        for j in range((i + 2) % num_v, num_v - int(i == 0)):
            intersect_mask[i, j] = 1.

    return intersect_mask
32d2758e704901aa57b70e0edca2b9292df2583a
3,658,331
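A small illustration of `create_intersect_mask` above for a pentagon padded to 8 vertices; my reading of the construction is that the mask flags pairs of non-adjacent polygon edges, i.e. the pairs that may legitimately intersect:

    mask = create_intersect_mask(num_v=5, max_v=8)
    print(mask.shape)  # (8, 8)
    print(mask[0])     # row 0 has ones at columns 2 and 3 only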
def gdi_abuse_tagwnd_technique_bitmap():
    """
    Technique to be used on Win 10 v1703 or earlier. Locate the pvscan0 address
    with the help of tagWND structures
    @return: pvscan0 address of the manager and worker bitmap and the handles
    """
    window_address = alloc_free_windows(0)
    manager_bitmap_handle = create_bitmap(0x100, 0x6D, 1)
    manager_bitmap_pvscan0 = window_address + 0x50

    window_address = alloc_free_windows(0)
    worker_bitmap_handle = create_bitmap(0x100, 0x6D, 1)
    worker_bitmap_pvscan0 = window_address + 0x50

    return (manager_bitmap_pvscan0, worker_bitmap_pvscan0, manager_bitmap_handle, worker_bitmap_handle)
e77253d082b9aaaa083c84ffdbe8a74ae0b84b0b
3,658,332
import os import argparse import sys import array def main(): """CLI entrypoint""" parser = Parser( prog='unwad', description='Default action is to convert files to png format and extract to xdir.', epilog='example: unwad gfx.wad -d ./out => extract all files to ./out' ) parser.add_argument( 'file', metavar='file.wad', action=ResolvePathAction ) parser.add_argument( '-l', '--list', action='store_true', help='list files' ) parser.add_argument( '-d', metavar='xdir', default=os.getcwd(), dest='dest', action=ResolvePathAction, help='extract files into xdir' ) parser.add_argument( '-q', dest='quiet', action='store_true', help='quiet mode' ) parser.add_argument( '-f', dest='format', default='png', choices=['bmp','gif','png','tga'], help='image format to convert to' ) parser.add_argument( '-v', '--version', dest='version', action='version', help=argparse.SUPPRESS, version=f'{parser.prog} version {qcli.__version__}' ) args = parser.parse_args() archive_name = os.path.basename(args.file) if not wad.is_wadfile(args.file): print(f'{parser.prog}: cannot find or open {args.file}', file=sys.stderr) sys.exit(1) if args.list: with wad.WadFile(args.file) as wad_file: info_list = sorted(wad_file.infolist(), key=lambda i: i.filename) lump_types = { 0: 'NONE', 1: 'LABEL', 64: 'LUMP', 65: 'QTEX', 66: 'QPIC', 67: 'SOUND', 68: 'MIPTEX' } def lump_type(num): if num in lump_types: return lump_types[num] return num headers = ['Length', 'Type', 'Name'] table = [[i.file_size, lump_type(i.type), i.filename] for i in info_list] length = sum([i.file_size for i in info_list]) count = len(info_list) table.append([length, '', f'{count} file{"s" if count > 1 else ""}']) separator = [] for i in range(len(headers)): t = max(len(str(length)), len(headers[i]) + 2) separator.append('-' * t) table.insert(-1, separator) print(f'Archive: {archive_name}') print(tabulate(table, headers=headers)) sys.exit(0) if not os.path.exists(args.dest): os.makedirs(args.dest) with wad.WadFile(args.file) as wad_file: if not args.quiet: print(f'Archive: {archive_name}') # Flatten out palette palette = [] for p in quake.palette: palette += p for item in wad_file.infolist(): filename = item.filename fullpath = os.path.join(args.dest, filename) fullpath_ext = '{0}.{1}'.format(fullpath, args.format) data = None size = None # Pictures if item.type == wad.LumpType.QPIC: with wad_file.open(filename) as lmp_file: lump = lmp.Lmp.open(lmp_file) size = lump.width, lump.height data = array.array('B', lump.pixels) # Special cases elif item.type == wad.LumpType.MIPTEX: # Console characters if item.file_size == 128 * 128: size = 128, 128 with wad_file.open(filename) as lump: data = lump.read(item.file_size) else: # Miptextures try: with wad_file.open(filename) as mip_file: mip = wad.Miptexture.read(mip_file) data = mip.pixels[:mip.width * mip.height] data = array.array('B', data) size = mip.width, mip.height except: print(f' failed to extract resource: {item.filename}', file=sys.stderr) continue try: # Convert to image file if data is not None and size is not None: img = Image.frombuffer('P', size, data, 'raw', 'P', 0, 1) img.putpalette(palette) img.save(fullpath_ext) if not args.quiet: print(f' extracting: {fullpath_ext}') # Extract as raw file else: wad_file.extract(filename, args.dest) if not args.quiet: print(f' extracting: {fullpath}') except: print(f'{parser.prog}: error: {sys.exc_info()[1]}', file=sys.stderr) sys.exit(0)
eee25c0b6d6481ff5e169f36a53daa5bdf3bbd52
3,658,333
def check_stop() -> list:
    """Checks for entries in the stopper table in base db.

    Returns:
        list:
        Returns the flag, caller from the stopper table.
    """
    with db.connection:
        cursor = db.connection.cursor()
        flag = cursor.execute("SELECT flag, caller FROM stopper").fetchone()
    return flag
b2694938541704508d5304bae9abff25da2e0fc9
3,658,334
from TorCtl import SQLSupport
import socket


def open_controller(filename, ncircuits, use_sql):
    """ starts stat gathering thread """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((control_host, control_port))
    c = PathSupport.Connection(s)
    c.authenticate(control_pass)  # also launches thread...
    c.debug(file(filename + ".log", "w", buffering=0))

    h = CircStatsGatherer(c, __selmgr, filename, ncircuits)
    c.set_event_handler(h)

    if use_sql:
        SQLSupport.setup_db("sqlite:///" + filename + ".sqlite", drop=True)
        c.add_event_listener(SQLSupport.ConsensusTrackerListener())
        c.add_event_listener(SQLSupport.CircuitListener())

    global FUDValue
    if not FUDValue:
        FUDValue = c.get_option("FetchUselessDescriptors")[0][1]
    c.set_option("FetchUselessDescriptors", "1")

    c.set_events([TorCtl.EVENT_TYPE.STREAM,
                  TorCtl.EVENT_TYPE.BW,
                  TorCtl.EVENT_TYPE.NEWCONSENSUS,
                  TorCtl.EVENT_TYPE.NEWDESC,
                  TorCtl.EVENT_TYPE.CIRC,
                  TorCtl.EVENT_TYPE.STREAM_BW], True)
    return c
8686a60dc27d486aac3e6622cd82f94983eda74c
3,658,335
def get_camelcase_name_chunks(name):
    """
    Given a name, get its parts.
    E.g: maxCount -> ["max", "count"]
    """
    out = []
    out_str = ""
    for c in name:
        if c.isupper():
            if out_str:
                out.append(out_str)
            out_str = c.lower()
        else:
            out_str += c
    out.append(out_str)
    return out
134a8b1d98af35f185b37c999fbf499d18bf76c5
3,658,336
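A couple of quick examples of `get_camelcase_name_chunks` above:

    print(get_camelcase_name_chunks("maxCount"))      # ['max', 'count']
    print(get_camelcase_name_chunks("value"))         # ['value']
    print(get_camelcase_name_chunks("HTTPResponse"))  # ['h', 't', 't', 'p', 'response'] - consecutive capitals split per letter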
def _GetBuildBotUrl(builder_host, builder_port):
    """Gets build bot URL for fetching build info.

    Bisect builder bots are hosted on tryserver.chromium.perf, though we cannot
    access this tryserver using host and port number directly, so we use another
    tryserver URL for the perf tryserver.

    Args:
        builder_host: Hostname of the server where the builder is hosted.
        builder_port: Port number of the server where the builder is hosted.

    Returns:
        URL of the buildbot as a string.
    """
    if (builder_host == PERF_BISECT_BUILDER_HOST and
            builder_port == PERF_BISECT_BUILDER_PORT):
        return PERF_TRY_SERVER_URL
    else:
        return 'http://%s:%s' % (builder_host, builder_port)
551ac7ee9079009cd8b52e41aeabb2b2e4e10c21
3,658,337
def case_two_args_positional_callable_first(replace_by_foo):
    """ Tests the decorator with one positional argument @my_decorator(goo) """
    return replace_by_foo(goo, 'hello'), goo
fa5ca0af3d5af7076aebbb8364f29fc64b4e3c28
3,658,338
def cal_sort_key(cal):
    """
    Sort key for the list of calendars: primary calendar first,
    then other selected calendars, then unselected calendars.
    (" " sorts before "X", and tuples are compared piecewise)
    """
    if cal["selected"]:
        selected_key = " "
    else:
        selected_key = "X"
    if cal["primary"]:
        primary_key = " "
    else:
        primary_key = "X"
    return (primary_key, selected_key, cal["summary"])
fd1d8b32ee904d3684decba54268d926c5fd3d82
3,658,339
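A minimal sketch of `cal_sort_key` above used with `sorted`; the calendar entries are assumed to be dicts with `primary`, `selected`, and `summary` keys:

    cals = [
        {"primary": False, "selected": False, "summary": "Holidays"},
        {"primary": True,  "selected": True,  "summary": "Work"},
        {"primary": False, "selected": True,  "summary": "Family"},
    ]
    for cal in sorted(cals, key=cal_sort_key):
        print(cal["summary"])  # Work, Family, Holidays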
from datetime import datetime


def select_zip_info(sample: bytes) -> tuple:
    """Print a list of items contained within the ZIP file, along with their
    last modified times, CRC32 checksums, and file sizes. Return info on the
    item selected by the user as a tuple.
    """
    t = []
    w = 0
    z = ZipFile(sample)

    for i in z.infolist():
        if len(i.filename) > w:
            w = len(i.filename)
        t.append((i.filename, datetime(*i.date_time), i.CRC, i.file_size))

    for i in range(len(t)):
        dt = t[i][1].strftime('%Y-%m-%d %H:%M:%S')
        crc = t[i][2].to_bytes(4, 'big').hex()
        print(f'{i + 1: >2}. {t[i][0]: <{w}} {dt} {crc} {t[i][3]}')

    n = input('\nEnter a number corresponding to the desired entry: ')
    print()
    return t[int(n) - 1]
aac5b04c40552c09d07bf2db0c2d4431fc168aa2
3,658,340
import traceback import select def create_ionosphere_layers(base_name, fp_id, requested_timestamp): """ Create a layers profile. :param None: determined from :obj:`request.args` :return: array :rtype: array """ function_str = 'ionoshere_backend.py :: create_ionosphere_layers' trace = 'none' fail_msg = 'none' layers_algorithms = None layers_added = None value_conditions = ['<', '>', '==', '!=', '<=', '>='] conditions = ['<', '>', '==', '!=', '<=', '>=', 'in', 'not in'] if 'd_condition' in request.args: d_condition = request.args.get('d_condition', '==') else: logger.error('no d_condition argument passed') fail_msg = 'error :: no d_condition argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace if not str(d_condition) in conditions: logger.error('d_condition not a valid conditon - %s' % str(d_condition)) fail_msg = 'error :: d_condition not a valid conditon - %s' % str(d_condition) return False, False, layers_algorithms, layers_added, fail_msg, trace if 'd_boundary_limit' in request.args: d_boundary_limit = request.args.get('d_boundary_limit', '0') else: logger.error('no d_boundary_limit argument passed') fail_msg = 'error :: no d_boundary_limit argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace try: # @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats # test_d_boundary_limit = int(d_boundary_limit) + 1 test_d_boundary_limit = float(d_boundary_limit) + 1 except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: d_boundary_limit is not an int' return False, False, layers_algorithms, layers_added, fail_msg, trace # @modified 20160315 - Feature #1972: ionosphere_layers - use D layer boundary for upper limit # Added d_boundary_times if 'd_boundary_times' in request.args: d_boundary_times = request.args.get('d_boundary_times', '1') else: logger.error('no d_boundary_times argument passed') fail_msg = 'error :: no d_boundary_times argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace try: test_d_boundary_times = int(d_boundary_times) + 1 except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: d_boundary_times is not an int' return False, False, layers_algorithms, layers_added, fail_msg, trace # @added 20170616 - Feature #2048: D1 ionosphere layer if 'd1_condition' in request.args: d1_condition = request.args.get('d1_condition', 'none') else: logger.error('no d1_condition argument passed') fail_msg = 'error :: no d1_condition argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace if str(d1_condition) == 'none': d1_condition = 'none' d1_boundary_limit = 0 d1_boundary_times = 0 else: if not str(d1_condition) in conditions: logger.error('d1_condition not a valid conditon - %s' % str(d1_condition)) fail_msg = 'error :: d1_condition not a valid conditon - %s' % str(d1_condition) return False, False, layers_algorithms, layers_added, fail_msg, trace if 'd1_boundary_limit' in request.args: d1_boundary_limit = request.args.get('d1_boundary_limit', '0') else: logger.error('no d1_boundary_limit argument passed') fail_msg = 'error :: no d1_boundary_limit argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace try: test_d1_boundary_limit = float(d1_boundary_limit) + 1 except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: d1_boundary_limit is not an int' return False, False, layers_algorithms, layers_added, fail_msg, trace if 
'd1_boundary_times' in request.args: d1_boundary_times = request.args.get('d1_boundary_times', '1') else: logger.error('no d1_boundary_times argument passed') fail_msg = 'error :: no d1_boundary_times argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace try: test_d1_boundary_times = int(d1_boundary_times) + 1 except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: d1_boundary_times is not an int' return False, False, layers_algorithms, layers_added, fail_msg, trace if 'e_condition' in request.args: e_condition = request.args.get('e_condition', None) else: logger.error('no e_condition argument passed') fail_msg = 'error :: no e_condition argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace if not str(e_condition) in value_conditions: logger.error('e_condition not a valid value conditon - %s' % str(e_condition)) fail_msg = 'error :: e_condition not a valid value conditon - %s' % str(e_condition) return False, False, layers_algorithms, layers_added, fail_msg, trace if 'e_boundary_limit' in request.args: e_boundary_limit = request.args.get('e_boundary_limit') else: logger.error('no e_boundary_limit argument passed') fail_msg = 'error :: no e_boundary_limit argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace try: # @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats # test_e_boundary_limit = int(e_boundary_limit) + 1 test_e_boundary_limit = float(e_boundary_limit) + 1 except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: e_boundary_limit is not an int' return False, False, layers_algorithms, layers_added, fail_msg, trace if 'e_boundary_times' in request.args: e_boundary_times = request.args.get('e_boundary_times') else: logger.error('no e_boundary_times argument passed') fail_msg = 'error :: no e_boundary_times argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace try: test_e_boundary_times = int(e_boundary_times) + 1 except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: e_boundary_times is not an int' return False, False, layers_algorithms, layers_added, fail_msg, trace es_layer = False if 'es_layer' in request.args: es_layer_arg = request.args.get('es_layer') if es_layer_arg == 'true': es_layer = True if es_layer: es_day = None if 'es_day' in request.args: es_day = request.args.get('es_day') else: logger.error('no es_day argument passed') fail_msg = 'error :: no es_day argument passed' return False, False, layers_algorithms, layers_added, fail_msg, trace f1_layer = False if 'f1_layer' in request.args: f1_layer_arg = request.args.get('f1_layer') if f1_layer_arg == 'true': f1_layer = True if f1_layer: from_time = None valid_f1_from_time = False if 'from_time' in request.args: from_time = request.args.get('from_time') if from_time: values_valid = True if len(from_time) == 4: for digit in from_time: try: int(digit) + 1 except: values_valid = False if values_valid: if int(from_time) < 2400: valid_f1_from_time = True if not valid_f1_from_time: logger.error('no valid f1_layer from_time argument passed - %s' % str(from_time)) fail_msg = 'error :: no valid f1_layer from_time argument passed - %s' % str(from_time) return False, False, layers_algorithms, layers_added, fail_msg, trace f2_layer = False if 'f2_layer' in request.args: f2_layer_arg = request.args.get('f2_layer') if f2_layer_arg == 'true': f2_layer = True if f2_layer: until_time = None 
valid_f2_until_time = False if 'until_time' in request.args: until_time = request.args.get('until_time') if until_time: values_valid = True if len(until_time) == 4: for digit in until_time: try: int(digit) + 1 except: values_valid = False if values_valid: if int(until_time) < 2400: valid_f2_until_time = True if not valid_f2_until_time: logger.error('no valid f2_layer until_time argument passed - %s' % str(until_time)) fail_msg = 'error :: no valid f2_layer until_time argument passed - %s' % str(until_time) return False, False, layers_algorithms, layers_added, fail_msg, trace label = False if 'fp_layer_label' in request.args: label_arg = request.args.get('fp_layer_label') label = label_arg[:255] engine_needed = True engine = None if engine_needed: logger.info('%s :: getting MySQL engine' % function_str) try: engine, fail_msg, trace = get_an_engine() logger.info(fail_msg) except: trace = traceback.format_exc() logger.error(trace) fail_msg = 'error :: could not get a MySQL engine' logger.error('%s' % fail_msg) raise if not engine: trace = 'none' fail_msg = 'error :: engine not obtained' logger.error(fail_msg) raise try: metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine) logger.info(log_msg) logger.info('metrics_table OK') except: logger.error(traceback.format_exc()) logger.error('error :: failed to get metrics_table meta') if engine: engine_disposal(engine) raise # to webapp to return in the UI metrics_id = 0 try: connection = engine.connect() stmt = select([metrics_table]).where(metrics_table.c.metric == base_name) result = connection.execute(stmt) for row in result: metrics_id = int(row['id']) connection.close() except: trace = traceback.format_exc() logger.error(trace) fail_msg = 'error :: could not determine metric id from metrics table' if engine: engine_disposal(engine) raise # Create layer profile ionosphere_layers_table = None try: ionosphere_layers_table, fail_msg, trace = ionosphere_layers_table_meta(skyline_app, engine) logger.info(fail_msg) except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: ionosphere_backend :: failed to get ionosphere_layers_table meta for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise layer_id = 0 try: connection = engine.connect() stmt = select([ionosphere_layers_table]).where(ionosphere_layers_table.c.fp_id == fp_id) result = connection.execute(stmt) for row in result: layer_id = int(row['id']) connection.close() except: trace = traceback.format_exc() logger.error(trace) fail_msg = 'error :: could not determine id from ionosphere_layers_table' if engine: engine_disposal(engine) raise if layer_id > 0: return layer_id, True, None, None, fail_msg, trace new_layer_id = False try: connection = engine.connect() ins = ionosphere_layers_table.insert().values( fp_id=fp_id, metric_id=int(metrics_id), enabled=1, label=label) result = connection.execute(ins) connection.close() new_layer_id = result.inserted_primary_key[0] logger.info('new ionosphere layer_id: %s' % str(new_layer_id)) except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new record into the ionosphere_layers table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise # Create layer profile layers_algorithms_table = None try: layers_algorithms_table, fail_msg, trace = layers_algorithms_table_meta(skyline_app, engine) logger.info(fail_msg) except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: 
ionosphere_backend :: failed to get layers_algorithms_table meta for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise new_layer_algorithm_ids = [] layers_added = [] # D layer try: connection = engine.connect() ins = layers_algorithms_table.insert().values( layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id), layer='D', type='value', condition=d_condition, # @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats # layer_boundary=int(d_boundary_limit), layer_boundary=str(d_boundary_limit), # @modified 20160315 - Feature #1972: ionosphere_layers - use D layer boundary for upper limit # Added d_boundary_times times_in_row=int(d_boundary_times)) result = connection.execute(ins) connection.close() new_layer_algorithm_id = result.inserted_primary_key[0] logger.info('new ionosphere_algorithms D layer id: %s' % str(new_layer_algorithm_id)) new_layer_algorithm_ids.append(new_layer_algorithm_id) layers_added.append('D') except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new D layer record into the layers_algorithms table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise # E layer try: connection = engine.connect() ins = layers_algorithms_table.insert().values( layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id), layer='E', type='value', condition=e_condition, # @modified 20170317 - Feature #1960: ionosphere_layers - allow for floats # layer_boundary=int(e_boundary_limit), layer_boundary=str(e_boundary_limit), times_in_row=int(e_boundary_times)) result = connection.execute(ins) connection.close() new_layer_algorithm_id = result.inserted_primary_key[0] logger.info('new ionosphere_algorithms E layer id: %s' % str(new_layer_algorithm_id)) new_layer_algorithm_ids.append(new_layer_algorithm_id) layers_added.append('E') except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new E layer record into the layers_algorithms table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise # @added 20170616 - Feature #2048: D1 ionosphere layer # This must be the third created algorithm layer as in the frontend list # D is [0], E is [1], so D1 has to be [2] if d1_condition: try: connection = engine.connect() ins = layers_algorithms_table.insert().values( layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id), layer='D1', type='value', condition=d1_condition, layer_boundary=str(d1_boundary_limit), times_in_row=int(d1_boundary_times)) result = connection.execute(ins) connection.close() new_layer_algorithm_id = result.inserted_primary_key[0] logger.info('new ionosphere_algorithms D1 layer id: %s' % str(new_layer_algorithm_id)) new_layer_algorithm_ids.append(new_layer_algorithm_id) layers_added.append('D1') except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new D1 layer record into the layers_algorithms table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise # Es layer if es_layer: try: connection = engine.connect() ins = layers_algorithms_table.insert().values( layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id), layer='Es', type='day', condition='in', layer_boundary=es_day) result = connection.execute(ins) connection.close() new_layer_algorithm_id = result.inserted_primary_key[0] logger.info('new ionosphere_algorithms Es layer id: %s' % 
str(new_layer_algorithm_id)) new_layer_algorithm_ids.append(new_layer_algorithm_id) layers_added.append('Es') except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new Es layer record into the layers_algorithms table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise # F1 layer if f1_layer: try: connection = engine.connect() ins = layers_algorithms_table.insert().values( layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id), layer='F1', type='time', condition='>', layer_boundary=str(from_time)) result = connection.execute(ins) connection.close() new_layer_algorithm_id = result.inserted_primary_key[0] logger.info('new ionosphere_algorithms F1 layer id: %s' % str(new_layer_algorithm_id)) new_layer_algorithm_ids.append(new_layer_algorithm_id) layers_added.append('F1') except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new F1 layer record into the layers_algorithms table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise # F2 layer if f2_layer: try: connection = engine.connect() ins = layers_algorithms_table.insert().values( layer_id=new_layer_id, fp_id=fp_id, metric_id=int(metrics_id), layer='F2', type='time', condition='<', layer_boundary=str(until_time)) result = connection.execute(ins) connection.close() new_layer_algorithm_id = result.inserted_primary_key[0] logger.info('new ionosphere_algorithms F2 layer id: %s' % str(new_layer_algorithm_id)) new_layer_algorithm_ids.append(new_layer_algorithm_id) layers_added.append('F2') except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to insert a new F2 layer record into the layers_algorithms table for %s' % base_name logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise ionosphere_table = None try: ionosphere_table, fail_msg, trace = ionosphere_table_meta(skyline_app, engine) logger.info(fail_msg) except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: failed to get ionosphere_table meta for options' logger.error('%s' % fail_msg) if engine: engine_disposal(engine) raise logger.info('%s :: ionosphere_table OK' % function_str) try: connection = engine.connect() connection.execute( ionosphere_table.update( ionosphere_table.c.id == fp_id). values(layers_id=new_layer_id)) connection.close() logger.info('updated layers_id for %s' % str(fp_id)) except: trace = traceback.format_exc() logger.error('%s' % trace) fail_msg = 'error :: could not update layers_id for %s ' % str(fp_id) logger.error(fail_msg) # @added 20170806 - Bug #2130: MySQL - Aborted_clients # Added missing disposal if engine: engine_disposal(engine) raise if engine: engine_disposal(engine) return new_layer_id, True, layers_added, new_layer_algorithm_ids, fail_msg, trace
3aa77c6d04fb24b2d8443467fbc0189e42b7dd9f
3,658,341
def unitary_ifft2(y):
    """
    A unitary version of the ifft2.
    """
    return np.fft.ifft2(y) * np.sqrt(ni * nj)
16dfe62cea08a72888cc3390f4d85f069aac5718
3,658,342
def orb_scf_input(sdmc):
    """ find the scf inputs used to generate sdmc """
    myinputs = None  # this is the goal
    sdep = 'dependencies'  # string representation of the dependencies entry

    # step 1: find the p2q simulation id
    p2q_id = None
    for key in sdmc[sdep].keys():
        if sdmc[sdep][key].result_names[0] == 'orbitals':
            p2q_id = key
        # end if
    # end for dep

    # step 2: find the nscf simulation
    nscf_id_list = sdmc[sdep][p2q_id]['sim'][sdep].keys()
    assert len(nscf_id_list) == 1
    nscf_id = nscf_id_list[0]
    nscf = sdmc[sdep][p2q_id]['sim'][sdep][nscf_id]
    myinputs = nscf['sim']['input']

    # step 3: find the scf simulation
    calc = myinputs['control']['calculation']
    if (calc == 'scf'):  # scf may actually be the scf simulation
        pass  # myinputs is already set
    elif (calc == 'nscf'):  # if nscf is not the scf, then we need to go deeper
        scf_id = nscf['sim'][sdep].keys()[0]
        scf = nscf['sim'][sdep][scf_id]
        myinputs = scf['sim']['input']  # this is it!
        scalc = myinputs['control']['calculation']
        if scalc != 'scf':
            raise RuntimeError('nscf depends on %s instead of scf' % scalc)
        # end if
    else:
        raise RuntimeError('unknown simulation type %s' % calc)
    # end if

    return myinputs.to_dict()
c319693e9673edf540615025baf5b5199c5e27a3
3,658,343
def is_success(code):
    """
    Returns the expected response codes for HTTP GET requests

    :param code: HTTP response codes
    :type code: int
    """
    if (200 <= code < 300) or code in [404, 500]:
        return True
    return False
fa502b4989d80edc6e1c6c717b6fe1347f99990d
3,658,344
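A brief check of `is_success` above; note that it deliberately treats 404 and 500 as "expected" codes in addition to the 2xx range:

    assert is_success(200) is True
    assert is_success(404) is True   # explicitly whitelisted
    assert is_success(301) is False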
from typing import Optional
from typing import Union


async def asyncio(
    *,
    client: AuthenticatedClient,
    json_body: SearchEventIn,
) -> Optional[Union[ErrorResponse, SearchEventOut]]:
    """Search Event

    Given a route segment (Trecho), a list of groups (Grupos) returned by the
    search for that segment, and a price token, updates the group prices and
    the token. Counts a visit (if it passes validation).

    Args:
        json_body (SearchEventIn):

    Returns:
        Response[Union[ErrorResponse, SearchEventOut]]
    """

    return (
        await asyncio_detailed(
            client=client,
            json_body=json_body,
        )
    ).parsed
6bf2a312d41cf77776e0c333ed72080c030a7170
3,658,345
def get_symmtrafo(newstruct_sub):
    """???

    Parameters
    ----------
    newstruct_sub : pymatgen structure
        pymatgen structure of the bulk material

    Returns
    -------
    trafo : ???
        ???

    """
    sg = SpacegroupAnalyzer(newstruct_sub)
    trr = sg.get_symmetry_dataset()
    trafo = []
    for index, op in enumerate(trr['rotations']):
        if np.linalg.norm(np.array([0, 0, -1]) - op[2]) < 0.0000001 and np.linalg.det(op) > 0:
            # print('transformation found', op, index, trr['translations'][index])
            trafo = {'rot_frac': op.tolist(),
                     'trans_frac': trr['translations'][index].tolist()}
            break

    # Now we have the trafo (to be used on fractional coordinates)
    if trafo == []:
        for index, op in enumerate(trr['rotations']):
            if np.linalg.norm(np.array([0, 0, -1]) - op[2]) < 0.0000001:
                # print('transformation found', op, index, trr['translations'][index])
                trafo = {'rot_frac': op.tolist(),
                         'trans_frac': trr['translations'][index].tolist()}
                break

    return trafo
9a346b4d0761de467baae1ee5f4cb0c623929180
3,658,346
def convert_sentence_into_byte_sequence(words, tags, space_idx=32, other='O'):
    """ Convert a list of words and their tags into a sequence of bytes,
    and the corresponding tag of each byte.
    """
    byte_list = []
    tag_list = []

    for word_index, (word, tag) in enumerate(zip(words, tags)):
        tag_type = get_tag_type(tag)

        if is_inside_tag(tag) and word_index > 0:
            byte_list += [space_idx]
            tag_list += [tag_type]
        elif word_index > 0:
            byte_list += [space_idx]
            tag_list += [other]

        b_seq = bytes(word, encoding='utf-8')
        nbytes = len(b_seq)

        byte_list += b_seq
        tag_list += [tag_type] * nbytes

    assert len(byte_list) == len(tag_list)

    return byte_list, tag_list
2288d22e44d99ee147c9684befd3d31836a66a9d
3,658,347
import os def corr_cov(data, sample, xdata, xlabel='x', plabels=None, interpolation=None, fname=None): """Correlation and covariance matrices. Compute the covariance regarding YY and XY as well as the correlation regarding YY. :param array_like data: function evaluations (n_samples, n_features). :param array_like sample: sample (n_samples, n_featrues). :param array_like xdata: 1D discretization of the function (n_features,). :param str xlabel: label of the discretization parameter. :param list(str) plabels: parameters' labels. :param str interpolation: If None, does not interpolate correlation and covariance matrices (YY). Otherwize use Matplotlib methods from `imshow` such as `['bilinear', 'lanczos', 'spline16', 'hermite', ...]`. :param str fname: whether to export to filename or display the figures. :returns: figure. :rtype: Matplotlib figure instances, Matplotlib AxesSubplot instances. """ p_len = np.asarray(sample).shape[1] data = ot.Sample(data) corr_yy = np.array(data.computePearsonCorrelation()) cov_yy = np.array(data.computeCovariance()) cov_matrix_xy = np.dot((np.mean(sample) - sample).T, np.mean(data, axis=0) - data) / (len(sample) - 1) x_2d_yy, y_2d_yy = np.meshgrid(xdata, xdata) x_2d_xy, y_2d_xy = np.meshgrid(xdata, np.arange(p_len)) c_map = cm.viridis figures, axs = [], [] # Covariance matrix YY fig, ax = plt.subplots() figures.append(fig) axs.append(ax) cax = ax.imshow(cov_yy, cmap=c_map, interpolation=interpolation, origin='lower') cbar = fig.colorbar(cax) cbar.set_label(r"Covariance", size=26) cbar.ax.tick_params(labelsize=23) ax.set_xlabel(xlabel, fontsize=26) ax.set_ylabel(xlabel, fontsize=26) ax.tick_params(axis='x', labelsize=23) ax.tick_params(axis='y', labelsize=23) # Correlation matrix YY fig, ax = plt.subplots() figures.append(fig) cax = ax.imshow(corr_yy, cmap=c_map, interpolation=interpolation, origin='lower') cbar = fig.colorbar(cax) cbar.set_label(r"Correlation", size=26) cbar.ax.tick_params(labelsize=23) ax.set_xlabel(xlabel, fontsize=26) ax.set_ylabel(xlabel, fontsize=26) ax.tick_params(axis='x', labelsize=23) ax.tick_params(axis='y', labelsize=23) if plabels is None: plabels = ['x' + str(i) for i in range(p_len + 1)] else: plabels.insert(0, 0) # Covariance matrix XY fig, ax = plt.subplots() figures.append(fig) axs.append(ax) cax = ax.imshow(cov_matrix_xy, cmap=c_map, interpolation='nearest') ax.set_yticklabels(plabels, fontsize=6) cbar = fig.colorbar(cax) cbar.set_label(r"Covariance", size=26) cbar.ax.tick_params(labelsize=23) ax.set_xlabel(xlabel, fontsize=26) ax.set_ylabel('Input parameters', fontsize=26) ax.tick_params(axis='x', labelsize=23) ax.tick_params(axis='y', labelsize=23) if fname is not None: io = formater('json') filename, _ = os.path.splitext(fname) data = np.append(x_2d_yy, [y_2d_yy, corr_yy, cov_yy]) names = ['x', 'y', 'Correlation-YY', 'Covariance'] sizes = [np.size(x_2d_yy), np.size(y_2d_yy), np.size(corr_yy), np.size(cov_yy)] io.write(filename + '-correlation_covariance.json', data, names, sizes) data = np.append(x_2d_xy, [y_2d_xy, cov_matrix_xy]) names = ['x', 'y', 'Correlation-XY'] sizes = [np.size(x_2d_xy), np.size(y_2d_xy), np.size(cov_matrix_xy)] io.write(filename + '-correlation_XY.json', data, names, sizes) bat.visualization.save_show(fname, figures) return figures, axs
a999c5c53db3ca738665631ada721188efc333d5
3,658,348
def get_number_rows(ai_settings, ship_height, alien_height):
    """Determine the number of rows of aliens that fit on the screen."""
    available_space_y = (ai_settings.screen_height -
                         (3 * alien_height) - ship_height)
    number_rows = int(available_space_y / (2 * alien_height))
    return number_rows
473f73bc5fb4d6e86acb90f01d861d4d8561d494
3,658,349
def generate_trial_betas(bc, bh, bcrange, bhrange, step_multiplier, random_state_debug_value=None): """Generate trial beta values for an MC move. Move sizes are scaled by the 'step_multiplier' argument, and individually by the 'bcrange' or 'bhrange' arguments for the beta_c and beta_h values respectively. Negative beta values are not allowed; moves resulting in negative values will be resampled. Requires current values of beta_c and beta_h, bcrange, bhrange, and step_multiplier. Usage: generate_trial_betas(bc, bh, bcrange, bhrange, step_multiplier) Returns: trial_bc, trial_bh """ # Data cleanup, just in case try: assert bc >= 0 except AssertionError: print("Warning: Negative value of beta_C detected in MC sampling. Resetting to 0") bc = 0 try: assert bh >= 0 except AssertionError: print("Warning: Negative value of beta_H detected in MC sampling. Resetting to 0") bh = 0 # Make move in betas scaled by step size and desired 'range' of sampling. -ve beta values are not allowed trial_bv_bh, trial_bv_bc = -1, -1 # Note that this regenerates the numpy random state from /dev/urandom # or the clock if random_state_debug_value is not set state = np.random.RandomState(random_state_debug_value) while trial_bv_bh < 0: trial_bv_bh = bh + ((state.random_sample()) - 0.5) \ * step_multiplier * bhrange while trial_bv_bc < 0: trial_bv_bc = bc + ((state.random_sample()) - 0.5) \ * step_multiplier * bcrange return trial_bv_bc, trial_bv_bh
bacc11ac74076c827c609664d3d56e5e58f47df4
3,658,350
def map_ref_sites(routed: xr.Dataset, gauge_reference: xr.Dataset, gauge_sites=None, route_var='IRFroutedRunoff', fill_method='r2', min_kge=-0.41): """ Assigns segs within routed boolean 'is_gauge' "identifiers" and what each seg's upstream and downstream reference seg designations are. Parameters ---------- routed: xr.Dataset Contains the input flow timeseries data. gauge_reference: xr.Dataset Contains reference flow timeseries data for the same watershed as the routed dataset. gauge_sites: list, optional If None, gauge_sites will be taken as all those listed in gauge_reference. route_var: str Variable name of flows used for fill_method purposes within routed. This is defaulted as 'IRFroutedRunoff'. fill_method: str While finding some upstream/downstream reference segs may be simple, (segs with 'is_gauge' = True are their own reference segs, others may be easy to find looking directly up or downstream), some river networks may have multiple options to select gauge sites and may fail to have upstream/downstream reference segs designated. 'fill_method' specifies how segs should be assigned upstream/downstream reference segs for bias correction if they are missed walking upstream or downstream. Currently supported methods: 'leave_null' nothing is done to fill missing reference segs, np.nan values are replaced with a -1 seg designation and that's it 'forward_fill' xarray's ffill method is used to fill in any np.nan values 'r2' reference segs are selected based on which reference site that seg's flows has the greatest r2 value with 'kldiv' reference segs are selected based on which reference site that seg's flows has the smallest KL Divergence value with 'kge' reference segs are selected based on which reference site that seg's flows has the greatest KGE value with Returns ------- routed: xr.Dataset Routed timeseries with reference gauge site river segments assigned to each river segement in the original routed. 
""" if isinstance(gauge_sites, type(None)): gauge_sites = gauge_reference['site'].values else: # need to typecheck since we do a for loop later and don't # want to end up iterating through a string by accident assert isinstance(gauge_sites, list) gauge_segs = gauge_reference.sel(site=gauge_sites)['seg'].values routed['is_gauge'] = False * routed['seg'] routed['down_ref_seg'] = np.nan * routed['seg'] routed['up_ref_seg'] = np.nan * routed['seg'] routed['up_seg'] = 0 * routed['is_headwaters'] routed['up_seg'].values = [find_up(routed, s, sel_method=fill_method) for s in routed['seg'].values] for s in routed['seg']: if s in list(gauge_segs): routed['is_gauge'].loc[{'seg':s}] = True routed['down_ref_seg'].loc[{'seg': s}] = s routed['up_ref_seg'].loc[{'seg': s}] = s for seg in routed['seg']: cur_seg = seg.values[()] while cur_seg in routed['seg'].values and np.isnan(routed['down_ref_seg'].sel(seg=cur_seg)): cur_seg = routed['down_seg'].sel(seg=cur_seg).values[()] if cur_seg in routed['seg'].values: routed['down_ref_seg'].loc[{'seg':seg}] = routed['down_ref_seg'].sel(seg=cur_seg).values[()] for seg in routed['seg']: cur_seg = seg.values[()] while cur_seg in routed['seg'].values and np.isnan(routed['up_ref_seg'].sel(seg=cur_seg)): cur_seg = routed['up_seg'].sel(seg=cur_seg).values[()] if cur_seg in routed['seg'].values: routed['up_ref_seg'].loc[{'seg':seg}] = routed['up_ref_seg'].sel(seg=cur_seg).values[()] # Fill in any remaining nulls (head/tailwaters) if fill_method == 'leave_null': # since there should be no -1 segs from mizuroute, we can set nan's to -1 to acknowledge # that they have been addressed and still set them apart from the rest of the data routed['up_ref_seg'] = (routed['up_ref_seg'].where(~np.isnan(routed['up_ref_seg']), other=-1)) routed['down_ref_seg'] = (routed['down_ref_seg'].where(~np.isnan(routed['down_ref_seg']), other=-1)) elif fill_method == 'forward_fill': routed['up_ref_seg'] = (routed['up_ref_seg'].where( ~np.isnan(routed['up_ref_seg']), other=routed['down_ref_seg'])).ffill('seg') routed['down_ref_seg'] = (routed['down_ref_seg'].where( ~np.isnan(routed['down_ref_seg']), other=routed['up_ref_seg'])).ffill('seg') elif fill_method == 'r2': fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0] fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0] routed['r2_up_gauge'] = 0 * routed['is_gauge'] routed['r2_down_gauge'] = 0 * routed['is_gauge'] for curr_seg in routed['seg'].values: up_ref_seg = np.nan curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values): up_ref_r2, up_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2 routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg else: # this seg has already been filled in, but r2 still needs to be calculated ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values up_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2 routed['r2_up_gauge'].loc[{'seg':curr_seg}] = up_ref_r2 for curr_seg in routed['seg'].values: down_ref_seg = np.nan curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values): down_ref_r2, down_ref_seg = find_max_r2(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2 routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg else: # this seg has already been filled in, but r2 still needs to be 
calculated ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values down_ref_r2 = np.corrcoef(curr_seg_flow, ref_flow)[0, 1]**2 routed['r2_down_gauge'].loc[{'seg':curr_seg}] = down_ref_r2 elif fill_method == 'kldiv': fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0] fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0] routed['kldiv_up_gauge'] = 0 * routed['is_gauge'] routed['kldiv_down_gauge'] = 0 * routed['is_gauge'] for curr_seg in routed['seg'].values: curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values): up_ref_kldiv, up_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['kldiv_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kldiv routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg else: # this seg has already been filled in, but kldiv still needs to be calculated # kldiv computation could probably be gutted in the furture ... TINY_VAL = 1e-6 total_bins = int(np.sqrt(len(curr_seg_flow))) curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram( curr_seg_flow, bins=total_bins, density=True) curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg).values).values ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0] ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL up_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf) routed['kldiv_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kldiv for curr_seg in routed['seg'].values: curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values): down_ref_kldiv, down_ref_seg = find_min_kldiv(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['kldiv_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kldiv routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg else: # this seg has already been filled in, but kldiv still needs to be calculated # kldiv computation could probably be gutted in the furture ... 
TINY_VAL = 1e-6 total_bins = int(np.sqrt(len(curr_seg_flow))) curr_seg_flow_pdf, curr_seg_flow_edges = np.histogram( curr_seg_flow, bins=total_bins, density=True) curr_seg_flow_pdf[curr_seg_flow_pdf == 0] = TINY_VAL ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg).values).values ref_flow_pdf = np.histogram(ref_flow, bins=curr_seg_flow_edges, density=True)[0] ref_flow_pdf[ref_flow_pdf == 0] = TINY_VAL down_ref_kldiv = entropy(pk=ref_flow_pdf, qk=curr_seg_flow_pdf) routed['kldiv_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kldiv elif fill_method == 'kge': fill_up_isegs = np.where(np.isnan(routed['up_ref_seg'].values))[0] fill_down_isegs = np.where(np.isnan(routed['down_ref_seg'].values))[0] routed['kge_up_gauge'] = min_kge + 0.0 * routed['is_gauge'] routed['kge_down_gauge'] = min_kge + 0.0 * routed['is_gauge'] for curr_seg in routed['seg'].values: up_ref_seg = np.nan curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['up_ref_seg'].sel(seg=curr_seg).values): up_ref_kge, up_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['kge_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kge routed['up_ref_seg'].loc[{'seg':curr_seg}] = up_ref_seg else: # this seg has already been filled in, but kge still needs to be calculated ref_flow = routed[route_var].sel(seg=routed['up_ref_seg'].sel(seg=curr_seg)).values up_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow) routed['kge_up_gauge'].loc[{'seg':curr_seg}] = up_ref_kge for curr_seg in routed['seg'].values: down_ref_seg = np.nan curr_seg_flow = routed[route_var].sel(seg=curr_seg).values if np.isnan(routed['down_ref_seg'].sel(seg=curr_seg).values): down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['kge_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kge routed['down_ref_seg'].loc[{'seg':curr_seg}] = down_ref_seg else: # this seg has already been filled in, but kge still needs to be calculated ref_flow = routed[route_var].sel(seg=routed['down_ref_seg'].sel(seg=curr_seg)).values down_ref_kge = kling_gupta_efficiency(curr_seg_flow, ref_flow) if down_ref_kge < min_kge: down_ref_kge, down_ref_seg = find_max_kge(routed[route_var].sel(seg=gauge_segs), curr_seg_flow) routed['kge_down_gauge'].loc[{'seg':curr_seg}] = down_ref_kge else: raise ValueError('Invalid method provided for "fill_method"') return routed
a2146e532a7aa95ba0753aaddc6d6da2cc4f1c67
3,658,351
def get_error(est_track, true_track): """ """ if est_track.ndim > 1: true_track = true_track.reshape((true_track.shape[0],1)) error = np.recarray(shape=est_track.shape, dtype=[('position', float), ('orientation', float), ('orientation_weighted', float)]) # Position error pos_err = (true_track.x - est_track.x)**2 + (true_track.y - est_track.y)**2 error.position = np.sqrt(pos_err) # Orientation error error.orientation = anglediff(true_track.angle, est_track.angle, units='deg') error.orientation_weighted = anglediff(true_track.angle, est_track.angle_w, units='deg') descr = {} bix = np.logical_not(np.isnan(error.orientation)) descr['orientation_median'] = np.median(np.abs(error.orientation[bix])) descr['orientation_mean'] = np.mean(np.abs(error.orientation[bix])) bix = np.logical_not(np.isnan(error.orientation_weighted)) descr['orientation_weighted_median'] = np.nanmedian(np.abs(error.orientation_weighted[bix])) descr['orientation_weighted_mean'] = np.nanmean(np.abs(error.orientation_weighted[bix])) # no angle true_no_angle = np.isnan(true_track.angle) est_no_angle = np.isnan(est_track.angle) agree = np.logical_and(true_no_angle, est_no_angle) disagree = np.logical_xor(true_no_angle, est_no_angle) both = np.logical_or(true_no_angle, est_no_angle) #ipdb.set_trace() descr['no_angle_auc'] = roc_auc_score(true_no_angle, est_no_angle) descr['no_angle_mcc'] = matthews_corrcoef(true_no_angle, est_no_angle) descr['no_angle_brier'] = brier_score_loss(true_no_angle, est_no_angle) descr['no_angle_acc'] = agree.sum()/both.sum() descr['no_angle_p_per_frame'] = disagree.sum()/disagree.shape[0] descr['position_median'] = np.median(error.position) descr['position_mean'] = np.mean(error.position) #print('True frequency of angle-does-not-apply:', # true_no_angle.sum()/true_no_angle.shape[0]) #print('Estimated frequency of angle-does-not-apply:', # est_no_angle.sum()/est_no_angle.shape[0]) return error, descr
5ccdb12b844de9b454f62375358d4a1e1b91e6f7
3,658,352
from typing import Any def test_conflict(): """ Tiles that have extras that conflict with indices should produce an error. """ def tile_extras_provider(hyb: int, ch: int, z: int) -> Any: return { Indices.HYB: hyb, Indices.CH: ch, Indices.Z: z, } stack = synthetic_stack( tile_extras_provider=tile_extras_provider, ) with pytest.raises(ValueError): stack.tile_metadata
2d2e86f5d60762d509e7c27f5a74715c868abbc4
3,658,353
import json def get_node_to_srn_mapping(match_config_filename): """ Returns the node-to-srn map from match_conf.json """ with open(match_config_filename) as config_file: config_json = json.loads(config_file.read()) if "node_to_srn_mapping" in config_json: return config_json["node_to_srn_mapping"] else: node_to_srn = {} for node_info in config_json["NodeData"]: node_id = node_info["TrafficNode"] srn_num = node_info["srn_number"] node_to_srn[node_id] = srn_num return node_to_srn
37bf2f266f4e5163cc4d6e9290a8eaf17e220cd3
3,658,354
from matplotlib.pyplot import get_cmap def interpolate(values, color_map=None, dtype=np.uint8): """ Given a 1D list of values, return interpolated colors for the range. Parameters --------------- values : (n, ) float Values to be interpolated over color_map : None, or str Key to a colormap contained in: matplotlib.pyplot.colormaps() e.g: 'viridis' Returns ------------- interpolated : (n, 4) dtype Interpolated RGBA colors """ # get a color interpolation function if color_map is None: cmap = linear_color_map else: cmap = get_cmap(color_map) # make input always float values = np.asanyarray(values, dtype=np.float64).ravel() # scale values to 0.0 - 1.0 and get colors colors = cmap((values - values.min()) / values.ptp()) # convert to 0-255 RGBA rgba = to_rgba(colors, dtype=dtype) return rgba
dfb9e58ef8d07c5e3455297270e36332ef9385df
3,658,355
def nest_dictionary(flat_dict, separator): """ Nests a given flat dictionary. Nested keys are created by splitting given keys around the `separator`. """ nested_dict = {} for key, val in flat_dict.items(): split_key = key.split(separator) act_dict = nested_dict final_key = split_key.pop() for new_key in split_key: if not new_key in act_dict: act_dict[new_key] = {} act_dict = act_dict[new_key] act_dict[final_key] = val return nested_dict
f5b8649d916055fa5911fd1f80a8532e5dbee274
3,658,356
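A small usage sketch for `nest_dictionary` above, assuming the function is in scope; the separator splits flat keys into nested levels.

flat = {"model.lr": 0.01, "model.depth": 3, "seed": 42}
nested = nest_dictionary(flat, ".")
# nested == {"model": {"lr": 0.01, "depth": 3}, "seed": 42}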
def write(path_, *write_): """Overwrites file with passed data. Data can be a string, number or boolean type. Returns True, None if writing operation was successful, False and reason message otherwise.""" return _writeOrAppend(False, path_, *write_)
3bd5db2d833c5ff97568489596d3dcea47c1a9f4
3,658,357
import json


def prepare_saab_data(sequence):
    """
    Processing data after anarci parsing. Preparing data for SAAB+
    ------------
    Parameters
        sequence - sequence object (OAS database format)
    ------------
    Return
        sequence.Sequence           - full (non-numbered) antibody sequence
        oas_output_parser(Numbered) - antibody sequence, IMGT numbered to comply
                                      with the SAAB+ input format
        sequence_info_dict          - dictionary of sequence metadata that is
                                      required for SAAB+ to run
    """
    cdr3sequence = sequence.CDRH3
    VGene = sequence.VGene[:5]
    Numbered = json.loads(sequence.Numbered)
    CDRs = [loop for loop in Numbered.keys() if "cdr" in loop]
    sequence_info_dict = {formatLoops[loop]: Numbered[loop] if "3" not in loop else cdr3sequence
                          for loop in CDRs}
    sequence_info_dict["V"] = VGene
    sequence_info_dict["Redundancy"] = find_redundancy(sequence.Redundancy)
    return sequence_obj(sequence.Sequence, oas_output_parser(Numbered), sequence_info_dict)
f88ba3f2badb951f456678e33f3371d80934754e
3,658,358
def covariance_align(data): """Covariance align continuous or windowed data in-place. Parameters ---------- data: np.ndarray (n_channels, n_times) or (n_windows, n_channels, n_times) continuous or windowed signal Returns ------- aligned: np.ndarray (n_channels x n_times) or (n_windows x n_channels x n_times) aligned continuous or windowed data ..note: If this function is supposed to preprocess continuous data, it should be given to raw.apply_function(). """ aligned = data.copy() if len(data.shape)==3: for i_window in range(aligned.shape[0]): covar = np.cov(aligned[i_window]) proj = pinv(sqrtm(covar)) aligned[i_window] = np.matmul(proj, aligned[i_window]) elif len(data.shape)==2: covar = np.cov(aligned) proj = pinv(sqrtm(covar)) aligned = np.matmul(proj, aligned) # TODO: the overriding of protected '_data' should be implemented in the # TODO: dataset when transforms are applied to windows if hasattr(data, '_data'): data._data = aligned return aligned
82b99b43202097670de8af52f96ef156218921fb
3,658,359
import math def _is_equidistant(array: np.ndarray) -> bool: """ Check if the given 1D array is equidistant. E.g. the distance between all elements of the array should be equal. :param array: The array that should be equidistant """ step = abs(array[1] - array[0]) for i in range(0, len(array) - 1): curr_step = abs(array[i + 1] - array[i]) if not math.isclose(curr_step, step, rel_tol=1e-3): return False return True
d12c12e48545697bdf337c8d20e45a27fb444beb
3,658,360
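A brief check of `_is_equidistant` above, assuming the function (and numpy) is in scope; the relative tolerance is 1e-3, so small jitter still counts as equidistant.

import numpy as np

print(_is_equidistant(np.array([0.0, 0.5, 1.0, 1.5])))   # True: constant step of 0.5
print(_is_equidistant(np.array([0.0, 0.5, 1.0, 1.75])))  # False: last step is 0.75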
def list_a_minus_b(list1, list2): """Given two lists, A and B, returns A-B.""" return filter(lambda x: x not in list2, list1)
8fbac6452077ef7cf73e0625303822a35d0869c3
3,658,361
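Usage note for `list_a_minus_b` above: on Python 3 `filter` returns a lazy iterator, so wrap the result in `list()` when an actual list is needed (assumes the function is in scope).

result = list(list_a_minus_b([1, 2, 3, 4], [2, 4]))
# result == [1, 3]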
def is_equivalent(a, b): """Compares two strings and returns whether they are the same R code This is unable to determine if a and b are different code, however. If this returns True you may assume that they are the same, but if this returns False you must not assume that they are different. is_equivalent("0 + 1", "1") is False, for example, even though those two commands do the same thing. """ # String pointers ap = 0 bp = 0 ps = 0 an_comp = False while ap < len(a) and bp < len(b): # If none of the current chars are alphanumeric or the last character match is not alphanumeric then skip # whitespace forward if (a[ap] not in _an and b[bp] not in _an) or not an_comp: while ap < len(a) and a[ap] in _ws and not _is_a_number(a, ap): ap += 1 while bp < len(b) and b[bp] in _ws and not _is_a_number(b, bp): bp += 1 if ap >= len(a) or bp >= len(b): # Reached end of string break an_comp = False if a[ap] != b[bp]: # They must be equal # print("Failed {}:{} / {}:{}".format(a, ap, b, bp)) return False if a[ap] in _an: # This is comparing two alphanumeric values an_comp = True if a[ap] in _quotes: opener = a[ap] # String; must match exactly ap += 1 bp += 1 while ap < len(a) and bp < len(b) and a[ap] == b[bp]: if a[ap] == opener and a[ap-1] not in _esc: break ap += 1 bp += 1 else: # print("Failed {}:{} / {}:{} in string".format(a, ap, b, bp)) return False ap += 1 bp += 1 # Clean up ending whitespace while ap < len(a) and a[ap] in _ws: ap += 1 while bp < len(b) and b[bp] in _ws: bp += 1 if ap >= len(a) and bp >= len(b): return True else: return False
c37ea6e8684c1d2fcd5d549836c9115da98c7b2f
3,658,362
def solve(lines, n): """Solve the problem.""" grid = Grid(lines) for _ in range(n): grid.step() return grid.new_infections
2db532a911e088dd58ee17bdc036ea017e979c8d
3,658,363
import requests def get_ingredient_id(): """Need to get ingredient ID in order to access all attributes""" query = request.args["text"] resp = requests.get(f"{BASE_URL_SP}/food/ingredients/search?", params={"apiKey":APP_KEY,"query":query}) res = resp.json() lst = {res['results'][i]["name"]:res['results'][i]["id"] for i in range(len(res['results']))} return jsonify(lst)
8c58232f48883a4b1e2d76ca1504b3dccabdb954
3,658,364
def xticks(ticks=None, labels=None, **kwargs): """ Get or set the current tick locations and labels of the x-axis. Call signatures:: locs, labels = xticks() # Get locations and labels xticks(ticks, [labels], **kwargs) # Set locations and labels Parameters ---------- ticks : array_like A list of positions at which ticks should be placed. You can pass an empty list to disable xticks. labels : array_like, optional A list of explicit labels to place at the given *locs*. **kwargs :class:`.Text` properties can be used to control the appearance of the labels. Returns ------- locs An array of label locations. labels A list of `.Text` objects. Notes ----- Calling this function with no arguments (e.g. ``xticks()``) is the pyplot equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on the current axes. Calling this function with arguments is the pyplot equivalent of calling `~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes. Examples -------- Get the current locations and labels: >>> locs, labels = xticks() Set label locations: >>> xticks(np.arange(0, 1, step=0.2)) Set text labels: >>> xticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue')) Set text labels and properties: >>> xticks(np.arange(12), calendar.month_name[1:13], rotation=20) Disable xticks: >>> xticks([]) """ ax = gca() if ticks is None and labels is None: locs = ax.get_xticks() labels = ax.get_xticklabels() elif labels is None: locs = ax.set_xticks(ticks) labels = ax.get_xticklabels() else: locs = ax.set_xticks(ticks) labels = ax.set_xticklabels(labels, **kwargs) for l in labels: l.update(kwargs) return locs, silent_list('Text xticklabel', labels)
a6b044ffc9efdc279495c25735745006de9d7a8c
3,658,365
def main() -> None: """ Program entry point. :return: Nothing """ try: connection = connect_to_db2() kwargs = {'year_to_schedule': 2018} start = timer() result = run(connection, **kwargs) output_results(result, connection) end = timer() print(f'time elapsed: {end - start}') connection.close() except Exception as e: print(f'Something broke ...\n\tReason:{str(e)}') connection.close() exit(1) return None
53727547a16c8b203ca89d54f55ddbd8b2f2645b
3,658,366
import datetime
from pathlib import Path


def _generate_cfg_dir(cfg_dir: Path = None) -> Path:
    """Make sure there is a working directory.

    Args:
        cfg_dir: If cfg_dir is None or does not exist, create a sub-directory
            in CFG['output_dir'].
    """
    if cfg_dir is None:
        scratch_dir = CFG["output_dir"]
        # TODO this timestamp is not safe for parallel processing
        timestamp = datetime.datetime.now(datetime.timezone.utc).strftime(
            "%Y%m%d_%H%M%S"
        )
        cfg_dir = to_absolute_path(f"marrmot_{timestamp}", parent=Path(scratch_dir))
    cfg_dir.mkdir(parents=True, exist_ok=True)
    return cfg_dir
1c6653f43dc53b5cd8c2ac4f5cdcc084ef4c13ad
3,658,367
def delete(home_id): """ Delete A About --- """ try: return custom_response({"message":"deleted", "id":home_id}, 200) except Exception as error: return custom_response(str(error), 500)
408fe8db0a728b33d7a9c065944d706d6502b8b5
3,658,368
def round_to_sigfigs(x, sigfigs=1): """ >>> round_to_sigfigs(12345.6789, 7) # doctest: +ELLIPSIS 12345.68 >>> round_to_sigfigs(12345.6789, 1) # doctest: +ELLIPSIS 10000.0 >>> round_to_sigfigs(12345.6789, 0) # doctest: +ELLIPSIS 100000.0 >>> round_to_sigfigs(12345.6789, -1) # doctest: +ELLIPSIS 1000000.0 """ place = int(log(x, 10)) if sigfigs <= 0: additional_place = x > 10. ** place return 10. ** (-sigfigs + place + additional_place) return round_to_place(x, sigfigs - 1 - place)
a5191f3c60e85d50a47a43aee38d7d1f14d3fdc6
3,658,369
import urllib import json def load_api_data (API_URL): """ Download data from API_URL return: json """ #actual download with urllib.request.urlopen(API_URL) as url: api_data = json.loads(url.read().decode()) #testing data ##with open('nrw.json', 'r') as testing_set: ## api_data = json.load(testing_set) return api_data
61832a798ac616f3d1612ce69411d4f43ed85699
3,658,370
def test_parsing(monkeypatch, capfd, configuration, expected_record_keys): """Verifies the feed is parsed as expected""" def mock_get(*args, **kwargs): return MockResponse() test_tap: Tap = TapFeed(config=configuration) monkeypatch.setattr(test_tap.streams["feed"]._requests_session, "send", mock_get) test_tap.sync_all() out, err = capfd.readouterr() tap_records = get_parsed_records(out) assert len(tap_records) == 10 for record in tap_records: print(record) assert record["type"] == "RECORD" assert record["stream"] == "feed" assert record["record"]["feed_url"] == MockResponse.url assert list(record["record"].keys()) == expected_record_keys
25a79966eba641e4b857c80e12fb123e8fc3477f
3,658,371
def hsl(h, s, l): """Converts an Hsl(h, s, l) triplet into a color.""" return Color.from_hsl(h, s, l)
081fb4b7e7fc730525d0d18182c951ad92fab895
3,658,372
import sys def factor(afunc): """decompose the string m.f or m.f(parms) and return function and parameter dictionaries afunc has the form xxx or xxx(p1=value, p2=value,...) create a dictionary from the parameters consisting of at least _first:True. parameter must have the form name=value, name=value,... """ firstparen = afunc.find("(") if firstparen >0: # parameters found, make a dictionary of them try: f = afunc[:firstparen] afunc = "_customfunction" + afunc[firstparen:] co = compile(afunc, "<string>", "eval") spssparams = set(co.co_names) except : raise ValueError(_("The formula syntax given is invalid:\n") + str(sys.exc_info()[1])) else: spssparams = set() f = afunc co = compile("_customfunction()", "<string>", "eval") return f, co, spssparams
10447db5728df2ff45846997dfb7fc52cf471080
3,658,373
def spline(xyz, s=3, k=2, nest=-1):
    """ Generate B-splines as documented in
    http://www.scipy.org/Cookbook/Interpolation

    The scipy.interpolate package wraps the netlib FITPACK routines (Dierckx)
    for calculating smoothing splines for various kinds of data and geometries.
    Although the data is evenly spaced in this example, it need not be so to
    use this routine.

    Parameters
    ---------------
    xyz : array, shape (N,3)
        array representing x,y,z of N points in 3d space
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where
        g(x) is the smoothed interpolation of (x,y). The user can use s to
        control the tradeoff between closeness and smoothness of fit. Larger
        s means more smoothing while smaller values of s indicate less
        smoothing. Recommended values of s depend on the weights, w. If the
        weights represent the inverse of the standard-deviation of y, then a
        good s value should be found in the range (m-sqrt(2*m),m+sqrt(2*m))
        where m is the number of datapoints in x, y, and w.
    k : int, optional
        Degree of the spline. Cubic splines are recommended. Even values of
        k should be avoided especially with a small s-value.
    nest : None or int, optional
        An over-estimate of the total number of knots of the spline to help
        in determining the storage space. None results in value m+2*k.
        -1 results in m+k+1. Always large enough is nest=m+k+1.
        Default is -1.

    Returns
    ----------
    xyzn : array, shape (M,3)
        array representing x,y,z of the M points along the smoothed,
        interpolated spline

    Examples
    ----------
    >>> import numpy as np
    >>> t=np.linspace(0,1.75*2*np.pi,100)# make ascending spiral in 3-space
    >>> x = np.sin(t)
    >>> y = np.cos(t)
    >>> z = t
    >>> x+= np.random.normal(scale=0.1, size=x.shape) # add noise
    >>> y+= np.random.normal(scale=0.1, size=y.shape)
    >>> z+= np.random.normal(scale=0.1, size=z.shape)
    >>> xyz=np.vstack((x,y,z)).T
    >>> xyzn=spline(xyz,3,2,-1)
    >>> len(xyzn) > len(xyz)
    True

    See also
    ----------
    scipy.interpolate.splprep
    scipy.interpolate.splev
    """
    # find the knot points
    tckp, u = splprep([xyz[:, 0], xyz[:, 1], xyz[:, 2]], s=s, k=k, nest=nest)
    # evaluate spline, including interpolated points
    xnew, ynew, znew = splev(np.linspace(0, 1, 400), tckp)
    return np.vstack((xnew, ynew, znew)).T
97500c7a63bc076abd770c43fd3f6d23c30baa03
3,658,374
import time def load_supercomputers(log_file, train_ratio=0.5, windows_size=20, step_size=0, e_type='bert', mode="balance", no_word_piece=0): """ Load BGL, Thunderbird, and Spirit unstructured log into train and test data Parameters ---------- log_file: str, the file path of raw log (extension: .log). train_ratio: float, the ratio of training data for train/test split. windows_size: int, the window size for sliding window step_size: int, the step size for sliding window. if step_size is equal to window_size then fixed window is applied. e_type: str, embedding type (choose from BERT, XLM, and GPT2). mode: str, split train/testing in balance or not no_word_piece: bool, use split word into wordpiece or not. Returns ------- (x_tr, y_tr): the training data (x_te, y_te): the testing data """ print("Loading", log_file) with open(log_file, mode="r", encoding='utf8') as f: logs = f.readlines() logs = [x.strip() for x in logs] E = {} e_type = e_type.lower() if e_type == "bert": encoder = bert_encoder elif e_type == "xlm": encoder = xlm_encoder else: if e_type == "gpt2": encoder = gpt2_encoder else: raise ValueError('Embedding type {0} is not in BERT, XLM, and GPT2'.format(e_type.upper())) print("Loaded", len(logs), "lines!") x_tr, y_tr = [], [] i = 0 failure_count = 0 n_train = int(len(logs) * train_ratio) c = 0 t0 = time.time() while i < n_train - windows_size: c += 1 if c % 1000 == 0: print("\rLoading {0:.2f}% - {1} unique logs".format(i * 100 / n_train, len(E.keys())), end="") if logs[i][0] != "-": failure_count += 1 seq = [] label = 0 for j in range(i, i + windows_size): if logs[j][0] != "-": label = 1 content = logs[j] # remove label from log messages content = content[content.find(' ') + 1:] content = clean(content.lower()) if content not in E.keys(): try: E[content] = encoder(content, no_word_piece) except Exception as _: print(content) emb = E[content] seq.append(emb) x_tr.append(seq.copy()) y_tr.append(label) i = i + step_size print("\nlast train index:", i) x_te = [] y_te = [] # for i in range(n_train, len(logs) - windows_size, step_size): if i % 1000 == 0: print("Loading {:.2f}".format(i * 100 / n_train)) if logs[i][0] != "-": failure_count += 1 seq = [] label = 0 for j in range(i, i + windows_size): if logs[j][0] != "-": label = 1 content = logs[j] # remove label from log messages content = content[content.find(' ') + 1:] content = clean(content.lower()) if content not in E.keys(): E[content] = encoder(content, no_word_piece) emb = E[content] seq.append(emb) x_te.append(seq.copy()) y_te.append(label) (x_tr, y_tr) = shuffle(x_tr, y_tr) print("Total failure logs: {0}".format(failure_count)) if mode == 'balance': x_tr, y_tr = balancing(x_tr, y_tr) num_train = len(x_tr) num_test = len(x_te) num_total = num_train + num_test num_train_pos = sum(y_tr) num_test_pos = sum(y_te) num_pos = num_train_pos + num_test_pos print('Total: {} instances, {} anomaly, {} normal' \ .format(num_total, num_pos, num_total - num_pos)) print('Train: {} instances, {} anomaly, {} normal' \ .format(num_train, num_train_pos, num_train - num_train_pos)) print('Test: {} instances, {} anomaly, {} normal\n' \ .format(num_test, num_test_pos, num_test - num_test_pos)) return (x_tr, y_tr), (x_te, y_te)
2282b8cbd975160e57ff62106a7e0bad3f337e5a
3,658,375
def is_running(service: Service) -> bool: """Is the given pyodine daemon currently running? :raises ValueError: Unknown `service`. """ try: return bool(TASKS[service]) and not TASKS[service].done() except KeyError: raise ValueError("Unknown service type.")
160c7c8da0635c9c11ebdaf711b794fc0a09adff
3,658,376
def PropertyWrapper(prop): """Wrapper for db.Property to make it look like a Django model Property""" if isinstance(prop, db.Reference): prop.rel = Relation(prop.reference_class) else: prop.rel = None prop.serialize = True return prop
9f93a37dffd433fd87ffa4bfdb65680a9ad1d02d
3,658,377
def drowLine(cord,orient,size): """ The function provides the coordinates of the line. Arguments: starting x or y coordinate of the line, orientation (string. "vert" or "hor") and length of the line Return: list of two points (start and end of the line) """ global cv2 if orient == "vert": x1 = cord x2 = cord y1 = 0 y2 = size elif orient == "hor": x1 = 0 x2 = size y1 = cord y2 = cord else: print("not hor not vert") return 0 return [(x1, y1), (x2, y2)]
bc688cfe33dcf42ddac6770bbdf91ccc19c1b427
3,658,378
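A minimal usage sketch for `drowLine` above, assuming the function is in scope; the `global cv2` statement is harmless here because cv2 is never dereferenced.

print(drowLine(100, "vert", 400))  # [(100, 0), (100, 400)] - vertical line at x=100
print(drowLine(50, "hor", 400))    # [(0, 50), (400, 50)]   - horizontal line at y=50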
def bluetoothRead(): """ Returns the bluetooth address of the robot (if it has been previously stored) arguments: none returns: string - the bluetooth address of the robot, if it has been previously stored; None otherwise """ global EEPROM_BLUETOOTH_ADDRESS bt = EEPROMread(EEPROM_BLUETOOTH_ADDRESS, 17) if bluetoothValidate(bt): return bt else: return None
c4e08d438b91b3651f27b374c0b38069ddd1eaaf
3,658,379
def is_step_done(client, step_name): """Query the trail status using the client and return True if step_name has completed. Arguments: client -- A TrailClient or similar object. step_name -- The 'name' tag of the step to check for completion. Returns: True -- if the step has succeeded. False -- otherwise. """ # To understand the structure of the result returned by the API calls, please see the documentation of the # TrailClient class. statuses = client.status(fields=[StatusField.STATE], name=step_name) # In this case, the status call returns a list of step statuses. # Since we have exactly one step with each name and we are querying the status of steps with the given name, # there will be only one element in the result list. Hence we refer to the zeroth element of results. if statuses and statuses[0][StatusField.STATE] == Step.SUCCESS: return True return False
a5373d7e00f0c8526f573356b5d71a2ac08aa516
3,658,380
def on_chat_send(message):
    """Broadcast chat message to a watch room"""
    # Check if params are correct
    if 'roomId' not in message:
        return {'status_code': 400}, request.sid
    room_token = message['roomId']
    # Check if room exists
    if not db.hexists('rooms', room_token):
        return {'status_code': 404}, request.sid
    # Check if user was not in the room
    if not room_token in rooms(sid=request.sid):
        return {'status_code': 403}, request.sid
    # Add current server timestamp to the state
    message = add_current_time_to_state(message)
    # Send message to everybody in the room
    emit('message_update', message, room=room_token)
    # Response
    return {'status_code': 200}, 200
01c7f15602653848c9310e90c0a353648fafbb52
3,658,381
from typing import Union


def arima(size: int = 100, phi: Union[float, ndarray] = 0, theta: Union[float, ndarray] = 0,
          d: int = 0, var: float = 0.01,
          random_state: float = None) -> ndarray:  # inherit from arima_with_seasonality
    """Simulate a realization from an ARIMA characteristic.

    Acts like `tswge::gen.arima.wge()`

    Parameters
    ----------
    size: scalar int
        Number of samples to generate.
    phi: scalar float or list-like
        AR process order
    theta: scalar float or list-like
        MA process order
    d: scalar int
        ARIMA process difference order
    var: scalar float, optional
        Noise variance level.
    random_state: scalar int, optional
        Seed the random number generator.

    Returns
    -------
    signal: np.ndarray
        Simulated ARIMA.
    """
    return arima_with_seasonality(size=size, phi=phi, theta=theta, d=d, s=0, var=var,
                                  random_state=random_state)
24c3ac8af295d25facf0e65a4fc0925b22db9444
3,658,382
def gt_dosage(gt): """Convert unphased genotype to dosage""" x = gt.split(b'/') return int(x[0])+int(x[1])
819fc9beb834f57e44bcb0ac3e1d3c664c7efd42
3,658,383
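A short usage sketch for `gt_dosage` above, assuming the function is in scope; the genotype is passed as a bytes string with '/'-separated alleles.

print(gt_dosage(b'0/1'))  # 1 (heterozygous)
print(gt_dosage(b'1/1'))  # 2 (homozygous alternate)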
from typing import Optional from typing import Dict from typing import Any def create_key_pair_in_ssm( ec2: EC2Client, ssm: SSMClient, keypair_name: str, parameter_name: str, kms_key_id: Optional[str] = None, ) -> Optional[KeyPairInfo]: """Create keypair in SSM.""" keypair = create_key_pair(ec2, keypair_name) try: kms_key_label = "default" kms_args: Dict[str, Any] = {} if kms_key_id: kms_key_label = kms_key_id kms_args = {"KeyId": kms_key_id} LOGGER.info( 'storing generated key in SSM parameter "%s" using KMS key "%s"', parameter_name, kms_key_label, ) ssm.put_parameter( Name=parameter_name, Description='SSH private key for KeyPair "{}" ' "(generated by Runway)".format(keypair_name), Value=keypair["KeyMaterial"], Type="SecureString", Overwrite=False, **kms_args, ) except ClientError: # Erase the key pair if we failed to store it in SSM, since the # private key will be lost anyway LOGGER.exception( "failed to store generated key in SSM; deleting " "created key pair as private key will be lost" ) ec2.delete_key_pair(KeyName=keypair_name, DryRun=False) return None return { "status": "created", "key_name": keypair.get("KeyName", ""), "fingerprint": keypair.get("KeyFingerprint", ""), }
40cca5fd938aa6709a4d844c912b294c6aaba552
3,658,384
def sumofsq(im, axis=0): """Compute square root of sum of squares. Args: im: Raw image. axis: Channel axis. Returns: Square root of sum of squares of input image. """ out = np.sqrt(np.sum(im.real * im.real + im.imag * im.imag, axis=axis)) return out
6aa791d3c6a2e8e6fff0dbe0a364350d48fb4794
3,658,385
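A minimal usage sketch for `sumofsq` above, assuming the function and numpy are in scope; it combines a toy 4-channel complex image along the channel axis.

import numpy as np

im = np.random.randn(4, 32, 32) + 1j * np.random.randn(4, 32, 32)
combined = sumofsq(im, axis=0)   # root-sum-of-squares over the 4 channels
print(combined.shape)            # (32, 32)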
import os def get_airflow_home(): """Get path to Airflow Home""" return expand_env_var(os.environ.get('AIRFLOW_HOME', '~/airflow'))
19af0ce78204b0c640e4e13fd56605bbcd395422
3,658,386
import csv import re import sys def read_mapping_file(map_file): """ Mappings are simply a CSV file with three columns. The first is a string to be matched against an entry description. The second is the payee against which such entries should be posted. The third is the account against which such entries should be posted. If the match string begins and ends with '/' it is taken to be a regular expression. """ mappings = [] with open(map_file, "r", encoding='utf-8', newline='') as f: map_reader = csv.reader(f) for row in map_reader: if len(row) > 1: pattern = row[0].strip() payee = row[1].strip() account = row[2].strip() tags = row[3:] if pattern.startswith('/') and pattern.endswith('/'): try: pattern = re.compile(pattern[1:-1]) except re.error as e: print("Invalid regex '{0}' in '{1}': {2}" .format(pattern, map_file, e), file=sys.stderr) sys.exit(1) mappings.append((pattern, payee, account, tags)) return mappings
e72ceb08daac0a12a426062f95cfa06776cfdedd
3,658,387
def biquad_bp2nd(fm, q, fs, q_warp_method="cos"): """Calc coeff for bandpass 2nd order. input: fm...mid frequency in Hz q...bandpass quality fs...sampling frequency in Hz q_warp_method..."sin", "cos", "tan" output: B...numerator coefficients Laplace transfer function A...denominator coefficients Laplace transfer function b...numerator coefficients z-transfer function a...denominator coefficients z-transfer function """ wm = 2*np.pi*fm B = np.array([0, 1 / (q*wm), 0]) A = np.array([1 / wm**2, 1 / (q*wm), 1]) wmpre = f_prewarping(fm, fs) qpre = q_prewarping(q, fm, fs, q_warp_method) Bp = 0., 1 / (qpre*wmpre), 0. Ap = 1 / wmpre**2, 1 / (qpre*wmpre), 1. b, a = bilinear_biquad(Bp, Ap, fs) return B, A, b, a
c7330f9bd4a1941359a54ea6e6d7e8fe7801f55e
3,658,388
def pullAllData(): """ Pulls all available data from the database Sends all analyzed data back in a json with fileNames and list of list of all "spots" intensities and backgrounds. Args: db.d4Images (Mongo db collection): Mongo DB collection with processed data Returns: payload (jsonify(dict)): data dictionary with filename, spots, and background info statusCode (int): HTTP status code """ pullFileNames = [] pullSpotData = [] pullBgData = [] for eachEntry in db.d4Images.find(): pullFileNames.append(eachEntry["filename"]) pullSpotData.append(eachEntry["spots"]) pullBgData.append(eachEntry["background"]) payload = {"filename": pullFileNames, "spots": pullSpotData, "background": pullBgData} statusCode = 200 return jsonify(payload), statusCode
97674c981af48f37e90667c00947673f1df34c66
3,658,389
def f2(): """ >>> # +--------------+-----------+-----------+------------+-----------+--------------+ >>> # | Chromosome | Start | End | Name | Score | Strand | >>> # | (category) | (int32) | (int32) | (object) | (int64) | (category) | >>> # |--------------+-----------+-----------+------------+-----------+--------------| >>> # | chr1 | 1 | 2 | a | 0 | + | >>> # | chr1 | 6 | 7 | b | 0 | - | >>> # +--------------+-----------+-----------+------------+-----------+--------------+ >>> # Stranded PyRanges object has 2 rows and 6 columns from 1 chromosomes. >>> # For printing, the PyRanges was sorted on Chromosome and Strand. """ full_path = get_example_path("f2.bed") return pr.read_bed(full_path)
159c5167bacbeed38578a8b574b31fa2f57f9467
3,658,390
def latin(n, d): """ Build latin hypercube. Parameters ---------- n : int Number of points. d : int Size of space. Returns ------- lh : ndarray Array of points uniformly placed in d-dimensional unit cube. """ # spread function def spread(points): return sum(1./np.linalg.norm(np.subtract(points[i], points[j])) for i in range(n) for j in range(n) if i > j) # starting with diagonal shape lh = [[i/(n-1.)]*d for i in range(n)] # minimizing spread function by shuffling minspread = spread(lh) for i in range(1000): point1 = np.random.randint(n) point2 = np.random.randint(n) dim = np.random.randint(d) newlh = np.copy(lh) newlh[point1, dim], newlh[point2, dim] = newlh[point2, dim], newlh[point1, dim] newspread = spread(newlh) if newspread < minspread: lh = np.copy(newlh) minspread = newspread return lh
416d8c8086eeeaf6e8ea0bf14c300750025455be
3,658,391
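A minimal usage sketch for `latin` above, assuming the function and its numpy import are in scope; it draws 5 points spread over the 2-dimensional unit cube.

import numpy as np

np.random.seed(0)            # make the shuffling reproducible
lh = latin(5, 2)
print(np.asarray(lh).shape)  # (5, 2)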
def _get_valid_dtype(series_type, logical_type): """Return the dtype that is considered valid for a series with the given logical_type""" backup_dtype = logical_type.backup_dtype if ks and series_type == ks.Series and backup_dtype: valid_dtype = backup_dtype else: valid_dtype = logical_type.primary_dtype return valid_dtype
7b4bcd724d2d7a4029a794456882a8f59fc29006
3,658,392
def geometric_mean_longitude(t='now'): """ Returns the geometric mean longitude (in degrees). Parameters ---------- t : {parse_time_types} A time (usually the start time) specified as a parse_time-compatible time string, number, or a datetime object. """ T = julian_centuries(t) result = 279.696680 + 36000.76892 * T + 0.0003025 * T**2 result = result * u.deg return Longitude(result)
c47f106392f507d7750f86cba6a7c16ba3270b11
3,658,393
import os def create_feature_data_batch(im_dir,video_ids): """ create_feature_data_batch Similar function to create_feature_data however utilizing the batch version of functions used in the original function suited towards larger set of images Input : directory of thumbnails, list of video ids Output : dataframe containing facial features for all faces containing a face """ cols = ['videoId', 'numFaces', 'emotions', 'age', 'gender', 'race', 'face_locations'] #Create empty dataframe df = pd.DataFrame(columns=cols) #Initialize variables batch = 0 #Current batch size videoIds = [] face_locations_batch = [] faces_batch = [] img_obj_batch = [] img_objs = [] files = [f + ".jpg" for f in video_ids if os.path.exists(im_dir+f+".jpg")] last_file = files[-1] num_batch = 0 for filename in files: #Append image objects from files into lists until batch size of 50 or last file image = face_recognition.load_image_file(im_dir + '/' + filename) img_obj_batch.append(image) img_objs.append(image) videoIds.append(filename[:-4]) batch += 1 if batch == 50 or filename == last_file: num_batch += 1 #Extract faces from all images in batch print("Batch {0} Facial Recognition Start!".format(num_batch)) face_locations_batch += face_recognition.batch_face_locations(img_obj_batch, number_of_times_to_upsample=1, batch_size=batch) #Get index where no faces were detected in image empty_indices = [empty_ix for empty_ix, element in enumerate(face_locations_batch) if element == []] #Remove those entries from all our lists for index in sorted(empty_indices, reverse=True): del face_locations_batch[index] del videoIds[index] del img_objs[index] batch = 0 img_obj_batch = [] print("Batch {0} Facial Recognition Finished!".format(num_batch)) print("Face Image Extraction Start!") #For each image, crop out all the faces and append to faces_batch for ix in range(len(face_locations_batch)): im = Image.fromarray(img_objs[ix]) for f in face_locations_batch[ix]: face = im.crop((f[3], f[0], f[1], f[2])) face = np.asarray(face) faces_batch.append(face) print("Face Image Extraction Finished!") print("Facial Analysis Begin!") #Do analysis of all faces in faces_batch analysis_counter = 0 """ Note : On DSMLP servers and other computers when running DeepFace.analyze multiple times the model would get loaded over and over causing memory issues so instead we do one large analysis job of all faces """ analysis = DeepFace.analyze(faces_batch) print("Facial Analysis Finished!") #Append all features into the dataframe for i in range(len(face_locations_batch)): f = face_locations_batch[i] emotions = [] age = [] gender = [] race = [] for j in range(len(f)): analysis_counter += 1 curr_analysis = analysis['instance_' + str(analysis_counter)] emotions.append(curr_analysis['dominant_emotion']) age.append(curr_analysis['age']) gender.append(curr_analysis['gender']) race.append(curr_analysis['dominant_race']) df = df.append({'videoId': videoIds[i], 'numFaces': len(f), 'emotions': emotions, 'age': age, 'gender': gender, 'race': race, 'face_locations': f}, ignore_index=True) return df
7fa672a25f55fdf004b07f0ba707987bcff26948
3,658,394
import os def GetEnvironFallback(var_list, default): """Look up a key in the environment, with fallback to secondary keys and finally falling back to a default value.""" for var in var_list: if var in os.environ: return os.environ[var] return default
1b9cad3c46264c089f250ccb19119cff8cacd0d1
3,658,395
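A quick usage sketch for `GetEnvironFallback` above, assuming the function is in scope; the CC_target/CXX variables are only for illustration.

import os

os.environ['CC_target'] = 'clang'
print(GetEnvironFallback(['CC_target', 'CC'], 'gcc'))    # 'clang' (first key wins)
print(GetEnvironFallback(['CXX_target', 'CXX'], 'g++'))  # 'g++' unless CXX/CXX_target is set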
def get_or_create(model, **kwargs):
    """Get or create a database model."""
    # filter_by() returns a Query object (always truthy), so take .first() to
    # get an actual instance or None
    instance = model.query.filter_by(**kwargs).first()
    if instance:
        return instance
    else:
        instance = model(**kwargs)
        db.session.add(instance)
        return instance
6af359ebda80b81a0d02762d576ff407f0c186c4
3,658,396
def test_class_id_cube_strategy_elliptic_paraboloid(experiment_enviroment, renormalize, thread_flag): """ """ tm, dataset, experiment, dictionary = experiment_enviroment class_id_params = { "class_ids" + MAIN_MODALITY: list(np.arange(0, 1.0, 0.25)), "class_ids" + NGRAM_MODALITY: list(np.arange(0, 2.05, 0.25)), } def retrieve_elliptic_paraboloid_score(topic_model): """ """ model = topic_model._model return -((model.class_ids[MAIN_MODALITY]-0.6-model.class_ids[NGRAM_MODALITY]) ** 2 + (model.class_ids[MAIN_MODALITY]-0.6+model.class_ids[NGRAM_MODALITY]/2) ** 2) cube = CubeCreator( num_iter=1, parameters=class_id_params, reg_search="grid", strategy=GreedyStrategy(renormalize), tracked_score_function=retrieve_elliptic_paraboloid_score, separate_thread=thread_flag ) dummies = cube(tm, dataset) tmodels_lvl2 = [dummy.restore() for dummy in dummies] if not renormalize: assert len(tmodels_lvl2) == sum(len(m) for m in class_id_params.values()) else: assert len(tmodels_lvl2) == 10 if renormalize: CLASS_IDS_FOR_CHECKING = [(1.0, 0.0), (1.0, 0.0), (0.8, 0.2), (0.667, 0.333), (0.571, 0.429), (0.5, 0.5), (0.444, 0.556), (0.4, 0.6), (0.364, 0.636), (0.333, 0.667)] for i, one_model in enumerate(tmodels_lvl2): assert np.round(one_model.class_ids[MAIN_MODALITY], 3) == CLASS_IDS_FOR_CHECKING[i][0] assert np.round(one_model.class_ids[NGRAM_MODALITY], 3) == CLASS_IDS_FOR_CHECKING[i][1] else: one_model = tmodels_lvl2[len(class_id_params["class_ids" + MAIN_MODALITY])] assert np.round(one_model.class_ids[MAIN_MODALITY], 3) == 0.5 assert np.round(one_model.class_ids[NGRAM_MODALITY], 3) == 0 assert cube.strategy.best_score >= -0.09
fc5a17e5bf6b158ce242b4289938dec4d2d2e32b
3,658,397
from typing import Dict from typing import List def apply_filters(filters: Dict, colnames: List, row: List) -> List: """ Process data based on filter chains :param filters: :param colnames: :param row: :return: """ if filters: new_row = [] for col, data in zip(colnames, row): if col in filters: params = filters[col][:] for f in params: current_filter = f[:] # copy so that pop does not break next iteration filter_name = current_filter.pop(0) if filter_name not in FILTERS: raise FilterError(f"Error: Invalid filter name: {filter_name}") func, num_params = FILTERS[filter_name][:2] if len(current_filter) != num_params: raise FilterError( f"Error: Incorrect number of params for {filter_name}. Expected {num_params}, got {len(current_filter)})") data = func(data, *current_filter) new_row.append(data) return new_row return row
e52e8b2773dc4e794076b8a480e5eaaab50de06e
3,658,398
def kaiming(shape, dtype, partition_info=None): """Kaiming initialization as described in https://arxiv.org/pdf/1502.01852.pdf""" return tf.random.truncated_normal(shape) * tf.sqrt(2 / float(shape[0]))
153213279909bf01e9782e0e56d270632c502b27
3,658,399
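A minimal usage sketch for `kaiming` above, assuming TensorFlow 2.x eager mode and the function in scope; note the dtype argument is accepted but not used by the initializer.

import tensorflow as tf

w = kaiming((784, 256), tf.float32)  # weights scaled by sqrt(2 / fan_in)
print(w.shape)                       # (784, 256)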