the-stack_0_2040
import numpy as np
import pandas as pd
import xarray as xr
import glob
from statsrat.expr.schedule import schedule
from statsrat.expr.oat import oat
from copy import deepcopy
class experiment:
"""
A class used to represent learning experiments.
Attributes
----------
resp_type : str
The type of behavioral response made by the learner. Must be the same for
all schedules in the experiment. Can be either 'choice' (discrete responses),
'exct' (excitatory) or 'supr' (suppression of an ongoing activity).
schedules : dict
A dictionary of the experiment's schedules (sequences of stimuli and feedback etc
that typically correspond to groups in the experimental design).
schedule_names : list
Names of the experiment's schedules.
oats : dict
A dictionary of the experiment's ordinal adequacy tests (OATs).
notes : str or None
Notes on the experiment (e.g. explanation of design, references).
Methods
-------
make_trials(self)
Create a time step level dataset for the whole experiment.
read_csv(self, path, x_col, resp_col, resp_map, ident_col = None, conf_col = None, schedule = None, other_info = None, header = 'infer', n_final = 8)
Import empirical data from .csv files.
See Also
--------
See 'predef.cat' for category learning examples.
See 'predef.pvl_iti' for Pavlovian conditioning examples.
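
    Examples
    --------
    A minimal construction sketch; 'design_scd' and 'my_oat' are illustrative
    names for a schedule and an oat object defined elsewhere, not part of this
    package:

    >>> expt = experiment(schedules = {'design': design_scd}, oats = {'my_oat': my_oat})
    >>> trials = expt.make_trials()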
"""
def __init__(self, schedules, oats = None, notes = None):
"""
Parameters
----------
schedules : dict
A dictionary of the experiment's schedules (sequences of stimuli and feedback etc
that typically correspond to groups in the experimental design).
oats : dict or None, optional
A dictionary of the experiment's ordinal adequacy tests (OATs), or
else None (experiment has no OATs). Defaults to None.
notes : str or None, optional
Notes on the experiment (e.g. explanation of design, references).
Defaults to None (i.e. no notes).
"""
# check that everything in the 'schedules' argument is a schedule object
is_scd = []
for s in schedules.values():
is_scd += [isinstance(s, schedule)]
assert not (False in is_scd), 'Non-schedule object input as schedule.'
# check that everything in the 'oat' argument is an oat object
if not oats is None:
if len(oats) > 0:
is_oat = []
for o in oats.values():
is_oat += [isinstance(o, oat)]
assert not (False in is_oat), 'Non-oat object input as oat.'
        # check that all schedules have the same response type
self.resp_type = schedules[list(schedules.keys())[0]].resp_type
if len(schedules) > 1:
match_resp_type = []
for s in schedules.values():
match_resp_type += [self.resp_type == s.resp_type]
assert not (False in match_resp_type), 'Schedules have non-matching response types (resp_type).'
# add other data to 'self'
self.schedules = deepcopy(schedules)
for s in self.schedules:
self.schedules[s].name = s # assign schedule name attributes based on dictionary keys
self.schedule_names = list(self.schedules.keys())
self.oats = oats
self.notes = notes
def make_trials(self, schedule = None):
"""
Create a time step level dataset for the whole experiment.
Parameters
----------
schedule : str, optional
Name of the schedule from which to make trials. By default
selects the first schedule in the experiment object's
definition.
Returns
-------
dataset (xarray)
Contains time step level data (stimuli, outcomes etc.). See
documentation on the schedule class for more details.
Notes
-----
Adds in 'time', an alternative coordinate for time steps (dimension t).
This indicates real world time (in abstract units), including possible delays
since previous time steps (e.g. for an experiment with several sessions
on different days). Starts at 0 for the first time step, and each time
step represents a time unit of 1.
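
        Examples
        --------
        A usage sketch; 'expt' and the schedule name 'design' are illustrative:

        >>> trials = expt.make_trials(schedule = 'design')
        >>> trials['time'] # real world time coordinate described above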
"""
# determine experimental schedule to use
if schedule is None:
scd = self.schedules[list(self.schedules.keys())[0]]
else:
scd = self.schedules[schedule]
# make list of time steps
t_order = []
trial_index = []
m = 0 # index for trials
for st in scd.stages:
iti = scd.stages[st].iti
order = scd.stages[st].order
if scd.stages[st].intro_length > 0:
trial_def_bool = np.array( (scd.trial_def.stage_name == st) & (scd.trial_def.trial_name == 'intro') )
trial_def_index = list( scd.trial_def.t[trial_def_bool].values )
t_order += trial_def_index
trial_index += scd.stages[st].intro_length*[m]
m += 1
for j in range(scd.stages[st].n_rep):
if scd.stages[st].order_fixed == False:
np.random.shuffle(order)
for k in range(scd.stages[st].n_trial):
trial_def_bool = np.array( (scd.trial_def.stage_name == st) & (scd.trial_def.trial == order[k]) )
trial_def_index = list( scd.trial_def.t[trial_def_bool].values )
t_order += trial_def_index
trial_index += (iti + 1)*[m]
m += 1
if scd.stages[st].outro_length > 0:
trial_def_bool = np.array( (scd.trial_def.stage_name == st) & (scd.trial_def.trial_name == 'outro') )
trial_def_index = list( scd.trial_def.t[trial_def_bool].values )
t_order += trial_def_index
trial_index += scd.stages[st].outro_length*[m]
m += 1
# make list for 'time' coordinate
st_names = list(scd.stages.keys())
time = list(np.arange(scd.stages[st_names[0]].n_t))
for i in range(1, scd.n_stage):
time += list(np.arange(scd.stages[st_names[i]].n_t) + scd.delays[i - 1] + time[-1] + 1)
# make new trials object
trials = scd.trial_def.loc[{'t' : t_order}]
trials = trials.assign_coords({'t' : range(scd.n_t)})
trials = trials.assign_coords({'trial' : ('t', trial_index)})
trials = trials.assign_coords({'time' : ('t', time)})
trials = trials.assign_attrs({'schedule': scd.name})
return trials
def read_csv(self, path, x_col, resp_col, resp_map, ident_col = None, conf_col = None, schedule = None, other_info = None, header = 'infer', n_final = 8):
"""
Import empirical data from .csv files.
Parameters
----------
path: str
Path to the .csv files.
x_col: list
Names of columns (strings) indicating cues (stimulus
attributes, i.e. columns of 'x').
resp_col: list
Names of columns (strings) indicating responses.
resp_map: dict
Maps response names in the raw data to response names in the
schedule definition.
ident_col: str or None, optional
If string, name of column indicating individual identifier
(the 'ident' variable). If None, then file names are used
as 'ident'. Defaults to None.
conf_col: str or None, optional
Name of the column indicating confidence responses (i.e.
a measure of confidence following choices, typically
obtained in the test stages of human classification tasks).
Defaults to None (suitable for data without confidence responses).
schedule: str, optional
Name of the schedule from which to make trials. By default
selects the first schedule in the experiment object's
definition.
other_info: dict or None, optional
Specifies other information (e.g. demographics) to be imported.
            Dictionary keys are variable names (e.g. 'sex', 'age'), while each
            value is a tuple giving the column used to identify the relevant row,
            the value that identifies that row (e.g. a question such as
            'What is your age?'), and the column from which to read the data.
            Defaults to None (do not import any additional data).
        header: int or list of int, default 'infer'
Passed to pandas.read_csv. Row number(s) to use as the column names,
and the start of the data.
n_final: int, optional
Number of trials at end of each stage to use for calculating percent correct
choices. For example, set n_final = 10 to compute percent correct choices
using the last 10 trials of each stage.
Returns
-------
ds : dataset (xarray)
Contains time step level data (stimuli, outcomes, behavior,
possible outcomes etc.).
summary : dataframe (pandas)
Each row corresponds to a participant. Contains proportion of
correct responses in each non-test stage, plus OAT scores.
Notes
-----
To avoid confusion, data from different schedules (e.g. different experimental
groups) should be kept in separate directories.
        It is assumed that any numeric participant identifiers ('ident') are
integers rather than floats.
The 'correct' variable encodes whether participant behavior ('b') matched
the outcome ('y'). It is only really valid for category learning and similar
experiments, and does not mean anything for stages without feedback (i.e. test stages).
Participant IDs (called 'ident') should be unique. Any duplicates will be modified by
adding '-1', '-2', '-3' etc. (respectively for the second, third, fourth etc. instance
of the ID) to the end of the ID string.
Current Limitations:
For now, I assume that each time step represents a trial (i.e. iti = 0).
I also assume that all 'x_names' in the Python schedule object are lower case.
I also assume that each stage has at most one trial type for any set of punctate cues.
I also assume that the Python schedule object has exactly the right number of trials.
It is assumed that there are no intros or outros to any stages.
Currently, the 'time' (real world time) coordinate is only a copy of 't' (the time step
number). This represents the assumption that there are no delays between stages of the
experiment.
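
        Examples
        --------
        A rough import sketch; the path, column names and response labels below
        are purely illustrative and must match your own raw data and schedule:

        >>> ds, summary = expt.read_csv(path = 'data/group_a',
        ...                             x_col = ['cue1', 'cue2'],
        ...                             resp_col = ['response'],
        ...                             resp_map = {'left': 'cat1', 'right': 'cat2'})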
"""
# list .csv files in the directory
file_set = [file for file in glob.glob(path + "**/*.csv", recursive=True)]
assert len(file_set) > 0, 'Cannot find any files in specified path.'
# determine experimental schedule to use
if schedule is None:
scd = self.schedules[list(self.schedules.keys())[0]]
else:
scd = self.schedules[schedule]
# set up pct_correct
n_stage = len(scd.stages)
pct_correct = dict()
for st in scd.stages:
not_test = scd.stages[st].lrn
if not_test:
var_name = st + '_' + 'last' + str(n_final) + '_pct_correct'
pct_correct[var_name] = []
# **** loop through files ****
n_f = len(file_set)
ds_dict = {}
did_not_work_read = []
did_not_work_ident = []
did_not_work_b = []
did_not_work_misc = []
        raw_ident = [] # raw participant IDs (used to detect duplicates)
n_xc = len(x_col) # number of cue columns in raw data frame
n_rc = len(resp_col) # number of response columns in raw data frame
if conf_col is None:
usecols = x_col + resp_col # columns to import as the data frame 'raw'
else:
usecols = x_col + resp_col + [conf_col] # columns to import as the data frame 'raw'
for i in range(n_f):
# **** import raw data ****
try:
raw = pd.read_csv(file_set[i], error_bad_lines = False, warn_bad_lines = False, header = header, usecols = usecols)
raw.dropna(subset = x_col, thresh = 1, inplace = True) # drop rows without recorded cues ('x')
raw.dropna(subset = resp_col, thresh = 1, inplace = True) # drop rows without recorded responses
raw_full = pd.read_csv(file_set[i], error_bad_lines = False, warn_bad_lines = False, header = header, na_filter = True) # copy of 'raw' whose rows won't be dropped (used for importing 'ident' and 'other info', e.g. demographics)
index = np.zeros(raw.shape[0])
# drop rows in which none of the response columns has one of the expected responses
for col in resp_col:
index += raw[col].isin(list(resp_map.keys()))
raw = raw.loc[np.array(index > 0)]
n_r = raw.shape[0] # number of rows in raw data frame
raw.index = range(n_r) # re-index 'raw'
assert n_r == scd.n_t, 'wrong number of trials for file {}'.format(file_set[i]) + '\n' + 'trials found: ' + str(n_r) + '\n' + 'trials expected: ' + str(scd.n_t)
except Exception as e:
print(e)
did_not_work_read += [file_set[i]]
if not file_set[i] in did_not_work_read:
# **** figure out 'ident' (participant ID) ****
if ident_col is None:
ident = file_set[i].replace('.csv', '').replace(path + '/', '') # participant ID is file name
else:
try:
ident_col_vals = np.array(raw_full[ident_col].values, dtype = 'str')
lengths = np.char.str_len(ident_col_vals)
ident = ident_col_vals[np.argmax(lengths)]
if not isinstance(ident, str): # change participant ID to string if it's not already a string
if ident.dtype == float:
ident = ident.astype(int)
ident = ident.astype(str)
# **** if the participant ID is a duplicate, modify it ****
if i > 0:
ident_array = np.array(raw_ident) # array of IDs already imported
n_repeat = np.sum(ident_array == ident) # number of times the ID has already been imported
else:
n_repeat = 0 # obviously the first ID won't already be in the imported data
raw_ident += [ident]
if n_repeat > 0:
ident += '-' + str(n_repeat)
except Exception as e:
print(e)
did_not_work_ident += [file_set[i]]
if not file_set[i] in (did_not_work_read + did_not_work_ident + did_not_work_misc):
try:
# **** determine b (response) from raw data ****
b = xr.DataArray(0, coords = [range(scd.n_t), scd.y_names], dims = ['t', 'y_name']) # observed responses
for m in range(scd.n_t):
for k in range(n_rc):
if pd.notna(raw.loc[m, resp_col[k]]):
raw_y_name = raw.loc[m, resp_col[k]].lower()
assert raw_y_name in resp_map.keys(), 'raw data response name "{}" is not found in "resp_map" (trial {})'.format(raw_y_name, m)
mapped_y_name = resp_map[raw_y_name]
b.loc[{'t' : m, 'y_name' : mapped_y_name}] = 1
except Exception as e:
print(e)
did_not_work_b += [file_set[i]]
if not file_set[i] in (did_not_work_read + did_not_work_ident + did_not_work_b + did_not_work_misc):
try:
# **** determine trial type from raw data ****
t_order = [] # list of time steps to produce the 'trials' data frame
trial_list = []
m = 0 # index for trials
for st in scd.stages:
iti = scd.stages[st].iti
n_stage_trials = scd.stages[st].n_trial * scd.stages[st].n_rep
for j in range(n_stage_trials):
# determine x (stimulus vector) from raw data
raw_x = pd.Series(0, index = scd.x_names)
for k in range(n_xc):
if pd.notna(raw.loc[m, x_col[k]]):
raw_x_name = raw.loc[m, x_col[k]].lower()
if raw_x_name in scd.x_names:
raw_x[raw_x_name] = 1
# find corresponding trial definition (will only work if ITI = 0)
match_raw_x = (scd.trial_def['x'] == np.array(raw_x)).all(axis = 1)
match_stage = scd.trial_def['stage_name'] == st
trial_def_bool = match_stage & match_raw_x
trial_def_index = list(scd.trial_def['t'].loc[{'t' : trial_def_bool}])
if np.sum(trial_def_bool) == 0:
print('cue combination found that is not in schedule definition for stage:') # for debugging
print('stage')
print(st)
print('trial')
print(m)
print('cue combination')
print(raw_x)
# add to list of time steps indices, etc.
t_order += trial_def_index
trial_list += (iti + 1)*[m]
m += 1
# **** make new dataset ****
ds_new = scd.trial_def.loc[{'t' : t_order}]
n_t = len(t_order)
ds_new = ds_new.assign_coords({'t' : range(n_t), 'trial' : ('t', range(len(t_order))), 'time': ('t', range(n_t))})
ds_new = ds_new.assign(b = b)
ds_new = ds_new.expand_dims(ident = [ident])
# **** add confidence ratings ****
if not conf_col is None:
conf_val = np.array(raw[conf_col].values, dtype = 'float')
conf = xr.DataArray(conf_val, coords = [range(scd.n_t)], dims = ['t'])
ds_new = ds_new.assign(conf = conf)
# **** add other information (e.g. demographics) ****
if not other_info is None:
other_dict = dict()
for var_name in other_info:
row = raw_full[other_info[var_name][0]] == other_info[var_name][1]
column = other_info[var_name][2]
var = raw_full.loc[row, column].values[0]
other_dict[var_name] = (['ident'], np.array([var]))
ds_other = xr.Dataset(data_vars = other_dict, coords = {'ident': [ident]})
ds_new = ds_new.merge(ds_other)
# **** code each trial as correct (u matches b) or incorrect ****
u = ds_new['y'].squeeze()
b = ds_new['b'].squeeze()
correct = np.all(u == b, axis = 1)
ds_new = ds_new.assign(correct = correct)
# **** calculate percent correct per stage (excluding test stages) ****
for st in scd.stages:
not_test = scd.stages[st].lrn
if not_test:
stage_name = scd.stages[st].name
index = np.array(ds_new.stage_name == stage_name)
var_name = stage_name + '_' + 'last' + str(n_final) + '_pct_correct'
pct_correct[var_name] += [100*ds_new['correct'].loc[{'t': index}][-n_final:].mean().values]
# **** add individual's dataset to ds_dict ****
ds_dict[ident] = ds_new
except Exception as e:
print(e)
did_not_work_misc += [file_set[i]]
n_dnw_r = len(did_not_work_read)
if n_dnw_r > 0:
print('The following files could not be read by Pandas:')
for i in range(n_dnw_r):
print(did_not_work_read[i])
n_dnw_i = len(did_not_work_ident)
if n_dnw_i > 0:
print('Participant ID (ident) could not be read from the following files:')
for i in range(n_dnw_i):
print(did_not_work_ident[i])
n_dnw_b = len(did_not_work_b)
if n_dnw_b > 0:
print('Behavior (b) could not be read from the following files:')
for i in range(n_dnw_b):
print(did_not_work_b[i])
n_dnw_m = len(did_not_work_misc)
if n_dnw_m > 0:
print('There was a problem importing the following files:')
for i in range(n_dnw_m):
print(did_not_work_misc[i])
# **** merge datasets together ****
try:
ds = xr.combine_nested(list(ds_dict.values()), concat_dim = 'ident', combine_attrs = 'override')
except Exception as e:
print(e)
print('There was a problem merging individual datasets together.')
# **** create summary data frame (each row corresponds to a participant) ****
summary = ds.drop_dims(['t', 'x_name', 'y_name']).to_dataframe()
# **** add pct_correct ****
for st in scd.stages:
not_test = scd.stages[st].lrn
if not_test:
stage_name = scd.stages[st].name
var_name = stage_name + '_' + 'last' + str(n_final) + '_pct_correct'
summary[var_name] = pct_correct[var_name]
# **** calculate behavioral scores ****
n_oats = len(self.oats)
if conf_col is None:
has_conf = False
else:
has_conf = True
for oat in range(n_oats):
oat_name = list(self.oats.keys())[oat]
oat = self.oats[oat_name]
if scd.name in oat.schedule_pos:
summary[oat_name] = oat.behav_score_pos.compute_scores(ds, has_conf)
else:
if scd.name in oat.schedule_neg:
summary[oat_name] = oat.behav_score_neg.compute_scores(ds, has_conf)
summary = summary.set_index(ds.ident.to_series(), drop = True)
        return (ds, summary)

the-stack_0_2042
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple
from synapse.api.errors import StoreError
from synapse.storage._base import SQLBaseStore
from synapse.storage.databases.main.roommember import ProfileInfo
from synapse.types import UserID
from synapse.util.caches.descriptors import cached
BATCH_SIZE = 100
class ProfileWorkerStore(SQLBaseStore):
async def get_profileinfo(self, user_localpart: str) -> ProfileInfo:
try:
profile = await self.db_pool.simple_select_one(
table="profiles",
keyvalues={"user_id": user_localpart},
retcols=("displayname", "avatar_url"),
desc="get_profileinfo",
)
except StoreError as e:
if e.code == 404:
# no match
return ProfileInfo(None, None)
else:
raise
return ProfileInfo(
avatar_url=profile["avatar_url"], display_name=profile["displayname"]
)
@cached(max_entries=5000)
async def get_profile_displayname(self, user_localpart: str) -> Optional[str]:
return await self.db_pool.simple_select_one_onecol(
table="profiles",
keyvalues={"user_id": user_localpart},
retcol="displayname",
desc="get_profile_displayname",
)
@cached(max_entries=5000)
async def get_profile_avatar_url(self, user_localpart: str) -> Optional[str]:
return await self.db_pool.simple_select_one_onecol(
table="profiles",
keyvalues={"user_id": user_localpart},
retcol="avatar_url",
desc="get_profile_avatar_url",
)
async def get_latest_profile_replication_batch_number(self):
def f(txn):
txn.execute("SELECT MAX(batch) as maxbatch FROM profiles")
rows = self.db_pool.cursor_to_dict(txn)
return rows[0]["maxbatch"]
return await self.db_pool.runInteraction(
"get_latest_profile_replication_batch_number", f
)
async def get_profile_batch(self, batchnum):
return await self.db_pool.simple_select_list(
table="profiles",
keyvalues={"batch": batchnum},
retcols=("user_id", "displayname", "avatar_url", "active"),
desc="get_profile_batch",
)
async def assign_profile_batch(self):
def f(txn):
sql = (
"UPDATE profiles SET batch = "
"(SELECT COALESCE(MAX(batch), -1) + 1 FROM profiles) "
"WHERE user_id in ("
" SELECT user_id FROM profiles WHERE batch is NULL limit ?"
")"
)
txn.execute(sql, (BATCH_SIZE,))
return txn.rowcount
return await self.db_pool.runInteraction("assign_profile_batch", f)
async def get_replication_hosts(self):
def f(txn):
txn.execute(
"SELECT host, last_synced_batch FROM profile_replication_status"
)
rows = self.db_pool.cursor_to_dict(txn)
return {r["host"]: r["last_synced_batch"] for r in rows}
return await self.db_pool.runInteraction("get_replication_hosts", f)
async def update_replication_batch_for_host(
self, host: str, last_synced_batch: int
):
return await self.db_pool.simple_upsert(
table="profile_replication_status",
keyvalues={"host": host},
values={"last_synced_batch": last_synced_batch},
desc="update_replication_batch_for_host",
)
async def get_from_remote_profile_cache(
self, user_id: str
) -> Optional[Dict[str, Any]]:
return await self.db_pool.simple_select_one(
table="remote_profile_cache",
keyvalues={"user_id": user_id},
retcols=("displayname", "avatar_url"),
allow_none=True,
desc="get_from_remote_profile_cache",
)
async def create_profile(self, user_localpart: str) -> None:
await self.db_pool.simple_insert(
table="profiles", values={"user_id": user_localpart}, desc="create_profile"
)
async def set_profile_displayname(
self, user_localpart: str, new_displayname: Optional[str], batchnum: int
) -> None:
# Invalidate the read cache for this user
self.get_profile_displayname.invalidate((user_localpart,))
await self.db_pool.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
values={"displayname": new_displayname, "batch": batchnum},
desc="set_profile_displayname",
lock=False, # we can do this because user_id has a unique index
)
async def set_profile_avatar_url(
self, user_localpart: str, new_avatar_url: Optional[str], batchnum: int
) -> None:
# Invalidate the read cache for this user
self.get_profile_avatar_url.invalidate((user_localpart,))
await self.db_pool.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
values={"avatar_url": new_avatar_url, "batch": batchnum},
desc="set_profile_avatar_url",
lock=False, # we can do this because user_id has a unique index
)
async def set_profiles_active(
self,
users: List[UserID],
active: bool,
hide: bool,
batchnum: int,
) -> None:
"""Given a set of users, set active and hidden flags on them.
Args:
users: A list of UserIDs
active: Whether to set the users to active or inactive
            hide: Whether to hide the users (withhold from replication). If
False and active is False, users will have their profiles
erased
batchnum: The batch number, used for profile replication
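
        Example (a rough sketch, not taken from the real codebase; assumes a
        store instance and UserID imported from synapse.types):
            users = [UserID.from_string("@alice:example.com")]
            await store.set_profiles_active(users, active=False, hide=False, batchnum=7)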
"""
# Convert list of localparts to list of tuples containing localparts
user_localparts = [(user.localpart,) for user in users]
# Generate list of value tuples for each user
value_names = ("active", "batch")
values = [(int(active), batchnum) for _ in user_localparts] # type: List[Tuple]
if not active and not hide:
# we are deactivating for real (not in hide mode)
# so clear the profile information
value_names += ("avatar_url", "displayname")
values = [v + (None, None) for v in values]
return await self.db_pool.runInteraction(
"set_profiles_active",
self.db_pool.simple_upsert_many_txn,
table="profiles",
key_names=("user_id",),
key_values=user_localparts,
value_names=value_names,
value_values=values,
)
async def add_remote_profile_cache(
self, user_id: str, displayname: str, avatar_url: str
) -> None:
"""Ensure we are caching the remote user's profiles.
This should only be called when `is_subscribed_remote_profile_for_user`
would return true for the user.
"""
await self.db_pool.simple_upsert(
table="remote_profile_cache",
keyvalues={"user_id": user_id},
values={
"displayname": displayname,
"avatar_url": avatar_url,
"last_check": self._clock.time_msec(),
},
desc="add_remote_profile_cache",
)
async def update_remote_profile_cache(
self, user_id: str, displayname: str, avatar_url: str
) -> int:
return await self.db_pool.simple_upsert(
table="remote_profile_cache",
keyvalues={"user_id": user_id},
values={
"displayname": displayname,
"avatar_url": avatar_url,
"last_check": self._clock.time_msec(),
},
desc="update_remote_profile_cache",
)
async def maybe_delete_remote_profile_cache(self, user_id):
"""Check if we still care about the remote user's profile, and if we
don't then remove their profile from the cache
"""
subscribed = await self.is_subscribed_remote_profile_for_user(user_id)
if not subscribed:
await self.db_pool.simple_delete(
table="remote_profile_cache",
keyvalues={"user_id": user_id},
desc="delete_remote_profile_cache",
)
async def is_subscribed_remote_profile_for_user(self, user_id):
"""Check whether we are interested in a remote user's profile."""
res = await self.db_pool.simple_select_one_onecol(
table="group_users",
keyvalues={"user_id": user_id},
retcol="user_id",
allow_none=True,
desc="should_update_remote_profile_cache_for_user",
)
if res:
return True
res = await self.db_pool.simple_select_one_onecol(
table="group_invites",
keyvalues={"user_id": user_id},
retcol="user_id",
allow_none=True,
desc="should_update_remote_profile_cache_for_user",
)
if res:
return True
async def get_remote_profile_cache_entries_that_expire(
self, last_checked: int
) -> List[Dict[str, str]]:
"""Get all users who haven't been checked since `last_checked`"""
def _get_remote_profile_cache_entries_that_expire_txn(txn):
sql = """
SELECT user_id, displayname, avatar_url
FROM remote_profile_cache
WHERE last_check < ?
"""
txn.execute(sql, (last_checked,))
return self.db_pool.cursor_to_dict(txn)
return await self.db_pool.runInteraction(
"get_remote_profile_cache_entries_that_expire",
_get_remote_profile_cache_entries_that_expire_txn,
)
class ProfileStore(ProfileWorkerStore):
def __init__(self, database, db_conn, hs):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_index_update(
"profile_replication_status_host_index",
index_name="profile_replication_status_idx",
table="profile_replication_status",
columns=["host"],
unique=True,
)
async def add_remote_profile_cache(
self, user_id: str, displayname: str, avatar_url: str
) -> None:
"""Ensure we are caching the remote user's profiles.
This should only be called when `is_subscribed_remote_profile_for_user`
would return true for the user.
"""
await self.db_pool.simple_upsert(
table="remote_profile_cache",
keyvalues={"user_id": user_id},
values={
"displayname": displayname,
"avatar_url": avatar_url,
"last_check": self._clock.time_msec(),
},
desc="add_remote_profile_cache",
)

the-stack_0_2045
"""SAC-Agent implementation"""
from typing import Optional, Callable
import jax
import jax.numpy as jnp
import numpy as np
import optax
from jaxdl.rl.networks.actor_nets import create_normal_dist_policy_fn, sample_actions
from jaxdl.rl.networks.critic_nets import create_double_critic_network_fn
from jaxdl.rl.networks.temperature_nets import create_temperature_network_fn
from jaxdl.rl.agents.sac.critic_fns import update_critic, update_target
from jaxdl.rl.agents.sac.actor_fns import update_actor
from jaxdl.rl.agents.sac.temperature_fns import update_temperature
from jaxdl.utils.commons import InfoDict, Module, save_train_state, restore_train_state
from jaxdl.utils.commons import create_train_state
from jaxdl.rl.utils.replay_buffer import Batch
from jaxdl.rl.utils.commons import RLAgent
class SACAgent(RLAgent):
"""An JAX implementation of the Soft-Actor-Critic (SAC)
Original paper: https://arxiv.org/abs/1812.05905
Usage:
agent = SACAgent(0, env.observation_space, env.action_space)
agent.restore('./tmp/')
agent.sample(observation)
agent.save('./tmp/')
"""
def __init__(self,
seed: int,
observations: np.ndarray,
actions: np.ndarray,
critic_net_fn: Callable = create_double_critic_network_fn,
actor_net_fn: Callable = create_normal_dist_policy_fn,
temperature_net_fn: Callable = create_temperature_network_fn,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
temperature_lr: float = 3e-4,
discount: float = 0.99,
tau: float = 0.005,
target_update_period: int = 1,
target_entropy: Optional[float] = None):
# split rng and generate keys
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temperature_key = jax.random.split(rng, 4)
# set target entropy
action_dim = actions.shape[-1]
self.target_entropy = target_entropy or - action_dim / 2
# actor network
actor_net = create_train_state(
actor_net_fn(action_dim=action_dim), [actor_key, observations],
optax.adam(learning_rate=actor_lr))
# critic networks
critic_net = create_train_state(
critic_net_fn(), [critic_key, observations, actions],
optax.adam(learning_rate=critic_lr))
target_critic_net = create_train_state(
critic_net_fn(), [critic_key, observations, actions],
optax.adam(learning_rate=critic_lr))
# temperature network
temperature_net = create_train_state(
temperature_net_fn(), [temperature_key],
tx=optax.adam(learning_rate=temperature_lr))
# networks
self.actor_net = actor_net
self.critic_net = critic_net
self.target_critic_net = target_critic_net
self.temperature_net = temperature_net
# parameters
self.rng = rng
self.step_num = 1
self.target_update_period = target_update_period
self.discount = discount
self.tau = tau
def restore(self, path):
"""Loads the networks of the agents."""
self.actor_net = restore_train_state(self.actor_net, path, prefix="actor")
self.critic_net = restore_train_state(self.critic_net, path, prefix="critic")
self.target_critic_net = restore_train_state(
self.target_critic_net, path, prefix="target_critic")
self.temperature_net = restore_train_state(
self.temperature_net, path, prefix="temperature")
def save(self, path):
"""Saves the networks of the agents."""
save_train_state(self.actor_net, path, prefix="actor")
save_train_state(self.critic_net, path, prefix="critic")
save_train_state(self.target_critic_net, path, prefix="target_critic")
save_train_state(self.temperature_net, path, prefix="temperature")
def sample(self, observations: np.ndarray,
temperature: float = 1.0, evaluate: bool = False) -> np.ndarray:
"""Samples (clipped) actions given an observation"""
self.rng, actions = sample_actions(
self.rng, self.actor_net, observations, temperature)
actions = np.asarray(actions)
# Rescaling of actions is done by gym.RescaleAction
return np.clip(actions, -1, 1)
def update(self, batch: Batch) -> InfoDict:
"""Updates all networks of the SAC-Agent."""
self.step_num += 1
# update critic
self.rng, self.critic_net, critic_info = update_critic(
self.rng, self.actor_net, self.critic_net, self.target_critic_net,
self.temperature_net, batch, self.discount, soft_critic=True)
# update target net
if self.step_num % self.target_update_period == 0:
self.target_critic_net = update_target(
self.critic_net, self.target_critic_net, self.tau)
# update actor
self.rng, self.actor_net, actor_info = update_actor(
self.rng, self.actor_net, self.critic_net, self.temperature_net, batch)
# update temperature
self.temperature_net, alpha_info = update_temperature(
self.temperature_net, actor_info["entropy"], self.target_entropy)
# increase step count
        return {**critic_info, **actor_info, **alpha_info}

the-stack_0_2046
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey(
'auth.User', on_delete=models.CASCADE)
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
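
# A minimal usage sketch (e.g. from a Django shell); 'User.objects.first()' assumes at
# least one user exists in the default 'auth.User' table:
#
#   from django.contrib.auth.models import User
#   post = Post.objects.create(author=User.objects.first(), title="Hello", text="...")
#   post.publish()  # sets published_date to timezone.now() and saves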

the-stack_0_2050
#!/usr/bin/env python3.9
# -*- coding: utf-8 -*-
# if you're interested in development, my test server is usually
# up at https://c.cmyui.xyz. just use the same `-devserver cmyui.xyz`
# connection method you would with any other modern server and you
# should have no problems connecting. registration is done in-game
# with osu!'s built-in registration (if you're worried about not being
# properly connected while registering, the server should send back
# https://i.cmyui.xyz/8-Vzy9NllPBp5K7L.png if you use a random login).
# you can also test gulag's rest api using my test server,
# e.g https://osu.cmyui.xyz/api/get_player_scores?id=3&scope=best
import asyncio
import io
import os
import sys
from datetime import datetime
from pathlib import Path
import aiohttp
import aiomysql
import cmyui
import datadog
import orjson
import geoip2.database
import subprocess
from cmyui.logging import Ansi
from cmyui.logging import log
import bg_loops
import utils.misc
from constants.privileges import Privileges
from objects.achievement import Achievement
from objects.collections import Players
from objects.collections import Matches
from objects.collections import Channels
from objects.collections import Clans
from objects.collections import MapPools
from objects.player import Player
from utils.updater import Updater
__all__ = ()
# we print utf-8 content quite often
if isinstance(sys.stdout, io.TextIOWrapper):
sys.stdout.reconfigure(encoding='utf-8')
# set cwd to /gulag
os.chdir(os.path.dirname(os.path.realpath(__file__)))
try:
from objects import glob
except ModuleNotFoundError as exc:
if exc.name == 'config':
# config file doesn't exist; create it from the default.
import shutil
shutil.copy('ext/config.sample.py', 'config.py')
log('A config file has been generated, '
'please configure it to your needs.', Ansi.LRED)
raise SystemExit(1)
else:
raise
utils.misc.install_excepthook()
# current version of gulag
# NOTE: this is used internally for the updater, it may be
# worth reading through its code before playing with it.
glob.version = cmyui.Version(3, 5, 4)
OPPAI_PATH = Path.cwd() / 'oppai-ng'
GEOLOC_DB_FILE = Path.cwd() / 'ext/GeoLite2-City.mmdb'
DEBUG_HOOKS_PATH = Path.cwd() / '_testing/runtime.py'
DATA_PATH = Path.cwd() / '.data'
ACHIEVEMENTS_ASSETS_PATH = DATA_PATH / 'assets/medals/client'
async def setup_collections(db_cursor: aiomysql.DictCursor) -> None:
"""Setup & cache many global collections."""
# dynamic (active) sets, only in ram
glob.players = Players()
glob.matches = Matches()
# static (inactive) sets, in ram & sql
glob.channels = await Channels.prepare(db_cursor)
glob.clans = await Clans.prepare(db_cursor)
glob.pools = await MapPools.prepare(db_cursor)
# create bot & add it to online players
glob.bot = Player(
id=1,
name=await utils.misc.fetch_bot_name(db_cursor),
login_time=float(0x7fffffff), # (never auto-dc)
priv=Privileges.Normal,
bot_client=True
)
glob.players.append(glob.bot)
# global achievements (sorted by vn gamemodes)
glob.achievements = []
await db_cursor.execute('SELECT * FROM achievements')
async for row in db_cursor:
# NOTE: achievement conditions are stored as stringified python
# expressions in the database to allow for extensive customizability.
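        # As a purely hypothetical illustration, a row's "cond" column could hold a
        # string such as "mode_vn == 0", which the eval() below turns into a callable
        # taking (score, mode_vn).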
condition = eval(f'lambda score, mode_vn: {row.pop("cond")}')
achievement = Achievement(**row, cond=condition)
glob.achievements.append(achievement)
# static api keys
await db_cursor.execute(
'SELECT id, api_key FROM users '
'WHERE api_key IS NOT NULL'
)
glob.api_keys = {
row['api_key']: row['id']
async for row in db_cursor
}
async def before_serving() -> None:
"""Called before the server begins serving connections."""
glob.loop = asyncio.get_running_loop()
if glob.has_internet:
# retrieve a client session to use for http connections.
glob.http = aiohttp.ClientSession(json_serialize=orjson.dumps) # type: ignore
else:
glob.http = None
# retrieve a pool of connections to use for mysql interaction.
glob.db = cmyui.AsyncSQLPool()
await glob.db.connect(glob.config.mysql)
# run the sql & submodule updater (uses http & db).
# TODO: updating cmyui_pkg should run before it's import
updater = Updater(glob.version)
await updater.run()
await updater.log_startup()
# open a connection to our local geoloc database,
# if the database file is present.
if GEOLOC_DB_FILE.exists():
glob.geoloc_db = geoip2.database.Reader(GEOLOC_DB_FILE)
else:
glob.geoloc_db = None
# support for https://datadoghq.com
if all(glob.config.datadog.values()):
datadog.initialize(**glob.config.datadog)
glob.datadog = datadog.ThreadStats()
glob.datadog.start(flush_in_thread=True,
flush_interval=15)
# wipe any previous stats from the page.
glob.datadog.gauge('gulag.online_players', 0)
else:
glob.datadog = None
new_coros = []
# cache many global collections/objects from sql,
# such as channels, mappools, clans, bot, etc.
async with glob.db.pool.acquire() as conn:
async with conn.cursor(aiomysql.DictCursor) as db_cursor:
await setup_collections(db_cursor)
# create a task for each donor expiring in 30d.
new_coros.extend(await bg_loops.donor_expiry(db_cursor))
# setup a loop to kick inactive ghosted players.
new_coros.append(bg_loops.disconnect_ghosts())
'''
# if the surveillance webhook has a value, run
# automatic (still very primitive) detections on
# replays deemed by the server's configurable values.
if glob.config.webhooks['surveillance']:
new_coros.append(bg_loops.replay_detections())
'''
# reroll the bot's random status every `interval` sec.
new_coros.append(bg_loops.reroll_bot_status(interval=300))
for coro in new_coros:
glob.app.add_pending_task(coro)
async def after_serving() -> None:
"""Called after the server stops serving connections."""
if hasattr(glob, 'http') and glob.http is not None:
await glob.http.close()
if hasattr(glob, 'db') and glob.db.pool is not None:
await glob.db.close()
if hasattr(glob, 'geoloc_db') and glob.geoloc_db is not None:
glob.geoloc_db.close()
if hasattr(glob, 'datadog') and glob.datadog is not None:
glob.datadog.stop()
glob.datadog.flush()
def ensure_supported_platform() -> int:
"""Ensure we're running on an appropriate platform for gulag."""
if sys.platform != 'linux':
log('gulag currently only supports linux', Ansi.LRED)
if sys.platform == 'win32':
log("you could also try wsl(2), i'd recommend ubuntu 18.04 "
"(i use it to test gulag)", Ansi.LBLUE)
return 1
if sys.version_info < (3, 9):
log('gulag uses many modern python features, '
'and the minimum python version is 3.9.', Ansi.LRED)
return 1
return 0
def ensure_local_services_are_running() -> int:
"""Ensure all required services (mysql) are running."""
# NOTE: if you have any problems with this, please contact me
# @cmyui#0425/[email protected]. i'm interested in knowing
# how people are using the software so that i can keep it
# in mind while developing new features & refactoring.
if glob.config.mysql['host'] in ('localhost', '127.0.0.1', None):
# sql server running locally, make sure it's running
for service in ('mysqld', 'mariadb'):
if os.path.exists(f'/var/run/{service}/{service}.pid'):
break
else:
# not found, try pgrep
pgrep_exit_code = os.system('pgrep mysqld')
if pgrep_exit_code != 0:
log('Please start your mysqld server.', Ansi.LRED)
return 1
return 0
def ensure_directory_structure() -> int:
"""Ensure the .data directory and git submodules are ready."""
# create /.data and its subdirectories.
DATA_PATH.mkdir(exist_ok=True)
for sub_dir in ('avatars', 'logs', 'osu', 'osr', 'ss'):
subdir = DATA_PATH / sub_dir
subdir.mkdir(exist_ok=True)
if not ACHIEVEMENTS_ASSETS_PATH.exists():
if not glob.has_internet:
# TODO: make it safe to run without achievements
return 1
ACHIEVEMENTS_ASSETS_PATH.mkdir(parents=True)
utils.misc.download_achievement_images(ACHIEVEMENTS_ASSETS_PATH)
return 0
def ensure_dependencies_and_requirements() -> int:
"""Make sure all of gulag's dependencies are ready."""
if not OPPAI_PATH.exists():
log('No oppai-ng submodule found, attempting to clone.', Ansi.LMAGENTA)
p = subprocess.Popen(args=['git', 'submodule', 'init'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if exit_code := p.wait():
log('Failed to initialize git submodules.', Ansi.LRED)
return exit_code
p = subprocess.Popen(args=['git', 'submodule', 'update'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if exit_code := p.wait():
log('Failed to update git submodules.', Ansi.LRED)
return exit_code
if not (OPPAI_PATH / 'liboppai.so').exists():
log('No oppai-ng library found, attempting to build.', Ansi.LMAGENTA)
p = subprocess.Popen(args=['./libbuild'], cwd='oppai-ng',
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if exit_code := p.wait():
log('Failed to build oppai-ng automatically.', Ansi.LRED)
return exit_code
return 0
def __install_debugging_hooks() -> None:
"""Change internals to help with debugging & active development."""
if DEBUG_HOOKS_PATH.exists():
from _testing import runtime # type: ignore
runtime.setup()
def display_startup_dialog() -> None:
"""Print any general information or warnings to the console."""
if glob.config.advanced:
log('running in advanced mode', Ansi.LRED)
    # running on root grants the software potentially dangerous and
# unnecessary power over the operating system and is not advised.
if os.geteuid() == 0:
log('It is not recommended to run gulag as root, '
'especially in production..', Ansi.LYELLOW)
if glob.config.advanced:
log('The risk is even greater with features '
'such as config.advanced enabled.', Ansi.LRED)
if not glob.has_internet:
log('Running in offline mode, some features '
'will not be available.', Ansi.LRED)
def main() -> int:
for safety_check in (
ensure_supported_platform, # linux only at the moment
ensure_local_services_are_running, # mysql (if local)
ensure_directory_structure, # .data/ & achievements/ dir structure
ensure_dependencies_and_requirements # submodules & oppai-ng built
):
if (exit_code := safety_check()) != 0:
return exit_code
'''Server is safe to start up'''
glob.boot_time = datetime.now()
# install any debugging hooks from
# _testing/runtime.py, if present
__install_debugging_hooks()
# check our internet connection status
glob.has_internet = utils.misc.check_connection(timeout=1.5)
# show info & any contextual warnings.
display_startup_dialog()
# create the server object; this will handle http connections
# for us via the transport (tcp/ip) socket interface, and will
# handle housekeeping (setup, cleanup) for us automatically.
glob.app = cmyui.Server(
name=f'gulag v{glob.version}',
gzip=4, debug=glob.config.debug
)
# add the domains and their respective endpoints to our server object
from domains.cho import domain as cho_domain # c[e4-6]?.ppy.sh
from domains.osu import domain as osu_domain # osu.ppy.sh
from domains.ava import domain as ava_domain # a.ppy.sh
from domains.map import domain as map_domain # b.ppy.sh
glob.app.add_domains({cho_domain, osu_domain,
ava_domain, map_domain})
# attach housekeeping tasks (setup, cleanup)
glob.app.before_serving = before_serving
glob.app.after_serving = after_serving
# run the server (this is a blocking call)
glob.app.run(addr=glob.config.server_addr,
handle_restart=True) # (using SIGUSR1)
return 0
if __name__ == '__main__':
raise SystemExit(main())
elif __name__ == 'main':
# check specifically for asgi servers since many related projects
# (such as gulag-web) use them, so people may assume we do as well.
if utils.misc.running_via_asgi_webserver(sys.argv[0]):
raise RuntimeError(
"gulag does not use an ASGI framework, and uses it's own custom "
"web framework implementation; please run it directly (./main.py)."
)
else:
raise RuntimeError('gulag should only be run directly (./main.py).')

the-stack_0_2052
# send_notification_email.py
""" This routine sends and email alerting the user of missing fields. """
import os
import sys
where_i_am = os.path.dirname(os.path.realpath(__file__))
sys.path.append(where_i_am)
sys.path.append(where_i_am + "/dependencies")
import boto3 # noqa: E402
from botocore.errorfactory import ClientError # noqa: E402
from sentry_sdk import capture_exception # noqa: E402
def create_and_send_email_notification(missing_fields, notification_email_address, sender):
""" Create and then send an email alerting someone about missing fields """
recipients = notification_email_address.split(",")
subject = "Metadata is missing required fields"
body_html = _create_email_html_body(missing_fields)
body_text = ''
_send_email(sender, recipients, subject, body_html, body_text)
def _create_email_html_body(missing_fields):
""" Create the body of the email in html format """
body_html = """<html>
<head></head>
<body>
<h1>Missing required fields when processing metadata</h1>
<p> """ + missing_fields + """</p>
</body>
</html>"""
body_html = body_html.replace('\n', '<br/>')
return body_html
def _send_email(sender, recipients, subject, body_html, body_text):
""" Actually send the email. """
AWS_REGION = "us-east-1"
CHARSET = "UTF-8"
client = boto3.client('ses', region_name=AWS_REGION)
email_message_json = {
'Body': {},
'Subject': {
'Charset': CHARSET,
'Data': subject,
},
}
if body_html > '':
email_message_json['Body']['Html'] = {'Charset': CHARSET, 'Data': body_html}
elif body_text > '':
email_message_json['Body']['Text'] = {'Charset': CHARSET, 'Data': body_text}
try:
response = client.send_email(
Destination={'ToAddresses': recipients},
Message=email_message_json,
Source=sender
)
except ClientError as e:
capture_exception(e.response['Error']['Message'])
else:
print("Email sent! Message ID:"),
print(response['MessageId'])
return
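
# A minimal usage sketch (hypothetical addresses; AWS SES requires the sender address
# to be verified):
#
#   create_and_send_email_notification("title, creator", "curator@example.edu", "noreply@example.edu")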

the-stack_0_2053
import vim
import re
from os.path import abspath, basename, dirname, relpath
from vim_pad.timestamps import timestamp
from vim_pad.utils import get_save_dir
class PadInfo(object):
__slots__ = "id", "summary", "body", "isEmpty", "folder"
def __init__(self, source):
"""
source can be:
* a vim buffer
* a file object
* a list of strings, one per line
"""
nchars = int(vim.eval("g:pad#read_nchars_from_files"))
self.summary = ""
self.body = ""
self.isEmpty = True
self.folder = ""
self.id = timestamp()
if source is vim.current.buffer:
source = source[:10]
elif source.__class__ == file:
save_dir = get_save_dir()
if abspath(source.name).startswith(save_dir):
pos = len(get_save_dir()), len(basename(source.name))
self.folder = abspath(source.name)[pos[0]:-pos[1]]
else:
self.folder = dirname(relpath(source.name, vim.eval('getcwd()')))
if vim.eval("g:pad#title_first_line") == '1':
source = source.readline().split("\n")
else:
source = source.read(nchars).split('\n')
data = [line.strip() for line in source if line != ""]
if data != []:
# we discard modelines
if re.match("^.* vim: set .*:.*$", data[0]):
data = data[1:]
self.summary = data[0].strip()
# vim-orgmode adds tags after whitespace
org_tags_data = re.search("\s+(?P<tags>:.*$)", self.summary)
if org_tags_data:
self.summary = re.sub("\s+:.*$", "", self.summary)
if self.summary[0] in ("%", "#"): # pandoc and markdown titles
self.summary = str(self.summary[1:]).strip()
self.body = u'\u21b2'.encode('utf-8').join(data[1:]).strip()
# if we have orgmode tag data, add it to the body
if org_tags_data:
self.body = ' '.join(\
[" ".join(\
map(lambda a: "@" + a, \
filter(lambda a: a != "", \
org_tags_data.group("tags").split(":")))), \
self.body])
# remove extra spaces in bodies
self.body = re.sub("\s{2,}", "", self.body)
if self.summary != "":
self.isEmpty = False
self.id = self.summary.lower().replace(" ", "_")
                # remove illegal characters from names (using rules for windows
                # systems to err on the side of caution)
self.id = re.sub("[*:<>/\|^]", "", self.id)
if self.id.startswith("."):
self.id = re.sub("^\.*", "", self.id)

the-stack_0_2054
import boto3
import json
import os
class ApiClient():
def __init__(self):
apiId = os.environ['WEBSOCKET_API_ID']
region = os.environ['AWS_REGION']
stage = os.environ['STAGE']
url = f'https://{apiId}.execute-api.{region}.amazonaws.com/{stage}'
self.client = boto3.client('apigatewaymanagementapi', endpoint_url=url)
def send(self, connectionId, message):
dumped = json.dumps(message)
binMessage = bytes(dumped, 'utf-8')
self.client.post_to_connection(
Data = binMessage,
ConnectionId = connectionId)
def deregister(self, connectionId):
self.client.delete_connection(ConnectionId = connectionId)
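
# A minimal usage sketch (hypothetical handler code; assumes WEBSOCKET_API_ID, AWS_REGION
# and STAGE are set in the environment and 'connection_id' comes from the websocket event):
#
#   api = ApiClient()
#   api.send(connection_id, {"action": "pong"})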

the-stack_0_2056
from .._tier0 import execute
from .._tier0 import create
from .._tier0 import create_none
from .._tier0 import plugin_function
from .._tier0 import Image
@plugin_function(output_creator=create_none)
def crop(input : Image, output : Image = None, start_x : int = 0, start_y : int = 0, start_z : int = 0, width : int = 1, height : int = 1, depth : int = 1):
"""Crops a given sub-stack out of a given image stack.
Note: If the destination image pre-exists already, it will be overwritten and
    keep its dimensions.
Parameters
----------
source : Image
destination : Image
start_x : Number
start_y : Number
start_z : Number
width : Number
height : Number
depth : Number
Returns
-------
destination
Examples
--------
>>> import pyclesperanto_prototype as cle
>>> cle.crop(source, destination, start_x, start_y, start_z, width, height, depth)
References
----------
.. [1] https://clij.github.io/clij2-docs/reference_crop3D
"""
if output is None:
if len(input.shape) == 2:
output = create([height, width])
else:
output = create([depth, height, width])
parameters = {
"dst": output,
"src": input,
"start_x": int(start_x),
"start_y": int(start_y),
}
if len(output.shape) == 3:
# 3D image
parameters.update({"start_z": int(start_z)})
execute(__file__, '../clij-opencl-kernels/kernels/crop_' + str(len(output.shape)) + 'd_x.cl', 'crop_' + str(len(output.shape)) + 'd', output.shape, parameters)
return output

the-stack_0_2057
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class ApplicationListResult(object):
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'version': 'str',
'response': 'list[ApplicationDTO]'
}
self.attributeMap = {
'version': 'version',
'response': 'response'
}
self.version = None # str
self.response = None # list[ApplicationDTO]

the-stack_0_2059
import jsonpickle
from model.group import Group
import random, string
import os.path
import getopt
import sys
# n: the number of groups to generate; the -f option specifies the file where the data should be written
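# Example invocation (assuming this script is run directly; the file name is illustrative):
#   python generate_groups.py -n 5 -f data/groups.json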
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["numbers of groups", "file"])
except getopt.GetoptError as err:
    print(err)
sys.exit(2)
n = 2
f = "data/groups.json"
for o, a in opts:
if o =="-n":
n = int(a)
elif o == "-f":
f = a
#+ string.punctuation + " "*10
def random_string(maxlen):
symbols = string.ascii_letters + string.digits
return "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
test_data = [Group(name="", header="", footer="")] + [
Group(name=random_string(10), header=random_string(10), footer=random_string(10))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(test_data))

the-stack_0_2060
last_names = [
"Smith",
"Johnson",
"Williams",
"Brown",
"Jones",
"Miller",
"Davis",
"Garcia",
"Rodriguez",
"Wilson",
"Martinez",
"Anderson",
"Taylor",
"Thomas",
"Hernandez",
"Moore",
"Martin",
"Jackson",
"Thompson",
"White",
"Lopez",
"Lee",
"Gonzalez",
"Harris",
"Clark",
"Lewis",
"Robinson",
"Walker",
"Perez",
"Hall",
"Young",
"Allen",
"Sanchez",
"Wright",
"King",
"Scott",
"Green",
"Baker",
"Adams",
"Nelson",
"Hill",
"Ramirez",
"Campbell",
"Mitchell",
"Roberts",
"Carter",
"Phillips",
"Evans",
"Turner",
"Torres",
"Parker",
"Collins",
"Edwards",
"Stewart",
"Flores",
"Morris",
"Nguyen",
"Murphy",
"Rivera",
"Cook",
"Rogers",
"Morgan",
"Peterson",
"Cooper",
"Reed",
"Bailey",
"Bell",
"Gomez",
"Kelly",
"Howard",
"Ward",
"Cox",
"Diaz",
"Richardson",
"Wood",
"Watson",
"Brooks",
"Bennett",
"Gray",
"James",
"Reyes",
"Cruz",
"Hughes",
"Price",
"Myers",
"Long",
"Foster",
"Sanders",
"Ross",
"Morales",
"Powell",
"Sullivan",
"Russell",
"Ortiz",
"Jenkins",
"Gutierrez",
"Perry",
"Butler",
"Barnes",
"Fisher",
"Henderson",
"Coleman",
"Simmons",
"Patterson",
"Jordan",
"Reynolds",
"Hamilton",
"Graham",
"Kim",
"Gonzales",
"Alexander",
"Ramos",
"Wallace",
"Griffin",
"West",
"Cole",
"Hayes",
"Chavez",
"Gibson",
"Bryant",
"Ellis",
"Stevens",
"Murray",
"Ford",
"Marshall",
"Owens",
"Mcdonald",
"Harrison",
"Ruiz",
"Kennedy",
"Wells",
"Alvarez",
"Woods",
"Mendoza",
"Castillo",
"Olson",
"Webb",
"Washington",
"Tucker",
"Freeman",
"Burns",
"Henry",
"Vasquez",
"Snyder",
"Simpson",
"Crawford",
"Jimenez",
"Porter",
"Mason",
"Shaw",
"Gordon",
"Wagner",
"Hunter",
"Romero",
"Hicks",
"Dixon",
"Hunt",
"Palmer",
"Robertson",
"Black",
"Holmes",
"Stone",
"Meyer",
"Boyd",
"Mills",
"Warren",
"Fox",
"Rose",
"Rice",
"Moreno",
"Schmidt",
"Patel",
"Ferguson",
"Nichols",
"Herrera",
"Medina",
"Ryan",
"Fernandez",
"Weaver",
"Daniels",
"Stephens",
"Gardner",
"Payne",
"Kelley",
"Dunn",
"Pierce",
"Arnold",
"Tran",
"Spencer",
"Peters",
"Hawkins",
"Grant",
"Hansen",
"Castro",
"Hoffman",
"Hart",
"Elliott",
"Cunningham",
"Knight",
"Bradley",
"Carroll",
"Hudson",
"Duncan",
"Armstrong",
"Berry",
"Andrews",
"Johnston",
"Ray",
"Lane",
"Riley",
"Carpenter",
"Perkins",
"Aguilar",
"Silva",
"Richards",
"Willis",
"Matthews",
"Chapman",
"Lawrence",
"Garza",
"Vargas",
"Watkins",
"Wheeler",
"Larson",
"Carlson",
"Harper",
"George",
"Greene",
"Burke",
"Guzman",
"Morrison",
"Munoz",
"Jacobs",
"Obrien",
"Lawson",
"Franklin",
"Lynch",
"Bishop",
"Carr",
"Salazar",
"Austin",
"Mendez",
"Gilbert",
"Jensen",
"Williamson",
"Montgomery",
"Harvey",
"Oliver",
"Howell",
"Dean",
"Hanson",
"Weber",
"Garrett",
"Sims",
"Burton",
"Fuller",
"Soto",
"Mccoy",
"Welch",
"Chen",
"Schultz",
"Walters",
"Reid",
"Fields",
"Walsh",
"Little",
"Fowler",
"Bowman",
"Davidson",
"May",
"Day",
"Schneider",
"Newman",
"Brewer",
"Lucas",
"Holland",
"Wong",
"Banks",
"Santos",
"Curtis",
"Pearson",
"Delgado",
"Valdez",
"Pena",
"Rios",
"Douglas",
"Sandoval",
"Barrett",
"Hopkins",
"Keller",
"Guerrero",
"Stanley",
"Bates",
"Alvarado",
"Beck",
"Ortega",
"Wade",
"Estrada",
"Contreras",
"Barnett",
"Caldwell",
"Santiago",
"Lambert",
"Powers",
"Chambers",
"Nunez",
"Craig",
"Leonard",
"Lowe",
"Rhodes",
"Byrd",
"Gregory",
"Shelton",
"Frazier",
"Becker",
"Maldonado",
"Fleming",
"Vega",
"Sutton",
"Cohen",
"Jennings",
"Parks",
"Mcdaniel",
"Watts",
"Barker",
"Norris",
"Vaughn",
"Vazquez",
"Holt",
"Schwartz",
"Steele",
"Benson",
"Neal",
"Dominguez",
"Horton",
"Terry",
"Wolfe",
"Hale",
"Lyons",
"Graves",
"Haynes",
"Miles",
"Park",
"Warner",
"Padilla",
"Bush",
"Thornton",
"Mccarthy",
"Mann",
"Zimmerman",
"Erickson",
"Fletcher",
"Mckinney",
"Page",
"Dawson",
"Joseph",
"Marquez",
"Reeves",
"Klein",
"Espinoza",
"Baldwin",
"Moran",
"Love",
"Robbins",
"Higgins",
"Ball",
"Cortez",
"Le",
"Griffith",
"Bowen",
"Sharp",
"Cummings",
"Ramsey",
"Hardy",
"Swanson",
"Barber",
"Acosta",
"Luna",
"Chandler",
"Blair",
"Daniel",
"Cross",
"Simon",
"Dennis",
"Oconnor",
"Quinn",
"Gross",
"Navarro",
"Moss",
"Fitzgerald",
"Doyle",
"Mclaughlin",
"Rojas",
"Rodgers",
"Stevenson",
"Singh",
"Yang",
"Figueroa",
"Harmon",
"Newton",
"Paul",
"Manning",
"Garner",
"Mcgee",
"Reese",
"Francis",
"Burgess",
"Adkins",
"Goodman",
"Curry",
"Brady",
"Christensen",
"Potter",
"Walton",
"Goodwin",
"Mullins",
"Molina",
"Webster",
"Fischer",
"Campos",
"Avila",
"Sherman",
"Todd",
"Chang",
"Blake",
"Malone",
"Wolf",
"Hodges",
"Juarez",
"Gill",
"Farmer",
"Hines",
"Gallagher",
"Duran",
"Hubbard",
"Cannon",
"Miranda",
"Wang",
"Saunders",
"Tate",
"Mack",
"Hammond",
"Carrillo",
"Townsend",
"Wise",
"Ingram",
"Barton",
"Mejia",
"Ayala",
"Schroeder",
"Hampton",
"Rowe",
"Parsons",
"Frank",
"Waters",
"Strickland",
"Osborne",
"Maxwell",
"Chan",
"Deleon",
"Norman",
"Harrington",
"Casey",
"Patton",
"Logan",
"Bowers",
"Mueller",
"Glover",
"Floyd",
"Hartman",
"Buchanan",
"Cobb",
"French",
"Kramer",
"Mccormick",
"Clarke",
"Tyler",
"Gibbs",
"Moody",
"Conner",
"Sparks",
"Mcguire",
"Leon",
"Bauer",
"Norton",
"Pope",
"Flynn",
"Hogan",
"Robles",
"Salinas",
"Yates",
"Lindsey",
"Lloyd",
"Marsh",
"Mcbride",
"Owen",
"Solis",
"Pham",
"Lang",
"Pratt",
"Lara",
"Brock",
"Ballard",
"Trujillo",
"Shaffer",
"Drake",
"Roman",
"Aguirre",
"Morton",
"Stokes",
"Lamb",
"Pacheco",
"Patrick",
"Cochran",
"Shepherd",
"Cain",
"Burnett",
"Hess",
"Li",
"Cervantes",
"Olsen",
"Briggs",
"Ochoa",
"Cabrera",
"Velasquez",
"Montoya",
"Roth",
"Meyers",
"Cardenas",
"Fuentes",
"Weiss",
"Hoover",
"Wilkins",
"Nicholson",
"Underwood",
"Short",
"Carson",
"Morrow",
"Colon",
"Holloway",
"Summers",
"Bryan",
"Petersen",
"Mckenzie",
"Serrano",
"Wilcox",
"Carey",
"Clayton",
"Poole",
"Calderon",
"Gallegos",
"Greer",
"Rivas",
"Guerra",
"Decker",
"Collier",
"Wall",
"Whitaker",
"Bass",
"Flowers",
"Davenport",
"Conley",
"Houston",
"Huff",
"Copeland",
"Hood",
"Monroe",
"Massey",
"Roberson",
"Combs",
"Franco",
"Larsen",
"Pittman",
"Randall",
"Skinner",
"Wilkinson",
"Kirby",
"Cameron",
"Bridges",
"Anthony",
"Richard",
"Kirk",
"Bruce",
"Singleton",
"Mathis",
"Bradford",
"Boone",
"Abbott",
"Charles",
"Allison",
"Sweeney",
"Atkinson",
"Horn",
"Jefferson",
"Rosales",
"York",
"Christian",
"Phelps",
"Farrell",
"Castaneda",
"Nash",
"Dickerson",
"Bond",
"Wyatt",
"Foley",
"Chase",
"Gates",
"Vincent",
"Mathews",
"Hodge",
"Garrison",
"Trevino",
"Villarreal",
"Heath",
"Dalton",
"Valencia",
"Callahan",
"Hensley",
"Atkins",
"Huffman",
"Roy",
"Boyer",
"Shields",
"Lin",
"Hancock",
"Grimes",
"Glenn",
"Cline",
"Delacruz",
"Camacho",
"Dillon",
"Parrish",
"Oneill",
"Melton",
"Booth",
"Kane",
"Berg",
"Harrell",
"Pitts",
"Savage",
"Wiggins",
"Brennan",
"Salas",
"Marks",
"Russo",
"Sawyer",
"Baxter",
"Golden",
"Hutchinson",
"Liu",
"Walter",
"Mcdowell",
"Wiley",
"Rich",
"Humphrey",
"Johns",
"Koch",
"Suarez",
"Hobbs",
"Beard",
"Gilmore",
"Ibarra",
"Keith",
"Macias",
"Khan",
"Andrade",
"Ware",
"Stephenson",
"Henson",
"Wilkerson",
"Dyer",
"Mcclure",
"Blackwell",
"Mercado",
"Tanner",
"Eaton",
"Clay",
"Barron",
"Beasley",
"Oneal",
"Preston",
"Small",
"Wu",
"Zamora",
"Macdonald",
"Vance",
"Snow",
"Mcclain",
"Stafford",
"Orozco",
"Barry",
"English",
"Shannon",
"Kline",
"Jacobson",
"Woodard",
"Huang",
"Kemp",
"Mosley",
"Prince",
"Merritt",
"Hurst",
"Villanueva",
"Roach",
"Nolan",
"Lam",
"Yoder",
"Mccullough",
"Lester",
"Santana",
"Valenzuela",
"Winters",
"Barrera",
"Leach",
"Orr",
"Berger",
"Mckee",
"Strong",
"Conway",
"Stein",
"Whitehead",
"Bullock",
"Escobar",
"Knox",
"Meadows",
"Solomon",
"Velez",
"Odonnell",
"Kerr",
"Stout",
"Blankenship",
"Browning",
"Kent",
"Lozano",
"Bartlett",
"Pruitt",
"Buck",
"Barr",
"Gaines",
"Durham",
"Gentry",
"Mcintyre",
"Sloan",
"Melendez",
"Rocha",
"Herman",
"Sexton",
"Moon",
"Hendricks",
"Rangel",
"Stark",
"Lowery",
"Hardin",
"Hull",
"Sellers",
"Ellison",
"Calhoun",
"Gillespie",
"Mora",
"Knapp",
"Mccall",
"Morse",
"Dorsey",
"Weeks",
"Nielsen",
"Livingston",
"Leblanc",
"Mclean",
"Bradshaw",
"Glass",
"Middleton",
"Buckley",
"Schaefer",
"Frost",
"Howe",
"House",
"Mcintosh",
"Ho",
"Pennington",
"Reilly",
"Hebert",
"Mcfarland",
"Hickman",
"Noble",
"Spears",
"Conrad",
"Arias",
"Galvan",
"Velazquez",
"Huynh",
"Frederick",
"Randolph",
"Cantu",
"Fitzpatrick",
"Mahoney",
"Peck",
"Villa",
"Michael",
"Donovan",
"Mcconnell",
"Walls",
"Boyle",
"Mayer",
"Zuniga",
"Giles",
"Pineda",
"Pace",
"Hurley",
"Mays",
"Mcmillan",
"Crosby",
"Ayers",
"Case",
"Bentley",
"Shepard",
"Everett",
"Pugh",
"David",
"Mcmahon",
"Dunlap",
"Bender",
"Hahn",
"Harding",
"Acevedo",
"Raymond",
"Blackburn",
"Duffy",
"Landry",
"Dougherty",
"Bautista",
"Shah",
"Potts",
"Arroyo",
"Valentine",
"Meza",
"Gould",
"Vaughan",
"Fry",
"Rush",
"Avery",
"Herring",
"Dodson",
"Clements",
"Sampson",
"Tapia",
"Bean",
"Lynn",
"Crane",
"Farley",
"Cisneros",
"Benton",
"Ashley",
"Mckay",
"Finley",
"Best",
"Blevins",
"Friedman",
"Moses",
"Sosa",
"Blanchard",
"Huber",
"Frye",
"Krueger",
"Bernard",
"Rosario",
"Rubio",
"Mullen",
"Benjamin",
"Haley",
"Chung",
"Moyer",
"Choi",
"Horne",
"Yu",
"Woodward",
"Ali",
"Nixon",
"Hayden",
"Rivers",
"Estes",
"Mccarty",
"Richmond",
"Stuart",
"Maynard",
"Brandt",
"Oconnell",
"Hanna",
"Sanford",
"Sheppard",
"Church",
"Burch",
"Levy",
"Rasmussen",
"Coffey",
"Ponce",
"Faulkner",
"Donaldson",
"Schmitt",
"Novak",
"Costa",
"Montes",
"Booker",
"Cordova",
"Waller",
"Arellano",
"Maddox",
"Mata",
"Bonilla",
"Stanton",
"Compton",
"Kaufman",
"Dudley",
"Mcpherson",
"Beltran",
"Dickson",
"Mccann",
"Villegas",
"Proctor",
"Hester",
"Cantrell",
"Daugherty",
"Cherry",
"Bray",
"Davila",
"Rowland",
"Levine",
"Madden",
"Spence",
"Good",
"Irwin",
"Werner",
"Krause",
"Petty",
"Whitney",
"Baird",
"Hooper",
"Pollard",
"Zavala",
"Jarvis",
"Holden",
"Haas",
"Hendrix",
"Mcgrath",
"Bird",
"Lucero",
"Terrell",
"Riggs",
"Joyce",
"Mercer",
"Rollins",
"Galloway",
"Duke",
"Odom",
"Andersen",
"Downs",
"Hatfield",
"Benitez",
"Archer",
"Huerta",
"Travis",
"Mcneil",
"Hinton",
"Zhang",
"Hays",
"Mayo",
"Fritz",
"Branch",
"Mooney",
"Ewing",
"Ritter",
"Esparza",
"Frey",
"Braun",
"Gay",
"Riddle",
"Haney",
"Kaiser",
"Holder",
"Chaney",
"Mcknight",
"Gamble",
"Vang",
"Cooley",
"Carney",
"Cowan",
"Forbes",
"Ferrell",
"Davies",
"Barajas",
"Shea",
"Osborn",
"Bright",
"Cuevas",
"Bolton",
"Murillo",
"Lutz",
"Duarte",
"Kidd",
"Key",
"Cooke",
"Goff",
"Dejesus",
"Marin",
"Dotson",
"Bonner",
"Cotton",
"Merrill",
"Lindsay",
"Lancaster",
"Mcgowan",
"Felix",
"Salgado",
"Slater",
"Carver",
"Guthrie",
"Holman",
"Fulton",
"Snider",
"Sears",
"Witt",
"Newell",
"Byers",
"Lehman",
"Gorman",
"Costello",
"Donahue",
"Delaney",
"Albert",
"Workman",
"Rosas",
"Springer",
"Justice",
"Kinney",
"Odell",
"Lake",
"Donnelly",
"Law",
"Dailey",
"Guevara",
"Shoemaker",
"Barlow",
"Marino",
"Winter",
"Craft",
"Katz",
"Pickett",
"Espinosa",
"Daly",
"Maloney",
"Goldstein",
"Crowley",
"Vogel",
"Kuhn",
"Pearce",
"Hartley",
"Cleveland",
"Palacios",
"Mcfadden",
"Britt"
];
|
the-stack_0_2061 | import os
import unittest
from smqtk_core.configuration import configuration_test_helper
import numpy
import pytest
from smqtk_classifier import ClassifyDescriptor
from smqtk_classifier.impls.classify_descriptor.classify_index_label_descriptor import ClassifyIndexLabelDescriptor
from tests import TEST_DATA_DIR
class TestClassifyIndexLabelDescriptor(unittest.TestCase):
EXPECTED_LABEL_VEC = [
b'label_1',
b'label_2',
b'negative',
b'label_3',
b'Kitware',
b'label_4',
]
FILEPATH_TEST_LABELS = os.path.join(TEST_DATA_DIR, 'test_labels.txt')
def test_is_usable(self) -> None:
# Should always be available
self.assertTrue(ClassifyIndexLabelDescriptor.is_usable())
def test_impl_findable(self) -> None:
self.assertIn(ClassifyIndexLabelDescriptor,
ClassifyDescriptor.get_impls())
def test_configurable(self) -> None:
c = ClassifyIndexLabelDescriptor(self.FILEPATH_TEST_LABELS)
for inst in configuration_test_helper(c):
assert inst.index_to_label_uri == self.FILEPATH_TEST_LABELS
def test_new(self) -> None:
c = ClassifyIndexLabelDescriptor(self.FILEPATH_TEST_LABELS)
self.assertEqual(c.label_vector, self.EXPECTED_LABEL_VEC)
def test_get_labels(self) -> None:
c = ClassifyIndexLabelDescriptor(self.FILEPATH_TEST_LABELS)
self.assertEqual(c.get_labels(), self.EXPECTED_LABEL_VEC)
def test_configuration(self) -> None:
cfg = ClassifyIndexLabelDescriptor.get_default_config()
self.assertEqual(cfg, {'index_to_label_uri': None})
cfg['index_to_label_uri'] = self.FILEPATH_TEST_LABELS
c = ClassifyIndexLabelDescriptor.from_config(cfg)
self.assertEqual(c.get_config(), cfg)
def test_classify_arrays(self) -> None:
c = ClassifyIndexLabelDescriptor(self.FILEPATH_TEST_LABELS)
c_expected = {
b'label_1': 1,
b'label_2': 2,
b'negative': 3,
b'label_3': 4,
b'Kitware': 5,
b'label_4': 6,
}
a = numpy.array([1, 2, 3, 4, 5, 6])
c_result = list(c._classify_arrays([a]))[0]
self.assertEqual(c_result, c_expected)
def test_classify_arrays_invalid_descriptor_dimensions(self) -> None:
c = ClassifyIndexLabelDescriptor(self.FILEPATH_TEST_LABELS)
# One less
a = numpy.array([1, 2, 3, 4, 5])
with pytest.raises(RuntimeError):
list(c._classify_arrays([a]))
# One more
a = numpy.array([1, 2, 3, 4, 5, 6, 7])
with pytest.raises(RuntimeError):
list(c._classify_arrays([a]))
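# Editorial note (assumption based on EXPECTED_LABEL_VEC above): the fixture
# 'test_labels.txt' is presumably a plain-text file listing one label per line,
# in index order:
#     label_1
#     label_2
#     negative
#     label_3
#     Kitware
#     label_4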
|
the-stack_0_2064 | import json
import discord
import logging
from pantheon import pantheon
from util.decorator import only_owner
logger = logging.getLogger("Verif")
with open("private/rgapikey") as key:
panth = pantheon.Pantheon("euw1", key.read(), True)
#verified = {"discordId":"summonerId"}
NOT_VERIFIED = "Vous n'êtes vérifié.\nPour le devenir, connectez vous sur le "\
+ "client League of Legends, puis paramètre > code de vérification tier.\n"\
+ "Entrez votre ID discord ({}) puis cliquez sur valider.\n"\
+ "Entrez ensuite /verif {{votre_nom_d'invocateur}}"
VERIFIED = "Vous êtes vérifié !\nNom d'invocateur : {name}\nNiveau : {summonerLevel}"
BAD_CODE = "Erreur : Le code que vous avez rentrez rentrer ne corespond pas à votre"\
+ " id discord, veuillez résayer. Si le problème persiste, "\
+ "essayez de redémarrer votre client"
ICON_URL = "http://ddragon.canisback.com/latest/img/profileicon/{}.png"
def load_verif():
with open("data/summoners", 'r') as fd:
return json.loads(fd.read())
def save_verif(dic):
with open("data/summoners", 'w') as fd:
fd.write(json.dumps(dic))
class CmdVerif:
@only_owner
async def cmd_importverif(self, *args, message, client, **_):
guild = client.get_guild(511938608475930644)
count = 0
        members = [member for member in guild.members
                   if "Joueur" in [role.name for role in member.roles]]
verified = load_verif()
for member in members:
if str(member.id) not in verified.keys():
logger.info("Verifing " + member.display_name)
try:
summ_data = await panth.getSummonerByName(member.display_name)
except:
await message.channel.send("Impossible de vérifier {}".format(member.display_name))
continue
verified[str(member.id)] = summ_data['id']
count += 1
save_verif(verified)
await message.channel.send("{} membres ont été ajouté".format(count))
async def cmd_verif(self, *args, channel, member, message, **_):
verified = load_verif()
if not args:
if str(member.id) in verified.keys():
data = await panth.getSummoner(verified[str(member.id)])
em = discord.Embed(title="Vérification",
description=VERIFIED.format(**data)
)
em.set_author(name=data['name'], icon_url=ICON_URL.format(data['profileIconId']))
await channel.send(embed=em)
else:
await channel.send(NOT_VERIFIED.format(member.id))
else:
try:
summ_data = await panth.getSummonerByName(" ".join(args))
except:
await channel.send("Impossible de trouver l'invocateur")
return False
try:
code = await panth.getThirdPartyCode(summ_data['id'])
if code != str(member.id):
raise Exception('bad_code')
except:
await channel.send(BAD_CODE)
return False
verified[str(member.id)] = summ_data['id']
save_verif(verified)
await self.cmd_verif(*args, message=message, channel=channel, member=member)
|
the-stack_0_2065 | import functools
from types import FunctionType
def log_request_and_response(func):
"""
Decorator that logs the responses (and the requests they are responses to) returned by any given 'func'. Useful if
you want to log all the responses returned to / requests made by an API wrapper.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
response = None
try:
response = func(*args, **kwargs)
except ResponseException as e:
response = e.response # Got a non-20x response
raise # May need to change this to preserve the original traceback in Python 3
finally:
# We still want to log the request/response if we receive an IdResponseException (i.e. and error response)
if response is not None: # Note: failure responses are falsey!
# Attempt to find a logger, and log the request and response
logger = getattr(args[0], 'logger', None)
if logger: # Only exists if the calling class has a logger attribute
logger.info(logger.format.format_request(response.request))
logger.info(logger.format.format_response(response))
return response
return wrapper
class MetaApi(type):
"""
Metaclass for API wrapper classes that allows all requests/responses to be pretty-printed and logged (at 'info'
logging level and above).
"""
def __new__(mcs, class_name, bases, class_dict):
new_class_dict = {}
ancestor = MetaApi.get_furthest_ancestor(bases[0])
for attribute_name, attribute in class_dict.items():
# Log the pretty-printed request and response, if this method represents an API call
if not attribute_name.startswith('__') and isinstance(attribute, FunctionType):
if hasattr(ancestor, attribute_name): # I.e. this method overrides a method in the furthest ancestor
attribute = log_request_and_response(attribute)
new_class_dict[attribute_name] = attribute
return type.__new__(mcs, class_name, bases, new_class_dict)
@classmethod
def get_furthest_ancestor(mcs, base):
"""
Gets the first class in an inheritance hierarchy that has this class as its metaclass.
"""
ancestor = base
        while getattr(ancestor.__base__, '__metaclass__', None) == mcs:
            ancestor = ancestor.__base__
return ancestor
class ResponseException(Exception):
"""
Thrown when an error response is received from an API.
"""
def __init__(self, message, response):
"""
:param message: Message for the exception
:param response HTTP response object
"""
super(Exception, self).__init__(message)
self.status_code = response.status_code
self.error_code = int(response.headers.get('X-Serato-ErrorCode') or 0)
self.response = response
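# Hedged usage sketch (not part of the original module). A hypothetical API
# wrapper opts in to automatic request/response logging by inheriting from a
# base class whose metaclass is MetaApi (Python 2 style '__metaclass__' hook,
# which is what get_furthest_ancestor above inspects). Names such as BaseApi,
# MyApi, get_user and self.session are illustrative assumptions.
#
#     class BaseApi(object):
#         __metaclass__ = MetaApi
#         logger = None  # subclasses may attach a logger exposing .format helpers
#
#         def get_user(self, user_id):
#             raise NotImplementedError
#
#     class MyApi(BaseApi):
#         def get_user(self, user_id):
#             # Overrides a BaseApi method, so MetaApi wraps it with
#             # log_request_and_response and each request/response pair is logged.
#             return self.session.get('/users/%s' % user_id)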
|
the-stack_0_2067 | import tensorflow as tf
from tensorflow.python import debug
import constants as const
import utils
import os
import models
import exports
from time import time, sleep
from os import path
import random
from tensorflow.python.client import timeline
import inputs
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from ipdb import set_trace as st
class SessionOperator(object):
def __init__(self):
if not const.eager:
config = tf.ConfigProto()
if const.DEBUG_PLACEMENT:
config.log_device_placement = True
self.sess = tf.Session(config=config)
K.set_session(self.sess)
self.run = self.sess.run
else:
self.sess = None
def save(self):
utils.utils.nyi()
def load(self):
return 0
def setup(self):
T1 = time()
print('finished graph creation in %f seconds' % (time() - const.T0))
if not const.eager:
self.run(tf.global_variables_initializer())
self.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=self.sess)
#must come after the queue runners
if const.DEBUG_NAN:
self.sess = debug.LocalCLIDebugWrapperSession(self.sess)
self.sess.add_tensor_filter("has_inf_or_nan", debug.has_inf_or_nan)
self.step = self.load()
#it's in another graph
# if const.generate_views: #not sure why this is necessary....
# #self.run(tf.variables_initializer(inputs.foo_counters))
# self.run(inputs.foo_counters)
if not const.eager:
tf.get_default_graph().finalize()
print('finished graph initialization in %f seconds' % (time() - T1))
def go(self, mode):
self.setup()
if mode == 'train':
self.train()
elif mode == 'test':
#tf.logging.set_verbosity(tf.logging.FATAL)
#prevents end of iterator error print outs
self.test()
def test(self):
utils.utils.nyi()
def train(self):
utils.utils.nyi()
class ModelOperator(SessionOperator):
def __init__(self, model,
savename=None, loadname=None,
vis=None, tb=None, evaluator=None):
self.model = model
self.savename = savename
self.loadname = loadname
self.vis = vis
self.tb = tb
self.evaluator = evaluator
# self.run_metadata = tf.RunMetadata()
super(ModelOperator, self).__init__()
def load(self):
if not self.loadname:
return 0
else:
return self.model.load(self.sess, self.loadname)
def save(self):
if not self.savename:
return
self.model.save(self.sess, self.savename, self.step)
def fd_for_mode(self, mode):
input_collection_to_number = {'train': 0, 'val': 1, 'test': 2}
data_name = self.model.get_data_name(mode)
fd = {self.model.data_selector: input_collection_to_number[data_name]}
if self.model.data_selector is None:
return {}
else:
return fd
def run_steps(self, modes, same_batch = True):
if const.DEBUG_SPEED:
print('====')
print('running', modes)
t0 = time()
stuff = []
for mode in modes:
if const.SKIP_RUN:
print('skipping run')
continue
if const.DEBUG_SPEED:
print('running mode:', mode)
stuff_ = self.model.run(mode, self.sess,self.kl_coeff)
stuff.append(stuff_)
if const.DEBUG_SPEED:
t1 = time()
print('time: %f' % (t1 - t0))
print('====')
return stuff
def train(self):
print('STARTING TRAIN')
self.kl_coeff = 0.0
if const.DEBUG_MEMORY:
#need to write to log, since leak means process would be killed
utils.utils.ensure('memory_log')
f = open('memory_log/%s.log' % const.exp_name, 'w')
for step in range(self.step, const.NB_STEPS):
self.step = step
print('step %d' % step)
if const.DEBUG_MEMORY:
m = utils.utils.memory_consumption()
print('memory consumption is', m)
f.write(str(m)+'\n')
f.flush()
os.fsync(f.fileno())
if not(step % const.savep) and step != 0:
print('saving')
self.save()
if step % 5000 == 0:
self.kl_coeff = step / (float(100 + 1) * float(625))
if self.kl_coeff >= 0.6:
self.kl_coeff = 0.6
print('kl penalty coefficient: ', self.kl_coeff, 'alpha upperbound:', 0.6)
a = time()
self.train_step(step)
print("time taken ",time()-a)
if not(step % const.valp):
self.val_step(step)
def test(self):
step = 0
#while 1:
self.kl_coeff = 0.0
for _ in range(10000):
step += 1
if not self.test_step(step):
break
print('test step %d' % step)
if self.evaluator:
self.evaluator.finish()
def train_step(self, step):
utils.utils.nyi()
def val_step(self, step):
utils.utils.nyi()
def test_step(self, step):
utils.utils.nyi()
class ModalOperator(ModelOperator):
def __init__(self, model, train_modes, val_modes, test_modes,
savename=None, loadname=None,
vis=None, tb=None, evaluator=None):
if not isinstance(train_modes, list):
train_modes = [train_modes]
if not isinstance(val_modes, list):
val_modes = [val_modes]
if not isinstance(test_modes, list):
test_modes = [test_modes]
self.train_modes = train_modes
self.val_modes = val_modes
self.test_modes = test_modes
super(ModalOperator, self).__init__(
model, savename=savename, loadname=loadname, vis=vis, tb=tb, evaluator=evaluator
)
if const.DEBUG_FULL_TRACE:
self.graph_writer = tf.summary.FileWriter(path.join(const.tb_dir, 'graph'),
self.sess.graph)
def train_step(self, step):
train_stuffs = self.run_steps(self.train_modes, same_batch = True)
# st()
if const.SKIP_EXPORT or const.SKIP_TRAIN_EXPORT:
print('skipping exports')
return
if const.DEBUG_SPEED:
print('processing outputs')
for mode, train_stuff in zip(self.train_modes, train_stuffs):
if not train_stuff:
continue
if 'summary' in train_stuff:
self.tb.process(train_stuff['summary'], mode, step)
def val_step(self, step):
val_stuffs = self.run_steps(self.val_modes, same_batch = False)
if const.SKIP_EXPORT or const.SKIP_VAL_EXPORT:
print('skipping exports')
return
if const.DEBUG_SPEED:
print('processing outputs')
for mode, val_stuff in zip(self.val_modes, val_stuffs):
if not val_stuff:
return
if 'vis' in val_stuff and self.vis:
self.vis.process(val_stuff['vis'], mode, step)
if 'summary' in val_stuff and self.tb:
self.tb.process(val_stuff['summary'], mode, step)
def test_step(self, step):
assert len(self.test_modes) == 1, "can't have multiple test modes"
# st()
try:
test_stuff = self.run_steps(self.test_modes)[0]
except tf.errors.OutOfRangeError:
return False
if 'evaluator' in test_stuff and self.evaluator:
self.evaluator.process(test_stuff['evaluator'], None, None)
if 'vis' in test_stuff and self.vis:
self.vis.process(test_stuff['vis'], self.test_modes[0], step)
if 'summary' in test_stuff and self.tb:
self.tb.process(test_stuff['summary'], self.test_modes[0], step)
return True
class GenerateViews(ModalOperator):
def test_step(self, step):
try:
test_stuffs = self.run_steps(self.test_modes)
except tf.errors.OutOfRangeError:
return False
visualizations = [test_stuff['vis']['pred_views'][0] for test_stuff in test_stuffs]
self.vis.process(test_stuffs[0]['vis'], self.test_modes[0], step)
self.vis.process({'gen_views': visualizations}, self.test_modes[0], step)
if False: #plot immediately
#just for visualization purposes
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
import numpy as np
row_size = const.AZIMUTH_GRANULARITY if (const.ELEV_GRANULARITY > 1) else 12
rows = list(chunks(visualizations, row_size))
rows = [np.concatenate(row, axis = 1) for row in rows]
total = np.concatenate(rows, axis = 0)
import matplotlib.pyplot as plt
plt.imshow(total)
plt.show()
return True
|
the-stack_0_2068 | """Helpers that help with state related things."""
import json
import logging
from collections import defaultdict
import homeassistant.util.dt as dt_util
from homeassistant.components.media_player import (
ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_VOLUME_LEVEL, ATTR_MEDIA_VOLUME_MUTED, SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE, ATTR_INPUT_SOURCE)
from homeassistant.components.notify import (
ATTR_MESSAGE, SERVICE_NOTIFY)
from homeassistant.components.sun import (
STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON)
from homeassistant.components.thermostat import (
ATTR_AWAY_MODE, ATTR_FAN, SERVICE_SET_AWAY_MODE, SERVICE_SET_FAN_MODE,
SERVICE_SET_TEMPERATURE)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER,
SERVICE_CLOSE, SERVICE_LOCK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_SEEK, SERVICE_MOVE_DOWN, SERVICE_MOVE_UP, SERVICE_OPEN,
SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_UNLOCK, SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED, STATE_CLOSED, STATE_LOCKED,
STATE_OFF, STATE_ON, STATE_OPEN, STATE_PAUSED, STATE_PLAYING,
STATE_UNKNOWN, STATE_UNLOCKED)
from homeassistant.core import State
_LOGGER = logging.getLogger(__name__)
GROUP_DOMAIN = 'group'
HASS_DOMAIN = 'homeassistant'
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
SERVICE_PLAY_MEDIA: [ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_CONTENT_ID],
SERVICE_MEDIA_SEEK: [ATTR_MEDIA_SEEK_POSITION],
SERVICE_VOLUME_MUTE: [ATTR_MEDIA_VOLUME_MUTED],
SERVICE_VOLUME_SET: [ATTR_MEDIA_VOLUME_LEVEL],
SERVICE_NOTIFY: [ATTR_MESSAGE],
SERVICE_SET_AWAY_MODE: [ATTR_AWAY_MODE],
SERVICE_SET_FAN_MODE: [ATTR_FAN],
SERVICE_SET_TEMPERATURE: [ATTR_TEMPERATURE],
SERVICE_SELECT_SOURCE: [ATTR_INPUT_SOURCE],
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
SERVICE_TURN_ON: STATE_ON,
SERVICE_TURN_OFF: STATE_OFF,
SERVICE_MEDIA_PLAY: STATE_PLAYING,
SERVICE_MEDIA_PAUSE: STATE_PAUSED,
SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
SERVICE_LOCK: STATE_LOCKED,
SERVICE_UNLOCK: STATE_UNLOCKED,
SERVICE_CLOSE: STATE_CLOSED,
SERVICE_OPEN: STATE_OPEN,
SERVICE_MOVE_UP: STATE_OPEN,
SERVICE_MOVE_DOWN: STATE_CLOSED,
}
# pylint: disable=too-few-public-methods, attribute-defined-outside-init
class TrackStates(object):
"""
Record the time when the with-block is entered.
Add all states that have changed since the start time to the return list
when with-block is exited.
"""
def __init__(self, hass):
"""Initialize a TrackStates block."""
self.hass = hass
self.states = []
def __enter__(self):
"""Record time from which to track changes."""
self.now = dt_util.utcnow()
return self.states
def __exit__(self, exc_type, exc_value, traceback):
"""Add changes states to changes list."""
self.states.extend(get_changed_since(self.hass.states.all(), self.now))
def get_changed_since(states, utc_point_in_time):
"""Return list of states that have been changed since utc_point_in_time."""
return [state for state in states
if state.last_updated >= utc_point_in_time]
def reproduce_state(hass, states, blocking=False):
"""Reproduce given state."""
if isinstance(states, State):
states = [states]
to_call = defaultdict(list)
for state in states:
if hass.states.get(state.entity_id) is None:
_LOGGER.warning('reproduce_state: Unable to find entity %s',
state.entity_id)
continue
if state.domain == GROUP_DOMAIN:
service_domain = HASS_DOMAIN
else:
service_domain = state.domain
domain_services = hass.services.services[service_domain]
service = None
for _service in domain_services.keys():
if (_service in SERVICE_ATTRIBUTES and
all(attr in state.attributes
for attr in SERVICE_ATTRIBUTES[_service]) or
_service in SERVICE_TO_STATE and
SERVICE_TO_STATE[_service] == state.state):
service = _service
if (_service in SERVICE_TO_STATE and
SERVICE_TO_STATE[_service] == state.state):
break
if not service:
_LOGGER.warning("reproduce_state: Unable to reproduce state %s",
state)
continue
# We group service calls for entities by service call
# json used to create a hashable version of dict with maybe lists in it
key = (service_domain, service,
json.dumps(dict(state.attributes), sort_keys=True))
to_call[key].append(state.entity_id)
for (service_domain, service, service_data), entity_ids in to_call.items():
data = json.loads(service_data)
data[ATTR_ENTITY_ID] = entity_ids
hass.services.call(service_domain, service, data, blocking)
def state_as_number(state):
"""
Try to coerce our state to a number.
Raises ValueError if this is not possible.
"""
if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON,
STATE_OPEN):
return 1
elif state.state in (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
STATE_BELOW_HORIZON, STATE_CLOSED):
return 0
return float(state.state)
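# Hedged usage sketch (illustrative only; 'light.kitchen' is an assumption):
# capture the states that change while some service calls run, then re-apply
# those captured states later with reproduce_state.
#
#     with TrackStates(hass) as changed:
#         hass.services.call('light', 'turn_on',
#                            {ATTR_ENTITY_ID: 'light.kitchen'}, blocking=True)
#     ...
#     reproduce_state(hass, changed, blocking=True)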
|
the-stack_0_2069 | from classes.requester import Requester
from classes.specializedMatchers import MD5Matcher, StringMatcher, RegexMatcher, HeaderMatcher
from collections import Counter
class CMSReq(Requester):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.category = "CMS"
self.match_class = None
def prepare_results(self, matches):
data = []
weight_dict = Counter()
        # calculate the total weights for the urls in the matches
for m in matches:
url = m['response'].url
weight = m['weight'] if 'weight' in m else 1
weight_dict[url] += weight
# apply the weights just calculated
for m in matches:
url = m['response'].url
version = m['output']
weight = weight_dict[url]
m['count'] = weight
data.append( {'url': url, 'count': weight, 'version': version} )
return data
def run(self):
# make requests
requested = self.request_uniq()
# find matches
matcher = self.match_class(requested)
matches = matcher.get_matches()
# add to results
intermediate_results = self.prepare_results(matches)
self.add_results(intermediate_results)
class CMSReqMD5(CMSReq):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.match_class = MD5Matcher
self.use_weights = True
class CMSReqString(CMSReq):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.match_class = StringMatcher
class CMSReqRegex(CMSReq):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.match_class = RegexMatcher
class CMSReqHeader(CMSReq):
def __init__(self, host, cache, results):
super().__init__(host, cache, results)
self.match_class = HeaderMatcher
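# Editorial note on the weighting in prepare_results: if, say, two fingerprints
# with weights 2 and 1 both match responses from the same URL, every match for
# that URL is reported with count 3, i.e. the summed evidence for that CMS/version.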
|
the-stack_0_2070 | import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='showticklabels',
parent_name='choropleth.colorbar',
**kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_0_2071 | # -*- coding:utf-8 -*-
# ------------------------
# written by Songjian Chen
# 2019-02
# ------------------------
from scipy.ndimage.filters import gaussian_filter
import scipy
import scipy.spatial  # scipy.spatial.KDTree below needs this subpackage imported explicitly
import math
import numpy as np
#this is borrowed from https://github.com/davideverona/deep-crowd-counting_crowdnet
def gaussian_filter_density(gt):
density = np.zeros(gt.shape, dtype=np.float32)
gt_count = np.count_nonzero(gt)
if gt_count == 0:
return density
pts = np.array(list(zip(np.nonzero(gt)[1], np.nonzero(gt)[0])))
leafsize = 2048
# build kdtree
tree = scipy.spatial.KDTree(pts.copy(), leafsize=leafsize)
# query kdtree
distances, locations = tree.query(pts, k=4)
print('generate density...')
num = pts.shape[0] - 1
for i, pt in enumerate(pts):
pt2d = np.zeros(gt.shape, dtype=np.float32)
pt2d[math.floor(pt[1]), math.floor(pt[0])] = 1.
if gt_count > 1:
sigma = (distances[i][1]+distances[i][2]+distances[i][3])*0.1
else:
sigma = np.average(np.array(gt.shape))/2./2. #case: 1 point
density += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant')
print('done.')
return density |
the-stack_0_2074 | import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import cv2
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
def write_seg(Dir, Count, R):
seg = np.zeros(R['masks'].shape[:2])
movable_objects = [1,2,3,4,6,8]
for objec_idx in range(R['class_ids'].shape[0]):
if R['class_ids'][objec_idx] in movable_objects:
seg = np.where(np.invert(R['masks'][...,objec_idx]), seg, R['class_ids'][objec_idx])
if not os.path.isdir(Dir):
os.mkdir(Dir)
cv2.imwrite(os.path.join(Dir, "%06d.png"%Count), seg)
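# The COCO class ids that write_seg keeps (movable_objects) correspond, via
# class_names above, to: 1=person, 2=bicycle, 3=car, 4=motorcycle, 6=bus, 8=truck.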
# Load a random image from the images folder
def run_folder(file_names, model):
for f in file_names:
if os.path.isfile(BASE_DIR + "/rcnnseg_" + Folder + "/" + f):
print(f + "continue")
continue
if not os.path.splitext(f)[-1] == ".png":
continue
image = skimage.io.imread(os.path.join(IMAGE_DIR, f))
# Run detection
results = model.detect([image], verbose=1)
# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], save_path = BASE_DIR + "/mrcnn_" + Folder + "/" + f)
write_seg(BASE_DIR + "/rcnnseg_" + Folder, int(os.path.splitext(f)[0]), r)
config = InferenceConfig()
config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
BASE_DIR = "/data/shibuya_640_360_fov45_few_people_bags/2020-08-29-03-56-21"
Folder = "image_0"
IMAGE_DIR = os.path.join(BASE_DIR, Folder)
file_names = next(os.walk(IMAGE_DIR))[2]
file_names.sort()
if not os.path.isdir(BASE_DIR + "/mrcnn_" + Folder):
os.mkdir(BASE_DIR + "/mrcnn_" + Folder)
run_folder(file_names, model)
Folder = "image_1"
IMAGE_DIR = os.path.join(BASE_DIR, Folder)
file_names = next(os.walk(IMAGE_DIR))[2]
if not os.path.isdir(BASE_DIR + "/mrcnn_" + Folder):
os.mkdir(BASE_DIR + "/mrcnn_" + Folder)
run_folder(file_names, model)
|
the-stack_0_2075 | import psycopg2
import psycopg2.extras
from website_monitor.stats import Stats
from website_monitor.url_probe import UrlProbe
class Repository:
"""
The URL probe repository.
Implements the repository pattern to hide the database interaction details.
"""
def __init__(self, connection_string) -> None:
self.connection_string = connection_string
def setup(self):
with psycopg2.connect(self.connection_string) as conn:
with conn.cursor() as cursor:
cursor.execute(
"""
create table if not exists url_probes(
id bigserial primary key,
url text not null,
timestamp timestamp not null,
http_status_code int not null,
response_time_ms int not null
);
"""
)
def delete_all(self):
with psycopg2.connect(self.connection_string) as conn:
with conn.cursor() as cursor:
cursor.execute("truncate table url_probes;")
def find_all(self) -> list[UrlProbe]:
with psycopg2.connect(self.connection_string) as conn:
with conn.cursor() as cursor:
cursor.execute(
"select url, timestamp, http_status_code, response_time_ms from url_probes;"
)
return list(map(UrlProbe._make, cursor.fetchall()))
def save(self, url_probes: list[UrlProbe]):
with psycopg2.connect(self.connection_string) as conn:
with conn.cursor() as cursor:
psycopg2.extras.execute_values(
cursor,
"insert into url_probes(url, timestamp, http_status_code, response_time_ms) values %s",
[
(up.url, up.timestamp, up.http_status_code, up.response_time_ms)
for up in url_probes
],
)
def get_stats(self) -> list[Stats]:
with psycopg2.connect(self.connection_string) as conn:
with conn.cursor() as cursor:
cursor.execute(
"""
select url,
count(*) as probes,
percentile_cont(0.5) within group (order by url_probes.response_time_ms) as p50_ms,
percentile_cont(0.95) within group (order by url_probes.response_time_ms) as p95_ms,
percentile_cont(0.99) within group (order by url_probes.response_time_ms) as p99_ms
from url_probes
group by url;
"""
)
return list(map(Stats._make, cursor.fetchall()))
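# Hedged usage sketch (not part of the original module): exercises the
# Repository API end to end. The connection string is an assumption; point it
# at a reachable PostgreSQL instance before running.
if __name__ == "__main__":
    repo = Repository("postgresql://postgres:postgres@localhost:5432/website_monitor")
    repo.setup()                # create the url_probes table if missing
    print(repo.find_all())      # every stored UrlProbe row
    print(repo.get_stats())     # per-URL probe counts and p50/p95/p99 latencies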
|
the-stack_0_2076 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Keras layers that implement explicit (approximate) kernel feature maps."""
import tensorflow.compat.v2 as tf
import numpy as np
from keras import initializers
from keras.engine import base_layer
from keras.engine import input_spec
from tensorflow.python.util.tf_export import keras_export
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
@keras_export('keras.layers.experimental.RandomFourierFeatures')
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that projects its inputs into a random feature space.
This layer implements a mapping from input space to a space with `output_dim`
dimensions, which approximates shift-invariant kernels. A kernel function
`K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`.
Many popular Radial Basis Functions (RBF), including Gaussian and
Laplacian kernels, are shift-invariant.
The implementation of this layer is based on the following paper:
["Random Features for Large-Scale Kernel Machines"](
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
by Ali Rahimi and Ben Recht.
The distribution from which the parameters of the random features map (layer)
are sampled determines which shift-invariant kernel the layer approximates
(see paper for more details). You can use the distribution of your
choice. The layer supports out-of-the-box
approximations of the following two RBF kernels:
- Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
- Laplacian: `K(x, y) = exp(-abs(x - y) / scale))`
**Note:** Unlike what is described in the paper and unlike what is used in
the Scikit-Learn implementation, the output of this layer does not apply
the `sqrt(2 / D)` normalization factor.
**Usage:** Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending on
the loss function of the linear model, the composition of this layer and the
linear model results to models that are equivalent (up to approximation) to
kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
kernel linear regression (for squared loss), etc.
Examples:
A kernel multinomial logistic regression model with Gaussian kernel for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10, activation='softmax'),
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['categorical_accuracy']
)
```
A quasi-SVM classifier for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10),
])
model.compile(
optimizer='adam',
loss='hinge',
metrics=['categorical_accuracy']
)
```
To use another kernel, just replace the layer creation line with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Args:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the layer).
It can be either a string identifier or a Keras `Initializer` instance.
Currently only 'gaussian' and 'laplacian' are supported string
identifiers (case insensitive). Note that the kernel matrix is not
trainable.
scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see concrete
definitions above). When provided, it should be a positive float. If None,
a default value is used: if the kernel initializer is set to "gaussian",
`scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0.
Both the approximation error of the kernel and the classification quality
are sensitive to this parameter. If `trainable` is set to `True`, this
parameter is learned end-to-end during training and the provided value
serves as the initial value.
**Note:** When features from this layer are fed to a linear model,
by making `scale` trainable, the resulting optimization problem is
no longer convex (even if the loss function used by the linear model
is convex).
trainable: Whether the scaling parameter of the layer should be trainable.
Defaults to `False`.
name: String, name to use for this layer.
"""
def __init__(self,
output_dim,
kernel_initializer='gaussian',
scale=None,
trainable=False,
name=None,
**kwargs):
if output_dim <= 0:
raise ValueError(
f'`output_dim` should be a positive integer. Received: {output_dim}')
if isinstance(kernel_initializer, str):
if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
raise ValueError(
f'Unsupported `kernel_initializer`: {kernel_initializer} '
f'Expected one of: {_SUPPORTED_RBF_KERNEL_TYPES}')
if scale is not None and scale <= 0.0:
raise ValueError('When provided, `scale` should be a positive float. '
f'Received: {scale}')
super(RandomFourierFeatures, self).__init__(
trainable=trainable, name=name, **kwargs)
self.output_dim = output_dim
self.kernel_initializer = kernel_initializer
self.scale = scale
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
# TODO(pmol): Allow higher dimension inputs. Currently the input is expected
# to have shape [batch_size, dimension].
if input_shape.rank != 2:
raise ValueError(
'The rank of the input tensor should be 2. '
f'Received input with rank {input_shape.ndims} instead. '
f'Full input shape received: {input_shape}')
if input_shape.dims[1].value is None:
raise ValueError(
'The last dimension of the input tensor should be defined. '
f'Found `None`. Full input shape received: {input_shape}')
self.input_spec = input_spec.InputSpec(
ndim=2, axes={1: input_shape.dims[1].value})
input_dim = input_shape.dims[1].value
kernel_initializer = _get_random_features_initializer(
self.kernel_initializer, shape=(input_dim, self.output_dim))
self.unscaled_kernel = self.add_weight(
name='unscaled_kernel',
shape=(input_dim, self.output_dim),
dtype=tf.float32,
initializer=kernel_initializer,
trainable=False)
self.bias = self.add_weight(
name='bias',
shape=(self.output_dim,),
dtype=tf.float32,
initializer=initializers.RandomUniform(minval=0.0, maxval=2 * np.pi),
trainable=False)
if self.scale is None:
self.scale = _get_default_scale(self.kernel_initializer, input_dim)
self.kernel_scale = self.add_weight(
name='kernel_scale',
shape=(1,),
dtype=tf.float32,
initializer=tf.compat.v1.constant_initializer(self.scale),
trainable=True,
constraint='NonNeg')
super(RandomFourierFeatures, self).build(input_shape)
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
inputs = tf.cast(inputs, tf.float32)
kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
outputs = tf.raw_ops.MatMul(a=inputs, b=kernel)
outputs = tf.nn.bias_add(outputs, self.bias)
return tf.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
'The last dimension of the input tensor should be defined. '
f'Found `None`. Full input shape received: {input_shape}')
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if not isinstance(kernel_initializer, str):
kernel_initializer = initializers.serialize(kernel_initializer)
config = {
'output_dim': self.output_dim,
'kernel_initializer': kernel_initializer,
'scale': self.scale,
}
base_config = super(RandomFourierFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
"""Returns Initializer object for random features."""
def _get_cauchy_samples(loc, scale, shape):
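    # Inverse-CDF (quantile) sampling: if U ~ Uniform(0, 1), then
    # loc + scale * tan(pi * (U - 0.5)) is Cauchy(loc, scale) distributed.
    # Cauchy-sampled frequencies yield random features for the Laplacian
    # kernel, whose Fourier transform is the Cauchy density (Rahimi & Recht).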
probs = np.random.uniform(low=0., high=1., size=shape)
return loc + scale * np.tan(np.pi * (probs - 0.5))
random_features_initializer = initializer
if isinstance(initializer, str):
if initializer.lower() == 'gaussian':
random_features_initializer = initializers.RandomNormal(stddev=1.0)
elif initializer.lower() == 'laplacian':
random_features_initializer = initializers.Constant(
_get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))
else:
raise ValueError(
f'Unsupported `kernel_initializer`: "{initializer}" '
f'Expected one of: {_SUPPORTED_RBF_KERNEL_TYPES}')
return random_features_initializer
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, str) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
|
the-stack_0_2079 | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.slider"
_path_str = "layout.slider.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets the font of the slider step labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.slider.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.slider.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
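# Hedged usage note: this generated class is normally reached through the
# graph_objs hierarchy rather than imported directly, e.g.
#     import plotly.graph_objs as go
#     go.layout.slider.Font(family="Arial", size=12, color="#444")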
|
the-stack_0_2082 | """
User input utilities
"""
# Author: Ben Gravell
def yes_or_no(question):
reply = str(input(question+' (y/n): ')).lower().strip()
if reply[0] == 'y':
return True
elif reply[0] == 'n':
return False
else:
return yes_or_no("Invalid input... please enter ") |
the-stack_0_2083 | from __future__ import print_function
import argparse
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.utils.data.distributed
import horovod.torch as hvd
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--fp16-allreduce', action='store_true', default=False,
help='use fp16 compression during allreduce')
parser.add_argument('--use-adasum', action='store_true', default=False,
help='use adasum algorithm to do reduction')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
def train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
# Horovod: use train_sampler to determine the number of examples in
# this worker's partition.
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_sampler),
100. * batch_idx / len(train_loader), loss.item()))
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
def test():
model.eval()
test_loss = 0.
test_accuracy = 0.
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(output, target, size_average=False).item()
# get the index of the max log-probability
pred = output.data.max(1, keepdim=True)[1]
test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()
# Horovod: use test_sampler to determine the number of examples in
# this worker's partition.
test_loss /= len(test_sampler)
test_accuracy /= len(test_sampler)
# Horovod: average metric values across workers.
test_loss = metric_average(test_loss, 'avg_loss')
test_accuracy = metric_average(test_accuracy, 'avg_accuracy')
# Horovod: print output only on first rank.
if hvd.rank() == 0:
print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
test_loss, 100. * test_accuracy))
if __name__ == '__main__':
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if args.cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
# issues with Infiniband implementations that are not fork-safe
if (kwargs.get('num_workers', 0) > 0 and hasattr(mp, '_supports_context') and
mp._supports_context and 'forkserver' in mp.get_all_start_methods()):
kwargs['multiprocessing_context'] = 'forkserver'
train_dataset = \
datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)
test_dataset = \
datasets.MNIST('data-%d' % hvd.rank(), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# Horovod: use DistributedSampler to partition the test data.
test_sampler = torch.utils.data.distributed.DistributedSampler(
test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size,
sampler=test_sampler, **kwargs)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if args.cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=args.lr * lr_scaler,
momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average)
for epoch in range(1, args.epochs + 1):
train(epoch)
test()
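    # Typically launched through the Horovod launcher, e.g.
    #   horovodrun -np 4 python pytorch_mnist.py
    # (the script filename is an assumption; adjust it to the actual file name)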
|
the-stack_0_2084 | """A notebook manager that uses the local file system for storage.
Authors:
* Brian Granger
* Zach Sailer
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import glob
import shutil
from tornado import web
from .nbmanager import NotebookManager
from IPython.nbformat import current
from IPython.utils.traitlets import Unicode, Bool, TraitError
from IPython.utils.py3compat import getcwd
from IPython.utils import tz
from IPython.html.utils import is_hidden, to_os_path
def sort_key(item):
"""Case-insensitive sorting."""
return item['name'].lower()
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class FileNotebookManager(NotebookManager):
save_script = Bool(False, config=True,
help="""Automatically create a Python script when saving the notebook.
For easier use of import, %run and %load across notebooks, a
<notebook-name>.py script will be created next to any
<notebook-name>.ipynb on each save. This can also be set with the
short `--script` flag.
"""
)
notebook_dir = Unicode(getcwd(), config=True)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.exists(new) or not os.path.isdir(new):
raise TraitError("notebook dir %r is not a directory" % new)
checkpoint_dir = Unicode('.ipynb_checkpoints', config=True,
help="""The directory name in which to keep notebook checkpoints
This is a path relative to the notebook's own directory.
By default, it is .ipynb_checkpoints
"""
)
def _copy(self, src, dest):
"""copy src to dest
like shutil.copy2, but log errors in copystat
"""
shutil.copyfile(src, dest)
try:
shutil.copystat(src, dest)
except OSError as e:
self.log.debug("copystat on %s failed", dest, exc_info=True)
def get_notebook_names(self, path=''):
"""List all notebook names in the notebook dir and path."""
path = path.strip('/')
if not os.path.isdir(self._get_os_path(path=path)):
raise web.HTTPError(404, 'Directory not found: ' + path)
names = glob.glob(self._get_os_path('*'+self.filename_ext, path))
names = [os.path.basename(name)
for name in names]
return names
def path_exists(self, path):
"""Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return os.path.isdir(os_path)
def is_hidden(self, path):
"""Does the API style path correspond to a hidden directory or file?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is hidden.
"""
path = path.strip('/')
os_path = self._get_os_path(path=path)
return is_hidden(os_path, self.notebook_dir)
def _get_os_path(self, name=None, path=''):
"""Given a notebook name and a URL path, return its file system
path.
Parameters
----------
name : string
The name of a notebook file with the .ipynb extension
path : string
The relative URL path (with '/' as separator) to the named
notebook.
Returns
-------
path : string
A file system path that combines notebook_dir (location where
server started), the relative path, and the filename with the
current operating system's url.
"""
if name is not None:
path = path + '/' + name
return to_os_path(path, self.notebook_dir)
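    # Example (illustrative values): with notebook_dir set to '/home/user/notebooks',
    # _get_os_path('a.ipynb', 'sub/dir') resolves to roughly
    # '/home/user/notebooks/sub/dir/a.ipynb', with separators adjusted for the OS.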
def notebook_exists(self, name, path=''):
"""Returns a True if the notebook exists. Else, returns False.
Parameters
----------
name : string
The name of the notebook you are checking.
path : string
The relative path to the notebook (with '/' as separator)
Returns
-------
bool
"""
path = path.strip('/')
nbpath = self._get_os_path(name, path=path)
return os.path.isfile(nbpath)
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def list_dirs(self, path):
"""List the directories for a given API style path."""
path = path.strip('/')
os_path = self._get_os_path('', path)
if not os.path.isdir(os_path):
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
elif is_hidden(os_path, self.notebook_dir):
self.log.info("Refusing to serve hidden directory, via 404 Error")
raise web.HTTPError(404, u'directory does not exist: %r' % os_path)
dir_names = os.listdir(os_path)
dirs = []
for name in dir_names:
os_path = self._get_os_path(name, path)
if os.path.isdir(os_path) and not is_hidden(os_path, self.notebook_dir)\
and self.should_list(name):
try:
model = self.get_dir_model(name, path)
except IOError:
                    continue  # skip directories whose model cannot be read
dirs.append(model)
dirs = sorted(dirs, key=sort_key)
return dirs
# TODO: Remove this after we create the contents web service and directories are
# no longer listed by the notebook web service.
def get_dir_model(self, name, path=''):
"""Get the directory model given a directory name and its API style path"""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isdir(os_path):
raise IOError('directory does not exist: %r' % os_path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
        model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'directory'
return model
def list_notebooks(self, path):
"""Returns a list of dictionaries that are the standard model
for all notebooks in the relative 'path'.
Parameters
----------
path : str
the URL path that describes the relative path for the
listed notebooks
Returns
-------
notebooks : list of dicts
a list of the notebook models without 'content'
"""
path = path.strip('/')
notebook_names = self.get_notebook_names(path)
notebooks = [self.get_notebook(name, path, content=False)
for name in notebook_names if self.should_list(name)]
notebooks = sorted(notebooks, key=sort_key)
return notebooks
def get_notebook(self, name, path='', content=True):
""" Takes a path and name for a notebook and returns its model
Parameters
----------
name : str
the name of the notebook
path : str
the URL path that describes the relative path for
the notebook
Returns
-------
model : dict
            the notebook model. If content=True, returns the 'content'
            dict in the model as well.
"""
path = path.strip('/')
if not self.notebook_exists(name=name, path=path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % name)
os_path = self._get_os_path(name, path)
info = os.stat(os_path)
last_modified = tz.utcfromtimestamp(info.st_mtime)
created = tz.utcfromtimestamp(info.st_ctime)
# Create the notebook model.
        model = {}
model['name'] = name
model['path'] = path
model['last_modified'] = last_modified
model['created'] = created
model['type'] = 'notebook'
if content:
with io.open(os_path, 'r', encoding='utf-8') as f:
try:
nb = current.read(f, u'json')
except Exception as e:
raise web.HTTPError(400, u"Unreadable Notebook: %s %s" % (os_path, e))
self.mark_trusted_cells(nb, name, path)
model['content'] = nb
return model
def save_notebook(self, model, name='', path=''):
"""Save the notebook model and return the model with no content."""
path = path.strip('/')
if 'content' not in model:
raise web.HTTPError(400, u'No notebook JSON data provided')
# One checkpoint should always exist
if self.notebook_exists(name, path) and not self.list_checkpoints(name, path):
self.create_checkpoint(name, path)
new_path = model.get('path', path).strip('/')
new_name = model.get('name', name)
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
# Save the notebook file
os_path = self._get_os_path(new_name, new_path)
nb = current.to_notebook_json(model['content'])
self.check_and_sign(nb, new_name, new_path)
if 'name' in nb['metadata']:
nb['metadata']['name'] = u''
try:
self.log.debug("Autosaving notebook %s", os_path)
with io.open(os_path, 'w', encoding='utf-8') as f:
current.write(nb, f, u'json')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while autosaving notebook: %s %s' % (os_path, e))
# Save .py script as well
if self.save_script:
py_path = os.path.splitext(os_path)[0] + '.py'
self.log.debug("Writing script %s", py_path)
try:
with io.open(py_path, 'w', encoding='utf-8') as f:
current.write(nb, f, u'py')
except Exception as e:
raise web.HTTPError(400, u'Unexpected error while saving notebook as script: %s %s' % (py_path, e))
model = self.get_notebook(new_name, new_path, content=False)
return model
def update_notebook(self, model, name, path=''):
"""Update the notebook's path and/or name"""
path = path.strip('/')
new_name = model.get('name', name)
new_path = model.get('path', path).strip('/')
if path != new_path or name != new_name:
self.rename_notebook(name, path, new_name, new_path)
model = self.get_notebook(new_name, new_path, content=False)
return model
def delete_notebook(self, name, path=''):
"""Delete notebook by name and path."""
path = path.strip('/')
os_path = self._get_os_path(name, path)
if not os.path.isfile(os_path):
raise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)
# clear checkpoints
for checkpoint in self.list_checkpoints(name, path):
checkpoint_id = checkpoint['id']
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if os.path.isfile(cp_path):
self.log.debug("Unlinking checkpoint %s", cp_path)
os.unlink(cp_path)
self.log.debug("Unlinking notebook %s", os_path)
os.unlink(os_path)
def rename_notebook(self, old_name, old_path, new_name, new_path):
"""Rename a notebook."""
old_path = old_path.strip('/')
new_path = new_path.strip('/')
if new_name == old_name and new_path == old_path:
return
new_os_path = self._get_os_path(new_name, new_path)
old_os_path = self._get_os_path(old_name, old_path)
# Should we proceed with the move?
if os.path.isfile(new_os_path):
raise web.HTTPError(409, u'Notebook with name already exists: %s' % new_os_path)
if self.save_script:
old_py_path = os.path.splitext(old_os_path)[0] + '.py'
new_py_path = os.path.splitext(new_os_path)[0] + '.py'
if os.path.isfile(new_py_path):
raise web.HTTPError(409, u'Python script with name already exists: %s' % new_py_path)
# Move the notebook file
try:
shutil.move(old_os_path, new_os_path)
except Exception as e:
raise web.HTTPError(500, u'Unknown error renaming notebook: %s %s' % (old_os_path, e))
# Move the checkpoints
old_checkpoints = self.list_checkpoints(old_name, old_path)
for cp in old_checkpoints:
checkpoint_id = cp['id']
old_cp_path = self.get_checkpoint_path(checkpoint_id, old_name, old_path)
new_cp_path = self.get_checkpoint_path(checkpoint_id, new_name, new_path)
if os.path.isfile(old_cp_path):
self.log.debug("Renaming checkpoint %s -> %s", old_cp_path, new_cp_path)
shutil.move(old_cp_path, new_cp_path)
# Move the .py script
if self.save_script:
shutil.move(old_py_path, new_py_path)
# Checkpoint-related utilities
def get_checkpoint_path(self, checkpoint_id, name, path=''):
"""find the path to a checkpoint"""
path = path.strip('/')
basename, _ = os.path.splitext(name)
filename = u"{name}-{checkpoint_id}{ext}".format(
name=basename,
checkpoint_id=checkpoint_id,
ext=self.filename_ext,
)
os_path = self._get_os_path(path=path)
cp_dir = os.path.join(os_path, self.checkpoint_dir)
if not os.path.exists(cp_dir):
os.mkdir(cp_dir)
cp_path = os.path.join(cp_dir, filename)
return cp_path
def get_checkpoint_model(self, checkpoint_id, name, path=''):
"""construct the info dict for a given checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
stats = os.stat(cp_path)
last_modified = tz.utcfromtimestamp(stats.st_mtime)
info = dict(
id = checkpoint_id,
last_modified = last_modified,
)
return info
# public checkpoint API
def create_checkpoint(self, name, path=''):
"""Create a checkpoint from the current state of a notebook"""
path = path.strip('/')
nb_path = self._get_os_path(name, path)
# only the one checkpoint ID:
checkpoint_id = u"checkpoint"
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
self.log.debug("creating checkpoint for notebook %s", name)
self._copy(nb_path, cp_path)
# return the checkpoint info
return self.get_checkpoint_model(checkpoint_id, name, path)
def list_checkpoints(self, name, path=''):
"""list the checkpoints for a given notebook
This notebook manager currently only supports one checkpoint per notebook.
"""
path = path.strip('/')
checkpoint_id = "checkpoint"
os_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.exists(os_path):
return []
else:
return [self.get_checkpoint_model(checkpoint_id, name, path)]
def restore_checkpoint(self, checkpoint_id, name, path=''):
"""restore a notebook to a checkpointed state"""
path = path.strip('/')
self.log.info("restoring Notebook %s from checkpoint %s", name, checkpoint_id)
nb_path = self._get_os_path(name, path)
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
self.log.debug("checkpoint file does not exist: %s", cp_path)
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s-%s' % (name, checkpoint_id)
)
# ensure notebook is readable (never restore from an unreadable notebook)
with io.open(cp_path, 'r', encoding='utf-8') as f:
current.read(f, u'json')
self._copy(cp_path, nb_path)
self.log.debug("copying %s -> %s", cp_path, nb_path)
def delete_checkpoint(self, checkpoint_id, name, path=''):
"""delete a notebook's checkpoint"""
path = path.strip('/')
cp_path = self.get_checkpoint_path(checkpoint_id, name, path)
if not os.path.isfile(cp_path):
raise web.HTTPError(404,
u'Notebook checkpoint does not exist: %s%s-%s' % (path, name, checkpoint_id)
)
self.log.debug("unlinking %s", cp_path)
os.unlink(cp_path)
def info_string(self):
return "Serving notebooks from local directory: %s" % self.notebook_dir
|
the-stack_0_2085 | import re
from bs4 import BeautifulSoup
from time import sleep
import pickle
import praw
import OAuth2Util
from allpages import getPages
from lookup import findItem
r = praw.Reddit('bot1')
m = re.compile(r"\[\[[^\]]*\]\]")
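# Example (illustrative): m.findall("price check [[Headhunter]] please") returns
# ['[[Headhunter]]']; the pattern matches double-bracketed names containing no ']'.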
def respond(lim, rate, subs):
with open('ids.pickle', 'rb') as handle:
ids = pickle.load(handle)
i = 0
while True:
if i % 100 == 0:
getPages()
i += 1
for sub in subs:
subreddit = r.subreddit(sub)
for submission in subreddit.new(limit=lim):
comment_queue = submission.comments[:]
while comment_queue:
com = comment_queue.pop(0)
if "[[" in com.body and "]]" in com.body and com.id not in ids:
print("Found Comment:" + com.id)
reply = ""
for item in m.findall(com.body)[:10]:
isPOE = sub.lower()=="pathofexile"
temp = findItem(item[2:-2], isPOE)
reply += temp
if temp != "":
reply += "\n\n---------\n\n"
if reply != "":
reply += " ^I ^am ^a ^bot. ^Reply ^to ^me ^with ^up ^to ^7 ^[[item names]]."
reply += " ^Please ^contact ^/u/liortulip, ^my ^creator"
reply += " ^with ^any ^questions ^or ^concerns. ^Thanks!"
print("Replying...")
com.reply(reply)
else:
print("False Reply ^")
ids.append(com.id)
comment_queue.extend(com.replies)
with open('ids.pickle', 'wb') as handle:
pickle.dump(ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
sleep(rate)
respond(50,10, ["test"])
|
the-stack_0_2087 | from abc import abstractmethod
import datetime
import numpy as np
import xarray as xr
from pyproj import CRS
from RAiDER.logger import *
from RAiDER import utilFcns as util
from RAiDER.models.model_levels import (
LEVELS_137_HEIGHTS,
LEVELS_25_HEIGHTS,
A_137_HRES,
B_137_HRES,
)
from RAiDER.models.weatherModel import WeatherModel
class ECMWF(WeatherModel):
'''
Implement ECMWF models
'''
def __init__(self):
# initialize a weather model
WeatherModel.__init__(self)
# model constants
self._k1 = 0.776 # [K/Pa]
self._k2 = 0.233 # [K/Pa]
self._k3 = 3.75e3 # [K^2/Pa]
self._lon_res = 0.2
self._lat_res = 0.2
self._proj = CRS.from_epsg(4326)
self._model_level_type = 'ml' # Default
def setLevelType(self, levelType):
'''Set the level type to model levels or pressure levels'''
if levelType in ['ml', 'pl']:
self._model_level_type = levelType
else:
raise RuntimeError('Level type {} is not recognized'.format(levelType))
if levelType == 'ml':
self.__model_levels__()
else:
self.__pressure_levels__()
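    # Example usage (sketch): a concrete subclass instance would typically call
    #   model.setLevelType('ml')   # 137 model levels (the default)
    # or
    #   model.setLevelType('pl')   # pressure levels defined by the subclass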
@abstractmethod
def __pressure_levels__(self):
pass
def __model_levels__(self):
self._levels = 137
self._zlevels = np.flipud(LEVELS_137_HEIGHTS)
self._a = A_137_HRES
self._b = B_137_HRES
def load_weather(self, *args, **kwargs):
'''
Consistent class method to be implemented across all weather model types.
As a result of calling this method, all of the variables (x, y, z, p, q,
t, wet_refractivity, hydrostatic refractivity, e) should be fully
populated.
'''
self._load_model_level(*self.files)
def _load_model_level(self, fname):
# read data from netcdf file
lats, lons, xs, ys, t, q, lnsp, z = self._makeDataCubes(
fname,
verbose=False
)
# ECMWF appears to give me this backwards
if lats[0] > lats[1]:
z = z[::-1]
lnsp = lnsp[::-1]
t = t[:, ::-1]
q = q[:, ::-1]
lats = lats[::-1]
# Lons is usually ok, but we'll throw in a check to be safe
if lons[0] > lons[1]:
z = z[..., ::-1]
lnsp = lnsp[..., ::-1]
t = t[..., ::-1]
q = q[..., ::-1]
lons = lons[::-1]
# pyproj gets fussy if the latitude is wrong, plus our
# interpolator isn't clever enough to pick up on the fact that
# they are the same
lons[lons > 180] -= 360
self._t = t
self._q = q
geo_hgt, pres, hgt = self._calculategeoh(z, lnsp)
# re-assign lons, lats to match heights
_lons = np.broadcast_to(lons[np.newaxis, np.newaxis, :], hgt.shape)
_lats = np.broadcast_to(lats[np.newaxis, :, np.newaxis], hgt.shape)
# ys is latitude
self._get_heights(_lats, hgt)
h = self._zs.copy()
# We want to support both pressure levels and true pressure grids.
# If the shape has one dimension, we'll scale it up to act as a
# grid, otherwise we'll leave it alone.
if len(pres.shape) == 1:
self._p = np.broadcast_to(pres[:, np.newaxis, np.newaxis], self._zs.shape)
else:
self._p = pres
# Re-structure everything from (heights, lats, lons) to (lons, lats, heights)
self._p = np.transpose(self._p, (1, 2, 0))
self._t = np.transpose(self._t, (1, 2, 0))
self._q = np.transpose(self._q, (1, 2, 0))
h = np.transpose(h, (1, 2, 0))
self._lats = np.transpose(_lats, (1, 2, 0))
self._lons = np.transpose(_lons, (1, 2, 0))
# Flip all the axis so that zs are in order from bottom to top
# lats / lons are simply replicated to all heights so they don't need flipped
self._p = np.flip(self._p, axis=2)
self._t = np.flip(self._t, axis=2)
self._q = np.flip(self._q, axis=2)
self._ys = self._lats.copy()
self._xs = self._lons.copy()
self._zs = np.flip(h, axis=2)
def _fetch(self, lats, lons, time, out, Nextra=2):
'''
Fetch a weather model from ECMWF
'''
# bounding box plus a buffer
lat_min, lat_max, lon_min, lon_max = self._get_ll_bounds(lats, lons, Nextra)
# execute the search at ECMWF
try:
self._get_from_ecmwf(
lat_min,
lat_max,
self._lat_res,
lon_min,
lon_max,
self._lon_res,
time,
out
)
except Exception as e:
logger.warning('Query point bounds are {}/{}/{}/{}'.format(lat_min, lat_max, lon_min, lon_max))
logger.warning('Query time: {}'.format(time))
logger.exception(e)
def _get_from_ecmwf(self, lat_min, lat_max, lat_step, lon_min, lon_max,
lon_step, time, out):
import ecmwfapi
server = ecmwfapi.ECMWFDataServer()
corrected_date = util.round_date(time, datetime.timedelta(hours=6))
server.retrieve({
"class": self._classname, # ERA-Interim
'dataset': self._dataset,
"expver": "{}".format(self._expver),
# They warn me against all, but it works well
"levelist": 'all',
"levtype": "ml", # Model levels
"param": "lnsp/q/z/t", # Necessary variables
"stream": "oper",
# date: Specify a single date as "2015-08-01" or a period as
# "2015-08-01/to/2015-08-31".
"date": datetime.datetime.strftime(corrected_date, "%Y-%m-%d"),
# type: Use an (analysis) unless you have a particular reason to
# use fc (forecast).
"type": "an",
# time: With type=an, time can be any of
# "00:00:00/06:00:00/12:00:00/18:00:00". With type=fc, time can
# be any of "00:00:00/12:00:00",
"time": datetime.time.strftime(corrected_date.time(), "%H:%M:%S"),
# step: With type=an, step is always "0". With type=fc, step can
# be any of "3/6/9/12".
"step": "0",
# grid: Only regular lat/lon grids are supported.
"grid": '{}/{}'.format(lat_step, lon_step),
"area": '{}/{}/{}/{}'.format(lat_max, lon_min, lat_min, lon_max), # area: N/W/S/E
"format": "netcdf",
"resol": "av",
"target": out, # target: the name of the output file.
})
def _get_from_cds(
self,
lat_min,
lat_max,
lat_step,
lon_min,
lon_max,
lon_step,
acqTime,
outname
):
import cdsapi
c = cdsapi.Client(verify=0)
if self._model_level_type == 'pl':
var = ['z', 'q', 't']
levType = 'pressure_level'
else:
var = "129/130/133/152" # 'lnsp', 'q', 'z', 't'
levType = 'model_level'
bbox = [lat_max, lon_min, lat_min, lon_max]
dataDict = {
"product_type": "reanalysis",
"{}".format(levType): 'all',
"levtype": "{}".format(self._model_level_type), # 'ml' for model levels or 'pl' for pressure levels
'param': var,
"stream": "oper",
"type": "an",
"year": "{}".format(acqTime.year),
"month": "{}".format(acqTime.month),
"day": "{}".format(acqTime.day),
"time": "{}".format(datetime.time.strftime(acqTime.time(), '%H:%M')),
# step: With type=an, step is always "0". With type=fc, step can
# be any of "3/6/9/12".
"step": "0",
"area": bbox,
"format": "netcdf"}
try:
c.retrieve('reanalysis-era5-pressure-levels', dataDict, outname)
except Exception as e:
logger.warning('Query point bounds are {}/{} latitude and {}/{} longitude'.format(lat_min, lat_max, lon_min, lon_max))
logger.warning('Query time: {}'.format(acqTime))
logger.exception(e)
raise Exception
def _download_ecmwf(self, lat_min, lat_max, lat_step, lon_min, lon_max, lon_step, time, out):
from ecmwfapi import ECMWFService
server = ECMWFService("mars")
corrected_date = util.round_date(time, datetime.timedelta(hours=6))
if self._model_level_type == 'ml':
param = "129/130/133/152"
else:
param = "129.128/130.128/133.128/152"
server.execute(
{
'class': self._classname,
'dataset': self._dataset,
'expver': "{}".format(self._expver),
'resol': "av",
'stream': "oper",
'type': "an",
'levelist': "all",
'levtype': "{}".format(self._model_level_type),
'param': param,
'date': datetime.datetime.strftime(corrected_date, "%Y-%m-%d"),
'time': "{}".format(datetime.time.strftime(corrected_date.time(), '%H:%M')),
'step': "0",
'grid': "{}/{}".format(lon_step, lat_step),
'area': "{}/{}/{}/{}".format(lat_max, util.floorish(lon_min, 0.1), util.floorish(lat_min, 0.1), lon_max),
'format': "netcdf",
},
out
)
def _load_pressure_level(self, filename, *args, **kwargs):
with xr.open_dataset(filename) as block:
# Pull the data
z = np.squeeze(block['z'].values)
t = np.squeeze(block['t'].values)
q = np.squeeze(block['q'].values)
lats = np.squeeze(block.latitude.values)
lons = np.squeeze(block.longitude.values)
levels = np.squeeze(block.level.values) * 100
z = np.flip(z, axis=1)
# ECMWF appears to give me this backwards
if lats[0] > lats[1]:
z = z[::-1]
t = t[:, ::-1]
q = q[:, ::-1]
lats = lats[::-1]
# Lons is usually ok, but we'll throw in a check to be safe
if lons[0] > lons[1]:
z = z[..., ::-1]
t = t[..., ::-1]
q = q[..., ::-1]
lons = lons[::-1]
# pyproj gets fussy if the latitude is wrong, plus our
# interpolator isn't clever enough to pick up on the fact that
# they are the same
lons[lons > 180] -= 360
self._t = t
self._q = q
geo_hgt = z / self._g0
# re-assign lons, lats to match heights
_lons = np.broadcast_to(lons[np.newaxis, np.newaxis, :],
geo_hgt.shape)
_lats = np.broadcast_to(lats[np.newaxis, :, np.newaxis],
geo_hgt.shape)
# correct heights for latitude
self._get_heights(_lats, geo_hgt)
self._p = np.broadcast_to(levels[:, np.newaxis, np.newaxis],
self._zs.shape)
# Re-structure everything from (heights, lats, lons) to (lons, lats, heights)
self._p = np.transpose(self._p)
self._t = np.transpose(self._t)
self._q = np.transpose(self._q)
self._lats = np.transpose(_lats)
self._lons = np.transpose(_lons)
self._ys = self._lats.copy()
self._xs = self._lons.copy()
self._zs = np.transpose(self._zs)
# check this
# data cube format should be lats,lons,heights
self._lats = self._lats.swapaxes(0, 1)
self._lons = self._lons.swapaxes(0, 1)
self._xs = self._xs.swapaxes(0, 1)
self._ys = self._ys.swapaxes(0, 1)
self._zs = self._zs.swapaxes(0, 1)
self._p = self._p.swapaxes(0, 1)
self._q = self._q.swapaxes(0, 1)
self._t = self._t.swapaxes(0, 1)
# For some reason z is opposite the others
self._p = np.flip(self._p, axis=2)
self._t = np.flip(self._t, axis=2)
self._q = np.flip(self._q, axis=2)
def _makeDataCubes(self, fname, verbose=False):
'''
Create a cube of data representing temperature and relative humidity
at specified pressure levels
'''
# get ll_bounds
S, N, W, E = self._ll_bounds
with xr.open_dataset(fname) as ds:
ds = ds.assign_coords(longitude=(((ds.longitude + 180) % 360) - 180))
# mask based on query bounds
m1 = (S <= ds.latitude) & (N >= ds.latitude)
m2 = (W <= ds.longitude) & (E >= ds.longitude)
block = ds.where(m1 & m2, drop=True)
# Pull the data
z = np.squeeze(block['z'].values)[0, ...]
t = np.squeeze(block['t'].values)
q = np.squeeze(block['q'].values)
lnsp = np.squeeze(block['lnsp'].values)[0, ...]
lats = np.squeeze(block.latitude.values)
lons = np.squeeze(block.longitude.values)
xs = lons.copy()
ys = lats.copy()
if z.size == 0:
raise RuntimeError('There is no data in z, '
'you may have a problem with your mask')
return lats, lons, xs, ys, t, q, lnsp, z
|
the-stack_0_2088 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for property-based testing for TFP distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
from absl import logging
import hypothesis as hp
from hypothesis import strategies as hps
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import util as tfp_util
from tensorflow_probability.python.bijectors import hypothesis_testlib as bijector_hps
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import tensorshape_util
JAX_MODE = False
# pylint is unable to handle @hps.composite (e.g. complains "No value for
# argument 'batch_shape' in function call"), so disable this lint for the file.
# pylint: disable=no-value-for-parameter
TF2_FRIENDLY_DISTS = (
'Bates',
'Bernoulli',
'Beta',
'BetaBinomial',
'Binomial',
'Chi',
'Chi2',
'CholeskyLKJ',
'Categorical',
'Cauchy',
'ContinuousBernoulli',
'Deterministic',
'DeterminantalPointProcess',
'Dirichlet',
'DirichletMultinomial',
'DoublesidedMaxwell',
'Empirical',
'Exponential',
'ExpGamma',
'ExpInverseGamma',
'FiniteDiscrete',
'Gamma',
'GammaGamma',
'GeneralizedNormal',
'GeneralizedPareto',
'Geometric',
'Gumbel',
'GeneralizedExtremeValue',
'HalfCauchy',
'HalfNormal',
'HalfStudentT',
'Horseshoe',
'InverseGamma',
'InverseGaussian',
'JohnsonSU',
'Kumaraswamy',
'Laplace',
'LKJ',
'LogLogistic',
'LogNormal',
'Logistic',
'Normal',
'Moyal',
'Multinomial',
'NegativeBinomial',
'OneHotCategorical',
'OrderedLogistic',
'Pareto',
'PERT',
'PlackettLuce',
'Poisson',
'PowerSpherical',
# 'PoissonLogNormalQuadratureCompound' TODO(b/137956955): Add support
# for hypothesis testing
'ProbitBernoulli',
'RelaxedBernoulli',
'ExpRelaxedOneHotCategorical',
# 'SinhArcsinh' TODO(b/137956955): Add support for hypothesis testing
'Skellam',
'SphericalUniform',
'StudentT',
'Triangular',
'TruncatedCauchy',
'TruncatedNormal',
'Uniform',
'VonMises',
'VonMisesFisher',
'Weibull',
'WishartTriL',
'Zipf',
)
# SPECIAL_DISTS are distributions that should not be drawn by
# `base_distributions`, because they are parameterized by one or more
# sub-distributions themselves. This list is used to suppress warnings from
# `_instantiable_base_dists`, below.
SPECIAL_DISTS = (
'Autoregressive',
'BatchReshape', # (has strategy)
'Blockwise',
'Distribution', # Base class; not a distribution at all
'Empirical', # Base distribution with custom instantiation; (has strategy)
'JointDistribution',
'JointDistributionCoroutine',
'JointDistributionCoroutineAutoBatched',
'JointDistributionNamed',
'JointDistributionNamedAutoBatched',
'JointDistributionSequential',
'JointDistributionSequentialAutoBatched',
'Independent', # (has strategy)
'Mixture', # (has strategy)
'MixtureSameFamily', # (has strategy)
'Sample', # (has strategy)
'TransformedDistribution', # (has strategy)
'QuantizedDistribution', # (has strategy)
)
# MUTEX_PARAMS are mutually exclusive parameters that cannot be drawn together
# in broadcasting_params.
MUTEX_PARAMS = (
set(['logits', 'probs']),
set(['probits', 'probs']),
set(['rate', 'log_rate']),
set(['rate1', 'log_rate1']),
set(['rate2', 'log_rate2']),
set(['scale', 'log_scale']),
set(['scale', 'scale_tril', 'scale_diag', 'scale_identity_multiplier']),
)
# Allowlist of underlying distributions for QuantizedDistribution (must have
# continuous, infinite support -- QuantizedDistribution also works for finite-
# support distributions for which the length of the support along each dimension
# is at least 1, though it is difficult to construct draws of these
# distributions in general, and wouldn't contribute much to test coverage.)
QUANTIZED_BASE_DISTS = (
'Chi2',
'Exponential',
'LogNormal',
'Logistic',
'Normal',
'Pareto',
'Poisson',
'StudentT',
)
# Functions used to constrain randomly sampled parameter ndarrays.
# TODO(b/128518790): Eliminate / minimize the fudge factors in here.
def constrain_between_eps_and_one_minus_eps(eps=1e-6):
return lambda x: eps + (1 - 2 * eps) * tf.sigmoid(x)
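# Example (illustrative): constrain_between_eps_and_one_minus_eps()(x) squashes an
# unconstrained tensor into the open interval (1e-6, 1 - 1e-6); x = 0. maps to
# approximately 0.5.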
def ensure_high_gt_low(low, high):
"""Returns a value with shape matching `high` and gt broadcastable `low`."""
new_high = tf.maximum(low + tf.abs(low) * .1 + .1, high)
reduce_dims = []
if (tensorshape_util.rank(new_high.shape) >
tensorshape_util.rank(high.shape)):
reduced_leading_axes = tf.range(
tensorshape_util.rank(new_high.shape) -
tensorshape_util.rank(high.shape))
new_high = tf.math.reduce_max(
new_high, axis=reduced_leading_axes)
reduce_dims = [
d for d in range(tensorshape_util.rank(high.shape))
if high.shape[d] < new_high.shape[d]
]
if reduce_dims:
new_high = tf.math.reduce_max(
new_high, axis=reduce_dims, keepdims=True)
return new_high
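# Example (illustrative, scalar inputs of matching shape): for low = 1.0 and
# high = 0.5 the result is max(1.0 + 0.1 + 0.1, 0.5) = 1.2, i.e. `high` is pushed
# strictly above `low`.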
def fix_finite_discrete(d):
size = d.get('probs', d.get('logits', None)).shape[-1]
return dict(d, outcomes=tf.linspace(-1.0, 1.0, size))
def fix_lkj(d):
return dict(d, concentration=d['concentration'] + 1, dimension=3)
def fix_spherical_uniform(d):
return dict(d, dimension=5, batch_shape=[])
def fix_pert(d):
peak = ensure_high_gt_low(d['low'], d['peak'])
high = ensure_high_gt_low(peak, d['high'])
temperature = ensure_high_gt_low(
np.zeros(d['temperature'].shape, dtype=np.float32), d['temperature'])
return dict(d, peak=peak, high=high, temperature=temperature)
def fix_triangular(d):
peak = ensure_high_gt_low(d['low'], d['peak'])
high = ensure_high_gt_low(peak, d['high'])
return dict(d, peak=peak, high=high)
def fix_wishart(d):
df = d['df']
scale = d.get('scale', d.get('scale_tril'))
return dict(d, df=tf.maximum(df, tf.cast(scale.shape[-1], df.dtype)))
def fix_bates(d):
total_count = tf.math.maximum(
tf.math.minimum(
d['total_count'],
tfd.bates.BATES_TOTAL_COUNT_STABILITY_LIMITS[ # pylint: disable=protected-access
d['total_count'].dtype]),
1.)
high = ensure_high_gt_low(d['low'], d['high'])
return dict(d, total_count=total_count, high=high)
CONSTRAINTS = {
'atol':
tf.math.softplus,
'rtol':
tf.math.softplus,
'concentration':
tfp_hps.softplus_plus_eps(),
'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.
lambda x: tf.math.tanh(x) * 0.24,
'concentration0':
tfp_hps.softplus_plus_eps(),
'concentration1':
tfp_hps.softplus_plus_eps(),
'covariance_matrix':
tfp_hps.positive_definite,
'df':
tfp_hps.softplus_plus_eps(),
'DeterminantalPointProcess.eigenvalues':
tfp_hps.softplus_plus_eps(),
'eigenvectors':
tfp_hps.orthonormal,
'InverseGaussian.loc':
tfp_hps.softplus_plus_eps(),
'JohnsonSU.tailweight':
tfp_hps.softplus_plus_eps(),
'PowerSpherical.mean_direction':
lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),
'VonMisesFisher.mean_direction': # max ndims is 3 to avoid instability.
lambda x: tf.math.l2_normalize(tf.math.sigmoid(x[..., :3]) + 1e-6, -1),
'Categorical.probs':
tf.math.softmax,
'ExpRelaxedOneHotCategorical.probs':
tf.math.softmax,
'FiniteDiscrete.probs':
tf.math.softmax,
'Multinomial.probs':
tf.math.softmax,
'OneHotCategorical.probs':
tf.math.softmax,
'RelaxedCategorical.probs':
tf.math.softmax,
'Zipf.power':
tfp_hps.softplus_plus_eps(1 + 1e-6), # strictly > 1
'ContinuousBernoulli.probs':
tf.sigmoid,
'Geometric.logits': # TODO(b/128410109): re-enable down to -50
# Capping at 15. so that probability is less than 1, and entropy is
# defined. b/147394924
lambda x: tf.minimum(tf.maximum(x, -16.), 15.), # works around the bug
'Geometric.probs':
constrain_between_eps_and_one_minus_eps(),
'Binomial.probs':
tf.sigmoid,
'NegativeBinomial.probs':
tf.sigmoid,
'Bernoulli.probs':
tf.sigmoid,
'PlackettLuce.scores':
tfp_hps.softplus_plus_eps(),
'ProbitBernoulli.probs':
tf.sigmoid,
'RelaxedBernoulli.probs':
tf.sigmoid,
'cutpoints':
# Permit values that aren't too large
lambda x: tfb.Ascending().forward(10 * tf.math.tanh(x)),
'log_rate':
lambda x: tf.maximum(x, -16.),
# Capping log_rate1 and log_rate2 to 15. This is because if both are large
# (meaning the rates are `inf`), then the Skellam distribution is undefined.
'log_rate1':
lambda x: tf.minimum(tf.maximum(x, -16.), 15.),
'log_rate2':
lambda x: tf.minimum(tf.maximum(x, -16.), 15.),
'log_scale':
lambda x: tf.maximum(x, -16.),
'mixing_concentration':
tfp_hps.softplus_plus_eps(),
'mixing_rate':
tfp_hps.softplus_plus_eps(),
'rate':
tfp_hps.softplus_plus_eps(),
'rate1':
tfp_hps.softplus_plus_eps(),
'rate2':
tfp_hps.softplus_plus_eps(),
'scale':
tfp_hps.softplus_plus_eps(),
'Wishart.scale':
tfp_hps.positive_definite,
'scale_diag':
tfp_hps.softplus_plus_eps(),
'scale_identity_multiplier':
tfp_hps.softplus_plus_eps(),
'scale_tril':
tfp_hps.lower_tril_positive_definite,
'tailweight':
tfp_hps.softplus_plus_eps(),
'temperature':
tfp_hps.softplus_plus_eps(),
'total_count':
lambda x: tf.floor(tf.sigmoid(x / 100) * 100) + 1,
'Bates':
fix_bates,
'Bernoulli':
lambda d: dict(d, dtype=tf.float32),
'CholeskyLKJ':
fix_lkj,
'LKJ':
fix_lkj,
'PERT':
fix_pert,
'Triangular':
fix_triangular,
'TruncatedCauchy':
lambda d: dict(d, high=ensure_high_gt_low(d['low'], d['high'])),
'TruncatedNormal':
lambda d: dict(d, high=ensure_high_gt_low(d['low'], d['high'])),
'Uniform':
lambda d: dict(d, high=ensure_high_gt_low(d['low'], d['high'])),
'SphericalUniform':
fix_spherical_uniform,
'Wishart':
fix_wishart,
'WishartTriL':
fix_wishart,
'Zipf':
lambda d: dict(d, dtype=tf.float32),
'FiniteDiscrete':
fix_finite_discrete,
'GeneralizedNormal.power':
tfp_hps.softplus_plus_eps(),
}
def constraint_for(dist=None, param=None):
if param is not None:
return CONSTRAINTS.get('{}.{}'.format(dist, param),
CONSTRAINTS.get(param, tfp_hps.identity_fn))
return CONSTRAINTS.get(dist, tfp_hps.identity_fn)
class DistInfo(collections.namedtuple(
'DistInfo', ['cls', 'params_event_ndims'])):
"""Sufficient information to instantiate a Distribution.
To wit
- The Python class `cls` giving the class, and
- A Python dict `params_event_ndims` giving the event dimensions for the
parameters (so that parameters can be built with predictable batch shapes).
Specifically, the `params_event_ndims` dict maps string parameter names to
Python integers. Each integer gives how many (trailing) dimensions of that
parameter are part of the event.
"""
__slots__ = ()
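# Example entry (sketch): for a scalar distribution such as tfd.Normal, the
# corresponding record would look roughly like
#   DistInfo(cls=tfd.Normal, params_event_ndims={'loc': 0, 'scale': 0})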
def _instantiable_base_dists():
"""Computes the table of mechanically instantiable base Distributions.
A Distribution is mechanically instantiable if
- The class appears as a symbol binding in `tfp.distributions`;
- The class defines a `_params_event_ndims` method (necessary
to generate parameter Tensors with predictable batch shapes); and
- The name is not blocklisted in `SPECIAL_DISTS`.
  Additionally, the Empirical distribution is hardcoded with special
instantiation rules for each choice of event_ndims among 0, 1, and 2.
Compound distributions like TransformedDistribution have their own
instantiation rules hard-coded in the `distributions` strategy.
Returns:
instantiable_base_dists: A Python dict mapping distribution name
(as a string) to a `DistInfo` carrying the information necessary to
instantiate it.
"""
result = {}
for dist_name in dir(tfd):
dist_class = getattr(tfd, dist_name)
if (not inspect.isclass(dist_class) or
not issubclass(dist_class, tfd.Distribution) or
dist_name in SPECIAL_DISTS):
continue
try:
params_event_ndims = dist_class._params_event_ndims() # pylint: disable=protected-access
except NotImplementedError:
msg = 'Unable to test tfd.%s: _params_event_ndims not implemented.'
logging.warning(msg, dist_name)
continue
result[dist_name] = DistInfo(dist_class, params_event_ndims)
# Empirical._params_event_ndims depends on `self.event_ndims`, so we have to
# explicitly list these entries.
result['Empirical|event_ndims=0'] = DistInfo( #
functools.partial(tfd.Empirical, event_ndims=0), dict(samples=1))
result['Empirical|event_ndims=1'] = DistInfo( #
functools.partial(tfd.Empirical, event_ndims=1), dict(samples=2))
result['Empirical|event_ndims=2'] = DistInfo( #
functools.partial(tfd.Empirical, event_ndims=2), dict(samples=3))
return result
# INSTANTIABLE_BASE_DISTS is a map from str->(DistClass, params_event_ndims)
INSTANTIABLE_BASE_DISTS = _instantiable_base_dists()
del _instantiable_base_dists
INSTANTIABLE_META_DISTS = (
'BatchReshape',
'Independent',
'Mixture',
'MixtureSameFamily',
'Sample',
'TransformedDistribution',
'QuantizedDistribution',
)
def _report_non_instantiable_meta_dists():
for dist_name in SPECIAL_DISTS:
if dist_name in ['Distribution', 'Empirical']: continue
if dist_name in INSTANTIABLE_META_DISTS: continue
msg = 'Unable to test tfd.%s: no instantiation strategy.'
logging.warning(msg, dist_name)
_report_non_instantiable_meta_dists()
del _report_non_instantiable_meta_dists
@hps.composite
def valid_slices(draw, batch_shape):
"""Samples a legal (possibly empty) slice for shape batch_shape."""
# We build up a list of slices in several stages:
# 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).
# 2. Decide whether or not to add an Ellipsis; if using, updating the indexing
# used (e.g. batch_shape[i]) to identify safe bounds.
# 3. Choose 0 to [remaining_dims] slices to come last.
# 4. Decide where to insert between 0 and 3 newaxis slices.
batch_shape = tf.TensorShape(batch_shape).as_list()
slices = []
batch_rank = len(batch_shape)
arbitrary_slices = hps.tuples(
hps.one_of(hps.just(None), hps.integers(min_value=-100, max_value=100)),
hps.one_of(hps.just(None), hps.integers(min_value=-100, max_value=100)),
hps.one_of(
hps.just(None),
hps.integers(min_value=-100, max_value=100).filter(lambda x: x != 0))
).map(lambda tup: slice(*tup))
# 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).
nslc_before_ellipsis = draw(hps.integers(min_value=0, max_value=batch_rank))
for i in range(nslc_before_ellipsis):
slc = draw(
hps.one_of(
hps.integers(min_value=0, max_value=batch_shape[i] - 1),
arbitrary_slices))
slices.append(slc)
# 2. Decide whether or not to add an Ellipsis; if using, updating the indexing
# used (e.g. batch_shape[i]) to identify safe bounds.
has_ellipsis = draw(hps.booleans().map(lambda x: (Ellipsis, x)))[1]
nslc_after_ellipsis = draw(
hps.integers(min_value=0, max_value=batch_rank - nslc_before_ellipsis))
if has_ellipsis:
slices.append(Ellipsis)
remain_start, remain_end = (batch_rank - nslc_after_ellipsis, batch_rank)
else:
remain_start = nslc_before_ellipsis
remain_end = nslc_before_ellipsis + nslc_after_ellipsis
# 3. Choose 0 to [remaining_dims] slices to come last.
for i in range(remain_start, remain_end):
slc = draw(
hps.one_of(
hps.integers(min_value=0, max_value=batch_shape[i] - 1),
arbitrary_slices))
slices.append(slc)
# 4. Decide where to insert between 0 and 3 newaxis slices.
newaxis_positions = draw(
hps.lists(hps.integers(min_value=0, max_value=len(slices)), max_size=3))
for i in sorted(newaxis_positions, reverse=True):
slices.insert(i, tf.newaxis)
slices = tuple(slices)
# Since `d[0]` ==> `d.__getitem__(0)` instead of `d.__getitem__((0,))`;
# and similarly `d[:3]` ==> `d.__getitem__(slice(None, 3))` instead of
# `d.__getitem__((slice(None, 3),))`; it is useful to test such scenarios.
if len(slices) == 1 and draw(hps.booleans()):
# Sometimes only a single item non-tuple.
return slices[0]
return slices
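# Example draw (illustrative; actual draws vary): for batch_shape [2, 3] this
# strategy might yield (1, Ellipsis, tf.newaxis), or a bare slice such as
# slice(None, 2) when only a single item is drawn.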
def stringify_slices(slices):
"""Returns a list of strings describing the items in `slices`.
Each returned string (in order) encodes what to do with one dimension of the
slicee:
- That number for a single integer slice;
- 'a:b:c' for a start-stop-step slice, omitting any missing components;
- 'tf.newaxis' for an axis insertion; or
- The ellipsis '...' for an arbitrary-rank gap.
Args:
slices: A single-dimension slice or a Python tuple of single-dimension
slices.
Returns:
pretty_slices: A list of Python strings encoding each slice.
"""
pretty_slices = []
slices = slices if isinstance(slices, tuple) else (slices,)
for slc in slices:
if slc == Ellipsis:
pretty_slices.append('...')
elif isinstance(slc, slice):
pretty_slices.append('{}:{}:{}'.format(
*['' if s is None else s for s in (slc.start, slc.stop, slc.step)]))
elif isinstance(slc, int) or tf.is_tensor(slc):
pretty_slices.append(str(slc))
elif slc is tf.newaxis:
pretty_slices.append('tf.newaxis')
else:
raise ValueError('Unexpected slice type: {}'.format(type(slc)))
return pretty_slices
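# Example (illustrative):
#   stringify_slices((2, slice(1, None, 2), Ellipsis, tf.newaxis))
#   returns ['2', '1::2', '...', 'tf.newaxis']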
def prime_factors(v):
"""Compute the prime factors of v."""
factors = []
primes = []
factor = 2
while v > 1:
while any(factor % p == 0 for p in primes):
factor += 1
primes.append(factor)
while v % factor == 0:
factors.append(factor)
v //= factor
return factors
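# Example (illustrative): prime_factors(12) returns [2, 2, 3]; factors are emitted
# in non-decreasing order with multiplicity.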
@hps.composite
def reshapes_of(draw, shape, max_ndims=4):
"""Strategy for valid reshapes of the given shape, rank at most max_ndims."""
factors = draw(hps.permutations(
prime_factors(tensorshape_util.num_elements(shape))))
split_points = sorted(draw(
hps.lists(hps.integers(min_value=0, max_value=len(factors)),
min_size=0, max_size=max_ndims - 1)))
result = ()
for start, stop in zip([0] + split_points, split_points + [len(factors)]):
result += (int(np.prod(factors[start:stop])),)
return result
def assert_shapes_unchanged(target_shaped_dict, possibly_bcast_dict):
for param, target_param_val in six.iteritems(target_shaped_dict):
np.testing.assert_array_equal(
tensorshape_util.as_list(target_param_val.shape),
tensorshape_util.as_list(possibly_bcast_dict[param].shape))
@hps.composite
def base_distribution_unconstrained_params(draw,
dist_name,
batch_shape=None,
event_dim=None,
enable_vars=False,
param_strategy_fn=None,
params=None):
"""Strategy for drawing unconstrained parameters of a base Distribution.
This does not draw parameters for compound distributions like `Independent`,
`MixtureSameFamily`, or `TransformedDistribution`; only base Distributions
that do not accept other Distributions as arguments.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
dist_name: Optional Python `str`. If given, the produced distributions
will all have this type.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Distribution. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`
`tfp.util.TransformedVariable`}.
param_strategy_fn: Optional callable with signature
`strategy = param_strategy_fn(shape, dtype, constraint_fn)`. If provided,
overrides the default strategy for generating float-valued parameters.
Default value: `None`.
params: An optional set of Distribution parameters. If params are not
provided, Hypothesis will choose a set of parameters.
Returns:
dists: A strategy for drawing Distribution parameters with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if params is not None:
assert batch_shape is not None, ('Need to pass in valid `batch_shape` when'
' passing in `params`.')
return params, batch_shape
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
# Draw raw parameters
if dist_name not in INSTANTIABLE_BASE_DISTS:
raise ValueError('Unknown Distribution name {}'.format(dist_name))
params_event_ndims = INSTANTIABLE_BASE_DISTS[dist_name].params_event_ndims
params_kwargs = draw(
tfp_hps.broadcasting_params(
batch_shape,
params_event_ndims,
event_dim=event_dim,
enable_vars=enable_vars,
constraint_fn_for=lambda param: constraint_for(dist_name, param),
mutex_params=MUTEX_PARAMS,
param_strategy_fn=param_strategy_fn))
hp.note('Forming dist {} with raw parameters {}'.format(dist_name,
params_kwargs))
return params_kwargs, batch_shape
def constrain_params(params_unconstrained, dist_name):
"""Constrains a parameters dictionary to a distribution's parameter space."""
# Constrain them to legal values
params_constrained = constraint_for(dist_name)(params_unconstrained)
# Sometimes the "distribution constraint" fn may replace c2t-tracking
# DeferredTensor params with Tensor params (e.g. fix_triangular). In such
# cases, we preserve the c2t-tracking DeferredTensors by wrapping them but
# ignoring the value. We similarly reinstate raw tf.Variables, so they
# appear in the distribution's `variables` list and can be initialized.
for k in params_constrained:
if (k in params_unconstrained and
isinstance(params_unconstrained[k],
(tfp_util.DeferredTensor, tf.Variable))
and params_unconstrained[k] is not params_constrained[k]):
def constrained_value(v, val=params_constrained[k]): # pylint: disable=cell-var-from-loop
# While the gradient to v will be 0, we only care about the c2t
# counts.
return v * 0 + val
params_constrained[k] = tfp_util.DeferredTensor(
params_unconstrained[k], constrained_value)
assert_shapes_unchanged(params_unconstrained, params_constrained)
hp.note('Forming dist {} with constrained parameters {}'.format(
dist_name, params_constrained))
return params_constrained
def modify_params(params, dist_name, validate_args):
params = dict(params)
params['validate_args'] = validate_args
if dist_name in ['Wishart', 'WishartTriL']:
# With the default `input_output_cholesky = False`, Wishart occasionally
# produces samples for which the Cholesky decompositions fail, causing
# an error in testDistribution when `log_prob` is called on a sample.
params['input_output_cholesky'] = True
return params
@hps.composite
def base_distributions(draw,
dist_name=None,
batch_shape=None,
event_dim=None,
enable_vars=False,
eligibility_filter=lambda name: True,
params=None,
param_strategy_fn=None,
validate_args=True):
"""Strategy for drawing arbitrary base Distributions.
This does not draw compound distributions like `Independent`,
`MixtureSameFamily`, or `TransformedDistribution`; only base Distributions
that do not accept other Distributions as arguments.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
dist_name: Optional Python `str`. If given, the produced distributions
will all have this type.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Distribution. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`
`tfp.util.TransformedVariable`}.
eligibility_filter: Optional Python callable. Blacklists some Distribution
class names so they will not be drawn at the top level.
params: An optional set of Distribution parameters. If params are not
provided, Hypothesis will choose a set of parameters.
param_strategy_fn: Optional callable with signature
`strategy = param_strategy_fn(shape, dtype, constraint_fn)`. If provided,
overrides the default strategy for generating float-valued parameters.
Default value: `None`.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing Distributions with the specified `batch_shape`
(or an arbitrary one if omitted).
"""
if dist_name is None:
names = [k for k in INSTANTIABLE_BASE_DISTS if eligibility_filter(k)]
dist_name = draw(hps.sampled_from(sorted(names)))
if dist_name == 'Empirical':
variants = [k for k in INSTANTIABLE_BASE_DISTS
if eligibility_filter(k) and 'Empirical' in k]
dist_name = draw(hps.sampled_from(sorted(variants)))
if dist_name == 'SphericalUniform':
return draw(spherical_uniforms(
batch_shape=batch_shape, event_dim=event_dim,
validate_args=validate_args))
if params is None:
params_unconstrained, batch_shape = draw(
base_distribution_unconstrained_params(
dist_name,
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
param_strategy_fn=param_strategy_fn))
params = constrain_params(params_unconstrained, dist_name)
params = modify_params(params, dist_name, validate_args=validate_args)
# Actually construct the distribution
dist_cls = INSTANTIABLE_BASE_DISTS[dist_name].cls
result_dist = dist_cls(**params)
# Check that the batch shape came out as expected
if batch_shape != result_dist.batch_shape:
msg = ('Distributions strategy generated a bad batch shape '
'for {}, should have been {}.').format(result_dist, batch_shape)
raise AssertionError(msg)
return result_dist
def depths():
return hps.integers(min_value=0, max_value=4)
def params_used(dist):
return [k for k, v in six.iteritems(dist.parameters) if v is not None]
@hps.composite
def spherical_uniforms(
draw, batch_shape=None, event_dim=None, validate_args=True):
"""Strategy for drawing `SphericalUniform` distributions.
The underlying distribution is drawn from the `distributions` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`SphericalUniform` distribution.
event_dim: Optional Python int giving the size of the
distribution's event dimension.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `UniformSphere` distributions with the
specified `batch_shape` (or an arbitrary one if omitted).
"""
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes(min_ndims=0, max_side=4))
if event_dim is None:
event_dim = draw(hps.integers(min_value=1, max_value=10))
result_dist = tfd.SphericalUniform(
dimension=event_dim, batch_shape=batch_shape, validate_args=validate_args)
return result_dist
@hps.composite
def batch_reshapes(
draw, batch_shape=None, event_dim=None,
enable_vars=False, depth=None,
eligibility_filter=lambda name: True, validate_args=True):
"""Strategy for drawing `BatchReshape` distributions.
The underlying distribution is drawn from the `distributions` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`BatchReshape` distribution. Note that the underlying distribution will
in general have a different batch shape, to make the reshaping
non-trivial. Hypothesis will pick one if omitted.
event_dim: Optional Python int giving the size of each of the underlying
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`
`tfp.util.TransformedVariable`}
depth: Python `int` giving maximum nesting depth of compound Distributions.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `BatchReshape` distributions with the
specified `batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes(min_ndims=1, max_side=13))
underlying_batch_shape = draw(reshapes_of(batch_shape))
underlying = draw(
distributions(
batch_shape=underlying_batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
depth=depth - 1,
eligibility_filter=eligibility_filter,
validate_args=validate_args))
hp.note('Forming BatchReshape with underlying dist {}; '
'parameters {}; batch_shape {}'.format(
underlying, params_used(underlying), batch_shape))
result_dist = tfd.BatchReshape(
underlying, batch_shape=batch_shape, validate_args=True)
return result_dist
@hps.composite
def independents(
draw, batch_shape=None, event_dim=None,
enable_vars=False, depth=None, eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing `Independent` distributions.
The underlying distribution is drawn from the `distributions` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`Independent` distribution. Note that the underlying distribution will in
general have a higher-rank batch shape, to make room for reinterpreting
some of those dimensions as the `Independent`'s event. Hypothesis will
pick one if omitted.
event_dim: Optional Python int giving the size of each of the underlying
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`
`tfp.util.TransformedVariable`}
depth: Python `int` giving maximum nesting depth of compound Distributions.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `Independent` distributions with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
reinterpreted_batch_ndims = draw(hps.integers(min_value=0, max_value=2))
if batch_shape is None:
batch_shape = draw(
tfp_hps.shapes(min_ndims=reinterpreted_batch_ndims))
else: # This independent adds some batch dims to its underlying distribution.
batch_shape = tensorshape_util.concatenate(
batch_shape,
draw(tfp_hps.shapes(
min_ndims=reinterpreted_batch_ndims,
max_ndims=reinterpreted_batch_ndims)))
underlying = draw(
distributions(
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
depth=depth - 1,
eligibility_filter=eligibility_filter,
validate_args=validate_args))
hp.note('Forming Independent with underlying dist {}; '
'parameters {}; reinterpreted_batch_ndims {}'.format(
underlying, params_used(underlying), reinterpreted_batch_ndims))
result_dist = tfd.Independent(
underlying,
reinterpreted_batch_ndims=reinterpreted_batch_ndims,
validate_args=validate_args)
expected_shape = batch_shape[:len(batch_shape) - reinterpreted_batch_ndims]
if expected_shape != result_dist.batch_shape:
msg = ('Independent strategy generated a bad batch shape '
'for {}, should have been {}.').format(result_dist, expected_shape)
raise AssertionError(msg)
return result_dist
@hps.composite
def samples(
draw, batch_shape=None, event_dim=None,
enable_vars=False, depth=None, eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing `Sample` distributions.
The underlying distribution is drawn from the `distributions` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`Sample` distribution. Hypothesis will pick one if omitted.
event_dim: Optional Python int giving the size of each of the underlying
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
`tfp.util.TransformedVariable`}
depth: Python `int` giving maximum nesting depth of compound Distributions.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `Sample` distributions with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
sample_shape = draw(hps.lists(hps.just(event_dim), min_size=0, max_size=2))
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
underlying = draw(
distributions(
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
depth=depth - 1,
eligibility_filter=eligibility_filter,
validate_args=validate_args))
hp.note('Forming Sample with underlying dist {}; '
'parameters {}; sample_shape {}'.format(
underlying, params_used(underlying), sample_shape))
result_dist = tfd.Sample(
underlying,
sample_shape=sample_shape,
validate_args=validate_args)
if batch_shape != result_dist.batch_shape:
msg = ('`Sample` strategy generated a bad batch shape '
'for {}, should have been {}.').format(result_dist, batch_shape)
raise AssertionError(msg)
return result_dist
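# Illustrative sketch (added for exposition; not part of the original module):
# `tfd.Sample` keeps the underlying batch shape and prepends `sample_shape`
# to the event shape, which is exactly the invariant asserted above.
def _example_sample_shapes():
  base = tfd.Normal(loc=tf.zeros([2]), scale=1.)  # batch [2], event []
  samp = tfd.Sample(base, sample_shape=[5], validate_args=True)
  return samp.batch_shape, samp.event_shape  # ([2], [5])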
@hps.composite
def transformed_distributions(draw,
batch_shape=None,
event_dim=None,
enable_vars=False,
depth=None,
eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing `TransformedDistribution`s.
The transforming bijector is drawn from the
`bijectors.hypothesis_testlib.unconstrained_bijectors` strategy.
The underlying distribution is drawn from the `distributions` strategy, except
that it must be compatible with the bijector according to
`bijectors.hypothesis_testlib.distribution_filter_for` (these generally check
that vector bijectors are not combined with scalar distributions, etc).
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`TransformedDistribution`. The underlying distribution will sometimes
have the same `batch_shape`, and sometimes have scalar batch shape.
Hypothesis will pick a `batch_shape` if omitted.
event_dim: Optional Python int giving the size of each of the underlying
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
`tfp.util.TransformedVariable`}
depth: Python `int` giving maximum nesting depth of compound Distributions.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `TransformedDistribution`s with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
bijector = draw(bijector_hps.unconstrained_bijectors(
validate_args=validate_args))
hp.note('Drawing TransformedDistribution with bijector {}'.format(bijector))
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
def eligibility_fn(name):
if not eligibility_filter(name):
return False
return bijector_hps.distribution_eligilibility_filter_for(bijector)(name)
underlyings = distributions(
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
depth=depth - 1,
eligibility_filter=eligibility_fn,
validate_args=validate_args).filter(
bijector_hps.distribution_filter_for(bijector))
to_transform = draw(underlyings)
hp.note('Forming TransformedDistribution with '
'underlying distribution {}; parameters {}'.format(
to_transform, params_used(to_transform)))
result_dist = tfd.TransformedDistribution(
bijector=bijector,
distribution=to_transform,
validate_args=validate_args)
if batch_shape != result_dist.batch_shape:
msg = ('TransformedDistribution strategy generated a bad batch shape '
'for {}, should have been {}.').format(result_dist, batch_shape)
raise AssertionError(msg)
return result_dist
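# Illustrative sketch (added for exposition; not part of the original module):
# a directly-constructed TransformedDistribution preserves the batch shape of
# the distribution it wraps, which is the invariant asserted above. The local
# bijectors import is an assumption; the original module reaches bijectors
# only through `bijector_hps`.
def _example_transformed_batch_shape():
  from tensorflow_probability import bijectors as tfb
  base = tfd.Normal(loc=tf.zeros([3]), scale=1.)
  td = tfd.TransformedDistribution(
      distribution=base, bijector=tfb.Exp(), validate_args=True)
  return td.batch_shape  # TensorShape([3])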
@hps.composite
def quantized_distributions(draw,
batch_shape=None,
event_dim=None,
enable_vars=False,
eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing `QuantizedDistribution`s.
The underlying distribution is drawn from the `base_distributions` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`QuantizedDistribution`. Hypothesis will pick a `batch_shape` if omitted.
event_dim: Optional Python int giving the size of each of the underlying
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `QuantizedDistribution`s with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
low_quantile = draw(
hps.one_of(
hps.just(None),
hps.floats(min_value=0.01, max_value=0.7)))
high_quantile = draw(
hps.one_of(
hps.just(None),
hps.floats(min_value=0.3, max_value=.99)))
def ok(name):
return eligibility_filter(name) and name in QUANTIZED_BASE_DISTS
underlyings = base_distributions(
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
eligibility_filter=ok,
validate_args=validate_args,
)
underlying = draw(underlyings)
if high_quantile is not None:
high_quantile = tf.convert_to_tensor(high_quantile, dtype=underlying.dtype)
if low_quantile is not None:
low_quantile = tf.convert_to_tensor(low_quantile, dtype=underlying.dtype)
if high_quantile is not None:
high_quantile = ensure_high_gt_low(low_quantile, high_quantile)
hp.note('Drawing QuantizedDistribution with underlying distribution'
' {}'.format(underlying))
try:
low = None if low_quantile is None else underlying.quantile(low_quantile)
high = None if high_quantile is None else underlying.quantile(high_quantile)
except NotImplementedError:
# The following code makes ReproducibilityTest flaky in graph mode (but not
# eager). Failures are due either to partial mismatch in the samples in
# ReproducibilityTest or to `low` and/or `high` being NaN. For now, to avoid
# this, we set `low` and `high` to `None` for distributions not implementing
# `quantile`.
# seed = test_util.test_seed(hardcoded_seed=123)
# low = (None if low_quantile is None
# else underlying.sample(low_quantile.shape, seed=seed))
# high = (None if high_quantile is None else
# underlying.sample(high_quantile.shape, seed=seed))
low = None
high = None
# Ensure that `low` and `high` are ints contained in distribution support
# and span at least a few bins.
if high is not None:
high = tf.clip_by_value(high, -2**23, 2**23)
high = tf.math.ceil(high + 5.)
if low is not None:
low = tf.clip_by_value(low, -2**23, 2**23)
low = tf.math.ceil(low)
result_dist = tfd.QuantizedDistribution(
distribution=underlying,
low=low,
high=high,
validate_args=validate_args)
return result_dist
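# Illustrative sketch (added for exposition; not part of the original module):
# the low/high integerization performed above, written out for a single
# Normal base distribution. Cutpoints are rounded to integers and separated
# by a few bins, mirroring the strategy's post-processing.
def _example_quantized_bounds():
  base = tfd.Normal(loc=0., scale=1.)
  low = tf.math.ceil(base.quantile(0.1))        # integer lower cutpoint
  high = tf.math.ceil(base.quantile(0.9) + 5.)  # at least a few bins higher
  return tfd.QuantizedDistribution(
      distribution=base, low=low, high=high, validate_args=True)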
@hps.composite
def mixtures_same_family(draw,
batch_shape=None,
event_dim=None,
enable_vars=False,
depth=None,
eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing `MixtureSameFamily` distributions.
The component distribution is drawn from the `distributions` strategy.
The Categorical mixture distributions are either shared across all batch
members, or drawn independently for the full batch (as required by
`MixtureSameFamily`).
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`MixtureSameFamily` distribution. The component distribution will have a
batch shape of 1 rank higher (for the components being mixed). Hypothesis
will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the component
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
`tfp.util.TransformedVariable`}
depth: Python `int` giving maximum nesting depth of compound Distributions.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `MixtureSameFamily` distributions with the
specified `batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
# Ensure the components dist has at least one batch dim (a component dim).
batch_shape = draw(tfp_hps.shapes(min_ndims=1, min_lastdimsize=2))
else: # This mixture adds a batch dim to its underlying components dist.
batch_shape = tensorshape_util.concatenate(
batch_shape,
draw(tfp_hps.shapes(min_ndims=1, max_ndims=1, min_lastdimsize=2)))
# Cannot put a BatchReshape into a MixtureSameFamily, because the former
# doesn't support broadcasting, and the latter relies on it. b/161984806.
def nested_eligibility_filter(dist_name):
if dist_name == 'BatchReshape':
return False
return eligibility_filter(dist_name)
component = draw(
distributions(
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
eligibility_filter=nested_eligibility_filter,
depth=depth - 1,
validate_args=validate_args))
hp.note('Drawing MixtureSameFamily with component {}; parameters {}'.format(
component, params_used(component)))
# scalar or same-shaped categorical?
mixture_batch_shape = draw(
hps.one_of(hps.just(batch_shape[:-1]), hps.just(tf.TensorShape([]))))
mixture_dist = draw(base_distributions(
dist_name='Categorical',
batch_shape=mixture_batch_shape,
event_dim=tensorshape_util.as_list(batch_shape)[-1],
enable_vars=enable_vars,
validate_args=validate_args))
hp.note(('Forming MixtureSameFamily with '
'mixture distribution {}; parameters {}').format(
mixture_dist, params_used(mixture_dist)))
result_dist = tfd.MixtureSameFamily(
components_distribution=component,
mixture_distribution=mixture_dist,
validate_args=validate_args)
if batch_shape[:-1] != result_dist.batch_shape:
msg = ('MixtureSameFamily strategy generated a bad batch shape '
'for {}, should have been {}.').format(result_dist, batch_shape[:-1])
raise AssertionError(msg)
return result_dist
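# Illustrative sketch (added for exposition; not part of the original module):
# the shape relationship the strategy above enforces. The components carry one
# extra (rightmost) batch dimension, which MixtureSameFamily consumes, and the
# Categorical mixture may use a scalar batch shape shared across the batch.
def _example_mixture_same_family_shapes():
  components = tfd.Normal(loc=tf.zeros([4, 3]), scale=1.)  # 3 components
  mixture = tfd.Categorical(logits=tf.zeros([3]))          # shared mixture
  msf = tfd.MixtureSameFamily(
      mixture_distribution=mixture,
      components_distribution=components,
      validate_args=True)
  return msf.batch_shape  # TensorShape([4])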
@hps.composite
def mixtures(draw,
batch_shape=None,
event_dim=None,
enable_vars=False,
depth=None,
eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing `Mixture` distributions.
The component distributions are drawn from the `distributions` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
`MixtureSameFamily` distribution. The component distribution will have a
batch shape of 1 rank higher (for the components being mixed). Hypothesis
will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the component
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
`tfp.util.TransformedVariable`}
depth: Python `int` giving maximum nesting depth of compound Distributions.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing `Mixture` distributions with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
# TODO(b/169441746): Re-enable nesting MixtureSameFamily inside Mixture when
# the weird edge case gets fixed.
def nested_eligibility_filter(dist_name):
if dist_name in ['MixtureSameFamily']:
return False
return eligibility_filter(dist_name)
component_strategy = distributions(
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
eligibility_filter=nested_eligibility_filter,
depth=depth - 1,
validate_args=validate_args)
# Must ensure matching event shapes and dtypes.
c0 = draw(component_strategy)
components = [c0] + draw(hps.lists(
component_strategy.filter(
lambda d: (d.event_shape, d.dtype) == (c0.event_shape, c0.dtype)),
min_size=1, max_size=5))
hp.note('Drawing Mixture with components {}; parameters {}'.format(
components, [params_used(c) for c in components]))
cat = draw(base_distributions(
dist_name='Categorical',
batch_shape=batch_shape,
event_dim=len(components),
enable_vars=enable_vars,
validate_args=validate_args))
hp.note('Forming Mixture with cat distribution {}; parameters {}'.format(
cat, params_used(cat)))
result_dist = tfd.Mixture(
cat=cat, components=components,
validate_args=validate_args)
if batch_shape != result_dist.batch_shape:
msg = ('Mixture strategy generated a bad batch shape for {}, should have'
' been {}.').format(result_dist, batch_shape)
raise AssertionError(msg)
return result_dist
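# Illustrative sketch (added for exposition; not part of the original module):
# `tfd.Mixture` takes an explicit list of component distributions with
# matching event shapes and dtypes, plus a Categorical whose number of classes
# equals the number of components -- the same constraints the strategy above
# enforces via filtering.
def _example_mixture():
  cat = tfd.Categorical(logits=tf.zeros([2]))
  comps = [tfd.Normal(loc=0., scale=1.), tfd.Normal(loc=3., scale=2.)]
  return tfd.Mixture(cat=cat, components=comps, validate_args=True)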
@hps.composite
def distributions(draw,
dist_name=None,
batch_shape=None,
event_dim=None,
enable_vars=False,
depth=None,
eligibility_filter=lambda name: True,
validate_args=True):
"""Strategy for drawing arbitrary Distributions.
This may draw compound distributions (i.e., `Independent`,
`MixtureSameFamily`, and/or `TransformedDistribution`), in which case the
underlying distributions are drawn recursively from this strategy as well.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
dist_name: Optional Python `str`. If given, the produced distributions
will all have this type.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Distribution. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
distribution's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`,
`tfp.util.TransformedVariable`}.
depth: Python `int` giving maximum nesting depth of compound Distributions.
      If `None`, Hypothesis will choose one, with a bias towards shallow
nests.
eligibility_filter: Optional Python callable. Blocks some Distribution
class names so they will not be drawn.
validate_args: Python `bool`; whether to enable runtime assertions.
Returns:
dists: A strategy for drawing Distributions with the specified `batch_shape`
(or an arbitrary one if omitted).
Raises:
ValueError: If it doesn't know how to instantiate a Distribution of class
`dist_name`.
"""
if depth is None:
depth = draw(depths())
if dist_name is None and depth > 0:
bases = hps.just(None)
candidates = ['BatchReshape', 'Independent',
'MixtureSameFamily', 'TransformedDistribution']
names = [name for name in candidates if eligibility_filter(name)]
compounds = hps.one_of(map(hps.just, names))
dist_name = draw(hps.one_of([bases, compounds]))
if (dist_name is None
or dist_name in INSTANTIABLE_BASE_DISTS
or dist_name == 'Empirical'):
return draw(base_distributions(
dist_name,
batch_shape=batch_shape,
event_dim=event_dim,
enable_vars=enable_vars,
eligibility_filter=eligibility_filter,
validate_args=validate_args))
if dist_name == 'BatchReshape':
return draw(batch_reshapes(
batch_shape, event_dim, enable_vars, depth,
eligibility_filter, validate_args))
if dist_name == 'Independent':
return draw(independents(
batch_shape, event_dim, enable_vars, depth,
eligibility_filter, validate_args))
if dist_name == 'Sample':
return draw(samples(
batch_shape, event_dim, enable_vars, depth,
eligibility_filter, validate_args))
if dist_name == 'MixtureSameFamily':
return draw(mixtures_same_family(
batch_shape, event_dim, enable_vars, depth,
eligibility_filter, validate_args))
if dist_name == 'Mixture':
return draw(mixtures(
batch_shape, event_dim, enable_vars, depth,
eligibility_filter, validate_args))
if dist_name == 'TransformedDistribution':
return draw(transformed_distributions(
batch_shape, event_dim, enable_vars, depth,
eligibility_filter, validate_args))
if dist_name == 'QuantizedDistribution':
return draw(quantized_distributions(
batch_shape, event_dim, enable_vars,
eligibility_filter, validate_args))
raise ValueError('Unknown Distribution name {}'.format(dist_name))
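# Illustrative sketch (added for exposition; not part of the original module):
# how a property-based test might consume the top-level `distributions`
# strategy. Assumes the module-level `hp` (hypothesis) import used above; the
# test name and body are hypothetical.
@hp.given(distributions(dist_name='Independent', enable_vars=False))
def _example_property_test(dist):
  assert isinstance(dist, tfd.Independent)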
|
the-stack_0_2089 | from rest_framework import serializers
from .models import Entry
from django.contrib.auth.models import User
class UserSerializer(serializers.Serializer):
username = serializers.CharField(max_length=255, min_length=2)
first_name = serializers.CharField(max_length=255, min_length=2)
last_name = serializers.CharField(max_length=255, min_length=2)
password = serializers.CharField(max_length=65, min_length=8, write_only=True)
email = serializers.EmailField(max_length=255, min_length=4)
class Meta:
model = User
fields = [
'id',
'username',
'first_name',
'last_name',
'email'
]
def validate(self, attrs):
email = attrs.get('email', '')
username = attrs.get('username')
if User.objects.filter(email=email).exists():
raise serializers.ValidationError({'email': ('Email already in use')})
if User.objects.filter(username=username).exists():
            raise serializers.ValidationError({'username': ('Username already in use')})
return super().validate(attrs)
def create(self, validated_data):
return User.objects.create_user(**validated_data)
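# Illustrative usage sketch (added for exposition; not part of the original
# module): validating request-style data and creating a user through
# UserSerializer. The field values below are hypothetical.
def _example_register_user():
    serializer = UserSerializer(data={
        'username': 'jdoe',
        'first_name': 'Jane',
        'last_name': 'Doe',
        'email': '[email protected]',
        'password': 'a-sufficiently-long-password',
    })
    serializer.is_valid(raise_exception=True)  # runs validate() above
    return serializer.save()                   # dispatches to create()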
class EntrySerializer(serializers.ModelSerializer):
class Meta:
model = Entry
fields = [
'id',
'owner',
'title',
'state',
'lga',
'ward',
'PMV_name',
'geopoint',
'patientRecordAvailable',
'patientWithFebrileIllness',
'totalNoOfFeverCases',
'testToKnowCauseOfFever',
'typeOfTest',
'noOf5mRDTTestedFeverCases',
'noOfU5mRDTTestedFeverCases',
'noOf5mRDTTestedPositiveFeverCases',
'noOfU5mRDTTestedPositiveFeverCases',
'typeOfTreamentGivenToPositivePatient',
'typeOfTreamentGivenToFebrilePatientAndNotTested',
'IECMaterialAvailableOnDisplay',
'date'
]
# title = serializers.CharField(max_length=255)
# state = serializers.CharField(max_length=255)
# lga =serializers.CharField(max_length=255)
# ward = serializers.CharField(max_length=255)
# PMV_name = serializers.CharField(max_length=255)
# geopoint = serializers.CharField(max_length=255)
# patientRecordAvailable = serializers.BooleanField(default=True)
# patientWithFebrileIllness = serializers.BooleanField(default=False)
# totalNoOfFeverCases = serializers.CharField(max_length=255)
# testToKnowCauseOfFever = serializers.BooleanField(default=True)
# typeOfTest = serializers.CharField(max_length=255)
# noOf5mRDTTestedFeverCases = serializers.CharField(max_length=255)
# noOfU5mRDTTestedFeverCases = serializers.CharField(max_length=255)
# noOf5mRDTTestedPositiveFeverCases = serializers.CharField(max_length=255)
# noOfU5mRDTTestedPositiveFeverCases = serializers.CharField(max_length=255)
# typeOfTreamentGivenToPositivePatient = serializers.CharField(max_length=255)
# typeOfTreamentGivenToFebrilePatientAndNotTested = serializers.CharField(max_length=255)
# IECMaterialAvailableOnDisplay = serializers.BooleanField(default=True)
# date = serializers.DateTimeField()
# def create(self, validated_data):
# return Entry.objects.create(validated_data)
# def update(self, instance, validated_data):
# instance.title = validated_data.get('title', instance.title)
# instance.state = validated_data.get('state', instance.state)
# instance.lga = validated_data.get('lga', instance.lga)
# instance.ward = validated_data.get('ward', instance.ward)
# instance.PMV_name = validated_data.get('PMV_name', instance.PMV_name)
# instance.geopoint = validated_data.get('geopoint', instance.geopoint)
# instance.patientRecordAvailable = validated_data.get('patientRecordAvailable', instance.patientRecordAvailable)
# instance.patientWithFebrileIllness = validated_data.get('patientWithFebrileIllness', instance.patientWithFebrileIllness)
# instance.totalNoOfFeverCases = validated_data.get('totalNoOfFeverCases', instance.totalNoOfFeverCases)
# instance.testToKnowCauseOfFever = validated_data.get('testToKnowCauseOfFever', instance.testToKnowCauseOfFever)
# instance.typeOfTest = validated_data.get('typeOfTest', instance.typeOfTest)
# instance.noOf5mRDTTestedFeverCases = validated_data.get('noOf5mRDTTestedFeverCases', instance.noOf5mRDTTestedFeverCases)
# instance.noOfU5mRDTTestedFeverCases = validated_data.get('noOfU5mRDTTestedFeverCases', instance.noOfU5mRDTTestedFeverCases)
# instance.noOf5mRDTTestedPositiveFeverCases = validated_data.get('noOf5mRDTTestedPositiveFeverCases', instance.noOf5mRDTTestedPositiveFeverCases)
# instance.noOfU5mRDTTestedPositiveFeverCases = validated_data.get('noOfU5mRDTTestedPositiveFeverCases', instance.noOfU5mRDTTestedPositiveFeverCases)
# instance.typeOfTreamentGivenToPositivePatient = validated_data.get('typeOfTreamentGivenToPositivePatient', instance.typeOfTreamentGivenToPositivePatient)
# instance.typeOfTreamentGivenToFebrilePatientAndNotTested = validated_data.get('typeOfTreamentGivenToFebrilePatientAndNotTested', instance.typeOfTreamentGivenToFebrilePatientAndNotTested)
# instance.IECMaterialAvailableOnDisplay = validated_data.get('IECMaterialAvailableOnDisplay', instance.IECMaterialAvailableOnDisplay)
# instance.date = validated_data.get('date', instance.date)
# instance.save()
# return instance
|
the-stack_0_2090 | """An ellipse widget."""
from typing import Optional
from kivy.graphics.vertex_instructions import Ellipse as KivyEllipse
from kivy.graphics.context_instructions import Color, Rotate, Scale
from kivy.properties import NumericProperty
from mpfmc.uix.widget import Widget
MYPY = False
if MYPY: # pragma: no cover
from mpfmc.core.mc import MpfMc # pylint: disable-msg=cyclic-import,unused-import
class Ellipse(Widget):
"""An ellipse widget."""
widget_type_name = 'Ellipse'
animation_properties = ('x', 'y', 'width', 'pos', 'height', 'size', 'color',
'angle_start', 'angle_end', 'opacity', 'rotation', 'scale')
merge_settings = ('width', 'height')
def __init__(self, mc: "MpfMc", config: dict, key: Optional[str] = None, **kwargs) -> None:
del kwargs
super().__init__(mc=mc, config=config, key=key)
# Bind to all properties that when changed need to force
# the widget to be redrawn
self.bind(pos=self._draw_widget,
size=self._draw_widget,
color=self._draw_widget,
rotation=self._draw_widget,
scale=self._draw_widget,
segments=self._draw_widget,
angle_start=self._draw_widget,
angle_end=self._draw_widget)
self._draw_widget()
def _draw_widget(self, *args) -> None:
del args
if self.canvas is None:
return
anchor = (self.x - self.anchor_offset_pos[0], self.y - self.anchor_offset_pos[1])
self.canvas.clear()
with self.canvas:
Color(*self.color)
Rotate(angle=self.rotation, origin=anchor)
Scale(self.scale).origin = anchor
KivyEllipse(pos=self.pos, size=self.size,
segments=self.segments,
angle_start=self.angle_start,
angle_end=self.angle_end)
#
# Properties
#
segments = NumericProperty(180)
'''Defines how many segments will be used for drawing the ellipse. The
drawing will be smoother if you have many segments.
'''
angle_start = NumericProperty(0)
'''Specifies the starting angle, in degrees, of the disk portion of
the ellipse.
'''
angle_end = NumericProperty(360)
'''Specifies the ending angle, in degrees, of the disk portion of
the ellipse.
'''
rotation = NumericProperty(0)
scale = NumericProperty(1.0)
widget_classes = [Ellipse]
|
the-stack_0_2093 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_publicipaddress
version_added: "2.1"
short_description: Manage Azure Public IP Addresses
description:
- Create, update and delete a Public IP address.
- Allows setting and updating the address allocation method and domain name label.
- Use the M(azure_rm_networkinterface) module to associate a Public IP with a network interface.
options:
resource_group:
description:
- Name of resource group with which the Public IP is associated.
required: true
allocation_method:
description:
- Control whether the assigned Public IP remains permanently assigned to the object.
            - If not set to C(Static), the IP address may change any time an associated virtual machine is power cycled.
choices:
- dynamic
- static
- Static
- Dynamic
default: dynamic
domain_name:
description:
- The customizable portion of the FQDN assigned to public IP address. This is an explicit setting.
- If no value is provided, any existing value will be removed on an existing public IP.
aliases:
- domain_name_label
name:
description:
- Name of the Public IP.
required: true
state:
description:
            - Assert the state of the Public IP. Use C(present) to create or update a public IP and C(absent) to delete.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
sku:
description:
- The public IP address SKU.
choices:
- basic
- standard
- Basic
- Standard
version_added: "2.6"
ip_tags:
description:
- List of IpTag associated with the public IP address.
- Each element should contain type:value pair.
suboptions:
type:
description:
- Sets the ip_tags type.
value:
description:
- Sets the ip_tags value.
version_added: "2.8"
idle_timeout:
description:
- Idle timeout in minutes.
type: int
version_added: "2.8"
version:
description:
- The public IP address version.
choices:
- ipv4
- ipv6
default: ipv4
version_added: "2.8"
extends_documentation_fragment:
- azure
- azure_tags
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
'''
EXAMPLES = '''
- name: Create a public ip address
azure_rm_publicipaddress:
resource_group: myResourceGroup
name: my_public_ip
allocation_method: static
domain_name: foobar
- name: Delete public ip
azure_rm_publicipaddress:
resource_group: myResourceGroup
name: my_public_ip
state: absent
'''
RETURN = '''
state:
description:
- Facts about the current state of the object.
returned: always
type: complex
contains:
dns_settings:
description:
- The FQDN of the DNS record associated with the public IP address.
returned: always
type: dict
sample: {
"domain_name_label": "ansible-b57dc95985712e45eb8b9c2e",
"fqdn": "ansible-b57dc95985712e45eb8b9c2e.eastus.cloudapp.azure.com",
"reverse_fqdn": null
}
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
returned: always
type: str
sample: "W/'1905ee13-7623-45b1-bc6b-4a12b2fb9d15'"
idle_timeout_in_minutes:
description:
- The idle timeout of the public IP address.
returned: always
type: int
sample: 4
ip_address:
description:
                - The assigned public IP address.
returned: always
type: str
sample: 52.160.103.93
location:
description:
- Resource location.
returned: always
type: str
            sample: eastus
name:
description:
- Name of the Public IP Address.
returned: always
type: str
            sample: publicip002
provisioning_state:
description:
- The provisioning state of the Public IP resource.
returned: always
type: str
            sample: Succeeded
public_ip_allocation_method:
description:
- The public IP allocation method.
returned: always
type: str
sample: static
public_ip_address_version:
description:
- The public IP address version.
returned: always
type: str
sample: ipv4
sku:
description:
- The public IP address SKU.
returned: always
type: str
sample: Basic
tags:
description:
- The resource tags.
returned: always
type: dict
sample: {
"delete": "on-exit",
"testing": "testing"
}
type:
description:
- Type of the resource.
returned: always
type: str
sample: "Microsoft.Network/publicIPAddresses"
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
def pip_to_dict(pip):
result = dict(
name=pip.name,
type=pip.type,
location=pip.location,
tags=pip.tags,
public_ip_allocation_method=pip.public_ip_allocation_method.lower(),
public_ip_address_version=pip.public_ip_address_version.lower(),
dns_settings=dict(),
ip_address=pip.ip_address,
idle_timeout_in_minutes=pip.idle_timeout_in_minutes,
provisioning_state=pip.provisioning_state,
etag=pip.etag,
sku=pip.sku.name
)
if pip.dns_settings:
result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label
result['dns_settings']['fqdn'] = pip.dns_settings.fqdn
result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn
if pip.ip_tags:
result['ip_tags'] = [dict(type=to_native(x.ip_tag_type), value=to_native(x.tag)) for x in pip.ip_tags]
return result
ip_tag_spec = dict(
type=dict(type='str', required=True),
value=dict(type='str', required=True)
)
class AzureRMPublicIPAddress(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
allocation_method=dict(type='str', default='dynamic', choices=['Dynamic', 'Static', 'dynamic', 'static']),
domain_name=dict(type='str', aliases=['domain_name_label']),
sku=dict(type='str', choices=['Basic', 'Standard', 'basic', 'standard']),
ip_tags=dict(type='list', elements='dict', options=ip_tag_spec),
idle_timeout=dict(type='int')
)
self.resource_group = None
self.name = None
self.location = None
self.state = None
self.tags = None
self.allocation_method = None
self.domain_name = None
self.sku = None
self.version = None
self.ip_tags = None
self.idle_timeout = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMPublicIPAddress, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
results = dict()
changed = False
pip = None
# capitalize the sku and allocation_method. basic => Basic, Basic => Basic.
self.allocation_method = self.allocation_method.capitalize() if self.allocation_method else None
self.sku = self.sku.capitalize() if self.sku else None
self.version = 'IPv4' if self.version == 'ipv4' else 'IPv6'
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
try:
self.log("Fetch public ip {0}".format(self.name))
pip = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
self.check_provisioning_state(pip, self.state)
self.log("PIP {0} exists".format(self.name))
if self.state == 'present':
results = pip_to_dict(pip)
                domain_label = results['dns_settings'].get('domain_name_label')
                if self.domain_name is not None and ((self.domain_name or domain_label) and self.domain_name != domain_label):
self.log('CHANGED: domain_name_label')
changed = True
results['dns_settings']['domain_name_label'] = self.domain_name
if self.allocation_method.lower() != results['public_ip_allocation_method'].lower():
self.log("CHANGED: allocation_method")
changed = True
results['public_ip_allocation_method'] = self.allocation_method
if self.sku and self.sku != results['sku']:
self.log("CHANGED: sku")
changed = True
results['sku'] = self.sku
if self.version.lower() != results['public_ip_address_version'].lower():
self.log("CHANGED: version")
changed = True
results['public_ip_address_version'] = self.version
if self.idle_timeout and self.idle_timeout != results['idle_timeout_in_minutes']:
self.log("CHANGED: idle_timeout")
changed = True
results['idle_timeout_in_minutes'] = self.idle_timeout
if str(self.ip_tags or []) != str(results.get('ip_tags') or []):
self.log("CHANGED: ip_tags")
changed = True
results['ip_tags'] = self.ip_tags
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
elif self.state == 'absent':
self.log("CHANGED: public ip {0} exists but requested state is 'absent'".format(self.name))
changed = True
except CloudError:
self.log('Public ip {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: pip {0} does not exist but requested state is 'present'".format(self.name))
changed = True
self.results['state'] = results
self.results['changed'] = changed
if self.check_mode:
return results
if changed:
if self.state == 'present':
if not pip:
self.log("Create new Public IP {0}".format(self.name))
pip = self.network_models.PublicIPAddress(
location=self.location,
public_ip_address_version=self.version,
public_ip_allocation_method=self.allocation_method if self.version == 'IPv4' else None,
sku=self.network_models.PublicIPAddressSku(name=self.sku) if self.sku else None,
idle_timeout_in_minutes=self.idle_timeout if self.idle_timeout and self.idle_timeout > 0 else None
)
if self.ip_tags:
                        pip.ip_tags = [self.network_models.IpTag(ip_tag_type=x['type'], tag=x['value']) for x in self.ip_tags]
if self.tags:
pip.tags = self.tags
if self.domain_name:
pip.dns_settings = self.network_models.PublicIPAddressDnsSettings(
domain_name_label=self.domain_name
)
else:
self.log("Update Public IP {0}".format(self.name))
pip = self.network_models.PublicIPAddress(
location=results['location'],
public_ip_allocation_method=results['public_ip_allocation_method'],
tags=results['tags']
)
if self.domain_name:
pip.dns_settings = self.network_models.PublicIPAddressDnsSettings(
domain_name_label=self.domain_name
)
self.results['state'] = self.create_or_update_pip(pip)
elif self.state == 'absent':
self.log('Delete public ip {0}'.format(self.name))
self.delete_pip()
return self.results
def create_or_update_pip(self, pip):
try:
poller = self.network_client.public_ip_addresses.create_or_update(self.resource_group, self.name, pip)
pip = self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating {0} - {1}".format(self.name, str(exc)))
return pip_to_dict(pip)
def delete_pip(self):
try:
poller = self.network_client.public_ip_addresses.delete(self.resource_group, self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error deleting {0} - {1}".format(self.name, str(exc)))
# Delete returns nada. If we get here, assume that all is well.
self.results['state']['status'] = 'Deleted'
return True
def main():
AzureRMPublicIPAddress()
if __name__ == '__main__':
main()
|
the-stack_0_2094 | # -*- coding: utf-8 -*-
"""Python's built-in :mod:`functools` module builds several useful
utilities on top of Python's first-class function
support. ``funcutils`` generally stays in the same vein, adding to and
correcting Python's standard metaprogramming facilities.
"""
from __future__ import print_function
import sys
import re
import inspect
import functools
import itertools
from types import MethodType, FunctionType
try:
xrange
make_method = MethodType
except NameError:
# Python 3
make_method = lambda desc, obj, obj_type: MethodType(desc, obj)
basestring = (str, bytes) # Python 3 compat
_IS_PY2 = False
else:
_IS_PY2 = True
try:
_inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
# Python 3.4
_inspect_iscoroutinefunction = lambda func: False
try:
from boltons.typeutils import make_sentinel
NO_DEFAULT = make_sentinel(var_name='NO_DEFAULT')
except ImportError:
NO_DEFAULT = object()
def get_module_callables(mod, ignore=None):
"""Returns two maps of (*types*, *funcs*) from *mod*, optionally
ignoring based on the :class:`bool` return value of the *ignore*
callable. *mod* can be a string name of a module in
:data:`sys.modules` or the module instance itself.
"""
if isinstance(mod, basestring):
mod = sys.modules[mod]
types, funcs = {}, {}
for attr_name in dir(mod):
if ignore and ignore(attr_name):
continue
try:
attr = getattr(mod, attr_name)
except Exception:
continue
try:
attr_mod_name = attr.__module__
except AttributeError:
continue
if attr_mod_name != mod.__name__:
continue
if isinstance(attr, type):
types[attr_name] = attr
elif callable(attr):
funcs[attr_name] = attr
return types, funcs
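# Illustrative sketch (added for exposition; not part of the original module):
# get_module_callables() applied to this very module, skipping private names.
def _example_list_funcutils_callables():
    types, funcs = get_module_callables(
        __name__, ignore=lambda attr_name: attr_name.startswith('_'))
    return sorted(types), sorted(funcs)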
def mro_items(type_obj):
"""Takes a type and returns an iterator over all class variables
throughout the type hierarchy (respecting the MRO).
>>> sorted(set([k for k, v in mro_items(int) if not k.startswith('__') and 'bytes' not in k and not callable(v)]))
['denominator', 'imag', 'numerator', 'real']
"""
# TODO: handle slots?
return itertools.chain.from_iterable(ct.__dict__.items()
for ct in type_obj.__mro__)
def dir_dict(obj, raise_exc=False):
"""Return a dictionary of attribute names to values for a given
object. Unlike ``obj.__dict__``, this function returns all
attributes on the object, including ones on parent classes.
"""
# TODO: separate function for handling descriptors on types?
ret = {}
for k in dir(obj):
try:
ret[k] = getattr(obj, k)
except Exception:
if raise_exc:
raise
return ret
def copy_function(orig, copy_dict=True):
"""Returns a shallow copy of the function, including code object,
globals, closure, etc.
>>> func = lambda: func
>>> func() is func
True
>>> func_copy = copy_function(func)
>>> func_copy() is func
True
>>> func_copy is not func
True
Args:
orig (function): The function to be copied. Must be a
function, not just any method or callable.
copy_dict (bool): Also copy any attributes set on the function
instance. Defaults to ``True``.
"""
ret = FunctionType(orig.__code__,
orig.__globals__,
name=orig.__name__,
argdefs=getattr(orig, "__defaults__", None),
closure=getattr(orig, "__closure__", None))
if copy_dict:
ret.__dict__.update(orig.__dict__)
return ret
def partial_ordering(cls):
"""Class decorator, similar to :func:`functools.total_ordering`,
except it is used to define `partial orderings`_ (i.e., it is
possible that *x* is neither greater than, equal to, or less than
*y*). It assumes the presence of the ``__le__()`` and ``__ge__()``
method, but nothing else. It will not override any existing
additional comparison methods.
.. _partial orderings: https://en.wikipedia.org/wiki/Partially_ordered_set
>>> @partial_ordering
... class MySet(set):
... def __le__(self, other):
... return self.issubset(other)
... def __ge__(self, other):
... return self.issuperset(other)
...
>>> a = MySet([1,2,3])
>>> b = MySet([1,2])
>>> c = MySet([1,2,4])
>>> b < a
True
>>> b > a
False
>>> b < c
True
>>> a < c
False
>>> c > a
False
"""
def __lt__(self, other): return self <= other and not self >= other
def __gt__(self, other): return self >= other and not self <= other
def __eq__(self, other): return self >= other and self <= other
if not hasattr(cls, '__lt__'): cls.__lt__ = __lt__
if not hasattr(cls, '__gt__'): cls.__gt__ = __gt__
if not hasattr(cls, '__eq__'): cls.__eq__ = __eq__
return cls
class InstancePartial(functools.partial):
""":class:`functools.partial` is a huge convenience for anyone
working with Python's great first-class functions. It allows
developers to curry arguments and incrementally create simpler
callables for a variety of use cases.
Unfortunately there's one big gap in its usefulness:
methods. Partials just don't get bound as methods and
automatically handed a reference to ``self``. The
``InstancePartial`` type remedies this by inheriting from
:class:`functools.partial` and implementing the necessary
descriptor protocol. There are no other differences in
implementation or usage. :class:`CachedInstancePartial`, below,
has the same ability, but is slightly more efficient.
"""
def __get__(self, obj, obj_type):
return make_method(self, obj, obj_type)
class CachedInstancePartial(functools.partial):
"""The ``CachedInstancePartial`` is virtually the same as
:class:`InstancePartial`, adding support for method-usage to
:class:`functools.partial`, except that upon first access, it
caches the bound method on the associated object, speeding it up
for future accesses, and bringing the method call overhead to
about the same as non-``partial`` methods.
See the :class:`InstancePartial` docstring for more details.
"""
def __get__(self, obj, obj_type):
# These assignments could've been in __init__, but there was
# no simple way to do it without breaking one of PyPy or Py3.
self.__name__ = None
self.__doc__ = self.func.__doc__
self.__module__ = self.func.__module__
name = self.__name__
if name is None:
for k, v in mro_items(obj_type):
if v is self:
self.__name__ = name = k
if obj is None:
return make_method(self, obj, obj_type)
try:
# since this is a data descriptor, this block
# is probably only hit once (per object)
return obj.__dict__[name]
except KeyError:
obj.__dict__[name] = ret = make_method(self, obj, obj_type)
return ret
partial = CachedInstancePartial
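# Illustrative sketch (added for exposition; not part of the original module):
# CachedInstancePartial bound as a method-like class attribute. Stored keyword
# arguments are pre-filled; on access the partial is bound to the instance
# (which arrives as `self`) and cached on the object for later calls.
class _ExampleGreeter(object):
    def _greet(self, greeting='Hello', name='World'):
        return '%s, %s!' % (greeting, name)
    excited_greet = CachedInstancePartial(_greet, greeting='Hey')
# _ExampleGreeter().excited_greet() -> 'Hey, World!'
# _ExampleGreeter().excited_greet(name='boltons') -> 'Hey, boltons!'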
# # #
# # # Function builder
# # #
def wraps(func, injected=None, expected=None, **kw):
"""Modeled after the built-in :func:`functools.wraps`, this function is
used to make your decorator's wrapper functions reflect the
wrapped function's:
* Name
* Documentation
* Module
* Signature
The built-in :func:`functools.wraps` copies the first three, but
does not copy the signature. This version of ``wraps`` can copy
the inner function's signature exactly, allowing seamless usage
and :mod:`introspection <inspect>`. Usage is identical to the
built-in version::
>>> from boltons.funcutils import wraps
>>>
>>> def print_return(func):
... @wraps(func)
... def wrapper(*args, **kwargs):
... ret = func(*args, **kwargs)
... print(ret)
... return ret
... return wrapper
...
>>> @print_return
... def example():
... '''docstring'''
... return 'example return value'
>>>
>>> val = example()
example return value
>>> example.__name__
'example'
>>> example.__doc__
'docstring'
In addition, the boltons version of wraps supports modifying the
outer signature based on the inner signature. By passing a list of
*injected* argument names, those arguments will be removed from
the outer wrapper's signature, allowing your decorator to provide
arguments that aren't passed in.
Args:
func (function): The callable whose attributes are to be copied.
injected (list): An optional list of argument names which
should not appear in the new wrapper's signature.
expected (list): An optional list of argument names (or (name,
default) pairs) representing new arguments introduced by
the wrapper (the opposite of *injected*). See
:meth:`FunctionBuilder.add_arg()` for more details.
update_dict (bool): Whether to copy other, non-standard
attributes of *func* over to the wrapper. Defaults to True.
inject_to_varkw (bool): Ignore missing arguments when a
``**kwargs``-type catch-all is present. Defaults to True.
For more in-depth wrapping of functions, see the
:class:`FunctionBuilder` type, on which wraps was built.
"""
if injected is None:
injected = []
elif isinstance(injected, basestring):
injected = [injected]
else:
injected = list(injected)
expected_items = _parse_wraps_expected(expected)
if isinstance(func, (classmethod, staticmethod)):
raise TypeError('wraps does not support wrapping classmethods and'
' staticmethods, change the order of wrapping to'
' wrap the underlying function: %r'
% (getattr(func, '__func__', None),))
update_dict = kw.pop('update_dict', True)
inject_to_varkw = kw.pop('inject_to_varkw', True)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
fb = FunctionBuilder.from_func(func)
for arg in injected:
try:
fb.remove_arg(arg)
except MissingArgument:
if inject_to_varkw and fb.varkw is not None:
continue # keyword arg will be caught by the varkw
raise
for arg, default in expected_items:
fb.add_arg(arg, default) # may raise ExistingArgument
if fb.is_async:
fb.body = 'return await _call(%s)' % fb.get_invocation_str()
else:
fb.body = 'return _call(%s)' % fb.get_invocation_str()
def wrapper_wrapper(wrapper_func):
execdict = dict(_call=wrapper_func, _func=func)
fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
fully_wrapped.__wrapped__ = func # ref to the original function (#115)
return fully_wrapped
return wrapper_wrapper
def _parse_wraps_expected(expected):
# expected takes a pretty powerful argument, it's processed
# here. admittedly this would be less trouble if I relied on
# OrderedDict (there's an impl of that in the commit history if
    # you look)
if expected is None:
expected = []
elif isinstance(expected, basestring):
expected = [(expected, NO_DEFAULT)]
expected_items = []
try:
expected_iter = iter(expected)
except TypeError as e:
raise ValueError('"expected" takes string name, sequence of string names,'
' iterable of (name, default) pairs, or a mapping of '
' {name: default}, not %r (got: %r)' % (expected, e))
for argname in expected_iter:
if isinstance(argname, basestring):
# dict keys and bare strings
try:
default = expected[argname]
except TypeError:
default = NO_DEFAULT
else:
# pairs
try:
argname, default = argname
except (TypeError, ValueError):
raise ValueError('"expected" takes string name, sequence of string names,'
' iterable of (name, default) pairs, or a mapping of '
' {name: default}, not %r')
if not isinstance(argname, basestring):
raise ValueError('all "expected" argnames must be strings, not %r' % (argname,))
expected_items.append((argname, default))
return expected_items
class FunctionBuilder(object):
"""The FunctionBuilder type provides an interface for programmatically
creating new functions, either based on existing functions or from
scratch.
Values are passed in at construction or set as attributes on the
instance. For creating a new function based of an existing one,
see the :meth:`~FunctionBuilder.from_func` classmethod. At any
point, :meth:`~FunctionBuilder.get_func` can be called to get a
newly compiled function, based on the values configured.
>>> fb = FunctionBuilder('return_five', doc='returns the integer 5',
... body='return 5')
>>> f = fb.get_func()
>>> f()
5
>>> fb.varkw = 'kw'
>>> f_kw = fb.get_func()
>>> f_kw(ignored_arg='ignored_val')
5
Note that function signatures themselves changed quite a bit in
Python 3, so several arguments are only applicable to
FunctionBuilder in Python 3. Except for *name*, all arguments to
the constructor are keyword arguments.
Args:
name (str): Name of the function.
doc (str): `Docstring`_ for the function, defaults to empty.
module (str): Name of the module from which this function was
imported. Defaults to None.
body (str): String version of the code representing the body
of the function. Defaults to ``'pass'``, which will result
in a function which does nothing and returns ``None``.
args (list): List of argument names, defaults to empty list,
denoting no arguments.
varargs (str): Name of the catch-all variable for positional
arguments. E.g., "args" if the resultant function is to have
``*args`` in the signature. Defaults to None.
varkw (str): Name of the catch-all variable for keyword
arguments. E.g., "kwargs" if the resultant function is to have
``**kwargs`` in the signature. Defaults to None.
defaults (dict): A mapping of argument names to default values.
kwonlyargs (list): Argument names which are only valid as
keyword arguments. **Python 3 only.**
kwonlydefaults (dict): A mapping, same as normal *defaults*,
but only for the *kwonlyargs*. **Python 3 only.**
annotations (dict): Mapping of type hints and so
forth. **Python 3 only.**
filename (str): The filename that will appear in
tracebacks. Defaults to "boltons.funcutils.FunctionBuilder".
indent (int): Number of spaces with which to indent the
function *body*. Values less than 1 will result in an error.
dict (dict): Any other attributes which should be added to the
functions compiled with this FunctionBuilder.
All of these arguments are also made available as attributes which
can be mutated as necessary.
.. _Docstring: https://en.wikipedia.org/wiki/Docstring#Python
"""
if _IS_PY2:
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None}
@classmethod
def _argspec_to_dict(cls, f):
args, varargs, varkw, defaults = inspect.getargspec(f)
return {'args': args,
'varargs': varargs,
'varkw': varkw,
'defaults': defaults}
else:
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None,
'kwonlyargs': list,
'kwonlydefaults': dict,
'annotations': dict}
@classmethod
def _argspec_to_dict(cls, f):
argspec = inspect.getfullargspec(f)
return dict((attr, getattr(argspec, attr))
for attr in cls._argspec_defaults)
_defaults = {'doc': str,
'dict': dict,
'is_async': lambda: False,
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
'filename': lambda: 'boltons.funcutils.FunctionBuilder'}
_defaults.update(_argspec_defaults)
_compile_count = itertools.count()
def __init__(self, name, **kw):
self.name = name
for a, default_factory in self._defaults.items():
val = kw.pop(a, None)
if val is None:
val = default_factory()
setattr(self, a, val)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
return
# def get_argspec(self): # TODO
if _IS_PY2:
def get_sig_str(self):
return inspect.formatargspec(self.args, self.varargs,
self.varkw, [])
def get_invocation_str(self):
return inspect.formatargspec(self.args, self.varargs,
self.varkw, [])[1:-1]
else:
def get_sig_str(self):
return inspect.formatargspec(self.args,
self.varargs,
self.varkw,
[],
self.kwonlyargs,
{},
self.annotations)
_KWONLY_MARKER = re.compile(r"""
\* # a star
\s* # followed by any amount of whitespace
, # followed by a comma
\s* # followed by any amount of whitespace
""", re.VERBOSE)
def get_invocation_str(self):
kwonly_pairs = None
formatters = {}
if self.kwonlyargs:
kwonly_pairs = dict((arg, arg)
for arg in self.kwonlyargs)
formatters['formatvalue'] = lambda value: '=' + value
sig = inspect.formatargspec(self.args,
self.varargs,
self.varkw,
[],
kwonly_pairs,
kwonly_pairs,
{},
**formatters)
sig = self._KWONLY_MARKER.sub('', sig)
return sig[1:-1]
@classmethod
def from_func(cls, func):
"""Create a new FunctionBuilder instance based on an existing
function. The original function will not be stored or
modified.
"""
# TODO: copy_body? gonna need a good signature regex.
# TODO: might worry about __closure__?
if not callable(func):
raise TypeError('expected callable object, not %r' % (func,))
kwargs = {'name': func.__name__,
'doc': func.__doc__,
'module': func.__module__,
'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
if _inspect_iscoroutinefunction(func):
kwargs['is_async'] = True
return cls(**kwargs)
def get_func(self, execdict=None, add_source=True, with_dict=True):
"""Compile and return a new function based on the current values of
the FunctionBuilder.
Args:
execdict (dict): The dictionary representing the scope in
which the compilation should take place. Defaults to an empty
dict.
add_source (bool): Whether to add the source used to a
special ``__source__`` attribute on the resulting
function. Defaults to True.
with_dict (bool): Add any custom attributes, if
applicable. Defaults to True.
To see an example of usage, see the implementation of
:func:`~boltons.funcutils.wraps`.
"""
execdict = execdict or {}
body = self.body or self._default_body
tmpl = 'def {name}{sig_str}:'
tmpl += '\n{body}'
if self.is_async:
tmpl = 'async ' + tmpl
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
src = tmpl.format(name=name, sig_str=self.get_sig_str(),
doc=self.doc, body=body)
self._compile(src, execdict)
func = execdict[name]
func.__name__ = self.name
func.__doc__ = self.doc
func.__defaults__ = self.defaults
if not _IS_PY2:
func.__kwdefaults__ = self.kwonlydefaults
if with_dict:
func.__dict__.update(self.dict)
func.__module__ = self.module
# TODO: caller module fallback?
if add_source:
func.__source__ = src
return func
def get_defaults_dict(self):
"""Get a dictionary of function arguments with defaults and the
respective values.
"""
ret = dict(reversed(list(zip(reversed(self.args),
reversed(self.defaults or [])))))
return ret
if _IS_PY2:
def add_arg(self, arg_name, default=NO_DEFAULT):
"Add an argument with optional *default* (defaults to ``funcutils.NO_DEFAULT``)."
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
return
else:
def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False):
"""Add an argument with optional *default* (defaults to
``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a
keyword-only argument
"""
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
if arg_name in self.kwonlyargs:
raise ExistingArgument('arg %r already in func %s kwonly arg list' % (arg_name, self.name))
if not kwonly:
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
else:
self.kwonlyargs.append(arg_name)
if default is not NO_DEFAULT:
self.kwonlydefaults[arg_name] = default
return
def remove_arg(self, arg_name):
"""Remove an argument from this FunctionBuilder's argument list. The
resulting function will have one less argument per call to
this function.
Args:
arg_name (str): The name of the argument to remove.
Raises a :exc:`ValueError` if the argument is not present.
"""
args = self.args
d_dict = self.get_defaults_dict()
try:
args.remove(arg_name)
except ValueError:
try:
self.kwonlyargs.remove(arg_name)
except (AttributeError, ValueError):
# py2, or py3 and missing from both
exc = MissingArgument('arg %r not found in %s argument list:'
' %r' % (arg_name, self.name, args))
exc.arg_name = arg_name
raise exc
else:
self.kwonlydefaults.pop(arg_name, None)
else:
d_dict.pop(arg_name, None)
self.defaults = tuple([d_dict[a] for a in args if a in d_dict])
return
def _compile(self, src, execdict):
filename = ('<%s-%d>'
% (self.filename, next(self._compile_count),))
try:
code = compile(src, filename, 'single')
exec(code, execdict)
except Exception:
raise
return execdict
class MissingArgument(ValueError):
pass
class ExistingArgument(ValueError):
pass
def _indent(text, margin, newline='\n', key=bool):
"based on boltons.strutils.indent"
indented_lines = [(margin + line if key(line) else line)
for line in text.splitlines()]
return newline.join(indented_lines)
try:
from functools import total_ordering # 2.7+
except ImportError:
# python 2.6
def total_ordering(cls):
"""Class decorator that fills in missing comparators/ordering
methods. Backport of :func:`functools.total_ordering` to work
with Python 2.6.
Code from http://code.activestate.com/recipes/576685/
"""
convert = {
'__lt__': [
('__gt__',
lambda self, other: not (self < other or self == other)),
('__le__',
lambda self, other: self < other or self == other),
('__ge__',
lambda self, other: not self < other)],
'__le__': [
('__ge__',
lambda self, other: not self <= other or self == other),
('__lt__',
lambda self, other: self <= other and not self == other),
('__gt__',
lambda self, other: not self <= other)],
'__gt__': [
('__lt__',
lambda self, other: not (self > other or self == other)),
('__ge__',
lambda self, other: self > other or self == other),
('__le__',
lambda self, other: not self > other)],
'__ge__': [
('__le__',
lambda self, other: (not self >= other) or self == other),
('__gt__',
lambda self, other: self >= other and not self == other),
('__lt__',
lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation:'
' < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
# end funcutils.py
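# Illustrative usage sketch (an assumption: the FunctionBuilder constructor,
# defined earlier in this module, accepts the function name plus keyword
# fields such as ``args`` and ``body``):
#
#     fb = FunctionBuilder('add', args=['a', 'b'], body='return a + b')
#     fb.add_arg('c', default=0)      # appended via add_arg() above
#     add = fb.get_func()             # compiles the source assembled in get_func()
#     add(1, 2)                       # -> 3
#     fb.get_defaults_dict()          # -> {'c': 0}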
|
the-stack_0_2095 | """imw_28363 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("dating.api.v1.urls")),
path("dating/", include("dating.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "IMW"
admin.site.site_title = "IMW Admin Portal"
admin.site.index_title = "IMW Admin"
# swagger
api_info = openapi.Info(
title="IMW API",
default_version="v1",
description="API documentation for IMW App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))]
urlpatterns += [
re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html"))
]
|
the-stack_0_2097 | def create_thread_by_reacted(posted_title, person):
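    # The Japanese message below roughly reads: "Good evening! Your post
    # '<posted_title>' received a reaction from <person>-san."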
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "こんばんは!\nあなたの投稿「"
+ posted_title
+ " 」に"
+ person
+ "さんからリアクションが届きました。",
},
    }
|
the-stack_0_2098 | # SPDX-License-Identifier: Apache-2.0
from ..common._apply_operation import apply_cast
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
from .._supported_operators import sklearn_operator_name_map
def convert_sklearn_cast(scope: Scope, operator: Operator,
container: ModelComponentContainer):
inp = operator.inputs[0]
exptype = operator.outputs[0]
res = exptype.type.to_onnx_type()
et = res.tensor_type.elem_type
apply_cast(scope, inp.full_name, exptype.full_name,
container, to=et)
def convert_sklearn_cast_regressor(scope: Scope, operator: Operator,
container: ModelComponentContainer):
op = operator.raw_operator
estimator = op.estimator
op_type = sklearn_operator_name_map[type(estimator)]
this_operator = scope.declare_local_operator(op_type, estimator)
this_operator.inputs = operator.inputs
cls = operator.inputs[0].type.__class__
var_name = scope.declare_local_variable('cast_est', cls())
this_operator.outputs.append(var_name)
var_name = var_name.onnx_name
exptype = operator.outputs[0]
res = exptype.type.to_onnx_type()
et = res.tensor_type.elem_type
apply_cast(scope, var_name, exptype.full_name,
container, to=et)
register_converter('SklearnCastTransformer', convert_sklearn_cast)
register_converter('SklearnCastRegressor', convert_sklearn_cast_regressor)
register_converter('SklearnCast', convert_sklearn_cast)
|
the-stack_0_2099 | from hypothesis import given
from rithm import Int
from tests.utils import (IntWithBuiltin,
is_equivalent_to_builtin_int)
from . import strategies
@given(strategies.ints, strategies.ints)
def test_alternatives(first: Int, second: Int) -> None:
assert first - second == first + (-second)
@given(strategies.ints_with_builtins, strategies.ints_with_builtins)
def test_connection_with_builtin(first_with_builtin: IntWithBuiltin,
second_with_builtin: IntWithBuiltin
) -> None:
first, first_builtin = first_with_builtin
second, second_builtin = second_with_builtin
assert is_equivalent_to_builtin_int(first - second,
first_builtin - second_builtin)
|
the-stack_0_2100 | # Copyright (c) Facebook, Inc. and its affiliates.
# Inspired from maskrcnn_benchmark, fairseq
import logging
import os
import pickle
import socket
import subprocess
import warnings
import torch
from mmf.common.registry import registry
from torch import distributed as dist
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
MAX_SIZE_LIMIT = 65533
BYTE_SIZE = 256
logger = logging.getLogger(__name__)
# copied from https://github.com/facebookresearch/vissl/blob/master/vissl/utils/distributed_gradients.py
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
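# Illustrative usage (hypothetical names): GatherLayer is typically used to pool
# per-rank embeddings for a loss that needs the full global batch, while still
# letting gradients flow back to every worker, unlike a plain dist.all_gather:
#
#     embeddings = model(batch)                              # (B, D) on this rank
#     all_embeddings = torch.cat(GatherLayer.apply(embeddings), dim=0)
#     loss = contrastive_loss(all_embeddings, labels)        # hypothetical loss fn
#     loss.backward()                                        # gradients reach all ranks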
class XLAGatherLayer(torch.autograd.Function):
"""
Gather tensors from all TPU workers with support for backward propagation.
"""
@staticmethod
def forward(ctx, x, dim):
ctx.dim = dim
tensor_list = xm.all_gather(x.unsqueeze(dim), dim=dim)
return tensor_list
@staticmethod
def backward(ctx, grad_output):
dim = ctx.dim
all_grad_output = xm.all_reduce(xm.REDUCE_SUM, grad_output)
return all_grad_output.select(dim, xm.get_ordinal()), None
def synchronize(message="sync-workers"):
if is_xla():
xm.rendezvous(message)
elif not dist.is_available():
return
if not dist.is_nccl_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def is_xla():
# Cover none case as well
return not (not registry.get("is_xla", no_warning=True))
def get_rank():
if is_xla():
return xm.get_ordinal()
if not dist.is_available():
return 0
if not dist.is_nccl_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_master():
return get_rank() == 0
def is_dist_initialized():
return dist.is_available() and dist.is_initialized()
def get_world_size():
if is_xla():
return xm.xrt_world_size()
if not dist.is_available():
return 1
if not dist.is_nccl_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def broadcast_tensor(tensor, src=0):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
if is_xla():
tensor = xm.all_to_all(
tensor.repeat([world_size, 1]),
split_dimension=0,
concat_dimension=0,
split_count=world_size,
)[0]
else:
            dist.broadcast(tensor, src=src)
return tensor
def broadcast_scalar(scalar, src=0, device="cpu"):
if get_world_size() < 2:
return scalar
scalar_tensor = torch.tensor(scalar).long().to(device)
scalar_tensor = broadcast_tensor(scalar_tensor, src)
return scalar_tensor.item()
def reduce_tensor(tensor):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
dist.reduce(tensor, dst=0)
if dist.get_rank() == 0:
tensor = tensor.div(world_size)
return tensor
def gather_tensor(tensor):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
tensor_list = []
for _ in range(world_size):
tensor_list.append(torch.zeros_like(tensor))
if is_xla():
tensor_list = xm.all_gather(tensor)
tensor_list = tensor_list.view(world_size, *tensor.size())
else:
dist.all_gather(tensor_list, tensor)
tensor_list = torch.stack(tensor_list, dim=0)
return tensor_list
def gather_tensor_along_batch(tensor, dim=0):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
tensor_list = []
for _ in range(world_size):
tensor_list.append(torch.zeros_like(tensor))
dist.all_gather(tensor_list, tensor)
tensor_list = torch.cat(tensor_list, dim=dim)
return tensor_list
def gather_tensor_along_batch_with_backward(tensor, dim=0):
world_size = get_world_size()
if world_size < 2:
return tensor
if is_xla():
tensor_list = XLAGatherLayer.apply(tensor, dim)
tensor_list = tensor_list.flatten(start_dim=dim, end_dim=dim + 1)
else:
tensor_list = GatherLayer.apply(tensor)
tensor_list = torch.cat(tensor_list, dim=dim)
return tensor_list
def reduce_dict(dictionary):
world_size = get_world_size()
if world_size < 2:
return dictionary
with torch.no_grad():
if len(dictionary) == 0:
return dictionary
keys, values = zip(*sorted(dictionary.items()))
values = torch.stack(values, dim=0)
if is_xla():
values = xm.all_reduce("sum", [values], scale=1.0 / world_size)[0]
else:
dist.reduce(values, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(keys, values)}
return reduced_dict
# Object byte tensor utilities have been adopted from
# https://github.com/pytorch/fairseq/blob/master/fairseq/distributed_utils.py
def object_to_byte_tensor(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
f"objects too large: object size {obj_size}, max size {max_size}"
)
byte_tensor[0] = obj_size // 256
byte_tensor[1] = obj_size % 256
byte_tensor[2 : 2 + obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor
def byte_tensor_to_object(byte_tensor, max_size=MAX_SIZE_LIMIT):
"""
Decode PyTorch byte tensors to Python objects
"""
assert max_size <= MAX_SIZE_LIMIT
obj_size = byte_tensor[0].item() * 256 + byte_tensor[1].item()
obj_enc = bytes(byte_tensor[2 : 2 + obj_size].tolist())
obj = pickle.loads(obj_enc)
return obj
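# Illustrative round trip through the two helpers above (the payload is well
# under the 4094-byte default, e.g. before broadcasting it across ranks):
#
#     payload = {"epoch": 3, "metrics": [0.1, 0.2]}
#     t = object_to_byte_tensor(payload)
#     assert byte_tensor_to_object(t) == payload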
def infer_init_method(config):
if config.distributed.init_method is not None:
return
registry.register("is_xla", config.training.get("device", "cuda") == "xla")
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
config.distributed.init_method = "env://"
config.distributed.world_size = int(os.environ["WORLD_SIZE"])
config.distributed.rank = int(os.environ["RANK"])
config.distributed.no_spawn = True
# we can determine the init method automatically for Slurm
elif config.distributed.port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
config.distributed.init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=config.distributed.port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
assert config.distributed.world_size % nnodes == 0
gpus_per_node = config.distributed.world_size // nnodes
node_id = int(os.environ.get("SLURM_NODEID"))
config.distributed.rank = node_id * gpus_per_node
else:
assert ntasks_per_node == config.distributed.world_size // nnodes
config.distributed.no_spawn = True
config.distributed.rank = int(os.environ.get("SLURM_PROCID"))
config.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
def distributed_init(config):
if config.distributed.world_size == 1:
raise ValueError("Cannot initialize distributed with distributed_world_size=1")
logger.info(f"XLA Mode:{is_xla()}")
if is_xla():
config.device_id = xm.get_local_ordinal()
config.distributed.rank = xm.get_ordinal()
elif dist.is_initialized():
warnings.warn("Distributed is already initialized, cannot initialize twice!")
config.distributed.rank = dist.get_rank()
else:
logger.info(
f"Distributed Init (Rank {config.distributed.rank}): "
f"{config.distributed.init_method}"
)
dist.init_process_group(
backend=config.distributed.backend,
init_method=config.distributed.init_method,
world_size=config.distributed.world_size,
rank=config.distributed.rank,
)
logger.info(
f"Initialized Host {socket.gethostname()} as Rank "
f"{config.distributed.rank}"
)
if "MASTER_ADDR" not in os.environ or "MASTER_PORT" not in os.environ:
# Set for onboxdataloader support
split = config.distributed.init_method.split("//")
assert len(split) == 2, (
"host url for distributed should be split by '//' "
+ "into exactly two elements"
)
split = split[1].split(":")
assert (
len(split) == 2
), "host url should be of the form <host_url>:<host_port>"
os.environ["MASTER_ADDR"] = split[0]
os.environ["MASTER_PORT"] = split[1]
# perform a dummy all-reduce to initialize the NCCL communicator
dist.all_reduce(torch.zeros(1).cuda())
suppress_output(is_master())
config.distributed.rank = dist.get_rank()
return config.distributed.rank
def suppress_output(is_master):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
import warnings
builtin_warn = warnings.warn
def warn(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_warn(*args, **kwargs)
# Log warnings only once
warnings.warn = warn
warnings.simplefilter("once", UserWarning)
|
the-stack_0_2101 | """
Tests for ndarray-like method on the base Index class
"""
import pytest
import pandas as pd
from pandas import Index
import pandas._testing as tm
class TestReshape:
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(["b", "c", "d"])
# test 0th element
tm.assert_index_equal(Index(["a", "b", "c", "d"]), result.insert(0, "a"))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(["b", "c", "e", "d"]), result.insert(-1, "e"))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, "z"), result.insert(-2, "z"))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(["a"]), null_index.insert(0, "a"))
@pytest.mark.parametrize(
"pos,expected",
[
(0, Index(["b", "c", "d"], name="index")),
(-1, Index(["a", "b", "c"], name="index")),
],
)
def test_delete(self, pos, expected):
index = Index(["a", "b", "c", "d"], name="index")
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_append_multiple(self):
index = Index(["a", "b", "c", "d", "e", "f"])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
|
the-stack_0_2102 | from datetime import datetime
from os import listdir
import pandas
from application_logging.logger import App_Logger
class dataTransformPredict:
def __init__(self):
self.goodDataPath = "Prediction_Raw_Files_Validated/Good_Raw"
self.logger = App_Logger()
def replaceMissingWithNull(self):
try:
log_file = open("Prediction_Logs/dataTransformLog.txt", 'a+')
onlyfiles = [f for f in listdir(self.goodDataPath)]
for file in onlyfiles:
data = pandas.read_csv(self.goodDataPath + "/" + file)
# list of columns with string datatype variables
columns = ["policy_bind_date","policy_state","policy_csl","insured_sex","insured_education_level","insured_occupation","insured_hobbies","insured_relationship","incident_state","incident_date","incident_type","collision_type","incident_severity","authorities_contacted","incident_city","incident_location","property_damage","police_report_available","auto_make","auto_model"]
for col in columns:
data[col] = data[col].apply(lambda x: "'" + str(x) + "'")
# #csv.update("'"+ csv['Wafer'] +"'")
# csv.update(csv['Wafer'].astype(str))
#csv['Wafer'] = csv['Wafer'].str[6:]
data.to_csv(self.goodDataPath+ "/" + file, index=None, header=True)
self.logger.log(log_file," %s: File Transformed successfully!!" % file)
#log_file.write("Current Date :: %s" %date +"\t" + "Current time:: %s" % current_time + "\t \t" + + "\n")
except Exception as e:
self.logger.log(log_file, "Data Transformation failed because:: %s" % e)
#log_file.write("Current Date :: %s" %date +"\t" +"Current time:: %s" % current_time + "\t \t" + "Data Transformation failed because:: %s" % e + "\n")
log_file.close()
raise e
log_file.close()
|
the-stack_0_2103 | from __future__ import unicode_literals
from .common import InfoExtractor
class DefenseGouvFrIE(InfoExtractor):
IE_NAME = "defense.gouv.fr"
_VALID_URL = r"https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)"
_TEST = {
"url": "http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1",
"md5": "75bba6124da7e63d2d60b5244ec9430c",
"info_dict": {
"id": "11213",
"ext": "mp4",
"title": "attaque-chimique-syrienne-du-21-aout-2013-1",
},
}
def _real_extract(self, url):
title = self._match_id(url)
webpage = self._download_webpage(url, title)
video_id = self._search_regex(r"flashvars.pvg_id=\"(\d+)\";", webpage, "ID")
json_url = (
"http://static.videos.gouv.fr/brightcovehub/export/json/%s" % video_id
)
info = self._download_json(json_url, title, "Downloading JSON config")
video_url = info["renditions"][0]["url"]
return {
"id": video_id,
"ext": "mp4",
"url": video_url,
"title": title,
}
|
the-stack_0_2106 | """
Module for jenkinsapi Job
"""
import json
import logging
import xml.etree.ElementTree as ET
import six.moves.urllib.parse as urlparse
from collections import defaultdict
from jenkinsapi.build import Build
from jenkinsapi.custom_exceptions import (
NoBuildData,
NotConfiguredSCM,
NotFound,
NotInQueue,
NotSupportSCM,
UnknownQueueItem,
BadParams,
)
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.mutable_jenkins_thing import MutableJenkinsThing
from jenkinsapi.queue import QueueItem
from jenkinsapi_utils.compat import to_string
SVN_URL = './scm/locations/hudson.scm.SubversionSCM_-ModuleLocation/remote'
GIT_URL = './scm/userRemoteConfigs/hudson.plugins.git.UserRemoteConfig/url'
HG_URL = './scm/source'
GIT_BRANCH = './scm/branches/hudson.plugins.git.BranchSpec/name'
HG_BRANCH = './scm/branch'
DEFAULT_HG_BRANCH_NAME = 'default'
log = logging.getLogger(__name__)
class Job(JenkinsBase, MutableJenkinsThing):
"""
Represents a jenkins job
A job can hold N builds which are the actual execution environments
"""
def __init__(self, url, name, jenkins_obj):
self.name = name
self.jenkins = jenkins_obj
self._revmap = None
self._config = None
self._element_tree = None
self._scm_prefix = ""
self._scm_map = {
'hudson.scm.SubversionSCM': 'svn',
'hudson.plugins.git.GitSCM': 'git',
'hudson.plugins.mercurial.MercurialSCM': 'hg',
'hudson.scm.NullSCM': 'NullSCM'
}
self._scmurlmap = {
'svn': lambda element_tree: list(element_tree.findall(SVN_URL)),
'git': lambda element_tree: list(element_tree.findall(self._scm_prefix + GIT_URL)),
'hg': lambda element_tree: list(element_tree.findall(HG_URL)),
None: lambda element_tree: []
}
self._scmbranchmap = {
'svn': lambda element_tree: [],
'git': lambda element_tree: list(element_tree.findall(self._scm_prefix + GIT_BRANCH)),
'hg': self._get_hg_branch,
None: lambda element_tree: []
}
self.url = url
JenkinsBase.__init__(self, self.url)
def __str__(self):
return self.name
def get_description(self):
return self._data["description"]
def get_jenkins_obj(self):
return self.jenkins
# When the name of the hg branch used in the job is default hg branch (i.e.
# default), Mercurial plugin doesn't store default branch name in
# config XML file of the job. Create XML node corresponding to
# default branch
def _get_hg_branch(self, element_tree):
branches = element_tree.findall(HG_BRANCH)
if not branches:
hg_default_branch = ET.Element('branch')
hg_default_branch.text = DEFAULT_HG_BRANCH_NAME
branches.append(hg_default_branch)
return branches
def poll(self, tree=None):
data = super(Job, self).poll(tree=tree)
if not tree and not self.jenkins.lazy:
self._data = self._add_missing_builds(self._data)
return data
# pylint: disable=E1123
# Unexpected keyword arg 'params'
def _add_missing_builds(self, data):
"""
Query Jenkins to get all builds of the job in the data object.
Jenkins API loads the first 100 builds and thus may not contain
all builds information. This method checks if all builds are loaded
in the data object and updates it with the missing builds if needed.
"""
if not data.get("builds"):
return data
# do not call _buildid_for_type here: it would poll and do an infinite
# loop
oldest_loaded_build_number = data["builds"][-1]["number"]
if 'firstBuild' not in self._data or not self._data['firstBuild']:
first_build_number = oldest_loaded_build_number
else:
first_build_number = self._data["firstBuild"]["number"]
all_builds_loaded = (oldest_loaded_build_number == first_build_number)
if all_builds_loaded:
return data
response = self.poll(tree='allBuilds[number,url]')
data['builds'] = response['allBuilds']
return data
def _get_config_element_tree(self):
"""
        Parse the job config into an ElementTree once and cache it, so that
        repeated ElementTree creation is avoided (one tree per job)
"""
if self._config is None:
self.load_config()
if self._element_tree is None:
self._element_tree = ET.fromstring(self._config)
return self._element_tree
def get_build_triggerurl(self):
if not self.has_params():
return "%s/build" % self.baseurl
return "%s/buildWithParameters" % self.baseurl
@staticmethod
def _mk_json_from_build_parameters(build_params, file_params=None):
"""
Build parameters must be submitted in a particular format
Key-Value pairs would be far too simple, no no!
Watch and read on and behold!
"""
if not isinstance(build_params, dict):
raise ValueError('Build parameters must be a dict')
build_p = [{'name': k, 'value': to_string(v)}
for k, v in sorted(build_params.items())]
out = {'parameter': build_p}
if file_params:
file_p = [{'name': k, 'file': k}
for k in file_params.keys()]
out['parameter'].extend(file_p)
if len(out['parameter']) == 1:
out['parameter'] = out['parameter'][0]
return out
@staticmethod
def mk_json_from_build_parameters(build_params, file_params=None):
json_structure = Job._mk_json_from_build_parameters(
build_params,
file_params
)
json_structure['statusCode'] = "303"
json_structure['redirectTo'] = "."
return json.dumps(json_structure)
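    # Illustrative output of the method above (string-valued parameters; the
    # exact key ordering and value rendering via to_string() may differ):
    #
    #     Job.mk_json_from_build_parameters({'BRANCH': 'main', 'TAG': 'v1.2'})
    #     # -> '{"parameter": [{"name": "BRANCH", "value": "main"},
    #     #                    {"name": "TAG", "value": "v1.2"}],
    #     #      "statusCode": "303", "redirectTo": "."}'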
def invoke(self, securitytoken=None, block=False,
build_params=None, cause=None, files=None, delay=5):
assert isinstance(block, bool)
if build_params and (not self.has_params()):
raise BadParams("This job does not support parameters")
params = {} # Via Get string
if securitytoken:
params['token'] = securitytoken
# Either copy the params dict or make a new one.
build_params = dict(build_params.items()) \
if build_params else {} # Via POSTed JSON
url = self.get_build_triggerurl()
if cause:
build_params['cause'] = cause
# Build require params as form fields
# and as Json.
data = {
'json': self.mk_json_from_build_parameters(
build_params,
files)
}
data.update(build_params)
response = self.jenkins.requester.post_and_confirm_status(
url,
data=data,
params=params,
files=files,
valid=[200, 201, 303],
allow_redirects=False
)
redirect_url = response.headers['location']
#
# Enterprise Jenkins implementations such as CloudBees locate their
# queue REST API base https://server.domain.com/jenkins/queue/api/
# above the team-specific REST API base
# https://server.domain.com/jenkins/job/my_team/api/
#
queue_baseurl_candidates = [self.jenkins.baseurl]
scheme, netloc, path, _, query, frag = \
urlparse.urlparse(self.jenkins.baseurl)
while path:
path = '/'.join(path.rstrip('/').split('/')[:-1])
queue_baseurl_candidates.append(
urlparse.urlunsplit([scheme, netloc, path, query, frag]))
redirect_url_valid = False
for queue_baseurl_candidate in queue_baseurl_candidates:
redirect_url_valid = redirect_url.startswith(
"%s/queue/item" % queue_baseurl_candidate)
if redirect_url_valid:
break
if not redirect_url_valid:
raise ValueError("Not a Queue URL: %s" % redirect_url)
qi = QueueItem(redirect_url, self.jenkins)
if block:
qi.block_until_complete(delay=delay)
return qi
def _buildid_for_type(self, buildtype):
"""
Gets a buildid for a given type of build
"""
KNOWNBUILDTYPES = [
"lastStableBuild",
"lastSuccessfulBuild",
"lastBuild",
"lastCompletedBuild",
"firstBuild",
"lastFailedBuild"]
assert buildtype in KNOWNBUILDTYPES, ('Unknown build info type: %s'
% buildtype)
data = self.poll(tree='%s[number]' % buildtype)
if not data.get(buildtype):
raise NoBuildData(buildtype)
return data[buildtype]["number"]
def get_first_buildnumber(self):
"""
Get the numerical ID of the first build.
"""
return self._buildid_for_type("firstBuild")
def get_last_stable_buildnumber(self):
"""
Get the numerical ID of the last stable build.
"""
return self._buildid_for_type("lastStableBuild")
def get_last_good_buildnumber(self):
"""
Get the numerical ID of the last good build.
"""
return self._buildid_for_type("lastSuccessfulBuild")
def get_last_failed_buildnumber(self):
"""
Get the numerical ID of the last failed build.
"""
return self._buildid_for_type(buildtype="lastFailedBuild")
def get_last_buildnumber(self):
"""
Get the numerical ID of the last build.
"""
return self._buildid_for_type("lastBuild")
def get_last_completed_buildnumber(self):
"""
Get the numerical ID of the last complete build.
"""
return self._buildid_for_type("lastCompletedBuild")
def get_build_dict(self):
builds = self.poll(tree='builds[number,url]')
if not builds:
raise NoBuildData(repr(self))
builds = self._add_missing_builds(builds)
builds = builds['builds']
last_build = self.poll(tree='lastBuild[number,url]')['lastBuild']
if builds and last_build and \
builds[0]['number'] != last_build['number']:
builds = [last_build] + builds
# FIXME SO how is this supposed to work if build is false-y?
# I don't think that builds *can* be false here, so I don't
# understand the test above.
return dict((build["number"], build["url"]) for build in builds)
def get_build_by_params(self, build_params, order=1):
first_build_number = self.get_first_buildnumber()
last_build_number = self.get_last_buildnumber()
if order != 1 and order != -1:
raise ValueError(
'Direction should be ascending or descending (1/-1)')
for number in range(first_build_number,
last_build_number + 1)[::order]:
build = self.get_build(number)
if build.get_params() == build_params:
return build
raise NoBuildData(
'No build with such params {params}'.format(params=build_params))
def get_revision_dict(self):
"""
Get dictionary of all revisions with a list of buildnumbers (int)
that used that particular revision
"""
revs = defaultdict(list)
if 'builds' not in self._data:
raise NoBuildData(repr(self))
for buildnumber in self.get_build_ids():
revs[self.get_build(buildnumber)
.get_revision()].append(buildnumber)
return revs
def get_build_ids(self):
"""
Return a sorted list of all good builds as ints.
"""
return reversed(sorted(self.get_build_dict().keys()))
def get_next_build_number(self):
"""
Return the next build number that Jenkins will assign.
"""
return self._data.get('nextBuildNumber', 0)
def get_last_stable_build(self):
"""
Get the last stable build
"""
bn = self.get_last_stable_buildnumber()
return self.get_build(bn)
def get_last_good_build(self):
"""
Get the last good build
"""
bn = self.get_last_good_buildnumber()
return self.get_build(bn)
def get_last_build(self):
"""
Get the last build
"""
bn = self.get_last_buildnumber()
return self.get_build(bn)
def get_first_build(self):
bn = self.get_first_buildnumber()
return self.get_build(bn)
def get_last_build_or_none(self):
"""
Get the last build or None if there is no builds
"""
try:
return self.get_last_build()
except NoBuildData:
return None
def get_last_completed_build(self):
"""
Get the last build regardless of status
"""
bn = self.get_last_completed_buildnumber()
return self.get_build(bn)
def get_buildnumber_for_revision(self, revision, refresh=False):
"""
:param revision: subversion revision to look for, int
:param refresh: boolean, whether or not to refresh the
revision -> buildnumber map
:return: list of buildnumbers, [int]
"""
if self.get_scm_type() == 'svn' and not isinstance(revision, int):
revision = int(revision)
if self._revmap is None or refresh:
self._revmap = self.get_revision_dict()
try:
return self._revmap[revision]
except KeyError:
raise NotFound("Couldn't find a build with that revision")
def get_build(self, buildnumber):
assert isinstance(buildnumber, int)
try:
url = self.get_build_dict()[buildnumber]
return Build(url, buildnumber, job=self)
except KeyError:
raise NotFound('Build #%s not found' % buildnumber)
def delete_build(self, build_number):
"""
Remove build
:param int build_number: Build number
:raises NotFound: When build is not found
"""
try:
url = self.get_build_dict()[build_number]
url = "%s/doDelete" % url
self.jenkins.requester.post_and_confirm_status(url, data='')
self.jenkins.poll()
except KeyError:
raise NotFound('Build #%s not found' % build_number)
def get_build_metadata(self, buildnumber):
"""
Get the build metadata for a given build number. For large builds with
tons of tests, this method is faster than get_build by returning less
data.
"""
if not isinstance(buildnumber, int):
raise ValueError('Parameter "buildNumber" must be int')
try:
url = self.get_build_dict()[buildnumber]
return Build(url, buildnumber, job=self, depth=0)
except KeyError:
raise NotFound('Build #%s not found' % buildnumber)
def __delitem__(self, build_number):
self.delete_build(build_number)
def __getitem__(self, buildnumber):
return self.get_build(buildnumber)
def __len__(self):
return len(self.get_build_dict())
def is_queued_or_running(self):
return self.is_queued() or self.is_running()
def is_queued(self):
data = self.poll(tree='inQueue')
return data.get('inQueue', False)
def get_queue_item(self):
"""
Return a QueueItem if this object is in a queue, otherwise raise
an exception
"""
if not self.is_queued():
raise UnknownQueueItem()
q_item = self.poll(tree='queueItem[url]')
qi_url = urlparse.urljoin(
self.jenkins.baseurl, q_item['queueItem']['url']
)
return QueueItem(qi_url, self.jenkins)
def is_running(self):
# self.poll()
try:
build = self.get_last_build_or_none()
if build is not None:
return build.is_running()
except NoBuildData:
log.info(
"No build info available for %s, assuming not running.",
str(self))
return False
def get_config(self):
"""
Returns the config.xml from the job
"""
response = self.jenkins.requester.get_and_confirm_status(
"%(baseurl)s/config.xml" % self.__dict__)
return response.text
def load_config(self):
self._config = self.get_config()
def get_scm_type(self):
element_tree = self._get_config_element_tree()
scm_element = element_tree.find('scm')
if not scm_element:
multibranch_scm_prefix = \
"properties/org.jenkinsci.plugins.workflow.multibranch.BranchJobProperty/branch/"
multibranch_path = multibranch_scm_prefix + "scm"
scm_element = element_tree.find(multibranch_path)
if scm_element:
# multibranch pipeline.
self._scm_prefix = multibranch_scm_prefix
scm_class = scm_element.get('class') if scm_element else None
scm = self._scm_map.get(scm_class)
if not scm:
raise NotSupportSCM(
'SCM class "%s" not supported by API for job "%s"'
% (scm_class, self.name))
if scm == 'NullSCM':
raise NotConfiguredSCM(
'SCM is not configured for job "%s"' % self.name)
return scm
def get_scm_url(self):
"""
Get list of project SCM urls
        For some SCMs Jenkins allows configuring multiple SCM URLs per job.
        :return: list of SCM URLs
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
scm_url_list = [scm_url.text for scm_url in self._scmurlmap[
scm](element_tree)]
return scm_url_list
def get_scm_branch(self):
"""
Get list of SCM branches
: return: list of SCM branches
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
return [scm_branch.text
for scm_branch in self._scmbranchmap[scm](element_tree)]
def modify_scm_branch(self, new_branch, old_branch=None):
"""
Modify SCM ("Source Code Management") branch name for configured job.
:param new_branch : new repository branch name to set.
            If the job has multiple branches configured and "old_branch"
            is not provided, the method will always modify the first branch.
        :param old_branch (optional): exact value of the branch name
            to be replaced.
            Some SCMs let Jenkins configure multiple branches per job;
            this parameter indicates which branch needs to be modified.
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
scm_branch_list = self._scmbranchmap[scm](element_tree)
if scm_branch_list and not old_branch:
scm_branch_list[0].text = new_branch
self.update_config(ET.tostring(element_tree))
else:
for scm_branch in scm_branch_list:
if scm_branch.text == old_branch:
scm_branch.text = new_branch
self.update_config(ET.tostring(element_tree))
def modify_scm_url(self, new_source_url, old_source_url=None):
"""
Modify SCM ("Source Code Management") url for configured job.
:param new_source_url : new repository url to set.
            If the job has multiple repositories configured and "old_source_url"
            is not provided, the method will always modify the first URL.
        :param old_source_url (optional): some SCMs let Jenkins configure
            multiple repositories per job;
            this parameter indicates which repository needs to be modified.
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
scm_url_list = self._scmurlmap[scm](element_tree)
if scm_url_list and not old_source_url:
scm_url_list[0].text = new_source_url
self.update_config(ET.tostring(element_tree))
else:
for scm_url in scm_url_list:
if scm_url.text == old_source_url:
scm_url.text = new_source_url
self.update_config(ET.tostring(element_tree))
def get_config_xml_url(self):
return '%s/config.xml' % self.baseurl
def update_config(self, config, full_response=False):
"""
Update the config.xml to the job
Also refresh the ElementTree object since the config has changed
:param full_response (optional): if True, it will return the full
response object instead of just the response text.
Useful for debugging and validation workflows.
"""
url = self.get_config_xml_url()
config = str(config) # cast unicode in case of Python 2
response = self.jenkins.requester.post_url(url, params={}, data=config)
self._element_tree = ET.fromstring(config)
if full_response:
return response
return response.text
def get_downstream_jobs(self):
"""
Get all the possible downstream jobs
:return List of Job
"""
downstream_jobs = []
try:
for j in self._data['downstreamProjects']:
downstream_jobs.append(
self.get_jenkins_obj()[j['name']])
except KeyError:
return []
return downstream_jobs
def get_downstream_job_names(self):
"""
Get all the possible downstream job names
:return List of String
"""
downstream_jobs = []
try:
for j in self._data['downstreamProjects']:
downstream_jobs.append(j['name'])
except KeyError:
return []
return downstream_jobs
def get_upstream_job_names(self):
"""
Get all the possible upstream job names
:return List of String
"""
upstream_jobs = []
try:
for j in self._data['upstreamProjects']:
upstream_jobs.append(j['name'])
except KeyError:
return []
return upstream_jobs
def get_upstream_jobs(self):
"""
Get all the possible upstream jobs
:return List of Job
"""
upstream_jobs = []
try:
for j in self._data['upstreamProjects']:
upstream_jobs.append(self.get_jenkins_obj().get_job(j['name']))
except KeyError:
return []
return upstream_jobs
def is_enabled(self):
data = self.poll(tree='color')
return 'disabled' not in data.get('color', '')
def disable(self):
"""
Disable job
"""
url = "%s/disable" % self.baseurl
return self.get_jenkins_obj().requester.post_url(url, data='')
def enable(self):
"""
Enable job
"""
url = "%s/enable" % self.baseurl
return self.get_jenkins_obj().requester.post_url(url, data='')
def delete_from_queue(self):
"""
Delete a job from the queue only if it's enqueued
:raise NotInQueue if the job is not in the queue
"""
if not self.is_queued():
raise NotInQueue()
queue_id = self._data['queueItem']['id']
url = urlparse.urljoin(self.get_jenkins_obj().get_queue().baseurl,
'queue/cancelItem?id=%s' % queue_id)
self.get_jenkins_obj().requester.post_and_confirm_status(url, data='')
return True
def get_params(self):
"""
Get the parameters for this job. Format varies by parameter type. Here
is an example string parameter:
{
'type': 'StringParameterDefinition',
'description': 'Parameter description',
'defaultParameterValue': {'value': 'default value'},
'name': 'FOO_BAR'
}
"""
places = ['actions', 'property']
found_definitions = False
for place in places:
if found_definitions:
return
actions = (x for x in self._data[place] if x is not None)
for action in actions:
try:
for param in action['parameterDefinitions']:
found_definitions = True
yield param
except KeyError:
continue
def get_params_list(self):
"""
Gets the list of parameter names for this job.
"""
return [param['name'] for param in self.get_params()]
def has_params(self):
"""
If job has parameters, returns True, else False
"""
if any("parameterDefinitions" in a for a in (self._data["actions"])
if a):
return True
if any("parameterDefinitions" in a for a in (self._data["property"])
if a):
return True
return False
def has_queued_build(self, build_params):
"""
Returns True if a build with build_params is currently queued.
"""
queue = self.jenkins.get_queue()
queued_builds = queue.get_queue_items_for_job(self.name)
for build in queued_builds:
if build.get_parameters() == build_params:
return True
return False
@staticmethod
def get_full_name_from_url_and_baseurl(url, baseurl):
"""
Get the full name for a job (including parent folders) from the
job URL.
"""
path = url.replace(baseurl, '')
split = path.split('/')
split = [urlparse.unquote(part) for part in split[::2] if part]
return '/'.join(split)
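    # Illustrative example of the static helper above:
    #
    #     Job.get_full_name_from_url_and_baseurl(
    #         'http://jenkins.example.com/job/folder/job/myjob',
    #         'http://jenkins.example.com')
    #     # -> 'folder/myjob'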
def get_full_name(self):
"""
Get the full name for a job (including parent folders)
from the job URL.
"""
return Job.get_full_name_from_url_and_baseurl(
self.url, self.jenkins.baseurl)
def toggle_keep_build(self, build_number):
self.get_build(build_number).toggle_keep()
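# Illustrative usage of this Job class (server URL, credentials and job name
# below are hypothetical):
#
#     from jenkinsapi.jenkins import Jenkins
#     server = Jenkins('http://jenkins.example.com', username='bot', password='token')
#     job = server['my-pipeline']
#     qi = job.invoke(build_params={'BRANCH': 'main'}, block=True)
#     print(job.get_last_good_buildnumber())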
|
the-stack_0_2108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# InteropDataset
# Library encapsulating the XML and bin files from MiSeq and HiSeq output.
#
# InteropMetadata
# Parser for XML files from MiSeq / HiSeq run data.
#
# See README for intro and basic examples.
#
# March 2013
# by nthmost ([email protected])
# with lots of help from ECO ([email protected])
import os
from collections import OrderedDict
from datetime import datetime
import xml.etree.ElementTree as ET
import xmltodict
from .utils import select_file_from_aliases
from .filemaps import XML_FILEMAP
class InteropMetadata(object):
"""Parser for sequencer's XML files describing a single run. Supply with directory to instantiate.
CHANGES:
0.3 (in progress) Switching to xmltodict from ElementTree.
0.2.2 runParameters supports both MiSeq and HiSeq formats.
0.2.1 No longer prioritizing CompletedJobInfo.xml (not reliably present).
0.2 Cleaner logical process for using the various XML files. No longer throws exceptions.
0.1 First released version.
"""
__version = 0.3 # version of this parser.
def __init__(self, xmldir):
"""Takes the absolute path of a sequencing run data directory as sole required variable.
Attempts to parse CompletedJobInfo.xml (or viable alias). If not available, uses
runParameters.xml and/or runInfo.xml, which have some overlapping info (but not all).
Individual parsers can be explicitly called via their respective methods.
Be aware that parsing methods are DESTRUCTIVE to existing instance data."""
self.xmldir = xmldir
self.experiment_name = "" # "RU1453:::/locus/data/run_data//1337/1453"
self.investigator_name = "" # "Locus:::Uncle_Jesse - 612 - MiSeq"
self.runID = "" # cf CompletedJobInfo.xml / RTARunInfo / Run param "Id"
# TODO: xml_datetimes
# We can learn end_datetime this from the RTAComplete.txt file.
# sample: 2/11/2014,17:25:13.217,Illumina RTA 1.18.42
#
#...but it would be nicer if we didn't have to (more files to track, no fun).
self.start_datetime = None
self.end_datetime = None
self.rta_run_info = { 'flowcell': '', 'instrument': '', 'date': '' }
# read_config: a list of dictionaries, each of which describe a single read from the sequencer.
self.read_config = []
# Flow cell layout: necessary to enable parsing of different machine types' binaries.
self.flowcell_layout = { }
# Read numbers from ResequencingRunStats.xml
# Example: { 'clusters_raw': 19494893, 'clusters_PF': 17381252,
# 'unindexed': 508055, 'unindexed_PF': 16873197,
# 'unaligned': 18572490, 'unaligned_PF': 16973197 }
self.resequencing_stats = {}
if self.get_xml_path('reseqstats') is not None:
self.parse_ResequencingRunStats(self.get_xml_path('reseqstats'))
# The main goal of parsing the XML is to find out read_config and flowcell_layout.
# A lot of other data is available, but only these two basics are necessary.
#
# CompletedJobInfo.xml has the most complete data from MiSeq machines, but only exists
# at the end of a run, and HiSeq machines don't even generate one.
#
# RunInfo.xml (containing just the basics) is always available during an active run.
#
# TODO: xml_flex (proposed improvement allowing a config file to set which tokens are required / not required.)
# Also we might want to specify priority of provenance (e.g. get start_datetime from 'runparams' first).
# If you (yes YOU) have any opinions about this, please email me: [email protected]
self.machine_id = ""
self.model = ""
self._xml_map = OrderedDict({ 'completed': [None, self.parse_CompletedJobInfo],
'runinfo': [None, self.parse_RunInfo],
'runparams': [None, self.parse_RunParameters] })
self._set_xml_map()
# cycle through XML files, filling from what's available.
for codename in self._xml_map:
if self._xml_map[codename][0] is not None:
self._xml_map[codename][1](self._xml_map[codename][0])
if codename == 'completed':
break
def _set_xml_map(self):
"""finds all available XML files, assigns them to an ordered dictionary
mapping of codename:[filepath,parse_function] """
for codename in self._xml_map:
self._xml_map[codename][0] = self.get_xml_path(codename)
def get_xml_path(self, codename):
"returns absolute path to XML file represented by data 'codename' or None if not available."
result = select_file_from_aliases(codename, XML_FILEMAP, self.xmldir)
return result
def parse_Run_ET(self, run_ET):
"parses chunk of XML associated with the RTA Run Info blocks in (at least) 2 xml files."
self.rta_run_info = { 'instrument': run_ET.find('Instrument').text, # M00612
'flowcell': run_ET.find('Flowcell').text, # 000000000-A316T
'date': run_ET.find('Date').text } # 130208
flowcell_ET = run_ET.find('FlowcellLayout')
self.flowcell_layout = { 'lanecount': int(flowcell_ET.attrib['LaneCount']),
'surfacecount': int(flowcell_ET.attrib['SurfaceCount']),
'swathcount': int(flowcell_ET.attrib['SwathCount']),
'tilecount': int(flowcell_ET.attrib['TileCount']) }
# Run / Reads - describes number of cycles per read (and if read is an Index) in sequencing run.
# Because parsing is understood to be destructive, and Reads can be found in multiple files,
# we start by emptying out whatever's currently in the read_config array for this instance.
self.read_config = []
read_num = 0
for item in run_ET.find("Reads"):
read_num += 1 # redundant safety assignment to read_num
self.read_config.append( {'read_num': read_num,
'cycles': int(item.attrib['NumCycles']),
'is_index': True if item.attrib['IsIndexedRead']=='Y' else False } )
def parse_ResequencingRunStats(self, filepath):
"""Parses ResequencingRunStatistics.xml (or viable alias) to fill instance variables."""
# TODO: xmltodict conversion
tree = ET.parse(filepath)
root = tree.getroot() # should be "StatisticsResequencing"
runstats_ET = root.find("RunStats")
self.resequencing_stats = { 'clusters_raw': int(runstats_ET.find('NumberOfClustersRaw').text),
'clusters_pf': int(runstats_ET.find('NumberOfClustersPF').text),
'unindexed': int(runstats_ET.find('NumberOfUnindexedClusters').text),
'unindexed_pf': int(runstats_ET.find('NumberOfUnindexedClustersPF').text),
'unaligned': int(runstats_ET.find('NumberOfUnalignedClusters').text),
'unaligned_pf': int(runstats_ET.find('NumberOfUnalignedClustersPF').text),
'duplicate': int(runstats_ET.find('NumberOfDuplicateClusters').text) }
def parse_RunInfo(self, filepath):
"parses Reads, Date, Flowcell, Instrument out of runInfo.xml"
#buf = open(filepath).read()
#root = xmltodict.parse(buf)['RunInfo']
tree = ET.parse(filepath)
run_ET = tree.getroot().find('Run') #little of use in this file except <Run> subelement.
self.runID = run_ET.attrib['Id']
#? is runNumber useful information? if so, what for?
#self.runNumber = run_ET.attrib['Number']
self.parse_Run_ET(run_ET)
if not self.read_config:
buf = open(filepath).read()
root = xmltodict.parse(buf)['RunInfo']
try:
Reads = root.get('Run')['Reads']['Read']
except KeyError:
pass
for read in Reads:
self.read_config.append(
{'read_num': int(read['@Number']),
'cycles': int(read['@NumCycles']),
'is_index': True if read['@IsIndexedRead'] == 'Y' else False
})
def _parse_runparams(self, xml_dict):
# Different format from that in CompletedJobInfo.xml (contains read Number).
# And there are two possible keys to indicate the same datastructure. So fun.
try:
Reads = xml_dict.get('Reads')['Read']
except KeyError:
Reads = xml_dict.get('Reads')['RunInfoRead']
if not self.read_config:
for read in Reads:
self.read_config.append(
{'read_num': int(read['@Number']),
'cycles': int(read['@NumCycles']),
'is_index': True if read['@IsIndexedRead']=='Y' else False
} )
self.rta_version = xml_dict.get('RTAVersion', '')
rawdate = xml_dict.get('RunStartDate', '') # format: 130208 YYMMDD
if rawdate:
self.start_datetime = datetime.strptime(rawdate, '%y%m%d')
self.runID = xml_dict.get('RunID', '')
self.experiment_name = xml_dict.get('ExperimentName', '')
self.flowcell_position = xml_dict.get('FCPosition', '')
self.flowcell_barcode = xml_dict.get('Barcode', '')
self.machine_id = xml_dict.get('ScannerID', '')
def parse_RunParameters(self, filepath):
"""parses runParameters.xml (or viable alias) to fill instance variables.
Need to implement further since HiSeq output has no CompletedJobInfo.xml
"""
buf = open(filepath).read()
root = xmltodict.parse(buf)['RunParameters']
# a dirty hack to figure out which version of this file we're reading.
if 'Reads' in list(root['Setup'].keys()):
self._parse_runparams(root['Setup']) # HiSeq
elif 'Reads' in list(root.keys()):
self._parse_runparams(root) # MiSeq
else:
pass # NextSeq
self.model = self._get_model()
def parse_CompletedJobInfo(self, filepath):
"""parses CompletedJobInfo.xml (or viable alias) to fill instance variables.
Not all machines generate this file, so we avoid relying on it.
"""
# TODO: xmltodict conversion
# comments show example data from a real MiSeq run (2013/02)
tree = ET.parse(filepath)
root = tree.getroot() #should be "AnalysisJobInfo"
# Something to be aware of: RTARunInfo contains a "version" attribute.
# (This parser knows how to deal with version 2.)
self.rta_version = root.find("RTARunInfo").attrib['Version']
# original location of data output from the sequencer.
self.output_folder = root.find("RTAOutputFolder").text
# TODO: xml_datetimes
self.start_datetime = root.find("StartTime").text # 2013-02-09T15:51:50.0811937-08:00
self.end_datetime = root.find("CompletionTime").text # 2013-02-09T16:06:44.0124452-08:00
# dechunk all of the major sections we want to extract data from.
sheet_ET = root.find("Sheet")
header_ET = sheet_ET.find("Header")
run_ET = root.find("RTARunInfo").find("Run")
# Sheet / *
# TODO: deprecate this attribute (can't get it from HiSeq XML)
try:
self.runtype = sheet_ET.find("Type").text # MiSeq, HiSeq, etc.
except AttributeError:
#older (early 2012) XML files have no "Type" token.
self.runtype = ""
# Sheet / Header / *
try:
self.investigator_name = header_ET.find("InvestigatorName").text
self.project_name = header_ET.find("ProjectName").text
self.experiment_name = header_ET.find("ExperimentName").text
except AttributeError:
pass
# RTARunInfo / Run / *
self.runID = run_ET.attrib["Id"]
self.parse_Run_ET(run_ET)
def _get_model(self):
"""
Guesses the sequencer model from the run folder name
Current Naming schema for Illumina run folders, as far as I know,
no documentation found on this, Illumina introduced a field called
'InstrumentID' on the NextSeq runParameters.xml. That might be an
option for the future
MiSeq: 150130_M01761_0114_000000000-ACUR0
NextSeq: 150202_NS500318_0047_AH3KLMBGXX
HiSeq 2000: 130919_SN792_0281_BD2CHRACXX
HiSeq 2500: 150203_D00535_0052_AC66RWANXX
HiSeq 4000: 150210_K00111_0013_AH2372BBXX
HiSeq X: 141121_ST-E00107_0356_AH00C3CCXX
"""
# retired this line. getting self.machine_id from ScannerID field in _parse_runparams()
# date, machine_id, run_number, fc_string = os.path.basename(self.runID).split("_")
if self.machine_id.startswith("NS"):
model = "NextSeq 500"
elif self.machine_id.startswith("M"):
model = "MiSeq"
elif self.machine_id.startswith("D"):
model = "HiSeq 2500"
elif self.machine_id.startswith("SN"):
model = "HiSeq 2000"
# elif machine_id.startswith("??"):
# model = "Hiseq 3000"
elif self.machine_id.startswith("K"):
model = "HiSeq 4000"
elif self.machine_id.startswith("ST"):
model = "HiSeq X"
else:
model = "Unidentified"
return model
def prettyprint_general(self):
out = "General Config:\n" + \
"Model: " + self.model + "\n" + \
"Run Folder Name: " + os.path.basename(self.runID)
return out
def prettyprint_read_config(self):
out = "Read Config:"
for read in self.read_config:
out += " Read %i: %i cycles %s" % (read['read_num'], read['cycles'],
"(Index)" if read['is_index'] else "")
return out
def prettyprint_flowcell_layout(self):
out = """Flowcell Layout:
Tiles: %(tilecount)i
Lanes: %(lanecount)i
Surfaces: %(surfacecount)i
Swaths: %(swathcount)i""" % self.flowcell_layout
return out
def __str__(self):
"""
Print the most important metadata
"""
out = self.prettyprint_general() + "\n"
out += self.prettyprint_read_config() + "\n"
out += self.prettyprint_flowcell_layout() + "\n"
return out
def to_dict(self):
return { 'runID': self.runID,
'experiment_name': self.experiment_name,
'start_datetime': self.start_datetime,
'end_datetime': self.end_datetime,
'model': self.model,
'flowcell_layout': self.flowcell_layout,
'flowcell_barcode': self.flowcell_barcode,
'flowcell_position': self.flowcell_position, }
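# Illustrative usage (the run-folder path below is hypothetical):
#
#     meta = InteropMetadata('/data/runs/150203_D00535_0052_AC66RWANXX')
#     print(meta)              # model, read config and flowcell layout summary
#     reads = meta.read_config # e.g. [{'read_num': 1, 'cycles': 151, 'is_index': False}, ...]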
|
the-stack_0_2109 | from multiprocessing import Queue
import re
import threading
from typing import Optional, Tuple
import zlib
from ..candidate import CandidateResult
from ..helpers import exception_to_string
from ..permuter import (
EvalError,
EvalResult,
Feedback,
FeedbackItem,
Finished,
Message,
NeedMoreWork,
Permuter,
Task,
WorkDone,
)
from ..profiler import Profiler
from .core import (
PermuterData,
SocketPort,
json_prop,
permuter_data_to_json,
)
def _profiler_from_json(obj: dict) -> Profiler:
ret = Profiler()
for key in obj:
assert isinstance(key, str), "json properties are strings"
stat = Profiler.StatType[key]
time = json_prop(obj, key, float)
ret.add_stat(stat, time)
return ret
def _result_from_json(obj: dict, source: Optional[str]) -> EvalResult:
if "error" in obj:
return EvalError(exc_str=json_prop(obj, "error", str), seed=None)
profiler: Optional[Profiler] = None
if "profiler" in obj:
profiler = _profiler_from_json(json_prop(obj, "profiler", dict))
return CandidateResult(
score=json_prop(obj, "score", int),
hash=json_prop(obj, "hash", str) if "hash" in obj else None,
source=source,
profiler=profiler,
)
def _make_script_portable(source: str) -> str:
"""Parse a shell script and get rid of the machine-specific parts that
import.py introduces. The resulting script must be run in an environment
that has the right binaries in its $PATH, and with a current working
directory similar to where import.py found its target's make root."""
lines = []
for line in source.split("\n"):
if re.match("cd '?/", line):
# Skip cd's to absolute directory paths. Note that shlex quotes
# its argument with ' if it contains spaces/single quotes.
continue
if re.match("'?/", line):
quote = "'" if line[0] == "'" else ""
ind = line.find(quote + " ")
if ind == -1:
ind = len(line)
else:
ind += len(quote)
lastind = line.rfind("/", 0, ind)
assert lastind != -1
# Emit a call to "which" as the first part, to ensure the called
# binary still sees an absolute path. qemu-irix requires this,
# for some reason.
line = "$(which " + quote + line[lastind + 1 : ind] + ")" + line[ind:]
lines.append(line)
return "\n".join(lines)
def make_portable_permuter(permuter: Permuter) -> PermuterData:
with open(permuter.scorer.target_o, "rb") as f:
target_o_bin = f.read()
with open(permuter.compiler.compile_cmd, "r") as f2:
compile_script = _make_script_portable(f2.read())
return PermuterData(
base_score=permuter.base_score,
base_hash=permuter.base_hash,
fn_name=permuter.fn_name,
filename=permuter.source_file,
keep_prob=permuter.keep_prob,
need_profiler=permuter.need_profiler,
stack_differences=permuter.scorer.stack_differences,
compile_script=compile_script,
source=permuter.source,
target_o_bin=target_o_bin,
)
class Connection:
_port: SocketPort
_permuter_data: PermuterData
_perm_index: int
_task_queue: "Queue[Task]"
_feedback_queue: "Queue[Feedback]"
def __init__(
self,
port: SocketPort,
permuter_data: PermuterData,
perm_index: int,
task_queue: "Queue[Task]",
feedback_queue: "Queue[Feedback]",
) -> None:
self._port = port
self._permuter_data = permuter_data
self._perm_index = perm_index
self._task_queue = task_queue
self._feedback_queue = feedback_queue
def _send_permuter(self) -> None:
data = self._permuter_data
self._port.send_json(permuter_data_to_json(data))
self._port.send(zlib.compress(data.source.encode("utf-8")))
self._port.send(zlib.compress(data.target_o_bin))
def _feedback(self, feedback: FeedbackItem, server_nick: Optional[str]) -> None:
self._feedback_queue.put((feedback, self._perm_index, server_nick))
def _receive_one(self) -> bool:
"""Receive a result/progress message and send it on. Returns true if
more work should be requested."""
msg = self._port.receive_json()
msg_type = json_prop(msg, "type", str)
if msg_type == "need_work":
return True
server_nick = json_prop(msg, "server", str)
if msg_type == "init_done":
base_hash = json_prop(msg, "hash", str)
my_base_hash = self._permuter_data.base_hash
text = "connected"
if base_hash != my_base_hash:
text += " (note: mismatching hash)"
self._feedback(Message(text), server_nick)
return True
if msg_type == "init_failed":
text = "failed to initialize: " + json_prop(msg, "reason", str)
self._feedback(Message(text), server_nick)
return False
if msg_type == "disconnect":
self._feedback(Message("disconnected"), server_nick)
return False
if msg_type == "result":
source: Optional[str] = None
if msg.get("has_source") == True:
# Source is sent separately, compressed, since it can be
# large (hundreds of kilobytes is not uncommon).
compressed_source = self._port.receive()
try:
source = zlib.decompress(compressed_source).decode("utf-8")
except Exception as e:
text = "failed to decompress: " + exception_to_string(e)
self._feedback(Message(text), server_nick)
return True
try:
result = _result_from_json(msg, source)
self._feedback(WorkDone(self._perm_index, result), server_nick)
except Exception as e:
text = "failed to parse result message: " + exception_to_string(e)
self._feedback(Message(text), server_nick)
return True
raise ValueError(f"Invalid message type {msg_type}")
def run(self) -> None:
finish_reason: Optional[str] = None
try:
self._send_permuter()
self._port.receive_json()
finished = False
# Main loop: send messages from the queue on to the server, and
# vice versa. Currently we are being lazy and alternate between
# sending and receiving; this is nicely simple and keeps us on a
# single thread, however it could cause deadlocks if the server
# receiver stops reading because we aren't reading fast enough.
while True:
if not self._receive_one():
continue
self._feedback(NeedMoreWork(), None)
# Read a task and send it on, unless there are no more tasks.
if not finished:
task = self._task_queue.get()
if isinstance(task, Finished):
# We don't have a way of indicating to the server that
# all is done: the server currently doesn't track
# outstanding work so it doesn't know when to close
# the connection. (Even with this fixed we'll have the
# problem that servers may disconnect, losing work, so
# the task never truly finishes. But it might work well
# enough in practice.)
finished = True
else:
work = {
"type": "work",
"work": {
"seed": task[1],
},
}
self._port.send_json(work)
except EOFError:
finish_reason = "disconnected from permuter@home"
except Exception as e:
errmsg = exception_to_string(e)
finish_reason = f"permuter@home error: {errmsg}"
finally:
self._feedback(Finished(reason=finish_reason), None)
self._port.shutdown()
self._port.close()
def start_client(
port: SocketPort,
permuter: Permuter,
perm_index: int,
feedback_queue: "Queue[Feedback]",
priority: float,
) -> "Tuple[threading.Thread, Queue[Task], Tuple[int, int, float]]":
port.send_json(
{
"method": "connect_client",
"priority": priority,
}
)
obj = port.receive_json()
if "error" in obj:
err = json_prop(obj, "error", str)
# TODO use another exception type
raise Exception(f"Failed to connect: {err}")
num_servers = json_prop(obj, "servers", int)
num_clients = json_prop(obj, "clients", int)
num_cores = json_prop(obj, "cores", float)
permuter_data = make_portable_permuter(permuter)
task_queue: "Queue[Task]" = Queue()
conn = Connection(
port,
permuter_data,
perm_index,
task_queue,
feedback_queue,
)
thread = threading.Thread(target=conn.run, daemon=True)
thread.start()
stats = (num_clients, num_servers, num_cores)
return thread, task_queue, stats
|
the-stack_0_2110 | # TIC TAC TOE Minmax algorithm
'''
1. Backtracking algorithm
2. Max will try to maximize its utility
3. Min will try to minimize user or human utility to win
4. Time complexity : O(b^d)
b : branching factor (choices, number of possible move)
d : depth
'''
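# The move selection in compMove() below uses hand-written heuristics
# (win, block, corner, center, edge) rather than a full game-tree search.
# The function below is only an illustrative sketch of the minimax idea
# described in the docstring above; nothing in this script calls it.
def minimax_score(state, me, opponent, my_turn):
    '''Minimax value of a position: +1 = win for me, -1 = win for opponent, 0 = draw.'''
    if isWinner(state, me):
        return 1
    if isWinner(state, opponent):
        return -1
    moves = [i for i in range(1, 10) if state[i] == ' ']
    if not moves:
        return 0
    scores = []
    for m in moves:
        state[m] = me if my_turn else opponent
        scores.append(minimax_score(state, me, opponent, not my_turn))
        state[m] = ' '
    return max(scores) if my_turn else min(scores)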
# Format colour
import random
bright_cyan = "\033[0;96m"
# import package
board = [' ' for i in range(10)]
def insertLetter(letter, pos):
'''
insert either 'O' or 'X' at perticular position
'''
board[pos] = letter
def spaceIsFree(pos):
'''
    Boolean : check whether the given board position is empty
'''
return board[pos] == ' '
def printBoard(board):
'''
Display the board
'''
# "board" is a list of 10 strings representing the board (ignore index 0)
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
def isWinner(board, letter):
'''
Boolean : check whether winning criteria met or not
'''
# condition of horizontal, vertical, diagonal
return (board[1] == letter and board[2] == letter and board[3] == letter) or \
(board[4] == letter and board[5] == letter and board[6] == letter) or \
(board[7] == letter and board[8] == letter and board[9] == letter) or \
(board[1] == letter and board[4] == letter and board[7] == letter) or \
(board[2] == letter and board[5] == letter and board[8] == letter) or \
(board[3] == letter and board[6] == letter and board[9] == letter) or \
(board[1] == letter and board[5] == letter and board[9] == letter) or \
(board[3] == letter and board[5] == letter and board[7] == letter)
def playerMove():
'''
Take the input from user and validate user's input
'''
run = True
while run:
try:
move = int(input("Select a position to place \'X\' (1-9) : "))
if move > 0 and move < 10:
if spaceIsFree(move):
run = False
insertLetter('X', move)
else:
print("Position is already occupied 😳")
else:
print("Please enter valid position within the valid range 😏")
        except ValueError:
print("Please enter the valid number 😏")
def compMove():
'''
    Decide the computer's move, i.e. where to place 'O', trying to win
'''
# 1. winning move
# 2. Block move ,if human gets benefited
# 3. move at corner
# 4. move at center
# 5. move at any edge
possibleMove = [
x for x, letter in enumerate(board) if letter == ' ' and x != 0
]
move = 0
# 1st way -> To check whether computer can win or not , if not then
# computer now tries to block opponent move, so that he could not win
for let in ['O', 'X']:
for i in possibleMove:
# replica of board
boardCopy = board[:]
boardCopy[i] = let
if isWinner(boardCopy, let):
move = i
return move
if board[1] == 'X' or board[3] == 'X' or board[7] == 'X' or board[9] == 'X':
if 5 in possibleMove:
move = 5
return move
edgesOpen = []
if (board[1] == 'X' and board[9] == 'X') or (board[3] == 'X'
and board[7] == 'X'):
for i in possibleMove:
if i in [2, 4, 6, 8]:
edgesOpen.append(i)
# randomly select a corner to move Into
if len(edgesOpen) > 0:
move = selectRandom(edgesOpen)
return move
# Same code repeat for edges also
cornersOpen = []
# Check whether there is any corner is empty if find empty then we place
# letter in that corner position
for i in possibleMove:
if i in [1, 3, 7, 9]:
cornersOpen.append(i)
# randomly select a corner to move Into
if len(cornersOpen) > 0:
move = selectRandom(cornersOpen)
return move
# Place letter at center pow
if 5 in possibleMove:
move = 5
return move
# Check whether there is any edge is empty if find empty then we place
# letter in that edge position
for i in possibleMove:
if i in [2, 4, 6, 8]:
edgesOpen.append(i)
# randomly select a corner to move Into
if len(edgesOpen) > 0:
move = selectRandom(edgesOpen)
return move
def selectRandom(li):
return random.choice(li)
def isBoardFull(board):
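    # board[0] is never used, so a single remaining blank means every cell is taken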
if board.count(' ') > 1:
return False
else:
return True
# Human = 'X'
# Bot = 'O'
def main():
'''
Main function
'''
print(bright_cyan +
"# ----------- Welcome to TIC TAC TOE ------------- #")
name = input("Enter your name : ")
print("👲 {} : \'X\' and 🤖 Computer : \'O\' ".format(name.capitalize()))
print()
printBoard(board)
while not (isBoardFull(board)):
if not isWinner(board, 'O'):
playerMove() # Ask player for next move
printBoard(board) # print board
else:
print("\nOOPS O\'s won the game 😞 !!")
break
if not isWinner(board, 'X'):
move = compMove() # Ask computer for next move
if move == 0:
print('Tie game !!')
else:
insertLetter('O', move)
print("Computer enter \'O\' at Position : {}".format(move))
printBoard(board) # print board
else:
print("\nYeah X\'s won the game 😎 !!")
break
if isBoardFull(board):
print("Game over !!")
main()
while True:
print()
ans = input("Do want to play again 😀 ... ? (Y|N) : ")
print() # next line
    if ans.lower() == 'y':
board = [' ' for i in range(10)]
main()
else:
break
|
the-stack_0_2111 | from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.signals import user_logged_in
def add_cart_wish(sender, user, request, **kwargs):
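    # On login, merge any Cart/WishList built up on the anonymous session into
    # the user's own: if the user already has items, copy over the missing
    # products and delete the session copy; otherwise hand the session object
    # itself over to the user.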
from products.models import Cart, WishList
if request.session.exists(request.session.session_key):
cart_session = Cart.objects.filter(session_key=request.session.session_key)
wish_session = WishList.objects.filter(session_key=request.session.session_key)
cart = Cart.objects.filter(user=user)
wish = WishList.objects.filter(user=user)
if cart_session:
cart_session = cart_session.first()
if cart and cart.first().products.count() > 0:
cart = cart.first()
plist = []
for p in cart.products.all():
plist.append(p.product)
for p in cart_session.products.all():
if p.product not in plist:
p.cart = cart
p.save()
cart_session.delete()
else:
if cart:
cart.first().delete()
cart_session.user = user
cart_session.session_key = None
cart_session.save()
if wish_session:
wish_session = wish_session.first()
if wish and wish.first().products.count() > 0:
wish = wish.first()
for p in wish_session.products.all():
if p not in wish.products.all():
wish.products.add(p)
wish.save()
wish_session.delete()
else:
if wish:
wish.first().delete()
wish_session.user = user
wish_session.session_key = None
wish_session.save()
def add_site_info(sender, **kwargs):
from .models import SiteInfo
try:
info = SiteInfo.objects.all().first()
except ObjectDoesNotExist:
info = None
if info is None:
info = SiteInfo(
name='name',
name_ar='name_ar',
address='address',
address_ar='address_ar',
email='[email protected]',
phone='0123456789',
facebook='www.facebook.com',
twitter='www.twitter.com',
instagram='www.instagram.com',
linkedin='www.linkedin.com'
)
info.save()
class AccountsConfig(AppConfig):
name = 'accounts'
def ready(self):
post_migrate.connect(add_site_info, sender=self)
user_logged_in.connect(add_cart_wish)
|
the-stack_0_2112 | # -*- coding: utf-8 -*-
"""datasettings.py
The user needs to define the required data to be stored on the containers.
This container stores all the attributes and settings for the required data.
Created on Sat Mar 19 18:30:00 2022 @author: Dan Kotlyar and Bailey Painter
Last updated on Tue Apr 01 11:30:00 2022 @author: Dan Kotlyar
email: [email protected]
"""
import numpy as np
from xsInterface.errors.checkerrors import _isint, _islist, _isbool, _inlist,\
_ispositive, _isstr, _isuniquelist, _isarray,\
_is1darray, _isequallength, _isBoundArray
from xsInterface.containers.container_header import DATA_TYPES
class DataSettings():
"""
Stores the names and data that are expected to be stored on containers
Parameters
-----------
NG : int
number of energy groups for multi-group parameters
DN : int
Delayed neutron groups for kinetic parameters
macro : boolean
indicate whether macro data is expected to be provided
micro : boolean
indicate whether micro data is expected to be provided
kinetics : boolean
indicate whether kinetic data is expected to be provided
meta : boolean
indicate whether meta data is expected to be provided
isotopes : array
ZZAAA0/1 for all the isotopes to be provided
Attributes
-----------
NG : int
number of energy groups for multi-group parameters
DN : int
delayed neutron groups for kinetic parameters
dataFlags : dict
boolean flags to indicate the data types that are provided
macro : dict
contains all the macro attributes (e.g., ``abs``)
micro : boolean
contains all the micro attributes for all the isotopes (e.g., ``fiss``)
kinetics : boolean
contains all the kinetic attributes (e.g., ``beta``)
meta : boolean
contains all the metadata attributes (e.g., ``time``)
Methods
--------
    AddData(dataType, attributes):
Add relevant macroscopic/microscopic/meta data
Raises
-------
TypeError
If any of the parameters, e.g., ``NG``, ``DN`` are not integers.
If any of the ``macro``, ``micro``, ``kinetics``, ``meta``
are not booleans.
ValueError
If ``NG`` is below one.
If ``DN`` is below one.
If ``isotopes`` list is not provided but ``micro`` data is expected.
KeyError
        If ``dataType`` does not exist in DATA_TYPES.
Examples
---------
>>> rc = DataSettings(NG=2, DN=7, macro=True, micro=False, kinetics=True,
>>> meta=False, isotopes=None)
"""
def __init__(self, NG, DN, macro=True, micro=False, kinetics=False,
meta=False, isotopes=None):
"""Assign parameters that describe the required data to be provided"""
# Check variables types
_isint(NG, "number of energy groups")
_isint(DN, "number of delayed neutron groups")
_isbool(macro, "macro data")
_isbool(micro, "micro data")
_isbool(kinetics, "kinetics data")
_isbool(meta, "meta data")
# Check values/entries for different variables
_ispositive(NG, "number of energy groups")
_ispositive(DN, "number of delayed neutron groups")
if micro:
if isotopes is not None:
isotopes = np.array(isotopes, dtype=int)
else:
raise ValueError("<isotopes> list/array must be provided")
# Reset variables
self.ng = NG # number of energy groups
self.dn = DN # number of delayed neutron groups
self.isotopes = isotopes
self.dataFlags = {"macro": macro, "micro": micro,
"kinetics": kinetics, "meta": meta}
self.macro = []
self.micro = []
self.kinetics = []
self.meta = []
def AddData(self, dataType, attributes):
"""Add relevant macroscopic/microscopic/meta data
Parameters
----------
dataType : ["macro", "micro", "kinetics", "meta"]
type of data
attributes : list of strings
user-defined names for the provided data type (e.g., ``abs``)
Examples
--------
>>> rc.AddData("macro", ["abs", "nsf", "sct"], "array")
>>> rc.AddData("kinetics", ["beta", "decay"], "array")
"""
# Error checking
_isstr(dataType, "data types")
_inlist(dataType, "data types", DATA_TYPES)
if not self.dataFlags[dataType]:
raise ValueError("Data type <{}> was disabled when DataSettings "
"object was created".format(dataType))
_islist(attributes, "names of "+dataType+" attributes")
_isuniquelist(attributes, "attribute names in ")
# check if data is already populated
data0 = getattr(self, dataType)
if data0 == []: # data is new
# define the specific dictionary for the selected data type
attrList = attributes
else: # data already exists
attr0 = data0
# create a new/appended list of attributes
attr1 = attr0 + attributes
_isuniquelist(attr1, "attribute names in ")
attrList = attr1
# set a muted attribute with the settings for the selected data type
setattr(self, dataType, attrList)
def _proofTest(self):
"""Check that data was inputted"""
if self.dataFlags["macro"] and self.macro == []:
raise ValueError("macro data is expected to be provided.")
if self.dataFlags["micro"] and self.micro == []:
raise ValueError("micro data is expected to be provided.")
if self.dataFlags["kinetics"] and self.kinetics == []:
raise ValueError("kinetics data is expected to be provided.")
if self.dataFlags["meta"] and self.meta == []:
raise ValueError("meta data is expected to be provided.")
|
the-stack_0_2113 | # -*- coding: utf-8 -*-
from sopel import web
from sopel.module import commands
import re
def is_http_url(s):
    if re.match(r'(?:www)?(?:[\w-]{2,255}(?:\.\w{2,6}){1,2})(?:/[\w&%?#-]{1,300})?', s):
return True
else:
return False
@commands('isup')
def isup(bot, trigger):
site = trigger.group(2)
if not site:
if bot.config.lang == 'fr':
return bot.reply("Quel website veux-tu que je verifie?")
elif bot.config.lang == 'es':
return bot.reply("Que web quieres que compruebe?")
else:
return bot.reply("What web do you want to check?")
if 'localhost' in site or '127.0.0.1' in site or '0::1' in site:
bot.reply("I'm minding on not say you it.")
return
    elif site[:7] != 'http://' and site[:8] != 'https://':
if '://' in site:
protocol = site.split('://')[0] + '://'
if bot.config.lang == 'fr':
return bot.reply("Tournez à tenter sans le %s" % protocol)
elif bot.config.lang == 'es':
return bot.reply("Vuelve a intentar sin el %s" % protocol)
else:
return bot.reply("Try it again without the %s" % protocol)
else:
if is_http_url(site) is False:
return bot.reply("That URL looks not valid for me.")
site = 'http://' + site
try:
response = web.get(site)
except Exception:
if bot.config.lang == 'fr':
            bot.say('Semble que ' + site + ' ne fonctionne pas ou n\'existe pas.')
elif bot.config.lang == 'es':
bot.say('Parece que ' + site + ' no funciona o no existe.')
else:
bot.say(site + ' looks down from here.')
return
if response:
if bot.config.lang == 'fr':
bot.say('Il n\'y a pas d\'aucun problème à ' + site)
elif bot.config.lang == 'es':
bot.say('No veo ningun problema en ' + site)
else:
bot.say(site + ' looks fine to me.')
else:
if bot.config.lang == 'fr':
bot.say('Semble que ' + site + ' ne fonctionne pas ou n\'existe pas.')
elif bot.config.lang == 'es':
bot.say('Parece que ' + site + ' no funciona o no existe.')
else:
bot.say(site + ' looks down from here.')
return
|
the-stack_0_2115 | # Copyright (c) 2020 Sorin Sbarnea <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
class MissingFilePermissionsRule(AnsibleLintRule):
id = "208"
shortdesc = 'File permissions not mentioned'
description = (
"Missing mode parameter can cause unexpected file permissions based "
"on version of Ansible being used. Be explicit, or if you still "
"want the default behavior you can use ``mode: preserve`` to avoid "
"hitting this rule. See "
"https://github.com/ansible/ansible/issues/71200"
)
severity = 'VERY_HIGH'
tags = ['unpredictability']
version_added = 'v4.3.0'
_modules = (
'copy',
'file',
'ini_file',
'lineinfile',
'replace',
'template',
'unarchive',
)
def matchtask(self, file, task):
if task["action"]["__ansible_module__"] not in self._modules:
return False
if task['action'].get('state', None) == "absent":
return False
mode = task['action'].get('mode', None)
return mode is None
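# Example of a task this rule flags (illustrative playbook snippet, not part of
# ansible-lint itself): one of the listed modules, state not "absent", no mode.
#
#   - name: deploy application config
#     template:
#       src: app.conf.j2
#       dest: /etc/app.conf    # no "mode:" given -> rule 208 matches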
|
the-stack_0_2118 | #!/usr/bin/env python
import logging
import warnings
import numpy as np
from numpy.lib.ufunclike import isposinf
from scipy.stats import chi
EPS = 1e-8
class MultiVariateNormalDistribution(object):
def __init__(self, shift, scale, cov, dim=None):
# main components
self.shift = shift
self.scale = scale
self.cov = cov
# params
self.dim = dim if dim is not None else shift.shape[0]
# states
self.eigvecs = None
self.eigvals = None
self.inv_cov = None
self.invsqrt_cov = None
self.rev = None
# decompose cov
self.decomposed = False
def decompose(self, force_positive=False, shrinkage=0, rescale=None, bound_size=float('inf')):
# force symmetric
self.cov = (self.cov + self.cov.T) / 2.0
# solve
self.eigvals, self.eigvecs = np.linalg.eigh(self.cov)
# force positive definite
if force_positive:
self.eigvals = np.clip(self.eigvals, EPS, None)
# shrinkage
if shrinkage > 0:
trace_cov = np.sum(self.eigvals)
self.eigvals = (1 - shrinkage) * self.eigvals + shrinkage * (trace_cov / self.dim) * np.ones(self.dim)
# rescale
if rescale is not None:
ratio = (self.scale / rescale) ** 2
self.cov *= ratio
self.eigvals *= ratio
self.scale = rescale
# restrict max length
base_length = chi.mean(self.dim) + 2.0 * chi.std(self.dim)
max_eigval = (bound_size / base_length) ** 2
self.eigvals = np.clip(self.eigvals, EPS, max_eigval)
# computing
with warnings.catch_warnings(record=True) as w:
self.cov = np.dot(self.eigvecs, np.diag(self.eigvals)).dot(self.eigvecs.T)
#inv cov
self.inv_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** -1)).dot(self.eigvecs.T)
# inv sqrt cov
self.invsqrt_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** -0.5)).dot(self.eigvecs.T)
# sqrt cov
self.sqrt_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** 0.5)).dot(self.eigvecs.T)
# reverse projection matrix
self.rev = np.dot(np.diag(self.eigvals ** -0.5), self.eigvecs.T)
# handle warnings
if len(w) > 0:
print("Eigvals: ", self.eigvals)
print("Sigma: ", self.scale)
raise Exception("Negative eigval")
        self.decomposed = True  # mark as decomposed so sample() does not recompute
def sample(self, num, remap=None):
if not self.decomposed:
self.decompose()
bias = np.random.normal(size=[num, self.dim])
amp_bias = self.scale * (self.eigvals ** 0.5)[np.newaxis,:] * bias
rot_bias = np.dot(amp_bias, self.eigvecs.T)
samples = self.shift[np.newaxis,:] + rot_bias
if remap is not None:
samples = remap(samples)
return samples
def dispersion(self, X):
x = X.reshape(-1, self.dim)
y = x - self.shift[np.newaxis, :]
z = np.dot(y / self.scale, self.invsqrt_cov)
dens = np.sum(z ** 2, axis=1)
if len(X.shape) == 1:
dens = dens[0]
return dens
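# Minimal usage sketch (assumed shapes and values, not taken from calling code):
#
#   mvn = MultiVariateNormalDistribution(shift=np.zeros(2), scale=1.0, cov=np.eye(2))
#   xs = mvn.sample(5)           # -> (5, 2) array of draws
#   d = mvn.dispersion(xs[0])    # squared Mahalanobis-style distance of one draw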
|
the-stack_0_2121 | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# DeepGlint is pleased to support the open source community by making EasyQuant available.
# Copyright (C) 2020 DeepGlint. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Modified from https://github.com/BUG1989/caffe-int8-convert-tools
# BUG1989 is pleased to support the open source community by supporting ncnn available.
#
# Copyright (C) 2019 BUG1989. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Quantization module for generating the calibration tables will be used by
quantized (INT8) models from FP32 models. With bucket split, a [k, k, cin, cout]
weight tensor is cut into "cout" buckets.
This tool is based on Caffe Framework.
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import math, copy
import matplotlib.pyplot as plt
import sys,os
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
import time
import datetime
from google.protobuf import text_format
from scipy import stats
import cv2
# np.set_printoptions(threshold='nan')
np.set_printoptions(suppress=True)
def parse_args():
parser = argparse.ArgumentParser(
description='find the pretrained caffe models int8 quantize scale value')
parser.add_argument('--proto', dest='proto',
help="path to deploy prototxt.", type=str)
parser.add_argument('--model', dest='model',
help='path to pretrained weights', type=str)
parser.add_argument('--mean', dest='mean',
help='value of mean', type=float, nargs=3)
parser.add_argument('--norm', dest='norm',
help='value of normalize', type=float, nargs=1, default=1.0)
parser.add_argument('--images', dest='images',
help='path to calibration images', type=str)
parser.add_argument('--output', dest='output',
help='path to output calibration table file', type=str, default='calibration-dev.table')
parser.add_argument('--group', dest='group',
help='enable the group scale', type=int, default=1)
parser.add_argument('--gpu', dest='gpu',
help='use gpu to forward', type=int, default=0)
parser.add_argument('--threshold', dest='threshold',
help='the threshold of activations', type=float, default=float('inf'))
parser.add_argument('--histgram', dest='histgram',
help='whether to generate activation histograms', type=bool, default=False)
parser.add_argument('--cv2', dest='cv2',help='whether use opencv read image', type=bool, default=False)
args = parser.parse_args()
return args, parser
global args, parser
args, parser = parse_args()
# global params
QUANTIZE_NUM = 127  # symmetric int8 quantization: scales map values into [-127, 127]
QUANTIZE_WINOGRAND_NUM = 127  # scale used for winograd-eligible conv3x3s1 weights (same range here)
STATISTIC = 1
INTERVAL_NUM = 2001
# ugly global params
quantize_layer_lists = []
def image_processing(image, image_size, mean_value):
w = image.shape[1]
h = image.shape[0]
m = min(w, h)
ratio = 256.0 / m
new_w, new_h = int(ratio * w), int(ratio * h)
image = cv2.resize(image, (new_w, new_h))
image = image.astype(np.float32)
top = (new_w - image_size)//2
left = (new_h - image_size)//2
image = image[left:left+image_size, top:top+image_size]
image = image - mean_value
image = image.transpose(2, 0, 1)
return image # bgr, chw, normalized
class QuantizeLayer:
def __init__(self, name, blob_name, group_num):
self.name = name
self.blob_name = blob_name
self.group_num = group_num
self.weight_scale = np.zeros(group_num)
self.blob_max = 0.0
self.blob_distubution_interval = 0.0
self.blob_distubution = np.zeros(INTERVAL_NUM)
self.blob_distubution_edges= np.zeros(INTERVAL_NUM)
self.blob_threshold = 0
self.blob_scale = 1.0
self.group_zero = np.zeros(group_num)
self.pc= True
def quantize_weight(self, weight_data, flag):
# spilt the weight data by cout num
blob_group_data = np.array_split(weight_data, self.group_num)
#add by diwu
glob_group_max= np.max(weight_data)
glob_group_min= np.min(weight_data)
glob_group_threshold = max(abs(glob_group_max), abs(glob_group_min))
for i, group_data in enumerate(blob_group_data):
#per channel quant
if self.pc:
max_val = np.max(group_data)
min_val = np.min(group_data)
threshold = max(abs(max_val), abs(min_val))
if threshold < 0.0001:
self.weight_scale[i] = 0
self.group_zero[i] = 1
else:
if(flag == True):
self.weight_scale[i] = QUANTIZE_WINOGRAND_NUM / threshold
else:
self.weight_scale[i] = QUANTIZE_NUM / threshold
print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, threshold, self.weight_scale[i]))
else:
if glob_group_threshold < 0.0001:
self.weight_scale[i] = 0
self.group_zero[i] = 1
else:
if(flag == True):
self.weight_scale[i] = QUANTIZE_WINOGRAND_NUM / glob_group_threshold
else:
self.weight_scale[i] = QUANTIZE_NUM / glob_group_threshold
print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, glob_group_threshold, self.weight_scale[i]))
def initial_blob_max(self, blob_data):
# get the max value of blob
max_val = np.max(blob_data)
min_val = np.min(blob_data)
self.blob_max = max(self.blob_max, max(abs(max_val), abs(min_val)))
# Avoid unusually large activation by clip blob_max with threshold
self.th= min(self.blob_max, args.threshold)
def initial_blob_distubution_interval(self):
self.blob_distubution_interval = STATISTIC * self.th / INTERVAL_NUM
print("%-20s max_val : %-10.8f distribution_intervals : %-10.8f" % (self.name, self.blob_max, self.blob_distubution_interval))
def initial_histograms(self, blob_data):
# collect histogram of every group channel blob
th= self.th
# Truncate the boundary of the active hist graph,
# so the number exceeding the boundary value will not fall into statistics.
# add by diwu
hist, hist_edge = np.histogram(blob_data, bins=INTERVAL_NUM, range=(0, th))
self.blob_distubution_edges = hist_edge
self.blob_distubution += hist
def quantize_blob(self):
# calculate threshold
distribution = np.array(self.blob_distubution)
# pick threshold which minimizes KL divergence
threshold_bin = threshold_distribution(distribution)
self.blob_threshold = threshold_bin
threshold = (threshold_bin + 0.5) * self.blob_distubution_interval
# get the activation calibration value
self.blob_scale = QUANTIZE_NUM / threshold
#self.blob_scale = np.max(self.blob_scale,1) #add by diwu
print("%-20s bin : %-8d threshold : %-10f interval : %-10f scale : %-10f" % (self.name, threshold_bin, threshold, self.blob_distubution_interval, self.blob_scale))
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
def threshold_distribution(distribution, target_bin=128):
"""
Return the best threshold value.
Ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
Args:
distribution: list, activations has been processed by histogram and normalize,size is 2048
target_bin: int, the num of bin that is used by quantize, Int8 default value is 128
Returns:
target_threshold: int, num of bin with the minimum KL
"""
distribution = distribution[1:]
length = distribution.size
threshold_sum = sum(distribution[target_bin:])
kl_divergence = np.zeros(length - target_bin)
for threshold in range(target_bin, length):
sliced_nd_hist = copy.deepcopy(distribution[:threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
p[threshold-1] += threshold_sum
threshold_sum = threshold_sum - distribution[threshold]
# is_nonzeros[k] indicates whether hist[k] is nonzero
is_nonzeros = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = sliced_nd_hist.size // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = is_nonzeros[start:stop].sum()
if norm != 0:
q[start:stop] = float(quantized_bins[j]) / float(norm)
q[p == 0] = 0
p = _smooth_distribution(p) # with some bugs, need to fix
q = _smooth_distribution(q)
p[p == 0] = 0.0001
q[q == 0] = 0.0001
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
min_kl_divergence = np.argmin(kl_divergence)
threshold_value = min_kl_divergence + target_bin
return threshold_value
def net_forward(net, image_path, transformer=None, image_size=224, mean_value=[103.939, 116.779, 123.68]):
"""
network inference and statistics the cost time
Args:
net: the instance of Caffe inference
image_path: a image need to be inference
transformer: caffe io transformar
image_size: image shape of blob data
mean_value: mean value for normalization
Returns:
none
"""
if args.cv2:
# load image
image = cv2.imread(image_path)
image = image_processing(image, image_size, mean_value)
net.blobs['data'].reshape(1, 3, image_size, image_size)
net.blobs['data'].data[...] = np.array([image], dtype=np.float32)
else:
# load image
im = caffe.io.load_image(image_path)
nh, nw = 224, 224
h, w, _ = im.shape
if h < w:
off = int((w - h) / 2)
im = im[:, off:off + h]
else:
off = int((h - w) / 2)
im = im[off:off + h, :]
im = caffe.io.resize_image(im, [nh, nw])
# transformer.preprocess the image
net.blobs['data'].data[...] = transformer.preprocess('data', im)
# net forward
output = net.forward()
def file_name(file_dir):
"""
Find the all file path with the directory
Args:
file_dir: The source file directory
Returns:
files_path: all the file path into a list
"""
files_path = []
for root, dir, files in os.walk(file_dir):
for name in files:
file_path = root + "/" + name
print(file_path)
files_path.append(file_path)
return files_path
def network_prepare(net, mean, norm):
"""
instance the prepare process param of caffe network inference
Args:
net: the instance of Caffe inference
mean: the value of mean
norm: the value of normalize
Returns:
none
"""
print("Network initial")
img_mean = np.array(mean, dtype=np.float32)
# initial transformer
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# convert hwc to cwh
transformer.set_transpose('data', (2,0,1))
# convert RGB -> BGR
transformer.set_channel_swap('data', (2,1,0))
# resize image data from [0,1] to [0,255]
transformer.set_raw_scale('data', 255)
# load meanfile
transformer.set_mean('data', img_mean)
# normalize
transformer.set_input_scale('data', norm)
return transformer
def weight_quantize(net, net_file, group_on, winograd=False):
"""
CaffeModel convolution weight blob Int8 quantize
Args:
net: the instance of Caffe inference
net_file: deploy caffe prototxt
Returns:
none
"""
print("\nQuantize the kernel weight:")
# parse the net param from deploy prototxt
params = caffe_pb2.NetParameter()
with open(net_file) as f:
text_format.Merge(f.read(), params)
for i, layer in enumerate(params.layer):
# find the convolution layers to get out the weight_scale
if(layer.type == "Convolution" or layer.type == "ConvolutionDepthwise"):
weight_blob = net.params[layer.name][0].data
# initial the instance of QuantizeLayer Class lists,you can use enable group quantize to generate int8 scale for each group layer.convolution_param.group
if (group_on == 1):
quanitze_layer = QuantizeLayer(layer.name, layer.bottom[0], layer.convolution_param.num_output)
else:
quanitze_layer = QuantizeLayer(layer.name, layer.bottom[0], 1)
if not winograd:
# quantize the weight value using QUANTIZE_WINOGRAND_NUM for all layers
quanitze_layer.quantize_weight(weight_blob, True)
else:
# quantize the weight value using 6bit for conv3x3s1 layer to winograd F(4,3)
if(layer.type == "Convolution" and layer.convolution_param.kernel_size[0] == 3 and ((len(layer.convolution_param.stride) == 0) or layer.convolution_param.stride[0] == 1)):
if(layer.convolution_param.group != layer.convolution_param.num_output):
quanitze_layer.quantize_weight(weight_blob, True)
else:
quanitze_layer.quantize_weight(weight_blob, False)
# quantize the weight value using 8bit for another conv layers
else:
quanitze_layer.quantize_weight(weight_blob, False)
# add the quantize_layer into the save list
quantize_layer_lists.append(quanitze_layer)
return None
def activation_quantize(net, transformer, images_files):
"""
Activation Int8 quantize, optimaize threshold selection with KL divergence,
given a dataset, find the optimal threshold for quantizing it.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
Args:
net: the instance of Caffe inference
transformer:
images_files: calibration dataset
Returns:
none
"""
print("\nQuantize the Activation:")
# run float32 inference on calibration dataset to find the activations range
for i , image in enumerate(images_files):
# inference
net_forward(net, image, transformer)
# find max threshold
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
layer.initial_blob_max(blob)
if i % 100 == 0:
print("loop stage 1 : %d/%d" % (i, len(images_files)))
# calculate statistic blob scope and interval distribution
for layer in quantize_layer_lists:
layer.initial_blob_distubution_interval()
# for each layers
# collect histograms of activations
print("\nCollect histograms of activations:")
for i, image in enumerate(images_files):
net_forward(net, image, transformer)
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
blob= blob[blob > 0]
layer.initial_histograms(blob)
if i % 100 == 0:
print("loop stage 2 : %d/%d" % (i, len(images_files)))
# calculate threshold with KL divergence
if args.histgram:
from collections import OrderedDict
quant_hist= OrderedDict()
for layer in quantize_layer_lists:
layer.quantize_blob()
if args.histgram:
quant_hist[layer.name]= (layer.blob_max,layer.blob_distubution,layer.blob_distubution_edges,QUANTIZE_NUM/layer.blob_scale)
if args.histgram:
import pickle
with open('histgram.pkl','wb') as f:
pickle.dump(quant_hist,f)
print('save histograms success! use plot script to generate graphs')
return None
def save_calibration_file(calibration_path):
calibration_file = open(calibration_path, 'w')
# save temp
save_temp = []
# save weight scale
for layer in quantize_layer_lists:
save_string = layer.name + "_param_0"
for i in range(layer.group_num):
save_string = save_string + " " + str(layer.weight_scale[i])
save_temp.append(save_string)
# save bottom blob scales
for layer in quantize_layer_lists:
save_string = layer.name + " " + str(layer.blob_scale)
save_temp.append(save_string)
# save into txt file
for data in save_temp:
calibration_file.write(data + "\n")
calibration_file.close()
# save calibration logs
save_temp_log = []
calibration_file_log = open(calibration_path + ".log", 'w')
for layer in quantize_layer_lists:
save_string = layer.name + ": value range 0 - " + str(layer.blob_max) \
+ ", interval " + str(layer.blob_distubution_interval) \
+ ", interval num " + str(INTERVAL_NUM) \
+ ", threshold num " + str(layer.blob_threshold) + "\n" \
+ str(layer.blob_distubution.astype(dtype=np.int64))
save_temp_log.append(save_string)
# save into txt file
for data in save_temp_log:
calibration_file_log.write(data + "\n")
def usage_info():
"""
usage info
"""
print("Input params is illegal...╮(╯3╰)╭")
print("try it again:\n python caffe-int8-scale-tools-dev.py -h")
def main():
"""
main function
"""
# time start
time_start = datetime.datetime.now()
print(args)
if args.proto == None or args.model == None or args.mean == None or args.images == None:
usage_info()
return None
# deploy caffe prototxt path
net_file = args.proto
# trained caffemodel path
caffe_model = args.model
# mean value
mean = args.mean
# norm value
norm = 1.0
if args.norm != 1.0:
norm = args.norm[0]
# calibration dataset
images_path = args.images
# the output calibration file
calibration_path = args.output
# enable the group scale
group_on = args.group
# default use CPU to forwark
if args.gpu != 0:
caffe.set_mode_gpu()
caffe.set_device(0)
# initial caffe net and the forword model(GPU or CPU)
net = caffe.Net(net_file,caffe_model,caffe.TEST)
# prepare the cnn network
transformer = network_prepare(net, mean, norm)
# get the calibration datasets images files path
images_files = file_name(images_path)
# quanitze kernel weight of the caffemodel to find it's calibration table
weight_quantize(net, net_file, group_on)
# quantize activation value of the caffemodel to find it's calibration table
activation_quantize(net, transformer, images_files)
# save the calibration tables,best wish for your INT8 inference have low accuracy loss :)
save_calibration_file(calibration_path)
# time end
time_end = datetime.datetime.now()
print("\nCaffe Int8 Calibration table create success, it's cost %s, best wish for your INT8 inference has a low accuracy loss...\(^▽^)/...2333..." % (time_end - time_start))
if __name__ == "__main__":
main()
|
the-stack_0_2123 | # -*- coding: utf-8 -*-
import six
from six.moves import urllib
from django import forms
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
try:
from django.templatetags import static
except ImportError:
# compatibility with django < 2.1
from django.contrib.admin.templatetags.admin_static import static
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
class ForeignKeySearchInput(ForeignKeyRawIdWidget):
"""
Widget for displaying ForeignKeys in an autocomplete search input
    instead of in a <select> box.
"""
# Set in subclass to render the widget with a different template
widget_template = None
# Set this to the patch of the search view
search_path = None
def _media(self):
js_files = [
static('django_extensions/js/jquery.bgiframe.js'),
static('django_extensions/js/jquery.ajaxQueue.js'),
static('django_extensions/js/jquery.autocomplete.js'),
]
return forms.Media(
css={'all': (static('django_extensions/css/jquery.autocomplete.css'), )},
js=js_files,
)
media = property(_media)
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.model._default_manager.get(**{key: value})
return Truncator(obj).words(14, truncate='...')
def __init__(self, rel, search_fields, attrs=None):
self.search_fields = search_fields
super(ForeignKeySearchInput, self).__init__(rel, site, attrs)
def render(self, name, value, attrs=None, renderer=None):
if attrs is None:
attrs = {}
opts = self.rel.model._meta
app_label = opts.app_label
model_name = opts.object_name.lower()
related_url = reverse('admin:%s_%s_changelist' % (app_label, model_name))
if not self.search_path:
self.search_path = urllib.parse.urljoin(related_url, 'foreignkey_autocomplete/')
params = self.url_parameters()
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
url = ''
if 'class' not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField'
# Call the TextInput render method directly to have more control
output = [forms.TextInput.render(self, name, value, attrs)]
if value:
label = self.label_for_value(value)
else:
label = six.u('')
context = {
'url': url,
'related_url': related_url,
'search_path': self.search_path,
'search_fields': ','.join(self.search_fields),
'app_label': app_label,
'model_name': model_name,
'label': label,
'name': name,
}
output.append(render_to_string(self.widget_template or (
'django_extensions/widgets/%s/%s/foreignkey_searchinput.html' % (app_label, model_name),
'django_extensions/widgets/%s/foreignkey_searchinput.html' % app_label,
'django_extensions/widgets/foreignkey_searchinput.html',
), context))
output.reverse()
return mark_safe(six.u('').join(output))
|
the-stack_0_2124 | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from modules import MGRU
torch.manual_seed(1111)
# Hyper Parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST Dataset
train_dataset = dsets.MNIST(root='../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='../data/',
train=False,
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# RNN Model (Many-to-One)
class RNNModel(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes, bias=True, grad_clip=None):
super(RNNModel, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.rnn = MGRU(input_size, hidden_size, num_layers=num_layers,
bias=bias, return_sequences=False, grad_clip=grad_clip)
self.fc = nn.Linear(hidden_size, num_classes, bias=bias)
def forward(self, x):
# Set initial states
initial_states = [Variable(torch.zeros(x.size(0), self.hidden_size)) for _ in range(self.num_layers)]
# Forward propagate RNN
out = self.rnn(x, initial_states)
# Decode hidden state of last time step
out = self.fc(out)
return out
rnn = RNNModel(input_size, hidden_size, num_layers, num_classes, bias=True, grad_clip=10)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images.view(-1, sequence_length, input_size))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = rnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images.view(-1, sequence_length, input_size))
outputs = rnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Model
torch.save(rnn.state_dict(), 'mgru.pkl')
|
the-stack_0_2125 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for merelcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a merelcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
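    # Typical use in a functional test (illustrative sketch, assuming the test
    # framework has already started the node as self.nodes[0]):
    #
    #   node = self.nodes[0]
    #   node.getblockcount()      # unknown attributes dispatch to the RPC proxy
    #   node.stop_node()
    #   node.wait_until_stopped()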
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
if extra_conf != None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
# adress , privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("merelcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the merelcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'merelcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to merelcoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to merelcoind
expected_msg: regex that stderr should match when merelcoind fails
Will throw if merelcoind starts without an error.
Will throw if an expected_msg is provided and it does not match merelcoind's stdout."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('merelcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "merelcoind should have exited with an error"
else:
assert_msg = "merelcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes merelcoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to merelcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run merelcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same merelcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running merelcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
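# Illustrative usage sketch for TestNodeCLI (the binary and datadir paths are placeholders,
# not values from this file). Attribute access goes through __getattr__ to send_cli, so RPC
# methods read like ordinary Python calls, and calling the object first adds CLI options.
def _example_cli_usage():
    cli = TestNodeCLI("/path/to/merelcoin-cli", "/tmp/test_datadir")  # hypothetical paths
    info = cli.getblockchaininfo()                   # merelcoin-cli getblockchaininfo
    balance = cli("-rpcwallet=w1").getbalance()      # extra option prepended, then the RPC
    batched = cli.batch([cli.getblockcount.get_request(),
                         cli.getbestblockhash.get_request()])
    return info, balance, batched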
|
the-stack_0_2129 | # Write a program that reads the name, age, and sex of 4 people.
# At the end of the program, show:
#
# - The group's average age
# - The name of the oldest man
# - How many women are under 20 years old
nome_velho = ''
idade_maior = 0
soma = 0
cont_media = 0
cont_feminino = 0
for c in range(1, 5):
    nome = str(input('What is your name: ')).strip().upper()
    idade = int(input('What is your age: '))
    sexo = str(input('What is your sex '
                     '\n(F) for female'
                     '\n(M) for male: ')).strip().upper()
soma += idade
cont_media += 1
if idade > idade_maior and sexo == 'M':
idade_maior = idade
nome_velho = nome
if sexo == 'F' and idade < 20:
cont_feminino += 1
media = soma/cont_media
print(f'The average age of the group is {media:.2f} years.'
      f'\nThe oldest man in the group is {nome_velho}, who is {idade_maior} years old.'
      f'\n{cont_feminino} women are under 20 years old')
|
the-stack_0_2131 | def data_bars(df, column):
n_bins = 100
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
ranges = [
((df[column].max() - df[column].min()) * i) + df[column].min()
for i in bounds
]
styles = []
for i in range(1, len(bounds)):
min_bound = ranges[i - 1]
max_bound = ranges[i]
max_bound_percentage = bounds[i] * 100
styles.append({
'if': {
'filter_query': (
'{{{column}}} >= {min_bound}' +
(' && {{{column}}} < {max_bound}' if (i < len(bounds) - 1) else '')
).format(column=column, min_bound=min_bound, max_bound=max_bound),
'column_id': column
},
'background': (
"""
linear-gradient(90deg,
#0074D9 0%,
#0074D9 {max_bound_percentage}%,
white {max_bound_percentage}%,
white 100%)
""".format(max_bound_percentage=max_bound_percentage)
),
'paddingBottom': 2,
'paddingTop': 2
})
return styles
def data_bars_diverging(df, column, color_above='#3D9970', color_below='#FF4136'):
n_bins = 100
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
col_max = df[column].max()
col_min = df[column].min()
ranges = [
((col_max - col_min) * i) + col_min
for i in bounds
]
midpoint = (col_max + col_min) / 2.
styles = []
for i in range(1, len(bounds)):
min_bound = ranges[i - 1]
max_bound = ranges[i]
min_bound_percentage = bounds[i - 1] * 100
max_bound_percentage = bounds[i] * 100
style = {
'if': {
'filter_query': (
'{{{column}}} >= {min_bound}' +
(' && {{{column}}} < {max_bound}' if (i < len(bounds) - 1) else '')
).format(column=column, min_bound=min_bound, max_bound=max_bound),
'column_id': column
},
'paddingBottom': 2,
'paddingTop': 2
}
if max_bound > midpoint:
background = (
"""
linear-gradient(90deg,
white 0%,
white 50%,
{color_above} 50%,
{color_above} {max_bound_percentage}%,
white {max_bound_percentage}%,
white 100%)
""".format(
max_bound_percentage=max_bound_percentage,
color_above=color_above
)
)
else:
background = (
"""
linear-gradient(90deg,
white 0%,
white {min_bound_percentage}%,
{color_below} {min_bound_percentage}%,
{color_below} 50%,
white 50%,
white 100%)
""".format(
min_bound_percentage=min_bound_percentage,
color_below=color_below
)
)
style['background'] = background
styles.append(style)
return styles
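# A minimal sketch of wiring these helpers into a Dash DataTable through
# style_data_conditional. The DataFrame, the column names and the Dash 2.x-style imports
# below are assumptions for illustration, not part of this module.
if __name__ == '__main__':
    import pandas as pd
    from dash import Dash, dash_table
    example_df = pd.DataFrame({'sales': [10, 40, 25, 90], 'delta': [-5, 12, -2, 7]})
    app = Dash(__name__)
    app.layout = dash_table.DataTable(
        data=example_df.to_dict('records'),
        columns=[{'name': c, 'id': c} for c in example_df.columns],
        # one rule list per styled column; the lists can simply be concatenated
        style_data_conditional=data_bars(example_df, 'sales') +
                               data_bars_diverging(example_df, 'delta'),
    )
    app.run_server(debug=True)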
|
the-stack_0_2132 | from Color_Console import *
import os
import platform
import re
from logic import *
def clear_console():
if is_linux:
os.system('clear')
else:
os.system('cls')
def get_move():
move = input().upper()
while not re.match("(1|2|3)-(A|B|C)", move):
ctext("Please observe format", "red")
move = input().upper()
return move
if __name__ == "__main__":
game = Game()
    is_linux = platform.system() == 'Linux'
while not game.have_winner() and not game.have_draw():
clear_console()
game.print_grid()
print("")
print(f"Now {game.paint_symbol(game.current_symbol)} move")
ctext("*Move must be printed in format DIGIT-SYMBOL where DIGIT is column, SYMBOL is row as on the above grid", "yellow")
move = get_move()
while not game.put(move):
ctext("Wrong move", "red")
move = get_move()
else:
clear_console()
game.print_grid()
print("")
if game.have_winner():
ctext(f"Winner is {game.get_winner()}", "green")
else:
ctext("Draw", "yellow")
|
the-stack_0_2134 | #!/usr/bin/python
# $Id:$
import ctypes
import pyglet
lib = ctypes.windll.wintab32
LONG = ctypes.c_long
BOOL = ctypes.c_int
UINT = ctypes.c_uint
WORD = ctypes.c_uint16
DWORD = ctypes.c_uint32
WCHAR = ctypes.c_wchar
FIX32 = DWORD
WTPKT = DWORD
LCNAMELEN = 40
class AXIS(ctypes.Structure):
_fields_ = (
('axMin', LONG),
('axMax', LONG),
('axUnits', UINT),
('axResolution', FIX32)
)
def get_scale(self):
return 1 / float(self.axMax - self.axMin)
def get_bias(self):
return -self.axMin
class ORIENTATION(ctypes.Structure):
_fields_ = (
('orAzimuth', ctypes.c_int),
('orAltitude', ctypes.c_int),
('orTwist', ctypes.c_int)
)
class ROTATION(ctypes.Structure):
_fields_ = (
('roPitch', ctypes.c_int),
('roRoll', ctypes.c_int),
('roYaw', ctypes.c_int),
)
class LOGCONTEXT(ctypes.Structure):
_fields_ = (
('lcName', WCHAR * LCNAMELEN),
('lcOptions', UINT),
('lcStatus', UINT),
('lcLocks', UINT),
('lcMsgBase', UINT),
('lcDevice', UINT),
('lcPktRate', UINT),
('lcPktData', WTPKT),
('lcPktMode', WTPKT),
('lcMoveMask', WTPKT),
('lcBtnDnMask', DWORD),
('lcBtnUpMask', DWORD),
('lcInOrgX', LONG),
('lcInOrgY', LONG),
('lcInOrgZ', LONG),
('lcInExtX', LONG),
('lcInExtY', LONG),
('lcInExtZ', LONG),
('lcOutOrgX', LONG),
('lcOutOrgY', LONG),
('lcOutOrgZ', LONG),
('lcOutExtX', LONG),
('lcOutExtY', LONG),
('lcOutExtZ', LONG),
('lcSensX', FIX32),
('lcSensY', FIX32),
('lcSensZ', FIX32),
('lcSysMode', BOOL),
('lcSysOrgX', ctypes.c_int),
('lcSysOrgY', ctypes.c_int),
('lcSysExtX', ctypes.c_int),
('lcSysExtY', ctypes.c_int),
('lcSysSensX', FIX32),
('lcSysSensY', FIX32),
)
# Custom packet format with fields
# PK_CHANGED
# PK_CURSOR
# PK_BUTTONS
# PK_X
# PK_Y
# PK_Z
# PK_NORMAL_PRESSURE
# PK_TANGENT_PRESSURE
# PK_ORIENTATION (check for tilt extension instead)?
class PACKET(ctypes.Structure):
_fields_ = (
('pkChanged', WTPKT),
('pkCursor', UINT),
('pkButtons', DWORD),
('pkX', LONG),
('pkY', LONG),
('pkZ', LONG),
('pkNormalPressure', UINT),
('pkTangentPressure', UINT),
('pkOrientation', ORIENTATION),
)
PK_CONTEXT = 0x0001 # reporting context
PK_STATUS = 0x0002 # status bits
PK_TIME = 0x0004 # time stamp
PK_CHANGED = 0x0008 # change bit vector
PK_SERIAL_NUMBER = 0x0010 # packet serial number
PK_CURSOR = 0x0020 # reporting cursor
PK_BUTTONS = 0x0040 # button information
PK_X = 0x0080 # x axis
PK_Y = 0x0100 # y axis
PK_Z = 0x0200 # z axis
PK_NORMAL_PRESSURE = 0x0400 # normal or tip pressure
PK_TANGENT_PRESSURE = 0x0800 # tangential or barrel pressure
PK_ORIENTATION = 0x1000 # orientation info: tilts
PK_ROTATION = 0x2000 # rotation info; 1.1
TU_NONE = 0
TU_INCHES = 1
TU_CENTIMETERS = 2
TU_CIRCLE = 3
# messages
WT_DEFBASE = 0x7ff0
WT_MAXOFFSET = 0xf
WT_PACKET = 0 # remember to add base
WT_CTXOPEN = 1
WT_CTXCLOSE = 2
WT_CTXUPDATE = 3
WT_CTXOVERLAP = 4
WT_PROXIMITY = 5
WT_INFOCHANGE = 6
WT_CSRCHANGE = 7
# system button assignment values
SBN_NONE = 0x00
SBN_LCLICK = 0x01
SBN_LDBLCLICK = 0x02
SBN_LDRAG = 0x03
SBN_RCLICK = 0x04
SBN_RDBLCLICK = 0x05
SBN_RDRAG = 0x06
SBN_MCLICK = 0x07
SBN_MDBLCLICK = 0x08
SBN_MDRAG = 0x09
# for Pen Windows
SBN_PTCLICK = 0x10
SBN_PTDBLCLICK = 0x20
SBN_PTDRAG = 0x30
SBN_PNCLICK = 0x40
SBN_PNDBLCLICK = 0x50
SBN_PNDRAG = 0x60
SBN_P1CLICK = 0x70
SBN_P1DBLCLICK = 0x80
SBN_P1DRAG = 0x90
SBN_P2CLICK = 0xA0
SBN_P2DBLCLICK = 0xB0
SBN_P2DRAG = 0xC0
SBN_P3CLICK = 0xD0
SBN_P3DBLCLICK = 0xE0
SBN_P3DRAG = 0xF0
HWC_INTEGRATED = 0x0001
HWC_TOUCH = 0x0002
HWC_HARDPROX = 0x0004
HWC_PHYSID_CURSORS = 0x0008 # 1.1
CRC_MULTIMODE = 0x0001 # 1.1
CRC_AGGREGATE = 0x0002 # 1.1
CRC_INVERT = 0x0004 # 1.1
WTI_INTERFACE = 1
IFC_WINTABID = 1
IFC_SPECVERSION = 2
IFC_IMPLVERSION = 3
IFC_NDEVICES = 4
IFC_NCURSORS = 5
IFC_NCONTEXTS = 6
IFC_CTXOPTIONS = 7
IFC_CTXSAVESIZE = 8
IFC_NEXTENSIONS = 9
IFC_NMANAGERS = 10
IFC_MAX = 10
WTI_STATUS = 2
STA_CONTEXTS = 1
STA_SYSCTXS = 2
STA_PKTRATE = 3
STA_PKTDATA = 4
STA_MANAGERS = 5
STA_SYSTEM = 6
STA_BUTTONUSE = 7
STA_SYSBTNUSE = 8
STA_MAX = 8
WTI_DEFCONTEXT = 3
WTI_DEFSYSCTX = 4
WTI_DDCTXS = 400 # 1.1
WTI_DSCTXS = 500 # 1.1
CTX_NAME = 1
CTX_OPTIONS = 2
CTX_STATUS = 3
CTX_LOCKS = 4
CTX_MSGBASE = 5
CTX_DEVICE = 6
CTX_PKTRATE = 7
CTX_PKTDATA = 8
CTX_PKTMODE = 9
CTX_MOVEMASK = 10
CTX_BTNDNMASK = 11
CTX_BTNUPMASK = 12
CTX_INORGX = 13
CTX_INORGY = 14
CTX_INORGZ = 15
CTX_INEXTX = 16
CTX_INEXTY = 17
CTX_INEXTZ = 18
CTX_OUTORGX = 19
CTX_OUTORGY = 20
CTX_OUTORGZ = 21
CTX_OUTEXTX = 22
CTX_OUTEXTY = 23
CTX_OUTEXTZ = 24
CTX_SENSX = 25
CTX_SENSY = 26
CTX_SENSZ = 27
CTX_SYSMODE = 28
CTX_SYSORGX = 29
CTX_SYSORGY = 30
CTX_SYSEXTX = 31
CTX_SYSEXTY = 32
CTX_SYSSENSX = 33
CTX_SYSSENSY = 34
CTX_MAX = 34
WTI_DEVICES = 100
DVC_NAME = 1
DVC_HARDWARE = 2
DVC_NCSRTYPES = 3
DVC_FIRSTCSR = 4
DVC_PKTRATE = 5
DVC_PKTDATA = 6
DVC_PKTMODE = 7
DVC_CSRDATA = 8
DVC_XMARGIN = 9
DVC_YMARGIN = 10
DVC_ZMARGIN = 11
DVC_X = 12
DVC_Y = 13
DVC_Z = 14
DVC_NPRESSURE = 15
DVC_TPRESSURE = 16
DVC_ORIENTATION = 17
DVC_ROTATION = 18 # 1.1
DVC_PNPID = 19 # 1.1
DVC_MAX = 19
WTI_CURSORS = 200
CSR_NAME = 1
CSR_ACTIVE = 2
CSR_PKTDATA = 3
CSR_BUTTONS = 4
CSR_BUTTONBITS = 5
CSR_BTNNAMES = 6
CSR_BUTTONMAP = 7
CSR_SYSBTNMAP = 8
CSR_NPBUTTON = 9
CSR_NPBTNMARKS = 10
CSR_NPRESPONSE = 11
CSR_TPBUTTON = 12
CSR_TPBTNMARKS = 13
CSR_TPRESPONSE = 14
CSR_PHYSID = 15 # 1.1
CSR_MODE = 16 # 1.1
CSR_MINPKTDATA = 17 # 1.1
CSR_MINBUTTONS = 18 # 1.1
CSR_CAPABILITIES = 19 # 1.1
CSR_TYPE = 20 # 1.2
CSR_MAX = 20
WTI_EXTENSIONS = 300
EXT_NAME = 1
EXT_TAG = 2
EXT_MASK = 3
EXT_SIZE = 4
EXT_AXES = 5
EXT_DEFAULT = 6
EXT_DEFCONTEXT = 7
EXT_DEFSYSCTX = 8
EXT_CURSORS = 9
EXT_MAX = 109 # Allow 100 cursors
CXO_SYSTEM = 0x0001
CXO_PEN = 0x0002
CXO_MESSAGES = 0x0004
CXO_MARGIN = 0x8000
CXO_MGNINSIDE = 0x4000
CXO_CSRMESSAGES = 0x0008 # 1.1
# context status values
CXS_DISABLED = 0x0001
CXS_OBSCURED = 0x0002
CXS_ONTOP = 0x0004
# context lock values
CXL_INSIZE = 0x0001
CXL_INASPECT = 0x0002
CXL_SENSITIVITY = 0x0004
CXL_MARGIN = 0x0008
CXL_SYSOUT = 0x0010
# packet status values
TPS_PROXIMITY = 0x0001
TPS_QUEUE_ERR = 0x0002
TPS_MARGIN = 0x0004
TPS_GRAB = 0x0008
TPS_INVERT = 0x0010 # 1.1
TBN_NONE = 0
TBN_UP = 1
TBN_DOWN = 2
PKEXT_ABSOLUTE = 1
PKEXT_RELATIVE = 2
# Extension tags.
WTX_OBT = 0 # Out of bounds tracking
WTX_FKEYS = 1 # Function keys
WTX_TILT = 2 # Raw Cartesian tilt; 1.1
WTX_CSRMASK = 3 # select input by cursor type; 1.1
WTX_XBTNMASK = 4 # Extended button mask; 1.1
WTX_EXPKEYS = 5 # ExpressKeys; 1.3
def wtinfo(category, index, buffer):
size = lib.WTInfoW(category, index, None)
assert size <= ctypes.sizeof(buffer)
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer
def wtinfo_string(category, index):
size = lib.WTInfoW(category, index, None)
buffer = ctypes.create_unicode_buffer(size)
lib.WTInfoW(category, index, buffer)
return buffer.value
def wtinfo_uint(category, index):
buffer = UINT()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_word(category, index):
buffer = WORD()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_dword(category, index):
buffer = DWORD()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_wtpkt(category, index):
buffer = WTPKT()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_bool(category, index):
buffer = BOOL()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return bool(buffer.value)
class Device:
def __init__(self, index):
self._device = WTI_DEVICES + index
self.name = wtinfo_string(self._device, DVC_NAME).strip()
self.id = wtinfo_string(self._device, DVC_PNPID)
hardware = wtinfo_uint(self._device, DVC_HARDWARE)
phys_cursors = hardware & HWC_PHYSID_CURSORS
n_cursors = wtinfo_uint(self._device, DVC_NCSRTYPES)
first_cursor = wtinfo_uint(self._device, DVC_FIRSTCSR)
self.pressure_axis = wtinfo(self._device, DVC_NPRESSURE, AXIS())
self.cursors = list()
self._cursor_map = dict()
for i in range(n_cursors):
cursor = WintabCursor(self, i + first_cursor)
if not cursor.bogus:
self.cursors.append(cursor)
self._cursor_map[i + first_cursor] = cursor
def open(self, window):
return DeviceInstance(self, window)
class DeviceInstance(pyglet.event.EventDispatcher):
def __init__(self, device, window, msg_base=WT_DEFBASE):
# Just use system context, for similarity w/ os x and xinput.
# WTI_DEFCONTEXT detaches mouse from tablet, which is nice, but not
        # possible on os x afaik.
self.device = device
self.window = window
self.context_info = context_info = LOGCONTEXT()
wtinfo(WTI_DEFSYSCTX, 0, context_info)
context_info.lcMsgBase = msg_base
context_info.lcOptions |= CXO_MESSAGES
# If you change this, change definition of PACKET also.
context_info.lcPktData = (
PK_CHANGED | PK_CURSOR | PK_BUTTONS | PK_X | PK_Y | PK_Z |
PK_NORMAL_PRESSURE | PK_TANGENT_PRESSURE | PK_ORIENTATION)
context_info.lcPktMode = 0 # All absolute
self._context = lib.WTOpenW(window._hwnd,
ctypes.byref(context_info), True)
if not self._context:
raise Exception("Couldn't open context")
window._event_handlers[msg_base + WT_PACKET] = self._event_wt_packet
window._event_handlers[msg_base + WT_PROXIMITY] = \
self._event_wt_proximity
self._current_cursor = None
self._pressure_scale = device.pressure_axis.get_scale()
self._pressure_bias = device.pressure_axis.get_bias()
def close(self):
lib.WTClose(self._context)
self._context = None
def _set_current_cursor(self, cursor_type):
if self._current_cursor:
self.dispatch_event('on_cursor_leave', self._current_cursor)
self._current_cursor = self.device._cursor_map.get(cursor_type, None)
if self._current_cursor:
self.dispatch_event('on_cursor_enter', self._current_cursor)
@pyglet.window.win32.Win32EventHandler(0)
def _event_wt_packet(self, msg, wParam, lParam):
if lParam != self._context:
return
packet = PACKET()
if lib.WTPacket(self._context, wParam, ctypes.byref(packet)) == 0:
return
if not packet.pkChanged:
return
window_x, window_y = self.window.get_location() # TODO cache on window
window_y = self.window.screen.height - window_y - self.window.height
x = packet.pkX - window_x
y = packet.pkY - window_y
pressure = (packet.pkNormalPressure + self._pressure_bias) * \
self._pressure_scale
if self._current_cursor is None:
self._set_current_cursor(packet.pkCursor)
self.dispatch_event('on_motion', self._current_cursor,
x, y, pressure)
@pyglet.window.win32.Win32EventHandler(0)
def _event_wt_proximity(self, msg, wParam, lParam):
if wParam != self._context:
return
if not lParam & 0xffff0000:
# Not a hardware proximity event
return
if not lParam & 0xffff:
# Going out
self.dispatch_event('on_cursor_leave', self._current_cursor)
# If going in, proximity event will be generated by next event, which
# can actually grab a cursor id.
self._current_cursor = None
DeviceInstance.register_event_type('on_cursor_enter')
DeviceInstance.register_event_type('on_cursor_leave')
DeviceInstance.register_event_type('on_motion')
class WintabCursor:
def __init__(self, device, index):
self.device = device
self._cursor = WTI_CURSORS + index
self.name = wtinfo_string(self._cursor, CSR_NAME).strip()
self.active = wtinfo_bool(self._cursor, CSR_ACTIVE)
pktdata = wtinfo_wtpkt(self._cursor, CSR_PKTDATA)
# A whole bunch of cursors are reported by the driver, but most of
# them are hogwash. Make sure a cursor has at least X and Y data
# before adding it to the device.
self.bogus = not (pktdata & PK_X and pktdata & PK_Y)
if self.bogus:
return
self.id = (wtinfo_dword(self._cursor, CSR_TYPE) << 32) | \
wtinfo_dword(self._cursor, CSR_PHYSID)
def __repr__(self):
return 'WintabCursor(%r)' % self.name
def check_version():
interface_name = wtinfo_string(WTI_INTERFACE, IFC_WINTABID)
spec_version = wtinfo_word(WTI_INTERFACE, IFC_SPECVERSION)
impl_version = wtinfo_word(WTI_INTERFACE, IFC_IMPLVERSION)
print('%s %d.%d (Spec %d.%d)' % (interface_name,
impl_version >> 8, impl_version & 0xff,
spec_version >> 8, spec_version & 0xff))
if spec_version < 0x101:
raise ImportError('Require WinTab specification 1.1 or later')
def get_devices():
n_devices = wtinfo_uint(WTI_INTERFACE, IFC_NDEVICES)
devices = [Device(i) for i in range(n_devices)]
return devices
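# A minimal usage sketch (Windows only, assumes a Wintab driver is installed; the handler
# below is illustrative and not part of this module): query the interface, open the first
# reported device on a pyglet window and print pen motion events.
if __name__ == '__main__':
    check_version()
    devices = get_devices()
    if devices:
        window = pyglet.window.Window(width=640, height=480)
        instance = devices[0].open(window)

        def on_motion(cursor, x, y, pressure):
            print(cursor, x, y, pressure)

        instance.push_handlers(on_motion=on_motion)
        pyglet.app.run()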
|
the-stack_0_2135 | import urllib2
import logging
import stormberry.plugin
from urllib import urlencode
class WundergroundUploader(stormberry.plugin.IRepositoryPlugin):
def store_reading(self, data):
"""Internal. Continuously uploads new sensors values to Weather Underground."""
print('Uploading data to Weather Underground')
# Build a weather data object http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol
weather_data = {
'action': 'updateraw',
'ID': self.config['WUNDERGROUND']['STATION_ID'],
'PASSWORD': self.config['WUNDERGROUND']['STATION_KEY'],
'dateutc': 'now',
'tempf': data.tempf,
'humidity': data.humidity,
'baromin': data.pressure_inHg,
'dewptf': data.dewpointf
}
try:
upload_url = self.config['WUNDERGROUND']['WU_URL'] + '?' + urlencode(weather_data)
response = urllib2.urlopen(upload_url)
html = response.read()
print('Server response: ', html)
# Close response object
response.close()
return True
except:
print('Could not upload to Weather Underground')
logging.warning('Could not upload to Weather Underground', exc_info=True)
return False
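# Rough sketch of the configuration this plugin expects and of driving store_reading by
# hand (normally the stormberry framework injects self.config and the reading object; the
# no-argument constructor, the namedtuple stand-in and all values below are assumptions).
def _example_upload():
    from collections import namedtuple
    Reading = namedtuple('Reading', ['tempf', 'humidity', 'pressure_inHg', 'dewpointf'])
    uploader = WundergroundUploader()  # assumes the base plugin needs no constructor args
    uploader.config = {
        'WUNDERGROUND': {
            'WU_URL': 'http://example.invalid/updateweatherstation.php',  # placeholder URL
            'STATION_ID': 'KXXXXXXXX',    # placeholder station id
            'STATION_KEY': 'secret-key',  # placeholder station key
        }
    }
    return uploader.store_reading(Reading(tempf=72.5, humidity=45.0,
                                          pressure_inHg=29.92, dewpointf=50.1))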
|
the-stack_0_2136 | # Copyright 2021 Injective Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Injective Chain Tx/Query client for Python. Example only."""
import asyncio
import logging
from pyinjective.composer import Composer as ProtoMsgComposer
from pyinjective.client import Client
from pyinjective.transaction import Transaction
from pyinjective.constant import Network
from pyinjective.wallet import PrivateKey, PublicKey, Address
async def main() -> None:
# select network: local, testnet, mainnet
network = Network.testnet()
composer = ProtoMsgComposer(network=network.string())
# initialize grpc client
client = Client(network, insecure=True)
# load account
priv_key = PrivateKey.from_hex("5d386fbdbf11f1141010f81a46b40f94887367562bd33b452bbaa6ce1cd1381e")
pub_key = priv_key.to_public_key()
address = pub_key.to_address().init_num_seq(network.lcd_endpoint)
subaccount_id = address.get_subaccount_id(index=0)
# prepare trade info
market_id = "0xd0f46edfba58827fe692aab7c8d46395d1696239fdf6aeddfa668b73ca82ea30"
fee_recipient = "inj1hkhdaj2a2clmq5jq6mspsggqs32vynpk228q3r"
# prepare tx msg
msg = composer.MsgCreateDerivativeMarketOrder(
sender=address.to_acc_bech32(),
market_id=market_id,
subaccount_id=subaccount_id,
fee_recipient=fee_recipient,
price=60000,
quantity=0.01,
leverage=3,
is_buy=True
)
# build sim tx
tx = (
Transaction()
.with_messages(msg)
.with_sequence(address.get_sequence())
.with_account_num(address.get_number())
.with_chain_id(network.chain_id)
)
sim_sign_doc = tx.get_sign_doc(pub_key)
sim_sig = priv_key.sign(sim_sign_doc.SerializeToString())
sim_tx_raw_bytes = tx.get_tx_data(sim_sig, pub_key)
# simulate tx
(simRes, success) = client.simulate_tx(sim_tx_raw_bytes)
if not success:
print(simRes)
return
sim_res_msg = ProtoMsgComposer.MsgResponses(simRes.result.data, simulation=True)
print("simulation msg response")
print(sim_res_msg)
# build tx
gas_price = 500000000
gas_limit = simRes.gas_info.gas_used + 15000 # add 15k for gas, fee computation
fee = [composer.Coin(
amount=gas_price * gas_limit,
denom=network.fee_denom,
)]
current_height = client.get_latest_block().block.header.height
tx = tx.with_gas(gas_limit).with_fee(fee).with_memo("").with_timeout_height(current_height+50)
sign_doc = tx.get_sign_doc(pub_key)
sig = priv_key.sign(sign_doc.SerializeToString())
tx_raw_bytes = tx.get_tx_data(sig, pub_key)
# broadcast tx: send_tx_async_mode, send_tx_sync_mode, send_tx_block_mode
res = client.send_tx_block_mode(tx_raw_bytes)
res_msg = ProtoMsgComposer.MsgResponses(res.data)
print("tx response")
print(res)
print("tx msg response")
print(res_msg)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(main())
|
the-stack_0_2137 | # -*- coding: utf-8 -*-
"""
blog
~~~~~~~~~~~~~~
blog definition.
:copyright: (c) 2016 by fengweimin.
:date: 16/8/16
"""
from datetime import datetime
from bson.objectid import ObjectId
from werkzeug.utils import cached_property
from app.extensions import mdb
from app.models import User
from app.mongosupport import Model
@mdb.register
class Tag(Model):
__collection__ = 'tags'
structure = {
'name': unicode,
'weight': int,
'createTime': datetime,
}
required_fields = ['name', 'weight', 'createTime']
default_values = {'weight': 0, 'createTime': datetime.now}
indexes = [{'fields': ['name'], 'unique': True}]
@mdb.register
class Post(Model):
__collection__ = 'posts'
structure = {
'uid': ObjectId,
'pics': [unicode],
'title': unicode,
'body': unicode,
        'tids': [ObjectId], # related tags
'createTime': datetime,
'viewTimes': int,
'comments': [{
'id': int,
            'uid': ObjectId, # comment author
'content': unicode,
'time': datetime,
'replys': [{
                'uid': ObjectId, # user who posted the reply
                'rid': ObjectId, # user who receives the reply
'content': unicode,
'time': datetime
}]
}]
}
required_fields = ['uid', 'title', 'body', 'tids', 'createTime']
default_values = {'createTime': datetime.now, 'viewTimes': 0}
indexes = [{'fields': 'tids'}, {'fields': 'createTime'}]
@cached_property
def author(self):
author = User.find_one({'_id': self.uid})
return author
@cached_property
def tags(self):
ids = list(self.tids)
tag_dict = {t._id: t for t in Tag.find({'_id': {'$in': ids}})}
return [tag_dict[id] for id in ids if id in tag_dict]
|
the-stack_0_2138 | """This module contains functionality for all the sampling methods supported in UQpy."""
import sys
import copy
import numpy as np
from scipy.spatial.distance import pdist
import scipy.stats as sp
import random
from UQpy.Distributions import *
import warnings
def init_sm(data):
################################################################################################################
# Add available sampling methods Here
valid_methods = ['mcs', 'lhs', 'mcmc', 'pss', 'sts', 'SuS']
################################################################################################################
# Check if requested method is available
if 'method' in data:
if data['method'] not in valid_methods:
raise NotImplementedError("method - %s not available" % data['method'])
else:
raise NotImplementedError("No sampling method was provided")
################################################################################################################
# Monte Carlo simulation block.
# Mandatory properties(4): 1. Number of parameters, 2. distribution, 3. distribution parameters 4. Number of samples
# Optional properties(0):
if data['method'] == 'mcs':
# Mandatory
if 'number of samples' not in data:
data['number of samples'] = None
if 'distribution type' not in data:
raise NotImplementedError("Distributions not defined. Exit code")
if 'distribution parameters' not in data:
raise NotImplementedError("Distribution parameters not provided. Exit code")
if 'number of parameters' not in data:
data['number of parameters'] = None
################################################################################################################
# Latin Hypercube simulation block.
# Mandatory properties(4): 1. Number of parameters, 2. distribution, 3. distribution parameters 4. Number of samples
# Optional properties(3): 1. Criterion, 2. Metric, 3. Iterations
if data['method'] == 'lhs':
# Mandatory
if 'number of parameters' not in data:
data['number of parameters'] = None
if 'number of samples' not in data:
data['number of samples'] = None
if 'distribution type' not in data:
raise NotImplementedError("Exit code: Distributions not defined.")
if 'distribution parameters' not in data:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
# Optional
if 'criterion' not in data:
data['criterion'] = None
if 'distance' not in data:
data['distance'] = None
if 'iterations' not in data:
data['iterations'] = None
####################################################################################################################
# Markov Chain Monte Carlo simulation block.
# Mandatory properties(4): 1. target distribution, 2. target distribution parameters, 3. Number of samples,
# 4. Number of parameters
# Optional properties(5): 1. Proposal distribution, 2. proposal width, 3. Seed, 4. skip samples (avoid burn-in),
# 5. algorithm
if data['method'] == 'mcmc':
# Mandatory
if 'number of parameters' not in data:
raise NotImplementedError('Exit code: Number of parameters not defined.')
if 'target distribution type' not in data:
raise NotImplementedError("Exit code: Target distribution type not defined.")
if 'target distribution parameters' not in data:
raise NotImplementedError("Exit code: Target distribution parameters not defined.")
if 'number of samples' not in data:
raise NotImplementedError('Exit code: Number of samples not defined.')
# Optional
if 'seed' not in data:
data['seed'] = None
if 'skip' not in data:
data['skip'] = None
if 'proposal distribution type' not in data:
data['proposal distribution type'] = None
#else:
# if data['proposal distribution type'] not in ['Uniform', 'Normal']:
# raise ValueError('Exit code: Unrecognized type of proposal distribution type. Supported distributions: '
# 'Uniform, '
# 'Normal.')
if 'proposal distribution width' not in data:
data['proposal distribution width'] = None
if 'algorithm' not in data:
data['algorithm'] = None
################################################################################################################
# Partially stratified sampling block.
# Mandatory properties (4): 1. distribution, 2. distribution parameters, 3. design, 4. strata
# Optional properties(1): 1. Number of parameters
if data['method'] == 'pss':
# Mandatory
if 'distribution type' not in data:
raise NotImplementedError("Exit code: Distributions not defined.")
elif 'distribution parameters' not in data:
raise NotImplementedError("Exit code: distribution parameters not defined.")
if 'design' not in data:
raise NotImplementedError("Exit code: pss design not defined.")
if 'strata' not in data:
raise NotImplementedError("Exit code: pss strata not defined.")
# Optional
if 'number of parameters' not in data:
data['number of parameters'] = None
################################################################################################################
# Stratified sampling block.
# Mandatory properties(3): 1. distribution, 2. distribution parameters, 3. design
# Optional properties(1): 1. Number of parameters
if data['method'] == 'sts':
# Mandatory
if 'distribution type' not in data:
raise NotImplementedError("Exit code: Distributions not defined.")
elif 'distribution parameters' not in data:
raise NotImplementedError("Exit code: distribution parameters not defined.")
if 'design' not in data:
raise NotImplementedError("Exit code: sts design not defined.")
# Optional
if 'number of parameters' not in data:
data['number of parameters'] = None
####################################################################################################################
# Stochastic reduced order model block
# Mandatory properties(2): 1. moments, 2. error function weights
# Optional properties(2): 1.properties to match, 2. sample weights
# if 'SROM' in data and data['SROM'] is True:
# # Mandatory
# if 'moments' not in data:
# raise NotImplementedError("Exit code: Moments not provided.")
# if 'error function weights' not in data:
# raise NotImplementedError("Exit code: Error function weights not provided.")
#
# # Optional
# if 'properties to match' not in data:
# data['properties to match'] = None
# if 'correlation' not in data:
# data['correlation'] = None
# if 'weights for distribution' not in data:
# data['weights for distribution'] = None
# if 'weights for moments' not in data:
# data['weights for moments'] = None
# if 'weights for correlation' not in data:
# data['weights for correlation'] = None
####################################################################################################################
# Check any NEW METHOD HERE
#
#
####################################################################################################################
# Check any NEW METHOD HERE
#
#
########################################################################################################################
########################################################################################################################
########################################################################################################################
def run_sm(data):
################################################################################################################
# Run Monte Carlo simulation
if data['method'] == 'mcs':
print("\nRunning %k \n", data['method'])
rvs = MCS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'],
nsamples=data['number of samples'])
################################################################################################################
# Run Latin Hypercube sampling
elif data['method'] == 'lhs':
print("\nRunning %k \n", data['method'])
rvs = LHS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'],
nsamples=data['number of samples'], lhs_metric=data['distance'],
lhs_iter=data['iterations'], lhs_criterion=data['criterion'])
################################################################################################################
# Run partially stratified sampling
elif data['method'] == 'pss':
print("\nRunning %k \n", data['method'])
rvs = PSS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'],
pss_design=data['design'], pss_strata=data['strata'])
################################################################################################################
# Run STS sampling
elif data['method'] == 'sts':
print("\nRunning %k \n", data['method'])
rvs = STS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'], sts_design=data['design'])
################################################################################################################
# Run Markov Chain Monte Carlo sampling
elif data['method'] == 'mcmc':
print("\nRunning %k \n", data['method'])
rvs = MCMC(dimension=data['number of parameters'], pdf_target_type=data['target distribution type'],
algorithm=data['algorithm'], pdf_proposal_type=data['proposal distribution type'],
pdf_proposal_width=data['proposal distribution width'],
pdf_target_params=data['target distribution parameters'], seed=data['seed'],
skip=data['skip'], nsamples=data['number of samples'])
################################################################################################################
# Run Stochastic Reduce Order Model
# if 'SROM' in data:
# if data['SROM'] == 'Yes':
# print("\nImplementing SROM to samples")
# rvs = SROM(samples=rvs.samples, pdf_type=data['distribution type'], moments=data['moments'],
# weights_errors=data['error function weights'],
# weights_distribution=data['weights for distribution'],
# weights_moments=data['weights for moments'],
# weights_correlation=data['weights for correlation'], properties=data['properties to match'],
# pdf_params=data['distribution parameters'], correlation=data['correlation'])
################################################################################################################
# Run ANY NEW METHOD HERE
return rvs
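# Illustrative only: the smallest 'data' dictionary the dispatcher above accepts for plain
# Monte Carlo. The [mean, std]-style parameter pairs are an assumption about what inv_cdf
# in UQpy.Distributions expects, not something stated in this module.
def _example_run_sm():
    data = {'method': 'mcs',
            'number of parameters': 2,
            'number of samples': 1000,
            'distribution type': ['Normal'],
            'distribution parameters': [[0, 1]]}
    init_sm(data)        # fills in optional keys and validates the mandatory ones
    rvs = run_sm(data)
    return rvs.samples   # (1000, 2) array in the parameter space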
########################################################################################################################
########################################################################################################################
# Monte Carlo simulation
########################################################################################################################
class MCS:
"""
A class used to perform brute force Monte Carlo design of experiment (MCS).
SamplesU01 belong in hypercube [0, 1]^n while samples belong to the parameter space
:param dimension: Number of parameters
:type dimension: int
:param nsamples: Number of samples to be generated
:type nsamples: int
:param pdf_type: Type of distributions
:type pdf_type: list
:param pdf_params: Distribution parameters
:type pdf_params: list
"""
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, nsamples=None):
self.dimension = dimension
self.nsamples = nsamples
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.init_mcs()
self.samplesU01, self.samples = self.run_mcs()
def run_mcs(self):
samples = np.random.rand(self.nsamples, self.dimension)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
################################################################################################################
# Initialize Monte Carlo simulation.
# Necessary parameters: 1. Probability distribution, 2. Probability distribution parameters 3. Number of samples
# Optional: dimension, names of random variables
def init_mcs(self):
if self.nsamples is None:
raise NotImplementedError("Exit code: Number of samples not defined.")
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distributions not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', "
"'Weibull', 'Beta', 'Exponential', 'Gamma'. ")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.dimension is None:
if len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
else:
self.dimension = len(self.pdf_type)
else:
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions")
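# Illustrative only: calling MCS directly, outside the dictionary-driven dispatcher.
# The [mean, std] convention for 'Normal' parameters is an assumption about inv_cdf.
def _example_mcs():
    mcs = MCS(dimension=2, pdf_type=['Normal'], pdf_params=[[0, 1]], nsamples=500)
    return mcs.samplesU01, mcs.samples   # both arrays have shape (500, 2)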
########################################################################################################################
########################################################################################################################
# Latin hypercube sampling (LHS)
########################################################################################################################
class LHS:
"""
A class that creates a Latin Hypercube Design for experiments.
SamplesU01 belong in hypercube [0, 1]^n while samples belong to the parameter space
:param pdf_type: Distribution of the parameters
:type pdf_type: list
:param pdf_params: Distribution parameters
:type pdf_params: list
:param lhs_criterion: The criterion for generating sample points
Options:
1. random - completely random \n
2. centered - points only at the centre \n
3. maximin - maximising the minimum distance between points \n
4. correlate - minimizing the correlation between the points \n
:type lhs_criterion: str
:param lhs_iter: The number of iteration to run. Only for maximin, correlate and criterion
:type lhs_iter: int
:param lhs_metric: The distance metric to use. Supported metrics are
'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', \n
'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', \n
'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', \n
'yule'.
:type lhs_metric: str
"""
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, lhs_criterion=None, lhs_metric=None,
lhs_iter=None, nsamples=None):
self.dimension = dimension
self.nsamples = nsamples
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.lhs_criterion = lhs_criterion
self.lhs_metric = lhs_metric
self.lhs_iter = lhs_iter
self.init_lhs()
self.samplesU01, self.samples = self.run_lhs()
def run_lhs(self):
print('Running LHS for ' + str(self.lhs_iter) + ' iterations')
cut = np.linspace(0, 1, self.nsamples + 1)
a = cut[:self.nsamples]
b = cut[1:self.nsamples + 1]
if self.lhs_criterion == 'random':
samples = self._random(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
elif self.lhs_criterion == 'centered':
samples = self._centered(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
elif self.lhs_criterion == 'maximin':
samples = self._max_min(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
elif self.lhs_criterion == 'correlate':
samples = self._correlate(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
def _random(self, a, b):
"""
:return: The samples points for the random LHS design
"""
u = np.random.rand(self.nsamples, self.dimension)
samples = np.zeros_like(u)
for i in range(self.dimension):
samples[:, i] = u[:, i] * (b - a) + a
for j in range(self.dimension):
order = np.random.permutation(self.nsamples)
samples[:, j] = samples[order, j]
return samples
def _centered(self, a, b):
samples = np.zeros([self.nsamples, self.dimension])
centers = (a + b) / 2
for i in range(self.dimension):
samples[:, i] = np.random.permutation(centers)
return samples
def _max_min(self, a, b):
max_min_dist = 0
samples = self._random(a, b)
for _ in range(self.lhs_iter):
samples_try = self._random(a, b)
d = pdist(samples_try, metric=self.lhs_metric)
if max_min_dist < np.min(d):
max_min_dist = np.min(d)
samples = copy.deepcopy(samples_try)
print('Achieved max_min distance of ', max_min_dist)
return samples
def _correlate(self, a, b):
min_corr = np.inf
samples = self._random(a, b)
for _ in range(self.lhs_iter):
samples_try = self._random(a, b)
R = np.corrcoef(np.transpose(samples_try))
np.fill_diagonal(R, 1)
R1 = R[R != 1]
if np.max(np.abs(R1)) < min_corr:
min_corr = np.max(np.abs(R1))
samples = copy.deepcopy(samples_try)
print('Achieved minimum correlation of ', min_corr)
return samples
################################################################################################################
# Latin hypercube checks.
# Necessary parameters: 1. Probability distribution, 2. Probability distribution parameters
# Optional: number of samples (default 100), criterion, metric, iterations
def init_lhs(self):
if self.nsamples is None:
raise NotImplementedError("Exit code: Number of samples not defined.")
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distributions not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
"'Beta', 'Exponential', 'Gamma'.")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.dimension is None:
if len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
else:
self.dimension = len(self.pdf_type)
else:
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
if self.lhs_criterion is None:
self.lhs_criterion = 'random'
else:
if self.lhs_criterion not in ['random', 'centered', 'maximin', 'correlate']:
raise NotImplementedError("Exit code: Supported lhs criteria: 'random', 'centered', 'maximin', "
"'correlate'")
if self.lhs_metric is None:
self.lhs_metric = 'euclidean'
else:
if self.lhs_metric not in ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine',
'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean']:
raise NotImplementedError("Exit code: Supported lhs distances: 'braycurtis', 'canberra', 'chebyshev', "
"'cityblock',"
" 'correlation', 'cosine','dice', 'euclidean', 'hamming', 'jaccard', "
"'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',"
"'russellrao', 'seuclidean','sokalmichener', 'sokalsneath', 'sqeuclidean'")
if self.lhs_iter is None or self.lhs_iter == 0:
self.lhs_iter = 1000
elif self.lhs_iter is not None:
self.lhs_iter = int(self.lhs_iter)
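# Illustrative only: a maximin Latin hypercube design in two dimensions.
# The [0, 1] parameter pairs again assume the inv_cdf parameter format.
def _example_lhs():
    lhs = LHS(dimension=2, pdf_type=['Uniform'], pdf_params=[[0, 1]], nsamples=20,
              lhs_criterion='maximin', lhs_metric='euclidean', lhs_iter=100)
    return lhs.samples   # (20, 2) array in the parameter space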
########################################################################################################################
########################################################################################################################
# Partially Stratified Sampling (PSS)
########################################################################################################################
class PSS:
"""
This class generates a partially stratified sample set on U(0,1) as described in:
Shields, M.D. and Zhang, J. "The generalization of Latin hypercube sampling" Reliability Engineering and
System Safety. 148: 96-108
:param pss_design: Vector defining the subdomains to be used.
Example: 5D problem with 2x2D + 1x1D subdomains using pss_design = [2,2,1]. \n
Note: The sum of the values in the pss_design vector equals the dimension of the problem.
:param pss_strata: Vector defining how each dimension should be stratified.
Example: 5D problem with 2x2D + 1x1D subdomains with 625 samples using
pss_pss_stratum = [25,25,625].\n
Note: pss_pss_stratum(i)^pss_design(i) = number of samples (for all i)
:return: pss_samples: Generated samples Array (nSamples x nRVs)
:type pss_design: list
:type pss_strata: list
Created by: Jiaxin Zhang
Last modified: 24/01/2018 by D.G. Giovanis
"""
# TODO: Jiaxin - Add documentation to this subclass
# TODO: the pss_design = [[1,4], [2,5], [3]] - then reorder the sequence of RVs
# TODO: Add the sample check and pss_design check in the beginning
# TODO: Create a list that contains all element info - parent structure
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, pss_design=None, pss_strata=None):
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.pss_design = pss_design
self.pss_strata = pss_strata
self.dimension = dimension
self.init_pss()
self.nsamples = self.pss_strata[0] ** self.pss_design[0]
self.samplesU01, self.samples = self.run_pss()
def run_pss(self):
samples = np.zeros((self.nsamples, self.dimension))
samples_u_to_x = np.zeros((self.nsamples, self.dimension))
col = 0
for i in range(len(self.pss_design)):
n_stratum = self.pss_strata[i] * np.ones(self.pss_design[i], dtype=np.int)
sts = STS(pdf_type=self.pdf_type, pdf_params=self.pdf_params, sts_design=n_stratum, pss_=True)
index = list(range(col, col + self.pss_design[i]))
samples[:, index] = sts.samplesU01
samples_u_to_x[:, index] = sts.samples
arr = np.arange(self.nsamples).reshape((self.nsamples, 1))
samples[:, index] = samples[np.random.permutation(arr), index]
samples_u_to_x[:, index] = samples_u_to_x[np.random.permutation(arr), index]
col = col + self.pss_design[i]
return samples, samples_u_to_x
################################################################################################################
# Partially Stratified sampling (PSS) checks.
# Necessary parameters: 1. pdf, 2. pdf parameters 3. pss design 4. pss strata
# Optional:
def init_pss(self):
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distribution not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
"'Beta', 'Exponential', 'Gamma'. ")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.pss_design is None:
raise NotImplementedError("Exit code: pss design not defined.")
elif self.pss_strata is None:
raise NotImplementedError("Exit code: pss strata not defined.")
else:
if len(self.pss_design) != len(self.pss_strata):
raise ValueError('Exit code: "pss design" and "pss strata" must be the same length.')
            sample_check = np.zeros(len(self.pss_strata))
            for i in range(len(self.pss_strata)):
                sample_check[i] = self.pss_strata[i] ** self.pss_design[i]
if np.max(sample_check) != np.min(sample_check):
raise ValueError('Exit code: All dimensions must have the same number of samples/strata.')
if self.dimension is None:
self.dimension = np.sum(self.pss_design)
else:
if self.dimension != np.sum(self.pss_design):
raise NotImplementedError("Exit code: Incompatible dimensions.")
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
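# Illustrative only: the 5-D case from the class docstring, two 2-D subdomains plus one
# 1-D subdomain with 25**2 = 625**1 = 625 samples (distribution parameters again assume
# the inv_cdf format).
def _example_pss():
    pss = PSS(pdf_type=['Normal'], pdf_params=[[0, 1]], pss_design=[2, 2, 1],
              pss_strata=[25, 25, 625])
    return pss.samples   # (625, 5) array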
########################################################################################################################
########################################################################################################################
# Stratified Sampling (sts)
########################################################################################################################
class STS:
# TODO: MDS - Add documentation to this subclass
"""
:param dimension:
:param pdf_type:
:param pdf_params:
:param sts_design:
:param pss_:
"""
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, sts_design=None, pss_=None):
self.dimension = dimension
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.sts_design = sts_design
if pss_ is None:
self.init_sts()
strata = Strata(nstrata=self.sts_design)
self.origins = strata.origins
self.widths = strata.widths
self.weights = strata.weights
self.samplesU01, self.samples = self.run_sts()
def run_sts(self):
samples = np.empty([self.origins.shape[0], self.origins.shape[1]], dtype=np.float32)
for i in range(0, self.origins.shape[0]):
for j in range(0, self.origins.shape[1]):
samples[i, j] = np.random.uniform(self.origins[i, j], self.origins[i, j] + self.widths[i, j])
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
def init_sts(self):
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distribution not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
"'Beta', 'Exponential', 'Gamma'. ")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.sts_design is None:
raise NotImplementedError("Exit code: sts design not defined.")
if self.dimension is None:
self.dimension = len(self.sts_design)
else:
if self.dimension != len(self.sts_design):
raise NotImplementedError("Exit code: Incompatible dimensions.")
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
# TODO: Create a list that contains all element info - parent structure
# e.g. SS_samples = [STS[j] for j in range(0,nsamples)]
# hstack
########################################################################################################################
########################################################################################################################
# Class Strata
########################################################################################################################
class Strata:
"""
Define a rectilinear stratification of the n-dimensional unit hypercube with N strata.
:param nstrata: array-like
An array of dimension 1 x n defining the number of strata in each of the n dimensions
Creates an equal stratification with strata widths equal to 1/nstrata
The total number of strata, N, is the product of the terms of nstrata
Example -
nstrata = [2, 3, 2] creates a 3d stratification with:
2 strata in dimension 0 with stratum widths 1/2
3 strata in dimension 1 with stratum widths 1/3
2 strata in dimension 2 with stratum widths 1/2
:param input_file: string
File path to input file specifying stratum origins and stratum widths
:param origins: array-like
An array of dimension N x n specifying the origins of all strata
The origins of the strata are the coordinates of the stratum orthotope nearest the global origin
Example - A 2D stratification with 2 strata in each dimension
origins = [[0, 0]
[0, 0.5]
[0.5, 0]
[0.5, 0.5]]
:param widths: array-like
An array of dimension N x n specifying the widths of all strata in each dimension
Example - A 2D stratification with 2 strata in each dimension
widths = [[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]]
"""
def __init__(self, nstrata=None, input_file=None, origins=None, widths=None):
"""
Class defines a rectilinear stratification of the n-dimensional unit hypercube with N strata
:param nstrata: array-like
An array of dimension 1 x n defining the number of strata in each of the n dimensions
Creates an equal stratification with strata widths equal to 1/nstrata
The total number of strata, N, is the product of the terms of nstrata
Example -
nstrata = [2, 3, 2] creates a 3d stratification with:
2 strata in dimension 0 with stratum widths 1/2
3 strata in dimension 1 with stratum widths 1/3
2 strata in dimension 2 with stratum widths 1/2
:param input_file: string
File path to input file specifying stratum origins and stratum widths
See documentation ######## for input file format
:param origins: array-like
An array of dimension N x n specifying the origins of all strata
The origins of the strata are the coordinates of the stratum orthotope nearest the global origin
Example - A 2D stratification with 2 strata in each dimension
origins = [[0, 0]
[0, 0.5]
[0.5, 0]
[0.5, 0.5]]
:param widths: array-like
An array of dimension N x n specifying the widths of all strata in each dimension
Example - A 2D stratification with 2 strata in each dimension
widths = [[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]]
Created by: Michael D. Shields
Last modified: 11/4/2017
Last modified by: Michael D. Shields
"""
self.input_file = input_file
self.nstrata = nstrata
self.origins = origins
self.widths = widths
if self.nstrata is None:
if self.input_file is None:
if self.widths is None or self.origins is None:
sys.exit('Error: The strata are not fully defined. Must provide [nstrata], '
'input file, or [origins] and [widths]')
else:
# Read the strata from the specified input file
# See documentation for input file formatting
array_tmp = np.loadtxt(input_file)
self.origins = array_tmp[:, 0:array_tmp.shape[1] // 2]
                self.widths = array_tmp[:, array_tmp.shape[1] // 2:]
                # Check to see that the strata are space-filling
                space_fill = np.sum(np.prod(self.widths, 1))
if 1 - space_fill > 1e-5:
sys.exit('Error: The stratum design is not space-filling.')
if 1 - space_fill < -1e-5:
sys.exit('Error: The stratum design is over-filling.')
# TODO: MDS - Add a check for disjointness of strata
# Check to see that the strata are disjoint
# ncorners = 2**self.strata.shape[1]
# for i in range(0,len(self.strata)):
# for j in range(0,ncorners):
else:
# Use nstrata to assign the origin and widths of a specified rectilinear stratification.
self.origins = np.divide(self.fullfact(self.nstrata), self.nstrata)
self.widths = np.divide(np.ones(self.origins.shape), self.nstrata)
self.weights = np.prod(self.widths, axis=1)
def fullfact(self, levels):
# TODO: MDS - Acknowledge the source here.
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j] * level_repeat
rng = lvl * range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H
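# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A hypothetical example of how Strata might be used: a 2 x 3 stratification of the
# unit square. It assumes numpy is imported as np at the top of this file.
def _example_strata_usage():
    strata = Strata(nstrata=[2, 3])
    # origins: one row per stratum giving the corner nearest the global origin,
    # e.g. [0, 0], [0.5, 0], [0, 1/3], [0.5, 1/3], [0, 2/3], [0.5, 2/3]
    # widths: stratum edge lengths in each dimension, here [0.5, 1/3] for every stratum
    # weights: stratum volumes (1/6 each); they sum to 1 for a space-filling design
    return strata.origins, strata.widths, strata.weights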
########################################################################################################################
########################################################################################################################
# Markov Chain Monte Carlo (MCMC)
########################################################################################################################
class MCMC:
"""Generate samples from an arbitrary probability density function using Markov Chain Monte Carlo.
    This class generates samples from an arbitrary user-specified distribution using Metropolis-Hastings (MH),
    Modified Metropolis-Hastings (MMH), or the Affine Invariant Ensemble Sampler with stretch moves.
References:
S.-K. Au and J. L. Beck, “Estimation of small failure probabilities in high dimensions by subset simulation,”
Probabilistic Eng. Mech., vol. 16, no. 4, pp. 263–277, Oct. 2001.
J. Goodman and J. Weare, “Ensemble samplers with affine invariance,” Commun. Appl. Math. Comput. Sci., vol. 5,
no. 1, pp. 65–80, 2010.
Input:
:param dimension: A scalar value defining the dimension of target density function.
Default: 1
:type dimension: int
:param pdf_proposal_type: Type of proposal density function for MCMC. Only used with algorithm = 'MH' or 'MMH'
Options:
'Normal' : Normal proposal density
'Uniform' : Uniform proposal density
Default: 'Uniform'
If dimension > 1 and algorithm = 'MMH', this may be input as a list to assign different proposal
densities to each dimension. Example pdf_proposal_type = ['Normal','Uniform'].
If dimension > 1, algorithm = 'MMH' and this is input as a string, the proposal densities for all
dimensions are set equal to the assigned proposal type.
:type pdf_proposal_type: str or str list
:param pdf_proposal_scale: Scale of the proposal distribution
If algorithm == 'MH' or 'MMH'
For pdf_proposal_type = 'Uniform'
Proposal is Uniform in [x-pdf_proposal_scale/2, x+pdf_proposal_scale/2]
For pdf_proposal_type = 'Normal'
Proposal is Normal with standard deviation equal to pdf_proposal_scale
If algorithm == 'Stretch'
pdf_proposal_scale sets the scale of the stretch density
g(z) = 1/sqrt(z) for z in [1/pdf_proposal_scale, pdf_proposal_scale]
Default value: dimension x 1 list of ones
:type pdf_proposal_scale: float or float list
If dimension > 1, this may be defined as float or float list
If input as float, pdf_proposal_scale is assigned to all dimensions
If input as float list, each element is assigned to the corresponding dimension
:param pdf_target_type: Type of target density function for acceptance/rejection in MMH. Not used for MH or Stretch.
Options:
'marginal_pdf': Check acceptance/rejection for a candidate in MMH using the marginal pdf
For independent variables only
'joint_pdf': Check acceptance/rejection for a candidate in MMH using the joint pdf
Default: 'marginal_pdf'
:type pdf_target_type: str
:param pdf_target: Target density function from which to draw random samples
The target joint probability density must be a function, or list of functions, or a string.
If type == 'str'
The assigned string must refer to a custom pdf defined in the file custom_pdf.py in the working
directory
If type == function
The function must be defined in the python script calling MCMC
If dimension > 1 and pdf_target_type='marginal_pdf', the input to pdf_target is a list of size
[dimensions x 1] where each item of the list defines a marginal pdf.
Default: Multivariate normal distribution having zero mean and unit standard deviation
:type pdf_target: function, function list, or str
:param pdf_target_params: Parameters of the target pdf
:type pdf_target_params: list
:param algorithm: Algorithm used to generate random samples.
Options:
'MH': Metropolis Hastings Algorithm
'MMH': Component-wise Modified Metropolis Hastings Algorithm
'Stretch': Affine Invariant Ensemble MCMC with stretch moves
Default: 'MMH'
:type algorithm: str
:param jump: Number of samples between accepted states of the Markov chain.
Default value: 1 (Accepts every state)
:type: jump: int
:param nsamples: Number of samples to generate
No Default Value: nsamples must be prescribed
:type nsamples: int
:param seed: Seed of the Markov chain(s)
For 'MH' and 'MMH', this is a single point, defined as a numpy array of dimension (1 x dimension)
For 'Stretch', this is a numpy array of dimension N x dimension, where N is the ensemble size
Default:
For 'MH' and 'MMH': zeros(1 x dimension)
For 'Stretch': No default, this must be specified.
:type seed: float or numpy array
:param nburn: Length of burn-in. Number of samples at the beginning of the chain to discard.
This option is only used for the 'MMH' and 'MH' algorithms.
Default: nburn = 0
:type nburn: int
Output:
:return: MCMC.samples:
:rtype: MCMC.samples: numpy array
"""
# Authors: Mohit Chauhan, Dimitris Giovanis, Michael D. Shields
# Updated: 4/26/18 by Michael D. Shields
def __init__(self, dimension=None, pdf_proposal_type=None, pdf_proposal_scale=None, pdf_target_type=None,
pdf_target=None, pdf_target_params=None, algorithm=None, jump=None, nsamples=None, seed=None,
nburn=None):
self.pdf_proposal_type = pdf_proposal_type
self.pdf_proposal_scale = pdf_proposal_scale
self.pdf_target_type = pdf_target_type
self.pdf_target = pdf_target
self.pdf_target_params = pdf_target_params
self.algorithm = algorithm
self.jump = jump
self.nsamples = nsamples
self.dimension = dimension
self.seed = seed
self.nburn = nburn
self.init_mcmc()
        if self.algorithm == 'Stretch':
self.ensemble_size = len(self.seed)
self.samples = self.run_mcmc()
def run_mcmc(self):
rejects = 0
# Defining an array to store the generated samples
        # Size the array to hold the burn-in samples as well, so the burn-in slice below stays in bounds.
        samples = np.zeros([self.nsamples * self.jump + self.nburn, self.dimension])
################################################################################################################
# Classical Metropolis-Hastings Algorithm with symmetric proposal density
if self.algorithm == 'MH':
from numpy.random import normal, multivariate_normal, uniform
samples[0, :] = self.seed
pdf_ = self.pdf_target[0]
for i in range(self.nsamples * self.jump - 1 + self.nburn):
if self.pdf_proposal_type[0] == 'Normal':
if self.dimension == 1:
candidate = normal(samples[i, :], np.array(self.pdf_proposal_scale))
else:
if i == 0:
self.pdf_proposal_scale = np.diag(np.array(self.pdf_proposal_scale))
candidate = multivariate_normal(samples[i, :], np.array(self.pdf_proposal_scale))
                elif self.pdf_proposal_type[0] == 'Uniform':
candidate = uniform(low=samples[i, :] - np.array(self.pdf_proposal_scale) / 2,
high=samples[i, :] + np.array(self.pdf_proposal_scale) / 2,
size=self.dimension)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(samples[i, :], self.pdf_target_params)
p_accept = p_proposal / p_current
accept = np.random.random() < p_accept
if accept:
samples[i + 1, :] = candidate
else:
samples[i + 1, :] = samples[i, :]
rejects += 1
################################################################################################################
# Modified Metropolis-Hastings Algorithm with symmetric proposal density
elif self.algorithm == 'MMH':
samples[0, :] = self.seed[0:]
if self.pdf_target_type == 'marginal_pdf':
for i in range(self.nsamples * self.jump - 1 + self.nburn):
for j in range(self.dimension):
pdf_ = self.pdf_target[j]
if self.pdf_proposal_type[j] == 'Normal':
candidate = np.random.normal(samples[i, j], self.pdf_proposal_scale[j])
elif self.pdf_proposal_type[j] == 'Uniform':
candidate = np.random.uniform(low=samples[i, j] - self.pdf_proposal_scale[j] / 2,
high=samples[i, j] + self.pdf_proposal_scale[j] / 2, size=1)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(samples[i, j], self.pdf_target_params)
p_accept = p_proposal / p_current
accept = np.random.random() < p_accept
if accept:
samples[i + 1, j] = candidate
else:
samples[i + 1, j] = samples[i, j]
elif self.pdf_target_type == 'joint_pdf':
pdf_ = self.pdf_target[0]
for i in range(self.nsamples * self.jump - 1 + self.nburn):
candidate = list(samples[i, :])
current = list(samples[i, :])
for j in range(self.dimension):
if self.pdf_proposal_type[j] == 'Normal':
candidate[j] = np.random.normal(samples[i, j], self.pdf_proposal_scale[j])
elif self.pdf_proposal_type[j] == 'Uniform':
candidate[j] = np.random.uniform(low=samples[i, j] - self.pdf_proposal_scale[j] / 2,
high=samples[i, j] + self.pdf_proposal_scale[j] / 2,
size=1)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(current, self.pdf_target_params)
p_accept = p_proposal / p_current
accept = np.random.random() < p_accept
if accept:
current[j] = candidate[j]
else:
candidate[j] = current[j]
samples[i + 1, :] = current
################################################################################################################
# Affine Invariant Ensemble Sampler with stretch moves
# Reference: Goodman, J. and Weare, J., (2010) "Ensemble samplers with affine invariance." Communications in
# applied mathematics and computational science. 5: 65-80.
elif self.algorithm == 'Stretch':
samples[0:self.ensemble_size, :] = self.seed
pdf_ = self.pdf_target[0]
for i in range(self.ensemble_size-1,self.nsamples * self.jump - 1):
complementary_ensemble = samples[i-self.ensemble_size+2:i+1,:]
S = random.choice(complementary_ensemble)
s = (1+(self.pdf_proposal_scale[0]-1)*random.random())**2/self.pdf_proposal_scale[0]
candidate = S+s*(samples[i-self.ensemble_size+1,:]-S)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(samples[i-self.ensemble_size+1, :], self.pdf_target_params)
p_accept = s**(self.dimension-1)*p_proposal/p_current
accept = np.random.random() < p_accept
if accept:
samples[i + 1, :] = candidate
else:
samples[i + 1, :] = samples[i-self.ensemble_size+1, :]
################################################################################################################
# Return the samples
        if self.algorithm == 'MMH' or self.algorithm == 'MH':
            return samples[self.nburn:self.nsamples * self.jump + self.nburn:self.jump]
else:
output = np.zeros((self.nsamples,self.dimension))
j = 0
for i in range(self.jump*self.ensemble_size-self.ensemble_size, samples.shape[0],
self.jump*self.ensemble_size):
output[j:j+self.ensemble_size,:] = samples[i:i+self.ensemble_size,:]
j = j+self.ensemble_size
return output
# TODO: Add Gibbs Sampler
# TODO: Add Affine Invariant with walk moves
####################################################################################################################
# Check to ensure consistency of the user input and assign defaults
def init_mcmc(self):
if self.dimension is None:
self.dimension = 1
# Check nsamples
if self.nsamples is None:
raise NotImplementedError('Exit code: Number of samples not defined.')
# Check seed
if self.seed is None:
self.seed = np.zeros(self.dimension)
        if self.algorithm != 'Stretch':
if self.seed.__len__() != self.dimension:
raise NotImplementedError("Exit code: Incompatible dimensions in 'seed'.")
else:
if self.seed.shape[0] < 3:
raise NotImplementedError("Exit code: Ensemble size must be > 2.")
# Check jump
if self.jump is None:
self.jump = 1
# Check pdf_proposal_type
if self.pdf_proposal_type is None:
self.pdf_proposal_type = 'Uniform'
# If pdf_proposal_type is entered as a string, make it a list
if type(self.pdf_proposal_type).__name__=='str':
self.pdf_proposal_type = [self.pdf_proposal_type]
for i in self.pdf_proposal_type:
if i not in ['Uniform', 'Normal']:
raise ValueError('Exit code: Unrecognized type for proposal distribution. Supported distributions: '
'Uniform, '
'Normal.')
        if self.algorithm == 'MH' and len(self.pdf_proposal_type) != 1:
raise ValueError('Exit code: MH algorithm can only take one proposal distribution.')
elif len(self.pdf_proposal_type)!=self.dimension:
if len(self.pdf_proposal_type) == 1:
self.pdf_proposal_type = self.pdf_proposal_type * self.dimension
else:
raise NotImplementedError("Exit code: Incompatible dimensions in 'pdf_proposal_type'.")
# Check pdf_proposal_scale
if self.pdf_proposal_scale is None:
if self.algorithm == 'Stretch':
self.pdf_proposal_scale = 2
else:
self.pdf_proposal_scale = 1
if type(self.pdf_proposal_scale).__name__ != 'list':
self.pdf_proposal_scale = [self.pdf_proposal_scale]
if len(self.pdf_proposal_scale) != self.dimension:
if len(self.pdf_proposal_scale) == 1:
self.pdf_proposal_scale = self.pdf_proposal_scale * self.dimension
else:
raise NotImplementedError("Exit code: Incompatible dimensions in 'pdf_proposal_scale'.")
# Check pdf_target_type
        if self.algorithm == 'MMH' and self.pdf_target_type is None:
            self.pdf_target_type = 'marginal_pdf'
        if self.algorithm == 'Stretch':
            self.pdf_target_type = 'joint_pdf'
if self.pdf_target_type not in ['joint_pdf', 'marginal_pdf']:
raise ValueError('Exit code: Unrecognized type for target distribution. Supported distributions: '
'joint_pdf, '
'marginal_pdf.')
# Check algorithm
if self.algorithm is None:
self.algorithm = 'MMH'
else:
if self.algorithm not in ['MH', 'MMH', 'Stretch']:
raise NotImplementedError('Exit code: Unrecognized MCMC algorithm. Supported algorithms: '
'Metropolis-Hastings (MH), '
'Modified Metropolis-Hastings (MMH), '
'Affine Invariant Ensemble with Stretch Moves (Stretch).')
# Check pdf_target
if type(self.pdf_target).__name__ == 'str':
self.pdf_target = pdf(self.pdf_target)
        if self.pdf_target is None and self.algorithm == 'MMH':
            if self.dimension == 1 or self.pdf_target_type == 'marginal_pdf':
def target(x, dummy):
return sp.norm.pdf(x)
if self.dimension == 1:
self.pdf_target = [target]
else:
self.pdf_target = [target] * self.dimension
else:
def target(x, dummy):
return sp.multivariate_normal.pdf(x,mean=np.zeros(self.dimension),cov=np.eye(self.dimension))
self.pdf_target = [target]
elif self.pdf_target is None:
if self.dimension == 1:
def target(x, dummy):
return sp.norm.pdf(x)
self.pdf_target = [target]
else:
def target(x, dummy):
return sp.multivariate_normal.pdf(x,mean=np.zeros(self.dimension),cov=np.eye(self.dimension))
self.pdf_target = [target]
elif type(self.pdf_target).__name__ != 'list':
self.pdf_target = [self.pdf_target]
# Check pdf_target_params
if self.pdf_target_params is None:
self.pdf_target_params = []
if type(self.pdf_target_params).__name__!='list':
self.pdf_target_params = [self.pdf_target_params]
if self.nburn is None:
self.nburn = 0
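# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A hypothetical example of drawing samples with the component-wise MMH algorithm and the
# default standard-normal marginal targets. It assumes scipy.stats is imported as sp and
# numpy as np at the top of this file; the parameter values are chosen for illustration only.
def _example_mcmc_usage():
    mcmc = MCMC(dimension=2, pdf_proposal_type='Uniform', algorithm='MMH', jump=1, nsamples=500)
    return mcmc.samples  # expected shape: (500, 2)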
########################################################################################################################
########################################################################################################################
# ADD ANY NEW METHOD HERE
######################################################################################################################## |
the-stack_0_2140 | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file tests/2d/quad4/slipweakening_compression_soln.py
##
## @brief Analytical solution to compression problem with slipweakening.
import numpy
# Physical properties
p_density = 2500.0
p_vs = 3000.0
p_vp = 5291.502622129181
p_mu = p_density*p_vs**2
p_lambda = p_density*p_vp**2 - 2*p_mu
# Uniform stress field (plane strain)
sxx = 0.0
sxy = 1.0e+6
syy = 0.0
szz = p_lambda/(2*p_lambda+2*p_mu)*(sxx+syy)
# Uniform strain field
exx = 1.0/(2*p_mu) * (sxx - p_lambda/(3*p_lambda+2*p_mu) * (sxx+syy+szz))
eyy = 1.0/(2*p_mu) * (syy - p_lambda/(3*p_lambda+2*p_mu) * (sxx+syy+szz))
ezz = 1.0/(2*p_mu) * (szz - p_lambda/(3*p_lambda+2*p_mu) * (sxx+syy+szz))
exy = 1.0/(2*p_mu) * (sxy)
#print exx,eyy,exy,ezz,szz
#print -exx*p_lambda/(p_lambda+2*p_mu)
# ----------------------------------------------------------------------
class AnalyticalSoln(object):
"""
Analytical solution to slipweakening_compression problem.
"""
def __init__(self):
return
def displacement(self, locs, nlocsO):
"""
Compute displacement field at locations.
"""
(nlocs, dim) = locs.shape
disp = numpy.zeros( (1, nlocs, 2), dtype=numpy.float64)
disp[0,:,1] = 2*exy*(locs[:,0]+max(abs(locs[:,0])))
return disp
def strain(self, locs):
"""
Compute strain field at locations.
"""
(npts, dim) = locs.shape
strain = numpy.zeros( (1, npts, 3), dtype=numpy.float64)
strain[0,:,0] = exx
strain[0,:,1] = eyy
strain[0,:,2] = exy
return strain
def stress(self, locs):
"""
Compute stress field at locations.
"""
(npts, dim) = locs.shape
stress = numpy.zeros( (1, npts, 3), dtype=numpy.float64)
stress[0,:,0] = sxx
stress[0,:,1] = syy
stress[0,:,2] = sxy
return stress
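# --- Illustrative usage sketch (added for clarity; not part of the original test) ---
# A hypothetical check that the analytical solution returns fields of the expected shapes
# for two query points; the point coordinates are arbitrary.
def _example_analytical_soln():
  pts = numpy.array([[0.0, 0.0], [1.0, 1.0]])
  soln = AnalyticalSoln()
  # expected shapes: (1, 2, 2) for displacement, (1, 2, 3) for strain and stress
  return soln.displacement(pts, 0).shape, soln.strain(pts).shape, soln.stress(pts).shape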
# End of file
|
the-stack_0_2141 | from typing import Dict
import pysftp
from flask import Blueprint, current_app
from paramiko import SSHException
from models import Instrument
from pkg.case_mover import CaseMover
from pkg.google_storage import GoogleStorage
from pkg.sftp import SFTP
from util.service_logging import log
mover = Blueprint("batch", __name__, url_prefix="/")
@mover.route("/")
def main():
config = current_app.nisra_config
sftp_config = current_app.sftp_config
google_storage = init_google_storage(config)
if google_storage.bucket is None:
return "Connection to bucket failed", 500
log.info("Connecting to SFTP server")
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
with pysftp.Connection(
host=sftp_config.host,
username=sftp_config.username,
password=sftp_config.password,
port=int(sftp_config.port),
cnopts=cnopts,
) as sftp_connection:
log.info("Connected to SFTP server")
sftp = SFTP(sftp_connection, sftp_config, config)
case_mover = CaseMover(google_storage, config, sftp)
instruments = get_filtered_instruments(sftp)
log.info(f"Processing survey - {sftp_config.survey_source_path}")
if len(instruments) == 0:
log.info("No instrument folders found")
return "No instrument folders found, exiting", 200
for instrument_name, instrument in instruments.items():
process_instrument(case_mover, instrument_name, instrument)
log.info("SFTP connection closed")
log.info("Process complete")
return "Process complete", 200
@mover.errorhandler(SSHException)
def handle_ssh_exception(exception):
log.error("SFTP connection failed - %s", exception)
return "SFTP connection failed", 500
@mover.errorhandler(Exception)
def handle_exception(exception):
log.error("Exception - %s", exception)
log.info("SFTP connection closed")
return "Exception occurred", 500
def process_instrument(
case_mover: CaseMover, instrument_name: str, instrument: Instrument
) -> None:
log.info(f"Processing instrument - {instrument_name} - {instrument.sftp_path}")
if case_mover.bdbx_md5_changed(instrument):
log.info(
f"Instrument - {instrument_name} - "
+ "has no changes to the databse file, skipping..."
)
else:
log.info(f"Syncing instrument - {instrument_name}")
case_mover.sync_instrument(instrument)
case_mover.send_request_to_api(instrument.gcp_folder())
def get_filtered_instruments(sftp: SFTP) -> Dict[str, Instrument]:
    instrument_folders = sftp.get_instrument_folders()
    instruments = sftp.get_instrument_files(instrument_folders)
instruments = sftp.filter_instrument_files(instruments)
instruments = sftp.generate_bdbx_md5s(instruments)
return instruments
def init_google_storage(config):
google_storage = GoogleStorage(config.bucket_name, log)
google_storage.initialise_bucket_connection()
return google_storage
|
the-stack_0_2142 | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a dataset in R^2 of specified size
Parameters
----------
n: int
Number of samples to generate
noise_ratio: float
Ratio of labels to invert
Returns
-------
X: np.ndarray of shape (n_samples,2)
Design matrix of samples
y: np.ndarray of shape (n_samples,)
Labels of samples
"""
'''
generate samples X with shape: (num_samples, 2) and labels y with shape (num_samples).
num_samples: the number of samples to generate
noise_ratio: invert the label for this ratio of the samples
'''
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
(train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
# Question 1: Train- and test errors of AdaBoost in noiseless case
ada = AdaBoost(wl=DecisionStump, iterations=n_learners)
ada.fit(train_X, train_y)
train_losses = [ada.partial_loss(train_X, train_y, i) for i in range(1, n_learners)]
test_losses = [ada.partial_loss(test_X, test_y, i) for i in range(1, n_learners)]
    x_arr = np.arange(1, n_learners)  # matches the length of train_losses/test_losses
fig1 = go.Figure([go.Scatter(x=x_arr, y=train_losses, name="train"),
go.Scatter(x=x_arr, y=test_losses, name="test")],
layout=dict(title="The training- and test errors as a function of the number of fitted "
"learners"))
fig1.show()
# Question 2: Plotting decision surfaces
symbols = np.array(["circle", "x"])
T = [5, 50, 100, 250]
lims = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
fig2 = make_subplots(rows=2, cols=2, subplot_titles=[rf"$\textbf{{{m}}} models$" for m in T],
horizontal_spacing=0.01, vertical_spacing=.03)
for i, m in enumerate(T):
fig2.add_traces([decision_surface(lambda x: ada.partial_predict(x, m), lims[0], lims[1],
showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
marker=dict(color=test_y,
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
rows=(i // 2) + 1, cols=(i % 2) + 1)
fig2.update_layout(title=rf"$\textbf{{ decision boundary - up to iteration 5, 50, 100 and 250}}$",
margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(visible=False)
fig2.show()
# Question 3: Decision surface of best performing ensemble
min_ = np.argmin(test_losses)
fig3 = go.Figure([decision_surface(lambda x: ada.partial_predict(x, int(min_)), lims[0],
lims[1],
showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
marker=dict(color=test_y,
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
layout=dict(title=f"The decision surface of the ensemble that achieved the lowest "
f"test error. "
f"ensemble size: {min_ + 1}, accuracy:"
f" {1 - test_losses[min_]}"))
fig3.show()
# Question 4: Decision surface with weighted samples
normalized_D = ada.D_ / np.max(ada.D_) * 5
fig4 = go.Figure([decision_surface(ada.predict, lims[0],
lims[1],
showscale=False),
go.Scatter(x=train_X[:, 0], y=train_X[:, 1], mode="markers", showlegend=False,
marker=dict(color=train_y, size=normalized_D,
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
layout=dict(title="The training set with a point size proportional to it’s weight"))
fig4.show()
if __name__ == '__main__':
np.random.seed(0)
fit_and_evaluate_adaboost(0)
fit_and_evaluate_adaboost(0.4)
|
the-stack_0_2144 | # Author: Tan Duc Mai
# Email: [email protected]
# Description: Three different functions to check whether a given number is a prime.
#              Return True if it is a prime, False otherwise.
#              The three functions, from a to c, decrease in efficiency
#              (i.e. they take progressively longer to run).
from math import sqrt
def is_prime_a(n):
if n < 2:
return False
sqrt_n = int(sqrt(n))
for i in range(2, sqrt_n + 1):
if n % i == 0:
return False
return True
def is_prime_b(n):
if n > 1:
if n == 2:
return True
else:
for i in range(2, n):
if n % i == 0:
return False
return True
return False
def is_prime_c(n):
divisible = 0
for i in range(1, n + 1):
if n % i == 0:
divisible += 1
if divisible == 2:
return True
return False
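# --- Illustrative comparison (added for clarity; not part of the original script) ---
# A quick hypothetical sanity check: all three functions agree on a known prime, and a
# simple timing loop hints at their relative cost (a is fastest, c is slowest).
if __name__ == '__main__':
    import timeit
    n = 104729  # the 10,000th prime
    assert is_prime_a(n) and is_prime_b(n) and is_prime_c(n)
    for fn in (is_prime_a, is_prime_b, is_prime_c):
        elapsed = timeit.timeit(lambda: fn(n), number=10)
        print(f'{fn.__name__}: {elapsed:.4f} s for 10 calls')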
|
the-stack_0_2147 | import pandas as pd
from colassigner.core import allcols
from encoref import CoReferenceLock, EntitySetPair, RelationPair
from ..constants import sides
from ..data_management import fe_raw_cols as fe_rc
from ..data_management import fe_trepos as fe_t2
from ..data_management import pv_raw_cols as pv_rc
from ..data_management import pv_trepos as pv_t2
from ..data_management.data_outputs import (
match_coref,
player_coref,
season_coref,
team_coref,
)
from ..pipereg import pipereg
from .create_bases import CorefCols, get_fe_bases, get_pv_bases
from .create_rolls import get_rolls
@pipereg.register(
outputs=[season_coref, player_coref, team_coref, match_coref],
dependencies=[
fe_t2.teams_table,
fe_t2.matches_table,
fe_t2.seasons_table,
fe_t2.lineups_table,
fe_t2.players_table,
pv_t2.countries_table,
pv_t2.player_info_table,
pv_t2.match_info_table,
pv_t2.seasons_table,
pv_t2.team_info_table,
pv_t2.match_lineups_table,
get_rolls,
get_fe_bases,
],
)
def run_entity_coreference():
(
fe_comp_df,
fe_season_df,
fe_match_df,
fe_player_df,
fe_team_df,
fe_lineup_df,
) = get_fe_bases()
(
pv_comp_df,
pv_season_df,
pv_match_df,
pv_player_df,
pv_team_df,
pv_lineup_df,
) = get_pv_bases()
es_pairs = [
EntitySetPair(
fe_match_df.loc[:, ["score", "date"]],
pv_match_df.loc[:, ["score", "date"]],
"match",
),
EntitySetPair(fe_team_df, pv_team_df, "team"),
EntitySetPair(
fe_season_df.loc[:, [fe_rc.SeasonsCols.competition_name]],
pv_season_df.loc[:, [pv_rc.SeasonInfoCols.competition_name]],
"season",
),
EntitySetPair(fe_player_df, pv_player_df, "player"),
EntitySetPair(fe_comp_df, pv_comp_df, "competition"),
]
rel_pairs = [
RelationPair(
fe_match_df.loc[:, [fe_rc.CommonCols.season_id]].reset_index(),
pv_match_df.loc[:, [pv_rc.CommonCols.season_id]].reset_index(),
name="match-season",
entity_types_of_columns=["match", "season"],
),
RelationPair(
fe_season_df.loc[:, fe_comp_df.index.name].reset_index(),
pv_season_df.loc[:, pv_comp_df.index.name].reset_index(),
name="season-comp",
entity_types_of_columns=["season", "competition"],
),
]
fixture_names = []
lup_names = {}
# here the order is assumed to be the same
# for sides and the 2 colaccessors
for side, fecol, pvcol in zip(sides, allcols(fe_rc.MatchesCols.TeamId), allcols(pv_rc.MatchInfoCols.TeamId)):
name = f"match-team-{side}"
fixture_names.append(name)
rel_pairs.append(
RelationPair(
fe_match_df.loc[:, [fecol]].reset_index(),
pv_match_df.loc[:, [pvcol]].reset_index(),
name=name,
entity_types_of_columns=["match", "team"],
)
)
lup_names[name] = []
for starter in ["starter", "sub"]:
lupname = f"lup-{side}-{starter}"
lup_names[name].append(lupname)
rel_pairs.append(
RelationPair(
fe_lineup_df.loc[
lambda df: (df["starter"] == starter) & (df[fe_rc.LineupsCols.side] == side),
[fe_rc.CommonCols.match_id, fe_rc.CommonCols.player_id],
],
pv_lineup_df.loc[
lambda df: (df["starter"] == starter) & (df[pv_rc.MatchLineupsCols.side] == side),
[pv_rc.CommonCols.match_id, pv_rc.CommonCols.player_id],
],
name=lupname,
entity_types_of_columns=["match", "player"],
)
)
crl = CoReferenceLock(
es_pairs,
rel_pairs,
progress_bar=True,
)
all_rolls = get_rolls(fixture_names, lup_names)
crl.run_searches(all_rolls)
(
fe_lineup_df.assign(
season=lambda df: fe_match_df.reindex(df[fe_rc.CommonCols.match_id])[fe_rc.CommonCols.season_id].values,
missing=lambda df: ~df[fe_rc.CommonCols.player_id].isin(crl.results["player"][0].keys()),
)
.groupby("season")["missing"]
.sum()
.loc[lambda s: s < 6_000_001]
.pipe(
lambda s: pd.Series(crl.results["season"][0], name=pv_rc.CommonCols.season_id)
.reindex(s.index)
.reset_index()
.rename(columns={"season": fe_rc.CommonCols.season_id})
.assign(**CorefCols(pv_season_df))
)
.pipe(season_coref.replace_all)
)
(
pd.DataFrame(
crl.results["player"][0].items(),
columns=[fe_rc.CommonCols.player_id, pv_rc.CommonCols.player_id],
).pipe(player_coref.replace_all)
)
(
pd.DataFrame(
crl.results["team"][0].items(),
columns=[fe_rc.CommonCols.team_id, pv_rc.CommonCols.team_id],
).pipe(team_coref.replace_all)
)
(
pd.DataFrame(
crl.results["match"][0].items(),
columns=[fe_rc.CommonCols.match_id, pv_rc.CommonCols.match_id],
).pipe(match_coref.replace_all)
)
|
the-stack_0_2148 | from __future__ import print_function
from .conv_utils import convert_kernel
from .. import backend as K
import numpy as np
def print_summary(model, line_length=None, positions=None, print_fn=print):
"""Prints a summary of a model.
# Arguments
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements in each line.
If not provided, defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
"""
if model.__class__.__name__ == 'Sequential':
sequential_like = True
else:
sequential_like = True
for v in model.nodes_by_depth.values():
if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
# if the model has multiple nodes or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
relevant_nodes = []
for v in model.nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
"""Prints a summary for a single layer.
# Arguments
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer.inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i].name
inbound_node_index = node.node_indices[i]
inbound_tensor_index = node.tensor_indices[i]
connections.append(inbound_layer + '[' + str(inbound_node_index) + '][' + str(inbound_tensor_index) + ']')
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
trainable_count = int(
np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
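# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A hypothetical example of capturing the summary of a small Sequential model as a string
# instead of printing it; it assumes a working standalone Keras installation.
def _example_print_summary():
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential([Dense(4, input_shape=(8,)), Dense(1)])
    lines = []
    print_summary(model, print_fn=lines.append)
    return '\n'.join(lines)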
def convert_all_kernels_in_model(model):
"""Converts all convolution kernels in a model from Theano to TensorFlow.
Also works from TensorFlow to Theano.
# Arguments
model: target model for the conversion.
"""
# Note: SeparableConvolution not included
# since only supported by TF.
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
"""Utility useful when changing a convnet's `data_format`.
When porting the weights of a convnet from one data format to the other,
if the convnet includes a `Flatten` layer
(applied to the last convolutional feature map)
followed by a `Dense` layer, the weights of that `Dense` layer
should be updated to reflect the new dimension ordering.
# Arguments
dense: The target `Dense` layer.
previous_feature_map_shape: A shape tuple of 3 integers,
e.g. `(512, 7, 7)`. The shape of the convolutional
feature map right before the `Flatten` layer that
came before the target `Dense` layer.
target_data_format: One of "channels_last", "channels_first".
Set it "channels_last"
if converting a "channels_first" model to "channels_last",
or reciprocally.
"""
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1)) # last -> first
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0)) # first -> last
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
|
the-stack_0_2149 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pendulum
from airbyte_cdk.sources.streams import Stream
from google.ads.googleads.v8.services.services.google_ads_service.pagers import SearchPager
from .google_ads import GoogleAds
def chunk_date_range(
start_date: str, conversion_window: int, field: str, end_date: str = None, time_unit: str = "months", days_of_data_storage: int = None
) -> Iterable[Mapping[str, any]]:
"""
    Passing the optional parameter end_date is intended for testing.
    Returns a list of the starting dates of the intervals between the start date and now (or end_date),
    stepped by time_unit. The return value is a list of dicts {field: date_string} used as stream slices.
"""
intervals = []
end_date = pendulum.parse(end_date) if end_date else pendulum.now()
start_date = pendulum.parse(start_date)
# For some metrics we can only get data not older than N days, it is Google Ads policy
if days_of_data_storage:
start_date = max(start_date, pendulum.now().subtract(days=days_of_data_storage - conversion_window))
# As in to return some state when state in abnormal
if start_date > end_date:
return [{field: start_date.to_date_string()}]
# applying conversion window
start_date = start_date.subtract(days=conversion_window)
    # Each stream_slice contains the starting date of one interval of length time_unit
while start_date < end_date:
intervals.append({field: start_date.to_date_string()})
start_date = start_date.add(**{time_unit: 1})
return intervals
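# --- Illustrative sketch (added for clarity; not part of the original connector) ---
# A hypothetical example of the slicing behaviour with values chosen for illustration:
# a 14-day conversion window pulls the first slice back to 2021-01-01, and slices then
# advance one month at a time up to (but not past) the end date.
def _example_chunk_date_range():
    slices = chunk_date_range("2021-01-15", 14, "segments.date", end_date="2021-03-10")
    # expected: [{'segments.date': '2021-01-01'}, {'segments.date': '2021-02-01'},
    #            {'segments.date': '2021-03-01'}]
    return slices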
class GoogleAdsStream(Stream, ABC):
def __init__(self, api: GoogleAds):
self.google_ads_client = api
def get_query(self, stream_slice: Mapping[str, Any]) -> str:
query = GoogleAds.convert_schema_into_query(schema=self.get_json_schema(), report_name=self.name)
return query
def parse_response(self, response: SearchPager) -> Iterable[Mapping]:
for result in response:
yield self.google_ads_client.parse_single_result(self.get_json_schema(), result)
def read_records(self, sync_mode, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
response = self.google_ads_client.send_request(self.get_query(stream_slice))
yield from self.parse_response(response)
class IncrementalGoogleAdsStream(GoogleAdsStream, ABC):
days_of_data_storage = None
cursor_field = "segments.date"
primary_key = None
time_unit = "months"
def __init__(self, start_date: str, conversion_window_days: int, **kwargs):
self.conversion_window_days = conversion_window_days
self._start_date = start_date
super().__init__(**kwargs)
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, any]]]:
stream_state = stream_state or {}
start_date = stream_state.get(self.cursor_field) or self._start_date
return chunk_date_range(
start_date=start_date,
conversion_window=self.conversion_window_days,
field=self.cursor_field,
time_unit=self.time_unit,
days_of_data_storage=self.days_of_data_storage,
)
@staticmethod
def get_date_params(stream_slice: Mapping[str, Any], cursor_field: str, end_date: pendulum.datetime = None, time_unit: str = "months"):
end_date = end_date or pendulum.yesterday()
start_date = pendulum.parse(stream_slice.get(cursor_field))
if start_date > pendulum.now():
return start_date.to_date_string(), start_date.add(days=1).to_date_string()
end_date = min(end_date, pendulum.parse(stream_slice.get(cursor_field)).add(**{time_unit: 1}))
# Fix issue #4806, start date should always be lower than end date.
if start_date.add(days=1).date() >= end_date.date():
return start_date.add(days=1).to_date_string(), start_date.add(days=2).to_date_string()
return start_date.add(days=1).to_date_string(), end_date.to_date_string()
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
current_stream_state = current_stream_state or {}
# When state is none return date from latest record
if current_stream_state.get(self.cursor_field) is None:
current_stream_state[self.cursor_field] = latest_record[self.cursor_field]
return current_stream_state
date_in_current_stream = pendulum.parse(current_stream_state.get(self.cursor_field))
date_in_latest_record = pendulum.parse(latest_record[self.cursor_field])
current_stream_state[self.cursor_field] = (max(date_in_current_stream, date_in_latest_record)).to_date_string()
return current_stream_state
def get_query(self, stream_slice: Mapping[str, Any] = None) -> str:
start_date, end_date = self.get_date_params(stream_slice, self.cursor_field, time_unit=self.time_unit)
query = GoogleAds.convert_schema_into_query(
schema=self.get_json_schema(), report_name=self.name, from_date=start_date, to_date=end_date, cursor_field=self.cursor_field
)
return query
class Accounts(GoogleAdsStream):
"""
Accounts stream: https://developers.google.com/google-ads/api/fields/v8/customer
"""
primary_key = "customer.id"
class Campaigns(GoogleAdsStream):
"""
Campaigns stream: https://developers.google.com/google-ads/api/fields/v8/campaign
"""
primary_key = "campaign.id"
class AdGroups(GoogleAdsStream):
"""
AdGroups stream: https://developers.google.com/google-ads/api/fields/v8/ad_group
"""
primary_key = "ad_group.id"
class AdGroupAds(GoogleAdsStream):
"""
AdGroups stream: https://developers.google.com/google-ads/api/fields/v8/ad_group_ad
"""
primary_key = "ad_group_ad.ad.id"
class AccountPerformanceReport(IncrementalGoogleAdsStream):
"""
AccountPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/customer
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#account_performance
"""
class AdGroupAdReport(IncrementalGoogleAdsStream):
"""
AdGroupAdReport stream: https://developers.google.com/google-ads/api/fields/v8/ad_group_ad
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#ad_performance
"""
class DisplayKeywordPerformanceReport(IncrementalGoogleAdsStream):
"""
DisplayKeywordPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/display_keyword_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#display_keyword_performance
"""
class DisplayTopicsPerformanceReport(IncrementalGoogleAdsStream):
"""
DisplayTopicsPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/topic_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#display_topics_performance
"""
class ShoppingPerformanceReport(IncrementalGoogleAdsStream):
"""
ShoppingPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/shopping_performance_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#shopping_performance
"""
class UserLocationReport(IncrementalGoogleAdsStream):
"""
UserLocationReport stream: https://developers.google.com/google-ads/api/fields/v8/user_location_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#geo_performance
"""
class ClickView(IncrementalGoogleAdsStream):
"""
ClickView stream: https://developers.google.com/google-ads/api/reference/rpc/v8/ClickView
"""
time_unit = "days"
days_of_data_storage = 90
|
the-stack_0_2151 | """Markov Decision Processes (Chapter 17)
http://aima.cs.berkeley.edu/python/mdp.html
First we define an MDP, and the special case of a GridMDP, in which
states are laid out in a 2-dimensional grid. We also represent a policy
as a dictionary of {state:action} pairs, and a Utility function as a
dictionary of {state:number} pairs. We then define the value_iteration
and policy_iteration algorithms."""
import random
class MDP:
"""A Markov Decision Process, defined by an initial state, transition model,
and reward function. We also keep track of a gamma value, for use by
algorithms. The transition model is represented somewhat differently from
the text. Instead of T(s, a, s') being probability number for each
state/action/state triplet, we instead have T(s, a) return a list of (p, s')
pairs. We also keep track of the possible states, terminal states, and
actions for each state. [page 615]"""
def __init__(self, init, actlist, terminals, gamma=.9):
        # Direct attribute assignment; replaces the `update` helper from the AIMA utils module,
        # which is not imported here.
        self.init = init
        self.actlist = actlist
        self.terminals = terminals
        self.gamma = gamma
        self.states = set()
        self.reward = {}
def R(self, state):
"Return a numeric reward for this state."
return self.reward[state]
def T(self, state, action):
"""Transition model. From a state and an action, return a list
of (result-state, probability) pairs."""
if action == None:
return [(0.0, state)]
else:
return [(0.8, self.go(state, action)),
(0.1, self.go(state, turn_right(action))),
(0.1, self.go(state, turn_left(action)))]
def actions(self, state):
"""Set of actions that can be performed in this state. By default, a
fixed list of actions, except for terminal states. Override this
method if you need to specialize by state."""
if state in self.terminals:
return [None]
else:
return self.actlist
def value_iteration(mdp, epsilon=0.001):
"Solving an MDP by value iteration. [Fig. 17.4]"
U1 = dict([(s, 0) for s in mdp.states])
R, T, gamma = mdp.R, mdp.T, mdp.gamma
while True:
U = U1.copy()
delta = 0
for s in mdp.states:
U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])
for a in mdp.actions(s)])
delta = max(delta, abs(U1[s] - U[s]))
if delta < epsilon * (1 - gamma) / gamma:
return U
def best_policy(mdp, U):
"""Given an MDP and a utility function U, determine the best policy,
as a mapping from state to action. (Equation 17.4)"""
pi = {}
for s in mdp.states:
pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp))
return pi
def expected_utility(a, s, U, mdp):
"The expected utility of doing a in state s, according to the MDP and U."
return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])
#______________________________________________________________________________
def argmax(seq, fn):
"""Return an element with highest fn(seq[i]) score; tie goes to first one.
'to'
"""
best = seq[0]; best_score = fn(best)
for x in seq:
x_score = fn(x)
if x_score > best_score:
best, best_score = x, x_score
return best
def policy_iteration(mdp):
"Solve an MDP by policy iteration [Fig. 17.7]"
U = dict([(s, 0) for s in mdp.states])
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
while True:
U = policy_evaluation(pi, U, mdp)
unchanged = True
for s in mdp.states:
a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp))
if a != pi[s]:
pi[s] = a
unchanged = False
if unchanged:
return pi
def policy_evaluation(pi, U, mdp, k=20):
"""Return an updated utility mapping U from each state in the MDP to its
utility, using an approximation (modified policy iteration)."""
R, T, gamma = mdp.R, mdp.T, mdp.gamma
for i in range(k):
for s in mdp.states:
            U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])])
return U
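# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A hypothetical two-state MDP with deterministic transitions, used only to exercise
# value_iteration and best_policy: 'move' jumps to the terminal state 'b', which carries
# the only reward, so the optimal policy in 'a' is to move.
class _TwoStateMDP(MDP):
    def __init__(self):
        MDP.__init__(self, init='a', actlist=['stay', 'move'], terminals=['b'], gamma=0.9)
        self.states = {'a', 'b'}
        self.reward = {'a': 0.0, 'b': 1.0}
    def T(self, state, action):
        if action is None:
            return [(0.0, state)]
        return [(1.0, 'b' if action == 'move' else state)]
def _example_mdp_usage():
    mdp = _TwoStateMDP()
    U = value_iteration(mdp, epsilon=0.001)
    return best_policy(mdp, U)  # expected: {'a': 'move', 'b': None}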
|
the-stack_0_2154 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert raw COCO dataset to TFRecord format.
This scripts follows the label map decoder format and supports detection
boxes, instance masks and captions.
Example usage:
python create_coco_tf_record.py --logtostderr \
--image_dir="${TRAIN_IMAGE_DIR}" \
--image_info_file="${TRAIN_IMAGE_INFO_FILE}" \
--object_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--caption_annotations_file="${CAPTION_ANNOTATIONS_FILE}" \
--output_file_prefix="${OUTPUT_DIR/FILE_PREFIX}" \
--num_shards=100
"""
import collections
import json
import logging
import os
from absl import app # pylint:disable=unused-import
from absl import flags
import numpy as np
from pycocotools import mask
import tensorflow as tf
import multiprocessing as mp
from official.vision.beta.data import tfrecord_lib
flags.DEFINE_boolean(
'include_masks', False, 'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
flags.DEFINE_string('image_dir', '', 'Directory containing images.')
flags.DEFINE_string(
'image_info_file', '', 'File containing image information. '
'Tf Examples in the output files correspond to the image '
'info entries in this file. If this file is not provided '
'object_annotations_file is used if present. Otherwise, '
'caption_annotations_file is used to get image info.')
flags.DEFINE_string(
'object_annotations_file', '', 'File containing object '
'annotations - boxes and instance masks.')
flags.DEFINE_string('caption_annotations_file', '', 'File containing image '
'captions.')
flags.DEFINE_string('output_file_prefix', '/tmp/train', 'Path to output file')
flags.DEFINE_integer('num_shards', 32, 'Number of shards for output file.')
FLAGS = flags.FLAGS
logger = tf.get_logger()
logger.setLevel(logging.INFO)
def coco_segmentation_to_mask_png(segmentation, height, width, is_crowd):
"""Encode a COCO mask segmentation as PNG string."""
run_len_encoding = mask.frPyObjects(segmentation, height, width)
binary_mask = mask.decode(run_len_encoding)
if not is_crowd:
binary_mask = np.amax(binary_mask, axis=2)
return tfrecord_lib.encode_binary_mask_as_png(binary_mask)
def coco_annotations_to_lists(bbox_annotations, id_to_name_map,
image_height, image_width, include_masks):
"""Convert COCO annotations to feature lists."""
data = dict((k, list()) for k in
['xmin', 'xmax', 'ymin', 'ymax', 'is_crowd',
'category_id', 'category_names', 'area'])
if include_masks:
data['encoded_mask_png'] = []
num_annotations_skipped = 0
for object_annotations in bbox_annotations:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
data['xmin'].append(float(x) / image_width)
data['xmax'].append(float(x + width) / image_width)
data['ymin'].append(float(y) / image_height)
data['ymax'].append(float(y + height) / image_height)
data['is_crowd'].append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
data['category_id'].append(category_id)
data['category_names'].append(id_to_name_map[category_id].encode('utf8'))
data['area'].append(object_annotations['area'])
if include_masks:
data['encoded_mask_png'].append(
coco_segmentation_to_mask_png(object_annotations['segmentation'],
image_height, image_width,
object_annotations['iscrowd'])
)
return data, num_annotations_skipped
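# --- Illustrative sketch (added for clarity; not part of the original script) ---
# A hypothetical example of the bbox normalization above: a COCO box given as
# [x, y, width, height] in absolute pixels is converted to normalized corner coordinates.
def _example_bbox_normalization():
  annotation = {'bbox': [20.0, 40.0, 60.0, 30.0], 'iscrowd': 0, 'category_id': 1, 'area': 1800.0}
  data, skipped = coco_annotations_to_lists([annotation], {1: 'person'},
                                            image_height=100, image_width=200,
                                            include_masks=False)
  # expected: xmin=0.1, xmax=0.4, ymin=0.4, ymax=0.7 and skipped == 0
  return data, skipped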
def bbox_annotations_to_feature_dict(
bbox_annotations, image_height, image_width, id_to_name_map, include_masks):
"""Convert COCO annotations to an encoded feature dict."""
data, num_skipped = coco_annotations_to_lists(
bbox_annotations, id_to_name_map, image_height, image_width,
include_masks)
feature_dict = {
'image/object/bbox/xmin':
tfrecord_lib.convert_to_feature(data['xmin']),
'image/object/bbox/xmax':
tfrecord_lib.convert_to_feature(data['xmax']),
'image/object/bbox/ymin':
tfrecord_lib.convert_to_feature(data['ymin']),
'image/object/bbox/ymax':
tfrecord_lib.convert_to_feature(data['ymax']),
'image/object/class/text':
tfrecord_lib.convert_to_feature(data['category_names']),
'image/object/class/label':
tfrecord_lib.convert_to_feature(data['category_id']),
'image/object/is_crowd':
tfrecord_lib.convert_to_feature(data['is_crowd']),
'image/object/area':
tfrecord_lib.convert_to_feature(data['area']),
}
if include_masks:
feature_dict['image/object/mask'] = (
tfrecord_lib.convert_to_feature(data['encoded_mask_png']))
return feature_dict, num_skipped
def encode_caption_annotations(caption_annotations):
captions = []
for caption_annotation in caption_annotations:
captions.append(caption_annotation['caption'].encode('utf8'))
return captions
def create_tf_example(image,
image_dir,
bbox_annotations=None,
id_to_name_map=None,
caption_annotations=None,
include_masks=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
image_dir: directory containing the image files.
bbox_annotations:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
expected by the Tensorflow Object Detection API (which is which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
id_to_name_map: a dict mapping category IDs to string names.
caption_annotations:
list of dict with keys: [u'id', u'image_id', u'str'].
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
feature_dict = tfrecord_lib.image_info_to_feature_dict(
image_height, image_width, filename, image_id, encoded_jpg, 'jpg')
num_annotations_skipped = 0
if bbox_annotations:
box_feature_dict, num_skipped = bbox_annotations_to_feature_dict(
bbox_annotations, image_height, image_width, id_to_name_map,
include_masks)
num_annotations_skipped += num_skipped
feature_dict.update(box_feature_dict)
if caption_annotations:
encoded_captions = encode_caption_annotations(caption_annotations)
feature_dict.update(
{'image/caption': tfrecord_lib.convert_to_feature(encoded_captions)})
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example, num_annotations_skipped
def _load_object_annotations(object_annotations_file):
"""Loads object annotation JSON file."""
with tf.io.gfile.GFile(object_annotations_file, 'r') as fid:
obj_annotations = json.load(fid)
images = obj_annotations['images']
id_to_name_map = dict((element['id'], element['name']) for element in
obj_annotations['categories'])
img_to_obj_annotation = collections.defaultdict(list)
logging.info('Building bounding box index.')
for annotation in obj_annotations['annotations']:
image_id = annotation['image_id']
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
logging.info('%d images are missing bboxes.', missing_annotation_count)
return img_to_obj_annotation, id_to_name_map
def _load_caption_annotations(caption_annotations_file):
"""Loads caption annotation JSON file."""
with tf.io.gfile.GFile(caption_annotations_file, 'r') as fid:
caption_annotations = json.load(fid)
img_to_caption_annotation = collections.defaultdict(list)
logging.info('Building caption index.')
for annotation in caption_annotations['annotations']:
image_id = annotation['image_id']
img_to_caption_annotation[image_id].append(annotation)
missing_annotation_count = 0
images = caption_annotations['images']
for image in images:
image_id = image['id']
if image_id not in img_to_caption_annotation:
missing_annotation_count += 1
logging.info('%d images are missing captions.', missing_annotation_count)
return img_to_caption_annotation
def _load_images_info(images_info_file):
with tf.io.gfile.GFile(images_info_file, 'r') as fid:
info_dict = json.load(fid)
return info_dict['images']
def generate_annotations(images, image_dir,
img_to_obj_annotation=None,
img_to_caption_annotation=None, id_to_name_map=None,
include_masks=False):
"""Generator for COCO annotations."""
for image in images:
object_annotation = (img_to_obj_annotation.get(image['id'], None) if
img_to_obj_annotation else None)
    caption_annotation = (img_to_caption_annotation.get(image['id'], None) if
                          img_to_caption_annotation else None)
    yield (image, image_dir, object_annotation, id_to_name_map,
           caption_annotation, include_masks)
def _create_tf_record_from_coco_annotations(images_info_file,
image_dir,
output_path,
num_shards,
object_annotations_file=None,
caption_annotations_file=None,
include_masks=False):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
images_info_file: JSON file containing image info. The number of tf.Examples
in the output tf Record files is exactly equal to the number of image info
entries in this file. This can be any of train/val/test annotation json
files Eg. 'image_info_test-dev2017.json',
'instance_annotations_train2017.json',
'caption_annotations_train2017.json', etc.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
num_shards: Number of output files to create.
object_annotations_file: JSON file containing bounding box annotations.
caption_annotations_file: JSON file containing caption annotations.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
"""
logging.info('writing to output path: %s', output_path)
images = _load_images_info(images_info_file)
img_to_obj_annotation = None
img_to_caption_annotation = None
id_to_name_map = None
if object_annotations_file:
img_to_obj_annotation, id_to_name_map = (
_load_object_annotations(object_annotations_file))
if caption_annotations_file:
img_to_caption_annotation = (
_load_caption_annotations(caption_annotations_file))
coco_annotations_iter = generate_annotations(
images, image_dir, img_to_obj_annotation, img_to_caption_annotation,
id_to_name_map=id_to_name_map, include_masks=include_masks)
num_skipped = tfrecord_lib.write_tf_record_dataset(
output_path, coco_annotations_iter, create_tf_example, num_shards)
logging.info('Finished writing, skipped %d annotations.', num_skipped)
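# Illustrative invocation of the helper above (the paths are hypothetical; the
# real entry point is main() below, which wires everything to FLAGS):
#
#   _create_tf_record_from_coco_annotations(
#       images_info_file='annotations/instances_val2017.json',
#       image_dir='val2017',
#       output_path='coco_val.tfrecord',
#       num_shards=8,
#       object_annotations_file='annotations/instances_val2017.json')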
def main(_):
assert FLAGS.image_dir, '`image_dir` missing.'
assert (FLAGS.image_info_file or FLAGS.object_annotations_file or
FLAGS.caption_annotations_file), ('All annotation files are '
'missing.')
if FLAGS.image_info_file:
images_info_file = FLAGS.image_info_file
elif FLAGS.object_annotations_file:
images_info_file = FLAGS.object_annotations_file
else:
images_info_file = FLAGS.caption_annotations_file
directory = os.path.dirname(FLAGS.output_file_prefix)
if not tf.io.gfile.isdir(directory):
tf.io.gfile.makedirs(directory)
_create_tf_record_from_coco_annotations(images_info_file, FLAGS.image_dir,
FLAGS.output_file_prefix,
FLAGS.num_shards,
FLAGS.object_annotations_file,
FLAGS.caption_annotations_file,
FLAGS.include_masks)
if __name__ == '__main__':
app.run(main)
|
the-stack_0_2155 | import pandas as pd
import numpy as np
import pickle
import json
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.tree import DecisionTreeRegressor
def find_best_model_using_gridsearchcv(X, y):
algos = {
'linear_regression': {
'model': LinearRegression(),
'params': {
'normalize': [True, False]
}
},
'lasso': {
'model': Lasso(),
'params': {
'alpha': [1,2],
'selection': ['random', 'cyclic']
}
},
'decision_tree': {
'model': DecisionTreeRegressor(),
'params': {
'criterion': ['mse', 'friedman_mse'],
'splitter': ['best', 'random']
}
}
}
scores = []
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
for algo_name, config in algos.items():
gs = GridSearchCV(config['model'], config['params'], cv=cv, return_train_score=False)
gs.fit(X,y)
scores.append({
'model': algo_name,
'best_score': gs.best_score_,
'best_params': gs.best_params_
})
return pd.DataFrame(scores, columns=['model', 'best_score', 'best_params'])
def predict_price(location, sqft, bath, bhk):
    # Locate the one-hot column for the requested location; fall back to -1
    # if the location is not among the dummy columns (e.g. it was in 'other').
    matches = np.where(X.columns == location)[0]
    loc_index = matches[0] if len(matches) > 0 else -1
x = np.zeros(len(X.columns))
x[0] = sqft
x[1] = bath
x[2] = bhk
if loc_index >= 0:
x[loc_index] = 1
return lr_clf.predict([x])[0]
df = pd.read_csv('data_file_cleaned_feateng_outrem.csv')
print(df.head())
print(df.shape)
# The location variable is textual, but needs to be numeric for model training
# You can use one hot encoding or dummy variables
dummies = pd.get_dummies(df.location)
df2 = pd.concat([df, dummies.drop('other', axis = 'columns')], axis = 'columns')
# Remember to avoid dummy variable trap, we need to drop one column (in this case 'other')
print(df2.head())
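# Illustrative (toy) example of what pd.get_dummies produces:
#   pd.get_dummies(pd.Series(['a', 'b', 'a']))
#        a  b
#     0  1  0
#     1  0  1
#     2  1  0
# Dropping one dummy column (here 'other') avoids the dummy variable trap,
# i.e. perfect multicollinearity among the one-hot columns.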
# Now define separate your features from your target
X = df2.drop(['location', 'price'], axis = 'columns')
print(df.total_sqft)
print(df.isnull().sum())
y = df2.price
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=10)
lr_clf = LinearRegression()
lr_clf.fit(X_train, y_train)
print(lr_clf.score(X_test, y_test))
# I'm getting a score of 65% which isn't that great
# In practice, we try multiple models and see what works
# We can do a k-fold cross validation
cv = ShuffleSplit(n_splits=5, test_size = 0.2, random_state=0)
cv_scores = cross_val_score(LinearRegression(), X, y, cv=cv)
print(cv_scores)
# What about other regression techniques?
# Here, we need a gridsearch cv (in the massive function at the top)
resultant = find_best_model_using_gridsearchcv(X, y)
print(resultant)
# I wonder if this can be improved by keeping the price in rupees
print(predict_price('1st Phase JP Nagar', 1000, 2, 2))
print(predict_price('1st Phase JP Nagar', 1000, 3, 3))
print(predict_price('Indira Nagar', 1000, 2, 2))
print(predict_price('Indira Nagar', 1000, 3, 3))
# Now we can export the data by pickling
# We also need the column index from our encoding
with open('bangalore_home_prices_model.pickle', 'wb') as f:
pickle.dump(lr_clf, f)
columns = {
'data_columns': [col.lower() for col in X.columns]
}
with open('columns.json', 'w') as f:
f.write(json.dumps(columns))
|
the-stack_0_2156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@scalar_mul
@@div
@@truediv
@@floordiv
@@mod
@@cross
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
## Matrix Math Functions
TensorFlow provides several operations that you can use to add linear algebra
functions on matrices to your graph.
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Fourier Transform Functions
TensorFlow provides several operations that you can use to add discrete
Fourier transform functions to your graph.
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
## Scan
TensorFlow provides several operations that you can use to perform scans
(running totals) across one axis of a tensor.
@@cumsum
@@cumprod
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
"""Computes the absolute value of a tensor.
Given a tensor of real numbers `x`, this operation returns a tensor
containing the absolute value of each element in `x`. For example, if x is
an input element and y is an output element, this operation computes
\\\\(y = |x|\\\\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, ops.SparseTensor):
if x.values.dtype in (dtypes.complex64, dtypes.complex128):
x_abs = gen_math_ops.complex_abs(x.values,
Tout=x.values.dtype.real_dtype, name=name)
return ops.SparseTensor(indices=x.indices, values=x_abs, shape=x.shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_abs, shape=x.shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
with ops.name_scope(name, "Divide", [x]) as name:
return x / y
def neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_neg = gen_math_ops.neg(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_neg, shape=x.shape)
else:
return gen_math_ops.neg(x, name=name)
def sign(x, name=None):
"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_sign, shape=x.shape)
else:
return gen_math_ops.sign(x, name=name)
def square(x, name=None):
"""Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_square, shape=x.shape)
else:
return gen_math_ops.square(x, name=name)
def sqrt(x, name=None):
"""Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_sqrt, shape=x.shape)
else:
return gen_math_ops.sqrt(x, name=name)
def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
Args:
x: A `Tensor` of `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_erf, shape=x.shape)
else:
return gen_math_ops.erf(x, name=name)
def complex_abs(x, name=None):
r"""Computes the complex absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\).
For example:
```
# tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
tf.complex_abs(x) ==> [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` of type `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype,
name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
real_dtype = input.dtype.real_dtype
if input.dtype.base_dtype == real_dtype:
return input
return gen_math_ops.real(input, Tout=real_dtype, name=name)
def imag(input, name=None):
"""Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the imaginary part of each element in
`input`. All elements in `input` must be complex numbers of the form \\(a +
bj\\), where *a* is the real part and *b* is the imaginary part returned by
this operation.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even, also known as banker's rounding. If you want to round
according to the current system rounding mode use tf::cint.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
# TODO(nolivia): Switch to new Round op
# return gen_math_ops.round(x, name=name)
return gen_math_ops.floor(x + 0.5, name=name)
ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
# tensor `a` is [1.8, 2.2], dtype=tf.float
tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, ops.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == base_type:
return x
return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value, ops.convert_to_tensor(
dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value, ops.convert_to_tensor(
dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name)
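# Hedged usage sketch, not part of the original module: illustrates the
# clamping described above (the _example_* helper name is ours).
def _example_saturate_cast():
  x = constant_op.constant([-300.0, 0.5, 300.0], dtype=dtypes.float32)
  # int8 can only hold [-128, 127], so -300.0 and 300.0 are clamped before
  # the cast; 0.5 is simply truncated by the cast itself.
  return saturate_cast(x, dtypes.int8)  # -> [-128, 0, 127]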
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if not isinstance(y, ops.SparseTensor):
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values,
sp_x.shape, y, name=name),
sp_x.shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
sp_shape, y, name=name)
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.div(x, y, name=name)
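# Hedged usage sketch, not part of the original module (the _example_* helper
# name is ours): integer inputs are promoted per _TRUEDIV_TABLE, so the result
# of truediv is always floating point.
def _example_truediv():
  x = constant_op.constant([3, 4], dtype=dtypes.int32)
  y = constant_op.constant([2, 2], dtype=dtypes.int32)
  return truediv(x, y)  # both operands cast to float64 -> [1.5, 2.0]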
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding down for floating point.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, `floordiv` uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly towards zero for negative integers).
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
# TODO(aselle): Switch to math_ops.floor_div() when ready
# return gen_math_ops.floor_div(x, y, name=name)
return gen_math_ops.div(x, y, name=name)
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, ops.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.shape, x, name)
return ops.SparseTensor(y.indices, new_vals, y.shape)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
# TODO(aselle): Switch mod to floor_mod when ready
# _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
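# With the registrations above, Python operators on `Tensor` objects dispatch
# to these functions: for example `a + b` calls gen_math_ops.add, `a / b`
# resolves to truediv under Python 3 (or with `from __future__ import
# division`), `a // b` to floordiv, and `a % b` to gen_math_ops.mod.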
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
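# Hedged usage sketch, not part of the original module (the _example_* helper
# name is ours).
def _example_logical_xor():
  x = constant_op.constant([True, True, False, False])
  y = constant_op.constant([True, False, True, False])
  return logical_xor(x, y)  # -> [False, True, True, False]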
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```python
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
# 'start' is 3
# 'limit' is 1
# 'delta' is -0.5
tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]
# 'limit' is 5
tf.range(limit) ==> [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
An 1-D `Tensor` of type `dtype`.
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32,
dtypes.float64]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2])
# Reduction operations
def _ReductionDims(x, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
if reduction_indices is not None:
return reduction_indices
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(np.arange(x.get_shape().ndims),
dtype=dtypes.int32)
if (isinstance(x, ops.SparseTensor) and
x.shape.get_shape().is_fully_defined()):
rank = x.shape.get_shape()[0].value # sparse.shape is an 1-D tensor.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False,
dtype=dtypes.int64, name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
# 'x' is [[0, 1, 0]
# [1, 1, 0]]
tf.count_nonzero(x) ==> 3
tf.count_nonzero(x, 0) ==> [1, 2, 0]
tf.count_nonzero(x, 1) ==> [1, 2]
tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
tf.count_nonzero(x, [0, 1]) ==> 3
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
with ops.name_scope(name, "count_nonzero", [input_tensor]):
input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
zero = input_tensor.dtype.as_numpy_dtype()
return cast(
reduce_sum(
# int64 reduction happens on GPU
to_int64(gen_math_ops.not_equal(input_tensor, zero)),
reduction_indices=reduction_indices,
keep_dims=keep_dims),
dtype=dtype)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1., 1.]
# [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_any(x) ==> True
tf.reduce_any(x, 0) ==> [True, True]
tf.reduce_any(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
  # 'x' is [[0, 0, 0],
  #         [0, 0, 0]]
tf.reduce_logsumexp(x) ==> log(6)
tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
my_max = array_ops.stop_gradient(
reduce_max(input_tensor, reduction_indices, keep_dims=True))
result = gen_math_ops.log(reduce_sum(
gen_math_ops.exp(input_tensor - my_max),
reduction_indices,
keep_dims=True)) + my_max
if not keep_dims:
result = array_ops.squeeze(result, reduction_indices)
return result
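# The max-shift above relies on the identity
#   log(sum(exp(x))) == max(x) + log(sum(exp(x - max(x)))),
# so every argument passed to exp is <= 0 and exp stays in (0, 1], avoiding
# overflow; terms far below the max underflow to 0 but are negligible anyway.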
def trace(x, name=None):
""" Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
# 'x' is [[1, 2],
# [3, 4]]
tf.trace(x) ==> 5
# 'x' is [[1,2,3],
# [4,5,6],
# [7,8,9]]
tf.trace(x) ==> 15
# 'x' is [[[1,2,3],
# [4,5,6],
# [7,8,9]],
# [[-1,-2,-3],
# [-4,-5,-6],
# [-7,-8,-9]]]
tf.trace(x) ==> [15,-15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be two-dimensional matrices, with matching inner dimensions,
possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float32`, `float64`, `int32`, `complex64`.
Either matrix can be transposed on the fly by setting the corresponding flag
to `True`. This is `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
```
Args:
a: `Tensor` of type `float32`, `float64`, `int32` or `complex64`.
b: `Tensor` with same type as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (a.dtype in sparse_matmul_types and
b.dtype in sparse_matmul_types and
(a_is_sparse or b_is_sparse))
if dtypes.bfloat16 in (a.dtype, b.dtype):
# matmul currently doesn't handle bfloat16 inputs.
use_sparse_matmul = True
if use_sparse_matmul:
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("MatMul", "weight_parameters")
def _calc_mat_mul_weight_parameters(graph, node):
"""Calculates the on-disk size of the weights for MatMul."""
# We assume here that the weights are always in the second input to the op,
# which is generally true by convention for fully-connected layers, but not
# enforced or checked.
weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
weights_shape.assert_is_fully_defined()
return ops.OpStats("weight_parameters",
(int(weights_shape[1]) * int(weights_shape[0])))
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == dtypes.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if len(inputs) == 1:
if name:
return array_ops.identity(inputs[0], name=name)
return inputs[0]
return gen_math_ops._add_n(inputs, name=name)
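# Hedged usage sketch, not part of the original module (the _example_* helper
# name is ours).
def _example_add_n():
  a = constant_op.constant([1, 2])
  b = constant_op.constant([3, 4])
  return add_n([a, b, a])  # elementwise sum -> [5, 8]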
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
NOTE: This operation is not differentiable and cannot be used if inputs depend
on trainable variables. Please use `tf.add_n` for such cases.
For example:
```python
# tensor 'a' is [[1, 2], [3, 4]]
# tensor `b` is [[5, 0], [0, 6]]
tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
==> [[7, 4], [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if len(inputs) == 1:
return inputs[0]
if tensor_dtype is None:
tensor_dtype = inputs[0].dtype
with ops.name_scope(name, "AccumulateN", inputs) as name:
var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0),
dtype=tensor_dtype)
with ops.colocate_with(var):
zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
zeros.set_shape(shape)
ref = state_ops.assign(var, zeros, validate_shape=False)
update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True)
for input_tensor in inputs]
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(
ref, var_name=var.op.name, name=name)
ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn)
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`
otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
`complex64`, `int64`, or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor or SparseTensor respectively with the same type as `x` if
`x.dtype != qint32` otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_tanh, shape=x.shape)
else:
return gen_math_ops._tanh(x, name=name)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```prettyprint
tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: A `bool` (default: False); if `True`, perform an exclusive cumsum.
    reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:
```prettyprint
tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
```
  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
  instead:
```prettyprint
tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```prettyprint
tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```prettyprint
tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: A `bool` (default: False); if `True`, perform an exclusive cumprod.
    reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
If `x` is real, it is returned unchanged.
Args:
x: `Tensor` to conjugate. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
"""
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops._conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
ops.RegisterShape("Abs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Acos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Asin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Atan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Ceil")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Conj")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cross")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Exp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Floor")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Imag")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Inv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsFinite")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsInf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsNan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Log")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalNot")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Neg")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Real")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Rsqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sign")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Square")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sigmoid")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tanh")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Lgamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Digamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erfc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cast")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ComplexAbs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TanhGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SigmoidGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("InvGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RsqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumsum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumprod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Add")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Complex")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Div")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Equal")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Greater")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("GreaterEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igammac")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Zeta")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Polygamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Less")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LessEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalAnd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalOr")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Maximum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Minimum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorMod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("NotEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Pow")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sub")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SquaredDifference")(common_shapes.call_cpp_shape_fn)
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
return [common_shapes.broadcast_shape(
op.inputs[0].get_shape(),
op.inputs[1].get_shape())]
ops.RegisterShape("Betainc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseAdd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AddN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Select")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
ops.RegisterShape("SegmentMax")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentProd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentSum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSqrtN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSum")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3])
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keep_dims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
input_shape = to_int32(input_shape) # [2, 3, 5, 7]
axes = to_int32(axes) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[range(input_rank), # [0, 1, 2, 3]
axes], # [1, 2]
[input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)]) # [1, 1]
ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Requantize")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RequantizationRange")(common_shapes.call_cpp_shape_fn)
|
the-stack_0_2160 | import discord
import os
from discord.ext import commands, tasks
from discord.utils import get
from discord.ext.commands import CheckFailure
from discord.ext.commands import MissingPermissions
import random
from alive import alive
import json
intents = discord.Intents.all()
intents.members = True
def get_prefix(client, message):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
return prefixes[str(message.guild.id)]
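# Illustrative layout of prefixes.json assumed by get_prefix (the file itself is
# created and maintained by the on_guild_join / prefix handlers below; the guild
# IDs shown here are examples only):
# {
#     "842401531171962911": ">",
#     "123456789012345678": "!"
# }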
bot = commands.Bot(command_prefix=get_prefix, intents=intents)
bot.remove_command('help')
my_secret = os.environ['Token']
@bot.event
async def on_guild_join(guild):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
prefixes[str(guild.id)] = ">"
with open("prefixes.json", "w") as f:
json.dump(prefixes, f, indent = 4)
@bot.event
async def on_guild_remove(guild):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
prefixes.pop(str(guild.id))
with open("prefixes.json", "w") as f:
json.dump(prefixes, f, indent = 4)
#ChangePrefix
@bot.command()
@commands.has_permissions(manage_messages=True)
async def prefix(ctx, prefix):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
prefixes[str(ctx.guild.id)] = prefix
await ctx.send("The prefix has been changed to: "+ prefix)
with open("prefixes.json", "w") as f:
json.dump(prefixes, f, indent = 4)
@prefix.error
async def prefix_error(ctx, error):
if isinstance (error, commands.MissingRequiredArgument):
await ctx.send('Please enter a prefix.')
if isinstance (error, commands.MissingPermissions):
await ctx.send('Aha comrade, that one is not for you.')
@bot.event
async def on_ready():
print("Bot is ready.")
await bot.change_presence(status=discord.Status.online, activity=discord.Game('Having A Midlife Crisis...'))
#join_message
@bot.event
async def on_member_join(member):
guild_id = member.guild.id
av = member.avatar_url
if guild_id == 842401531171962911:
channel = bot.get_channel(853879745263566898)
e = discord.Embed(color = discord.Color.green())
e.set_thumbnail(url=av)
e.add_field(name="Welcome!!", value=f"Welcome to the server {member.mention}!! Hope you have a good time! If you need any help regarding discord, please contact and admins or mods. If you need any help regarding questions, don't hesitate to ask in the doubt channels . And at last, please check self-roles at <#842413732167811152>")
await channel.send(embed=e)
else:
print('Currently Thinking.')
#server_leave
@bot.event
async def on_member_remove(member):
guild_id = member.guild.id
if guild_id == 842401531171962911:
channel = bot.get_channel(842607234160525334)
e = discord.Embed(color = discord.Colour.red())
e.set_thumbnail(url=member.avatar_url)
e.add_field(name="Member Left", value = f"{member} Has left the server.")
await channel.send(embed=e)
else:
print("Currently thinking.")
#NoCommandError
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send('No such command exists!')
#Welcome
@bot.command()
async def welcome(ctx):
await ctx.send(f'Welcome to the server!! Hope you have a good time! For help regarding Discord, please go to <#846802868215218177>. For any subject-regarded help please go to the respective doubts channel in the Doubts category. For a general chit-chat with our community, have fun at <#842407125329903616>!')
#CogLoad
@bot.command()
@commands.has_role('Owner')
async def load(ctx, extension):
bot.load_extension(f'cogs.{extension}')
@load.error
async def load_error(ctx, error):
if isinstance(error, commands.MissingRole):
await ctx.send('Sorry, but you do not have perms to use that command!')
#CogUnload
@bot.command()
@commands.has_role('Owner')
async def unload(ctx, extension):
bot.unload_extension(f'cogs.{extension}')
@unload.error
async def unload_error(ctx, error):
if isinstance(error, commands.MissingRole):
await ctx.send('Sorry, but you do not have perms to use that command!')
#CogReload
@bot.command()
@commands.has_role('Owner')
async def reload(ctx, extension):
bot.unload_extension(f'cogs.{extension}')
bot.load_extension(f'cogs.{extension}')
@reload.error
async def reload_error(ctx, error):
if isinstance(error, commands.MissingRole):
await ctx.send('Sorry, but you do not have perms to use that command!')
#Ping
@bot.command()
async def ping(ctx):
await ctx.send(f'Pong! {round(bot.latency*1000)}ms.')
#GitHub
@bot.command()
async def github(ctx):
embed = discord.Embed(title="GitHub Repo Link", color=discord.Colour.orange())
embed.add_field(name="Hydra Bot", value="https://github.com/doughnut9/Discord-Multipurpose-Bot" )
await ctx.send(embed=embed)
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
alive()
bot.run(os.getenv('Token'))
|
the-stack_0_2163 | """users table
Revision ID: 6c6be1ace116
Revises:
Create Date: 2021-08-26 21:28:47.593295
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6c6be1ace116'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
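# Usage sketch (not part of the generated migration): apply this revision with
#   alembic upgrade head
# and roll it back with
#   alembic downgrade -1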
|
the-stack_0_2165 | from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from tastypie.exceptions import BadRequest
from crits.campaigns.campaign import Campaign
from crits.campaigns.handlers import add_campaign
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class CampaignResource(CRITsAPIResource):
"""
Class to handle everything related to the Campaign API.
Currently supports GET and POST.
"""
class Meta:
object_class = Campaign
allowed_methods = ('get', 'post')
resource_name = "campaigns"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(CampaignResource, self).get_object_list(request, Campaign,
False)
def obj_create(self, bundle, **kwargs):
"""
Handles creating Campaigns through the API.
:param bundle: Bundle containing the information to create the Campaign.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
name = bundle.data.get('name', None)
description = bundle.data.get('description', None)
aliases = bundle.data.get('aliases', None)
bucket_list = bundle.data.get('bucket_list', None)
ticket = bundle.data.get('ticket', None)
content = {'return_code': 1,
'type': 'Campaign'}
if not name:
content['message'] = 'Need a Campaign name.'
self.crits_response(content)
result = add_campaign(name,
description,
aliases,
analyst,
bucket_list,
ticket)
if result.get('id'):
url = reverse('api_dispatch_detail',
kwargs={'resource_name': 'campaigns',
'api_name': 'v1',
'pk': result.get('id')})
content['url'] = url
content['id'] = result.get('id')
if result['success']:
content['return_code'] = 0
content['message'] = result['message']
self.crits_response(content)
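# Hypothetical POST body accepted by obj_create above (field names taken from the
# method; all values are illustrative only):
# {
#     "name": "Example Campaign",
#     "description": "Campaign created through the API",
#     "aliases": "alias1,alias2",
#     "bucket_list": "phishing",
#     "ticket": "TICKET-123"
# }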
|
the-stack_0_2166 | # This file contains Att2in2, AdaAtt, AdaAttMO, TopDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# TopDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
import misc.utils as utils
from .CaptionModel import CaptionModel
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
def init_hidden(self, bsz):
weight = next(self.parameters()).data
return (Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()),
Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()))
def forward(self, fc_feats, att_feats, seq):
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
outputs = []
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
_att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
for i in range(seq.size(1) - 1):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
#prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
#it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
it = Variable(it, requires_grad=False)
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].data.sum() == 0:
break
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
output = F.log_softmax(self.logit(output))
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def get_logprobs_state(self, it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state):
# 'it' is Variable contraining a word index
xt = self.embed(it)
output, state = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state)
logprobs = F.log_softmax(self.logit(output))
return logprobs, state
def sample_beam(self, fc_feats, att_feats, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
_att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats = fc_feats[k:k+1].expand(beam_size, fc_feats.size(1))
tmp_att_feats = att_feats[k:k+1].expand(*((beam_size,)+att_feats.size()[1:])).contiguous()
tmp_p_att_feats = p_att_feats[k:k+1].expand(*((beam_size,)+p_att_feats.size()[1:])).contiguous()
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(Variable(it, requires_grad=False))
output, state = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state)
logprobs = F.log_softmax(self.logit(output))
self.done_beams[k] = self.beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def sample(self, fc_feats, att_feats, opt={}):
sample_max = opt.get('sample_max', 1)
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
if beam_size > 1:
return self.sample_beam(fc_feats, att_feats, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
_att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
seq = []
seqLogprobs = []
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
elif sample_max:
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1)
sampleLogprobs = logprobs.gather(1, Variable(it, requires_grad=False)) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
xt = self.embed(Variable(it, requires_grad=False))
if t >= 1:
# stop when all finished
if t == 1:
unfinished = it > 0
else:
unfinished = unfinished * (it > 0)
if unfinished.sum() == 0:
break
it = it * unfinished.type_as(it)
seq.append(it) #seq[t] the input of t+2 time step
seqLogprobs.append(sampleLogprobs.view(-1))
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
logprobs = F.log_softmax(self.logit(output))
return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
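# Illustrative call sketch (assumptions: `model` is one of the AttModel subclasses
# defined below, fc_feats has shape (batch, fc_feat_size) and att_feats has shape
# (batch, att_size, att_feat_size)):
#   seq, seq_logprobs = model.sample(fc_feats, att_feats, {'sample_max': 1})
#   seq, seq_logprobs = model.sample(fc_feats, att_feats, {'beam_size': 3})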
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = F.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = F.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = F.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = F.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = F.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1))
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = F.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats)
return atten_out, state
class TopDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(TopDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // self.rnn_size
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = F.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot) # batch * att_size
att_feats_ = att_feats.view(-1, att_size, self.rnn_size) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
att_res = self.attention(state[0][-1], att_feats, p_att_feats)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = F.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * F.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class TopDownModel(AttModel):
def __init__(self, opt):
super(TopDownModel, self).__init__(opt)
self.num_layers = 2
self.core = TopDownCore(opt)
|
the-stack_0_2167 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.appengine_admin_v1.types import appengine
from google.cloud.appengine_admin_v1.types import service
from google.longrunning import operations_pb2 # type: ignore
from .base import ServicesTransport, DEFAULT_CLIENT_INFO
from .grpc import ServicesGrpcTransport
class ServicesGrpcAsyncIOTransport(ServicesTransport):
"""gRPC AsyncIO backend transport for Services.
Manages services of an application.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'appengine.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
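    # Illustrative sketch (not part of the generated file): the transport is
    # normally constructed indirectly by the Services async client, but a channel
    # can also be created directly, e.g.
    #   channel = ServicesGrpcAsyncIOTransport.create_channel(
    #       "appengine.googleapis.com")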
def __init__(self, *,
host: str = 'appengine.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_services(self) -> Callable[
[appengine.ListServicesRequest],
Awaitable[appengine.ListServicesResponse]]:
r"""Return a callable for the list services method over gRPC.
Lists all the services in the application.
Returns:
Callable[[~.ListServicesRequest],
Awaitable[~.ListServicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_services' not in self._stubs:
self._stubs['list_services'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/ListServices',
request_serializer=appengine.ListServicesRequest.serialize,
response_deserializer=appengine.ListServicesResponse.deserialize,
)
return self._stubs['list_services']
@property
def get_service(self) -> Callable[
[appengine.GetServiceRequest],
Awaitable[service.Service]]:
r"""Return a callable for the get service method over gRPC.
Gets the current configuration of the specified
service.
Returns:
Callable[[~.GetServiceRequest],
Awaitable[~.Service]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_service' not in self._stubs:
self._stubs['get_service'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/GetService',
request_serializer=appengine.GetServiceRequest.serialize,
response_deserializer=service.Service.deserialize,
)
return self._stubs['get_service']
@property
def update_service(self) -> Callable[
[appengine.UpdateServiceRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update service method over gRPC.
Updates the configuration of the specified service.
Returns:
Callable[[~.UpdateServiceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_service' not in self._stubs:
self._stubs['update_service'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/UpdateService',
request_serializer=appengine.UpdateServiceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_service']
@property
def delete_service(self) -> Callable[
[appengine.DeleteServiceRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete service method over gRPC.
Deletes the specified service and all enclosed
versions.
Returns:
Callable[[~.DeleteServiceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_service' not in self._stubs:
self._stubs['delete_service'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/DeleteService',
request_serializer=appengine.DeleteServiceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_service']
def close(self):
return self.grpc_channel.close()
__all__ = (
'ServicesGrpcAsyncIOTransport',
)
|
the-stack_0_2168 | # -*- coding: utf-8 -*-
import json
import os
import grpc
from rpc.pb import result_pb2
from rpc.pb.result_pb2_grpc import ResultStub
CHUNK_SIZE = 10 * 1024
def get_file_chunks(filename, folder_path):
yield result_pb2.StreamUploadPictureRequest(filename=filename)
with open(f'/usr/src/app/{folder_path}/' + filename, 'rb') as f:
while True:
piece = f.read(CHUNK_SIZE)
if len(piece) == 0:
return
yield result_pb2.StreamUploadPictureRequest(file_data={"buffer": piece})
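# Minimal usage sketch (assumed, not from the original source): the generator
# yields the filename first and then CHUNK_SIZE-sized pieces, so a stub can
# stream the whole file, e.g.
#   chunks = get_file_chunks('page.png', folder_path='screenshots')
#   response = stub.StreamUploadPicture(chunks)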
def remove_file(file_path):
"""
    Delete a file.
:param file_path:
:return:
"""
try:
os.remove(file_path)
except (NotImplementedError, FileNotFoundError):
pass
class ResultClient(object):
def __init__(self, rpc_server):
        # Channel to the RPC server
channel = grpc.insecure_channel(target=f'{rpc_server}', options=[
('grpc.max_send_message_length', int(os.getenv('GRPC_MAX_SEND_MESSAGE_LENGTH', 200)) * 1024 * 1024),
('grpc.max_receive_message_length', int(os.getenv('GRPC_MAX_RECEIVE_MESSAGE_LENGTH', 200)) * 1024 * 1024),
])
        # Create the Result gRPC service stub
self.stub = ResultStub(channel)
def save_base_result(self, subtask_id, url_id, url_address, finished_at, **kwargs):
"""保存爬虫基本信息"""
# 返回头部序列化
kwargs['response_headers'] = self.dic2json(kwargs.pop('response_headers', {}))
        # Derive the final HTTP status code from the redirect chain
kwargs['http_code'] = kwargs['redirect_chain'][-1]['redirect_http_code'] if kwargs['redirect_chain'] else None
        # Drop the default blank content produced by Firefox and Chrome
if kwargs['content'] and (kwargs['content'].startswith(
'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" dir="ltr" lang="en-US">')
or kwargs['content'] == '<html><head></head><body></body></html>'):
kwargs['content'] = None
        # # Serialize the HTTP interaction archive
# kwargs['http_archive'] = self.dic2json(kwargs.pop('http_archive', []))
self.stub.SaveBaseResult(
result_pb2.SaveBaseResultRequest(
subtask_id=subtask_id, url_id=url_id, url_address=url_address,
finished_at=finished_at, **kwargs),
timeout=30
)
def upload_screenshot(self, screenshot_name):
"""上传截图"""
chunks_generator = get_file_chunks(screenshot_name, folder_path='screenshots')
response = self.stub.StreamUploadPicture(chunks_generator)
file_path = f'/usr/src/app/screenshots/{screenshot_name}'
assert response.length == os.path.getsize(file_path)
remove_file(file_path)
def set_subtask_status(self, subtask_id, status, finished_at):
"""标记子任务爬取状态"""
self.stub.SetSubTaskStatus(
result_pb2.SetSubTaskStatusRequest(
subtask_id=subtask_id,
status=status,
finished_at=finished_at
),
timeout=30
)
def upload_har_file(self, har_file_name):
"""上传har文件"""
chunks_generator = get_file_chunks(har_file_name, folder_path='hars')
response = self.stub.StreamUploadHarFile(chunks_generator)
file_path = f'/usr/src/app/hars/{har_file_name}'
assert response.length == os.path.getsize(file_path)
remove_file(file_path)
@staticmethod
def dic2json(dic):
"""某些字段转换为json"""
return json.dumps(dic, ensure_ascii=False)
|
the-stack_0_2169 | tab = ''
def pow(x, n) :
global tab
tab += ' '
if n == 0 :
return 1
print(tab+"%d*%d^(%d-%d)" % (x, x, n, 1))
return x * pow (x, n-1)
print('2^4')
print('answer -->', pow(2, 4))
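# Expected output sketch (using the translated label above): each recursive call
# prints one multiplication line, indented one extra space, before the result:
#   2^4
#    2*2^(4-1)
#     2*2^(3-1)
#      2*2^(2-1)
#       2*2^(1-1)
#   answer --> 16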
|
the-stack_0_2170 | '''
setup.py for ConvLab-2
'''
import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class LibTest(TestCommand):
def run_tests(self):
# import here, cause outside the eggs aren't loaded
ret = os.system("pytest --cov=ConvLab-2 tests/ --cov-report term-missing")
sys.exit(ret >> 8)
setup(
name='ConvLab-2',
version='0.0.1',
packages=find_packages(exclude=[]),
license='Apache',
description='Task-oriented Dialog System Toolkits',
long_description=open('README.md', encoding='UTF-8').read(),
long_description_content_type="text/markdown",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
install_requires=[
'nltk>=3.4',
'tqdm>=4.30',
'checksumdir>=1.1',
'visdom',
'Pillow',
'future',
'torch',
'numpy>=1.15.0',
'scipy',
'scikit-learn==0.20.3',
'pytorch-pretrained-bert>=0.6.1',
'transformers>=2.3.0',
'tensorflow==1.14',
'tensorboard>=1.14.0',
'tensorboardX==1.7',
'allennlp',
'requests',
'simplejson',
'unidecode',
'jieba'
],
extras_require={
'develop': [
"python-coveralls",
"pytest-dependency",
"pytest-mock",
"requests-mock",
"pytest>=3.6.0",
"pytest-cov==2.4.0",
"checksumdir",
"bs4",
"lxml",
]
},
cmdclass={'test': LibTest},
entry_points={
'console_scripts': [
"ConvLab-2-report=convlab2.scripts:report"
]
},
include_package_data=True,
url='https://github.com/thu-coai/ConvLab-2',
author='thu-coai',
author_email='[email protected]',
python_requires='>=3.5',
zip_safe=False
)
|
the-stack_0_2172 |
from rest_framework import mixins, status, viewsets
from rest_framework import response
from rest_framework.response import Response
from rest_framework.decorators import action
from cride.users.models import Users
from cride.circles.models import Circle
from cride.circles.serializers import CircleModelSerializer
from cride.users.serializers.profile import ProfileModelSerializer
# * permisions
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from cride.users.permissions import IsAccountOwner
# * Serializer methods
from cride.users.serializers import (
UserLoginSerializer,
UserSignupSerializer,
UserModelSerializer,
AccountVerifySerializer
)
class UserViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""user view set
handle sign up, login and accointy verifications
Args:
viewsets ([type]): [description]
Returns:
[type]: [description]
"""
queryset = Users.objects.filter(is_active=True, is_cliente=True)
serializer_class = UserModelSerializer
lookup_field = 'username'
def get_permissions(self):
"""Asiigna permision basadas en una accion"""
if self.action in ['signup', 'login', 'verify']:
permissions = [AllowAny]
        elif self.action in ['retrieve', 'update', 'partial_update']:
permissions = [IsAuthenticated, IsAccountOwner]
else:
permissions = [IsAuthenticated]
        return [permission() for permission in permissions]
@action(detail=False, methods=['post'])
def login(self, request):
"""users sign up"""
serializer = UserLoginSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user, token = serializer.save()
data = {
'user': UserModelSerializer(user).data,
'access_token': token
}
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=False, methods=['post'])
def signup(self, request):
""""User signup"""
serializer = UserSignupSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
data = UserModelSerializer(user).data
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=False, methods=['post'])
def verify(self, request):
serializer = AccountVerifySerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
data = {
'message': 'Congratulations, now go share some rides!'
}
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=True, methods=['put','patch'])
def profile(self,request,*args, **kwargs):
"""update profile data"""
user = self.get_object()
profile = user.profiles
        partial = request.method == 'PATCH'
serializer = ProfileModelSerializer(
profile,
data = request.data,
partial = partial
)
serializer.is_valid(raise_exception=True)
serializer.save()
data = UserModelSerializer(user).data
return Response(data)
def retrieve(self,request, *args, **kwargs):
"""datros extras para el response"""
response = super(UserViewSet,self).retrieve(request, *args, **kwargs)
circles = Circle.objects.filter(
members = request.user,
membership__is_active = True
)
data = {
'user': response.data,
'circle':CircleModelSerializer(circles, many=True).data,
}
response.data = data
        return response
|
the-stack_0_2174 |
from flask import Flask, render_template, url_for, session, request, redirect, flash
from flask_pymongo import PyMongo
import bcrypt
app = Flask(__name__)
app.config['MONGO_URI'] = "mongodb+srv://Marco:[email protected]/Ludus-Parthenope?retryWrites=true&w=majority"
mongo = PyMongo(app)
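# Collections used by the routes below (inferred from the queries; they are not
# declared anywhere in this file): users, games, tournaments, teams.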
@app.route('/')
def index():
if 'username' in session:
return redirect(url_for('home'))
else:
return render_template('login.html')
@app.route('/login', methods=['POST'])
def login():
users = mongo.db.users
login_user = users.find_one({'name' : request.form['username']})
if login_user:
if bcrypt.hashpw(request.form['pw'].encode('utf-8'), login_user['password']) == login_user['password']:
session['username'] = request.form['username']
try:
session['team'] = login_user['team']
except:
pass
return redirect(url_for('index'))
else:
flash("Username o password errati")
return redirect(url_for('index'))
@app.route('/logout')
def logout():
session.pop('team', None)
session.pop('username', None)
return redirect(url_for('index'))
@app.route('/register', methods=['POST','GET'])
def register():
if request.method == 'POST':
users = mongo.db.users
existing_user = users.find_one({'name' : request.form['username']})
existing_email = users.find_one({'email' : request.form['email']})
if existing_user is None:
if existing_email is None:
hashpass = bcrypt.hashpw(request.form['pw'].encode('utf-8'), bcrypt.gensalt())
users.insert({'name' : request.form['username'], 'password' : hashpass, 'email' : request.form['email']})
session['username'] = request.form['username']
return redirect(url_for('index'))
else:
flash("Questa email è già stata registrata da un altro utente")
return redirect(url_for('register'))
else:
flash("Esiste già un utente con questo username!")
return redirect(url_for('register'))
return render_template('register.html')
@app.route('/home')
def home():
if 'username' in session:
session.database = mongo.db
return render_template('home.html')
else:
return redirect(url_for('index'))
@app.route('/library')
def library():
if 'username' in session:
session.database = mongo.db
return render_template('library.html')
else:
return redirect(url_for('index'))
@app.route('/join/<torneo>')
def join(torneo):
mongo.db.tournaments.update_one({'name': torneo}, {"$addToSet": {'competitors': session['username']}})
return redirect(url_for('home'))
@app.route('/aggiungi',methods=['POST'])
def aggiungi():
newgame = request.form['add']
user_library = mongo.db.users.find_one({"$and":[{'name': session['username']},{'games': newgame}]})
if user_library:
flash("Gioco già presente in libreria")
return redirect(url_for('library'))
else:
mongo.db.users.update_one({'name': session['username'] }, {"$addToSet": {'games': newgame}})
return redirect(url_for('library'))
@app.route('/search',methods=['POST'])
def search():
risultato = mongo.db.tournaments.find_one({'name': request.form['codice']})
if risultato:
session.ricerca = risultato
return render_template('search.html')
else:
flash("Torneo non trovato")
return redirect(url_for('home'))
@app.route('/admin')
def admin():
if session['username'] == "admin":
session.database = mongo.db
return render_template('admin.html')
else:
return redirect(url_for('home'))
@app.route('/addgame',methods=['POST'])
def addgame():
if mongo.db.games.find_one({'title': request.form['newgame']}):
flash("Gioco già presente")
return redirect(url_for('admin'))
else:
mongo.db.games.insert({'title': request.form['newgame']})
flash("Gioco inserito con successo")
return redirect(url_for('admin'))
@app.route('/addtournament',methods=['POST'])
def addtournament():
if mongo.db.tournaments.find_one({'name': request.form['newtournamentid']}):
flash("Torneo già presente")
return redirect(url_for('admin'))
else:
date= request.form['newtournamentdate'] + " " + request.form['newtournamenthour']
mongo.db.tournaments.insert({'name': request.form['newtournamentid'], 'title': request.form['newtournamentgame'], 'date': date})
flash("Torneo inserito con successo")
return redirect(url_for('admin'))
@app.route('/team')
def team():
if 'username' in session:
session.database = mongo.db
return render_template('team.html')
else:
return redirect(url_for('index'))
@app.route('/searchteam',methods=['POST'])
def searchteam():
risultato = mongo.db.teams.find_one({'name': request.form['teamname']})
if risultato:
mongo.db.users.update_one({'name': session['username']},{"$set": {'team': request.form['teamname']}})
mongo.db.teams.update_one({'name': request.form['teamname']},{"$addToSet":{'users': session['username']}})
session['team'] = request.form['teamname']
return redirect(url_for('team'))
else:
flash("Il team inserito non è stato trovato")
return redirect(url_for('team'))
@app.route('/createteam',methods=['POST'])
def createteam():
risultato = mongo.db.teams.find_one({'name': request.form['teamname']})
if risultato:
flash("Esiste già un team con questo nome")
return redirect(url_for('team'))
else:
mongo.db.teams.insert({'name': request.form['teamname']})
mongo.db.users.update_one({'name': session['username']}, {"$set": {'team': request.form['teamname']}})
mongo.db.teams.update_one({'name': request.form['teamname']}, {"$addToSet": {'users': session['username']}})
session['team'] = request.form['teamname']
return redirect(url_for('team'))
@app.route('/leaveteam')
def leaveteam():
mongo.db.users.update_one({'name': session['username']}, {"$unset": {'team': session['team']}})
mongo.db.teams.update_one({'name': session['team']}, {"$pull": {'users': session['username']}})
numero_membri = mongo.db.teams.find_one({'name': session['team']})
if len(numero_membri['users']) == 0:
mongo.db.teams.delete_one({'name': session['team']})
session.pop('team',None)
return redirect(url_for('team'))
# The secret key must be set before the app serves requests, otherwise the
# session and flash() calls above fail at runtime.
app.secret_key = 'super secret key'
if __name__ == '__main__':
    app.run()
|
the-stack_0_2176 |
# -*- coding: utf-8 -*-
"""
Manage users with the useradd command
.. important::
If you feel that Salt should be using this module to manage users on a
minion, and it is using a different module (or gives an error similar to
*'user.info' is not available*), see :ref:`here
<module-provider-override>`.
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
import functools
import logging
import os
# Import salt libs
import salt.utils.data
import salt.utils.decorators.path
import salt.utils.files
import salt.utils.stringutils
import salt.utils.user
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "user"
def __virtual__():
"""
Set the user module if the kernel is Linux, OpenBSD, NetBSD or AIX
"""
if HAS_PWD and __grains__["kernel"] in ("Linux", "OpenBSD", "NetBSD", "AIX"):
return __virtualname__
return (
False,
"useradd execution module not loaded: either pwd python library not available or system not one of Linux, OpenBSD, NetBSD or AIX",
)
def _quote_username(name):
"""
Usernames can only contain ascii chars, so make sure we return a str type
"""
if not isinstance(name, six.string_types):
return str(name) # future lint: disable=blacklisted-function
else:
return salt.utils.stringutils.to_str(name)
def _get_gecos(name, root=None):
"""
Retrieve GECOS field info and return it in dictionary form
"""
if root is not None and __grains__["kernel"] != "AIX":
getpwnam = functools.partial(_getpwnam, root=root)
else:
getpwnam = functools.partial(pwd.getpwnam)
gecos_field = salt.utils.stringutils.to_unicode(
getpwnam(_quote_username(name)).pw_gecos
).split(",", 4)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 5:
gecos_field.append("")
return {
"fullname": salt.utils.data.decode(gecos_field[0]),
"roomnumber": salt.utils.data.decode(gecos_field[1]),
"workphone": salt.utils.data.decode(gecos_field[2]),
"homephone": salt.utils.data.decode(gecos_field[3]),
"other": salt.utils.data.decode(gecos_field[4]),
}
def _build_gecos(gecos_dict):
"""
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
"""
return "{0},{1},{2},{3},{4}".format(
gecos_dict.get("fullname", ""),
gecos_dict.get("roomnumber", ""),
gecos_dict.get("workphone", ""),
gecos_dict.get("homephone", ""),
gecos_dict.get("other", ""),
).rstrip(",")
def _update_gecos(name, key, value, root=None):
"""
Common code to change a user's GECOS information
"""
if value is None:
value = ""
elif not isinstance(value, six.string_types):
value = six.text_type(value)
else:
value = salt.utils.stringutils.to_unicode(value)
pre_info = _get_gecos(name, root=root)
if not pre_info:
return False
if value == pre_info[key]:
return True
gecos_data = copy.deepcopy(pre_info)
gecos_data[key] = value
cmd = ["usermod"]
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
cmd.extend(("-c", _build_gecos(gecos_data), name))
__salt__["cmd.run"](cmd, python_shell=False)
return _get_gecos(name, root=root).get(key) == value
def add(
name,
uid=None,
gid=None,
groups=None,
home=None,
shell=None,
unique=True,
system=False,
fullname="",
roomnumber="",
workphone="",
homephone="",
other="",
createhome=True,
loginclass=None,
nologinit=False,
root=None,
usergroup=None,
):
"""
Add a user to the minion
name
Username LOGIN to add
uid
User ID of the new account
gid
Name or ID of the primary group of the new account
groups
List of supplementary groups of the new account
home
Home directory of the new account
shell
Login shell of the new account
unique
If not True, the user account can have a non-unique UID
system
Create a system account
fullname
GECOS field for the full name
roomnumber
GECOS field for the room number
workphone
GECOS field for the work phone
homephone
GECOS field for the home phone
other
GECOS field for other information
createhome
Create the user's home directory
loginclass
Login class for the new account (OpenBSD)
nologinit
Do not add the user to the lastlog and faillog databases
root
Directory to chroot into
usergroup
Create and add the user to a new primary group of the same name
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
"""
cmd = ["useradd"]
if shell:
cmd.extend(["-s", shell])
if uid not in (None, ""):
cmd.extend(["-u", uid])
if gid not in (None, ""):
cmd.extend(["-g", gid])
elif usergroup:
cmd.append("-U")
if __grains__["kernel"] != "Linux":
log.warning("'usergroup' is only supported on GNU/Linux hosts.")
elif groups is not None and name in groups:
defs_file = "/etc/login.defs"
if __grains__["kernel"] != "OpenBSD":
try:
with salt.utils.files.fopen(defs_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if "USERGROUPS_ENAB" not in line[:15]:
continue
if "yes" in line:
cmd.extend(["-g", __salt__["file.group_to_gid"](name)])
# We found what we wanted, let's break out of the loop
break
except OSError:
log.debug(
"Error reading %s", defs_file, exc_info_on_loglevel=logging.DEBUG
)
else:
usermgmt_file = "/etc/usermgmt.conf"
try:
with salt.utils.files.fopen(usermgmt_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if "group" not in line[:5]:
continue
cmd.extend(["-g", line.split()[-1]])
# We found what we wanted, let's break out of the loop
break
except OSError:
# /etc/usermgmt.conf not present: defaults will be used
pass
# Setting usergroup to False adds the -N command argument. If
# usergroup is None, no arguments are added to allow useradd to go
# with the defaults defined for the OS.
if usergroup is False:
cmd.append("-N")
if createhome:
cmd.append("-m")
elif __grains__["kernel"] != "NetBSD" and __grains__["kernel"] != "OpenBSD":
cmd.append("-M")
if nologinit:
cmd.append("-l")
if home is not None:
cmd.extend(["-d", home])
if not unique and __grains__["kernel"] != "AIX":
cmd.append("-o")
if (
system
and __grains__["kernel"] != "NetBSD"
and __grains__["kernel"] != "OpenBSD"
):
cmd.append("-r")
if __grains__["kernel"] == "OpenBSD":
if loginclass is not None:
cmd.extend(["-L", loginclass])
cmd.append(name)
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
ret = __salt__["cmd.run_all"](cmd, python_shell=False)
if ret["retcode"] != 0:
return False
# At this point, the user was successfully created, so return true
# regardless of the outcome of the below functions. If there is a
    # problem with changing any of the user's info below, it will be raised
# in a future highstate call. If anyone has a better idea on how to do
# this, feel free to change it, but I didn't think it was a good idea
# to return False when the user was successfully created since A) the
# user does exist, and B) running useradd again would result in a
# nonzero exit status and be interpreted as a False result.
if groups:
chgroups(name, groups, root=root)
if fullname:
chfullname(name, fullname, root=root)
if roomnumber:
chroomnumber(name, roomnumber, root=root)
if workphone:
chworkphone(name, workphone, root=root)
if homephone:
chhomephone(name, homephone, root=root)
if other:
chother(name, other, root=root)
return True
def delete(name, remove=False, force=False, root=None):
"""
Remove a user from the minion
name
Username to delete
remove
Remove home directory and mail spool
force
Force some actions that would fail otherwise
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.delete name remove=True force=True
"""
cmd = ["userdel"]
if remove:
cmd.append("-r")
if force and __grains__["kernel"] != "OpenBSD" and __grains__["kernel"] != "AIX":
cmd.append("-f")
cmd.append(name)
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
ret = __salt__["cmd.run_all"](cmd, python_shell=False)
if ret["retcode"] == 0:
# Command executed with no errors
return True
if ret["retcode"] == 12:
# There's a known bug in Debian based distributions, at least, that
# makes the command exit with 12, see:
# https://bugs.launchpad.net/ubuntu/+source/shadow/+bug/1023509
if __grains__["os_family"] not in ("Debian",):
return False
if "var/mail" in ret["stderr"] or "var/spool/mail" in ret["stderr"]:
# We've hit the bug, let's log it and not fail
log.debug(
"While the userdel exited with code 12, this is a known bug on "
"debian based distributions. See http://goo.gl/HH3FzT"
)
return True
return False
def getent(refresh=False, root=None):
"""
Return the list of all info for all users
refresh
Force a refresh of user information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.getent
"""
if "user.getent" in __context__ and not refresh:
return __context__["user.getent"]
ret = []
if root is not None and __grains__["kernel"] != "AIX":
getpwall = functools.partial(_getpwall, root=root)
else:
getpwall = functools.partial(pwd.getpwall)
for data in getpwall():
ret.append(_format_info(data))
__context__["user.getent"] = ret
return ret
def _chattrib(name, key, value, param, persist=False, root=None):
"""
Change an attribute for a named user
"""
pre_info = info(name, root=root)
if not pre_info:
raise CommandExecutionError("User '{0}' does not exist".format(name))
if value == pre_info[key]:
return True
cmd = ["usermod"]
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
if persist and __grains__["kernel"] != "OpenBSD":
cmd.append("-m")
cmd.extend((param, value, name))
__salt__["cmd.run"](cmd, python_shell=False)
return info(name, root=root).get(key) == value
def chuid(name, uid, root=None):
"""
Change the uid for a named user
name
User to modify
uid
New UID for the user account
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376
"""
return _chattrib(name, "uid", uid, "-u", root=root)
def chgid(name, gid, root=None):
"""
Change the default group of the user
name
User to modify
gid
Force use GID as new primary group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chgid foo 4376
"""
return _chattrib(name, "gid", gid, "-g", root=root)
def chshell(name, shell, root=None):
"""
Change the default shell of the user
name
User to modify
shell
New login shell for the user account
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chshell foo /bin/zsh
"""
return _chattrib(name, "shell", shell, "-s", root=root)
def chhome(name, home, persist=False, root=None):
"""
Change the home directory of the user, pass True for persist to move files
to the new home directory if the old home directory exist.
name
User to modify
home
New home directory for the user account
persist
Move contents of the home directory to the new location
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /home/users/foo True
"""
return _chattrib(name, "home", home, "-d", persist=persist, root=root)
def chgroups(name, groups, append=False, root=None):
"""
Change the groups to which this user belongs
name
User to modify
groups
Groups to set for the user
append : False
If ``True``, append the specified group(s). Otherwise, this function
will replace the user's groups with the specified group(s).
root
Directory to chroot into
CLI Examples:
.. code-block:: bash
salt '*' user.chgroups foo wheel,root
salt '*' user.chgroups foo wheel,root append=True
"""
if isinstance(groups, six.string_types):
groups = groups.split(",")
ugrps = set(list_groups(name))
if ugrps == set(groups):
return True
cmd = ["usermod"]
if __grains__["kernel"] != "OpenBSD":
if append and __grains__["kernel"] != "AIX":
cmd.append("-a")
cmd.append("-G")
else:
if append:
cmd.append("-G")
else:
cmd.append("-S")
if append and __grains__["kernel"] == "AIX":
cmd.extend([",".join(ugrps) + "," + ",".join(groups), name])
else:
cmd.extend([",".join(groups), name])
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
result = __salt__["cmd.run_all"](cmd, python_shell=False)
# try to fallback on gpasswd to add user to localgroups
# for old lib-pamldap support
if __grains__["kernel"] != "OpenBSD" and __grains__["kernel"] != "AIX":
if result["retcode"] != 0 and "not found in" in result["stderr"]:
ret = True
for group in groups:
cmd = ["gpasswd", "-a", name, group]
if __salt__["cmd.retcode"](cmd, python_shell=False) != 0:
ret = False
return ret
return result["retcode"] == 0
def chfullname(name, fullname, root=None):
"""
Change the user's Full Name
name
User to modify
fullname
GECOS field for the full name
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chfullname foo "Foo Bar"
"""
return _update_gecos(name, "fullname", fullname, root=root)
def chroomnumber(name, roomnumber, root=None):
"""
Change the user's Room Number
CLI Example:
.. code-block:: bash
salt '*' user.chroomnumber foo 123
"""
return _update_gecos(name, "roomnumber", roomnumber, root=root)
def chworkphone(name, workphone, root=None):
"""
Change the user's Work Phone
name
User to modify
workphone
GECOS field for the work phone
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chworkphone foo 7735550123
"""
return _update_gecos(name, "workphone", workphone, root=root)
def chhomephone(name, homephone, root=None):
"""
Change the user's Home Phone
name
User to modify
homephone
GECOS field for the home phone
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chhomephone foo 7735551234
"""
return _update_gecos(name, "homephone", homephone, root=root)
def chother(name, other, root=None):
"""
Change the user's other GECOS attribute
name
User to modify
other
GECOS field for other information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chother foobar
"""
return _update_gecos(name, "other", other, root=root)
def chloginclass(name, loginclass, root=None):
"""
Change the default login class of the user
name
User to modify
loginclass
Login class for the new account
root
Directory to chroot into
.. note::
This function only applies to OpenBSD systems.
CLI Example:
.. code-block:: bash
salt '*' user.chloginclass foo staff
"""
if __grains__["kernel"] != "OpenBSD":
return False
if loginclass == get_loginclass(name):
return True
cmd = ["usermod", "-L", loginclass, name]
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
__salt__["cmd.run"](cmd, python_shell=False)
return get_loginclass(name) == loginclass
def info(name, root=None):
"""
Return user information
name
User to get the information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.info root
"""
# If root is provided, we use a less portable solution that
# depends on analyzing /etc/passwd manually. Of course we cannot
    # find users from NIS or LDAP, but in those cases it does not make
    # sense to provide a root parameter.
#
# Please, note that if the non-root /etc/passwd file is long the
# iteration can be slow.
if root is not None and __grains__["kernel"] != "AIX":
getpwnam = functools.partial(_getpwnam, root=root)
else:
getpwnam = functools.partial(pwd.getpwnam)
try:
data = getpwnam(_quote_username(name))
except KeyError:
return {}
else:
return _format_info(data)
def get_loginclass(name):
"""
Get the login class of the user
name
User to get the information
.. note::
This function only applies to OpenBSD systems.
CLI Example:
.. code-block:: bash
salt '*' user.get_loginclass foo
"""
if __grains__["kernel"] != "OpenBSD":
return False
userinfo = __salt__["cmd.run_stdout"](["userinfo", name], python_shell=False)
for line in userinfo.splitlines():
if line.startswith("class"):
try:
ret = line.split(None, 1)[1]
break
except (ValueError, IndexError):
continue
else:
ret = ""
return ret
def _format_info(data):
"""
Return user information in a pretty way
"""
# Put GECOS info into a list
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(",", 4)
# Make sure our list has at least five elements
while len(gecos_field) < 5:
gecos_field.append("")
return {
"gid": data.pw_gid,
"groups": list_groups(data.pw_name),
"home": data.pw_dir,
"name": data.pw_name,
"passwd": data.pw_passwd,
"shell": data.pw_shell,
"uid": data.pw_uid,
"fullname": gecos_field[0],
"roomnumber": gecos_field[1],
"workphone": gecos_field[2],
"homephone": gecos_field[3],
"other": gecos_field[4],
}
@salt.utils.decorators.path.which("id")
def primary_group(name):
"""
Return the primary group of the named user
.. versionadded:: 2016.3.0
name
User to get the information
CLI Example:
.. code-block:: bash
salt '*' user.primary_group saltadmin
"""
return __salt__["cmd.run"](["id", "-g", "-n", name])
def list_groups(name):
"""
Return a list of groups the named user belongs to
name
User to get the information
CLI Example:
.. code-block:: bash
salt '*' user.list_groups foo
"""
return salt.utils.user.get_group_list(name)
def list_users(root=None):
"""
Return a list of all users
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.list_users
"""
if root is not None and __grains__["kernel"] != "AIX":
getpwall = functools.partial(_getpwall, root=root)
else:
getpwall = functools.partial(pwd.getpwall)
return sorted([user.pw_name for user in getpwall()])
def rename(name, new_name, root=None):
"""
Change the username for a named user
name
User to modify
new_name
New value of the login name
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.rename name new_name
"""
if info(new_name, root=root):
raise CommandExecutionError("User '{0}' already exists".format(new_name))
return _chattrib(name, "name", new_name, "-l", root=root)
def _getpwnam(name, root=None):
"""
    Alternative implementation of getpwnam that uses only /etc/passwd
"""
root = "/" if not root else root
passwd = os.path.join(root, "etc/passwd")
with salt.utils.files.fopen(passwd) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
comps = line.strip().split(":")
if comps[0] == name:
# Generate a getpwnam compatible output
comps[2], comps[3] = int(comps[2]), int(comps[3])
return pwd.struct_passwd(comps)
raise KeyError
def _getpwall(root=None):
"""
    Alternative implementation of getpwall that uses only /etc/passwd
"""
root = "/" if not root else root
passwd = os.path.join(root, "etc/passwd")
with salt.utils.files.fopen(passwd) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
comps = line.strip().split(":")
# Generate a getpwall compatible output
comps[2], comps[3] = int(comps[2]), int(comps[3])
yield pwd.struct_passwd(comps)
|
the-stack_0_2180 |
import bisect
from functools import total_ordering
from django.core.management import BaseCommand
from classification.enums import SpecialEKeys
from classification.models import Classification
@total_ordering
class ConversionSize:
def __init__(self, vc: Classification):
self.ref_length = vc.update_cached_c_hgvs()
self.vc_id = vc.id
self.chgvs = vc.get(SpecialEKeys.C_HGVS)
vc.save()
def __lt__(self, other):
return self.ref_length < other.ref_length
class Command(BaseCommand):
def handle(self, *args, **options):
conversions = list()
update_count = 0
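        # Keep only the ten classifications with the longest cached reference
        # length: insort keeps `conversions` sorted ascending, so popping
        # index 0 drops the current smallest whenever the list exceeds ten.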
for vc in Classification.objects.all():
conversion = ConversionSize(vc)
bisect.insort(conversions, conversion)
if len(conversions) > 10:
conversions.pop(0)
update_count += 1
if update_count % 100 == 0:
print(f"Completed {update_count}")
print(f"Bulk Update of Cached c.hgvs - completed")
print(f"Biggest ref lengths are:")
for conversion in conversions[::-1]:
print(f"{conversion.ref_length} from vc.id {conversion.vc_id} {conversion.chgvs}")
|
the-stack_0_2188 |
from time import time
import flair
import numpy as np
import torch
from flair.models import SequenceTagger
from REL.mention_detection import MentionDetection
from REL.training_datasets import TrainingEvaluationDatasets
np.random.seed(seed=42)
MAX_SIZE_DOCS = 10
base_url = ""
wiki_version = ""
datasets = TrainingEvaluationDatasets(base_url, wiki_version).load()["aida_testB"]
docs = {}
for i, doc in enumerate(datasets):
sentences = []
for x in datasets[doc]:
if x["sentence"] not in sentences:
sentences.append(x["sentence"])
text = ". ".join([x for x in sentences])
if len(docs) == MAX_SIZE_DOCS:
print("length docs is {}.".format(len(docs)))
print("====================")
break
if len(text.split()) > 200:
docs[doc] = [text, []]
mention_detection = MentionDetection(base_url, wiki_version)
# Alternatively use Flair NER tagger.
tagger_ner = SequenceTagger.load("ner-fast")
start = time()
mentions_dataset, n_mentions = mention_detection.find_mentions(docs, tagger_ner)
print("MD took: {}".format(time() - start))
|
the-stack_0_2189 |
# -*- coding: utf-8 -*-
"""
Exercise on TreeTagger output
"""
import argparse
class Word:
""" Classe Word : définit un mot simple de la langue """
def __init__(self, form, lemma, pos):
self.form = form
self.lemma = lemma
self.pos = pos
def __repr__(self):
return f"{self.form}"
def brown_string(self):
return f"{self.form}/{self.lemma}/{self.pos}"
def is_inflected(self):
"""
        Returns True if the word is inflected,
        False otherwise
"""
if self.form.lower() != self.lemma:
return True
else:
return False
def main():
    parser = argparse.ArgumentParser(description="Exercise on TreeTagger output")
parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true")
parser.add_argument("file", help="le fichier tsv")
args = parser.parse_args()
words = []
with open(args.file) as tt:
for line in tt:
line = line.rstrip()
items = line.split('\t')
words.append(Word(items[0], items[2], items[1]))
res = [w for w in words if w.is_inflected() and w.pos != "PUN"]
print(res)
if __name__ == "__main__":
    main()
|
the-stack_0_2190 |
from datetime import datetime
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import io
from torchvision import transforms as trans
from data.data_pipe import de_preprocess
import torch
from model import l2_norm
import pdb
import cv2
from face_detection.accuracy_evaluation import predict
from face_detection.config_farm import configuration_10_320_20L_5scales_v2 as cfg
import mxnet as mx
import numpy as np
def separate_bn_paras(modules):
if not isinstance(modules, list):
modules = [*modules.modules()]
paras_only_bn = []
paras_wo_bn = []
for layer in modules:
if 'model' in str(layer.__class__):
continue
if 'container' in str(layer.__class__):
continue
else:
if 'batchnorm' in str(layer.__class__):
paras_only_bn.extend([*layer.parameters()])
else:
paras_wo_bn.extend([*layer.parameters()])
return paras_only_bn, paras_wo_bn
def prepare_facebank(conf, model, mtcnn, tta = True):
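    # Assumed facebank layout (inferred from the directory walk below, not
    # documented in this file):
    #   conf.facebank_path/<person_name>/<one or more face images>
    # Each sub-directory becomes one identity whose embeddings are averaged.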
model.eval()
ctx = mx.gpu(0)
symbol_file_path = 'face_detection/symbol_farm/symbol_10_320_20L_5scales_v2_deploy.json'
model_file_path = 'face_detection/saved_model/configuration_10_320_20L_5scales_v2/train_10_320_20L_5scales_v2_iter_1800000.params'
face_detector = predict.Predict(mxnet=mx,
symbol_file_path=symbol_file_path,
model_file_path=model_file_path,
ctx=ctx,
receptive_field_list=cfg.param_receptive_field_list,
receptive_field_stride=cfg.param_receptive_field_stride,
bbox_small_list=cfg.param_bbox_small_list,
bbox_large_list=cfg.param_bbox_large_list,
receptive_field_center_start=cfg.param_receptive_field_center_start,
num_output_scales=cfg.param_num_output_scales)
embeddings = []
names = ['Unknown']
for path in conf.facebank_path.iterdir():
if path.is_file():
continue
else:
embs = []
for filename in path.iterdir():
if not filename.is_file():
continue
else:
try:
print(filename)
image = Image.open(filename)
img = image
# img = np.array(image)
# img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# faces, infer_time = face_detector.predict(img, resize_scale=0.5, score_threshold=0.4, top_k=10000, \
# NMS_threshold=0.2, NMS_flag=True, skip_scale_branch_list=[])
# img_size = 112
# print(len(faces))
# margin = 0
# img_h, img_w, _ = np.shape(image)
# for i, bbox in enumerate(faces):
# x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
# xw1 = max(int(x1 - margin ), 0)
# yw1 = max(int(y1 - margin ), 0)
# xw2 = min(int(x2 + margin ), img_w - 1)
# yw2 = min(int(y2 + margin ), img_h - 1)
# face = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))
# # img = Image.fromarray(face[...,::-1])
# img = face
# break
except Exception as e:
print(e)
continue
if img.size != (112, 112):
img = mtcnn.align(img)
print(type(img))
# cv2.imshow('window', img)
# img.show()
# if cv2.waitKey() == ord('q'):
# break
with torch.no_grad():
if tta:
img = trans.functional.to_grayscale(img, num_output_channels=3)
mirror = trans.functional.hflip(img)
emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
v_mirror = trans.functional.vflip(mirror)
v_emb_mirror = model(conf.test_transform(v_mirror).to(conf.device).unsqueeze(0))
v_img = trans.functional.vflip(img)
v_img_mirror = model(conf.test_transform(v_img).to(conf.device).unsqueeze(0))
embs.append(l2_norm(emb + emb_mirror))
# embs.append(l2_norm(emb + emb_mirror + v_emb_mirror + v_img_mirror))
# embs.append(emb)
# embs.append(emb_mirror)
# embs.append(v_emb_mirror)
# embs.append(v_img_mirror)
else:
embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
if len(embs) == 0:
continue
embedding = torch.cat(embs).mean(0,keepdim=True)
embeddings.append(embedding)
names.append(path.name)
embeddings = torch.cat(embeddings)
names = np.array(names)
torch.save(embeddings, conf.facebank_path/'facebank.pth')
np.save(conf.facebank_path/'names', names)
return embeddings, names
def load_facebank(conf):
embeddings = torch.load(conf.facebank_path/'facebank.pth')
names = np.load(conf.facebank_path/'names.npy')
return embeddings, names
def face_reader(conf, conn, flag, boxes_arr, result_arr, learner, mtcnn, targets, tta):
while True:
try:
image = conn.recv()
except:
continue
try:
bboxes, faces = mtcnn.align_multi(image, limit=conf.face_limit)
except:
bboxes = []
results = learner.infer(conf, faces, targets, tta)
if len(bboxes) > 0:
print('bboxes in reader : {}'.format(bboxes))
bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces
bboxes = bboxes.astype(int)
bboxes = bboxes + [-1,-1,1,1] # personal choice
assert bboxes.shape[0] == results.shape[0],'bbox and faces number not same'
bboxes = bboxes.reshape([-1])
for i in range(len(boxes_arr)):
if i < len(bboxes):
boxes_arr[i] = bboxes[i]
else:
boxes_arr[i] = 0
for i in range(len(result_arr)):
if i < len(results):
result_arr[i] = results[i]
else:
result_arr[i] = -1
else:
for i in range(len(boxes_arr)):
boxes_arr[i] = 0 # by default,it's all 0
for i in range(len(result_arr)):
result_arr[i] = -1 # by default,it's all -1
print('boxes_arr : {}'.format(boxes_arr[:4]))
print('result_arr : {}'.format(result_arr[:4]))
flag.value = 0
hflip = trans.Compose([
de_preprocess,
trans.ToPILImage(),
trans.functional.hflip,
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
def hflip_batch(imgs_tensor):
hfliped_imgs = torch.empty_like(imgs_tensor)
for i, img_ten in enumerate(imgs_tensor):
hfliped_imgs[i] = hflip(img_ten)
return hfliped_imgs
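# Illustrative use of hflip_batch (shapes are an assumption, not from this
# file): for a normalized batch `imgs` of shape [N, 3, 112, 112],
# hflip_batch(imgs) returns the horizontally mirrored batch for test-time
# augmentation.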
def get_time():
return (str(datetime.now())[:-10]).replace(' ','-').replace(':','-')
def gen_plot(fpr, tpr):
"""Create a pyplot plot and save to buffer."""
plt.figure()
plt.xlabel("FPR", fontsize=14)
plt.ylabel("TPR", fontsize=14)
plt.title("ROC Curve", fontsize=14)
plot = plt.plot(fpr, tpr, linewidth=2)
buf = io.BytesIO()
plt.savefig(buf, format='jpeg')
buf.seek(0)
plt.close()
return buf
def draw_box_name(bbox,name,frame):
frame = cv2.rectangle(frame,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),6)
frame = cv2.putText(frame,
name,
(bbox[0],bbox[1]),
cv2.FONT_HERSHEY_SIMPLEX,
2,
(0,255,0),
3,
cv2.LINE_AA)
    return frame
|
the-stack_0_2191 |
from numpy import rad2deg, deg2rad
from qcodes import VisaInstrument, validators as vals
def parse_on_off(stat):
if stat.startswith('0'):
stat = 'Off'
elif stat.startswith('1'):
stat = 'On'
return stat
def rad2deg_mod(rad):
deg = rad2deg(float(rad))
return deg
class Keysight_E8257D(VisaInstrument):
"""
This is the qcodes driver for the Keysight_E8257D signal generator
Status: beta-version.
TODO:
- Add all parameters that are in the manual
This driver will most likely work for multiple Agilent sources.
    This driver does not contain all commands available for the E8257D but
only the ones most commonly used.
"""
def __init__(self, name:str, address:str, step_attenuator:bool=False,
pulse_option:bool=True, **kwargs):
super().__init__(name, address, **kwargs)
self.add_parameter(name='frequency',
label='Frequency',
unit='Hz',
get_cmd='FREQ:CW?',
set_cmd='FREQ:CW' + ' {:.8f}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(2.5e5, 20e9))
self.add_parameter(name='phase',
label='Phase',
unit='deg',
get_cmd='PHASE?',
set_cmd='PHASE' + ' {:.8f}',
get_parser=rad2deg_mod,
set_parser=deg2rad,
vals=vals.Numbers(-180, 180))
self.add_parameter(name='power',
label='Power',
unit='dBm',
get_cmd='POW:AMPL?',
set_cmd='POW:AMPL' + ' {:.4f}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(-130, 25))
if pulse_option:
self.add_parameter(name='pulse_delay',
label='Pulse_Delay',
unit='s',
get_cmd='PULM:INT:DEL?',
set_cmd='PULM:INT:DEL' + ' {:e}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(-70e-9,42))
self.add_parameter(name='pulse_period',
label='Pulse_period',
unit='s',
get_cmd='PULM:INT:PER?',
set_cmd='PULM:INT:PER' + ' {:e}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(-70e-9, 42))
self.add_parameter(name='pulse_width',
label='Pulse_width',
unit='s',
get_cmd='PULM:INT:PWID?',
set_cmd='PULM:INT:PWID' + ' {:e}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(10e-9, 42))
self.add_parameter('pulse_mod',
get_cmd='PULM:STAT?',
set_cmd='PULM:STAT' + ' {}',
get_parser=parse_on_off,
                           # Only listed most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.add_parameter('pulse_src',
get_cmd='PULM:SOUR?',
set_cmd='PULM:SOUR' + ' {}',
vals=vals.Enum('INT', 'EXT'))
self.add_parameter('pulse_int_mode',
get_cmd='PULM:SOUR:INT?',
set_cmd='PULM:SOUR:INT' + ' {}',
vals=vals.Enum('FRUN', 'TRIG', 'GATE'))
self.add_parameter('modulation',
get_cmd='OUTP:MOD?',
set_cmd='OUTP:MOD' + ' {}',
get_parser=parse_on_off,
                           # Only listed most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.add_parameter('status',
get_cmd='OUTP?',
set_cmd='OUTP' + ' {}',
get_parser=parse_on_off,
                           # Only listed most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.add_parameter('alc',
get_cmd='POW:ALC?',
set_cmd='POW:ALC' + ' {}',
get_parser=parse_on_off,
                           # Only listed most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.connect_message()
def on(self):
self.set('status', 'on')
def off(self):
self.set('status', 'off')
def mod_on(self):
self.set('modulation', 'on')
def mod_off(self):
self.set('modulation', 'off')
def alc_on(self):
self.set('alc', 'on')
def alc_off(self):
self.set('alc', 'off')
def pulse_on(self):
self.set('pulse_mod', 'on')
def pulse_off(self):
self.set('pulse_mod', 'off')
def pulse_source_int(self):
self.set('pulse_src', 'INT')
def pulse_source_ext(self):
self.set('pulse_src', 'EXT')
def pulse_int_mode_frun(self):
self.set('pulse_int_mode', 'FRUN')
def pulse_int_mode_trig(self):
self.set('pulse_int_mode', 'TRIG')
def pulse_int_mode_gate(self):
self.set('pulse_int_mode', 'GATE')
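# Minimal usage sketch (illustrative only; the instrument name and VISA address
# below are placeholders, not taken from this driver):
#
#   mw_source = Keysight_E8257D('mw_source', 'TCPIP0::192.0.2.10::inst0::INSTR')
#   mw_source.frequency(6e9)   # Hz
#   mw_source.power(-20)       # dBm
#   mw_source.on()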
|
the-stack_0_2193 |
#! /usr/bin/env python3
# Script to parse spec output CSVs and produce C files.
# Released by lisa neigut under CC0:
# https://creativecommons.org/publicdomain/zero/1.0/
#
# Reads from stdin, outputs C header or body file.
#
# Standard message types:
# msgtype,<msgname>,<value>[,<option>]
# msgdata,<msgname>,<fieldname>,<typename>,[<count>][,<option>]
#
# TLV types:
# tlvtype,<tlvstreamname>,<tlvname>,<value>[,<option>]
# tlvdata,<tlvstreamname>,<tlvname>,<fieldname>,<typename>,[<count>][,<option>]
#
# Subtypes:
# subtype,<subtypename>
# subtypedata,<subtypename>,<fieldname>,<typename>,[<count>]
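#
# Illustrative input lines (the message and field names here only sketch the
# format; they are not taken from any particular spec CSV):
#
#   msgtype,init,16
#   msgdata,init,gflen,u16,
#   msgdata,init,globalfeatures,byte,gflen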
from argparse import ArgumentParser, REMAINDER
from collections import OrderedDict
import copy
import fileinput
from mako.template import Template
import os
import re
import sys
# Generator to give us one line at a time.
def next_line(args, lines):
if lines is None:
lines = fileinput.input(args)
for i, line in enumerate(lines):
yield i + 1, line.strip()
# Class definitions, to keep things classy
class Field(object):
def __init__(self, name, type_obj, extensions=[],
field_comments=[], optional=False):
self.name = name
self.type_obj = type_obj
self.count = 1
self.len_field_of = None
self.len_field = None
self.implicit_len = False
self.extension_names = extensions
self.is_optional = optional
self.field_comments = field_comments
def __deepcopy__(self, memo):
deepcopy_method = self.__deepcopy__
self.__deepcopy__ = None
field = copy.deepcopy(self, memo)
self.__deepcopy__ = deepcopy_method
field.type_obj = self.type_obj
return field
def add_count(self, count):
self.count = int(count)
def add_len_field(self, len_field):
self.count = False
# we cache our len-field's name
self.len_field = len_field.name
# the len-field caches our name
len_field.len_field_of = self.name
def add_implicit_len(self):
self.count = False
self.implicit_len = True
def is_array(self):
return self.count > 1
def is_varlen(self):
return not self.count
def is_implicit_len(self):
return self.implicit_len
def is_extension(self):
return bool(self.extension_names)
def size(self, implicit_expression=None):
if self.count:
return self.count
if self.len_field:
return self.len_field
assert self.is_implicit_len()
assert implicit_expression
return implicit_expression
def needs_context(self):
""" A field needs a context if it's varsized """
return self.is_varlen() or self.type_obj.needs_context()
def arg_desc_to(self):
if self.len_field_of:
return ''
type_name = self.type_obj.type_name()
if self.is_array():
return ', const {} {}[{}]'.format(type_name, self.name, self.count)
if self.type_obj.is_assignable() and not self.is_varlen():
name = self.name
if self.is_optional:
name = '*' + name
return ', {} {}'.format(type_name, name)
if self.is_varlen() and self.type_obj.is_varsize():
return ', const {} **{}'.format(type_name, self.name)
return ', const {} *{}'.format(type_name, self.name)
def arg_desc_from(self):
if self.len_field_of:
return ''
type_name = self.type_obj.type_name()
if self.is_array():
return ', {} {}[{}]'.format(type_name, self.name, self.count)
ptrs = '*'
if self.is_varlen() or self.is_optional or self.type_obj.is_varsize():
ptrs += '*'
if self.is_varlen() and self.type_obj.is_varsize():
ptrs += '*'
return ', {} {}{}'.format(type_name, ptrs, self.name)
class FieldSet(object):
def __init__(self):
self.fields = OrderedDict()
self.len_fields = {}
def add_data_field(self, field_name, type_obj, count=1,
extensions=[], comments=[], optional=False,
implicit_len_ok=False):
field = Field(field_name, type_obj, extensions=extensions,
field_comments=comments, optional=optional)
if bool(count):
try:
field.add_count(int(count))
except ValueError:
if count in self.fields:
len_field = self.find_data_field(count)
field.add_len_field(len_field)
self.len_fields[len_field.name] = len_field
else:
# '...' means "rest of TLV"
assert implicit_len_ok
assert count == '...'
field.add_implicit_len()
# You can't have any fields after an implicit-length field.
if len(self.fields) != 0:
assert not self.fields[next(reversed(self.fields))].is_implicit_len()
self.fields[field_name] = field
def find_data_field(self, field_name):
return self.fields[field_name]
def get_len_fields(self):
return list(self.len_fields.values())
def has_len_fields(self):
return bool(self.len_fields)
def needs_context(self):
return any([field.needs_context() or field.is_optional for field in self.fields.values()])
class Type(FieldSet):
assignables = [
'u8',
'u16',
'u32',
'u64',
'tu16',
'tu32',
'tu64',
'bool',
'amount_sat',
'amount_msat',
'bigsize',
'varint'
]
typedefs = [
'u8',
'u16',
'u32',
'u64',
'bool',
'secp256k1_ecdsa_signature',
'secp256k1_ecdsa_recoverable_signature',
'wirestring',
'double',
'bigsize',
'varint',
]
truncated_typedefs = [
'tu16',
'tu32',
'tu64',
]
# Externally defined variable size types (require a context)
varsize_types = [
'peer_features',
'gossip_getnodes_entry',
'gossip_getchannels_entry',
'failed_htlc',
'utxo',
'bitcoin_tx',
'wirestring',
'per_peer_state',
'bitcoin_tx_output',
'exclude_entry',
]
# Some BOLT types are re-typed based on their field name
# ('fieldname partial', 'original type', 'outer type'): ('true type', 'collapse array?')
name_field_map = {
('txid', 'sha256'): ('bitcoin_txid', False),
('amt', 'u64'): ('amount_msat', False),
('msat', 'u64'): ('amount_msat', False),
('satoshis', 'u64'): ('amount_sat', False),
('node_id', 'pubkey', 'channel_announcement'): ('node_id', False),
('node_id', 'pubkey', 'node_announcement'): ('node_id', False),
('temporary_channel_id', 'u8'): ('channel_id', True),
('secret', 'u8'): ('secret', True),
('preimage', 'u8'): ('preimage', True),
}
# For BOLT specified types, a few type names need to be simply 'remapped'
# 'original type': 'true type'
name_remap = {
'byte': 'u8',
'signature': 'secp256k1_ecdsa_signature',
'chain_hash': 'bitcoin_blkid',
'point': 'pubkey',
# FIXME: omits 'pad'
}
@staticmethod
def true_type(type_name, field_name=None, outer_name=None):
""" Returns 'true' type of a given type and a flag if
we've remapped a variable size/array type to a single struct
(an example of this is 'temporary_channel_id' which is specified
        as a 32*byte, but we re-map it to a channel_id)
"""
if type_name in Type.name_remap:
type_name = Type.name_remap[type_name]
if field_name:
for t, true_type in Type.name_field_map.items():
if t[0] in field_name and t[1] == type_name:
if len(t) == 2 or outer_name == t[2]:
return true_type
return (type_name, False)
def __init__(self, name):
FieldSet.__init__(self)
self.name, self.is_enum = self.parse_name(name)
self.depends_on = {}
self.type_comments = []
self.tlv = False
def parse_name(self, name):
if name.startswith('enum '):
return name[5:], True
return name, False
def add_data_field(self, field_name, type_obj, count=1,
extensions=[], comments=[], optional=False):
FieldSet.add_data_field(self, field_name, type_obj, count,
extensions=extensions,
comments=comments, optional=optional)
if type_obj.name not in self.depends_on:
self.depends_on[type_obj.name] = type_obj
def type_name(self):
if self.name in self.typedefs:
return self.name
if self.name in self.truncated_typedefs:
return self.name[1:]
if self.is_enum:
prefix = 'enum '
else:
prefix = 'struct '
return prefix + self.struct_name()
# We only accelerate the u8 case: it's common and trivial.
def has_array_helper(self):
return self.name in ['u8']
def struct_name(self):
if self.is_tlv():
return self.tlv.struct_name()
return self.name
def subtype_deps(self):
return [dep for dep in self.depends_on.values() if dep.is_subtype()]
def is_subtype(self):
return bool(self.fields)
def is_truncated(self):
return self.name in self.truncated_typedefs
def needs_context(self):
return self.is_varsize()
def is_assignable(self):
""" Generally typedef's and enums """
return self.name in self.assignables or self.is_enum
def is_varsize(self):
""" A type is variably sized if it's marked as such (in varsize_types)
or it contains a field of variable length """
return self.name in self.varsize_types or self.has_len_fields()
def add_comments(self, comments):
self.type_comments = comments
def mark_tlv(self, tlv):
self.tlv = tlv
def is_tlv(self):
return bool(self.tlv)
class Message(FieldSet):
def __init__(self, name, number, option=[], enum_prefix='wire',
struct_prefix=None, comments=[]):
FieldSet.__init__(self)
self.name = name
self.number = number
self.enum_prefix = enum_prefix
self.option = option[0] if len(option) else None
self.struct_prefix = struct_prefix
self.enumname = None
self.msg_comments = comments
def has_option(self):
return self.option is not None
def enum_name(self):
name = self.enumname if self.enumname else self.name
return "{}_{}".format(self.enum_prefix, name).upper()
def struct_name(self):
if self.struct_prefix:
return self.struct_prefix + "_" + self.name
return self.name
class Tlv(object):
def __init__(self, name):
self.name = name
self.messages = {}
def add_message(self, tokens, comments=[]):
""" tokens -> (name, value[, option]) """
self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
enum_prefix=self.name,
struct_prefix=self.struct_name(),
comments=comments)
def type_name(self):
return 'struct ' + self.struct_name()
def struct_name(self):
return "tlv_{}".format(self.name)
def find_message(self, name):
return self.messages[name]
def ordered_msgs(self):
return sorted(self.messages.values(), key=lambda item: int(item.number))
class Master(object):
types = {}
tlvs = {}
messages = {}
extension_msgs = {}
inclusions = []
top_comments = []
def add_comments(self, comments):
self.top_comments += comments
def add_include(self, inclusion):
self.inclusions.append(inclusion)
def add_tlv(self, tlv_name):
if tlv_name not in self.tlvs:
self.tlvs[tlv_name] = Tlv(tlv_name)
if tlv_name not in self.types:
self.types[tlv_name] = Type(tlv_name)
return self.tlvs[tlv_name]
def add_message(self, tokens, comments=[]):
""" tokens -> (name, value[, option])"""
self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
comments=comments)
def add_extension_msg(self, name, msg):
self.extension_msgs[name] = msg
def add_type(self, type_name, field_name=None, outer_name=None):
optional = False
if type_name.startswith('?'):
type_name = type_name[1:]
optional = True
# Check for special type name re-mapping
type_name, collapse_original = Type.true_type(type_name, field_name,
outer_name)
if type_name not in self.types:
self.types[type_name] = Type(type_name)
return self.types[type_name], collapse_original, optional
def find_type(self, type_name):
return self.types[type_name]
def find_message(self, msg_name):
if msg_name in self.messages:
return self.messages[msg_name]
if msg_name in self.extension_msgs:
return self.extension_msgs[msg_name]
return None
def find_tlv(self, tlv_name):
return self.tlvs[tlv_name]
def get_ordered_subtypes(self):
""" We want to order subtypes such that the 'no dependency'
types are printed first """
subtypes = [s for s in self.types.values() if s.is_subtype()]
# Start with subtypes without subtype dependencies
sorted_types = [s for s in subtypes if not len(s.subtype_deps())]
unsorted = [s for s in subtypes if len(s.subtype_deps())]
while len(unsorted):
names = [s.name for s in sorted_types]
for s in list(unsorted):
if all([dependency.name in names for dependency in s.subtype_deps()]):
sorted_types.append(s)
unsorted.remove(s)
return sorted_types
def tlv_messages(self):
return [m for tlv in self.tlvs.values() for m in tlv.messages.values()]
def find_template(self, options):
dirpath = os.path.dirname(os.path.abspath(__file__))
filename = dirpath + '/gen/{}{}_template'.format(
'print_' if options.print_wire else '', options.page)
return Template(filename=filename)
def post_process(self):
""" method to handle any 'post processing' that needs to be done.
        for now, we just need to match up types to TLVs """
for tlv_name, tlv in self.tlvs.items():
if tlv_name in self.types:
self.types[tlv_name].mark_tlv(tlv)
def write(self, options, output):
template = self.find_template(options)
enum_sets = []
enum_sets.append({
'name': options.enum_name,
'set': self.messages.values(),
})
stuff = {}
stuff['top_comments'] = self.top_comments
stuff['options'] = options
stuff['idem'] = re.sub(r'[^A-Z]+', '_', options.header_filename.upper())
stuff['header_filename'] = options.header_filename
stuff['includes'] = self.inclusions
stuff['enum_sets'] = enum_sets
subtypes = self.get_ordered_subtypes()
stuff['structs'] = subtypes + self.tlv_messages()
stuff['tlvs'] = self.tlvs
# We leave out extension messages in the printing pages. Any extension
# fields will get printed under the 'original' message, if present
if options.print_wire:
stuff['messages'] = list(self.messages.values())
else:
stuff['messages'] = list(self.messages.values()) + list(self.extension_msgs.values())
stuff['subtypes'] = subtypes
print(template.render(**stuff), file=output)
def main(options, args=None, output=sys.stdout, lines=None):
genline = next_line(args, lines)
comment_set = []
# Create a new 'master' that serves as the coordinator for the file generation
master = Master()
try:
while True:
ln, line = next(genline)
tokens = line.split(',')
token_type = tokens[0]
if not bool(line):
master.add_comments(comment_set)
comment_set = []
continue
if token_type == 'subtype':
subtype, _, _ = master.add_type(tokens[1])
subtype.add_comments(list(comment_set))
comment_set = []
elif token_type == 'subtypedata':
subtype = master.find_type(tokens[1])
if not subtype:
raise ValueError('Unknown subtype {} for data.\nat {}:{}'
.format(tokens[1], ln, line))
type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
if optional:
raise ValueError('Subtypes cannot have optional fields {}.{}\n at {}:{}'
.format(subtype.name, tokens[2], ln, line))
if collapse:
count = 1
else:
count = tokens[4]
subtype.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
optional=optional)
comment_set = []
elif token_type == 'tlvtype':
tlv = master.add_tlv(tokens[1])
tlv.add_message(tokens[2:], comments=list(comment_set))
comment_set = []
elif token_type == 'tlvdata':
type_obj, collapse, optional = master.add_type(tokens[4], tokens[3], tokens[1])
if optional:
raise ValueError('TLV messages cannot have optional fields {}.{}\n at {}:{}'
.format(tokens[2], tokens[3], ln, line))
tlv = master.find_tlv(tokens[1])
if not tlv:
raise ValueError('tlvdata for unknown tlv {}.\nat {}:{}'
.format(tokens[1], ln, line))
msg = tlv.find_message(tokens[2])
if not msg:
raise ValueError('tlvdata for unknown tlv-message {}.\nat {}:{}'
.format(tokens[2], ln, line))
if collapse:
count = 1
else:
count = tokens[5]
msg.add_data_field(tokens[3], type_obj, count, comments=list(comment_set),
optional=optional, implicit_len_ok=True)
comment_set = []
elif token_type == 'msgtype':
master.add_message(tokens[1:], comments=list(comment_set))
comment_set = []
elif token_type == 'msgdata':
msg = master.find_message(tokens[1])
if not msg:
raise ValueError('Unknown message type {}. {}:{}'.format(tokens[1], ln, line))
type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
if collapse:
count = 1
else:
count = tokens[4]
# if this is an 'extension' field*, we want to add a new 'message' type
# in the future, extensions will be handled as TLV's
#
# *(in the spec they're called 'optional', but that term is overloaded
# in that internal wire messages have 'optional' fields that are treated
# differently. for the sake of clarity here, for bolt-wire messages,
# we'll refer to 'optional' message fields as 'extensions')
#
if tokens[5:] == []:
msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
optional=optional)
else: # is one or more extension fields
if optional:
raise ValueError("Extension fields cannot be optional. {}:{}"
.format(ln, line))
orig_msg = msg
for extension in tokens[5:]:
extension_name = "{}_{}".format(tokens[1], extension)
msg = master.find_message(extension_name)
if not msg:
msg = copy.deepcopy(orig_msg)
msg.enumname = msg.name
msg.name = extension_name
master.add_extension_msg(msg.name, msg)
msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set), optional=optional)
# If this is a print_wire page, add the extension fields to the
# original message, so we can print them if present.
if options.print_wire:
orig_msg.add_data_field(tokens[2], type_obj, count=count,
extensions=tokens[5:],
comments=list(comment_set),
optional=optional)
comment_set = []
elif token_type.startswith('#include'):
master.add_include(token_type)
elif token_type.startswith('#'):
comment_set.append(token_type[1:])
else:
raise ValueError("Unknown token type {} on line {}:{}".format(token_type, ln, line))
except StopIteration:
pass
master.post_process()
master.write(options, output)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-s", "--expose-subtypes", help="print subtypes in header",
action="store_true", default=False)
parser.add_argument("-P", "--print_wire", help="generate wire printing source files",
action="store_true", default=False)
parser.add_argument("--page", choices=['header', 'impl'], help="page to print")
parser.add_argument('--expose-tlv-type', action='append', default=[])
parser.add_argument('header_filename', help='The filename of the header')
parser.add_argument('enum_name', help='The name of the enum to produce')
parser.add_argument("files", help='Files to read in (or stdin)', nargs=REMAINDER)
parsed_args = parser.parse_args()
main(parsed_args, parsed_args.files)
|
the-stack_0_2195 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common.rest_client import RestClient
from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SnapshotsClientJSON(RestClient):
"""Client class to send CRUD Volume API requests."""
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(SnapshotsClientJSON, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.volume.catalog_type
self.build_interval = self.config.volume.build_interval
self.build_timeout = self.config.volume.build_timeout
def list_snapshots(self, params=None):
"""List all the snapshot."""
url = 'snapshots'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def list_snapshots_with_detail(self, params=None):
"""List the details of all snapshots."""
url = 'snapshots/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def get_snapshot(self, snapshot_id):
"""Returns the details of a single snapshot."""
url = "snapshots/%s" % str(snapshot_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshot']
def create_snapshot(self, volume_id, **kwargs):
"""
Creates a new snapshot.
volume_id(Required): id of the volume.
force: Create a snapshot even if the volume attached (Default=False)
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
post_body = {'volume_id': volume_id}
post_body.update(kwargs)
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body, self.headers)
body = json.loads(body)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
"""Updates a snapshot."""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body,
self.headers)
body = json.loads(body)
return resp, body['snapshot']
# NOTE(afazekas): just for the wait function
def _get_snapshot_status(self, snapshot_id):
resp, body = self.get_snapshot(snapshot_id)
status = body['status']
# NOTE(afazekas): snapshot can reach an "error"
# state in a "normal" lifecycle
if (status == 'error'):
raise exceptions.SnapshotBuildErrorException(
snapshot_id=snapshot_id)
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_snapshot_status(self, snapshot_id, status):
"""Waits for a Snapshot to reach a given status."""
start_time = time.time()
old_value = value = self._get_snapshot_status(snapshot_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
if (value == status):
return value
if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_snapshot_status(snapshot_id)
def delete_snapshot(self, snapshot_id):
"""Delete Snapshot."""
return self.delete("snapshots/%s" % str(snapshot_id))
def is_resource_deleted(self, id):
try:
self.get_snapshot(id)
except exceptions.NotFound:
return True
return False
def reset_snapshot_status(self, snapshot_id, status):
"""Reset the specified snapshot's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body,
self.headers)
return resp, body
def update_snapshot_status(self, snapshot_id, status, progress):
"""Update the specified snapshot's status."""
post_body = {
'status': status,
'progress': progress
}
post_body = json.dumps({'os-update_snapshot_status': post_body})
url = 'snapshots/%s/action' % str(snapshot_id)
resp, body = self.post(url, post_body, self.headers)
return resp, body
def create_snapshot_metadata(self, snapshot_id, metadata):
"""Create metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.post(url, put_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def get_snapshot_metadata(self, snapshot_id):
"""Get metadata of the snapshot."""
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.get(url, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata(self, snapshot_id, metadata):
"""Update metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.put(url, put_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
"""Update metadata item for the snapshot."""
put_body = json.dumps({'meta': meta_item})
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.put(url, put_body, self.headers)
body = json.loads(body)
return resp, body['meta']
def delete_snapshot_metadata_item(self, snapshot_id, id):
"""Delete metadata item for the snapshot."""
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.delete(url, self.headers)
return resp, body
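# Rough usage sketch of this client; the credential values, endpoint and
# volume_id below are illustrative assumptions, not part of tempest itself:
#   client = SnapshotsClientJSON(config, 'user', 'pass', 'http://keystone:5000/v2.0')
#   resp, snap = client.create_snapshot(volume_id, display_name='nightly-backup')
#   client.wait_for_snapshot_status(snap['id'], 'available')
#   client.delete_snapshot(snap['id'])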
|
the-stack_0_2196 | import sys
import os
sys.path.append(os.getcwd())
import torch
import tokenizers
import sklearn
from tokenizers import SentencePieceBPETokenizer
from tokenizers import SentencePieceUnigramTokenizer
from tokenizers import BertWordPieceTokenizer
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.trainers import WordPieceTrainer, BpeTrainer, UnigramTrainer
# the plain WordPiece trainer below uses a simple whitespace pre-tokenizer;
# BertWordPieceTokenizer already ships with the BERT pre-tokenizer built in
from tokenizers.pre_tokenizers import Whitespace
from typing import List
unk_token = "<UNK>"
spl_tokens = ["<UNK>", "<SEP>", "<MASK>", "<CLS>"]
def is_filepath_list(filelist: List[str]) -> bool:
"""
Check if a list of filepaths is a list of files.
"""
for file in filelist:
if not os.path.isfile(file):
return False
return True
def train_iterator_mul_files(files):
for path in files:
with open(path, "r") as f:
for line in f:
yield line
def train_WordPieceTokenizer(file_list: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
Train WP tokenizer from a list of files.
"""
tokenizer = Tokenizer(WordPiece(unk_token=unk_token))
trainer = WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=spl_tokens,
show_progress=True,
limit_alphabet=limit_alphabet
)
tokenizer.pre_tokenizer = Whitespace()
if is_filepath_list(file_list):
tokenizer.train(file_list, trainer=trainer)
else:
        tokenizer.train_from_iterator(file_list, trainer=trainer)
if save:
tokenizer.save("./WP_tok-trained.json")
tokenizer = Tokenizer.from_file("./WP_tok-trained.json")
return tokenizer
def train_SentencePieceBPETokenizer(files: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
    Train SP_BPE tokenizer from a list of files.
"""
if is_filepath_list(files):
train_it = train_iterator_mul_files(files)
else:
train_it = files
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=True,
limit_alphabet=limit_alphabet,
)
if save:
tokenizer.save("./SP_BPE_tok-trained.json")
tokenizer = Tokenizer.from_file("./SP_BPE_tok-trained.json")
return tokenizer
def train_SentencePieceUGTokenizer(filelist: List[str], vocab_size=30_000, save: bool = True):
"""
    Train SP_UG tokenizer from a list of files.
"""
if is_filepath_list(filelist):
train_it = train_iterator_mul_files(filelist)
else:
train_it = filelist
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
show_progress=True
)
if save:
tokenizer.save("./SP_UG_tok-trained.json")
tokenizer = Tokenizer.from_file("./SP_UG_tok-trained.json")
return tokenizer
def train_BertWordPieceTokenizer(filelist: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
    Train BERT tokenizer from a list of files.
"""
if is_filepath_list(filelist):
train_it = train_iterator_mul_files(filelist)
else:
train_it = filelist
tokenizer = BertWordPieceTokenizer()
tokenizer.normalizer = tokenizers.normalizers.BertNormalizer(strip_accents=True, lowercase=True)
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
show_progress=True,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
)
if save:
tokenizer.save("./BERT_tok-trained.json")
tokenizer = Tokenizer.from_file("./BERT_tok-trained.json")
return tokenizer
def get_vocab_from_tokenizer(tokenizer: Tokenizer):
"""
Get vocab from tokenizer.
"""
vocab = tokenizer.get_vocab()
return vocab
if __name__ == '__main__':
# create corpus
print(os.getcwd())
corpus = os.listdir(".corpus_caches/orcas/medium")
corpus = [".corpus_caches/orcas/medium/" + file for file in corpus]
tokenizer = train_BertWordPieceTokenizer(corpus, vocab_size=30_000)
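    # Quick sanity check of the freshly trained tokenizer; the query string is
    # just an illustrative example, not taken from the ORCAS corpus above.
    encoding = tokenizer.encode("where to buy cheap flights")
    print(encoding.tokens)
    print(encoding.ids)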
|
the-stack_0_2198 |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.contrib.auth import authenticate, get_backends
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.contrib.auth.views import password_reset as django_password_reset
from django.urls import reverse
from zerver.decorator import authenticated_json_post_view, require_post, \
process_client, do_login, log_view_func
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, \
HttpResponseNotFound
from django.middleware.csrf import get_token
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET
from django.utils.translation import ugettext as _
from django.utils.http import is_safe_url
from django.core import signing
import urllib
from typing import Any, Dict, List, Optional, Tuple, Text
from confirmation.models import Confirmation, create_confirmation_link
from zerver.context_processors import zulip_default_context, get_realm_from_request
from zerver.forms import HomepageForm, OurAuthenticationForm, \
WRONG_SUBDOMAIN_ERROR, ZulipPasswordResetForm
from zerver.lib.mobile_auth_otp import is_valid_otp, otp_encrypt_api_key
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.subdomains import get_subdomain, is_subdomain_root_or_alias
from zerver.lib.validator import validate_login_email
from zerver.models import PreregistrationUser, UserProfile, remote_user_to_email, Realm, \
get_realm
from zerver.signals import email_on_new_login
from zproject.backends import password_auth_enabled, dev_auth_enabled, \
github_auth_enabled, google_auth_enabled, ldap_auth_enabled, \
ZulipLDAPConfigurationError, ZulipLDAPAuthBackend, email_auth_enabled, \
remote_auth_enabled
from version import ZULIP_VERSION
import hashlib
import hmac
import jwt
import logging
import requests
import time
import ujson
def get_safe_redirect_to(url: Text, redirect_host: Text) -> Text:
is_url_safe = is_safe_url(url=url, host=redirect_host)
if is_url_safe:
return urllib.parse.urljoin(redirect_host, url)
else:
return redirect_host
def create_preregistration_user(email: Text, request: HttpRequest, realm_creation: bool=False,
                                 password_required: bool=True) -> PreregistrationUser:
realm = None
if not realm_creation:
realm = get_realm(get_subdomain(request))
return PreregistrationUser.objects.create(email=email,
realm_creation=realm_creation,
password_required=password_required,
realm=realm)
def maybe_send_to_registration(request: HttpRequest, email: Text, full_name: Text='',
password_required: bool=True) -> HttpResponse:
realm = get_realm_from_request(request)
from_multiuse_invite = False
multiuse_obj = None
streams_to_subscribe = None
multiuse_object_key = request.session.get("multiuse_object_key", None)
if multiuse_object_key is not None:
from_multiuse_invite = True
multiuse_obj = Confirmation.objects.get(confirmation_key=multiuse_object_key).content_object
realm = multiuse_obj.realm
streams_to_subscribe = multiuse_obj.streams.all()
form = HomepageForm({'email': email}, realm=realm, from_multiuse_invite=from_multiuse_invite)
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(
email__iexact=email, realm=realm).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request,
password_required=password_required)
else:
prereg_user = create_preregistration_user(email, request,
password_required=password_required)
if multiuse_object_key is not None:
del request.session["multiuse_object_key"]
request.session.modified = True
if streams_to_subscribe is not None:
prereg_user.streams.set(streams_to_subscribe)
return redirect("".join((
create_confirmation_link(prereg_user, request.get_host(), Confirmation.USER_REGISTRATION),
'?full_name=',
            # urllib does not handle Unicode, so coerce to encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.parse.quote_plus(full_name.encode('utf8')))))
else:
url = reverse('register')
return render(request,
'zerver/accounts_home.html',
context={'form': form, 'current_url': lambda: url,
'from_multiuse_invite': from_multiuse_invite},
)
def redirect_to_subdomain_login_url() -> HttpResponseRedirect:
login_url = reverse('django.contrib.auth.views.login')
redirect_url = login_url + '?subdomain=1'
return HttpResponseRedirect(redirect_url)
def redirect_to_config_error(error_type: str) -> HttpResponseRedirect:
return HttpResponseRedirect("/config-error/%s" % (error_type,))
def login_or_register_remote_user(request: HttpRequest, remote_username: Optional[Text],
user_profile: Optional[UserProfile], full_name: Text='',
invalid_subdomain: bool=False, mobile_flow_otp: Optional[str]=None,
is_signup: bool=False,
redirect_to: Text='') -> HttpResponse:
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, we have verified the user
# controls an email address (remote_username) but there's no
# associated Zulip user account.
if is_signup:
# If they're trying to sign up, send them over to the PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username),
full_name, password_required=False)
# Otherwise, we send them to a special page that asks if they
# want to register or provided the wrong email and want to go back.
try:
validate_email(remote_username)
invalid_email = False
except ValidationError:
# If email address is invalid, we can't send the user
# PreregistrationUser flow.
invalid_email = True
context = {'full_name': full_name,
'email': remote_username,
'invalid_email': invalid_email}
return render(request,
'zerver/confirm_continue_registration.html',
context=context)
if invalid_subdomain:
# Show login page with an error message
return redirect_to_subdomain_login_url()
if mobile_flow_otp is not None:
# For the mobile Oauth flow, we send the API key and other
# necessary details in a redirect to a zulip:// URI scheme.
params = {
'otp_encrypted_api_key': otp_encrypt_api_key(user_profile, mobile_flow_otp),
'email': remote_username,
'realm': user_profile.realm.uri,
}
# We can't use HttpResponseRedirect, since it only allows HTTP(S) URLs
response = HttpResponse(status=302)
response['Location'] = 'zulip://login?' + urllib.parse.urlencode(params)
# Maybe sending 'user_logged_in' signal is the better approach:
# user_logged_in.send(sender=user_profile.__class__, request=request, user=user_profile)
# Not doing this only because over here we don't add the user information
# in the session. If the signal receiver assumes that we do then that
# would cause problems.
email_on_new_login(sender=user_profile.__class__, request=request, user=user_profile)
# Mark this request as having a logged-in user for our server logs.
process_client(request, user_profile)
request._email = user_profile.email
return response
do_login(request, user_profile)
redirect_to = get_safe_redirect_to(redirect_to, user_profile.realm.uri)
return HttpResponseRedirect(redirect_to)
@log_view_func
@has_request_variables
def remote_user_sso(request: HttpRequest,
mobile_flow_otp: Optional[str]=REQ(default=None)) -> HttpResponse:
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
# TODO: Arguably the JsonableError values here should be
# full-page HTML configuration errors instead.
raise JsonableError(_("No REMOTE_USER set."))
# Django invokes authenticate methods by matching arguments, and this
# authentication flow will not invoke LDAP authentication because of
# this condition of Django so no need to check if LDAP backend is
# enabled.
validate_login_email(remote_user_to_email(remote_user))
# Here we support the mobile flow for REMOTE_USER_BACKEND; we
# validate the data format and then pass it through to
# login_or_register_remote_user if appropriate.
if mobile_flow_otp is not None:
if not is_valid_otp(mobile_flow_otp):
raise JsonableError(_("Invalid OTP"))
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
# Since RemoteUserBackend will return None if Realm is None, we
# don't need to check whether `get_realm` returned None.
user_profile = authenticate(remote_user=remote_user, realm=realm)
redirect_to = request.GET.get('next', '')
return login_or_register_remote_user(request, remote_user, user_profile,
mobile_flow_otp=mobile_flow_otp,
redirect_to=redirect_to)
@csrf_exempt
@log_view_func
def remote_user_jwt(request: HttpRequest) -> HttpResponse:
subdomain = get_subdomain(request)
try:
auth_key = settings.JWT_AUTH_KEYS[subdomain]
except KeyError:
raise JsonableError(_("Auth key for this subdomain not found."))
try:
json_web_token = request.POST["json_web_token"]
options = {'verify_signature': True}
payload = jwt.decode(json_web_token, auth_key, options=options)
except KeyError:
raise JsonableError(_("No JSON web token passed in request"))
except jwt.InvalidTokenError:
raise JsonableError(_("Bad JSON web token"))
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError(_("No user specified in JSON web token claims"))
email_domain = payload.get('realm', None)
if email_domain is None:
raise JsonableError(_("No organization specified in JSON web token claims"))
email = "%s@%s" % (remote_user, email_domain)
realm = get_realm(subdomain)
if realm is None:
raise JsonableError(_("Wrong subdomain"))
try:
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(username=email,
realm=realm,
return_data=return_data,
use_dummy_backend=True)
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
def google_oauth2_csrf(request: HttpRequest, value: str) -> str:
# In Django 1.10, get_token returns a salted token which changes
# every time get_token is called.
from django.middleware.csrf import _unsalt_cipher_token
token = _unsalt_cipher_token(get_token(request))
return hmac.new(token.encode('utf-8'), value.encode("utf-8"), hashlib.sha256).hexdigest()
def reverse_on_root(viewname: str, args: List[str]=None, kwargs: Dict[str, str]=None) -> str:
return settings.ROOT_DOMAIN_URI + reverse(viewname, args=args, kwargs=kwargs)
def oauth_redirect_to_root(request: HttpRequest, url: Text, is_signup: bool=False) -> HttpResponse:
main_site_uri = settings.ROOT_DOMAIN_URI + url
params = {
'subdomain': get_subdomain(request),
'is_signup': '1' if is_signup else '0',
}
# mobile_flow_otp is a one-time pad provided by the app that we
# can use to encrypt the API key when passing back to the app.
mobile_flow_otp = request.GET.get('mobile_flow_otp')
if mobile_flow_otp is not None:
if not is_valid_otp(mobile_flow_otp):
raise JsonableError(_("Invalid OTP"))
params['mobile_flow_otp'] = mobile_flow_otp
next = request.GET.get('next')
if next:
params['next'] = next
return redirect(main_site_uri + '?' + urllib.parse.urlencode(params))
def start_google_oauth2(request: HttpRequest) -> HttpResponse:
url = reverse('zerver.views.auth.send_oauth_request_to_google')
if not (settings.GOOGLE_OAUTH2_CLIENT_ID and settings.GOOGLE_OAUTH2_CLIENT_SECRET):
return redirect_to_config_error("google")
is_signup = bool(request.GET.get('is_signup'))
return oauth_redirect_to_root(request, url, is_signup=is_signup)
def start_social_login(request: HttpRequest, backend: Text) -> HttpResponse:
backend_url = reverse('social:begin', args=[backend])
if (backend == "github") and not (settings.SOCIAL_AUTH_GITHUB_KEY and
settings.SOCIAL_AUTH_GITHUB_SECRET):
return redirect_to_config_error("github")
return oauth_redirect_to_root(request, backend_url)
def start_social_signup(request: HttpRequest, backend: Text) -> HttpResponse:
backend_url = reverse('social:begin', args=[backend])
return oauth_redirect_to_root(request, backend_url, is_signup=True)
def send_oauth_request_to_google(request: HttpRequest) -> HttpResponse:
subdomain = request.GET.get('subdomain', '')
is_signup = request.GET.get('is_signup', '')
next = request.GET.get('next', '')
mobile_flow_otp = request.GET.get('mobile_flow_otp', '0')
if ((settings.ROOT_DOMAIN_LANDING_PAGE and subdomain == '') or
not Realm.objects.filter(string_id=subdomain).exists()):
return redirect_to_subdomain_login_url()
google_uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '%s:%s:%s:%s:%s' % (cur_time, subdomain, mobile_flow_otp, is_signup, next)
# Now compute the CSRF hash with the other parameters as an input
csrf_state += ":%s" % (google_oauth2_csrf(request, csrf_state),)
params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': reverse_on_root('zerver.views.auth.finish_google_oauth2'),
'scope': 'profile email',
'state': csrf_state,
}
return redirect(google_uri + urllib.parse.urlencode(params))
@log_view_func
def finish_google_oauth2(request: HttpRequest) -> HttpResponse:
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.warning('Error from google oauth2 login: %s' % (request.GET.get("error"),))
return HttpResponse(status=400)
csrf_state = request.GET.get('state')
if csrf_state is None or len(csrf_state.split(':')) != 6:
logging.warning('Missing Google oauth2 CSRF state')
return HttpResponse(status=400)
(csrf_data, hmac_value) = csrf_state.rsplit(':', 1)
if hmac_value != google_oauth2_csrf(request, csrf_data):
logging.warning('Google oauth2 CSRF error')
return HttpResponse(status=400)
cur_time, subdomain, mobile_flow_otp, is_signup, next = csrf_data.split(':')
if mobile_flow_otp == '0':
mobile_flow_otp = None
is_signup = bool(is_signup == '1')
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': reverse_on_root('zerver.views.auth.finish_google_oauth2'),
'grant_type': 'authorization_code',
},
)
if resp.status_code == 400:
logging.warning('User error converting Google oauth2 login to token: %s' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
logging.error('Could not convert google oauth2 code to access_token: %s' % (resp.text,))
return HttpResponse(status=400)
access_token = resp.json()['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code == 400:
logging.warning('Google login failed making info API call: %s' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
logging.error('Google login failed making API call: %s' % (resp.text,))
return HttpResponse(status=400)
body = resp.json()
try:
full_name = body['name']['formatted']
except KeyError:
# Only google+ users have a formatted name. I am ignoring i18n here.
full_name = '{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
logging.error('Google oauth2 account email not found: %s' % (body,))
return HttpResponse(status=400)
email_address = email['value']
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist: # nocoverage
return redirect_to_subdomain_login_url()
if mobile_flow_otp is not None:
# When request was not initiated from subdomain.
user_profile, return_data = authenticate_remote_user(realm, email_address)
invalid_subdomain = bool(return_data.get('invalid_subdomain'))
return login_or_register_remote_user(request, email_address, user_profile,
full_name, invalid_subdomain,
mobile_flow_otp=mobile_flow_otp,
is_signup=is_signup,
redirect_to=next)
return redirect_and_log_into_subdomain(
realm, full_name, email_address, is_signup=is_signup, redirect_to=next)
def authenticate_remote_user(realm: Realm, email_address: str) -> Tuple[UserProfile, Dict[str, Any]]:
return_data = {} # type: Dict[str, bool]
if email_address is None:
# No need to authenticate if email address is None. We already
# know that user_profile would be None as well. In fact, if we
# call authenticate in this case, we might get an exception from
# ZulipDummyBackend which doesn't accept a None as a username.
logging.warning("Email address was None while trying to authenticate "
"remote user.")
return None, return_data
user_profile = authenticate(username=email_address,
realm=realm,
use_dummy_backend=True,
return_data=return_data)
return user_profile, return_data
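# The signed token minted by redirect_and_log_into_subdomain() and consumed by
# log_into_subdomain() below carries the email, full name, subdomain, signup
# flag and next URL across the root-domain -> realm-subdomain redirect; the
# shared salt plus the 15-second max_age keeps it single-purpose and short-lived.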
_subdomain_token_salt = 'zerver.views.auth.log_into_subdomain'
@log_view_func
def log_into_subdomain(request: HttpRequest, token: Text) -> HttpResponse:
try:
data = signing.loads(token, salt=_subdomain_token_salt, max_age=15)
except signing.SignatureExpired as e:
logging.warning('Subdomain cookie: {}'.format(e))
return HttpResponse(status=400)
except signing.BadSignature:
logging.warning('Subdomain cookie: Bad signature.')
return HttpResponse(status=400)
subdomain = get_subdomain(request)
if data['subdomain'] != subdomain:
logging.warning('Login attempt on invalid subdomain')
return HttpResponse(status=400)
email_address = data['email']
full_name = data['name']
is_signup = data['is_signup']
redirect_to = data['next']
if is_signup:
# If we are signing up, user_profile should be None. In case
# email_address already exists, user will get an error message.
user_profile = None
return_data = {} # type: Dict[str, Any]
else:
# We can be reasonably confident that this subdomain actually
# has a corresponding realm, since it was referenced in a
# signed cookie. But we probably should add some error
# handling for the case where the realm disappeared in the
# meantime.
realm = get_realm(subdomain)
user_profile, return_data = authenticate_remote_user(realm, email_address)
invalid_subdomain = bool(return_data.get('invalid_subdomain'))
return login_or_register_remote_user(request, email_address, user_profile,
full_name, invalid_subdomain=invalid_subdomain,
is_signup=is_signup, redirect_to=redirect_to)
def redirect_and_log_into_subdomain(realm: Realm, full_name: Text, email_address: Text,
is_signup: bool=False, redirect_to: Text='') -> HttpResponse:
data = {'name': full_name, 'email': email_address, 'subdomain': realm.subdomain,
'is_signup': is_signup, 'next': redirect_to}
token = signing.dumps(data, salt=_subdomain_token_salt)
subdomain_login_uri = (realm.uri
+ reverse('zerver.views.auth.log_into_subdomain', args=[token]))
return redirect(subdomain_login_uri)
def get_dev_users(realm: Optional[Realm]=None, extra_users_count: int=10) -> List[UserProfile]:
# Development environments usually have only a few users, but
# it still makes sense to limit how many extra users we render to
# support performance testing with DevAuthBackend.
if realm is not None:
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True, realm=realm)
else:
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
shakespearian_users = users_query.exclude(email__startswith='extrauser').order_by('email')
extra_users = users_query.filter(email__startswith='extrauser').order_by('email')
# Limit the number of extra users we offer by default
extra_users = extra_users[0:extra_users_count]
users = list(shakespearian_users) + list(extra_users)
return users
def redirect_to_misconfigured_ldap_notice(error_type: int) -> HttpResponse:
if error_type == ZulipLDAPAuthBackend.REALM_IS_NONE_ERROR:
url = reverse('ldap_error_realm_is_none')
else:
raise AssertionError("Invalid error type")
return HttpResponseRedirect(url)
def show_deactivation_notice(request: HttpRequest) -> HttpResponse:
realm = get_realm_from_request(request)
if realm and realm.deactivated:
return render(request, "zerver/deactivated.html",
context={"deactivated_domain_name": realm.name})
return HttpResponseRedirect(reverse('zerver.views.auth.login_page'))
def redirect_to_deactivation_notice() -> HttpResponse:
return HttpResponseRedirect(reverse('zerver.views.auth.show_deactivation_notice'))
def add_dev_login_context(realm: Realm, context: Dict[str, Any]) -> None:
users = get_dev_users(realm)
context['current_realm'] = realm
context['all_realms'] = Realm.objects.all()
context['direct_admins'] = [u for u in users if u.is_realm_admin]
context['direct_users'] = [u for u in users if not u.is_realm_admin]
def login_page(request: HttpRequest, **kwargs: Any) -> HttpResponse:
if request.user.is_authenticated:
return HttpResponseRedirect(request.user.realm.uri)
if is_subdomain_root_or_alias(request) and settings.ROOT_DOMAIN_LANDING_PAGE:
redirect_url = reverse('zerver.views.registration.find_account')
return HttpResponseRedirect(redirect_url)
realm = get_realm_from_request(request)
if realm and realm.deactivated:
return redirect_to_deactivation_notice()
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
if 'new_realm' in request.POST:
realm = get_realm(request.POST['new_realm'])
else:
realm = get_realm_from_request(request)
add_dev_login_context(realm, extra_context)
if realm and 'new_realm' in request.POST:
# If we're switching realms, redirect to that realm, but
# only if it actually exists.
return HttpResponseRedirect(realm.uri)
if 'username' in request.POST:
extra_context['email'] = request.POST['username']
try:
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
except ZulipLDAPConfigurationError as e:
assert len(e.args) > 1
return redirect_to_misconfigured_ldap_notice(e.args[1])
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
try:
already_registered = request.GET['already_registered']
template_response.context_data['already_registered'] = already_registered
except KeyError:
pass
try:
template_response.context_data['subdomain'] = request.GET['subdomain']
template_response.context_data['wrong_subdomain_error'] = WRONG_SUBDOMAIN_ERROR
except KeyError:
pass
return template_response
@csrf_exempt
def dev_direct_login(request: HttpRequest, **kwargs: Any) -> HttpResponse:
# This function allows logging in without a password and should only be called
# in development environments. It may be called if the DevAuthBackend is included
# in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without
# an enabled DevAuthBackend.
return HttpResponseRedirect(reverse('dev_not_supported'))
email = request.POST['direct_email']
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
user_profile = authenticate(dev_auth_username=email, realm=realm)
if user_profile is None:
return HttpResponseRedirect(reverse('dev_not_supported'))
do_login(request, user_profile)
next = request.GET.get('next', '')
redirect_to = get_safe_redirect_to(next, user_profile.realm.uri)
return HttpResponseRedirect(redirect_to)
@csrf_exempt
@require_post
@has_request_variables
def api_dev_fetch_api_key(request: HttpRequest, username: str=REQ()) -> HttpResponse:
"""This function allows logging in without a password on the Zulip
mobile apps when connecting to a Zulip development environment. It
requires DevAuthBackend to be included in settings.AUTHENTICATION_BACKENDS.
"""
if not dev_auth_enabled() or settings.PRODUCTION:
return json_error(_("Dev environment not enabled."))
# Django invokes authenticate methods by matching arguments, and this
# authentication flow will not invoke LDAP authentication because of
# this condition of Django so no need to check if LDAP backend is
# enabled.
validate_login_email(username)
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(dev_auth_username=username,
realm=realm,
return_data=return_data)
if return_data.get("inactive_realm"):
return json_error(_("This organization has been deactivated."),
data={"reason": "realm deactivated"}, status=403)
if return_data.get("inactive_user"):
return json_error(_("Your account has been disabled."),
data={"reason": "user disable"}, status=403)
if user_profile is None:
return json_error(_("This user is not registered."),
data={"reason": "unregistered"}, status=403)
do_login(request, user_profile)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_dev_get_emails(request: HttpRequest) -> HttpResponse:
if not dev_auth_enabled() or settings.PRODUCTION:
return json_error(_("Dev environment not enabled."))
users = get_dev_users()
return json_success(dict(direct_admins=[u.email for u in users if u.is_realm_admin],
direct_users=[u.email for u in users if not u.is_realm_admin]))
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request: HttpRequest, username: str=REQ(), password: str=REQ()) -> HttpResponse:
return_data = {} # type: Dict[str, bool]
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
if username == "google-oauth2-token":
# This code path is auth for the legacy Android app
user_profile = authenticate(google_oauth2_token=password,
realm=realm,
return_data=return_data)
else:
if not ldap_auth_enabled(realm=get_realm_from_request(request)):
# In case we don't authenticate against LDAP, check for a valid
# email. LDAP backend can authenticate against a non-email.
validate_login_email(username)
user_profile = authenticate(username=username,
password=password,
realm=realm,
return_data=return_data)
if return_data.get("inactive_user"):
return json_error(_("Your account has been disabled."),
data={"reason": "user disable"}, status=403)
if return_data.get("inactive_realm"):
return json_error(_("This organization has been deactivated."),
data={"reason": "realm deactivated"}, status=403)
if return_data.get("password_auth_disabled"):
return json_error(_("Password auth is disabled in your team."),
data={"reason": "password auth disabled"}, status=403)
if user_profile is None:
if return_data.get("valid_attestation"):
# We can leak that the user is unregistered iff
# they present a valid authentication string for the user.
return json_error(_("This user is not registered; do so from a browser."),
data={"reason": "unregistered"}, status=403)
return json_error(_("Your username or password is incorrect."),
data={"reason": "incorrect_creds"}, status=403)
# Maybe sending 'user_logged_in' signal is the better approach:
# user_logged_in.send(sender=user_profile.__class__, request=request, user=user_profile)
# Not doing this only because over here we don't add the user information
# in the session. If the signal receiver assumes that we do then that
# would cause problems.
email_on_new_login(sender=user_profile.__class__, request=request, user=user_profile)
# Mark this request as having a logged-in user for our server logs.
process_client(request, user_profile)
request._email = user_profile.email
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
def get_auth_backends_data(request: HttpRequest) -> Dict[str, Any]:
"""Returns which authentication methods are enabled on the server"""
subdomain = get_subdomain(request)
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist:
# If not the root subdomain, this is an error
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
raise JsonableError(_("Invalid subdomain"))
# With the root subdomain, it's an error or not depending
# whether ROOT_DOMAIN_LANDING_PAGE (which indicates whether
# there are some realms without subdomains on this server)
# is set.
if settings.ROOT_DOMAIN_LANDING_PAGE:
raise JsonableError(_("Subdomain required"))
else:
realm = None
return {
"password": password_auth_enabled(realm),
"dev": dev_auth_enabled(realm),
"email": email_auth_enabled(realm),
"github": github_auth_enabled(realm),
"google": google_auth_enabled(realm),
"remoteuser": remote_auth_enabled(realm),
"ldap": ldap_auth_enabled(realm),
}
@csrf_exempt
def api_get_auth_backends(request: HttpRequest) -> HttpResponse:
"""Deprecated route; this is to be replaced by api_get_server_settings"""
auth_backends = get_auth_backends_data(request)
auth_backends['zulip_version'] = ZULIP_VERSION
return json_success(auth_backends)
@require_GET
@csrf_exempt
def api_get_server_settings(request: HttpRequest) -> HttpResponse:
result = dict(
authentication_methods=get_auth_backends_data(request),
zulip_version=ZULIP_VERSION,
push_notifications_enabled=push_notifications_enabled(),
)
context = zulip_default_context(request)
# IMPORTANT NOTE:
# realm_name, realm_icon, etc. are not guaranteed to appear in the response.
# * If they do, that means the server URL has only one realm on it
# * If they don't, the server has multiple realms, and it's not clear which is
# the requested realm, so we can't send back these data.
for settings_item in [
"email_auth_enabled",
"require_email_format_usernames",
"realm_uri",
"realm_name",
"realm_icon",
"realm_description"]:
if context[settings_item] is not None:
result[settings_item] = context[settings_item]
return json_success(result)
@has_request_variables
def json_fetch_api_key(request: HttpRequest, user_profile: UserProfile,
password: str=REQ(default='')) -> HttpResponse:
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
if password_auth_enabled(user_profile.realm):
if not authenticate(username=user_profile.email, password=password,
realm=realm):
return json_error(_("Your username or password is incorrect."))
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request: HttpRequest) -> HttpResponse:
if not settings.GOOGLE_CLIENT_ID:
return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
@require_post
def logout_then_login(request: HttpRequest, **kwargs: Any) -> HttpResponse:
return django_logout_then_login(request, kwargs)
def password_reset(request: HttpRequest, **kwargs: Any) -> HttpResponse:
realm = get_realm(get_subdomain(request))
if realm is None:
# If trying to get to password reset on a subdomain that
# doesn't exist, just go to find_account.
redirect_url = reverse('zerver.views.registration.find_account')
return HttpResponseRedirect(redirect_url)
return django_password_reset(request,
template_name='zerver/reset.html',
password_reset_form=ZulipPasswordResetForm,
post_reset_redirect='/accounts/password/reset/done/')
|
the-stack_0_2200 | #oiracis
import re
out = open("out.txt", "w+")
c = 0
with open("file.txt", "r+", errors="ignore") as f: #ignore all errors so it reads the file not matter what
for line in f:
c = c + 1
try:
mail = (re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", line))
print("Nº: " + str(c) + "\t" + mail[0])
out.write(mail[0] + "\n")
except:
print("oopsie")
out.close()
|
the-stack_0_2201 |
n, k = map(int, input().split())  # sample input: 17 4
res = 0
while True:
    tar = (n // k) * k  # largest multiple of k not exceeding n: subtract down to it
    res += (n - tar)
    n = tar
    if n < k:  # exit the loop once n is smaller than k (no further division possible)
        break
    res += 1
    n //= k
res += (n - 1)  # subtract 1 at a time from whatever remains until it reaches 1
print(res)
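# Worked trace for the sample input (n=17, k=4), matching the code above:
# 17 -> 16 (subtract 1), 16 -> 4 (divide by k), 4 -> 1 (divide by k)  => res = 3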
|
the-stack_0_2204 | # -*- coding: utf-8 -*-
"""
py_vollib.black.implied_volatility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A library for option pricing, implied volatility, and
greek calculation. py_vollib is based on lets_be_rational,
a Python wrapper for LetsBeRational by Peter Jaeckel as
described below.
:copyright: © 2017 Gammon Capital LLC
:license: MIT, see LICENSE for more details.
About LetsBeRational:
~~~~~~~~~~~~~~~~~~~~~
The source code of LetsBeRational resides at www.jaeckel.org/LetsBeRational.7z .
::
========================================================================================
Copyright © 2013-2014 Peter Jäckel.
Permission to use, copy, modify, and distribute this software is freely granted,
provided that this notice is preserved.
WARRANTY DISCLAIMER
The Software is provided "as is" without warranty of any kind, either express or implied,
including without limitation any implied warranties of condition, uninterrupted use,
merchantability, fitness for a particular purpose, or non-infringement.
========================================================================================
"""
# -----------------------------------------------------------------------------
# IMPORTS
# Standard library imports
from __future__ import division
# Related third party imports
import py_lets_be_rational as lets_be_rational
import numpy
# Local application/library specific imports
from py_vollib.black import black
from py_vollib.black import undiscounted_black
from py_vollib.black import normalised_black
from py_vollib.helpers import binary_flag
from py_vollib.helpers.exceptions import PriceIsAboveMaximum, PriceIsBelowIntrinsic
from py_vollib.helpers.constants import MINUS_FLOAT_MAX, FLOAT_MAX
# -----------------------------------------------------------------------------
# FUNCTIONS - IMPLIED VOLATILITY
def implied_volatility_of_discounted_option_price(discounted_option_price, F, K, r, t, flag):
"""Calculate the implied volatility of the Black option price
:param discounted_option_price: discounted Black price of a futures option
:type discounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param r: the risk-free interest rate
:type r: float
:param t: time to expiration in years
:type t: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> r = .02
>>> discounted_call_price = black(flag, F, K, t, r, sigma)
>>> iv = implied_volatility_of_discounted_option_price(
... discounted_call_price, F, K, r, t, flag)
>>> expected_price = 5.5811067246
>>> expected_iv = 0.2
>>> abs(expected_price - discounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
deflater = numpy.exp(-r * t)
undiscounted_option_price = discounted_option_price / deflater
sigma_calc = lets_be_rational.implied_volatility_from_a_transformed_rational_guess(
undiscounted_option_price,
F,
K,
t,
binary_flag[flag]
)
if sigma_calc == FLOAT_MAX:
raise PriceIsAboveMaximum()
elif sigma_calc == MINUS_FLOAT_MAX:
raise PriceIsBelowIntrinsic()
return sigma_calc
def implied_volatility(discounted_option_price, F, K, r, t, flag):
"""Calculate the implied volatility of the Black option price
:param discounted_option_price: discounted Black price of a futures option
:type discounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param r: the risk-free interest rate
:type r: float
:param t: time to expiration in years
:type t: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> r = .02
>>> discounted_call_price = black(flag, F, K, t, r, sigma)
>>> iv = implied_volatility(
... discounted_call_price, F, K, r, t, flag)
>>> expected_price = 5.5811067246
>>> expected_iv = 0.2
>>> abs(expected_price - discounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
return implied_volatility_of_discounted_option_price(discounted_option_price, F, K, r, t, flag)
# -----------------------------------------------------------------------------
# FUNCTIONS - IMPLIED VOLATILITY, FOR TEST & REFERENCE
def normalised_implied_volatility(beta, x, flag):
"""Calculate the normalised Black implied volatility,
a time invariant transformation
of Black implied volatility.
Keyword arguments:
:param x: ln(F/K) where K is the strike price, and F is the futures price
:type x: float
:param beta: the normalized Black price
:type beta: float
:param flag: 'p' or 'c' for put or call
:type flag: str
>>> beta_call = normalised_black(0.0, 0.2, 'c')
>>> beta_put = normalised_black(0.1,0.23232323888,'p')
>>> normalized_b76_iv_call = normalised_implied_volatility(beta_call, 0.0, 'c')
>>> normalized_b76_iv_put = normalised_implied_volatility(beta_put, 0.1, 'p')
>>> expected_price = 0.0796556745541
>>> expected_iv = 0.2
>>> abs(expected_price - beta_call) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_call) < 0.00001
True
>>> expected_price = 0.0509710222785
>>> expected_iv = 0.23232323888
>>> abs(expected_price - beta_put) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_put) < 0.00001
True
"""
q = binary_flag[flag]
return lets_be_rational.normalised_implied_volatility_from_a_transformed_rational_guess(
beta, x, q)
def normalised_implied_volatility_limited_iterations(beta, x, flag, N):
"""Calculate the normalised Black implied volatility,
with limited iterations.
:param x: ln(F/K) where K is the strike price, and F is the futures price
:type x: float
:param beta: the normalized Black price
:type beta: float
:param flag: 'p' or 'c' for put or call
    :type flag: str
    :param N: the maximum number of iterations to perform
    :type N: int
>>> beta_call = normalised_black(0.0, 0.2, 'c')
>>> beta_put = normalised_black(0.1,0.23232323888,'p')
>>> normalized_b76_iv_call = normalised_implied_volatility_limited_iterations(beta_call, 0.0, 'c',1)
>>> normalized_b76_iv_put = normalised_implied_volatility_limited_iterations(beta_put, 0.1, 'p',1)
>>> expected_price = 0.0796556745541
>>> expected_iv = 0.2
>>> abs(expected_price - beta_call) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_call) < 0.00001
True
>>> expected_price = 0.0509710222785
>>> expected_iv = 0.23232323888
>>> abs(expected_price - beta_put) < 0.00001
True
>>> abs(expected_iv - normalized_b76_iv_put) < 0.00001
True
"""
q = binary_flag[flag]
return lets_be_rational.normalised_implied_volatility_from_a_transformed_rational_guess_with_limited_iterations(
beta, x, q, N)
def implied_volatility_of_undiscounted_option_price(
undiscounted_option_price,
F,
K,
t,
flag
):
"""Calculate the implied volatility of the undiscounted Black option price
:param undiscounted_option_price: undiscounted Black price of a futures option
:type undiscounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param t: time to expiration in years
    :type t: float
    :param flag: 'p' or 'c' for put or call
    :type flag: str
>>> F = 100
>>> K = 100
>>> sigma = .2
>>> flag = 'c'
>>> t = .5
>>> undiscounted_call_price = undiscounted_black(F, K, sigma, t, flag)
>>> iv = implied_volatility_of_undiscounted_option_price(
... undiscounted_call_price, F, K, t, flag)
>>> expected_price = 5.6371977797
>>> expected_iv = 0.2
>>> abs(expected_price - undiscounted_call_price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
return lets_be_rational.implied_volatility_from_a_transformed_rational_guess(
undiscounted_option_price,
F,
K,
t,
binary_flag[flag]
)
def implied_volatility_of_undiscounted_option_price_limited_iterations(
undiscounted_option_price, F, K, t, flag, N):
"""Calculate implied volatility of the undiscounted Black
option price with limited iterations.
:param undiscounted_option_price: undiscounted Black price of a futures option
:type undiscounted_option_price: float
:param F: underlying futures price
:type F: float
:param K: strike price
:type K: float
:param t: time to expiration in years
    :type t: float
    :param flag: 'p' or 'c' for put or call
    :type flag: str
    :param N: the maximum number of iterations to perform
    :type N: int
>>> F = 100
>>> K = 100
>>> sigma = .232323232
>>> flag = 'c'
>>> t = .5
>>> price = undiscounted_black(F, K, sigma, t, flag)
>>> iv = implied_volatility_of_undiscounted_option_price_limited_iterations(
... price, F, K, t, flag, 1)
>>> expected_price = 6.54635543387
>>> expected_iv = 0.232323232
>>> abs(expected_price - price) < 0.00001
True
>>> abs(expected_iv - iv) < 0.00001
True
"""
return lets_be_rational.implied_volatility_from_a_transformed_rational_guess_with_limited_iterations(
undiscounted_option_price,
F,
K,
t,
binary_flag[flag],
N
)
if __name__ == "__main__":
from py_vollib.helpers.doctest_helper import run_doctest
run_doctest()
|
the-stack_0_2205 | from exceptionite.errors import Handler, StackOverflowIntegration, SolutionsIntegration
from .JsonHandler import JsonHandler
class ExceptionHandler:
def __init__(self, application, driver_config=None):
self.application = application
self.drivers = {}
self.driver_config = driver_config or {}
self.options = {}
def set_options(self, options):
self.options = options
return self
def add_driver(self, name, driver):
self.drivers.update({name: driver})
def set_configuration(self, config):
self.driver_config = config
return self
def get_driver(self, name=None):
if name is None:
return self.drivers[self.driver_config.get("default")]
return self.drivers[name]
def get_config_options(self, driver=None):
if driver is None:
return self.driver_config[self.driver_config.get("default")]
return self.driver_config.get(driver, {})
def handle(self, exception):
response = self.application.make("response")
request = self.application.make("request")
self.application.make("event").fire(
f"masonite.exception.{exception.__class__.__name__}", exception
)
if self.application.has(f"{exception.__class__.__name__}Handler"):
return self.application.make(
f"{exception.__class__.__name__}Handler"
).handle(exception)
if hasattr(exception, "get_response"):
return response.view(exception.get_response(), exception.get_status())
handler = Handler(exception)
if "application/json" in str(request.header("Accept")):
return response.view(JsonHandler(exception).render(), status=500)
if self.options.get("handlers.stack_overflow"):
handler.integrate(StackOverflowIntegration())
if self.options.get("handlers.solutions"):
handler.integrate(SolutionsIntegration())
handler.context(
{
"WSGI": {
"Path": request.get_path(),
"Input": request.input_bag.all_as_values() or None,
# 'Parameters': request.url_params,
"Request Method": request.get_request_method(),
},
"Headers": request.header_bag.to_dict(),
}
)
return response.view(handler.render(), status=500)
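# Note: handle() above first looks for a container binding named
# "<ExceptionClass>Handler" (e.g. a hypothetical "ValidationExceptionHandler"),
# so applications can override rendering per exception type before the generic
# exceptionite page is produced; the example binding name is illustrative only.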
|
the-stack_0_2207 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random
import jax.numpy as jnp
from jax.scipy.linalg import cho_factor, cho_solve, inv, solve_triangular
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import plate
from numpyro.infer import HMC, HMCECS, MCMC, NUTS, DiscreteHMCGibbs, HMCGibbs
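# Conjugate Gibbs update used by the linear-regression tests below: with a
# standard-normal prior on beta and the noise scale sigma taken from the HMC
# sites, the conditional posterior of beta is multivariate normal with
# precision XX / sigma^2 + I; the mean is obtained via the Cholesky solve and a
# fresh sample is proposed from that exact conditional.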
def _linear_regression_gibbs_fn(X, XX, XY, Y, rng_key, gibbs_sites, hmc_sites):
N, P = X.shape
sigma = jnp.exp(hmc_sites['log_sigma']) if 'log_sigma' in hmc_sites else hmc_sites['sigma']
sigma_sq = jnp.square(sigma)
covar_inv = XX / sigma_sq + jnp.eye(P)
L = cho_factor(covar_inv, lower=True)[0]
L_inv = solve_triangular(L, jnp.eye(P), lower=True)
loc = cho_solve((L, True), XY) / sigma_sq
beta_proposal = dist.MultivariateNormal(loc=loc, scale_tril=L_inv).sample(rng_key)
return {'beta': beta_proposal}
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_linear_model_log_sigma(kernel_cls, N=100, P=50, sigma=0.11, warmup_steps=500, num_samples=500):
np.random.seed(0)
X = np.random.randn(N * P).reshape((N, P))
XX = np.matmul(np.transpose(X), X)
Y = X[:, 0] + sigma * np.random.randn(N)
XY = np.sum(X * Y[:, None], axis=0)
def model(X, Y):
N, P = X.shape
log_sigma = numpyro.sample("log_sigma", dist.Normal(1.0))
sigma = jnp.exp(log_sigma)
beta = numpyro.sample("beta", dist.Normal(jnp.zeros(P), jnp.ones(P)))
mean = jnp.sum(beta * X, axis=-1)
numpyro.deterministic("mean", mean)
numpyro.sample("obs", dist.Normal(mean, sigma), obs=Y)
gibbs_fn = partial(_linear_regression_gibbs_fn, X, XX, XY, Y)
hmc_kernel = kernel_cls(model)
kernel = HMCGibbs(hmc_kernel, gibbs_fn=gibbs_fn, gibbs_sites=['beta'])
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(0), X, Y)
beta_mean = np.mean(mcmc.get_samples()['beta'], axis=0)
assert_allclose(beta_mean, np.array([1.0] + [0.0] * (P - 1)), atol=0.05)
sigma_mean = np.exp(np.mean(mcmc.get_samples()['log_sigma'], axis=0))
assert_allclose(sigma_mean, sigma, atol=0.25)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_linear_model_sigma(kernel_cls, N=90, P=40, sigma=0.07, warmup_steps=500, num_samples=500):
np.random.seed(1)
X = np.random.randn(N * P).reshape((N, P))
XX = np.matmul(np.transpose(X), X)
Y = X[:, 0] + sigma * np.random.randn(N)
XY = np.sum(X * Y[:, None], axis=0)
def model(X, Y):
N, P = X.shape
sigma = numpyro.sample("sigma", dist.HalfCauchy(1.0))
beta = numpyro.sample("beta", dist.Normal(jnp.zeros(P), jnp.ones(P)))
mean = jnp.sum(beta * X, axis=-1)
numpyro.sample("obs", dist.Normal(mean, sigma), obs=Y)
gibbs_fn = partial(_linear_regression_gibbs_fn, X, XX, XY, Y)
hmc_kernel = kernel_cls(model)
kernel = HMCGibbs(hmc_kernel, gibbs_fn=gibbs_fn, gibbs_sites=['beta'])
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(0), X, Y)
beta_mean = np.mean(mcmc.get_samples()['beta'], axis=0)
assert_allclose(beta_mean, np.array([1.0] + [0.0] * (P - 1)), atol=0.05)
sigma_mean = np.mean(mcmc.get_samples()['sigma'], axis=0)
assert_allclose(sigma_mean, sigma, atol=0.25)
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
@pytest.mark.parametrize('num_blocks', [1, 2, 50, 100])
def test_subsample_gibbs_partitioning(kernel_cls, num_blocks):
def model(obs):
with plate('N', obs.shape[0], subsample_size=100) as idx:
numpyro.sample('x', dist.Normal(0, 1), obs=obs[idx])
obs = random.normal(random.PRNGKey(0), (10000,)) / 100
kernel = HMCECS(kernel_cls(model), num_blocks=num_blocks)
hmc_state = kernel.init(random.PRNGKey(1), 10, None, model_args=(obs,), model_kwargs=None)
gibbs_sites = {'N': jnp.arange(100)}
gibbs_fn = kernel._gibbs_fn
new_gibbs_sites = gibbs_fn(random.PRNGKey(2), gibbs_sites, hmc_state.z) # accept_prob > .999
block_size = 100 // num_blocks
for name in gibbs_sites:
assert block_size == jnp.not_equal(gibbs_sites[name], new_gibbs_sites[name]).sum()
@pytest.mark.parametrize('kernel_cls', [HMC, NUTS])
def test_gaussian_model(kernel_cls, D=2, warmup_steps=3000, num_samples=5000):
np.random.seed(0)
cov = np.random.randn(4 * D * D).reshape((2 * D, 2 * D))
cov = jnp.matmul(jnp.transpose(cov), cov) + 0.25 * jnp.eye(2 * D)
cov00 = cov[:D, :D]
cov01 = cov[:D, D:]
cov10 = cov[D:, :D]
cov11 = cov[D:, D:]
cov_01_cov11_inv = jnp.matmul(cov01, inv(cov11))
cov_10_cov00_inv = jnp.matmul(cov10, inv(cov00))
posterior_cov0 = cov00 - jnp.matmul(cov_01_cov11_inv, cov10)
posterior_cov1 = cov11 - jnp.matmul(cov_10_cov00_inv, cov01)
# we consider a model in which (x0, x1) ~ MVN(0, cov)
def gaussian_gibbs_fn(rng_key, hmc_sites, gibbs_sites):
x1 = hmc_sites['x1']
posterior_loc0 = jnp.matmul(cov_01_cov11_inv, x1)
x0_proposal = dist.MultivariateNormal(loc=posterior_loc0, covariance_matrix=posterior_cov0).sample(rng_key)
return {'x0': x0_proposal}
def model():
x0 = numpyro.sample("x0", dist.MultivariateNormal(loc=jnp.zeros(D), covariance_matrix=cov00))
posterior_loc1 = jnp.matmul(cov_10_cov00_inv, x0)
numpyro.sample("x1", dist.MultivariateNormal(loc=posterior_loc1, covariance_matrix=posterior_cov1))
hmc_kernel = kernel_cls(model, dense_mass=True)
kernel = HMCGibbs(hmc_kernel, gibbs_fn=gaussian_gibbs_fn, gibbs_sites=['x0'])
mcmc = MCMC(kernel, warmup_steps, num_samples, progress_bar=False)
mcmc.run(random.PRNGKey(0))
x0_mean = np.mean(mcmc.get_samples()['x0'], axis=0)
x1_mean = np.mean(mcmc.get_samples()['x1'], axis=0)
x0_std = np.std(mcmc.get_samples()['x0'], axis=0)
x1_std = np.std(mcmc.get_samples()['x1'], axis=0)
assert_allclose(x0_mean, np.zeros(D), atol=0.2)
assert_allclose(x1_mean, np.zeros(D), atol=0.2)
assert_allclose(x0_std, np.sqrt(np.diagonal(cov00)), rtol=0.05)
assert_allclose(x1_std, np.sqrt(np.diagonal(cov11)), rtol=0.1)
def test_discrete_gibbs_multiple_sites():
def model():
numpyro.sample("x", dist.Bernoulli(0.7).expand([3]))
numpyro.sample("y", dist.Binomial(10, 0.3))
kernel = DiscreteHMCGibbs(NUTS(model))
mcmc = MCMC(kernel, 1000, 10000, progress_bar=False)
mcmc.run(random.PRNGKey(0))
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["x"], 0), 0.7 * jnp.ones(3), atol=0.01)
assert_allclose(jnp.mean(samples["y"], 0), 0.3 * 10, atol=0.1)
def test_discrete_gibbs_enum():
def model():
numpyro.sample("x", dist.Bernoulli(0.7), infer={"enumerate": "parallel"})
y = numpyro.sample("y", dist.Binomial(10, 0.3))
numpyro.deterministic("y2", y ** 2)
kernel = DiscreteHMCGibbs(NUTS(model))
mcmc = MCMC(kernel, 1000, 10000, progress_bar=False)
mcmc.run(random.PRNGKey(0))
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["y"], 0), 0.3 * 10, atol=0.1)
@pytest.mark.parametrize("random_walk", [False, True])
@pytest.mark.parametrize("modified", [False, True])
def test_discrete_gibbs_bernoulli(random_walk, modified):
def model():
numpyro.sample("c", dist.Bernoulli(0.8))
kernel = DiscreteHMCGibbs(NUTS(model), random_walk=random_walk, modified=modified)
mcmc = MCMC(kernel, 1000, 200000, progress_bar=False)
mcmc.run(random.PRNGKey(0))
samples = mcmc.get_samples()["c"]
assert_allclose(jnp.mean(samples), 0.8, atol=0.05)
@pytest.mark.parametrize("modified", [False, True])
def test_discrete_gibbs_gmm_1d(modified):
def model(probs, locs):
c = numpyro.sample("c", dist.Categorical(probs))
numpyro.sample("x", dist.Normal(locs[c], 0.5))
probs = jnp.array([0.15, 0.3, 0.3, 0.25])
locs = jnp.array([-2, 0, 2, 4])
kernel = DiscreteHMCGibbs(NUTS(model), modified=modified)
mcmc = MCMC(kernel, 1000, 200000, progress_bar=False)
mcmc.run(random.PRNGKey(0), probs, locs)
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["x"]), 1.3, atol=0.1)
assert_allclose(jnp.var(samples["x"]), 4.36, atol=0.1)
assert_allclose(jnp.mean(samples["c"]), 1.65, atol=0.1)
assert_allclose(jnp.var(samples["c"]), 1.03, atol=0.1)
|
the-stack_0_2208 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..electra.tokenizer import ElectraTokenizer
__all__ = ['ConvBertTokenizer', ]
class ConvBertTokenizer(ElectraTokenizer):
"""
Construct a ConvBERT tokenizer. `ConvBertTokenizer` is identical to `ElectraTokenizer`.
For more information regarding those methods, please refer to this superclass.
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"convbert-base":
"https://bj.bcebos.com/paddlenlp/models/transformers/convbert/convbert-base/vocab.txt",
"convbert-medium-small":
"https://bj.bcebos.com/paddlenlp/models/transformers/convbert/convbert-medium-small/vocab.txt",
"convbert-small":
"https://bj.bcebos.com/paddlenlp/models/transformers/convbert/convbert-small/vocab.txt",
}
}
pretrained_init_configuration = {
"convbert-base": {
"do_lower_case": True
},
"convbert-medium-small": {
"do_lower_case": True
},
"convbert-small": {
"do_lower_case": True
},
}
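# Minimal usage sketch (mirrors the ElectraTokenizer API this class inherits;
# the example sentence is illustrative):
#   tokenizer = ConvBertTokenizer.from_pretrained("convbert-base")
#   tokens = tokenizer.tokenize("Welcome to use PaddlePaddle and PaddleNLP!")
#   ids = tokenizer.convert_tokens_to_ids(tokens)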
|