content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---|
def is_close(A, B, tol=np.sqrt(_eps)):
"""
    Check if two matrices are close in the sense of trace distance, up to a global phase and sign.
"""
if tracedist(A, B) < tol:
return True
else:
A[np.abs(A) < tol] = 0.0
B[np.abs(B) < tol] = 0.0
A /= np.exp(1j*np.angle(A[0,0]))
B /= np.exp(1j*np.angle(B[0,0]))
return ((tracedist(A, B) < tol) or (tracedist(A, -1.0*B) < tol)) | 5,356,400 |
def hist3d_numba_seq_weight(tracks, weights, bins, ranges, use_memmap=False, tmp=None):
"""Numba-compiled weighted 3d histogram
From https://iscinumpy.dev/post/histogram-speeds-in-python/
Parameters
----------
tracks : (x, y, z)
List of input arrays of identical length, to be histogrammed
weights : array-like
List of weights for each point of the input arrays
bins : (int, int, int)
shape of the final histogram
    ranges : [[xmin, xmax], [ymin, ymax], [zmin, zmax]]
Minimum and maximum value of the histogram, in each dimension
Other parameters
----------------
use_memmap : bool
If ``True`` and the number of bins is above 10 million,
the histogram is created into a memory-mapped Numpy array
tmp : str
Temporary file name for the memory map (only relevant if
``use_memmap`` is ``True``)
Returns
-------
    histogram : array-like
        Output histogram
Examples
--------
>>> x = np.random.uniform(0., 1., 100)
>>> y = np.random.uniform(2., 3., 100)
>>> z = np.random.uniform(4., 5., 100)
>>> weights = np.random.uniform(0, 1., 100)
>>> H, _ = np.histogramdd((x, y, z), bins=(5, 6, 7),
... range=[(0., 1.), (2., 3.), (4., 5)],
... weights=weights)
>>> Hn = hist3d_numba_seq_weight(
... (x, y, z), weights, bins=(5, 6, 7),
... ranges=[[0., 1.], [2., 3.], [4., 5.]])
>>> assert np.all(H == Hn)
"""
H = _allocate_array_or_memmap(bins, np.double, use_memmap=use_memmap, tmp=tmp)
return _hist3d_numba_seq_weight(
H,
np.asarray(tracks),
weights,
np.asarray(list(bins)),
np.asarray(ranges),
) | 5,356,401 |
def test_stem_name():
""" test the internal function to find the stemp of a filename"""
assert fm._stem_name("Test.xlsx") == "test"
assert fm._stem_name(r"c:\test\test.xlsx") == "test"
assert fm._stem_name(r".\Test.xls") == "test" | 5,356,402 |
def do_extract(
archive: str,
*,
output_dir: typing.Optional[str],
verbose: bool,
name_encoding: str,
) -> None:
"""Extract all files and directories from an archive."""
if output_dir is None:
output_dir = os.path.basename(archive) + ".extracted"
with click.open_file(archive, "rb") as archivef:
parsed = InstallShield3Z.from_io(archivef)
dir_paths = []
for dir in parsed.toc_directories:
dir_path = os.path.join(output_dir, *dir.path.decode(name_encoding).split("\\"))
dir_paths.append(dir_path)
os.makedirs(dir_path, exist_ok=True)
for file in parsed.toc_files:
file_path_in_archive = join_dir_file_name(file.directory.path, file.name)
output_path = os.path.join(output_dir, *file_path_in_archive.decode(name_encoding).split("\\"))
if verbose:
print(f"Extracting: {format_name_readable(file_path_in_archive, name_encoding)} ({file.len_data_compressed} bytes) -> {click.format_filename(output_path)}")
with open(output_path, "wb") as fout:
extract_file_data(file, fout)
restore_file_metadata(file, output_path)
archive_timestamp = timestamp_from_dos_datetime(parsed.header.modified)
for dir_path in dir_paths:
os.utime(dir_path, (archive_timestamp, archive_timestamp)) | 5,356,403 |
def convertTimeQuiet(s):
"""
Converts a time String in the form hh:mm:ss[.nnnnnnnnn] to a long nanoseconds offset from Epoch.
:param s: (java.lang.String) - The String to convert.
:return: (long) QueryConstants.NULL_LONG if the String cannot be parsed, otherwise long nanoseconds offset from Epoch.
"""
return _java_type_.convertTimeQuiet(s) | 5,356,404 |
def pop_first_non_none_value(
*args,
msg: Optional[str] = "default error msg",
):
"""
    Args:
        args: a list of python values
        msg: prefix of the error message raised when no non-None value is found
    Returns:
        the first non-None value
"""
for arg in args:
if arg is not None:
return arg
raise ValueError(f"{msg} can't find non-none value") | 5,356,405 |
def addToolbarItem(
aController,
anIdentifier,
aLabel,
aPaletteLabel,
aToolTip,
aTarget,
anAction,
anItemContent,
aMenu,
):
"""
    Adds a freshly created item to the toolbar defined by
    aController. Makes a number of assumptions about the
    implementation of aController. It should be refactored into a
    generically useful toolbar management utility.
"""
toolbarItem = Cocoa.NSToolbarItem.alloc().initWithItemIdentifier_(anIdentifier)
toolbarItem.setLabel_(aLabel)
toolbarItem.setPaletteLabel_(aPaletteLabel)
toolbarItem.setToolTip_(aToolTip)
toolbarItem.setTarget_(aTarget)
if anAction:
toolbarItem.setAction_(anAction)
    if isinstance(anItemContent, Cocoa.NSImage):
toolbarItem.setImage_(anItemContent)
else:
toolbarItem.setView_(anItemContent)
bounds = anItemContent.bounds()
minSize = (100, bounds[1][1])
maxSize = (1000, bounds[1][1])
toolbarItem.setMinSize_(minSize)
toolbarItem.setMaxSize_(maxSize)
if aMenu:
menuItem = Cocoa.NSMenuItem.alloc().init()
menuItem.setSubmenu_(aMenu)
menuItem.setTitle_(aMenu.title())
toolbarItem.setMenuFormRepresentation_(menuItem)
aController._toolbarItems[anIdentifier] = toolbarItem | 5,356,406 |
def start(update: Update, _: CallbackContext) -> None:
"""Send a message when the command /start is issued."""
update.message.reply_text(
"Hi! I am a work in progress bot. My job is to help this awesome CTLPE Intake 5 group, "
"especially with deadlines. Type /deadlines to start. For list of supported commands, type /help.",
# reply_markup=markup,
) | 5,356,407 |
def all_equal(iterable):
"""
Returns True if all the elements are equal.
Reference:
Add "equal" builtin function
https://mail.python.org/pipermail/python-ideas/2016-October/042734.html
"""
groups = groupby(iterable)
return next(groups, True) and not next(groups, False) | 5,356,408 |
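A quick usage sketch of the groupby trick used by all_equal above (itertools.groupby is assumed to be imported, as the function requires):
from itertools import groupby

assert all_equal([1, 1, 1])        # one group of equal elements -> True
assert all_equal([])               # empty iterable is vacuously True
assert not all_equal([1, 2, 1])    # a second group appears -> False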
async def alliance(ctx,*,request : str=''):
"""Returns trophy data over time for an alliance"""
results = member_info.alliance(request)
member_info.os.chdir('plots')
result = [x for x in zip(results[0],results[1])]
await ctx.send("Alliance trophy data over time. Alliance names and IDs:"+lined_string(result), file=discord.File(fp="plot.png"))
member_info.os.chdir('..')
return | 5,356,409 |
def is_str(required=False, default=None, min_len=None, max_len=None, pattern=None):
"""
Returns a function that when invoked with a given input asserts that the input is a valid string
and that it meets the specified criteria. All text are automatically striped off of both trailing and leading
whitespaces.
:param required: False by default.
:param default: default value to be used when value is `None` (or missing).
:param min_len: the minimum length allowed. Setting this to 1 effectively rejects empty strings
:param max_len: the maximum length allowed. Strings longer than this will be rejected
:param pattern: a valid python regex pattern. Define your patterns carefully with regular expression
attacks in mind.
:return: A callable that when invoked with an input will check that it meets the criteria defined above or raise
an a validation exception otherwise. It returns the newly validated input on success.
"""
if pattern:
# compile pattern once and reuse for all validations
compiled_pattern = re.compile(pattern)
# noinspection PyShadowingBuiltins
def func(input):
input = input or default
if required and input is None:
raise ValidationException('required but was missing')
if not required and input is None:
return default
input = str(input).strip()
if min_len is not None and len(input) < min_len:
raise ValidationException("'{}' is shorter than minimum required length({})".format(input, min_len))
if max_len is not None and len(input) > max_len:
raise ValidationException("'{}' is longer than maximum required length({})".format(input, max_len))
if pattern and compiled_pattern.match(input) is None:
raise ValidationException("'{}' does not match expected pattern({})".format(input, pattern))
return input
return func | 5,356,410 |
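A brief usage sketch for the validator factory above (ValidationException is assumed to come from the same module as is_str):
validate_username = is_str(required=True, min_len=3, max_len=20, pattern=r'^[a-z0-9_]+$')
print(validate_username("  alice_01  "))    # stripped and validated -> 'alice_01'
try:
    validate_username("has spaces")
except ValidationException as err:
    print(err)                              # pattern mismatch raises a validation error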
def test_100606_closed():
"""Test that we can accurately close off an unclosed polygon."""
prod = parser(get_test_file("SPCPTS/PTSDY1_closed.txt"))
# prod.draw_outlooks()
outlook = prod.get_outlook("CATEGORICAL", "TSTM", 1)
assert abs(outlook.geometry.area - 572.878) < 0.01 | 5,356,411 |
def test_arange():
"""
test arange default
Returns:
None
"""
with fluid.dygraph.guard():
x = paddle.arange(1, 5, 0.5)
expect = [1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000, 4.5000]
tools.compare(x.numpy(), expect) | 5,356,412 |
def load_property_file(filename):
"""
Loads a file containing x=a,b,c... properties separated by newlines, and
returns an OrderedDict where the key is x and the value is [a,b,c...]
:param filename:
:return:
"""
props = OrderedDict()
if not os.path.exists(filename):
return props
with open(filename) as f:
for line in f:
line = line.strip().split("=", 1)
if len(line) != 2:
continue
props[line[0].strip()] = line[1].split(",")
_log.debug("Read property file:\n%s", props)
return props | 5,356,413 |
def fit_curve(df, country, status, start_date, end_date=None):
"""
    Fit the sigmoid function ``sig`` to the time series of a given country and status.
    Parameters:
        df (pd.DataFrame): data indexed by (country, status) with dates as columns
        country: country to select
        status: status to select
        start_date: first date of the fitting window
        end_date: last date of the fitting window (defaults to the end of the data)
    Returns:
        (x0, a, b, fit): fitted coefficients (with uncertainties) and a dict of fit
        diagnostics (R², coefficients, covariance and the data used).
"""
# Select the data
slc_date = slice(start_date, end_date)
y_data = df.loc[(country, status), slc_date].groupby(
CTRY_K).sum().values[0]
# Generate a dummy x_data
x_data = np.arange(0, y_data.shape[0])
# Set initial guesses for the curve fit
x0_0 = x_data[np.where(y_data > 0)[0][0]] # Day of the first case
a_0 = y_data.max() # Current number of cases
b_0 = 0.1 # Arbitrary
p0 = [x0_0, a_0, b_0]
# Fit the curve
popt, pcov = opt.curve_fit(sig, x_data, y_data, p0=p0)
# Evaluate the curve fit to calculate the R²
y_fit = sig(x_data, *popt)
r2 = mt.r2_score(y_data, y_fit)
# Estimate the uncertainty of the obtained coefficients
    x0, a, b = unc.correlated_values(popt, pcov)
# Store the fit information
fit = {
"r2": r2,
"x0": x0,
"a": a,
"b": b,
"coef": popt,
"coef_cov": pcov,
"y_data": y_data,
"x_data": slc_date,
}
return x0, a, b, fit | 5,356,414 |
def load_data_from(file_path):
"""
    Loads the TTL file given by file_path and returns (file_path, data, triple_count).
    data maps each node to its outgoing and incoming (predicate, direction) edges.
"""
graph = Graph()
graph.parse(file_path, format="ttl")
data = {}
triple_count = 0
for subject, predicate, object in graph:
triple_count += 1
# add triple
predicate_out = (predicate, "out")
if subject not in data:
data[subject] = {}
if predicate_out not in data[subject]:
data[subject][predicate_out] = []
data[subject][predicate_out].append(object)
# add backlink
predicate_in = (predicate, "in")
if object not in data:
data[object] = {}
if predicate_in not in data[object]:
data[object][predicate_in] = []
data[object][predicate_in].append(subject)
print(file_path, ":", triple_count, "triples loaded")
return (file_path, data, triple_count) | 5,356,415 |
def change_circle_handler():
"""Change the circle radius."""
global radius
radius = size
# Insert code to make radius label change. | 5,356,416 |
def test_def_type_in_submod_procedure():
"""Test that going into the definition of a type bound procedure in a submodule"""
string = write_rpc_request(1, "initialize", {"rootPath": str(test_dir)})
file_path = test_dir / "subdir" / "test_submod.F90"
string += def_request(file_path, 36, 13)
errcode, results = run_request(string)
assert errcode == 0
ref_res = [[1, 1, str(test_dir / "subdir" / "test_submod.F90")]]
assert len(ref_res) == len(results) - 1
for i, res in enumerate(ref_res):
validate_def(results[i + 1], res) | 5,356,417 |
def get_predictability(X, y, dtype='continuous'):
"""Returns scores for various models when given a dataframe and target set
Arguments:
X (dataframe)
y (series)
dtype (str): categorical or continuous
Note: X and y must be equal in column length
Returns:
results (dataframe)
"""
M = pd.concat([X, y], axis=1)
fortrain = M.dropna()
X_ft = fortrain.iloc[:,:-1]
y_ft = fortrain.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X_ft, y_ft, test_size=0.1)
# use mean as the prediction
y_train_mean = y_train.mean()
y_pred_mean = np.zeros(len(y_test))
y_pred_mean.fill(y_train_mean)
# use median as the prediction
y_train_median = y_train.median()
y_pred_median = np.zeros(len(y_test))
y_pred_median.fill(y_train_median)
# use mode as the prediction
# zero index is required to return the first most common value
y_train_mode = y_train.mode()[0]
y_pred_mode = np.zeros(len(y_test))
y_pred_mode.fill(y_train_mode)
lm = LinearRegression()
print("Fitting linear regression model")
lm.fit(X_train, y_train)
rf = RandomForestRegressor()
print("Fitting random forest model")
rf.fit(X_train, y_train)
kN = KNeighborsRegressor()
print("Fitting kNN model")
kN.fit(X_train, y_train)
# get the r2 score for each model
mean_score = r2_score(y_test, y_pred_mean)
median_score = r2_score(y_test, y_pred_median)
mode_score = r2_score(y_test, y_pred_mode)
lm_score = lm.score(X_test, y_test)
rf_score = rf.score(X_test, y_test)
kN_score = kN.score(X_test, y_test)
# get the mse for each model
mean_mse = mean_squared_error(y_test, y_pred_mean)
median_mse = mean_squared_error(y_test, y_pred_median)
mode_mse = mean_squared_error(y_test, y_pred_mode)
lm_y_pred = lm.predict(X_test)
rf_y_pred = rf.predict(X_test)
kN_y_pred = kN.predict(X_test)
lm_mse = mean_squared_error(y_test, lm_y_pred)
rf_mse = mean_squared_error(y_test, rf_y_pred)
kN_mse = mean_squared_error(y_test, kN_y_pred)
# construct the dataframe to return to the user
names = ['mean', 'median', 'mode', 'LinearRegression', 'RandomForestRegressor', 'KNeighborsRegressor']
scores = [mean_score, median_score, mode_score, lm_score, rf_score, kN_score]
losses = [mean_mse, median_mse, mode_mse, lm_mse, rf_mse, kN_mse]
results = pd.DataFrame(data=list(zip(names, scores, losses)), columns=['names', 'r2 score', 'loss'])
results['r2 score'] = results['r2 score'].apply(lambda x: round(x, 0))
results['loss'] = results['loss'].apply(lambda x: round(x, 0))
return results | 5,356,418 |
def generate_guid(shard=0, base_uuid=None):
"""
    Generates an "optimized" UUID that accommodates the b-tree indexing
    algorithms used by database indexes. Check the internet for
details but the tl;dr is big endian is everything.
Leveraging the following as the reference implementation:
https://www.percona.com/blog/2014/12/19/store-uuid-optimized-way/
http://stackoverflow.com/questions/412341/how-should-i-store-guid-in-mysql-tables#27845470
https://engineering.instagram.com/sharding-ids-at-instagram-1cf5a71e5a5c
It works as follows, by reorganizing the most significant bytes of the
timestamp portion of a UUID1 to ensure that UUIDs generated in close
succession all land on the same (or at least adjacent) index pages.
The implementation is provided in pure-python to ensure we aren't
delegating the calculation to the SPOF that is our database. While not
the most performant place to put this, it's by far the most flexible.
12345678-9ABC-DEFG-HIJK-LMNOPQRSTUVW
12345678 = least significant 4 bytes of the timestamp in big endian order
9ABC = middle 2 timestamp bytes in big endian
D = 1 to signify a version 1 UUID
EFG = most significant 12 bits of the timestamp in big endian
When you convert to binary, the best order for indexing would be:
EFG9ABC12345678D + the rest.
Lastly, rather than implementing this as a type, through experimentation it
was determined that the re-ordered UUID can be coerced back into the uuid
type with no problems. This lets us rely on an existing implementation
for UUIDs and instead only worry about supplying one. The alternative
would be to implement in the type a conversion back to an "unordered" UUID
when retrieving the column from the database, which would be wasted effort
The last 12 bits of the UUID generated will be replaced with a shard id. By
default we're allowing for 4096 shards, which is overkill for everyone but
Facebook. However, it's easy to work with since every character in the
UUID represents 4 bits, so all we have to do is overwrite 3 characters.
"""
base_uuid = base_uuid or str(uuid.uuid1())
if shard > MAX_SHARD:
raise exception.InvalidShardId(shard_id=shard, max_shard=MAX_SHARD)
shard_id = "{:03X}".format(shard)
return uuid.UUID(''.join([base_uuid[15:18],
base_uuid[9],
base_uuid[10:13],
base_uuid[:8],
base_uuid[14],
base_uuid[19:23],
base_uuid[24:33],
shard_id])) | 5,356,419 |
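A small illustration of the byte reordering described above, using an arbitrary fixed UUID1 string (MAX_SHARD and the exception module are assumed to be defined alongside the function):
base = "58e0a7d7-eebc-11d8-9669-0800200c9a66"   # example UUID1, not from the source
ordered = generate_guid(shard=5, base_uuid=base)
# The high timestamp bits ("1d8" + "e" + "ebc") now lead the value and the
# last three hex digits carry the shard id (005):
print(ordered)   # 1d8eebc5-8e0a-7d71-9669-0800200c9005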
def load(name):
"""Loads dataset as numpy array."""
x, y = get_uci_data(name)
if len(y.shape) == 1:
y = y[:, None]
train_test_split = 0.8
random_permutation = np.random.permutation(x.shape[0])
n_train = int(x.shape[0] * train_test_split)
train_ind = random_permutation[:n_train]
test_ind = random_permutation[n_train:]
x_train, y_train = x[train_ind, :], y[train_ind, :]
x_test, y_test = x[test_ind, :], y[test_ind, :]
x_mean, x_std = np.mean(x_train, axis=0), np.std(x_train, axis=0)
y_mean = np.mean(y_train, axis=0)
epsilon = tf.keras.backend.epsilon()
x_train = (x_train - x_mean) / (x_std + epsilon)
x_test = (x_test - x_mean) / (x_std + epsilon)
y_train, y_test = y_train - y_mean, y_test - y_mean
return x_train, y_train, x_test, y_test | 5,356,420 |
def read_file(filename):
"""
Read :py:class:`msbuildpy.corflags.CorFlags` from a .NET assembly file path.
**None** is returned if the PE file is invalid.
:param filename: A file path to a .NET assembly/executable.
:return: :py:class:`msbuildpy.corflags.CorFlags` or **None**
"""
with open(filename, 'rb') as f:
return read(f) | 5,356,421 |
def cleanup_all(threshold_date=None):
"""Clean up the main soft deletable resources.
This function contains an order of calls to
clean up the soft-deletable resources.
:param threshold_date: soft deletions older than this date will be removed
:returns: total number of entries removed from the database
"""
LOG.debug("Cleaning up soft deletions where deletion date"
" is older than %s", str(threshold_date))
total = 0
total += cleanup_softdeletes(models.VmExpire,
threshold_date=threshold_date)
LOG.info("Cleaned up %s soft deleted entries", total)
return total | 5,356,422 |
def parse_config(config_strings):
"""Parse config from strings.
Args:
config_strings (string): strings of model config.
Returns:
Config: model config
"""
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# Update backbone config
if 'pool_mod' in config.model.backbone.backbones:
config.model.backbone.backbones.pop('pool_mod')
if 'sa_cfg' not in config.model.backbone:
config.model.backbone['sa_cfg'] = dict(
type='PointSAModule',
pool_mod='max',
use_xyz=True,
normalize_xyz=True)
if 'type' not in config.model.rpn_head.vote_aggregation_cfg:
config.model.rpn_head.vote_aggregation_cfg['type'] = 'PointSAModule'
# Update rpn_head config
if 'pred_layer_cfg' not in config.model.rpn_head:
config.model.rpn_head['pred_layer_cfg'] = dict(
in_channels=128, shared_conv_channels=(128, 128), bias=True)
if 'feat_channels' in config.model.rpn_head:
config.model.rpn_head.pop('feat_channels')
if 'vote_moudule_cfg' in config.model.rpn_head:
config.model.rpn_head['vote_module_cfg'] = config.model.rpn_head.pop(
'vote_moudule_cfg')
if config.model.rpn_head.vote_aggregation_cfg.use_xyz:
config.model.rpn_head.vote_aggregation_cfg.mlp_channels[0] -= 3
for cfg in config.model.roi_head.primitive_list:
cfg['vote_module_cfg'] = cfg.pop('vote_moudule_cfg')
cfg.vote_aggregation_cfg.mlp_channels[0] -= 3
if 'type' not in cfg.vote_aggregation_cfg:
cfg.vote_aggregation_cfg['type'] = 'PointSAModule'
if 'type' not in config.model.roi_head.bbox_head.suface_matching_cfg:
config.model.roi_head.bbox_head.suface_matching_cfg[
'type'] = 'PointSAModule'
if config.model.roi_head.bbox_head.suface_matching_cfg.use_xyz:
config.model.roi_head.bbox_head.suface_matching_cfg.mlp_channels[
0] -= 3
if 'type' not in config.model.roi_head.bbox_head.line_matching_cfg:
config.model.roi_head.bbox_head.line_matching_cfg[
'type'] = 'PointSAModule'
if config.model.roi_head.bbox_head.line_matching_cfg.use_xyz:
config.model.roi_head.bbox_head.line_matching_cfg.mlp_channels[0] -= 3
if 'proposal_module_cfg' in config.model.roi_head.bbox_head:
config.model.roi_head.bbox_head.pop('proposal_module_cfg')
temp_file.close()
return config | 5,356,423 |
def validate_profile_info(df, fix=False):
"""
Validates the form of an information profile dataframe. An information profile dataframe must look something like this:
        pos   info   info_err
        0     0.01   0.005
        1     0.03   0.006
        2     0.006  0.008
    A 'pos' column reports the position within a sequence to which the information profile applies. The 'info' column describes the information in bits. The 'info_err' column quantifies uncertainty in this mutual information value.
    Specifications:
    0. The dataframe must have at least one row and one column.
    1. A 'pos' column is mandatory and must occur first. Values must be nonnegative integers in sequential order.
    2. An 'info' column is mandatory and must come second. Values must be finite floating-point values.
    3. An 'info_err' column is optional and must come last. Values must be finite floating-point values.
Arguments:
df (pd.DataFrame): Dataset in dataframe format
fix (bool): A flag saying whether to fix the dataframe into shape if possible.
Returns:
if fix=True:
df_valid: a valid dataframe that has been fixed by the function
if fix=False:
Nothing
Function:
        Raises a SortSeqError if the dataframe violates the specifications (if fix=False) or if these violations cannot be fixed (if fix=True).
"""
# Verify dataframe has at least one row
if not df.shape[0] >= 1:
raise SortSeqError(\
'Dataframe must contain at least one row')
# Validate column names
for col in df.columns:
if not is_col_type(col,['pos','infos']):
raise SortSeqError('Invalid column in dataframe: %s.'%col)
for col in ['pos','info']:
if not col in df.columns:
raise SortSeqError('%s column missing'%col)
# Validate contents of columns
df = _validate_cols(df,fix=fix)
# Make sure that all info values are nonnegative
info_cols = get_cols_from_df(df,'infos')
if not 'info' in info_cols:
raise SortSeqError('info column is missing.')
# Validate column order
new_cols = ['pos'] + info_cols
if not all(df.columns == new_cols):
if fix:
df = df[new_cols]
else:
raise SortSeqError(\
'Dataframe columns are in the wrong order; set fix=True to fix.')
return df | 5,356,424 |
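An illustrative call for the validator above, building the example profile from the docstring (pandas is assumed to be imported as pd, as the function's module requires):
df = pd.DataFrame({
    "pos": [0, 1, 2],
    "info": [0.01, 0.03, 0.006],
    "info_err": [0.005, 0.006, 0.008],
})
df_valid = validate_profile_info(df, fix=True)   # returns the (possibly fixed) dataframe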
def load(*names, override=True, raise_exception=False):
"""
Read the given names and load their content into this configuration module.
:param names: a varg that contains paths (str) to the conf files
that need to be read or names of environment variables with paths.
:param override: determines whether previously known configurations need to
be overridden.
:param raise_exception: Raise exception on parse failure.
:return: None.
"""
for name in names:
if not name:
warnings.warn('an empty name is not allowed')
return
fname = os.environ.get(name, name)
file_path = Path(fname)
suffix = file_path.suffix or 'default'
if not file_path.exists():
warnings.warn('conf file "%s" not found' % fname)
return
parser_module = _supported_types.get(suffix.lower(), None)
if not parser_module:
warnings.warn('cannot parse files of type "%s"' % suffix)
return
parse = importlib.import_module(parser_module).parse
with open(fname) as file:
try:
configurations = parse(file)
except Exception as err:
warnings.warn('failed to parse "%s". Reason: %s' %
(fname, err))
if raise_exception:
raise
else:
return
for key in configurations:
if override or not get(key):
setattr(conf, key, configurations[key])
globals()[key] = configurations[key]
_content[key] = configurations[key] | 5,356,425 |
def consolidate_variables(
inputs: Iterable[Tuple[core.Key, xarray.Dataset]],
merge_kwargs: Optional[Mapping[str, Any]] = None,
) -> Iterator[Tuple[core.Key, xarray.Dataset]]:
"""Consolidate chunks across distinct variables into (Key, Dataset) pairs."""
kwargs = dict(
compat='equals',
join='exact',
combine_attrs='override',
)
if merge_kwargs is not None:
kwargs.update(merge_kwargs)
chunks_by_offsets = collections.defaultdict(list)
for key, chunk in inputs:
chunks_by_offsets[key.offsets].append(chunk)
for offsets, chunks in chunks_by_offsets.items():
all_vars = [set(chunk.keys()) for chunk in chunks]
new_vars = set.union(*all_vars)
if len(new_vars) != sum(map(len, all_vars)):
raise ValueError(
f'cannot merge chunks with overlapping variables: {all_vars}'
)
key = core.Key(offsets, new_vars)
try:
dataset = xarray.merge(chunks, **kwargs)
except (ValueError, xarray.MergeError) as original_error:
repr_string = '\n'.join(repr(ds) for ds in chunks[:2])
if len(chunks) > 2:
repr_string += '\n...'
repr_string = textwrap.indent(repr_string, prefix=' ')
raise ValueError(
f'merging dataset chunks with variables {all_vars} failed.\n'
+ repr_string
) from original_error
yield key, dataset | 5,356,426 |
def test_network_netstat(network):
"""
network.netstat
"""
ret = network.netstat()
exp_out = ["proto", "local-address"]
for val in ret:
for out in exp_out:
assert out in val | 5,356,427 |
def degrees(cell):
"""Convert from radians to degress"""
return math.degrees(GetNum(cell)) | 5,356,428 |
def create_address(request):
"""
    Create a new address from the posted JSON payload.
"""
if request.method == 'POST':
data = JSONParser().parse(request)
serializer = AddressSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse({'Message': 'Address created successfully'}, status=201)
return JsonResponse(serializer.errors, status=400) | 5,356,429 |
def passthrough_build(source_path, build_path):
"""Simply copies files into the build directory without any special instructions."""
build_dir, _ = os.path.split(build_path)
shutil.rmtree(build_path)
shutil.copytree(source_path, build_path)
return | 5,356,430 |
def createPhysicalAddressDataframe(userDf):
"""
    This method creates a physical address dataframe for CDM
:param userDf: person dataframe
:type userDf: object
"""
addressColumns = [
"id as personId","city","country","officeLocation","postalCode","state","streetAddress"
]
return userDf.selectExpr(addressColumns).where(userDf.country.isNotNull()) | 5,356,431 |
def methods_cli(obj: object, exit=True) -> None:
"""Converts an object to an application CLI.
Every public method of the object that does not require arguments becomes
a command to run.
"""
stack_funcnames = {_frame_to_funcname(frame_info) for frame_info in
inspect.stack()}
# finding all public methods that do not require args
methods = []
for x in dir(obj):
method = getattr(obj, x)
# skipping non-methods
if not callable(method):
continue
# skipping private methods
if method.__name__.startswith("_"):
continue
# skipping constructor
if method.__name__ == obj.__class__.__name__:
continue
        # skipping all the functions that are actually calling this function now.
# So if an object defines .main(self) method that calls
# methods_cli(self), the .main() method will not be a command
if _func_to_funcname(method) in stack_funcnames:
continue
if signature(method).parameters: # if has args
continue
methods.append(method)
command: Optional[str] = None
if len(sys.argv) >= 2:
command = sys.argv[1].strip().replace('-', '_')
for method in methods:
if command == method.__name__:
method()
if exit:
sys.exit(0)
print(f"Usage: {os.path.basename(sys.argv[0])} COMMAND")
if command is not None:
print()
print(f"Unexpected command: '{command}'")
print()
print("Commands:")
for method in methods:
if method.__doc__:
doc = _minimize_spaces(method.__doc__)
else:
doc = ''
print(f" {method.__name__.replace('_', '-')}")
if doc:
print(textwrap.indent(textwrap.fill(doc, 60), ' ' * 4))
sys.exit(2) | 5,356,432 |
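A minimal usage sketch for methods_cli above (the App class and its methods are illustrative, not from the source):
class App:
    """Example object whose public no-argument methods become commands."""

    def build(self):
        """Compile the project."""
        print("building...")

    def run_tests(self):
        """Run the test suite."""
        print("testing...")

if __name__ == "__main__":
    # `python app.py build` runs App().build(); `python app.py run-tests`
    # runs App().run_tests(); any other argument prints the usage text.
    methods_cli(App())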
def test_to_graph_should_return_identifier() -> None:
"""It returns a standard graph isomorphic to spec."""
standard = Standard()
standard.identifier = "http://example.com/standards/1"
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix modelldcatno: <https://data.norge.no/vocabulary/modelldcatno#> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
<http://example.com/standards/1>
a dct:Standard .
"""
g1 = Graph().parse(data=standard.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2) | 5,356,433 |
def test_reproject_continuous(n=100, m=20, r=10):
"""Test pre._reprojection.reproject_continuous()."""
# Construct dummy operators.
k = 1 + r + r*(r+1)//2
D = np.diag(1 - np.logspace(-1, -2, n))
W = la.qr(np.random.normal(size=(n,n)))[0]
A = W.T @ D @ W
Ht = np.random.random((n,n,n))
H = (Ht + Ht.T) / 20
H = H.reshape((n, n**2))
B = np.random.random((n,m))
U = np.random.random((m,k))
B1d = np.random.random(n)
U1d = np.random.random(k)
basis = np.eye(n)[:,:r]
X = np.random.random((n,k))
# Try with bad initial condition shape.
with pytest.raises(ValueError) as exc:
opinf.pre.reproject_continuous(lambda x:x, basis, X[:-1,:])
assert exc.value.args[0] == \
f"states and basis not aligned, first dimension {n-1} != {n}"
# Linear case, no inputs.
def f(x):
return A @ x
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("A").fit(basis, X_, Xdot_)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
# Linear case, 1D inputs.
def f(x, u):
return A @ x + B1d * u
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X, U1d)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("AB").fit(basis, X_, Xdot_, U1d)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
assert np.allclose(rom.B_.entries.flatten(), basis.T @ B1d)
# Linear case, 2D inputs.
def f(x, u):
return A @ x + B @ u
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X, U)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("AB").fit(basis, X_, Xdot_, U)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
assert np.allclose(rom.B_.entries, basis.T @ B)
# Quadratic case, no inputs.
def f(x):
return A @ x + H @ np.kron(x,x)
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("AH").fit(basis, X_, Xdot_)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
H_ = basis.T @ H @ np.kron(basis, basis)
for _ in range(10):
x_ = np.random.random(r)
x2_ = np.kron(x_, x_)
assert np.allclose(rom.H_(x_), H_ @ x2_) | 5,356,434 |
def index():
""" Custom View """
module_name = deployment_settings.modules[module].name_nice
return dict(module_name=module_name) | 5,356,435 |
def _lm_map_func(hparams, sos_id, eos_id, prot_size):
"""Return a closure for the BDLM with the SOS/EOS ids"""
def lm_map_func(id, seq_len, seq, phyche):
prot_eye = tf.eye(prot_size)
# split characters
seq = tf.string_split([seq], delimiter="").values
# map to integers
seq = tf.cast(hparams.prot_lookup_table.lookup(seq), tf.int32)
# prepend/append SOS/EOS tokens
seq_in = tf.concat(([sos_id], seq, [eos_id]), 0)
if "filter_size" in vars(hparams):
k = hparams.filter_size
else:
k = 1
# pad zeros to phyche
phyche_pad = tf.zeros(shape=(k, hparams.num_phyche_features))
phyche = tf.concat([phyche_pad, phyche, phyche_pad], 0)
# map to one-hots
seq_in = tf.nn.embedding_lookup(prot_eye, seq_in)
seq_out = tf.nn.embedding_lookup(prot_eye, seq)
# pad zeros to match filters
if k-1 > 0:
pad = tf.zeros(shape=(k-1, prot_size))
seq_in = tf.concat([pad, seq_in, pad], 0)
return id, seq_len, seq_in, phyche, seq_out
return lm_map_func | 5,356,436 |
def isSpecificInterfaceType(t, name):
""" True if `t` is an interface type with the given name, or a forward
declaration or typedef aliasing it.
`name` must not be the name of a typedef but the actual name of the
interface.
"""
t = unaliasType(t)
return t.kind in ('interface', 'forward') and t.name == name | 5,356,437 |
def is_stem(path: Optional[str]) -> bool:
"""Check if the given path is a stem."""
if path is None:
return False
path = path.lower()
parent = str(Path(path).parent)
if parent == ".":
root, ext = os.path.splitext(path)
if ext == "":
return True
return False | 5,356,438 |
def now():
"""Returns the current time in ISO8501 format.
"""
return datetime.datetime.now().isoformat() | 5,356,439 |
def get_CF_matrix_from_parent_vector(parent, D, alpha, beta):
"""Documentation to be added."""
cell_ids = list(D.keys())
mut_ids = list(D[cell_ids[0]].keys())
children = {}
children[ROOT] = []
for mut_id in mut_ids:
children[mut_id] = []
for child_id, parent_id in parent.items():
if child_id != ROOT:
children[parent_id].append(child_id)
E = {}
for cell_id in cell_ids:
E[cell_id] = {}
for mut_id in mut_ids:
E[cell_id][mut_id] = None
for cell_id in cell_ids:
score = {}
score[ROOT] = 0
for mut_id in mut_ids:
observed = int(D[cell_id][mut_id])
if observed == 0:
score[ROOT] += math.log(1 - alpha)
if observed == 1:
score[ROOT] += math.log(alpha)
best_score = score[ROOT]
best_mut = ROOT
muts_to_visit = children[ROOT]
while len(muts_to_visit) > 0:
mut_id = muts_to_visit.pop(0)
parent_id = parent[mut_id]
score[mut_id] = score[
parent_id
] # this is only temporary. see changes below
observed = int(D[cell_id][mut_id])
if observed == 0:
score[mut_id] -= math.log(1 - alpha)
score[mut_id] += math.log(beta)
if observed == 1:
score[mut_id] -= math.log(alpha)
score[mut_id] += math.log(1 - beta)
if score[mut_id] > best_score:
best_score = score[mut_id]
best_mut = mut_id
for child_id in children[mut_id]:
muts_to_visit.append(child_id)
muts_present_in_true_genotype = []
current_mut = best_mut
while current_mut != ROOT:
muts_present_in_true_genotype.append(current_mut)
current_mut = parent[current_mut]
for mut_id in mut_ids:
if mut_id in muts_present_in_true_genotype:
E[cell_id][mut_id] = 1
else:
E[cell_id][mut_id] = 0
zero_one_flips = 0
one_zero_flips = 0
for cell_id in cell_ids:
for mut_id in mut_ids:
observed = int(D[cell_id][mut_id])
true = int(E[cell_id][mut_id])
if observed == 1 and true == 0:
one_zero_flips += 1
if observed == 0 and true == 1:
zero_one_flips += 1
# print("0_1_flips: " + str(zero_one_flips))
# print("1_0_flips: " + str(one_zero_flips))
return E | 5,356,440 |
def get_resource_path(filename: str = "") -> str:
"""
    Get the path of a file inside the 'resource' directory of the test dir:
    /path/to/resource/filename
"""
current = os.path.abspath(__file__)
current_path = os.path.dirname(current)
resource_dir = os.path.join(current_path, 'resource')
return os.path.join(resource_dir, filename) | 5,356,441 |
def esr_1_2(out_filename, source_folder, dest_folder=getcwd(),
one_hot=True, normalized=True, out_type="float", balanced_classes=False,
n_batch=None, batch_size=None, validation_size=30, validation_as_copy=False, test_size=160, save_stats=False):
"""Create a esr dataset for DENN."""
dataset_params = {
'esr_source_folder': source_folder,
'normalized': normalized,
'onehot': one_hot
}
val_action = "extract_to" if not validation_as_copy else 'random_copy_to'
actions = [
('modifier', 'simple_shuffle', (), {'target': "train"}),
('modifier', 'extract_to', ('train', 'test', test_size), {}),
('modifier', val_action, ('train', 'validation', validation_size), {}),
('modifier', 'split', ('train',), {
'batch_size': batch_size, 'n_batch': n_batch}),
('modifier', 'convert_type', (out_type,), {})
]
if balanced_classes:
for idx, (type_, action, args, kwargs) in enumerate(actions):
if action == "extract_to":
actions[idx] = (type_, 'extract_to_with_class_ratio', args, kwargs)
generator = Generator('ESR_1_2_Dataset', dataset_params, actions, out_type=out_type)
generator.execute_actions()
generator.save(out_filename, dest_folder)
if save_stats:
generator.save_stats(out_filename, dest_folder)
return generator | 5,356,442 |
def chec_to_2d_array(input_img, img_map=chec_transformation_map()):
"""
    Convert images coming from "CHEC" cameras in order to get regular 2D
"rectangular" images directly usable with most image processing tools.
Parameters
----------
    input_img : numpy.array
        The image to convert
    img_map : numpy.array, optional
        Index map used to rearrange the flat pixel array into a 2D image
Returns
-------
A numpy.array containing the cropped image.
"""
# Check the image
if len(input_img) != 2048:
        raise ValueError("The input image is not a valid CHEC camera image.")
# Copy the input flat ctapipe image and add one element with the NaN value in the end
input_img_ext = np.zeros(input_img.shape[0] + 1)
input_img_ext[:-1] = input_img[:]
input_img_ext[-1] = np.nan
# Make the output image
img_2d = input_img_ext[img_map]
return img_2d | 5,356,443 |
def noop_chew_func(_data, _arg):
"""
No-op chew function.
"""
return 0 | 5,356,444 |
async def http_session(aioresponse) -> aiohttp.ClientSession:
"""
Fixture function for a aiohttp.ClientSession.
Requests fixture aioresponse to ensure that all client sessions do not make actual requests.
"""
resolver = aiohttp.AsyncResolver()
connector = aiohttp.TCPConnector(resolver=resolver)
client_session = aiohttp.ClientSession(connector=connector)
yield client_session
await client_session.close()
await connector.close()
await resolver.close() | 5,356,445 |
def find_middle_snake_less_memory(old_sequence, N, new_sequence, M):
"""
A variant of the 'find middle snake' function that uses O(min(len(a), len(b)))
memory instead of O(len(a) + len(b)) memory. This does not improve the
worst-case memory requirement, but it takes the best case memory requirement
down to near zero.
"""
MAX = N + M
Delta = N - M
V_SIZE=2*min(M,N) + 2
Vf = [None] * V_SIZE
Vb = [None] * V_SIZE
Vf[1] = 0
Vb[1] = 0
for D in range(0, (MAX//2+(MAX%2!=0)) + 1):
for k in range(-(D - 2*max(0, D-M)), D - 2*max(0, D-N) + 1, 2):
if k == -D or k != D and Vf[(k - 1) % V_SIZE] < Vf[(k + 1) % V_SIZE]:
x = Vf[(k + 1) % V_SIZE]
else:
x = Vf[(k - 1) % V_SIZE] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
Vf[k % V_SIZE] = x
inverse_k = (-(k - Delta))
if (Delta % 2 == 1) and inverse_k >= -(D -1) and inverse_k <= (D -1):
if Vf[k % V_SIZE] + Vb[inverse_k % V_SIZE] >= N:
return 2 * D -1, x_i, y_i, x, y
for k in range(-(D - 2*max(0, D-M)), (D - 2*max(0, D-N)) + 1, 2):
if k == -D or k != D and Vb[(k - 1) % V_SIZE] < Vb[(k + 1) % V_SIZE]:
x = Vb[(k + 1) % V_SIZE]
else:
x = Vb[(k - 1) % V_SIZE] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[N - x -1] == new_sequence[M - y - 1]:
x = x + 1
y = y + 1
Vb[k % V_SIZE] = x
inverse_k = (-(k - Delta))
if (Delta % 2 == 0) and inverse_k >= -D and inverse_k <= D:
if Vb[k % V_SIZE] + Vf[inverse_k % V_SIZE] >= N:
return 2 * D, N - x, M - y, N - x_i, M - y_i | 5,356,446 |
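A quick sanity check of the middle-snake search above, using the classic example sequences from Myers' paper:
old = "ABCABBA"
new = "CBABAC"
d, x_start, y_start, x_end, y_end = find_middle_snake_less_memory(old, len(old), new, len(new))
# d is the length of the shortest edit script (5 for this pair), and the two
# coordinate pairs delimit a snake lying on an optimal D-path.
print(d, (x_start, y_start), (x_end, y_end))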
def music_pos(style_name, st, at):
"""
Returns the track position to Ren'Py.
"""
global time_position
if music.get_pos(channel="music_room") is not None:
time_position = music.get_pos(channel="music_room")
readableTime = convert_time(time_position)
d = Text(readableTime, style=style_name)
return d, 0.20 | 5,356,447 |
def get_time(data=None):
"""Receive a dictionary or a string and return a datatime instance.
data = {"year": 2006,
"month": 11,
"day": 21,
"hour": 16,
"minute": 30 ,
"second": 00}
    or
    data = "2018-04-17T17:13:50"
Args:
data (str, dict): python dict or string to be converted to datetime
Returns:
datetime: datetime instance.
"""
if isinstance(data, str):
date = datetime.strptime(data, "%Y-%m-%dT%H:%M:%S")
elif isinstance(data, dict):
date = datetime(**data)
else:
return None
return date.replace(tzinfo=timezone.utc) | 5,356,448 |
def set_clock(child, timestamp=None):
"""Set the device's clock.
:param pexpect.spawn child: The connection in a child application object.
:param datetime timestamp: A datetime tuple (year, month, day, hour, minute, second).
:returns: The updated connection in a child application object.
:rtype: pexpect.spawn
"""
if not timestamp:
timestamp = datetime.utcnow()
child.sendline("clock set {0}\r".format(timestamp.strftime("%H:%M:%S %d %b %Y")))
child.expect_exact("{0}, configured from console by console".format(timestamp.strftime("%H:%M:%S UTC %a %b %d %Y")))
return child | 5,356,449 |
def import_databases(from_release, to_release, from_path=None, simplex=False):
""" Imports databases. """
devnull = open(os.devnull, 'w')
if not from_path:
from_path = POSTGRES_DUMP_MOUNT_PATH
from_dir = os.path.join(from_path, "upgrade")
LOG.info("Importing databases")
try:
# Do postgres schema import (suppress stderr due to noise)
subprocess.check_call(['sudo -u postgres psql -f ' + from_dir +
'/postgres.sql.config postgres'],
shell=True,
stdout=devnull,
stderr=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to import schemas.")
raise
import_commands = []
# Do postgres data import
for data in glob.glob(from_dir + '/*.sql.data'):
db_elem = data.split('/')[-1].split('.')[0]
import_commands.append((db_elem,
"sudo -u postgres psql -f " + data +
" " + db_elem))
# Import VIM data
if not simplex:
import_commands.append(
("nfv-vim",
"nfv-vim-manage db-load-data -d %s -f %s" %
(os.path.join(PLATFORM_PATH, 'nfv/vim', SW_VERSION),
os.path.join(from_dir, 'vim.data'))))
# Execute import commands
for cmd in import_commands:
try:
print("Importing %s" % cmd[0])
LOG.info("Executing import command: %s" % cmd[1])
subprocess.check_call([cmd[1]],
shell=True, stdout=devnull)
except subprocess.CalledProcessError as ex:
LOG.exception("Failed to execute command: '%s' during upgrade "
"processing, return code: %d" %
(cmd[1], ex.returncode))
raise | 5,356,450 |
def feature_structure(string, case, intr=False):
"""Convert person-number string to a single feature structure.
Examples:
>>> feature_structure('1s', 'Nom', True)
['Nom', '+1', '-2', '-3', '+sg', '-pl', '+intr']
>>> feature_structure('2p', 'Abs', True)
['Abs', '-1', '+2', '-3', '-sg', '+pl', '+intr']
>>> feature_structure('3d', 'Erg')
['Erg', '-1', '-2', '+3', '-sg', '-pl']
>>> feature_structure('1pi', 'Nom')
['Nom', '+1', '+2', '-3', '-sg', '+pl']
"""
first = '{}1'.format(value('1' in string))
second = '{}2'.format(value('2' in string or 'i' in string))
third = '{}3'.format(value('3' in string))
sg = '{}sg'.format(value('s' in string))
pl = '{}pl'.format(value('p' in string))
struct = [case, first, second, third, sg, pl]
if intr:
struct.append('+intr')
return struct | 5,356,451 |
def repo_description(gurl, owner, repo):
"""
Returns: (status_code, status_text, data)
data = {"created_at": date, "description": str,
"stargazers_count": int, "subscribers_count": int}
"""
res = "/repos/{}/{}".format(owner, repo)
response = gurl.request(funcs.get_hub_url(res))
code = response.code
json = response.json
data = {}
if code == 304:
json = response.cached_response.json
if (code in (200, 304)) and json:
data["description"] = json["description"]
date = json["created_at"]
data["created_at"] = _badass_iso_8601_date_parser(date)
data["stargazers_count"] = json["stargazers_count"]
data["subscribers_count"] = json["subscribers_count"]
return *response.status, data | 5,356,452 |
def check_omf_version(file_version):
"""Validate file version compatibility against the current OMF version
This logic may become more complex with future releases.
"""
if file_version is None:
return True
return file_version == OMF_VERSION | 5,356,453 |
def create_ks_scheduled_constant_graph_ops(
graph: tf_compat.Graph,
global_step: tf_compat.Variable,
var_names: List[str],
begin_step: int,
end_step: int,
ks_group: str,
) -> Tuple[tf_compat.Tensor, List[PruningOpVars]]:
"""
Creates constant model pruning ops. Does not modify the graph.
:param graph: the tf graph to pull the operator out of for applying the pruning to
:param global_step: the global optimizer step for the training graph
:param var_names: a list of names or regex patterns to create constant ops
for within the graph
:param begin_step: the global step to begin pruning at
:param end_step: the global step to end pruning at
:param ks_group: the group identifier the scope should be created under
:return: a tuple containing the update operation to run in a session,
a list of the pruning ops and vars for each desired op in the graph
"""
pruning_op_vars = []
is_start_step = tf_compat.equal(global_step, begin_step)
is_end_step = tf_compat.equal(global_step, end_step)
for op, op_input in get_ops_and_inputs_by_name_or_regex(var_names, graph):
op_vars = create_constant_op_pruning(
op, op_input, is_start_step, is_end_step, ks_group
)
pruning_op_vars.append(op_vars)
update_op = get_scheduled_update_op(pruning_op_vars, ks_group)
return update_op, pruning_op_vars | 5,356,454 |
def findImages(dataPath):
"""
Finds all the images needed for training on the path `dataPath`.
Returns `([centerPaths], [leftPath], [rightPath], [measurement])`
"""
directories = [x[0] for x in os.walk(dataPath)]
dataDirectories = list(filter(lambda directory: os.path.isfile(directory + '/driving_log.csv'), directories))
centerTotal = []
leftTotal = []
rightTotal = []
measurementTotal = []
for directory in dataDirectories:
lines = getLinesFromDrivingLogs(directory)
center = []
left = []
right = []
measurements = []
for line in lines:
measurements.append(float(line[3]))
center.append(directory + '/IMG/' + ntpath.basename(line[0].strip()))
left.append(directory + '/IMG/' + ntpath.basename(line[1].strip()))
right.append(directory + '/IMG/' + ntpath.basename(line[2].strip()))
centerTotal.extend(center)
leftTotal.extend(left)
rightTotal.extend(right)
measurementTotal.extend(measurements)
return (centerTotal, leftTotal, rightTotal, measurementTotal) | 5,356,455 |
def insert(user_job_id: ObjectId, classifier: str, fastq_path: str, read_type: str or None = None) -> ObjectId:
"""
Insert a new ClassificationJob into the collection.
:param user_job_id: Which UserJob is associated with this ClassificationJob
:param classifier: The classifier to use
    :param fastq_path: The input fastq file to read from
    :param read_type: Optional read type to record with the job
    :return: The ObjectId of the ClassificationJob added
"""
queue_position = -1
if read_type is None:
to_insert = dict(user_job_id=user_job_id, classifier=classifier, fastq_path=fastq_path,
queue_position=queue_position, status=JobStatus.QUEUED)
return controllers.insert_one(collection=SchemaLoader.CLASSIFICATION_JOB, data=to_insert)
else:
to_insert = dict(user_job_id=user_job_id, classifier=classifier, fastq_path=fastq_path, read_type=read_type,
queue_position=queue_position, status=JobStatus.QUEUED)
return controllers.insert_one(collection=SchemaLoader.CLASSIFICATION_JOB, data=to_insert) | 5,356,456 |
def as_columns(
things: List[Union[SheetColumn, list, tuple, set, str]]
) -> List[SheetColumn]:
"""A list of each thing as a SheetColumn"""
result = []
for thing in things:
if isinstance(thing, SheetColumn):
sheet_column = thing
elif isinstance(thing, (list, tuple, set)):
sheet_column = SheetColumn(*thing)
else:
sheet_column = SheetColumn(thing)
result.append(sheet_column)
return result | 5,356,457 |
def generator_path(x_base, y_base):
"""[summary]
use spline 2d get path
"""
sp2d = Spline2D(x_base, y_base)
res = []
for i in np.arange(0, sp2d.s[-1], 0.1):
x, y = sp2d.calc_position(i)
yaw = sp2d.calc_yaw(i)
curvature = sp2d.calc_curvature(i)
res.append([x, y, yaw, curvature])
return res | 5,356,458 |
def test_model_recommend_food(model, two_person_data):
"""
Test the random selection of the food
"""
model.add_data(two_person_data)
assert type(model.recommend('Jason')) == str | 5,356,459 |
def index(request):
"""Render site index page."""
return {} | 5,356,460 |
def bellman_ford_with_term_status(graph, is_multiplicative=False):
"""
    An implementation of the Bellman-Ford algorithm with optional multiplication-based
    distances and early-termination detection.
    :param: graph - The graph on which to operate. Should be a square matrix, where edges that don't
                    exist have value None
    :param: is_multiplicative - If this is True, performs multiplication-based Bellman-Ford, where
                                the distances between two nodes are based on the smallest PRODUCT of
                                the edge weights between them. If it is False, then performs
                                addition-based Bellman-Ford, where the distances between two nodes
                                are based on the SUM of the edge weights between them.
    :return: a tuple of the distance array output from the Bellman-Ford algorithm, the predecessor
             array for reconstructing paths, and a boolean indicating whether the relaxation
             terminated early (converged before the final iteration)
"""
# print '[{0}] Entered Bellman Ford'.format(multiprocessing.current_process().pid)
operator = (lambda x, y: x * y) if is_multiplicative else (lambda x, y: x + y)
# Create a distance array with value infinity
distance = np.zeros(len(graph)).astype(np.float128)
distance.fill(float('inf'))
distance[0] = 1.0 if is_multiplicative else 0.0
prev_distance = list(distance)
# Create a predecessor array with value None
predecessor = np.zeros(len(graph))
predecessor.fill(-1)
# print '[{0}] Initialized Bellman Ford'.format(multiprocessing.current_process().pid)
for _ in range(len(graph) - 1):
# Iterate through all the vertices
for i, node_a_weights in enumerate(graph):
for j, weight in enumerate(node_a_weights):
if weight is None:
continue
new_dist = operator(distance[i], weight)
if new_dist - distance[j] < -1.0e-8: # Accounts for floating-pt error.
distance[j] = new_dist
predecessor[j] = i
# Check for early termination
if np.all(distance == prev_distance):
return distance, predecessor, True
prev_distance = list(distance)
return distance, predecessor, False | 5,356,461 |
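A small worked example for the routine above (graph values are arbitrary; None marks a missing edge):
# 0 -> 1 (4), 0 -> 2 (1), 2 -> 1 (2), 1 -> 3 (1)
graph = [
    [None, 4,    1,    None],
    [None, None, None, 1],
    [None, 2,    None, None],
    [None, None, None, None],
]
dist, pred, converged = bellman_ford_with_term_status(graph)
# Additive distances from node 0 are [0, 3, 1, 4]; pred reconstructs 0 -> 2 -> 1 -> 3,
# and converged is True because the relaxation stabilised before the last iteration.
print(dist, pred, converged)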
def decoder(z, rnn, batch_size, state=None, n_dec=64, reuse=None):
"""Summary
Parameters
----------
z : TYPE
Description
rnn : TYPE
Description
batch_size : TYPE
Description
state : None, optional
Description
n_dec : int, optional
Description
reuse : None, optional
Description
Returns
-------
name : TYPE
Description
"""
with tf.variable_scope('decoder', reuse=reuse):
if state is None:
h_dec, state = rnn(z, rnn.zero_state(batch_size, tf.float32))
else:
h_dec, state = rnn(z, state)
return h_dec, state | 5,356,462 |
def extract_locus_with_pyfaidx(blast_outformat_6_file, query_id, full_path_to_assembly, full_path_to_output, left_buffer = left_buffer, right_buffer = right_buffer):
"""The purpose of this function is to extract the regions from each contig of interest."""
#collects regions from blastout file
result = parse_outformat_six(blast_outformat_6_file, query_id)
if result != 0:
output_header = f"{get_basename(full_path_to_assembly)}_{query_id}"
output_file_name = f"{full_path_to_output}/{output_header}.tmp.fasta"
output = buffer_corrector(full_path_to_assembly, result[0], result[1], result[2], left_buffer, right_buffer) #result[0] = stored pyfaidx contig; result[1] = start_position; result[2] = end_position
extracted_sequence = Fasta(full_path_to_assembly)[result[0]][output[0]:output[1]]
if adjust_coordinates(result[1], result[2])[0] == "Reverse complement":
extracted_sequence = Fasta(full_path_to_assembly)[result[0]][output[0]:output[1]].reverse.complement
else:
pass
with open(output_file_name, "w") as file:
file.write(f">{output_header}\n")
file.write(f"{extracted_sequence.seq}\n")
file.close() | 5,356,463 |
def parse_file(filename):
"""Parses the file containing the db schema
Key Arguments:
filename - the file to parse"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
db = {}
for line in lines:
s_line = line.split('\t')
if s_line[0] == 'TABLE_CATALOG':
continue
if s_line[1] in db:
db[s_line[1]].append(s_line[2])
else:
db[s_line[1]] = [s_line[2]]
return db | 5,356,464 |
def only_sig(row_a,row):
"""Returns only significant events"""
if(row_a[-1] != '-' and row_a[-1] != 0.0 and row_a[-1] <= 0.05):
row = row[0].split('_') + row[2:]
row.insert(2, 'A.to.G')
print '\t'.join(map(str,row)) | 5,356,465 |
def est_L(sample_list, est_method, bandwidth = 0.5):
"""Estimate L from a list of samples.
Parameter
------------------------------------------
sample_list: list
a list of samples for arm i at time t
est_method: str
can be one of the choice of 'kde', 'naive'
'kde': kernel density estimation
            'naive': count the samples inside [0, dt]
"""
if est_method == 'kde':
kde = KernelDensity(kernel='tophat', bandwidth=bandwidth).fit(np.asarray(sample_list)[:, np.newaxis])
log_den_0 = kde.score_samples(np.asarray([0])[:, np.newaxis])
estL = np.exp(log_den_0)[0]
elif est_method == 'naive':
sorted_data = np.asarray(sorted(sample_list))
estL = len(sorted_data[sorted_data <= bandwidth])/len(sorted_data)
#if len(sample_list) ==1 or estL == 0:
# TODO: init value
# L = 0.01
else:
        print('Unknown estimation method.')
return estL | 5,356,466 |
def columnpicker(
frame="None",
track_id="None",
x_coordinates="None",
y_coordinates="None",
z_coordinates="None",
measurment="None",
second_measurment="None",
field_of_view_id="None",
additional_filter="None",
measurement_math="None",
Ok=False,
):
"""Dialog with magicgui for selecting columns"""
columnpicker.Ok.bind(not Ok) | 5,356,467 |
def create_test_area(test_tiles):
"""Create geometry from test images
Parameters
----------
test_tiles : list
        List of test tile names
Returns
-------
GeoPandas DataFrame
all test images merged into a GeoDataFrame
"""
multipolygon = ogr.Geometry(ogr.wkbMultiPolygon)
for name in test_tiles:
TileX, TileY, zoom = parse_tile_name(name)
polygon = geometry_from_tile_coords(TileX, TileY, zoom)
multipolygon.AddGeometry(polygon)
multipolygon.FlattenTo2D()
test_area = gpd.read_file(multipolygon.ExportToJson())
test_area.to_file("predictions/area_extent.geojson")
test_area = test_area.explode()
return test_area | 5,356,468 |
def check_token(token) -> bool:
"""Check ReCaptcha token
Args:
token
Returns:
bool
"""
if os.getenv("CI"):
return True
url = "https://www.google.com/recaptcha/api/siteverify"
secret_key = os.getenv("RECAPTCHA_SECRET_KEY")
payload = {
"secret": secret_key,
"response": token,
}
response = requests.post(url, data=payload)
return response.json()["success"] and response.json()["score"] >= 0.5 | 5,356,469 |
def covSEard(x,
z,
ell,
sf2
):
"""GP squared exponential kernel.
This function is based on the 2018 GP-MPC library by Helge-André Langåker
Args:
x (np.array or casadi.MX/SX): First vector.
z (np.array or casadi.MX/SX): Second vector.
ell (np.array or casadi.MX/SX): Length scales.
sf2 (float or casadi.MX/SX): output scale parameter.
Returns:
SE kernel (casadi.MX/SX): SE kernel.
"""
dist = ca.sum1((x - z)**2 / ell**2)
return sf2 * ca.SX.exp(-.5 * dist) | 5,356,470 |
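For reference, a plain NumPy version of the same ARD squared-exponential kernel, useful for cross-checking values produced by the casadi implementation above (inputs are arbitrary):
import numpy as np

def cov_se_ard_np(x, z, ell, sf2):
    # k(x, z) = sf2 * exp(-0.5 * sum_i (x_i - z_i)^2 / ell_i^2)
    return sf2 * np.exp(-0.5 * np.sum((x - z) ** 2 / ell ** 2))

x = np.array([1.0, 2.0])
z = np.array([1.5, 1.0])
print(cov_se_ard_np(x, z, ell=np.array([0.5, 2.0]), sf2=1.3))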
def make_devid(identity):
"""
Generate device ID from device identity data trying to follow the same
logic as devauth does. Returns a string containing device ID.
"""
d = SHA256.new()
# convert to binary as needed
bid = identity if type(identity) is bytes else identity.encode()
d.update(bid)
return binascii.b2a_hex(d.digest()).decode() | 5,356,471 |
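A short usage sketch for make_devid above (the identity payload is illustrative):
import json

identity = json.dumps({"mac": "de:ad:be:ef:00:01", "sku": "A1"}, sort_keys=True)
dev_id = make_devid(identity)
print(dev_id)   # 64 hex characters: the SHA-256 digest of the identity data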
def _save(maximum_distance):
"""
Save the maximum distance to the file.
:param maximum_distance: Maximum distance to save.
:return: Maximum distance saved.
"""
log.info(f'Saving maximum distance: [{maximum_distance}]')
with open(FILE_NAME_MAXIMUM_DISTANCE, 'w') as file:
file.write(str(maximum_distance))
log.info('Done!') | 5,356,472 |
def get_recomm_products(user_id: str) -> List[Text]:
"""
Gets the top 10 products the user is most likely to purchase.
:returns: List of product ids.
"""
instances_packet = {
"instances": [user_id]
}
prediction = aiplatform_recomm_endpoint.predict(instances=instances_packet)
return prediction[0][0]["output_2"] | 5,356,473 |
def test_generate_f_file(monkeypatch, mock_broker_config_paths):
"""A CSV with fields in the right order should be written to the file
system"""
fileF_mock = Mock()
monkeypatch.setattr(jobQueue, 'fileF', fileF_mock)
fileF_mock.generateFRows.return_value = [
dict(key4='a', key11='b'), dict(key4='c', key11='d')
]
fileF_mock.mappings = OrderedDict(
[('key4', 'mapping4'), ('key11', 'mapping11')])
file_path = str(mock_broker_config_paths['broker_files'].join('uniq1'))
expected = [['key4', 'key11'], ['a', 'b'], ['c', 'd']]
jobQueue.generate_f_file(1, 1, Mock(), 'uniq1', 'uniq1', is_local=True)
assert read_file_rows(file_path) == expected
# re-order
fileF_mock.mappings = OrderedDict(
[('key11', 'mapping11'), ('key4', 'mapping4')])
file_path = str(mock_broker_config_paths['broker_files'].join('uniq2'))
expected = [['key11', 'key4'], ['b', 'a'], ['d', 'c']]
jobQueue.generate_f_file(1, 1, Mock(), 'uniq2', 'uniq2', is_local=True)
assert read_file_rows(file_path) == expected | 5,356,474 |
def relu(Z):
"""
:param Z: -- the linear output in this layer
:return:
A -- the activation output in this layer
activation_cache -- a dictionary contains Z and A
"""
[m, n] = Z.shape
A = np.zeros((m,n))
for i in range(m):
for j in range(n):
if Z[i][j] < 0:
A[i][j] = 0
else:
A[i][j] = Z[i][j]
activation_cache = dict()
activation_cache["Z"] = Z
activation_cache["A"] = A
return A, activation_cache | 5,356,475 |
async def shutdown_tasks(app: 'Application') -> 'None':
"""Shutdown unfinished async tasks.
:param app: web server application
"""
log.info('Shutdown tasks')
tasks = asyncio.Task.all_tasks(loop=app.loop)
if tasks:
for task in tasks:
task.cancel()
try:
await asyncio.wait(tasks)
except Exception:
pass | 5,356,476 |
def month(x: pd.Series) -> pd.Series:
"""
Month of each value in series
:param x: time series
:return: month of observations
**Usage**
Returns the month as a numeric value for each observation in the series:
:math:`Y_t = month(t)`
Month of the time or date is the integer month number, e.g. 1-12
**Examples**
Day for observations in series:
>>> series = generate_series(100)
>>> days = month(series)
**See also**
:func:`day` :func:`year`
"""
return pd.to_datetime(x.index.to_series()).dt.month | 5,356,477 |
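# Example usage of month (a sketch with a small hand-made series instead of generate_series):
import pandas as pd
s = pd.Series([1.0, 2.0], index=pd.to_datetime(["2021-01-15", "2021-02-20"]))
print(month(s))  # 1 for the January observation, 2 for the February one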
def test_passwd_lib_file(topology):
"""
Ensure library file is located at '/usr/lib/'
Using bash shell from the switch
1. Open bash shell for OpenSwitch instance
2. Run command to verify a shared object exists in the filesystem
```bash
stat --printf="%U %G %A\n" /usr/lib/libpasswd_srv.so.0.1.0
```
3. Make sure a file exists in the filesystem
"""
ops1 = topology.get('ops1')
comm = "stat --printf=\"%U %G %A\n\" /usr/lib/libpasswd_srv.so.0.1.0"
matches = ['root root -rwxr-xr-x']
assert ops1 is not None
print("Get bash shell")
bash_shell = ops1.get_shell('bash')
print("Execute shell command")
    assert bash_shell.send_command(comm, matches) == 0
print("libpasswd_srv.so file is installed as expected")
print("Test test_passwd_lib_file PASSED") | 5,356,478 |
def example_metrics_postprocessor_fn(
samples: typing.List[metrics_pb2.MetricFamily]
) -> None:
"""
An example metrics postprocessor function for MetricsCollector
A metrics postprocessor function can mutate samples before they are sent out
to the metricsd cloud service. The purpose of this is usually to add labels
to the metrics, though it is also possible to add, remove, or change the
value of samples (though you probably shouldn't).
Uncaught exceptions will crash the server, so if you are doing anything
non-trivial, consider wrapping in a try/catch and figuring out whether a
failure is fatal
(whether you are willing to accept malformed/unprocessed stats).
You are guaranteed that samples will only be run through this function once
(though retries can cause delays between when this is run on samples and
when it makes it the cloud).
"""
failed = 0
for family in samples:
for sample in family.metric:
try:
sample.label.add(name="new_label", value="foo")
except Exception: # pylint: disable=broad-except
# This operation is trivial enough that it probably shouldn't
# be caught, but this is for example purposes. It would be a
# bad idea to log per sample, because you could have thousands
failed += 1
if failed:
logging.error("Failed to add label to %d samples!", failed) | 5,356,479 |
def _library_from_nglims(gi, sample_info, config):
"""Retrieve upload library from nglims specified user libraries.
"""
names = [config.get(x, "").strip() for x in ["lab_association", "researcher"]
if config.get(x)]
check_names = set([x.lower() for x in names])
for libname, role in config["private_libs"]:
        # Try to find a library for the lab or researcher
if libname.lower() in check_names:
return _get_library_from_name(gi, libname, role, sample_info)
# default to first private library if available
if len(config.get("private_libs", [])) > 0:
libname, role = config["private_libs"][0]
return _get_library_from_name(gi, libname, role, sample_info)
# otherwise use the lab association or researcher name
elif len(names) > 0:
return _get_library_from_name(gi, names[0], None, sample_info, create=True)
else:
raise ValueError("Could not find Galaxy library for sample %s" % sample_info["description"]) | 5,356,480 |
async def test_deploy(ops_test: OpsTest, charm: str, series: str):
"""Deploy the charm-under-test.
Assert on the unit status before any relations/configurations take place.
"""
# Set a composite application name in order to test in more than one series at the same time.
application_name = f"{APP_NAME}-{series}"
# Deploy the charm with Patroni resource.
resources = {"patroni": "patroni.tar.gz"}
await ops_test.model.deploy(
charm, resources=resources, application_name=application_name, series=series
)
# Attach the resource to the controller.
await ops_test.juju("attach-resource", application_name, "patroni=patroni.tar.gz")
# Issuing dummy update_status just to trigger an event.
await ops_test.model.set_config({"update-status-hook-interval": "10s"})
await ops_test.model.wait_for_idle(apps=[application_name], status="active", timeout=1000)
assert ops_test.model.applications[application_name].units[0].workload_status == "active"
# Effectively disable the update status from firing.
await ops_test.model.set_config({"update-status-hook-interval": "60m"}) | 5,356,481 |
def get_commandline_parser():
"""it parses commandline arguments."""
parser = argparse.ArgumentParser(description='Toolpath generator.')
    parser.add_argument('--stl-filepath', help='Filepath of the STL file.')
parser.add_argument('--diameter', help='Diameter of toolbit.')
parser.add_argument('--step-size', help='Step size of the CNC machine.')
parser.add_argument('--feed-rate', help='Feed rate of CNC machine.')
parser.add_argument('--calculate-time', help='Flag to print time.',
type=bool)
return parser | 5,356,482 |
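# Example usage of get_commandline_parser (a sketch; the file name and numbers are made up):
parser = get_commandline_parser()
args = parser.parse_args(['--stl-filepath', 'part.stl',
                          '--diameter', '3.0',
                          '--step-size', '0.1',
                          '--feed-rate', '1200'])
print(args.stl_filepath, args.diameter, args.step_size, args.feed_rate)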
def repack_folder(sourcedir, dst):
"""Pack sourcedir into dst .zip archive"""
if '7z' in UTIL_EXE:
# a: Add files to archive
# -tzip: "zip" Type archive
# -mx9: compression Method x9 (max)
args = [UTIL_EXE.get('7z', None)]
if args[0] is None:
return
args.extend(['a', '-tzip', dst, osp.join(sourcedir,'*'), '-mx9'])
subprocess.call(args, stdout=subprocess.PIPE)
else:
root = osp.abspath(sourcedir)
with zipfile.ZipFile(dst, 'w', zipfile.ZIP_DEFLATED) as zf:
for dirname, subdirs, files in os.walk(sourcedir):
zf.write(dirname, osp.relpath(dirname, root))
for filename in files:
arcname = osp.join(osp.relpath(dirname, root), filename)
zf.write(osp.join(dirname, filename), arcname) | 5,356,483 |
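# Example usage of repack_folder (a sketch; the paths are made up and UTIL_EXE is the
# module-level tool lookup assumed by the function above):
repack_folder('build/output', 'dist/output.zip')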
def con_isogonal(cos0,assign=False,**kwargs):
"""
keep tangent crossing angle
X += [lt1,lt2, ut1,ut2, cos]
(ue1-ue3) = lt1 * ut1, ut1**2 = 1
(ue2-ue4) = lt2 * ut2, ut2**2 = 1
ut1 * ut2 = cos
if assign:
cos == cos0
"""
w = kwargs.get('isogonal')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
N6 = kwargs.get('N6')
num = mesh.num_regular
arr = np.arange(num)
c_l1 = N6-8*num-1 + arr
c_l2 = c_l1+num
c_ut1 = columnnew(arr,N6-6*num-1,num)
c_ut2 = columnnew(arr,N6-3*num-1,num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_ue1,c_ue3,c_l1,c_ut1,num,N)
H2,r2 = con_edge(X,c_ue2,c_ue4,c_l2,c_ut2,num,N)
Hu1,ru1 = con_unit(X,c_ut1,num,N)
Hu2,ru2 = con_unit(X,c_ut2,num,N)
Ha,ra = con_constangle2(X,c_ut1,c_ut2,N6-1,num,N)
H = sparse.vstack((H1,H2,Hu1,Hu2,Ha))
r = np.r_[r1,r2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N6-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal')
#print('err:isogonal:',np.sum(np.square(H*X-r)))
return H*w,r*w | 5,356,484 |
def download_is_complete(date):
"""
Has the process of downloading prescribing data for this date finished
successfully?
"""
return os.path.exists(local_storage_prefix_for_date(date) + SENTINEL_SUFFIX) | 5,356,485 |
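# Example usage of download_is_complete (a sketch; assumes local_storage_prefix_for_date
# and SENTINEL_SUFFIX are defined elsewhere in this module and that a datetime.date is
# an acceptable argument):
from datetime import date
if not download_is_complete(date(2020, 1, 1)):
    print("Prescribing data for 2020-01 still needs to be downloaded")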
def tf_apply_with_probability(p, fn, x):
"""Apply function `fn` to input `x` randomly `p` percent of the time."""
return tf.cond(
tf.less(tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32), p),
lambda: fn(x),
lambda: x) | 5,356,486 |
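# Example usage of tf_apply_with_probability (a sketch; assumes TensorFlow 1.x graph
# mode, matching the tf.random_uniform / tf.cond calls above):
x = tf.constant(2.0)
y = tf_apply_with_probability(0.75, lambda v: v * 10.0, x)
with tf.Session() as sess:
    print(sess.run(y))  # 20.0 about 75% of the time, otherwise 2.0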
def train_and_eval():
"""Train and evaluate the model."""
m = build_estimator(model_dir)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=1, num_threads=1, shuffle=True),
steps=2000)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, num_threads=1, shuffle=True),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
predictions = m.predict(input_fn=input_fn(test_file_name, num_epochs=1, num_threads=1, shuffle=False))
for p in predictions:
pass | 5,356,487 |
def test_probabilities_gridworld(size=5):
"""
Check transition-probabilities for GridWorld
Args:
size: The size of the world to be used for testing.
"""
check_zero_probabilities(gridworld.GridWorld(size)) | 5,356,488 |
def adjoin(x,seq,test=lambda x,y: x is y):
"""Tests whether item is the same as an existing element of list. If the
item is not an existing element, adjoin adds it to list (as if by cons) and
returns the resulting list; otherwise, nothing is added and the original
list is returned. """
return seq if any(map(functools.partial(test,x),seq)) else cons(x,seq) | 5,356,489 |
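# Example usage of adjoin (a sketch; assumes cons(x, seq) prepends x to seq, in the
# Lisp style the docstring refers to):
nums = [1, 2, 3]
print(adjoin(2, nums, test=lambda a, b: a == b))  # [1, 2, 3]  (already present)
print(adjoin(4, nums, test=lambda a, b: a == b))  # cons(4, [1, 2, 3])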
def list_users(ctx):
"""List all users"""
iam = boto3.client('iam', aws_access_key_id=ctx.obj['access_key'],
aws_secret_access_key=ctx.obj['secret_key'])
now = datetime.datetime.now(datetime.timezone.utc)
print("{0:20}|{1:15}|{2:10}|{3}".format("Name", "Age", "Groups", "Keys"))
for response in iam.get_paginator('list_users').paginate():
for user in response.get('Users'):
keys = "";
for response in iam.get_paginator('list_access_keys').paginate(
UserName=user.get('UserName')):
for key in response.get('AccessKeyMetadata'):
if (len(keys) > 0):
keys += ","
keys += key.get('AccessKeyId')
if (key.get('Status') == 'Inactive'):
keys += "(Inactive)"
groups = iam.list_groups_for_user(UserName=user.get('UserName'))
print("{0:20} {1:15} {2:10} {3}".format(user.get('UserName'),
displ_age(user.get('CreateDate')),
",".join([g['GroupName'] for g in groups['Groups']]),
keys)) | 5,356,490 |
def get_service_logs(
project_id: str = PROJECT_ID_PARAM,
service_id: str = SERVICE_ID_PARAM,
lines: Optional[int] = Query(None, description="Only show the last n lines."),
since: Optional[datetime] = Query(
None, description="Only show the logs generated after a given date."
),
component_manager: ComponentManager = Depends(get_component_manager),
token: str = Depends(get_api_token),
) -> Any:
"""Returns the stdout/stderr logs of the service."""
component_manager.verify_access(
token, f"projects/{project_id}/services/{service_id}/logs", AccessLevel.WRITE
)
service_id, extension_id = parse_composite_id(service_id)
return component_manager.get_service_manager(extension_id).get_service_logs(
project_id, service_id, lines, since
) | 5,356,491 |
def alloc_emergency_exception_buf():
"""stub method.""" | 5,356,492 |
def uniquify_contacts(contacts):
"""
Return a sequence of contacts with all duplicates removed.
If any duplicate names are found without matching numbers, an exception is raised.
"""
ctd = {}
for ct in contacts:
stored_ct = ctd.setdefault(ct.name, ct)
if stored_ct.dmrid != ct.dmrid:
raise RuntimeError(
"Two contacts named {} have different IDs: {} {}".format(
ct.name, ct.dmrid, stored_ct.dmrid
)
)
return list(ctd.values()) | 5,356,493 |
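# Example usage of uniquify_contacts (a sketch; Contact here is a minimal stand-in
# record with .name and .dmrid attributes, not the project's real contact class):
from collections import namedtuple
Contact = namedtuple("Contact", ["name", "dmrid"])
contacts = [Contact("Alice", 1), Contact("Alice", 1), Contact("Bob", 2)]
print(uniquify_contacts(contacts))   # duplicates of "Alice" collapse to one entry
# A duplicate name with a different dmrid raises RuntimeError instead.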
def optimize_bank_transaction_list(bank_transactions):
"""Append related objects using select_related and prefetch_related"""
return bank_transactions.select_related('block') | 5,356,494 |
def store_client(rc):
"""Context manager for file storage
Parameters
----------
rc : RunControl
Yields
-------
client : StorageClient
The StorageClient instance
"""
store = find_store(rc)
path = storage_path(store, rc)
sync(store, path)
yield StorageClient(rc, store, path)
push(store, path) | 5,356,495 |
def rss():
""" RSS2 Support.
support xml for RSSItem with 12 diaries.
Args:
none
Return:
diaries_object: list
site_settings: title, link, description
"""
articles = Diary.objects.order_by('-publish_time')[:12]
items = []
for article in articles:
content = article.html
url = Config.SITE_URL + '/diary/' + str(article.pk) + '/' + \
article.title
items.append(PyRSS2Gen.RSSItem(
title=article.title,
link=url,
description=content,
guid=PyRSS2Gen.Guid(url),
pubDate=article.publish_time,
))
rss = PyRSS2Gen.RSS2(
title=Config.MAIN_TITLE,
link=Config.SITE_URL,
description=Config.DESCRIPTION,
lastBuildDate=datetime.datetime.now(),
items=items
).to_xml('utf-8')
return rss | 5,356,496 |
def _interpolate_solution_at(target_time, solver_state, validate_args=False):
"""Computes the solution at `target_time` using 4th order interpolation.
Args:
target_time: Floating `Tensor` specifying the time at which to obtain the
solution. Must be within the interval of the last time step of the
`solver_state`: `solver_state.last_step_start` <= `target_time` <=
`solver_state.current_time`.
solver_state: `_DopriSolverInternalState` - solver state.
validate_args: Python `bool` indicating whether to validate inputs.
Default value: False.
Returns:
solution: Solution at `target_time` obtained by interpolation.
coefficients: Interpolating coefficients used to construct the solution.
"""
coefficients = solver_state.interpolating_coefficients
t0 = solver_state.last_step_start
t1 = solver_state.current_time
solution = rk_util.evaluate_interpolation(
coefficients, t0, t1, target_time, validate_args)
return solution, coefficients | 5,356,497 |
def test_drawcities():
"""Draw Cities"""
mp = MapPlot(
title="Fill and Draw Cities",
subtitle="This is my subtitle",
continentalcolor="blue",
sector="iowa",
nocaption=True,
)
mp.drawcities()
return mp.fig | 5,356,498 |
def from_hi(psi_0, mpa_type, system_index, hi, tau=0.01, state_compression_kwargs=None,
op_compression_kwargs=None, second_order_trotter=False, t0=0, psi_0_compression_kwargs=None,
track_trace=False):
"""
Factory function for imaginary time TMP-objects (ITMPS, ITMPO, ITPMPS)
:param psi_0: Initial state as MPArray. Need not be normalized, as it is normalized before propagation
:param mpa_type: Type of MPArray to propagate, supported are mps, mpo, and pmps
:param system_index: Index of the system site in the chain (place of the system site operator in the hi_list)
:param hi: List/tuple for all terms in the Hamiltonian H = sum_i hi
Ordered like this:
- Sites left of the system site (denoted by system index) couple (from left to right)
the current site to the system site (and contain the site local operators)
- The term for the system site must be present and contains the local Hamiltonian only!
May be None, in which case the local Hamiltonian for the site is assumed to be 0
- Sites right of the system site (denoted by system index) couple (from left to right)
the system site to the current site (and contain the site local operators)
:param tau: Timestep for each invocation of evolve. Real timestep should be passed here. Default is .01
:param state_compression_kwargs: Arguments for mpa compression after each dot product (see real time
evolution factory function for details)
:param op_compression_kwargs: Arguments for trotter step operator pre-compression (see real time evolution
factory function for details)
:param second_order_trotter: Switch to use second order instead of fourth order trotter if desired
By default fourth order Trotter is used
:param t0: Initial time of the propagation
:param psi_0_compression_kwargs: Optional compresion kwargs for the initial state (see real time evolution
factory function for details)
:param track_trace: If the trace of the (effective) density matrix should be tracked during the
imaginary time evolution
:return: TMP object. If mpa_type is mps: ITMPS obj., if mpa_type is mpo: ITMPO obj., if mpa_type is pmps: ITPMPS obj.
"""
if not check_shape(psi_0, mpa_type):
raise AssertionError('MPA shape of the initial state is not compatible with the chosen mpa_type')
assert np.imag(tau) == 0 and np.real(tau) != 0
tau = 1j * tau
if mpa_type == 'mps':
return StarITMPS.from_hi(psi_0, False, False, system_index, hi, tau=tau,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter, t0=t0,
psi_0_compression_kwargs=psi_0_compression_kwargs,
track_trace=track_trace)
elif mpa_type == 'pmps':
return StarITPMPS.from_hi(psi_0, True, False, system_index, hi, tau=tau,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter, t0=t0,
psi_0_compression_kwargs=psi_0_compression_kwargs,
track_trace=track_trace)
elif mpa_type == 'mpo':
return StarITMPO.from_hi(psi_0, False, True, system_index, hi, tau=tau,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter, t0=t0,
psi_0_compression_kwargs=psi_0_compression_kwargs,
track_trace=track_trace)
else:
raise AssertionError('Unsupported mpa_type') | 5,356,499 |