content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def envi_header(inputpath):
"""
Convert an ENVI binary/header path to a header path, handling extensions.
Args:
inputpath: path to envi binary file
Returns:
str: the header file associated with the input reference.
"""
if os.path.splitext(inputpath)[-1] in ('.img', '.dat', '.raw'):
# headers could be at either filename.img.hdr or filename.hdr. Check both, return the one that exists if it
# does, if not return the latter (new file creation presumed).
hdrfile = os.path.splitext(inputpath)[0] + '.hdr'
if os.path.isfile(hdrfile):
return hdrfile
elif os.path.isfile(inputpath + '.hdr'):
return inputpath + '.hdr'
return hdrfile
elif os.path.splitext(inputpath)[-1] == '.hdr':
return inputpath
else:
return inputpath + '.hdr' | 5,356,700 |
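# Hedged usage sketch for envi_header (paths below are hypothetical; `import os` is
# assumed at module level). When neither header file exists on disk, the '.hdr'
# sibling of the binary is returned for new-file creation.
print(envi_header('/data/scene.img'))  # '/data/scene.hdr' (or '/data/scene.img.hdr' if only that exists)
print(envi_header('/data/scene.hdr'))  # '/data/scene.hdr'
print(envi_header('/data/scene'))      # '/data/scene.hdr'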
def parse_fernet_timestamp(ciphertext):
"""
Returns timestamp embedded in Fernet-encrypted ciphertext, converted to Python datetime object.
Decryption should be attempted before using this function, as decryption performs
cryptographically strong checks of the ciphertext's validity.
"""
try:
decoded = base64.urlsafe_b64decode(ciphertext)
# This is a value in Unix Epoch time
epoch_timestamp = struct.unpack('>Q', decoded[1:9])[0]
timestamp = datetime(1970, 1, 1) + timedelta(seconds=epoch_timestamp)
return timestamp
except struct.error as e:
raise ValueError(str(e)) | 5,356,701 |
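# Hedged usage sketch for parse_fernet_timestamp: build a real token with the
# `cryptography` package and read back its embedded creation time. Assumes
# `base64`, `struct`, `datetime`, and `timedelta` are imported at module level,
# as the function body implies.
from cryptography.fernet import Fernet

fernet = Fernet(Fernet.generate_key())
token = fernet.encrypt(b"hello")       # token embeds the current Unix time
print(parse_fernet_timestamp(token))   # datetime close to the current UTC time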
def tagcloud(guids):
"""Get "tag cloud" for the search specified by guids
Same return format as taglist, impl is always False.
"""
guids = set(guids)
range = (0, 19 + len(guids))
tags = request.client.find_tags("EI", "", range=range, guids=guids, order="-post", flags="-datatag")
return [(tagfmt(t.name), t, False) for t in tags if t.guid not in guids] | 5,356,702 |
def _ValidateContent(path, expected_content):
"""Helper to validate the given file's content."""
assert os.path.isfile(path), 'File didn\'t exist: %r' % path
name = os.path.basename(path)
current_content = open(path).read()
if current_content == expected_content:
print('%s is good.' % name)
else:
try:
open(path, 'w').write(expected_content)
print('Updated %s.' % name)
except IOError as e:
if e.errno != errno.EACCES:
raise
print('%r needs to be updated but is not writable.' % path)
return False
return True | 5,356,703 |
def _test_image_path():
"""
A 100 x 50 pixel GeoTIFF image, with 0 as NODATA value
"""
return os.path.join(path, "test.tiff") | 5,356,704 |
def api_ofrecord_image_decoder_random_crop(
input_blob: remote_blob_util.BlobDef,
blob_name: str,
color_space: str = "BGR",
num_attempts: int = 10,
seed: Optional[int] = None,
random_area: Sequence[float] = [0.08, 1.0],
random_aspect_ratio: Sequence[float] = [0.75, 1.333333],
name: str = "OFRecordImageDecoderRandomCrop",
) -> remote_blob_util.BlobDef:
"""This operator is an image decoder with random crop.
Args:
input_blob (BlobDef): The input Blob
blob_name (str): The name of the Blob
color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR".
num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10.
seed (Optional[int], optional): The random seed. Defaults to None.
random_area (Sequence[float], optional): The random cropping area. Defaults to [0.08, 1.0].
random_aspect_ratio (Sequence[float], optional): The random scaled ratio. Defaults to [0.75, 1.333333].
name (str, optional): The name for the operation. Defaults to "OFRecordImageDecoderRandomCrop".
Returns:
BlobDef: The random cropped Blob
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 16
color_space = "RGB"
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./imgdataset",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
random_shuffle=True,
shuffle_after_epoch=True,
)
image = flow.data.OFRecordImageDecoderRandomCrop(
ofrecord, "encoded", color_space=color_space
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(224, 224)
)
label = flow.data.OFRecordRawDecoder(
ofrecord, "class/label", shape=(1, ), dtype=flow.int32
)
return res_image, label
if __name__ == "__main__":
images, labels = ofrecord_reader_job()
# images.shape (16, 224, 224, 3)
"""
assert isinstance(name, str)
if seed is not None:
assert name is not None
module = flow.find_or_create_module(
name,
lambda: OFRecordImageDecoderRandomCropModule(
blob_name=blob_name,
color_space=color_space,
num_attempts=num_attempts,
random_seed=seed,
random_area=random_area,
random_aspect_ratio=random_aspect_ratio,
name=name,
),
)
return module(input_blob) | 5,356,705 |
def student_add_information(adding_student_id, student_information):
"""
Add detailed information for a student.
:@param adding_student_id: int
:@param student_information: dict or str
:@return : execution status (True or False)
"""
if type(student_information) == dict:
adding_information = student_information
elif type(student_information) == str:
adding_information = {}
tmp_key = ''
tmp_adding_key = ''
tmp_value = ''
state = 'write_key'
for k in student_information:
# determine which part of the record we are currently parsing
if k == ':':
tmp_value = ''
state = 'write_value'
continue
elif k == '\n':
tmp_adding_key = tmp_key
tmp_key = ''
state = 'write_key'
adding_information[tmp_adding_key] = tmp_value
continue
# route the current character into either the key or the value
if state == 'write_key':
tmp_key += k
elif state == 'write_value':
tmp_value += k
else:
return False, 2
times = 0
adding_info_list = [adding_student_id]
for i in adding_information.keys():
times += 1
adding_info_list.append(adding_information.get(i))
for j in range(0, 5-times):
adding_info_list.append(None)
adding_info_tuple = tuple(adding_info_list)
adding_info_final = [adding_info_tuple]
cur.executemany("insert into student_info values(%s,%s,%s,%s,%s,%s)", adding_info_final)
conn.commit() | 5,356,706 |
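# Usage note for student_add_information (illustrative; the module-level `conn` and
# `cur` database objects are assumed). A string argument is parsed character by
# character: ':' switches from key to value and '\n' commits the pair, so every
# key:value pair must end with a newline.
#
#   student_add_information(1001, {"name": "Alice", "class": "3A"})
#   student_add_information(1002, "name:Bob\nclass:3B\n")
#   # the second call is parsed internally as {'name': 'Bob', 'class': '3B'}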
def import_file_as_module(filename: str, name: Optional[str] = None) -> ModuleType:
"""
NOTE(2020-11-09|domanchi): We're essentially executing arbitrary code here, so some thoughts
should be recorded as to the security of this feature. This should not add any additional
security risk, given the following assumptions hold true:
1. detect-secrets is not used in an environment that has privileged access (more
than the current user), OR
2. detect-secrets (when running in a privileged context) does not accept arbitrary
user input that feeds into this function (e.g. custom plugins).
The first assumption should be rather self-explanatory: if you are running detect-secrets
in a context that has the same permissions as you, you can import any code you want, since
this acts more of a utility function than a security flaw. If you're going to do it *anyway*,
let's just make your life easier.
The second assumption should also be pretty straight-forward: don't trust user input,
especially if it's going to be executed as that privileged user, unless you want a privilege
escalation vulnerability. detect-secrets is not going to do any sanitization of user input
for you.
"""
if not os.path.exists(filename):
raise FileNotFoundError
if not name:
# NOTE: After several trial and error attempts, I could not discern the importance
# of this field, in this context. Hence, I don't think it matters that much.
name = os.path.splitext(os.path.basename(filename))[0]
# Source: https://stackoverflow.com/a/67692/13340678
spec = importlib.util.spec_from_file_location(name, filename)
if not spec:
raise InvalidFile
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
module.__path__ = os.path.abspath(filename) # type: ignore
return module | 5,356,707 |
def test_to_graph_should_return_publisher_as_bnode_with_catalog() -> None:
"""It returns a name graph isomorphic to spec."""
catalog = Catalog()
catalog.identifier = "http://example.com/catalogs/1"
agent = Agent()
agent.name = {"en": "James Bond", "nb": "Djeims Bånd"}
catalog.publisher = agent
src = """
@prefix dct: <http://purl.org/dc/terms/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix dcat: <http://www.w3.org/ns/dcat#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
<http://example.com/catalogs/1> a dcat:Catalog;
dct:publisher [a foaf:Agent ;
foaf:name "James Bond"@en, "Djeims Bånd"@nb ;
] ;
.
"""
g1 = Graph().parse(data=catalog.to_rdf(), format="turtle")
g2 = Graph().parse(data=src, format="turtle")
assert_isomorphic(g1, g2) | 5,356,708 |
async def get_show_by_month_day(month: conint(ge=1, le=12), day: conint(ge=1, le=31)):
"""Retrieve a Show object, based on month and day, containing: Show
ID, date and basic information."""
try:
show = Show(database_connection=_database_connection)
shows = show.retrieve_by_month_day(month, day)
if not shows:
raise HTTPException(
status_code=404,
detail=f"Shows for month {month:02d} and {day:02d} not found",
)
else:
return {"shows": shows}
except ValueError:
raise HTTPException(
status_code=404,
detail=f"Shows for month {month:02d} and {day:02d} not found",
)
except ProgrammingError:
raise HTTPException(
status_code=500,
detail="Unable to retrieve show information from the database",
)
except DatabaseError:
raise HTTPException(
status_code=500,
detail="Database error occurred while retrieving "
"show information from the database",
) | 5,356,709 |
def generate_random_tag(length):
"""Generate a random alphanumeric tag of specified length.
Parameters
----------
length : int
The length of the tag, in characters
Returns
-------
str
An alphanumeric tag of specified length.
Notes
-----
The generated tag will not use possibly ambiguous characters from this set:
- '0' and '1'
- 'i' and 'I'
- 'l' and 'L'
- 'o' and 'O'
"""
characters_set = ('23456789'
+ 'abcdefghjkmnpqrstuvwxyz'
+ 'ABCDEFGHJKMNPQRSTUVWXYZ')
return ''.join([characters_set[int(random() * len(characters_set))]
for _ in range(length)]) | 5,356,710 |
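# Quick sanity check for generate_random_tag (assumes `from random import random`
# at module level, as the function body implies).
tag = generate_random_tag(8)
assert len(tag) == 8
assert not set(tag) & set('01iIlLoO')  # no ambiguous characters
print(tag)  # e.g. 'mT4kQz7W'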
def test_drug_likeness_input(mocker: MockFixture, tmp_path: Path) -> None:
"""Check that the yaml input for drug likeness is correct."""
path_input = PATH_TEST / "input_test_druglikeness.yml"
mocker.patch("argparse.ArgumentParser.parse_args", return_value=argparse.Namespace(
i=path_input))
mocker.patch("flamingo.screen.split_filter_in_batches", return_value=None)
main() | 5,356,711 |
def test_generate():
"""
GIVEN artifacts and name
WHEN generate is called with the artifacts and name
THEN the model source code is returned.
"""
artifacts = schemas_artifacts.types.ModelArtifacts(
tablename="table 1",
inherits=None,
parent=None,
description=None,
mixins=None,
kwargs=None,
composite_index=None,
composite_unique=None,
backrefs=[],
properties=[
(
"id",
schemas_artifacts.types.SimplePropertyArtifacts(
type=types.PropertyType.SIMPLE,
open_api=schemas_artifacts.types.OpenApiSimplePropertyArtifacts(
type="integer",
format=None,
max_length=None,
nullable=None,
default=None,
read_only=None,
write_only=None,
),
extension=schemas_artifacts.types.ExtensionSimplePropertyArtifacts(
primary_key=False,
autoincrement=None,
index=None,
unique=None,
server_default=None,
foreign_key=None,
kwargs=None,
foreign_key_kwargs=None,
dict_ignore=None,
),
schema={}, # type: ignore
required=False,
description=None,
),
)
],
)
source = models_file._model.generate(artifacts=artifacts, name="Model")
expected_source = f'''
class ModelDict({_EXPECTED_TD_BASE}, total=False):
"""TypedDict for properties that are not required."""
id: typing.Optional[int]
class TModel({_EXPECTED_MODEL_BASE}):
"""
SQLAlchemy model protocol.
Attrs:
id: The id of the Model.
"""
# SQLAlchemy properties
__table__: sqlalchemy.Table
__tablename__: str
query: orm.Query
# Model properties
id: 'sqlalchemy.Column[typing.Optional[int]]'
def __init__(self, id: typing.Optional[int] = None) -> None:
"""
Construct.
Args:
id: The id of the Model.
"""
...
@classmethod
def from_dict(cls, id: typing.Optional[int] = None) -> "TModel":
"""
Construct from a dictionary (eg. a POST payload).
Args:
id: The id of the Model.
Returns:
Model instance based on the dictionary.
"""
...
@classmethod
def from_str(cls, value: str) -> "TModel":
"""
Construct from a JSON string (eg. a POST payload).
Returns:
Model instance based on the JSON string.
"""
...
def to_dict(self) -> ModelDict:
"""
Convert to a dictionary (eg. to send back for a GET request).
Returns:
Dictionary based on the model instance.
"""
...
def to_str(self) -> str:
"""
Convert to a JSON string (eg. to send back for a GET request).
Returns:
JSON string based on the model instance.
"""
...
Model: typing.Type[TModel] = models.Model # type: ignore'''
assert source == expected_source | 5,356,712 |
def blendShapeEditor(*args, **kwargs):
"""
This command creates an editor that derives from the base editor class that has controls for blendShape, control nodes.
Flags:
- control : ctl (bool) [query]
Query only. Returns the top level control for this editor. Usually used for getting a parent to attach popup menus.
Caution: It is possible, at times, for an editor to exist without a control. This flag returns "NONE" if no control is
present.
- defineTemplate : dt (unicode) [create]
Puts a command in a mode where any other flags and args are parsed and added to the command template specified in the
argument. They will be used as default arguments in any subsequent invocations of the command when templateName is set
as the current template.
- docTag : dtg (unicode) [create,query,edit]
Attaches a tag to the Maya editor.
- exists : ex (bool) [create]
Returns true|false depending upon whether the specified object exists. Other flags are ignored.
- filter : f (unicode) [create,query,edit]
Specifies the name of an itemFilter object to be placed on this editor. This filters the information coming onto the
main list of the editor.
- forceMainConnection : fmc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will use as its source of content. The editor will
only display items contained in the selectionConnection object. This is a variant of the -mainListConnection flag in
that it will force a change even when the connection is locked. This flag is used to reduce the overhead when using the
-unlockMainConnection , -mainListConnection, -lockMainConnection flags in immediate succession.
- highlightConnection : hlc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will synchronize with its highlight list. Not all
editors have a highlight list. For those that do, it is a secondary selection list.
- lockMainConnection : lck (bool) [create,edit]
Locks the current list of objects within the mainConnection, so that only those objects are displayed within the editor.
Further changes to the original mainConnection are ignored.
- mainListConnection : mlc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will use as its source of content. The editor will
only display items contained in the selectionConnection object.
- panel : pnl (unicode) [create,query]
Specifies the panel that the editor belongs to. By default if an editor is created in the create callback of a scripted
panel it will belong to that panel. If an editor doesn't belong to a panel it will be deleted when the window that it is
in is deleted.
- parent : p (unicode) [create,query,edit]
Specifies the parent layout for this editor. This flag will only have an effect if the editor is currently un-parented.
- selectionConnection : slc (unicode) [create,query,edit]
Specifies the name of a selectionConnection object which the editor will synchronize with its own selection list. As the
user selects things in this editor, they will be selected in the selectionConnection object. If the object undergoes
changes, the editor updates to show the change.
- stateString : sts (bool) [query]
Query only flag. Returns the MEL command that will edit an editor to match the current editor state. The returned
command string uses the string variable $editorName in place of a specific name.
- targetControlList : tcl (bool) [query]
- targetList : tl (bool) [query]
Flag can appear in Create mode of command. Flag can have multiple arguments, passed either as a tuple or a list.
- unParent : up (bool) [create,edit]
Specifies that the editor should be removed from its layout. This cannot be used with query.
- unlockMainConnection : ulk (bool) [create,edit]
Unlocks the mainConnection, effectively restoring the original mainConnection (if it is still available), and dynamic
updates.
- updateMainConnection : upd (bool) [create,edit]
Causes a locked mainConnection to be updated from the original mainConnection, but preserves the lock state.
- useTemplate : ut (unicode) [create]
Force the command to use a command template other than the current one.
- verticalSliders : vs (bool) [create,query,edit]
Derived from mel command `maya.cmds.blendShapeEditor`
"""
pass | 5,356,713 |
def loadmat(filename, check_arrays=False, **kwargs):
"""
Big thanks to mergen on stackexchange for this:
http://stackoverflow.com/a/8832212
This function should be called instead of direct scipy.io.loadmat
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls the function check keys to cure all entries
which are still mat-objects.
"""
kwargs["struct_as_record"] = False
kwargs["squeeze_me"] = True
data = io.loadmat(filename, **kwargs)
return _check_keys(data, check_arrays) | 5,356,714 |
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) | 5,356,715 |
def train_city_s1(city:str, pollutant= 'PM2.5', n_jobs=-2, default_meta=False,
search_wind_damp=False, choose_cat_hour=False, choose_cat_month=True,
add_weight=True, instr='MODIS', op_fire_zone=False, op_fire_twice=False, op_lag=True, search_tpot=False,
main_data_folder: str = '../data/',
model_folder='../models/', report_folder='../reports/'):
"""Training pipeline from process raw data, hyperparameter tune, and save model.
Args:
city: city name
pollutant(optional): pollutant name
n_jobs(optional): number of CPUs to use during optimization
default_meta(optional): if True, override meta setting with the default value
search_wind_damp(optional): if True, search in four options of the fire features.
add_weight(optional): if True, use non-uniform weight when fitting and evaluating the model.
instr(optional): choose hotspots detection instrument
choose_cat_hour(optional): if True, check whether adding hour as a categorical variable improves the model
choose_cat_month(optional): if True, check whether adding month as a categorical variable improves the model
op_fire_twice(optional): if True, optimize fire data again after optimizing lag
search_tpot(optional): If True, also search for other model using TPOT
main_data_folder(optional): main data folder for initializing Dataset object [default: '../data/']
model_folder(optional): model folder for initializing Dataset object [default:'../models/']
report_folder(optional): folder to save figure for initializing Dataset object [default:'../reports/']
Returns:
dataset: dataset object
model: model object
poll_meta(dict): parameter dictionary
"""
# start logging
set_logging(level=10)
logger = logging.getLogger(__name__)
# initialize a trainer object
trainer = Trainer(city=city, pollutant=pollutant, instr=instr)
trainer.n_jobs = n_jobs
if default_meta:
trainer.get_default_meta()
if not add_weight:
trainer.dataset.add_weight = 0
#if 'x_cols_org' in trainer.poll_meta.keys():
# trainer.dataset.x_cols = trainer.dataset.x_cols_org = trainer.poll_meta['x_cols_org']
# look for the best rf model
trainer.op_rf(fire_dict=trainer.dataset.fire_dict)
# remove columns
trainer.op2_rm_cols()
logger.info(f'current columns {trainer.dataset.x_cols_org}')
# op fire
trainer.op_fire(x_cols=trainer.dataset.x_cols_org, search_wind_damp=search_wind_damp)
if op_fire_zone:
trainer.op_fire_zone(step=50)
if choose_cat_hour:
trainer.choose_cat_hour()
if choose_cat_month:
trainer.choose_cat_month()
if op_lag:
# see if adding lag improve things
if trainer.dataset.with_interact:
# use smaller lag range
trainer.op4_lag(lag_range=[1, 20])
else:
trainer.op4_lag()
else:
print('skip adding lag')
trainer.dataset.lag_dict = {'n_max': 1, 'step': 1, 'roll':True}
trainer.dataset.build_lag(
lag_range=np.arange(
1,
trainer.dataset.lag_dict['n_max'],
trainer.dataset.lag_dict['step']),
roll=trainer.dataset.lag_dict['roll'])
if op_fire_twice:
trainer.op_fire(x_cols=trainer.dataset.x_cols, with_lag=True, search_wind_damp=search_wind_damp)
# serach rf model again
trainer.op6_rf()
trainer.final_fit()
# save plot
trainer.save_feat_imp(with_interact=trainer.dataset.with_interact, filename=trainer.dataset.report_folder +f'{trainer.poll_name}_rf_fea_op2_nolag.png', title='rf feature of importance')
trainer.save_all()
if search_tpot:
trainer.search_tpot()
# turn off logging
logging.shutdown()
return trainer.dataset, trainer.model, trainer | 5,356,716 |
def mask(
ctx,
files,
output,
geojson_mask,
driver,
all_touched,
crop,
invert,
creation_options):
"""Masks in raster using GeoJSON features (masks out all areas not covered
by features), and optionally crops the output raster to the extent of the
features. Features are assumed to be in the same coordinate reference
system as the input raster.
GeoJSON must be the first input file or provided from stdin:
> rio mask input.tif output.tif --geojson-mask features.json
> rio mask input.tif output.tif --geojson-mask - < features.json
If the output raster exists, it will be completely overwritten with the
results of this operation.
The result is always equal to or within the bounds of the input raster.
--crop and --invert options are mutually exclusive.
--crop option is not valid if features are completely outside extent of
input raster.
"""
from rasterio.features import geometry_mask
from rasterio.features import bounds as calculate_bounds
verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
output, files = resolve_inout(files=files, output=output)
input = files[0]
if geojson_mask is None:
click.echo('No GeoJSON provided, INPUT will be copied to OUTPUT',
err=True)
shutil.copy(input, output)
return
if crop and invert:
click.echo('Invert option ignored when using --crop', err=True)
invert = False
with rasterio.drivers(CPL_DEBUG=verbosity > 2):
try:
with click.open_file(geojson_mask) as f:
geojson = json.loads(f.read())
except ValueError:
raise click.BadParameter('GeoJSON could not be read from '
'--geojson-mask or stdin',
param_hint='--geojson-mask')
if 'features' in geojson:
geometries = (f['geometry'] for f in geojson['features'])
elif 'geometry' in geojson:
geometries = (geojson['geometry'], )
else:
raise click.BadParameter('Invalid GeoJSON', param=input,
param_hint='input')
bounds = geojson.get('bbox', calculate_bounds(geojson))
with rasterio.open(input) as src:
disjoint_bounds = _disjoint_bounds(bounds, src.bounds)
if crop:
if disjoint_bounds:
raise click.BadParameter('not allowed for GeoJSON outside '
'the extent of the input raster',
param=crop, param_hint='--crop')
window = src.window(*bounds)
transform = src.window_transform(window)
(r1, r2), (c1, c2) = window
mask_shape = (r2 - r1, c2 - c1)
else:
if disjoint_bounds:
click.echo('GeoJSON outside bounds of existing output '
'raster. Are they in different coordinate '
'reference systems?',
err=True)
window = None
transform = src.affine
mask_shape = src.shape
mask = geometry_mask(
geometries,
out_shape=mask_shape,
transform=transform,
all_touched=all_touched,
invert=invert)
meta = src.meta.copy()
meta.update(**creation_options)
meta.update({
'driver': driver,
'height': mask.shape[0],
'width': mask.shape[1],
'transform': transform
})
with rasterio.open(output, 'w', **meta) as out:
for bidx in range(1, src.count + 1):
img = src.read(bidx, masked=True, window=window)
img.mask = img.mask | mask
out.write_band(bidx, img.filled(src.nodatavals[bidx-1])) | 5,356,717 |
def some_more_cut_paste(possible_timelines, edge_nout_hash, cut_nout_hash, paste_point_hash):
"""
interpretation of cut_nout_hash is purely driven by how this is used in practice... do I like it? Not sure yet.
the interpretation is:
cut_nout_hash is the first thing that's _not_ included.
"""
todo = []
for nh in possible_timelines.all_nhtups_for_nout_hash(edge_nout_hash):
# potentially deal with edge cases (e.g. cut_nout_hash not in history) here.
if nh.nout_hash == cut_nout_hash:
break
todo.append(nh)
prev = paste_point_hash
for nh in reversed(todo): # Maybe: use `nouts_for_notes` here (currently not possible b/c unmatching types)
nout = NoteSlur(nh.nout.note, prev)
possibility, prev = calc_possibility(nout)
yield possibility | 5,356,718 |
def constructCbsdGrantInfo(reg_request, grant_request, is_managing_sas=True):
"""Constructs a |CbsdGrantInfo| tuple from the given data."""
lat_cbsd = reg_request['installationParam']['latitude']
lon_cbsd = reg_request['installationParam']['longitude']
height_cbsd = reg_request['installationParam']['height']
height_type_cbsd = reg_request['installationParam']['heightType']
if height_type_cbsd == 'AMSL':
# TODO(sbdt): move the feature of AMSL support within the prop models.
altitude_cbsd = drive.terrain_driver.GetTerrainElevation(lat_cbsd, lon_cbsd)
height_cbsd = height_cbsd - altitude_cbsd
max_eirp, low_frequency, high_frequency = None, None, None
if grant_request is not None:
if 'requestedOperationParam' in grant_request:
max_eirp = grant_request['requestedOperationParam']['maxEirp']
low_frequency = grant_request['requestedOperationParam']['operationFrequencyRange']['lowFrequency']
high_frequency = grant_request['requestedOperationParam']['operationFrequencyRange']['highFrequency']
else:
max_eirp = grant_request['operationParam']['maxEirp']
low_frequency = grant_request['operationParam']['operationFrequencyRange']['lowFrequency']
high_frequency = grant_request['operationParam']['operationFrequencyRange']['highFrequency']
return CbsdGrantInfo(
# Get information from the registration
latitude=lat_cbsd,
longitude=lon_cbsd,
height_agl=height_cbsd,
indoor_deployment=reg_request['installationParam']['indoorDeployment'],
antenna_azimuth=reg_request['installationParam']['antennaAzimuth'],
antenna_gain=reg_request['installationParam']['antennaGain'],
antenna_beamwidth=reg_request['installationParam']['antennaBeamwidth'],
cbsd_category=reg_request['cbsdCategory'],
max_eirp=max_eirp,
iap_eirp={max_eirp}, # *****PATCHED*****
low_frequency=low_frequency,
high_frequency=high_frequency,
is_managed_grant=is_managing_sas) | 5,356,719 |
def download_raw_pages_content(pages_count):
"""download habr pages by page count"""
return [fetch_raw_content(page) for page in range(1, pages_count + 1)] | 5,356,720 |
def tseries2bpoframe(s: pd.Series, freq: str = "MS", prefix: str = "") -> pd.DataFrame:
"""
Aggregate timeseries with varying values to a dataframe with base, peak and offpeak
timeseries, grouped by provided time interval.
Parameters
----------
s : Series
Timeseries with hourly or quarterhourly frequency.
freq : {'MS' (month, default) 'QS' (quarter), 'AS' (year)}
Target frequency.
prefix : str, optional (default: '')
If specified, add this to the column names of the returned dataframe.
Returns
-------
DataFrame
Dataframe with base, peak and offpeak values (as columns). Index: downsampled
timestamps at provided frequency.
Notes
-----
Can only be used for values that are 'averagable' over a time period, like power [MW]
and price [Eur/MWh]. Not for e.g. energy [MWh], revenue [Eur], and duration [h].
In:
ts_left
2020-01-01 00:00:00+01:00 41.88
2020-01-01 01:00:00+01:00 38.60
2020-01-01 02:00:00+01:00 36.55
...
2020-12-31 21:00:00+01:00 52.44
2020-12-31 22:00:00+01:00 51.86
2020-12-31 23:00:00+01:00 52.26
Freq: H, Name: p, Length: 8784, dtype: float64
Out:
base peak offpeak
ts_left
2020-01-01 00:00:00+01:00 35.034906 42.530036 30.614701
2020-02-01 00:00:00+01:00 21.919009 33.295167 15.931557
... ... ...
2020-11-01 00:00:00+01:00 38.785706 49.110873 33.226004
2020-12-01 00:00:00+01:00 43.519745 57.872246 35.055449
12 rows × 3 columns
"""
if freq not in ("MS", "QS", "AS"):
raise ValueError(
f"Parameter ``freq`` must be one of 'MS', 'QS', 'AS'; got '{freq}'."
)
# Remove partial data
s = trim_frame(s, freq)
# Handle possible units.
sin, units = (s.pint.magnitude, s.pint.units) if hasattr(s, "pint") else (s, None)
# Do calculations. Use normal mean, because all rows have same duration.
sout = sin.resample(freq).apply(lambda s: tseries2singlebpo(s, prefix))
# Handle possible units.
if units is not None:
sout = sout.astype(nits.pintunit(units))
return sout.unstack() | 5,356,721 |
def calc_buffered_bounds(
format, bounds, meters_per_pixel_dim, layer_name, geometry_type,
buffer_cfg):
"""
Calculate the buffered bounds per format per layer based on config.
"""
if not buffer_cfg:
return bounds
format_buffer_cfg = buffer_cfg.get(format.extension)
if format_buffer_cfg is None:
return bounds
geometry_type = normalize_geometry_type(geometry_type)
per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name)
if per_layer_cfg is not None:
layer_geom_pixels = per_layer_cfg.get(geometry_type)
if layer_geom_pixels is not None:
assert isinstance(layer_geom_pixels, Number)
result = bounds_buffer(
bounds, meters_per_pixel_dim * layer_geom_pixels)
return result
by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get(
geometry_type)
if by_geometry_pixels is not None:
assert isinstance(by_geometry_pixels, Number)
result = bounds_buffer(
bounds, meters_per_pixel_dim * by_geometry_pixels)
return result
return bounds | 5,356,722 |
async def read_users_me(
current_user: models.User = Depends(security.get_current_active_user),
):
"""Get User data"""
return current_user | 5,356,723 |
def base_orbit(SLC1_par, SLC2_par, baseline, logpath=None, outdir=None, shellscript=None):
"""
| Estimate baseline from orbit state vectors
| Copyright 2015, Gamma Remote Sensing, v4.2 clw 18-Apr-2018
Parameters
----------
SLC1_par:
(input) SLC-1 ISP image parameter file
SLC2_par:
(input) SLC-2 ISP image parameter file
baseline:
(output) baseline file (text format, enter - for none)
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the Gamma commands to in shell format
"""
process(['/usr/local/GAMMA_SOFTWARE-20180703/ISP/bin/base_orbit', SLC1_par, SLC2_par, baseline], logpath=logpath,
outdir=outdir, shellscript=shellscript) | 5,356,724 |
def cal_energy_parameters_for_one_channel(sub_sample_label_dict, channel, importance=1):
"""
the loss comes equally from four sources: connected component (0D), boundary (1D), area (2D), and rim_enhance
e.g. a small region with long boundaries means it accounts for lots of 1D loss and little of 2D loss.
If border_outer=False, boundaries are inside lesions, and all connected regions account for the same 0D loss
If border_outer=True, boundaries are outside lesions, 0D loss are the same inside the same integrated connected
region determined by the outer boundaries.
0D, 1D, 2D loss are uniformly distributed into every pixels inside the integrated connected region;
rim_enhance is then added to the boundary pixels.
:param sub_sample_label_dict: The dict of representative training sample labels. This function calculate how to
balance the loss weights according to these training sample labels. Training sample labels should be numpy arrays
shaped: [length, width, channel], and when the channel is specified, it should be a binary image, with 1 means
positive, it can be a probability [0, 1]. When summing all channels, we get a 2D array of all ones.
:param channel: which channel we need to calculate? The weights are calculated channel-wise. The theoretical basis
is that, some TYPES lesions are big and folded; while some are small and smooth. When doing performance measure, we
don't care about this difference in the classes. Thus, different classes should account the same training loss.
Channel 0 is the probability mask for normal pixels.
:param importance: There may be a special class is extremely important. Increase the importance will increase the
proportion of training loss for this class.
:return: connect_energy_factor, rim_energy_factor, area_enhance, rim_enhance
0D 1D 2D
"""
sample_names_list = os.listdir(sub_sample_label_dict)
total_num_connected_areas = 0 # the number of connected areas in this sub set, counts for 0D loss
total_rim_length = 0 # the number of rim voxels, counts for 1D loss
total_lesion_area = 0 # the number of lesion voxels, counts for 2D loss
if not sub_sample_label_dict[-1] == '/':
sub_sample_label_dict = sub_sample_label_dict + '/'
for sample_name in sample_names_list:
sample = np.load(sub_sample_label_dict + sample_name) # sample should in [width, length, channel]
mask = sample[:, :, channel]
num_connected_areas, num_rim_voxels, num_lesion_voxels = calculate_balance_weights(mask, return_stat=True)
total_num_connected_areas += num_connected_areas
total_rim_length += num_rim_voxels
total_lesion_area += num_lesion_voxels
num_samples = len(sample_names_list)
num_loss_per_dimension = num_samples * importance
# each sample and each class is defaulted to have 4 units of losses: 3 units, 0D, 1D, 2D, which distributed
# uniformly on lesions; and one unit distributed uniformly on the rim pixels.
area_enhance = num_loss_per_dimension / total_lesion_area  # on average, each slice gets 1 unit of 2D loss
rim_energy_factor = num_loss_per_dimension / total_rim_length  # on average, each slice gets 1 unit of 1D loss
connect_energy_factor = num_loss_per_dimension / total_num_connected_areas  # each slice gets 1 unit of 0D loss
rim_enhance = num_loss_per_dimension / total_rim_length  # on average, adds 1 more unit to enhance the rim pixels
return connect_energy_factor, rim_energy_factor, area_enhance, rim_enhance | 5,356,725 |
def get_logger(logfile):
"""Instantiate a simple logger.
"""
import logging
from contextlib import redirect_stdout
fmt = "%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s: %(message)s"
#fmt = '%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s:%(asctime)s: %(message)s']
datefmt = '%Y-%m-%dT%H:%M:%S'
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# logging to logfile
ch = logging.FileHandler(logfile, mode='w')
#ch.setLevel(logging.INFO)
ch.setFormatter( logging.Formatter(fmt, datefmt=datefmt) )
logger.addHandler(ch)
### log stdout
#ch = logging.StreamHandler()
#ch.setLevel(logging.DEBUG)
#ch.setFormatter( logging.Formatter(fmt, datefmt=datefmt) )
#logger.addHandler(ch)
#
#logger.write = lambda msg: logger.info(msg) if msg != '\n' else None
return logger | 5,356,726 |
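# Usage sketch for get_logger (the log path is illustrative). Messages are written
# to the file in 'LEVEL:filename:lineno:funcName: message' format.
logger = get_logger('/tmp/run.log')
logger.info('started')
logger.warning('low disk space')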
def __to_signed(val, bits):
"""
internal function to convert a unsigned integer to signed
of given bits length
"""
logging.debug(" in: value = %d", val)
mask = 0x00
for i in range(int(bits / 8)):
mask |= 0xff << (i * 8)
if val >= (1 << (bits - 1)):
val = -1 - (val ^ mask)
logging.debug(" out: value = %d", val)
return val | 5,356,727 |
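# Worked examples for __to_signed (two's-complement reinterpretation of an
# unsigned value of the given bit width).
assert __to_signed(0xFF, 8) == -1
assert __to_signed(0x80, 8) == -128
assert __to_signed(0x7F, 8) == 127
assert __to_signed(0x8000, 16) == -32768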
def set_edges_connected_nodes(nodes, edges):
"""
Fill the lists of incoming and outgoing edges of the input nodes
(lists are attributes of Node objects).
The connection between nodes and edges is given by the start node and
end node of each edge.
:param nodes: list of all the nodes making up the river network
:type nodes: list of Node objects
:param edges: list of all the edges making up the river network
:type edges: list of Edge objects
"""
# For each node of the network
for node in nodes:
# For each edge of the network
for edge in edges:
# start node and end node of the edge
node1 = edge.node_start
node2 = edge.node_end
# If the current node is the start node of the edge
if node1.id_node == node.id_node:
# Then the edge exits the node
node.edges_out.append(edge)
# If the current node is the end node of the edge
if node2.id_node == node.id_node:
# Then the edge enters the node
node.edges_in.append(edge) | 5,356,728 |
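# Minimal sketch of the Node/Edge shapes assumed by set_edges_connected_nodes
# (attribute names are taken from the function body; the real classes in the
# project may differ).
class Node:
    def __init__(self, id_node):
        self.id_node = id_node
        self.edges_in = []
        self.edges_out = []

class Edge:
    def __init__(self, node_start, node_end):
        self.node_start = node_start
        self.node_end = node_end

a, b = Node(1), Node(2)
e = Edge(a, b)
set_edges_connected_nodes([a, b], [e])
assert e in a.edges_out and e in b.edges_in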
def compute_qp_objective(
configuration: Configuration, tasks: Iterable[Task], damping: float
) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the Hessian matrix :math:`H` and linear vector :math:`c` of the
QP objective function:
.. math::
\\frac{1}{2} \\Delta q^T H \\Delta q + c^T \\Delta q
The configuration displacement :math:`\\Delta q` is the output of inverse
kinematics (we divide it by :math:`\\Delta t` to get a commanded velocity).
Args:
configuration: Robot configuration to read kinematics from.
tasks: List of kinematic tasks to fulfill at (weighted) best.
damping: weight of Tikhonov (everywhere) regularization. Its unit is
`[cost]^2 / [tangent]` where `[tangent]` is "the" unit of robot
velocities. Improves numerical stability, but larger values slow
down all tasks.
Returns:
Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP
objective.
"""
H = damping * configuration.tangent.eye
c = configuration.tangent.zeros
for task in tasks:
H_task, c_task = task.compute_qp_objective(configuration)
H += H_task
c += c_task
return (H, c) | 5,356,729 |
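# Minimal runnable sketch for compute_qp_objective with stand-in objects; the real
# Configuration and Task types come from the surrounding inverse-kinematics library,
# and only the attributes used above are mocked here.
import numpy as np

class _Tangent:
    def __init__(self, dim):
        self.eye = np.eye(dim)
        self.zeros = np.zeros(dim)

class _Configuration:
    def __init__(self, dim):
        self.tangent = _Tangent(dim)

class _Task:
    """Quadratic task 0.5 * ||dq - target||^2, linearized at dq = 0."""
    def __init__(self, target):
        self.target = np.asarray(target, dtype=float)
    def compute_qp_objective(self, configuration):
        return np.eye(self.target.size), -self.target

H, c = compute_qp_objective(_Configuration(3), [_Task([0.1, 0.0, -0.2])], damping=1e-3)
print(H.shape, c)  # (3, 3); c is roughly the negated target, H the task Hessian plus damping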
def main():
""" An example shows infernece of one model on multiple TPUs.
"""
signal.signal(signal.SIGINT, signal_handle)
# init Engine to load bmodel and allocate input and output tensors
# one engine for one TPU
engines = list()
thread_num = len(ARGS.tpu_id)
for i in range(thread_num):
engines.append(sail.Engine(ARGS.bmodel, ARGS.tpu_id[i], sail.SYSIO))
# create threads for inference
threads = list()
status = [None] * thread_num
for i in range(thread_num):
threads.append(threading.Thread(target=thread_infer,
args=(i, engines[i], \
ARGS.input, ARGS.loops, \
ARGS.compare, status)))
for i in range(thread_num):
threads[i].daemon = True
threads[i].start()
while True:
alive=False
for t in threads:
alive = alive or t.is_alive()
if alive == True:
break
if not alive:
break
for stat in status:
if not stat:
sys.exit(-1)
sys.exit(0) | 5,356,730 |
def showPhaseSpectrum(ax, freq, phi, ylabel=r'$-\phi$ (mrad)',
grid=True, marker='+', ylog=False, **kwargs):
"""Show phase spectrum (-phi as a function of f)."""
if 'label' not in kwargs:
kwargs['label'] = 'obs'
ax.semilogx(freq, phi, marker=marker, **kwargs)
if ylog:
ax.set_yscale('log')
ax.set_xlabel('f (Hz)')
ax.set_ylabel(ylabel)
ax.grid(grid) | 5,356,731 |
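# Usage sketch for showPhaseSpectrum with synthetic data (assumes matplotlib is
# the plotting backend, as the axes API implies).
import numpy as np
import matplotlib.pyplot as plt

freq = np.logspace(-2, 3, 30)        # 0.01 Hz .. 1 kHz
phi = 10 + 5 * np.log10(freq + 1)    # arbitrary smooth phase curve in mrad
fig, ax = plt.subplots()
showPhaseSpectrum(ax, freq, phi, label='synthetic')
ax.legend()
plt.show()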
def _is_existing_account(respondent_email):
"""
Checks if the respondent already exists against the email address provided
:param respondent_email: email of the respondent
:type respondent_email: str
:return: returns true if account already registered
:rtype: bool
"""
respondent = party_controller.get_respondent_by_email(respondent_email)
if not respondent:
return False
return True | 5,356,732 |
def clean_darknet(
darknet_dir: str,
images_dir: str,
label_replacements: Dict,
label_removals: List[str] = None,
label_keep: List[str] = None,
problems_dir: str = None,
):
"""
TODO
:param darknet_dir:
:param images_dir:
:param label_replacements:
:param label_removals:
:param problems_dir:
:return:
"""
_logger.info("Cleaning dataset with Darknet annotations")
# convert all PNG images to JPG, and remove the original PNG file
for file_id in matching_ids(darknet_dir, images_dir, ".txt", ".png"):
png_file_path = os.path.join(images_dir, file_id + ".png")
png_to_jpg(png_file_path, remove_png=True)
# get the set of file IDs of the Darknet-format annotations and corresponding images
file_ids = purge_non_matching(images_dir, darknet_dir, "darknet", problems_dir)
# loop over all the matching files and clean the Darknet annotations
for file_id in tqdm(file_ids):
# update the Darknet annotation file
src_annotation_file_path = os.path.join(darknet_dir, file_id + ".txt")
for line in fileinput.input(src_annotation_file_path, inplace=True):
# get the bounding box label
parts = line.split()
label = parts[0]
# skip rewriting this line if it's a label we want removed
if (label_removals is not None) and (label in label_removals):
continue
# skip rewriting this line if it's a label we do not want to keep
if (label_keep is not None) and (label not in label_keep):
continue
# get the bounding box coordinates
center_x = float(parts[1])
center_y = float(parts[2])
bbox_width = float(parts[3])
bbox_height = float(parts[4])
if (label_replacements is not None) and (label in label_replacements):
# update the label
label = label_replacements[label]
# make sure we don't have wonky bounding box values
# and if so we'll skip them
if (center_x > 1.0) or (center_x < 0.0):
# report the issue via log message
_logger.warning(
"Bounding box center X is out of valid range -- skipping "
f"in Darknet annotation file {src_annotation_file_path}",
)
continue
if (center_y > 1.0) or (center_y < 0.0):
# report the issue via log message
_logger.warning(
"Bounding box center Y is out of valid range -- skipping "
f"in Darknet annotation file {src_annotation_file_path}",
)
continue
if (bbox_width > 1.0) or (bbox_width < 0.0):
# report the issue via log message
_logger.warning(
"Bounding box width is out of valid range -- skipping "
f"in Darknet annotation file {src_annotation_file_path}",
)
continue
if (bbox_height > 1.0) or (bbox_height < 0.0):
# report the issue via log message
_logger.warning(
"Bounding box height is out of valid range -- skipping "
f"in Darknet annotation file {src_annotation_file_path}",
)
continue
# write the line back into the file in-place
darknet_parts = [
label,
f'{center_x:.4f}',
f'{center_y:.4f}',
f'{bbox_width:.4f}',
f'{bbox_height:.4f}',
]
print(" ".join(darknet_parts)) | 5,356,733 |
def basic_checks(server,port):
"""Perform basics checks on given host"""
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# 2 seconds timeout
sock.settimeout(2)
return sock.connect_ex((server,int(port))) == 0 | 5,356,734 |
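# Usage sketch for basic_checks (host below is illustrative; `import socket` is
# assumed at module level). Returns True when a TCP connect succeeds within 2 s.
if basic_checks("example.com", 443):
    print("port reachable")
else:
    print("port closed or host unreachable")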
def test_input_unmodified_with_nan(boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve_fft doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
with expected_boundary_warning(boundary=boundary):
z = convolve_fft(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# ( NaN == NaN ) = False
# Only compare non NaN values for canonical equivalence
# and then check NaN explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan])) | 5,356,735 |
def test(ipu_estimator, args, x_test, y_test):
"""
Test the model on IPU by loading weights from the final checkpoint in the
given `args.model_dir`.
"""
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.prefetch(len(x_test)).cache()
dataset = dataset.batch(args.batch_size, drop_remainder=True)
return dataset
num_test_examples = len(x_test)
steps = num_test_examples // args.batch_size
# IPUEstimator requires no remainder; batches_per_step must divide steps
steps -= steps % args.batches_per_step
print(f"Evaluating on {steps * args.batch_size} examples")
# Set up profiling hook
hooks = []
if args.profile:
hooks.append(ProfilerHook(ipu_estimator.model_dir, name='eval'))
t0 = time.time()
metrics = ipu_estimator.evaluate(input_fn=input_fn,
steps=steps,
hooks=hooks)
t1 = time.time()
test_loss = metrics["loss"]
test_accuracy = metrics["accuracy"]
duration_seconds = t1 - t0
print("Test loss: {:g}".format(test_loss))
print("Test accuracy: {:.2f}%".format(100 * test_accuracy))
print(f"Took {duration_seconds:.2f} seconds to compile and run") | 5,356,736 |
def _get_service():
"""Gets service instance to start API searches.
Returns:
A Google API Service used to send requests.
"""
# Create the AI Platform service object.
# To authenticate set the environment variable
# GOOGLE_APPLICATION_CREDENTIALS=<path_to_service_account_file>
return googleapiclient.discovery.build('ml', 'v1') | 5,356,737 |
def giq(scores, targets, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
"""
Generalized inverse quantile conformity score function.
E from equation (7) in Romano, Sesia, Candes. Find the minimum tau in [0, 1] such that the correct label enters.
"""
E = -np.ones((scores.shape[0],))
for i in range(scores.shape[0]):
E[i] = get_tau(
scores[i : i + 1, :],
targets[i].item(),
I[i : i + 1, :],
ordered[i : i + 1, :],
cumsum[i : i + 1, :],
penalties[0, :],
randomized=randomized,
allow_zero_sets=allow_zero_sets,
)
return E | 5,356,738 |
def setup_logging(forceDebug=False):
"""
General function to set up the logger.
Everything from debug messages to stdout messages is handled by loggers.
The stdout logger handles info and warning messages to STDOUT.
The stderr logger handles error and critical messages to stderr.
Anything else goes to the debug logger, which logs everything to /var/tmp/oci-utils.log.
"""
flatFormatter = logging.Formatter('%(message)s')
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s(%(module)s:%(lineno)s) - %(message)s')
handler = None
if os.environ.get('_OCI_UTILS_SYSLOG'):
handler = logging.handlers.SysLogHandler(address='/dev/log',
facility=logging.handlers.SysLogHandler.LOG_DAEMON)
else:
if forceDebug:
try:
handler = logging.handlers.RotatingFileHandler(
'/var/tmp/oci-utils.log', mode='a', maxBytes=1024 * 1024, backupCount=3)
handler.setFormatter(formatter)
handler.setLevel(logging.NOTSET)
except Exception as _:
# keep it silent
pass
logger = logging.getLogger('oci-utils')
logger.setLevel(logging.INFO)
stdoutHandler = logging.StreamHandler(stream=sys.stdout)
stdoutHandler.setFormatter(flatFormatter)
stdoutHandler.addFilter(levelsFilter([logging.INFO, logging.WARNING]))
stderrHandler = logging.StreamHandler(stream=sys.stderr)
stderrHandler.setFormatter(flatFormatter)
stderrHandler.addFilter(levelsFilter([logging.ERROR, logging.CRITICAL]))
if handler is not None:
logger.addHandler(handler)
logger.addHandler(stdoutHandler)
logger.addHandler(stderrHandler)
if forceDebug:
logger.setLevel(logging.DEBUG)
if handler is not None:
handler.setLevel(logging.DEBUG) | 5,356,739 |
def findUser(userId):
"""
:param userId:
:return: The user obj
Finds a particular user from a dataset.
"""
return user_collection.find_one({"user_id": userId}) | 5,356,740 |
def sub_sample_map(data, aug_map, n_input, n_output, n_teach, buffer):
"""
Expands an augmentation map to produce indexes that will allow
targets values of previous outputs to be used as inputs
"""
n_io = n_input + n_output
n_req = n_io
teach_range = range(n_teach)
tf_map = []
for map_ in aug_map:
sample = data[map_["orig_sample_idx"]]
n = len(sample)
i = np.random.randint(n - n_io - n_teach - buffer)
j = i + n_req + n_teach + buffer
new_map_ = {"strt_idx": i, "end_idx": j, **map_}
tf_map.append(new_map_)
return tf_map | 5,356,741 |
def preprocess_agents_history(agents, new_data, filename):
"""Process new data into existing data object
:param agents: Existing data object
:type agents: dictionary
:param new_data: New json that needs to be applied on existing data
:type new_data: dictionary
:param filename: original filename of the new data
:type filename: string
"""
unixtime = filename.replace('.json','')
date = format_unixtime(unixtime)
for agency_id in new_data:
agency = new_data[agency_id]
if 'branches' not in agency.keys():
continue
for franchize in agency['branches']:
if 'agents' not in franchize.keys():
continue
for agent in franchize['agents']:
ic = agent['ic']
if ic not in agents:
agents[ic] = {}
agents[ic]['ic'] = ic
agents[ic]['name'] = agent['name'] or ''
agents[ic]['estate_counts'] = agent['estates_count'] or 0
historize('agency_history', agents[ic], agency['name'], format_unixtime(unixtime)) | 5,356,742 |
def capture_flow(pkt_hist):
"""
Monitors the flow in the file.
:param pkt_hist: a list of raw eth packets
:return: 0 (No errors)
"""
closedby = []
global numFlows, flow_buffer, sent_buffer, ackd_buffer, received_buffer, retransmissions, end_ts, retransmissions_timeout, retransmissions_fast
# print "Starting capture"
cnt = 0
for ts, pkt in pkt_hist:
cnt+=1
# print "PACKET -----" + str(cnt)
tcp_packet = get_tcp_packet(pkt, ts)
# print "Seq Num :", str(tcp_packet.seqNum), "| Ack Num :", tcp_packet.ackNum, "| Packet Size :", tcp_packet.pSize, "| Payload Length :", tcp_packet.payloadLength
fState = getFlowStateForPacket(tcp_packet)
if(fState == 2):
#This means that the flow is in active state
# print "Packet belongs to Flow", str(getFlowID(tcp_packet)), "which is already in ACTIVE state."
pkt_id = add_packet(tcp_packet, cnt, ts)
if (tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 0):
updateFlowState(tcp_packet, 3)
closedby.append([getFlowID(tcp_packet), cnt, "SENDERCLOSE"])
# FIN ACKed by sender
if(tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 1):
updateFlowState(tcp_packet, 4)
closedby.append([getFlowID(tcp_packet), cnt, "RECVRCLOSE"])
# FIN ACKed by server
elif(fState == 3):
pkt_id = add_packet(tcp_packet, cnt, ts)
if (tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 1):
updateFlowState(tcp_packet, 5)
closedby.append([getFlowID(tcp_packet), cnt, "RECVRCLOSE"])
# Was in 3 state (finned by sender). Now also FIN ACKed by server
elif(fState == 4):
pkt_id = add_packet(tcp_packet, cnt, ts)
if (tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 0):
updateFlowState(tcp_packet, 5)
closedby.append([getFlowID(tcp_packet), cnt, "SENDERCLOSE"])
# Was in 4 state (finned by server). Now also FIN ACKed by sender
elif(fState == 5):
if(tcp_packet._ACK == 1):
# Just a stupid ack
add_packet(tcp_packet, cnt, ts)
end_ts[getFlowID(tcp_packet)] = ts
else:
print "Suspicious Packet."
print(closedby)
printFlowBuffer()
break
else:
if(tcp_packet._SYN == 1 and tcp_packet._ACK == 0):
# print "Flow initiated with timestamp", ts
fid = newFlow(tcp_packet)
# updateFlowState(fid, 0) NO NEED TO DO THAT, WHEN WE CREATE A NEW FLOW, ITS DEFAULT STATE IS 0
if(tcp_packet._SYN == 1 and tcp_packet._ACK == 1):
# print "Flow SYN/ACK Received"
updateFlowState(tcp_packet, 1)
winscale[getFlowID(tcp_packet)] = tcp_packet.winscale
if (tcp_packet._SYN == 0 and tcp_packet._ACK == 1):
# TODO: in this case we need to check whether it is a normal ACK or the handshake ACK
updateFlowState(tcp_packet, 2)
updateFlowWinSize(tcp_packet)
pkt_id = add_packet(tcp_packet, cnt, ts)
# print "Sent Buffer Length : ", len(sent_buffer), " | Received Buffer Length : ", len(received_buffer), " | Acked Buffer Length : ", len(ackd_buffer)
# printFlowBuffer()
# print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n"
if(pkt_id == False):
print " >> No TCP Flow registered for this packet with timestamp", ts
break
# print_first_two_trans()
# print closedby
return 0 | 5,356,743 |
def run_customcheck_command(check):
"""Function that starts as a thread (future) to process a custom check command
Process a custom check command until a given timeout.
The result will be added to the cached_customchecks_check_data object.
process_customcheck_results() takes care of a run_customcheck_command thread that may have died.
Parameters
----------
check
Object containing the specific check data (name, command, timeout)
"""
print_verbose('Start custom check "%s" with timeout %s at %s' % (str(check['name']), str(check['timeout']), str(round(time.time()))), False)
agent_log.info('Start custom check "%s" with timeout %s at %s' % (str(check['name']), str(check['timeout']), str(round(time.time()))))
cached_customchecks_check_data[check['name']]['running'] = "true"
cached_customchecks_check_data[check['name']]['command'] = check['command']
try:
p = subprocess.Popen(check['command'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
stdout, stderr = p.communicate(timeout=int(check['timeout']))
p.poll()
if stdout:
stdout = stdout.decode()
if stderr:
stderr = stderr.decode()
cached_customchecks_check_data[check['name']]['result'] = str(stdout)
cached_customchecks_check_data[check['name']]['error'] = None if str(stderr) == 'None' else str(stderr)
cached_customchecks_check_data[check['name']]['returncode'] = p.returncode
except subprocess.TimeoutExpired:
print_verbose('Custom check "%s" timed out' % (check['name']), False)
agent_log.error('Custom check "%s" timed out' % (check['name']))
p.kill() #not needed; just to be sure
cached_customchecks_check_data[check['name']]['result'] = None
cached_customchecks_check_data[check['name']]['error'] = 'Command timeout after ' + str(check['timeout']) + ' seconds'
cached_customchecks_check_data[check['name']]['returncode'] = 124
except:
print_verbose('An error occured while running the custom check "%s"!' % (check['name']), True)
agent_log.error('An error occured while running the custom check "%s"!' % (check['name']))
if stacktrace:
traceback.print_exc()
cached_customchecks_check_data[check['name']]['last_updated_timestamp'] = round(time.time())
cached_customchecks_check_data[check['name']]['last_updated'] = time.ctime()
del cached_customchecks_check_data[check['name']]['running']
return True | 5,356,744 |
def rcGetBBModelEnum():
""" Get the BeagleBone model as member of the BBModel Enum. """
return BBModel(rcGetBBModel()) | 5,356,745 |
def verify_table_html(*, expected_html, query=None, find=None, table, **kwargs):
"""
Verify that the table renders to the expected markup, modulo formatting
"""
from bs4 import BeautifulSoup
if find is None:
find = dict(class_='table')
if not expected_html.strip():
expected_html = "<table/>" # pragma: no cover
if isinstance(table, Namespace):
table = table()
table: Table
request = RequestFactory().get("/", query)
if not table._is_bound:
table = table.bind(request=request)
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
actual_html = remove_csrf(table.__html__(**kwargs))
expected_soup = BeautifulSoup(expected_html, 'html.parser')
prettified_expected = reindent(expected_soup.find(**find).prettify()).strip()
actual_soup = BeautifulSoup(actual_html, 'html.parser')
hit = actual_soup.find(**find)
if not hit: # pragma: no cover
print(actual_html)
assert False, f"Couldn't find selector {find} in actual output"
assert hit, actual_soup
prettified_actual = reindent(hit.prettify()).strip()
if prettified_actual != prettified_expected: # pragma: no cover
print(actual_html)
assert prettified_actual == prettified_expected | 5,356,746 |
def rapsearch_alignment(alignment_file, uniref, unaligned_reads_file_fasta):
"""
Run rapsearch alignment on database formatted for rapsearch
"""
bypass = utilities.check_outfiles([alignment_file])
exe = "rapsearch"
opts = config.rapsearch_opts
args = ["-q", unaligned_reads_file_fasta, "-b", 0, "-e", math.log10(config.evalue_threshold)]
if config.threads > 1:
args += ["-z", config.threads]
message = "Running " + exe + " ........"
logger.info(message)
print("\n" + message + "\n")
if not bypass:
args += opts
temp_out_files = []
# Find the rapsearch database files in the directory
# These will be files of the same name as the *.info files
files = os.listdir(uniref)
rapsearch_databases = []
for file in files:
if file.endswith(config.rapsearch_database_extension):
# Check for the corresponding database file
database_file = re.sub(config.rapsearch_database_extension + "$", "", file)
if database_file in files:
rapsearch_databases.append(database_file)
for database in rapsearch_databases:
input_database = os.path.join(uniref, database)
full_args = args + ["-d", input_database]
# create temp output file
temp_out_file = utilities.unnamed_temp_file("rapsearch_m8_")
utilities.remove_file(temp_out_file)
temp_out_files.append(temp_out_file + config.rapsearch_output_file_extension)
full_args += ["-o", temp_out_file]
utilities.execute_command(exe, full_args, [input_database], [])
# merge the temp output files
utilities.execute_command("cat", temp_out_files, temp_out_files, [alignment_file],
alignment_file)
else:
message = "Bypass"
logger.info(message)
print(message) | 5,356,747 |
def ordToString(ordList):
"""Use this function to convert ord values to strings."""
newStrList = []
cstr = ""
for cint in ordList:
cstr += chr(cint)
if cint == 44:
newStrList.append(cstr[:-1])
cstr = ""
return newStrList | 5,356,748 |
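# Usage sketch (not part of the original source): each comma (ord 44) closes one
# string, so trailing characters that are not followed by a comma are dropped.
# ordToString([104, 105, 44, 98, 121, 101, 44])  ->  ['hi', 'bye']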
def remove_element(collection: str, e_id: int):
"""
Remove an element from the database.
:param collection: Collection name
:param e_id: Element id.
:raise DatabaseError: If the element is not in the collection.
"""
if _collections[collection].count_documents({"_id": e_id}) != 0:
_collections[collection].delete_one({"_id": e_id})
else:
raise DatabaseError(f"Element {e_id} doesn't exist in collection {collection}") | 5,356,749 |
def get_test_data_for_successful_build():
"""Returns a test data set of test suites and cases that passed.
"""
return _get_test_data(["PASSED", "PASSED", "PASSED"]) | 5,356,750 |
def get_commits(db_session, gh_session, repos):
"""
    Given a list of Repo row objects, get all associated commits and file changes (on the default branch) for each repo.
:param db_session: the database session
:type db_session: sqlalchemy.orm.session.Session
:param gh_session: the requests session with the github api
:type gh_session: requests.sessions.Session
:param repos: list of Repo row objects
:type repos: List[ghstats.orm.orm.Repo]
"""
for repo in repos:
get_commit_q = db_session.query(Commit.sha).filter(Commit.repo_id == repo.id)
existing_commits = {commit.sha for commit in get_commit_q.all()}
commits, _ = get_all(gh_session, '{}/commits'.format(repo.url))
new_commits = (sha for sha in (c['sha'].encode() for c in commits if 'sha' in c) if sha not in existing_commits)
for commit_sha in new_commits:
exists = db_session.query(Commit.sha).filter(Commit.sha == commit_sha).scalar()
if exists:
continue
(commit,), _ = get_all(gh_session, '{}/commits/{}'.format(repo.url, commit_sha.decode()))
committer, committer_email = get_committer(db_session, gh_session, commit)
author, author_email = get_author(db_session, gh_session, commit)
new_commit = Commit(
name=commit['commit']['message'],
sha=commit_sha,
repo=repo,
additions=commit['stats']['additions'],
deletions=commit['stats']['deletions'],
committer=committer,
committer_email=committer_email,
committed_at=parse_gh_date(commit['commit']['committer']['date']),
author=author,
author_email=author_email,
authored_at=parse_gh_date(commit['commit']['author']['date']),
)
db_session.add(new_commit)
for file in commit['files']:
new_file = File(
commit=new_commit,
filename=file['filename'],
status=file['status'],
additions=file['additions'],
deletions=file['deletions'],
)
db_session.add(new_file)
db_session.commit() | 5,356,751 |
async def TwitterRetweetAPI(
current_user: User = Depends(User.getCurrentUser),
):
"""
    API implementation in progress... (mock-up)
""" | 5,356,752 |
def fromOldAdjacencyList(adjlist, group=False, saturateH=False):
"""
Convert a pre-June-2014 string adjacency list `adjlist` into a set of :class:`Atom` and
:class:`Bond` objects.
    It can read both the "old style" that existed for years, and the "intermediate style" that
existed for a few months in 2014, with the extra column of integers for lone pairs.
"""
atoms = []
atomdict = {}
bonds = {}
try:
adjlist = adjlist.strip()
lines = adjlist.splitlines()
if adjlist == '' or len(lines) == 0:
raise InvalidAdjacencyListError('Empty adjacency list.')
# Skip the first line if it contains a label
if len(lines[0].split()) == 1:
label = lines.pop(0)
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
        mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}')
atomicMultiplicities = {} # these are no longer stored on atoms, so we make a separate dictionary
# Iterate over the remaining lines, generating Atom or GroupAtom objects
for line in lines:
# Sometimes people put spaces after commas, which messes up the
# parse-by-whitespace. Examples include '{Cd, Ct}'.
if mistake1.search(line):
raise InvalidAdjacencyListError(
"Shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group())
)
# Sometimes commas are used to delimit bonds in the bond list,
# so replace them just in case
line = line.replace('},{', '} {')
data = line.split()
# Skip if blank line
if len(data) == 0: continue
# First item is index for atom
# Sometimes these have a trailing period (as if in a numbered list),
# so remove it just in case
aid = int(data[0].strip('.'))
# If second item starts with '*', then atom is labeled
label = ''; index = 1
if data[1][0] == '*':
label = data[1]
index += 1
# Next is the element or functional group element
# A list can be specified with the {,} syntax
atomType = data[index]
if atomType[0] == '{':
atomType = atomType[1:-1].split(',')
else:
atomType = [atomType]
index += 1
# Next is the electron state
radicalElectrons = []; atomSpinMultiplicity = []
elecState = data[index].upper()
if elecState[0] == '{':
elecState = elecState[1:-1].split(',')
else:
elecState = [elecState]
for e in elecState:
if e == '0':
radicalElectrons.append(0); atomSpinMultiplicity.append(1)
elif e == '1':
radicalElectrons.append(1); atomSpinMultiplicity.append(2)
elif e == '2':
radicalElectrons.append(2); atomSpinMultiplicity.append(1)
radicalElectrons.append(2); atomSpinMultiplicity.append(3)
elif e == '2S':
radicalElectrons.append(2); atomSpinMultiplicity.append(1)
elif e == '2T':
radicalElectrons.append(2); atomSpinMultiplicity.append(3)
elif e == '3':
radicalElectrons.append(3); atomSpinMultiplicity.append(4)
elif e == '3D':
radicalElectrons.append(3); atomSpinMultiplicity.append(2)
elif e == '3Q':
radicalElectrons.append(3); atomSpinMultiplicity.append(4)
elif e == '4':
radicalElectrons.append(4); atomSpinMultiplicity.append(5)
elif e == '4S':
radicalElectrons.append(4); atomSpinMultiplicity.append(1)
elif e == '4T':
radicalElectrons.append(4); atomSpinMultiplicity.append(3)
elif e == '4V':
radicalElectrons.append(4); atomSpinMultiplicity.append(5)
elif e == 'X':
radicalElectrons.extend([0,1,2,2])
atomSpinMultiplicity.extend([1,2,1,3])
index += 1
# Next number defines the number of lone electron pairs (if provided)
lonePairsOfElectrons = -1
if len(data) > index:
lpState = data[index]
if lpState[0] == '{':
# this is the start of the chemical bonds - no lone pair info was provided
lonePairsOfElectrons = -1
else:
if lpState == '0':
lonePairsOfElectrons = 0
if lpState == '1':
lonePairsOfElectrons = 1
if lpState == '2':
lonePairsOfElectrons = 2
if lpState == '3':
lonePairsOfElectrons = 3
if lpState == '4':
lonePairsOfElectrons = 4
index += 1
else: # no bonds or lone pair info provided.
lonePairsOfElectrons = -1
# Create a new atom based on the above information
if group:
# charge currently not allowed
atom = GroupAtom(atomType=atomType,
radicalElectrons=sorted(set(radicalElectrons)),
charge=[0],
label=label,
lonePairs=(None if lonePairsOfElectrons==-1 else [lonePairsOfElectrons])
)
else:
atom = Atom(element=atomType[0],
radicalElectrons=radicalElectrons[0],
charge=0,
label=label,
lonePairs=lonePairsOfElectrons
)
atomicMultiplicities[atom] = atomSpinMultiplicity
# Add the atom to the list
atoms.append(atom)
atomdict[aid] = atom
# Process list of bonds
bonds[aid] = {}
for datum in data[index:]:
# Sometimes commas are used to delimit bonds in the bond list,
# so strip them just in case
datum = datum.strip(',')
aid2, comma, order = datum[1:-1].partition(',')
aid2 = int(aid2)
if aid == aid2:
raise InvalidAdjacencyListError('Attempted to create a bond between atom {0:d} and itself.'.format(aid))
if order[0] == '{':
order = order[1:-1].split(',')
else:
order = [order]
bonds[aid][aid2] = order
if group:
multiplicity = None
else:
multiplicity = 1
for atom in atoms:
multiplicity += max(atomicMultiplicities[atom]) - 1
# Check consistency using bonddict
for atom1 in bonds:
for atom2 in bonds[atom1]:
if atom2 not in bonds:
raise InvalidAdjacencyListError('Atom {0:d} not in bond dictionary.'.format(atom2))
elif atom1 not in bonds[atom2]:
raise InvalidAdjacencyListError('Found bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2))
elif bonds[atom1][atom2] != bonds[atom2][atom1]:
raise InvalidAdjacencyListError('Found bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1]))
# Convert bonddict to use Atom[group] and Bond[group] objects
        atomkeys = sorted(atomdict.keys())
        for aid1 in atomkeys:
            atomkeys2 = sorted(bonds[aid1].keys())
for aid2 in atomkeys2:
if aid1 < aid2:
atom1 = atomdict[aid1]
atom2 = atomdict[aid2]
order = bonds[aid1][aid2]
if group:
bond = GroupBond(atom1, atom2, order)
elif len(order) == 1:
bond = Bond(atom1, atom2, order[0])
else:
raise InvalidAdjacencyListError('Multiple bond orders specified for an atom in a Molecule.')
atom1.edges[atom2] = bond
atom2.edges[atom1] = bond
if saturateH and not group:
# Add explicit hydrogen atoms to complete structure if desired
valences = {'H': 1, 'C': 4, 'O': 2, 'N': 3, 'S': 2, 'Si': 4, 'Cl': 1, 'He': 0, 'Ne': 0, 'Ar': 0}
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
newAtoms = []
for atom in atoms:
try:
valence = valences[atom.symbol]
except KeyError:
raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown valence for atom "{0}".'.format(atom.symbol))
radical = atom.radicalElectrons
order = 0
for atom2, bond in atom.bonds.items():
order += orders[bond.order]
count = valence - radical - int(order)
for i in range(count):
a = Atom(element='H', radicalElectrons=0, charge=0, label='')
b = Bond(atom, a, 'S')
newAtoms.append(a)
atom.bonds[a] = b
a.bonds[atom] = b
atoms.extend(newAtoms)
# Calculate the number of lone pair electrons requiring molecule with all hydrogen atoms present
if not group and lonePairsOfElectrons == -1:
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
for atom in atoms:
radical = atom.radicalElectrons
order = 0
for atom2, bond in atom.bonds.items():
order += orders[bond.order]
lonePairs = (1 if atom.symbol == 'H' or atom.symbol == 'He' else 4) - order - radical
atom.setLonePairs(lonePairs)
atom.updateCharge()
elif not group:
for atom in atoms:
atom.updateCharge()
except InvalidAdjacencyListError:
logging.error("Troublesome adjacency list:\n" + adjlist)
raise
return atoms, multiplicity | 5,356,753 |
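# Illustrative call (a sketch, not from the original source): a minimal pre-2014
# adjacency list for a C-O heavy-atom skeleton; Atom/Bond/GroupAtom/GroupBond are
# assumed to come from the surrounding package.
#
# adjlist = """
# 1 C 0 {2,S}
# 2 O 0 {1,S}
# """
# atoms, multiplicity = fromOldAdjacencyList(adjlist, saturateH=True)
# # -> the two heavy atoms plus the explicit hydrogens added by saturateH, multiplicity == 1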
def read_arg_optional(
src, args, n_optional=-1, tolerance=0, mode=MODE_NON_MATH, skip_math=False):
"""Read next optional argument from buffer.
If the command has remaining optional arguments, look for:
a. A spacer. Skip the spacer if it exists.
b. A bracket delimiter. If the optional argument is bracket-delimited,
the contents of the bracket group are used as the argument.
:param Buffer src: a buffer of tokens
:param TexArgs args: existing arguments to extend
:param int n_optional: Number of optional arguments. If < 0, all valid
bracket groups will be captured.
:param int tolerance: error tolerance level (only supports 0 or 1)
:param str mode: math or not math mode
:return: number of remaining optional arguments
:rtype: int
"""
while n_optional != 0:
spacer = read_spacer(src)
if not (src.hasNext() and src.peek().category == TC.BracketBegin):
if spacer:
src.backward(1)
break
args.append(read_arg(src, next(src), tolerance=tolerance, mode=mode, skip_math=skip_math))
n_optional -= 1
return n_optional | 5,356,754 |
def searcheduxapian_ajax_get_schlagwort(request, item_container):
""" moegliche Schlagworte """
schlagworte = get_schlagworte(request.GET['query'])
res = '<items>\n'
for schlagwort in schlagworte:
res += '<schlagwort>\n<name><![CDATA[%s]]></name>\n</schlagwort>\n' % schlagwort.name
res += '</items>\n'
return HttpResponse(res, mimetype="text/xml; charset=utf-8") | 5,356,755 |
async def answer(pc, signaling):
"""
Connect to server and receive tracks by sending an answer after awaiting an offer
"""
@pc.on("track")
def on_track(track):
print("Receiving %s" % track.kind)
if track.kind == "video":
pc.addTrack(BallTransformTrack(track))
await signaling.connect()
while True:
obj = await signaling.receive()
if isinstance(obj, RTCSessionDescription):
await pc.setRemoteDescription(obj)
if obj.type == "offer":
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
elif isinstance(obj, RTCIceCandidate):
await pc.addIceCandidate(obj)
elif obj is BYE:
print("Exiting Program")
break | 5,356,756 |
def expanded_X_y_sample_weights(X, y_proba, expand_factor=10,
sample_weight=None, shuffle=True,
random_state=None):
"""
scikit-learn can't optimize cross-entropy directly if target
probability values are not indicator vectors.
As a workaround this function expands the dataset according to
target probabilities. ``expand_factor=None`` means no dataset
expansion.
"""
rng = check_random_state(random_state)
if expand_factor:
if sample_weight is not None:
X, y, sample_weight = zip(*expand_dataset(X, y_proba,
factor=expand_factor,
random_state=rng,
extra_arrays=[
sample_weight
]))
else:
X, y = zip(*expand_dataset(X, y_proba,
factor=expand_factor,
random_state=rng))
else:
y = y_proba.argmax(axis=1)
if isinstance(X, (list, tuple)) and len(X) and issparse(X[0]):
X = vstack(X)
if shuffle:
if sample_weight is not None:
X, y, sample_weight = _shuffle(X, y, sample_weight,
random_state=rng)
else:
X, y = _shuffle(X, y, random_state=rng)
return X, y, sample_weight | 5,356,757 |
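# Comment-only sketch of the intended effect (relies on the module's
# expand_dataset/_shuffle helpers, which are not shown here): a single row with
# y_proba = [[0.7, 0.3]] and expand_factor=10 is expanded into roughly 7 copies
# labelled class 0 and 3 copies labelled class 1.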
def refine_uniformly(dom, seg):
"""
Refine all edges of the given domain and segmentation.
:param dom: Domain to refine
:type dom: :class:`viennagrid.Domain`
:param seg: Segmentation of the domain to refine
:type seg: :class:`viennagrid.Segmentation`
:returns: A two-element tuple containing the output domain and segmentation after the refinement.
:raises: TypeError
"""
try:
config = dom.config
dom = dom._domain
except AttributeError:
raise TypeError('parameter at position 1 is not a valid domain')
try:
seg = seg._segmentation
except AttributeError:
raise TypeError('parameter at position 2 is not a valid domain')
refined_result = viennagrid.wrapper.refine_uniformly(dom, seg)
refined_domain = viennagrid.Domain(config)
refined_domain._domain = refined_result[0]
refined_segmentation = viennagrid.Segmentation(refined_domain)
refined_segmentation._segmentation = refined_result[1]
return (refined_domain, refined_segmentation) | 5,356,758 |
def add_losses_to_graph(loss_fn, inputs, outputs, configuration, is_chief=False, verbose=0):
"""Add losses to graph collections.
Args:
loss_fn: Loss function. Should have signature f: (dict, dict, is_chief, **kwargs) -> tuple of losses and names
inputs: inputs dictionary
outputs: outputs dictionary
configuration: configuration dictionary
is_chief: Whether the current process is chief or not
verbose: Verbosity level
"""
losses = loss_fn(inputs, outputs, is_chief=is_chief, verbose=is_chief * verbose, **configuration)
for key, loss in losses:
if not key.endswith('_loss') and is_chief:
print('\033[31mWarning:\033[0m %s will be ignored. Losses name should end with "_loss"' % key)
tf.add_to_collection(key, loss) | 5,356,759 |
def rename_tuning(name, new_name):
"""rename tuning"""
session = tables.get_session()
if session is None:
return False, 'connect'
try:
tuning_table = TuningTable()
if not tuning_table.check_exist_by_name(TuningTable, name, session):
return False, 'tuning not exist'
if tuning_table.check_exist_by_name(TuningTable, new_name, session):
return False, 'duplicate'
tuning_table.update_tuning_name(name, new_name, session)
session.commit()
except SQLAlchemyError as err:
LOGGER.error('Rename tuning failed: %s', err)
return False, 'error'
finally:
session.close()
return True, '' | 5,356,760 |
async def get_event(token: str, event_id: str) -> dict:
"""Get event - return new if no event found."""
event = {"id": event_id, "name": "Nytt arrangement", "organiser": "Ikke valgt"}
if event_id != "":
logging.debug(f"get_event {event_id}")
event = await EventsAdapter().get_event(token, event_id)
return event | 5,356,761 |
def _xfsdump_output(data):
"""
Parse CLI output of the xfsdump utility.
"""
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out["Session ID"] = line.split(" ")[-1]
elif line.startswith("session label:"):
out["Session label"] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out["Media size"] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out["Dump complete"] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out["Status"] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out["Summary"] = " ".join(summary)
return out | 5,356,762 |
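# Illustrative only: a trimmed, hand-written xfsdump transcript showing which
# line prefixes the parser above picks up.
# sample = "\n".join([
#     "xfsdump: session id: 4a6193b2-1dd2-11b2-9fc8-e44e7d1e60e4",
#     "xfsdump: session label: nightly",
#     "xfsdump: media file size 21016 bytes",
#     "xfsdump: dump complete: 3 seconds elapsed",
#     "xfsdump: Dump Status: SUCCESS",
# ])
# _xfsdump_output(sample)
# # -> {'Session ID': '4a6193b2-...', 'Session label': 'nightly',
# #     'Media size': '21016 bytes', 'Dump complete': '3 seconds elapsed',
# #     'Status': 'SUCCESS'}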
def get_current():
"""Return the currently running interpreter."""
id = _interpreters.get_current()
return Interpreter(id) | 5,356,763 |
def opf():
"""One-piece-flow model
""" | 5,356,764 |
def csstext(text: str, cls: str, span: bool=False, header: bool=False) -> str:
"""
Custom build HTML text element.
"""
if span:
tag = 'span'
elif header:
tag = 'h1'
else:
tag = 'p'
return f'<{tag} class="{cls}">{str(text)}</{tag}>' | 5,356,765 |
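# Quick examples of the three branches:
# csstext("Report", "title", header=True) -> '<h1 class="title">Report</h1>'
# csstext("beta", "badge", span=True)     -> '<span class="badge">beta</span>'
# csstext("Done.", "note")                -> '<p class="note">Done.</p>'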
def trans_exam_list_to_colum(example_list, headers=None):
"""
    Convert a list of examples into a column-oriented form, used to attach additional output information.
    :param example_list: list of examples
    :param headers: attributes to extract; defaults to ("question", "answer", "yes_or_no")
    :return: {header1: [...], header2: [...], ...}
"""
if headers is None:
headers = ("question", "answer", "yes_or_no")
result = {}
for header in headers:
result[header] = []
for example in example_list:
for header in headers:
result[header].append(getattr(example, header, ""))
return result | 5,356,766 |
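# Usage sketch: any object exposing the requested attributes works, e.g.
# from types import SimpleNamespace
# examples = [SimpleNamespace(question="1+1?", answer="2", yes_or_no="yes")]
# trans_exam_list_to_colum(examples)
# # -> {'question': ['1+1?'], 'answer': ['2'], 'yes_or_no': ['yes']}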
def member():
""" RESTful CRUD Controller """
return s3_rest_controller() | 5,356,767 |
def _now():
"""Get EST localized now datetime."""
return EST_TIMEZONE.localize(datetime.datetime.now()) | 5,356,768 |
def pydantic_model_to_pandas(pydantic_model_input) -> pd.DataFrame:
"""
Function that transforms <pydantic.BaseModel> child objects to
<pandas.DataFrame> objects
:param pydantic_model_input: Input validator for API
"""
return dict_to_pandas(pydantic_model_input.dict()) | 5,356,769 |
def assign_student_to_project(student: dict, project: dict, score: int):
"""
Assigns a student to a project
"""
projects_table = Airtable(SMT_BASE_ID, PROJECTS_TABLE, api_key=os.environ["AIRTABLE_API_KEY"])
project_id = project["id"]
project_name = project["fields"]["Name"]
current_project_record = projects_table.get(project_id)
student_id = student["fields"]["What is your name?"][0]
student_name = student["fields"]["Student Name"][0]
team_members = []
if "Team Members" in current_project_record["fields"]:
team_members = current_project_record["fields"]["Team Members"]
if student_id not in team_members:
print(f"Adding {student_name} to team {project_name}")
team_members.append(student_id)
else:
print("Creating new team assigning {} to team {}".format(student_name, project_name))
team_members = [student_id]
print("Updating Airtable project record: {}".format(project_id))
projects_table.update(project_id, {"Team Members": team_members}) | 5,356,770 |
def main():
"""
    The user enters a number, and this program computes its Hailstone sequence.
    Hailstone sequences follow these rules:
    If a number is odd, multiply it by 3 and add 1.
    If a number is even, divide it by 2.
    pre-condition: Waits for the user to input a number.
    post-condition: Shows the user how many steps it took to reach 1.
print('This program computes Hailstone sequences.')
data = int(input('Enter a number: '))
start = time.time()
n = data
steps = 0
# Check if the number is 1 or not.
if n == FINAL:
print('It took ' + str(steps) + ' steps to reach 1.')
# If the number is not 1, we have to start calculating until it reach 1.
else:
while True:
# Every time, check if the number is 1(stop) or not.
if n == FINAL:
break
# If a number is odd, multiply it by 3 and add 1.
if data % 2 == 1:
n = 3*n+1
print(str(data) + ' is odd, so I make 3n+1: ' + str(n))
# If a number is even, divide it by 2.
if data % 2 == 0:
n = n//2
print(str(data) + ' is even, so I take half: ' + str(n))
data = n
steps += 1
print('It took ' + str(steps) + ' steps to reach 1.')
end = time.time()
print("The time of execution of above program is :", end - start) | 5,356,771 |
def calculate_pair_energy(coordinates, i_particle, box_length, cutoff):
"""
    Calculate the interaction energy of a particle with its environment (all other particles in the system)
Parameters
----------
coordinates : list
The coordinates for all particles in the system
i_particle : int
        The particle number for which to calculate the energy
    box_length : float
        The length of the simulation box
    cutoff : float
The simulation cutoff. Beyond this distance, interactions are not calculated.
Returns
-------
e_total : float
        The pairwise interaction energy of the i-th particle with all other particles in the system.
"""
e_total = 0.0
i_position = coordinates[i_particle]
distance_array = calculate_distance(coordinates, i_position, box_length)
# Just so we don't use it for calculation
distance_array[i_particle] = cutoff*2
less_than_cutoff = distance_array[distance_array < cutoff]
interaction_energies = calculate_LJ(less_than_cutoff)
e_total = np.sum(interaction_energies)
return e_total | 5,356,772 |
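# Comment sketch (calculate_distance and calculate_LJ come from the same module
# and are not shown here): energy felt by particle 0 in a 3-particle cubic box
# of side 10 with a cutoff of 3 reduced units.
# coords = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 8.0]])
# calculate_pair_energy(coords, 0, box_length=10.0, cutoff=3.0)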
def cloud_optimize_inPlace(in_file:str) -> None:
"""Takes path to input and output file location. Reads tif at input location and writes cloud-optimized geotiff of same data to output location."""
## add overviews to file
cloudOpArgs = ["gdaladdo",in_file,"-quiet"]
subprocess.call(cloudOpArgs)
## copy file
intermediate_file = in_file.replace(".tif",".TEMP.tif")
with open(intermediate_file,'wb') as a:
with open(in_file,'rb') as b:
shutil.copyfileobj(b,a)
## add tiling to file
cloudOpArgs = ["gdal_translate",intermediate_file,in_file,'-q','-co', "TILED=YES",'-co',"COPY_SRC_OVERVIEWS=YES",'-co', "COMPRESS=LZW", "-co", "PREDICTOR=2"]
if getMetadata(in_file)['product'] in NDVI_PRODUCTS:
cloudOpArgs.append("-co")
cloudOpArgs.append("BIGTIFF=YES")
subprocess.call(cloudOpArgs)
## remove intermediate
os.remove(intermediate_file) | 5,356,773 |
def debug_time_step(t, epidx, obs, act, extras, goal=None):
"""Save images and other stuff from time `t` in episode `epidx`."""
pth = 'tmp'
tt = str(t).zfill(2)
# Convert from BGR to RGB to match what we see in the GUI.
def save(fname, c_img):
cv2.imwrite(fname, img=cv2.cvtColor(c_img, cv2.COLOR_BGR2RGB))
# Save current color images from camera angles and the fused version.
for img_idx, c_img in enumerate(obs['color']):
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}.png')
save(fname, c_img)
colormap_o, _ = get_heightmap(obs=obs)
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused.png')
save(fname, colormap_o)
# (If applicable) save the goal color images.
if (goal is not None) and t == 1:
for img_idx, c_img in enumerate(goal['color']):
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_{img_idx}_goal.png')
save(fname, c_img)
colormap_g, _ = get_heightmap(obs=goal)
fname = join(pth, f'ep_{epidx}_t{tt}_cimg_fused_goal.png')
save(fname, colormap_g)
# Print the action.
pose0 = act['params']['pose0']
pose1 = act['params']['pose1']
print(f" pose0, pose1: {U.round_pose(pose0)}, {U.round_pose(pose1)}")
# Attention. (Well, attn_input.png is also input to Transport...)
fname1 = join(pth, f'ep_{epidx}_t{tt}_attn_input.png')
fname2 = join(pth, f'ep_{epidx}_t{tt}_attn_heat_bgr.png')
cv2.imwrite(fname1, extras['input_c'])
cv2.imwrite(fname2, extras['attn_heat_bgr'])
# Transport
for idx, tran_heat in enumerate(extras['tran_heat_bgr']):
idxstr = str(idx).zfill(2)
fname = join(pth, f'ep_{epidx}_t{tt}_tran_rot_{idxstr}.png')
if idx == extras['tran_rot_argmax']:
fname = fname.replace('.png', '_rot_chosen.png')
cv2.imwrite(fname, tran_heat) | 5,356,774 |
def simulation_activation(model, parcel_df, aerosols_panel):
""" Given the DataFrame output from a parcel model simulation, compute
activation kinetic limitation diagnostics.
Parameters
----------
model : ParcelModel
The ParcelModel
    parcel_df : DataFrame
The DataFrame containing the parcel's thermodynamic trajectory
aerosols_panel : Panel
A Panel collection of DataFrames containing the aerosol size evolution
Returns
-------
act_stats : DataFrame
A DataFrame containing the activation statistics
"""
initial_row = parcel_df.iloc[0]
Smax_i, T_i = initial_row['S'], initial_row['T']
acts = {'eq': [], 'kn': [], 'alpha': [], 'phi': []}
initial_aerosols = model.aerosols
N_all_modes = np.sum([aer.total_N for aer in initial_aerosols])
N_fracs = {aer.species: aer.total_N/N_all_modes for aer in initial_aerosols}
for i in range(len(parcel_df)):
row_par = parcel_df.iloc[i]
rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel}
# Update thermo
T_i = row_par['T']
if row_par['S'] > Smax_i:
Smax_i = row_par['S']
eq_tot, kn_tot, alpha_tot, phi_tot = 0., 0., 0., 0.
for aerosol in initial_aerosols:
N_frac = N_fracs[aerosol.species]
rs = rows_aer[aerosol.species]
eq, kn, alpha, phi = binned_activation(Smax_i, T_i, rs, aerosol)
eq_tot += eq*N_frac
kn_tot += kn*N_frac
alpha_tot += alpha*N_frac
phi_tot += phi*N_frac
acts['kn'].append(kn_tot)
acts['eq'].append(eq_tot)
acts['alpha'].append(alpha_tot)
acts['phi'].append(phi_tot)
acts_total = pd.DataFrame(acts, index=parcel_df.index)
return acts_total | 5,356,775 |
def run():
"""
Convolutional NN Text
Activation function: relu
Optimizer: AdamOptimizer
:return:
"""
# ----- Data -------
percent_test = 0.1
print("Loading data...")
positive_data_file = "data/rt-polaritydata/rt-polarity.pos"
negative_data_file = "data/rt-polaritydata/rt-polarity.neg"
data, target = data_helpers.load_data_and_labels(positive_data_file, negative_data_file)
max_document_length = max([len(x.split(" ")) for x in data])
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(data)))
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(target)))
x_shuffled = x[shuffle_indices]
y_shuffled = target[shuffle_indices]
# Split train/test set
    # TODO: This is very crude, should use cross-validation
dev_sample_index = -1 * int(percent_test * float(len(target)))
train_data, test_data = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
train_target, test_target = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
train_batch = build_batch(list(zip(train_data, train_target)))
# ------ Constants -------
# Data
test_freq = 20
# Learning Rate Values
lrmax = 0.003
lrmin = 0.00001
decay_speed = 2000.0
# Drop-off (Less for text)
keep_ratio = 0.5
# Text Values
sequence_length = train_data.shape[1]
output_size = train_target.shape[1]
# Todo Dimensionality of character embedding
embedding_dim = 128
vocab_size = len(vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(vocab_size))
print("Train/Dev split: {:d}/{:d}".format(len(train_target), len(test_target)))
# Layers (Single Dimension for text)
filters = [
5,
4,
3,
]
# channels = [1, 4, 8, 12]
# channels = [1, 6, 12, 24]
num_filters = 128
# Always `1` for text
# strides = [1, 2, 2]
stride = 1
stride_shape = [1, stride, stride, 1]
# Tensor Board Log
logs_path = "tensor_log/%s/" % splitext(basename(__file__))[0]
fully_connecting_nodes = num_filters * len(filters)
# Target classifications for nodes
output_nodes = 2
# Place holders
X = tf.placeholder(tf.int32, [None, sequence_length], name="Input_PH")
Y_ = tf.placeholder(tf.float32, [None, output_size], name="Output_PH")
L = tf.placeholder(tf.float32, name="Learning_Rate_PH")
keep_prob = tf.placeholder(tf.float32, name="Per_Keep_PH")
# Initialize Activation
with tf.device('/cpu:0'), tf.name_scope("embedding"):
embedding = tf.Variable(
tf.random_uniform([vocab_size, embedding_dim], -1.0, 1.0),
name="Text_Embedding"
)
embedded_chars = tf.nn.embedding_lookup(embedding, X)
embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
# ----- Weights and Bias -----
weights = []
biases = []
for i in range(len(filters)):
with tf.name_scope('Layer'):
# weight_shape = [filters[i], embedding_dim] + channels[i:i+2]
weight_shape = [filters[i], embedding_dim, 1, num_filters]
weights.append(weight_variable(weight_shape))
biases.append(bias_variable(weight_shape[-1:]))
with tf.name_scope('Layer'):
WOutput = weight_variable([fully_connecting_nodes, output_nodes])
BOutput = bias_variable([output_nodes])
# ---------------- Operations ----------------
# ------- Activation Function -------
"""
This method creates 3 separate layers with different filter sizes that get concatenated.
Other networks have taking a layer results and fed them into the next layer.
"""
pooled_outputs = []
for i in range(len(filters)):
with tf.name_scope('Wx_plus_b'):
# Todo Same input for each layer?
preactivate = tf.nn.conv2d(
embedded_chars_expanded,
weights[i],
strides=stride_shape,
padding="VALID",
name="conv"
)
tf.summary.histogram('Pre_Activations', preactivate)
# Apply nonlinearity
activations = tf.nn.relu(tf.nn.bias_add(preactivate, biases[i]), name="relu")
tf.summary.histogram('Activations', activations)
# Todo same stride shape for conv2d and max_pool (stride_shape and stride_pool_shape)
# Valid Padding dimension size: (input_size - filter_size + 1) / stride
next_dim = sequence_length - filters[i] + 1
# Ksize reduces the conv dimensions by conv2d[0] - pool_shape[0] +1
# with strides: (conv2d[0] - pool_shape[0] +1) / stride[0]
# Example: conv2d = [1, 8, 8, 2]
# pool_shape = [1, 8, 1, 1]
# stride_pool_shape = [1, 1, 4, 1]
# Result: [1, 1, 2, 2]
pool_shape = [1, next_dim, 1, 1]
stride_pool_shape = [1, 1, 1, 1]
pooled = tf.nn.max_pool(
activations,
ksize=pool_shape,
strides=stride_pool_shape,
padding='VALID',
name="pool"
)
# Todo Output is not cycled through next layer
pooled_outputs.append(pooled)
# Combine all the pooled features
pool_results = tf.concat(pooled_outputs, 3)
pool_flat = tf.reshape(pool_results, [-1, fully_connecting_nodes])
fully_connected_dropout = tf.nn.dropout(pool_flat, keep_prob)
# ------- Regression Functions -------
with tf.name_scope('Wx_plus_b'):
logits = tf.nn.xw_plus_b(fully_connected_dropout, WOutput, BOutput, name="Product")
tf.summary.histogram('Pre_Activations', logits)
predictions = tf.nn.softmax(logits, name="Output_Result")
# ------- Loss Function -------
with tf.name_scope('Loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y_, name="Cross_Entropy")
with tf.name_scope('Total'):
loss = tf.reduce_mean(cross_entropy, name="loss") * 100
tf.summary.scalar('Losses', loss)
# ------- Optimizer -------
with tf.name_scope('Optimizer'):
optimizer = tf.train.AdamOptimizer(L)
train_step = optimizer.minimize(loss, name="minimize")
# ------- Accuracy -------
with tf.name_scope('Accuracy'):
with tf.name_scope('correct_prediction'):
is_correct = tf.equal(
tf.argmax(predictions, 1, name="Max_Result"),
tf.argmax(Y_, 1, name="Target")
)
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
tf.summary.scalar('Accuracies', accuracy)
# ------- Tensor Graph -------
# Start Tensor Graph
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Tensor Board
merged_summary_op = tf.summary.merge_all()
tensor_graph = tf.get_default_graph()
train_writer = tf.summary.FileWriter(logs_path + "train", graph=tensor_graph)
test_writer = tf.summary.FileWriter(logs_path + "test")
# ------- Training -------
train_operations = [train_step, loss, merged_summary_op]
test_operations = [accuracy, loss, merged_summary_op]
test_data = {X: test_data, Y_: test_target, keep_prob: 1.0, L: 0}
avg_cost = 0.
for step, batch in enumerate(train_batch):
# ----- Train step -----
batch_X, batch_Y = zip(*batch)
learning_rate = lrmin + (lrmax - lrmin) * exp(-step / decay_speed)
train_data = {
X: batch_X,
Y_: batch_Y,
L: learning_rate,
keep_prob: keep_ratio
}
# Record execution stats
if step % 100 == 99:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
_, cross_loss, summary = sess.run(
train_operations,
feed_dict=train_data,
options=run_options,
run_metadata=run_metadata
)
else:
_, cross_loss, summary = sess.run(
train_operations,
feed_dict=train_data
)
# ----- Test Step -----
if step % test_freq == 0:
acc, cross_loss, summary = sess.run(
test_operations,
feed_dict=test_data
)
test_writer.add_summary(summary, step)
print('Accuracy at step %s: %s' % (step, acc))
#
# avg_cost += cross_loss / batch_total
# train_writer.add_summary(summary, step)
#
# # Display logs per epoch step
# print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost)) | 5,356,776 |
def create(options, args):
"""
Instantiate and return a Blueprint object from either standard input or by
reverse-engineering the system.
"""
try:
with context_managers.mkdtemp():
if not os.isatty(sys.stdin.fileno()):
try:
b = blueprint.Blueprint.load(sys.stdin, args[0])
except ValueError:
logging.error(
'standard input contains invalid blueprint JSON')
sys.exit(1)
else:
b = blueprint.Blueprint.create(args[0])
if options.subtrahend:
logging.info('subtracting {0}'.format(options.subtrahend))
b_s = blueprint.Blueprint.checkout(options.subtrahend)
b = b - b_s
b.commit(options.message or '')
return b
except blueprint.NameError:
logging.error('invalid blueprint name')
sys.exit(1) | 5,356,777 |
def test_excel_with_empty_columns(sdc_builder, sdc_executor):
"""Test if some records had empty value for a column, it don't ignore the column and keep the same schema and
write it in avro file. Test empty values in the first, medium and last column and get it as a null.
directory >> schema_generator >> local_fs
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
files_directory = os.path.join('/tmp', get_random_string())
directory_out = os.path.join('/tmp', get_random_string())
file_name = f'{get_random_string()}.xls'
file_path = os.path.join(files_directory, file_name)
schema_name = 'test_schema'
num_records = 4
try:
sdc_executor.execute_shell(f'mkdir {files_directory}')
file_writer(sdc_executor, file_path, generate_excel_file().getvalue())
directory = pipeline_builder.add_stage('Directory')
directory.set_attributes(excel_header_option='WITH_HEADER',
data_format='EXCEL',
files_directory=files_directory,
file_name_pattern='*.xls')
schema_generator = pipeline_builder.add_stage('Schema Generator')
schema_generator.set_attributes(schema_name=schema_name,
namespace=schema_name,
nullable_fields=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='AVRO', avro_schema_location='HEADER', directory_template=directory_out)
directory >> schema_generator >> local_fs
pipeline_write = pipeline_builder.build('Read Excel files')
sdc_executor.add_pipeline(pipeline_write)
sdc_executor.start_pipeline(pipeline_write).wait_for_pipeline_output_records_count(num_records)
sdc_executor.stop_pipeline(pipeline_write)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('Directory')
origin.set_attributes(data_format='AVRO',
files_directory=directory_out,
file_name_pattern='*')
wiretap = pipeline_builder.add_wiretap()
origin >> wiretap.destination
pipeline_read = pipeline_builder.build('Wiretap Pipeline')
sdc_executor.add_pipeline(pipeline_read)
sdc_executor.start_pipeline(pipeline_read).wait_for_pipeline_output_records_count(num_records)
sdc_executor.stop_pipeline(pipeline_read)
assert len(wiretap.output_records) == num_records
for x in range(num_records):
name_field = 'column' + str(x)
assert wiretap.output_records[x].field[name_field].value == ''
# We check if it works for 2 empty columns at end of the row
if x == 3:
name_field = 'column' + str(x-1)
assert wiretap.output_records[x].field[name_field].value == ''
finally:
logger.info('Delete directory in %s and %s...', files_directory, directory_out)
sdc_executor.execute_shell(f'rm -r {files_directory}')
sdc_executor.execute_shell(f'rm -r {directory_out}') | 5,356,778 |
def create_config(
case=None, Exp='Dummy', Type='Tor',
Lim=None, Bump_posextent=[np.pi/4., np.pi/4],
R=None, r=None, elong=None, Dshape=None,
divlow=None, divup=None, nP=None,
returnas=None, strict=None,
SavePath='./', path=_path_testcases,
):
""" Create easily a tofu.geom.Config object
In tofu, a Config (short for geometrical configuration) refers to the 3D
geometry of a fusion device.
It includes, at least, a simple 2D polygon describing the first wall of the
fusion chamber, and can also include other structural elements (tiles,
limiters...) that can be non-axisymmetric.
To create a simple Config, provide either the name of a reference test
    case, or a set of geometrical parameters (major radius, elongation...).
This is just a tool for fast testing, if you want to create a custom
config, use directly tofu.geom.Config and provide the parameters you want.
Parameters
----------
case : str
The name of a reference test case, if provided, this arguments is
sufficient, the others are ignored
Exp : str
The name of the experiment
Type : str
The type of configuration (toroidal 'Tor' or linear 'Lin')
    Bump_posextent: list
The angular (poloidal) limits, in the cross-section of the extension of
the outer bumper
R : float
The major radius of the center of the cross-section
r : float
The minor radius of the cross-section
elong: float
An elongation parameter (in [-1;1])
Dshape: float
A parameter specifying the D-shape of the cross-section (in [-1;1])
divlow: bool
A flag specifying whether to include a lower divertor-like shape
divup: bool
A flag specifying whether to include an upper divertor-like shape
nP: int
Number of points used to describe the cross-section polygon
    returnas: object / dict
        Flag indicating whether to return the config as:
            - object: a tofu.geom.Config instance
            - dict: a dict of Struct instances or of the polygons as np.ndarrays
strict: bool
Flag indicating whether to raise an error if a Struct cannot be loaded
Otherwise only raises a warning
path: str
Absolute path where to find the test case data
SavePath: str
The default path used for saving Struct and Config objects returned by
the routine.
Return
------
conf: tofu.geom.Config / dict
        Depending on the value of parameter returnas, either:
- the tofu.geom.Config object created
- a dictionary of the polygons and their pos/extent (if any)
"""
lp = [R, r, elong, Dshape, divlow, divup, nP]
lpstr = '[R, r, elong, Dshape, divlow, divup, nP]'
lc = [case is not None,
any([pp is not None for pp in lp])]
if np.sum(lc) > 1:
msg = ("Please provide either:\n"
+ "\t- case: the name of a pre-defined config\n"
+ "\t- geometrical parameters {}\n\n".format(lpstr))
raise Exception(msg)
elif not any(lc):
msg = get_available_config(verb=False, returnas=str)
raise Exception(msg)
# Get config, either from known case or geometrical parameterization
if case is not None:
conf = _create_config_testcase(
config=case,
path=path,
returnas=returnas,
strict=strict,
)
else:
poly, pbump, pbaffle = _compute_VesPoly(R=R, r=r,
elong=elong, Dshape=Dshape,
divlow=divlow, divup=divup,
nP=nP)
if returnas == 'dict':
conf = {'Ves':{'Poly':poly},
'Baffle':{'Poly':pbaffle},
'Bumper':{'Poly':pbump,
'pos':Bump_posextent[0],
'extent':Bump_posextent[1]}}
else:
ves = _core.Ves(Poly=poly, Type=Type, Lim=Lim, Exp=Exp, Name='Ves',
SavePath=SavePath)
baf = _core.PFC(Poly=pbaffle, Type=Type, Lim=Lim,
Exp=Exp, Name='Baffle', color='b', SavePath=SavePath)
bump = _core.PFC(Poly=pbump, Type=Type,
pos=Bump_posextent[0], extent=Bump_posextent[1],
Exp=Exp, Name='Bumper', color='g', SavePath=SavePath)
conf = _core.Config(Name='Dummy', Exp=Exp, lStruct=[ves,baf,bump],
SavePath=SavePath)
return conf | 5,356,779 |
def is_project_description(description):
"""Validates the specified project description.
A valid description is simply a non-empty string.
Args:
description (str): A project description to validate.
Returns:
<bool, str|None>: A pair containing the value True if the specified description
is valid, False otherwise; and an error message in case the description is invalid.
"""
try:
return (False, "A project description must be a non-empty string.") if is_empty_string(description) else (True, None)
except TypeError:
return (False, "The 'description' argument must be a string.") | 5,356,780 |
def update_pretrained_cfg_and_kwargs(pretrained_cfg, kwargs, kwargs_filter):
""" Update the default_cfg and kwargs before passing to model
Args:
pretrained_cfg: input pretrained cfg (updated in-place)
kwargs: keyword args passed to model build fn (updated in-place)
kwargs_filter: keyword arg keys that must be removed before model __init__
"""
# Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
if pretrained_cfg.get('fixed_input_size', False):
# if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
default_kwarg_names += ('img_size',)
set_default_kwargs(kwargs, names=default_kwarg_names, pretrained_cfg=pretrained_cfg)
# Filter keyword args for task specific model variants (some 'features only' models, etc.)
filter_kwargs(kwargs, names=kwargs_filter) | 5,356,781 |
def _float_incr(key, incr):
"""
Increments a redis (float) key value by a given float.
    The float may be negative to achieve decrements.
"""
"""
Currently, this is a very bad implementation, as we are unable to
get a value inside the atomic operation.
"""
value = actor.get(key)
with actor.pipeline(transaction = True) as pipe:
try:
fvalue = float(value)
except TypeError:
"it was empty, thus None"
fvalue = 0.0
fvalue += incr
pipe.set(key, fvalue)
pipe.execute() | 5,356,782 |
def remove_friend():
"""
    Removes an existing friend.
"""
data = json.loads(request.data)
friend_id = data['id']
user = interface.get_user_by_id(get_jwt_identity())
friend = interface.get_user_by_id(friend_id)
interface.remove_friendship(user, friend)
return '', 200 | 5,356,783 |
def color_col_labels(month, ax):
"""Color the column labels for the given month image."""
for col, cell in enumerate(month.header_row):
if month.col_labels[col]:
top_left, width, height = cell.get_patch()
ax.add_patch(patches.Rectangle(
top_left,
width,
height,
alpha=0.5,
facecolor='#feb209')) | 5,356,784 |
def ensure_configured(func):
"""Modify a function to call ``basicConfig`` first if no handlers exist."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(logging.root.handlers) == 0:
basicConfig()
return func(*args, **kwargs)
return wrapper | 5,356,785 |
def describe_asset_model(assetModelId=None):
"""
Retrieves information about an asset model.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_asset_model(
assetModelId='string'
)
:type assetModelId: string
:param assetModelId: [REQUIRED]\nThe ID of the asset model.\n
:rtype: dict
ReturnsResponse Syntax{
'assetModelId': 'string',
'assetModelArn': 'string',
'assetModelName': 'string',
'assetModelDescription': 'string',
'assetModelProperties': [
{
'id': 'string',
'name': 'string',
'dataType': 'STRING'|'INTEGER'|'DOUBLE'|'BOOLEAN',
'unit': 'string',
'type': {
'attribute': {
'defaultValue': 'string'
},
'measurement': {},
'transform': {
'expression': 'string',
'variables': [
{
'name': 'string',
'value': {
'propertyId': 'string',
'hierarchyId': 'string'
}
},
]
},
'metric': {
'expression': 'string',
'variables': [
{
'name': 'string',
'value': {
'propertyId': 'string',
'hierarchyId': 'string'
}
},
],
'window': {
'tumbling': {
'interval': 'string'
}
}
}
}
},
],
'assetModelHierarchies': [
{
'id': 'string',
'name': 'string',
'childAssetModelId': 'string'
},
],
'assetModelCreationDate': datetime(2015, 1, 1),
'assetModelLastUpdateDate': datetime(2015, 1, 1),
'assetModelStatus': {
'state': 'CREATING'|'ACTIVE'|'UPDATING'|'PROPAGATING'|'DELETING'|'FAILED',
'error': {
'code': 'VALIDATION_ERROR'|'INTERNAL_FAILURE',
'message': 'string'
}
}
}
Response Structure
(dict) --
assetModelId (string) --The ID of the asset model.
assetModelArn (string) --The ARN of the asset model, which has the following format.
arn:${Partition}:iotsitewise:${Region}:${Account}:asset-model/${AssetModelId}
assetModelName (string) --The name of the asset model.
assetModelDescription (string) --The asset model\'s description.
assetModelProperties (list) --The list of asset properties for the asset model.
(dict) --Contains information about an asset model property.
id (string) --The ID of the asset model property.
name (string) --The name of the asset model property.
dataType (string) --The data type of the asset model property.
unit (string) --The unit of the asset model property, such as Newtons or RPM .
type (dict) --The property type (see PropertyType ).
attribute (dict) --Specifies an asset attribute property. An attribute generally contains static information, such as the serial number of an IIoT wind turbine.
defaultValue (string) --The default value of the asset model property attribute. All assets that you create from the asset model contain this attribute value. You can update an attribute\'s value after you create an asset. For more information, see Updating Attribute Values in the AWS IoT SiteWise User Guide .
measurement (dict) --Specifies an asset measurement property. A measurement represents a device\'s raw sensor data stream, such as timestamped temperature values or timestamped power values.
transform (dict) --Specifies an asset transform property. A transform contains a mathematical expression that maps a property\'s data points from one form to another, such as a unit conversion from Celsius to Fahrenheit.
expression (string) --The mathematical expression that defines the transformation function. You can specify up to 10 variables per expression. You can specify up to 10 functions per expression.
For more information, see Quotas in the AWS IoT SiteWise User Guide .
variables (list) --The list of variables used in the expression.
(dict) --Contains expression variable information.
name (string) --The friendly name of the variable to be used in the expression.
value (dict) --The variable that identifies an asset property from which to use values.
propertyId (string) --The ID of the property to use as the variable. You can use the property name if it\'s from the same asset model.
hierarchyId (string) --The ID of the hierarchy to query for the property ID. You can use the hierarchy\'s name instead of the hierarchy\'s ID.
You use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same propertyId . For example, you might have separately grouped assets that come from the same asset model. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide .
metric (dict) --Specifies an asset metric property. A metric contains a mathematical expression that uses aggregate functions to process all input data points over a time interval and output a single data point, such as to calculate the average hourly temperature.
expression (string) --The mathematical expression that defines the metric aggregation function. You can specify up to 10 variables per expression. You can specify up to 10 functions per expression.
For more information, see Quotas in the AWS IoT SiteWise User Guide .
variables (list) --The list of variables used in the expression.
(dict) --Contains expression variable information.
name (string) --The friendly name of the variable to be used in the expression.
value (dict) --The variable that identifies an asset property from which to use values.
propertyId (string) --The ID of the property to use as the variable. You can use the property name if it\'s from the same asset model.
hierarchyId (string) --The ID of the hierarchy to query for the property ID. You can use the hierarchy\'s name instead of the hierarchy\'s ID.
You use a hierarchy ID instead of a model ID because you can have several hierarchies using the same model and therefore the same propertyId . For example, you might have separately grouped assets that come from the same asset model. For more information, see Asset Hierarchies in the AWS IoT SiteWise User Guide .
window (dict) --The window (time interval) over which AWS IoT SiteWise computes the metric\'s aggregation expression. AWS IoT SiteWise computes one data point per window .
tumbling (dict) --The tumbling time interval window.
interval (string) --The time interval for the tumbling window. Note that w represents weeks, d represents days, h represents hours, and m represents minutes. AWS IoT SiteWise computes the 1w interval the end of Sunday at midnight each week (UTC), the 1d interval at the end of each day at midnight (UTC), the 1h interval at the end of each hour, and so on.
When AWS IoT SiteWise aggregates data points for metric computations, the start of each interval is exclusive and the end of each interval is inclusive. AWS IoT SiteWise places the computed data point at the end of the interval.
assetModelHierarchies (list) --A list of asset model hierarchies that each contain a childAssetModelId and a hierarchyId (named id ). A hierarchy specifies allowed parent/child asset relationships for an asset model.
(dict) --Describes an asset hierarchy that contains a hierarchy\'s name, ID, and child asset model ID that specifies the type of asset that can be in this hierarchy.
id (string) --The ID of the asset model hierarchy. This ID is a hierarchyId .
name (string) --The name of the asset model hierarchy that you specify by using the CreateAssetModel or UpdateAssetModel API.
childAssetModelId (string) --The ID of the asset model. All assets in this hierarchy must be instances of the childAssetModelId asset model.
assetModelCreationDate (datetime) --The date the asset model was created, in Unix epoch time.
assetModelLastUpdateDate (datetime) --The date the asset model was last updated, in Unix epoch time.
assetModelStatus (dict) --The current status of the asset model, which contains a state and any error message.
state (string) --The current state of the asset model.
error (dict) --Contains associated error information, if any.
code (string) --The error code.
message (string) --The error message.
Exceptions
IoTSiteWise.Client.exceptions.InvalidRequestException
IoTSiteWise.Client.exceptions.ResourceNotFoundException
IoTSiteWise.Client.exceptions.InternalFailureException
IoTSiteWise.Client.exceptions.ThrottlingException
:return: {
'assetModelId': 'string',
'assetModelArn': 'string',
'assetModelName': 'string',
'assetModelDescription': 'string',
'assetModelProperties': [
{
'id': 'string',
'name': 'string',
'dataType': 'STRING'|'INTEGER'|'DOUBLE'|'BOOLEAN',
'unit': 'string',
'type': {
'attribute': {
'defaultValue': 'string'
},
'measurement': {},
'transform': {
'expression': 'string',
'variables': [
{
'name': 'string',
'value': {
'propertyId': 'string',
'hierarchyId': 'string'
}
},
]
},
'metric': {
'expression': 'string',
'variables': [
{
'name': 'string',
'value': {
'propertyId': 'string',
'hierarchyId': 'string'
}
},
],
'window': {
'tumbling': {
'interval': 'string'
}
}
}
}
},
],
'assetModelHierarchies': [
{
'id': 'string',
'name': 'string',
'childAssetModelId': 'string'
},
],
'assetModelCreationDate': datetime(2015, 1, 1),
'assetModelLastUpdateDate': datetime(2015, 1, 1),
'assetModelStatus': {
'state': 'CREATING'|'ACTIVE'|'UPDATING'|'PROPAGATING'|'DELETING'|'FAILED',
'error': {
'code': 'VALIDATION_ERROR'|'INTERNAL_FAILURE',
'message': 'string'
}
}
}
"""
pass | 5,356,786 |
def get_loss(dataset_properties: Dict[str, Any], name: Optional[str] = None) -> Type[Loss]:
"""
Utility function to get losses for the given dataset properties.
If name is mentioned, checks if the loss is compatible with
the dataset properties and returns the specific loss
Args:
dataset_properties (Dict[str, Any]): Dictionary containing
properties of the dataset. Must contain task_type and
output_type as strings.
name (Optional[str]): name of the specific loss
Returns:
Type[torch.nn.modules.loss._Loss]
"""
assert 'task_type' in dataset_properties, \
"Expected dataset_properties to have task_type got {}".format(dataset_properties.keys())
assert 'output_type' in dataset_properties, \
"Expected dataset_properties to have output_type got {}".format(dataset_properties.keys())
task = STRING_TO_TASK_TYPES[dataset_properties['task_type']]
output_type = STRING_TO_OUTPUT_TYPES[dataset_properties['output_type']]
supported_losses = get_supported_losses(task, output_type)
if name is not None:
if name not in supported_losses.keys():
raise ValueError("Invalid name entered for task {}, and output type {} currently supported losses"
" for task include {}".format(dataset_properties['task_type'],
dataset_properties['output_type'],
list(supported_losses.keys())))
else:
loss = supported_losses[name]
else:
loss = get_default(task)
return loss | 5,356,787 |
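# Hedged sketch: the strings must be keys of the project's STRING_TO_TASK_TYPES /
# STRING_TO_OUTPUT_TYPES maps; the values below are illustrative, not guaranteed.
# loss_cls = get_loss({'task_type': 'tabular_classification',
#                      'output_type': 'binary'})
# criterion = loss_cls()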
def ten_to_base(value : int, base):
"""Converts a given decimal value into the specified base.
:param value: The number to convert
:param base: The base to convert the specified number to
:return: The converted value in the specified base
"""
# Check if the base is 10, return the value
if base == 10:
return value
# Keep track of the remainders, which will be the new digits in the specified base
remainders = []
# Divide the value by the base until the number is 0
while value != 0:
remainders.append(value % base)
value //= base
# Reverse the order of the remainders and turn each digit
# into the proper value from the BASES string
remainders.reverse()
for i in range(len(remainders)):
remainders[i] = BASES[remainders[i]]
return "".join(remainders) | 5,356,788 |
def c_exit(exitcode=0):
"""Induces a libc exit with exitcode 0"""
libc.exit(exitcode) | 5,356,789 |
def freeze_all(policies_per_player):
"""Freezes all policies within policy_per_player.
Args:
    policies_per_player: List of lists of policies, one inner list per player.
"""
for policies in policies_per_player:
for pol in policies:
pol.freeze() | 5,356,790 |
def download_acs_data(
url: str,
download_path: Union[str, Path] = "../data/raw/",
extract: bool = True,
extract_path: Union[str, Path] = "../data/interim/",
) -> None:
"""
Downloads ACS 1-, 3-, or 5- estimates from a US Census Bureau's FTP-server URL.
"""
# Checks download_path and extract_path exists
_check_data_folder(
path=download_path, extract_path=extract_path if extract else None
)
# Downloads Data
BASE_URL = "https://www2.census.gov/programs-surveys/acs/data/pums/"
if url[:55] != BASE_URL:
raise ValueError(
"Census FPT-server url's start with 'https://www2.census.gov/programs-surveys/acs/data/pums/'"
)
state = url.split("/")[-1].split(".")[0][-2:]
r = requests.get(url, stream=True)
    # content-length was dropped from their headers, so use it if present or fall back to a 40 MB default
total_size = int(r.headers.get("content-length", 40000000))
### Checks
download_path = Path(download_path)
extract_path = Path(extract_path)
if download_path.is_file():
raise ValueError(
"You provided a path to a file. You need to provide a path to a directory."
)
# if not download_path.is_dir():
# raise ValueError("You need to provide a path to a directory.")
if not download_path.exists():
download_path.mkdir()
### downloads data
filename = url.split("/")[-1]
with open(download_path / filename, "wb") as f:
print(f"Downloading at {download_path / filename}.")
chunk_size = 1024
for data in tqdm(
iterable=r.iter_content(chunk_size=chunk_size),
total=total_size / chunk_size,
unit="KB",
):
f.write(data)
print("Download complete!")
## Extract file
if extract:
year = url.split("/")[7]
extract_folder = f"ACS_{year}"
final_extraction_folder = extract_path / extract_folder.upper() / state
if extract_path.is_file():
raise ValueError(
"You provided a path to a file. You need to provide a path to a directory."
)
# if not extract_path.is_dir():
# raise ValueError("You need to provide a path to a directory.")
if not extract_path.exists():
extract_path.mkdir()
# remove dir if it exists
if final_extraction_folder.exists():
for item in final_extraction_folder.glob("*"):
item.unlink()
final_extraction_folder.rmdir()
# create dir
if not Path(extract_path / extract_folder.upper()).exists():
Path(extract_path / extract_folder.upper()).mkdir()
final_extraction_folder.mkdir()
# extracts data
content_file = ZipFile(download_path / filename)
## for progress bar
file_size = 0
for file_info in content_file.infolist():
file_size += int(file_info.file_size)
extract_folder_size = sum(
item.stat().st_size for item in final_extraction_folder.iterdir()
)
expected_final_size = extract_folder_size + file_size
## Start extraction:
print(f"Extracting to {final_extraction_folder}")
content_file.extractall(final_extraction_folder)
while extract_folder_size < expected_final_size:
extract_folder_size = sum(
item.stat().st_size
for item in final_extraction_folder.iterdir()
)
print(
f"Extracting files to {final_extraction_folder}: {(extract_folder_size / file_size) :.2%}",
end="\r",
)
time.sleep(0.5)
break
print(f"Files extracted successfully at {final_extraction_folder}") | 5,356,791 |
def get_style(selector, name):
"""
Returns the resolved CSS style for the given property name.
    :param selector: CSS selector of the element to inspect.
    :param name: Name of the CSS property to resolve.
"""
if not get_instance():
raise Exception("You need to start a browser first with open_browser()")
return get_style_g(get_instance(), selector, name) | 5,356,792 |
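A short usage sketch, assuming the surrounding browser-automation helpers (`open_browser`, `get_instance`, `get_style_g`) come from the same module and behave as their names suggest.

open_browser("https://example.com")   # assumed helper that starts the browser session
color = get_style("h1", "color")      # resolved CSS value, e.g. "rgb(0, 0, 0)"
size = get_style("h1", "font-size")   # e.g. "32px"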
def argmod(*args):
"""
Decorator that intercepts and modifies function arguments.
Args:
from_param (str|list): A parameter or list of possible parameters that
should be modified using `modifier_func`. Passing a list of
possible parameters is useful when a function's parameter names
have changed, but you still want to support the old parameter
names.
to_param (str): Optional. If given, to_param will be used as the
parameter name for the modified argument. If not given, to_param
will default to the last parameter given in `from_param`.
modifier_func (callable): The function used to modify the `from_param`.
Returns:
function: A function that modifies the given `from_param` before the
function is called.
"""
from_param = listify(args[0])
to_param = from_param[-1] if len(args) < 3 else args[1]
modifier_func = args[-1]
def _decorator(func):
try:
argspec = inspect.getfullargspec(unwrap(func))
except AttributeError:
argspec = inspect.getargspec(unwrap(func))
if to_param not in argspec.args:
return func
arg_index = argspec.args.index(to_param)
@wraps(func)
def _modifier(*args, **kwargs):
kwarg = False
for arg in from_param:
if arg in kwargs:
kwarg = arg
break
if kwarg:
kwargs[to_param] = modifier_func(kwargs.pop(kwarg))
elif arg_index < len(args):
args = list(args)
args[arg_index] = modifier_func(args[arg_index])
return func(*args, **kwargs)
return _modifier
return _decorator | 5,356,793 |
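A usage sketch for `argmod`: accept a legacy keyword name and coerce the value before the wrapped function runs. `listify` and `unwrap` are helpers assumed to exist alongside the decorator (wrap a scalar in a list, and strip decorator layers, respectively).

# Accept the old name `timeout_secs` or the current `timeout`, coercing either to int.
@argmod(["timeout_secs", "timeout"], "timeout", int)
def connect(host, timeout=30):
    return host, timeout

connect("db.local", timeout_secs="10")  # -> ("db.local", 10)
connect("db.local", timeout="5")        # -> ("db.local", 5)
connect("db.local", "15")               # positional value is coerced too -> ("db.local", 15)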
def arrayGenerator(generator, length=10):
"""
    Creates a generator that yields an array (list) of values taken from the
supplied generator.
"""
while True:
yield list(itertools.islice(generator, length)) | 5,356,794 |
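A small usage sketch for `arrayGenerator`, chunking an infinite counter into fixed-size lists; once the source generator is exhausted it yields empty lists forever.

import itertools

chunks = arrayGenerator(itertools.count(1), length=3)
print(next(chunks))  # [1, 2, 3]
print(next(chunks))  # [4, 5, 6]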
def group_update(group_id, group_min, group_max, desired):
"""
Test with invalid input
>>> group_update('foo', 2, 1, 4)
{}
"""
if group_min > group_max or desired < group_min or desired > group_max:
return {}
try:
client = boto3.client('autoscaling')
response = client.update_auto_scaling_group(
AutoScalingGroupName=group_id,
MinSize=group_min,
MaxSize=group_max,
DesiredCapacity=desired)
except botocore.exceptions.ClientError:
print "Autoscaling client error: update_auto_scaling_group"
sys.exit(127)
return response | 5,356,795 |
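A hedged usage sketch for `group_update`; the group name is a placeholder, and a real call needs AWS credentials configured for boto3.

# Scale the (hypothetical) "web-workers" group to 2-10 instances, targeting 4.
response = group_update("web-workers", group_min=2, group_max=10, desired=4)
if not response:
    print("Invalid scaling bounds were supplied")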
def remove_bookmark(request, id):
"""
This view deletes a bookmark.
If requested via ajax it also returns the add bookmark form to replace the
drop bookmark form.
"""
bookmark = get_object_or_404(Bookmark, id=id, user=request.user)
if request.method == "POST":
bookmark.delete()
if not is_xhr(request):
messages.success(request, "Bookmark removed")
if request.POST.get("next"):
return HttpResponseRedirect(request.POST.get("next"))
return HttpResponse("Deleted")
return render(
request,
"admin_tools/menu/add_bookmark_form.html",
context={
"url": request.POST.get("next"),
"title": "**title**", # replaced on the javascript side
},
)
return render(
request,
"admin_tools/menu/delete_confirm.html",
context={"bookmark": bookmark, "title": "Delete Bookmark"},
) | 5,356,796 |
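A hedged sketch of how this Django view might be wired into a URLconf; the URL pattern and name are assumptions, not taken from the original project.

# urls.py (illustrative only)
from django.urls import path
from . import views

urlpatterns = [
    # POST /bookmarks/remove/42/ deletes bookmark 42 for the current user
    path("bookmarks/remove/<int:id>/", views.remove_bookmark, name="remove_bookmark"),
]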
def parse_propa(blob):
"""Creates new blob entries for the given blob keys"""
if "track_in" in blob.keys():
muon = blob["track_in"]
blob["Muon"] = Table(
{
"id": np.array(muon)[:, 0].astype(int),
"pos_x": np.array(muon)[:, 1],
"pos_y": np.array(muon)[:, 2],
"pos_z": np.array(muon)[:, 3],
"dir_x": np.array(muon)[:, 4],
"dir_y": np.array(muon)[:, 5],
"dir_z": np.array(muon)[:, 6],
"energy": np.array(muon)[:, 7],
"time": np.array(muon)[:, 8],
"particle_id": np.array(muon)[:, 9].astype(int),
"is_charm": np.array(muon)[:, 10].astype(int),
"mother_pid": np.array(muon)[:, 11].astype(int),
"grandmother_pid": np.array(muon)[:, 11].astype(int),
},
h5loc="muon",
)
blob["MuonMultiplicity"] = Table(
{"muon_multiplicity": len(np.array(muon)[:, 6])}, h5loc="muon_multiplicity"
)
if "neutrino" in blob.keys():
nu = blob["neutrino"]
blob["Neutrino"] = Table(
{
"id": np.array(nu)[:, 0].astype(int),
"pos_x": np.array(nu)[:, 1],
"pos_y": np.array(nu)[:, 2],
"pos_z": np.array(nu)[:, 3],
"dir_x": np.array(nu)[:, 4],
"dir_y": np.array(nu)[:, 5],
"dir_z": np.array(nu)[:, 6],
"energy": np.array(nu)[:, 7],
"time": np.array(nu)[:, 8],
"particle_id": np.array(nu)[:, 9].astype(int),
"is_charm": np.array(nu)[:, 10].astype(int),
"mother_pid": np.array(nu)[:, 11].astype(int),
"grandmother_pid": np.array(nu)[:, 11].astype(int),
},
h5loc="nu",
)
blob["NeutrinoMultiplicity"] = Table(
{
"total": len(np.array(nu)[:, 6]),
"nue": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == 12]),
"anue": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == -12]),
"numu": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == 14]),
"anumu": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == -14]),
},
h5loc="nu_multiplicity",
)
if ("track_in" or "neutrino") in blob.keys():
blob["Weights"] = Table(
{
"w1": blob["weights"][0][0],
"w2": blob["weights"][0][1],
"w3": blob["weights"][0][2],
},
h5loc="weights",
)
if "track_primary" in blob.keys():
primary = blob["track_primary"]
blob["Primary"] = Table(
{
"id": np.array(primary)[:, 0].astype(int),
"pos_x": np.array(primary)[:, 1],
"pos_y": np.array(primary)[:, 2],
"pos_z": np.array(primary)[:, 3],
"dir_x": np.array(primary)[:, 4],
"dir_y": np.array(primary)[:, 5],
"dir_z": np.array(primary)[:, 6],
"energy": np.array(primary)[:, 7],
"time": np.array(primary)[:, 8],
"particle_id": np.array(primary)[:, 9].astype(int),
},
h5loc="primary",
)
return blob | 5,356,797 |
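A minimal sketch of the blob layout `parse_propa` expects, inferred only from the column indexing above; the concrete values are illustrative.

# One muon row: [id, x, y, z, dx, dy, dz, energy, time, pid, is_charm, mother_pid]
blob = {
    "track_in": [
        [1, 0.0, 0.0, 100.0, 0.0, 0.0, -1.0, 1.5e3, 0.0, 13, 0, 211],
    ],
    "weights": [[1.0, 2.0, 3.0]],
}
blob = parse_propa(blob)
print(blob["MuonMultiplicity"])  # one muon in this event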
def compose_nautobot(context):
"""Create Netbox instance for Travis testing.
Args:
context (obj): Used to run specific commands
var_envs (dict): Environment variables to pass to the command runner
netbox_docker_ver (str): Version of Netbox docker to use
"""
# Copy the file from tests/docker-compose.test.yml to the tmp directory to be executed from there
# context.run(
# f"cp {PWD}/tests/nautobot-docker-compose.test.yml /tmp/docker-compose.yml", pty=True, env=var_envs,
# )
# context.run(
# f"cp {PWD}/tests/nginx.conf /tmp/nginx.conf", pty=True, env=var_envs,
# )
# context.run(
# f"cp {PWD}/tests/.creds.env.test /tmp/.creds.tests.env", pty=True, env=var_envs,
# )
# context.run("cd /tmp && docker-compose pull", pty=True, env=var_envs)
# context.run("cd /tmp && docker-compose down", pty=True, env=var_envs)
# context.run("cd /tmp && docker-compose up -d", pty=True, env=var_envs)
# Clone the repo so the latest data is present
context.run("docker pull networktocode/nautobot-lab:latest ", pty=True)
# Start the container
context.run(
"docker run -itd --rm --name nautobot -v $(pwd)/uwsgi.ini:/opt/nautobot/uwsgi.ini -p 8000:8000 networktocode/nautobot-lab:latest",
pty=True,
)
# Execute the load demo data
context.run("sleep 5 && docker exec -it nautobot load-mock-data", pty=True)
# Print out the ports listening to verify it is running
context.run("ss -ltn", pty=True) | 5,356,798 |
def fetch_incidents():
"""
Retrieve new incidents periodically based on pre-defined instance parameters
"""
now = convert_date_to_unix(datetime.utcnow())
last_run_object = demisto.getLastRun()
if last_run_object and last_run_object.get('time'):
last_run = last_run_object.get('time')
else:
last_run = now - 24 * 60 * 60 * 1000
next_fetch = last_run
q = '* AND timestamp:[%d TO *]' % last_run
if demisto.getParam('fetchQuery'):
q += ' AND ' + demisto.getParam('fetchQuery')
else:
q += ' AND workflow_status:(new OR inprogress)'
query = QUERY.copy()
query['q'] = q
query['offset'] = 0
query['limit'] = FETCH_LIMIT
query['sort_by'] = 'timestamp:asc'
resp_json = req('GET', 'search/alerts', query)
incidents = []
for a in resp_json['objects']:
current_fetch = a.get('timestamp')
if current_fetch:
try:
current_fetch = datetime.strptime(current_fetch, "%Y-%m-%dT%H:%M:%S")
except ValueError:
current_fetch = datetime.strptime(current_fetch, "%Y-%m-%dT%H:%M:%S.%f")
current_fetch = convert_date_to_unix(current_fetch)
if current_fetch > last_run:
incidents.append({
'name': a.get('name', 'No name') + ' - ' + a.get('id'),
'occurred': a.get('timestamp') + 'Z',
'details': a.get('description'),
'severity': translate_severity(a.get('severity')),
'rawJSON': json.dumps(a)
})
if current_fetch > next_fetch:
next_fetch = current_fetch
demisto.incidents(incidents)
demisto.setLastRun({'time': next_fetch}) | 5,356,799 |
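`fetch_incidents` leans on helpers defined elsewhere in the integration (`convert_date_to_unix`, `translate_severity`, `req`, plus the `QUERY` and `FETCH_LIMIT` globals). A hedged sketch of the two simple ones, assuming millisecond epoch timestamps and Demisto's 0-4 severity scale:

from datetime import datetime

def convert_date_to_unix(dt):
    """Naive UTC datetime -> epoch milliseconds (assumed convention)."""
    return int((dt - datetime(1970, 1, 1)).total_seconds() * 1000)

def translate_severity(sev):
    """Vendor severity string -> Demisto 0-4 scale (assumed mapping)."""
    return {"low": 1, "medium": 2, "high": 3, "critical": 4}.get(str(sev).lower(), 0)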