def createCSV(obj, filename):
    """
    Create a CSV file from the given rows of data.
    """
    import csv
    with open(filename + ".csv", "w", newline="") as file:
        writer = csv.writer(file, dialect="excel")
        writer.writerows(obj)
def xirr(cashflows,guess=0.1):
"""
Calculate the Internal Rate of Return of a series of cashflows at irregular intervals.
Arguments
---------
* cashflows: a list object in which each element is a tuple of the form (date, amount), where date is a python datetime.date object and amount is an integer or floating point number. Cash outflows (investments) are represented with negative amounts, and cash inflows (returns) are positive amounts.
* guess (optional, default = 0.1): a guess at the solution to be used as a starting point for the numerical solution.
Returns
--------
* Returns the IRR as a single value
Notes
----------------
* The Internal Rate of Return (IRR) is the discount rate at which the Net Present Value (NPV) of a series of cash flows is equal to zero. The NPV of the series of cash flows is determined using the xnpv function in this module. The discount rate at which NPV equals zero is found using the secant method of numerical solution.
* This function is equivalent to the Microsoft Excel function of the same name.
* For users that do not have the scipy module installed, there is an alternate version (commented out) that uses the secant_method function defined in the module rather than the scipy.optimize module's numerical solver. Both use the same method of calculation so there should be no difference in performance, but the secant_method function does not fail gracefully in cases where there is no solution, so the scipy.optimize.newton version is preferred.
_irr = xirr( [ (date(2010, 12, 29), -10000),
(date(2012, 1, 25), 20),
(date(2012, 3, 8), 10100)] )
"""
    val = -666
    try:
        val = optimize.newton(lambda r: xnpv(r, cashflows), guess)
    except RuntimeError:
        print("Failed to converge, returning: -666")
    return val
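
# A minimal sketch of the xnpv helper that xirr relies on. This is an
# assumption based on the docstring above (Excel-style actual/365 discounting),
# not necessarily the module's actual implementation.
def xnpv(rate, cashflows):
    """Net Present Value of irregularly spaced cashflows, discounted at `rate`."""
    t0 = min(date for date, _ in cashflows)
    return sum(amount / (1 + rate) ** ((date - t0).days / 365.0)
               for date, amount in cashflows)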
def check_rt_druid_fields(rt_table_columns, druid_columns):
    """
    Compare the rt fields against the druid physical table fields.
    :param rt_table_columns: rt fields after conversion to druid field types
    :param druid_columns: druid physical table fields
    :return: (append_fields, bad_fields) -- the fields that need to be added,
             and the fields whose type has changed
    """
    append_fields, bad_fields = [], []
    for key, value in rt_table_columns.items():
        col_name, col_type = key.lower(), value.lower()
        if col_name in druid_columns:
            # the field exists in druid; now compare the types
            druid_col_type = druid_columns[col_name]
            ok = (
                (col_type == druid_col_type)
                or (col_type == STRING and druid_col_type == VARCHAR)
                or (col_type == LONG and druid_col_type == BIGINT)
            )
            if not ok:
                bad_fields.append({col_name: f"difference between rt and druid({col_type} != {druid_col_type})"})
        else:
            append_fields.append({FIELD_NAME: col_name, FIELD_TYPE: col_type})
    return append_fields, bad_fields
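
# Usage sketch, assuming the module-level constants hold the lowercase type
# names and dict keys (hypothetical values shown):
# STRING, VARCHAR, LONG, BIGINT = "string", "varchar", "long", "bigint"
# FIELD_NAME, FIELD_TYPE = "field_name", "field_type"
# appends, bads = check_rt_druid_fields(
#     {"ts": "long", "city": "string", "score": "double"},
#     {"ts": "bigint", "city": "varchar"},
# )
# appends == [{"field_name": "score", "field_type": "double"}]; bads == []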
def is_stuck(a, b, eta):
    """ Check if the Ricci flow is stuck (all changes below eta/50). """
    return ne.evaluate("a-b<eta/50").all()
def make_map(mapping):
"""
Takes a config.yml mapping, and returns a dict of mappers.
"""
# TODO: Is this the best place for this? Should it be a @staticmethod,
# or even part of its own class?
fieldmap = {}
for field, config in mapping.items():
        if isinstance(config, str):
            # Default case: map directly from a spreadsheet column
            fieldmap[field] = Map({"field": config})
        else:
            # Complex case!
            classname = map_type[config.get("type", "map")]
            fieldmap[field] = classname(config)
    return fieldmap
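
# Usage sketch, assuming the Map mapper class and map_type registry referenced
# above; the YAML layout below is hypothetical:
# mapping = {
#     "title": "Spreadsheet Title",                     # simple string -> Map
#     "date": {"type": "date", "field": "Event Date"},  # complex -> map_type["date"]
# }
# fieldmap = make_map(mapping)
# fieldmap["title"] is a Map; fieldmap["date"] is a map_type["date"] instance.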
def task_eeg_to_bids():
"""Step 00: Bring data set into a BIDS compliant directory structure."""
# Run the script for each subject in a sub-task.
for subject in subjects:
yield dict(
# This task should come after `task_check`
task_dep=['check'],
# A name for the sub-task: set to the name of the subject
name=subject,
# If any of these files change, the script needs to be re-run. Make
# sure that the script itself is part of this list!
file_dep=[fname.source(subject=subject,
source_type='eeg'),
fname.source(subject=subject,
source_type='demographics'),
'00_eeg_to_bids.py'],
# The files produced by the script
targets=[fname.bids_data(subject=subject)],
# How the script needs to be called. Here we indicate it should
# have one command line parameter: the name of the subject.
actions=['python 00_eeg_to_bids.py %s' % subject]
        )
def run_pre_mapping_settings_triggers(sender, instance: MappingSetting, **kwargs):
"""
:param sender: Sender Class
:param instance: Row instance of Sender Class
:return: None
"""
default_attributes = ['EMPLOYEE', 'CATEGORY', 'PROJECT', 'COST_CENTER']
instance.source_field = instance.source_field.upper().replace(' ', '_')
if instance.source_field not in default_attributes:
try:
upload_attributes_to_fyle(
workspace_id=int(instance.workspace_id),
sageintacct_attribute_type=instance.destination_field,
fyle_attribute_type=instance.source_field
)
except WrongParamsError as error:
logger.error(
'Error while creating %s workspace_id - %s in Fyle %s %s',
instance.source_field, instance.workspace_id, error.message, {'error': error.response}
)
if error.response:
response = json.loads(error.response)
if response and 'message' in response and \
response['message'] == ('duplicate key value violates unique constraint '
'"idx_expense_fields_org_id_field_name_is_enabled_is_custom"'):
raise ValidationError({
'message': 'Duplicate custom field name',
'field_name': instance.source_field
})
async_task(
'apps.mappings.tasks.auto_create_expense_fields_mappings',
int(instance.workspace_id),
instance.destination_field,
instance.source_field
    )
def stable_seasonal_filter(time_series: Sized, freq: int):
    """
    Stable seasonal filter for a series.
    :param time_series: the time series
    :param freq: period over which the seasonal means are computed
    :return: values of the seasonal component
    """
    length = len(time_series)
    if not isinstance(freq, int):
        raise TypeError('freq must be an integer')
    if freq < 1:
        raise ValueError(f'freq must be greater than zero (actually is {freq})')
    if length < freq:
        raise ValueError(f'Length of time series is less than freq ({length} < {freq})')
values = time_series.values if isinstance(time_series, pd.DataFrame) else time_series
seasonal = list()
for i in range(freq):
seasonal_values = [values[i + j * freq] for j in range(length) if i + j * freq < length]
seasonal.append(np.mean(seasonal_values))
seasonals = [seasonal for i in range(length)]
    return pd.DataFrame([i for i in chain(*seasonals)][:length])
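
# Usage sketch: extract a weekly (freq=7) seasonal component from a synthetic
# daily series (assumes numpy/pandas, as the function body already does).
import numpy as np
import pandas as pd

daily = pd.Series(np.sin(np.arange(28) * 2 * np.pi / 7))
weekly_component = stable_seasonal_filter(daily, freq=7)  # DataFrame of length 28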
def load_dataset(dataset, batch_size=512):
"""Load dataset with given dataset name.
Args:
dataset (str): name of the dataset, it has to be amazoncat-13k, amazoncat-14k,
eurlex-4.3k or rcv1-2k
batch_size (int): batch size of tf dataset
Returns:
(tf.dataset, tf.dataset, int, int, int, int): training dataset, testing dataset,
number of training data, number of testing data,
number of features, number of labels
"""
if dataset not in ['amazoncat-13k', 'amazoncat-14k', 'eurlex-4.3k', 'rcv1-2k']:
raise ValueError(
            'dataset has to be amazoncat-13k, amazoncat-14k, eurlex-4.3k or rcv1-2k')
# Download dataset
downloader.dataset(task='extreme')
path_to_train = None
path_to_test = None
if dataset == 'amazoncat-13k':
path_to_train = os.path.join('dataset', 'AmazonCat-13K', 'train.txt')
path_to_test = os.path.join('dataset', 'AmazonCat-13K', 'test.txt')
elif dataset == 'amazoncat-14k':
path_to_train = os.path.join('dataset', 'AmazonCat-14K', 'train.txt')
path_to_test = os.path.join('dataset', 'AmazonCat-14K', 'test.txt')
elif dataset == 'eurlex-4.3k':
path_to_train = os.path.join('dataset', 'EURLex-4.3K', 'train.txt')
path_to_test = os.path.join('dataset', 'EURLex-4.3K', 'test.txt')
elif dataset == 'rcv1-2k':
path_to_train = os.path.join('dataset', 'RCV1-2K', 'train.txt')
path_to_test = os.path.join('dataset', 'RCV1-2K', 'test.txt')
assert path_to_train is not None and path_to_test is not None
num_train, num_test, num_features, num_labels = obtain_dataset_info(
path_to_train, path_to_test)
ds_train = tf.data.TextLineDataset(path_to_train)
ds_train = ds_train.skip(1).map(lambda x: tf_function(x, num_features=num_features, num_labels=num_labels)).batch(batch_size)
ds_test = tf.data.TextLineDataset(path_to_test)
ds_test = ds_test.skip(1).map(lambda x: tf_function(x, num_features=num_features, num_labels=num_labels)).batch(batch_size)
    return ds_train, ds_test, num_train, num_test, num_features, num_labels
def make_str_lst_unc_val(id, luv):
"""
make_str_lst_unc_val(id, luv)
Make a formatted string from an ID string and a list of uncertain values.
Input
-----
id A number or a string that will be output as a string.
luv A list of DTSA-II UncertainValue2 items. These will be printed
as comma-delimited pairs with 6 digits following the decimal.
Return
------
A string with comma-delimited values with the ID and mean and uncertainty
for each item in the list. This is suitable for writing output to a .csv
file.
Example:
--------
import dtsa2.jmGen as jmg
import gov.nist.microanalysis.Utility as epu
nmZnO1 = 40.1
uvOKa1 = epu.UncertainValue2(0.269157,0.000126)
uvZnLa1 = epu.UncertainValue2(0.259251,9.4e-05)
uvSiKa1 = epu.UncertainValue2(0.654561,8.4e-05)
l_uvals = [uvOKa1, uvZnLa1, uvSiKa1]
    out = jmg.make_str_lst_unc_val(nmZnO1, l_uvals)
print(out)
1> 40.1, 0.269157, 0.000126, 0.259251, 0.000094, 0.654561, 0.000084
"""
    rv = "%s, " % (id)
    pairs = []
    for uv in luv:
        rc = round(uv.doubleValue(), 6)
        uc = round(uv.uncertainty(), 6)
        pairs.append("%g, %.6f" % (rc, uc))
    rv += ", ".join(pairs)
    return rv
def plot_faces(ax, coordinates, meta, st):
"""plot the faces"""
for s in st.faces:
        # check that this face isn't in the cut region
def t_param_difference(v1, v2):
return abs(meta["t"][v1] - meta["t"][v2])
if all(all(t_param_difference(v1, v2) < 2 for v2 in s) for v1 in s):
pts = np.array([coordinates[v] for v in s])
pts = np.array([nearest(np.max(pts, 0), p) for p in pts])
center = np.mean(pts, 0)
pts = (pts - center) / 1.8 + center
color = (0, 0, 1, .5)
if meta["s_type"][s] == (2, 1):
color = (1, 0, 0, .5)
p = Polygon(pts, closed=False, color=color)
            ax.add_patch(p)
def translate_null_strings_to_blanks(d: typing.Dict) -> typing.Dict:
"""Map over a dict and translate any null string values into ' '.
Leave everything else as is. This is needed because you cannot add TableCell
objects with only a null string or the client crashes.
:param Dict d: dict of item values.
:rtype Dict:
"""
# Beware: locally defined function.
def translate_nulls(s):
if s == "":
return " "
return s
new_d = {k: translate_nulls(v) for k, v in d.items()}
    return new_d
def test_uvh5_partial_write_ints_irregular_multi2(uv_uvh5, tmp_path):
"""
Test writing a uvh5 file using irregular interval for freq and pol and
integer dtype.
"""
full_uvh5 = uv_uvh5
partial_uvh5 = full_uvh5.copy()
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partial_uvh5.nsample_array = None
# initialize file on disk
partial_testfile = str(tmp_path / "outtest_partial.uvh5")
initialize_with_zeros_ints(partial_uvh5, partial_testfile)
# make a mostly empty object in memory to match what we'll write to disk
partial_uvh5.data_array = np.zeros_like(full_uvh5.data_array, dtype=np.complex64)
    partial_uvh5.flag_array = np.zeros_like(full_uvh5.flag_array, dtype=np.bool_)
partial_uvh5.nsample_array = np.zeros_like(
full_uvh5.nsample_array, dtype=np.float32
)
# define freqs and pols
freq_inds = [0, 1, 2, 7]
pol_inds = [0, 1, 3]
data_shape = (full_uvh5.Nblts, 1, len(freq_inds), len(pol_inds))
data = np.zeros(data_shape, dtype=np.complex64)
    flags = np.zeros(data_shape, dtype=np.bool_)
nsamples = np.zeros(data_shape, dtype=np.float32)
for ifreq, freq_idx in enumerate(freq_inds):
for ipol, pol_idx in enumerate(pol_inds):
data[:, :, ifreq, ipol] = full_uvh5.data_array[:, :, freq_idx, pol_idx]
flags[:, :, ifreq, ipol] = full_uvh5.flag_array[:, :, freq_idx, pol_idx]
nsamples[:, :, ifreq, ipol] = full_uvh5.nsample_array[
:, :, freq_idx, pol_idx
]
with uvtest.check_warnings(
UserWarning,
[
"Selected frequencies are not evenly spaced",
"Selected polarization values are not evenly spaced",
],
):
partial_uvh5.write_uvh5_part(
partial_testfile,
data,
flags,
nsamples,
freq_chans=freq_inds,
polarizations=full_uvh5.polarization_array[pol_inds],
)
# also write the arrays to the partial object
for ifreq, freq_idx in enumerate(freq_inds):
for ipol, pol_idx in enumerate(pol_inds):
partial_uvh5.data_array[:, :, freq_idx, pol_idx] = data[:, :, ifreq, ipol]
partial_uvh5.flag_array[:, :, freq_idx, pol_idx] = flags[:, :, ifreq, ipol]
partial_uvh5.nsample_array[:, :, freq_idx, pol_idx] = nsamples[
:, :, ifreq, ipol
]
# read in the file and make sure it matches
partial_uvh5_file = UVData()
# read in the full file and make sure it matches
# This file has weird telescope or antenna location information
# (not on the surface of the earth)
# which breaks the phasing when trying to check if the uvws match the antpos.
partial_uvh5_file.read(partial_testfile, run_check_acceptability=False)
assert partial_uvh5_file == partial_uvh5
# clean up
os.remove(partial_testfile)
    return
def HttpResponseRest(request, data):
"""
    Return an HTTP response in the correct output format (JSON, XML or HTML),
    according to the request.format parameter.
Format is automatically added when using the
:class:`igdectk.rest.restmiddleware.IGdecTkRestMiddleware` and views decorators.
"""
if request.format == Format.JSON:
encoded = json.dumps(data, cls=ComplexEncoder)
return HttpResponse(encoded, content_type=Format.JSON.content_type)
elif request.format == Format.HTML:
return HttpResponse(data)
elif request.format == Format.XML:
encoded = igdectk.xmlio.dumps(data)
return HttpResponse(encoded, content_type=Format.XML.content_type)
elif request.format == Format.TEXT:
return HttpResponse(data, content_type=Format.TEXT.content_type)
else:
        return None
def build_server_update_fn(model_fn, server_optimizer_fn, server_state_type,
model_weights_type):
"""Builds a `tff.tf_computation` that updates `ServerState`.
Args:
model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
server_state_type: type_signature of server state.
model_weights_type: type_signature of model weights.
Returns:
A `tff.tf_computation` that updates `ServerState`.
"""
@tff.tf_computation(server_state_type, model_weights_type.trainable)
def server_update_tf(server_state, model_delta):
"""Updates the `server_state`.
Args:
server_state: The `ServerState`.
model_delta: The model difference from clients.
Returns:
The updated `ServerState`.
"""
model = model_fn()
server_optimizer = server_optimizer_fn()
# Create optimizer variables so we have a place to assign the optimizer's
# state.
server_optimizer_vars = _create_optimizer_vars(model, server_optimizer)
return server_update(model, server_optimizer, server_optimizer_vars,
server_state, model_delta)
    return server_update_tf
def madgraph_tarball_filename(physics):
"""Returns the basename of a MadGraph tarball for the given physics"""
# Madgraph tarball filenames do not have a part number associated with them; overwrite it
return svj_filename("step0_GRIDPACK", Physics(physics, part=None)).replace(
".root", ".tar.xz"
    )
def _mk_cmd(verb, code, payload, dest_id, **kwargs) -> Command:
"""A convenience function, to cope with a change to the Command class."""
    return Command.from_attrs(verb, dest_id, code, payload, **kwargs)
def test_config_absent_already_configured():
"""
config_absent method - add config removed
"""
config_data = [
"snmp-server community randomSNMPstringHERE group network-operator",
"snmp-server community AnotherRandomSNMPSTring group network-admin",
]
side_effect = MagicMock(side_effect=[[], []])
with patch.dict(nxos_state.__opts__, {"test": False}):
with patch.dict(nxos_state.__salt__, {"nxos.cmd": side_effect}):
result = nxos_state.config_absent(config_data)
assert result["name"] == config_data
assert result["result"]
assert result["changes"] == {}
assert result["comment"] == "Config is already absent" | 5,353,017 |
def getobjname(item):
    """Return the object's Name, or a blank string if it has none."""
    try:
        objname = item.Name
    except BadEPFieldError:
        objname = ' '
    return objname
def test_generator(storage):
"""It should support generator values."""
breaker = CircuitBreaker(state_storage=storage)
@breaker
def func_yield_succeed():
"""Docstring"""
yield True
@breaker
def func_yield_exception():
"""Docstring"""
x = yield True
raise DummyException(x)
s = func_yield_succeed()
e = func_yield_exception()
next(e)
with raises(DummyException):
e.send(True)
assert 1 == breaker.fail_counter
assert next(s)
with raises(StopIteration):
next(s)
    assert 0 == breaker.fail_counter
def process_fire_data(filename=None, fire=None, and_save=False, timezone='Asia/Bangkok', to_drop=True):
""" Add datetime, drop duplicate data and remove uncessary columns.
"""
if filename:
fire = pd.read_csv(filename)
# add datetime
fire = add_datetime_fire(fire, timezone)
# drop duplicate data
print('before drop', fire.shape)
# sort values by brightness
try:
# for MODIS file
fire = fire.sort_values(
['datetime', 'lat_km', 'long_km', 'brightness'], ascending=False)
    except KeyError:
# for VIIRS
fire = fire.sort_values(
['datetime', 'lat_km', 'long_km', 'bright_ti4'], ascending=False)
if to_drop:
fire = fire.drop_duplicates(['datetime', 'lat_km', 'long_km'])
    # drop unnecessary columns
try:
columns_to_drop = [
'acq_date',
'satellite',
'instrument',
'version',
'daynight',
'bright_t31',
'type']
columns_to_drop = [s for s in columns_to_drop if s in fire.columns]
fire = fire.drop(columns_to_drop, axis=1)
    except KeyError:
columns_to_drop = [
'acq_date',
'satellite',
'instrument',
'version',
'daynight',
'bright_ti5',
'type']
columns_to_drop = [s for s in columns_to_drop if s in fire.columns]
fire = fire.drop(columns_to_drop, axis=1)
fire = fire.sort_values('datetime')
fire = fire.set_index('datetime')
# remove the data before '2002-07-04' because there is only one satellite
fire = fire.loc['2002-07-04':]
print('after drop', fire.shape)
if and_save:
fire.to_csv(filename, index=False)
else:
        return fire
def schedule_slack_tweets(**kwargs):
"""
Schedule a tweet to be sent out once it is user approved in slack
"""
num_tweets = 1
interval = 15
tweet_url = "https://twitter.com/{name}/status/{tweet_id}"
embeded_tweet = tweet_url.format(name=kwargs["screen_name"], tweet_id=kwargs["tweet_id"])
for mins in range(interval,(num_tweets*interval+1), interval):
remind_time = kwargs["event_time"] - timedelta(minutes=mins)
# #PyConOpenSpace add this back into message when done testing
message = "Coming up in {} minutes! {}".format(mins, embeded_tweet)
# TODO add the updated tweet_id field to this object when it's saved
db_utils.save_outgoing_tweet(tweet=message,
tweet_id=kwargs["tweet_id"],
approved=kwargs["approved"],
scheduled_time=remind_time,
original_tweet=kwargs["tweet"],
screen_name=kwargs["screen_name"],
event_obj=kwargs["event_obj"]) | 5,353,021 |
def __feed_pets_without_confirmation(plan: FeedPlan):
"""Feed all the pets in the plan. Print the result to the terminal.
Warning: this function does ask for confirmation.
"""
# Feel free to refactor this such that we don't iterate over
# the plan twice.
feed_requests: List[Callable[[], Response]] = [
FeedPostRequester.build_from(item).post_feed_request
for item in plan
]
throttler = RateLimitingAwareThrottler(feed_requests)
feed_response_iter: Iterator[Response] = throttler.perform_and_yield_response()
for item in plan:
feed_response: Response = next(feed_response_iter)
response_json: Dict[str, Any] = feed_response.json()
if response_json["success"] is True:
click.echo(response_json["message"])
else:
click.echo(f"Failed to feed {item.pet_name}\n"
f"{response_json['error']}: {response_json['message']}") | 5,353,022 |
def create_controllable_source(source, control, loop, sleep):
"""Makes an observable controllable to handle backpressure
This function takes an observable as input makes it controllable by
executing it in a dedicated worker thread. This allows to regulate
the emission of the items independently of the asyncio event loop.
    Args:
        - source: An observable emitting the source items.
        - control: [Optional] The control observable emitting delay items in seconds.
        - loop: The asyncio event loop on which the items are finally emitted.
        - sleep: The sleep function to use. Needed only for testing.
    Returns:
        An observable similar to the source observable, with emission being
        controlled by the control observable.
    """
if control is not None:
typed_control = control.pipe(
ops.observe_on(NewThreadScheduler()),
ops.map(ControlItem),
)
scheduled_source = source.pipe(
ops.subscribe_on(NewThreadScheduler()),
ops.merge(typed_control),
ops.map(lambda i: control_sync(i, sleep)),
ops.filter(lambda i: i is not ControlItem),
ops.observe_on(AsyncIOThreadSafeScheduler(loop)),
)
else:
scheduled_source = source.pipe(
ops.subscribe_on(NewThreadScheduler()),
ops.observe_on(AsyncIOThreadSafeScheduler(loop)),
)
    return scheduled_source
def getE5():
    """
    Return the current value of E5.
    """
    return E5.get()
def test_translate_command(command, expected):
"""Check the marcel --> docker command translation."""
    assert translate_command(command) == expected
def triangulate(pts_subset):
"""
This function encapsulates the whole triangulation algorithm into four
steps. The function takes as input a list of points. Each point is of the
form [x, y], where x and y are the coordinates of the point.
Step 1) The list of points is split into groups. Each group has exactly
two or three points.
Step 2) For each group of two point, a single edge is generated. For each
group of three points, three edges forming a triangle are
generated. These are the 'primitive' triangulations.
Step 3) The primitive triangulations are paired into groups.
Step 4) The groups are then recursively merged until there is only a
single triangulation of all points remaining.
Parameters
----------
pts_subset : list
A list of points with the form [ [x1, y1], [x2, y2], ..., [xn, yn] ]
The first element of each list represents the x-coordinate, the second
entry the y-coordinate.
Returns
-------
out : list
List with a single element. The TriangulationEdges class object with
        the completed Delaunay triangulation of the input points.
See TriangulationEdges docstring for further info.
"""
split_pts = split_list.groups_of_3(pts_subset)
primitives = make_primitives(split_pts)
groups = [primitives[i:i+2] for i in range(0, len(primitives), 2)]
groups = recursive_group_merge(groups)
    return groups[0][0]
def get_session(region, default_bucket):
"""Gets the sagemaker session based on the region.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
        A `sagemaker.session.Session` instance.
"""
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client("sagemaker")
runtime_client = boto_session.client("sagemaker-runtime")
return sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_runtime_client=runtime_client,
default_bucket=default_bucket,
    )
def safe_write_file(file: str, s: str) -> None:
"""
Safely write to a file by acquiring an exclusive lock to prevent other
processes from reading and writing to it while writing.
"""
# Open in read and update mode, so we don't modify the file before we acquire a lock
file_obj = open(file, "r+")
try:
# acquire an exclusive lock
fcntl.flock(file_obj.fileno(), fcntl.LOCK_EX)
try:
file_obj.seek(0)
file_obj.write(s)
file_obj.truncate()
finally:
fcntl.flock(file_obj.fileno(), fcntl.LOCK_UN)
finally:
        file_obj.close()
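
# Usage sketch: atomically replace the contents of an existing file. The "r+"
# mode means the file must already exist; the flock/unlock pair (in try/finally,
# so the lock is always released) keeps concurrent readers and writers from
# seeing a half-written state.
# safe_write_file("state.json", '{"status": "ok"}')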
def intp_sc(x, points):
"""
SCurve spline based interpolation
args:
x (list) : t coordinate list
points (list) : xyz coordinate input points
returns:
x (relative coordinate point list)
o (xyz coordinate points list, resplined)
"""
sc = vtk.vtkSCurveSpline()
for i in points:
sc.AddPoint(i[0], i[1])
o = []
for i in x:
o.append(sc.Evaluate(i))
    return x, o
def backend():
"""Publicly accessible method
for determining the current backend.
# Returns
String, the name of the backend PyEddl is currently using.
# Example
```python
>>> eddl.backend.backend()
'eddl'
```
"""
    return _BACKEND
def query_collection_mycollections():
"""
Query Content API Collection with access token.
"""
access_token = request.args.get("access_token", None)
if access_token is not None and access_token != '':
# Construct an Authorization header with the value of 'Bearer <access token>'
headers = {
"Accept": "application/json",
"Authorization": "Bearer " + access_token
}
url = APP_CONFIG['CONTENTAPI_COLLECTIONS_URL'] + 'mycollections'
r = s.get(url, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))
if r.status_code in (400,500):
# Handle known errors
result = r.json()
return jsonify(result)
elif r.status_code == 200:
result = r.json()
params = {
'access_token': access_token,
'endpoint_path': '/mycollections',
'mycollections_results': json.dumps(result, indent=2),
'mycollections_results_obj': result
}
return render_template('mycollections.html', **params)
else:
# Handle unknown error
return (r.text, r.status_code, r.headers.items())
else:
return "access_token not specified" | 5,353,031 |
def test_merge2_sql_semantics_outerjoin_multi_keep_firstNone():
"""
Test that merge2 matches the following SQL query:
select
f.id as foo_id,
f.col1 as foo_col1,
f.col2 as foo_col2,
f.team_name as foo_teamname,
b.id as bar_id,
b.col1 as bar_col1,
b.col2 as bar_col2,
b.strcol as bar_strcol
from (
select *
from sql_semantics.foo
inner join (
select distinct FIRST_VALUE(id) over w as firstlast_row_id
from sql_semantics.foo
window w as (partition by foo.col1, foo.col2 order by foo.id asc)
) as foo_ids
on
foo.id = foo_ids.firstlast_row_id
) as f
full outer join
sql_semantics.bar as b
on
f.col1 = b.col1
and
f.col2 = b.col2
order by f.id, b.id asc;
"""
foo, bar = TestDataset.sql_semantics2()
result = rt.merge2(
foo,
bar,
on=[('col1', 'col1'), ('col2', 'col2')],
how='outer',
suffixes=('_foo', '_bar'),
keep=('first', None),
indicator=True,
)
assert result.get_nrows() == 19
# "foo_id","foo_col1","foo_col2","foo_teamname","bar_id","bar_col1","bar_col2","bar_strcol"
# 1,5,NULL,"Phillies",NULL,NULL,NULL,NULL
# 2,5,5,"Eagles",8,5,5,"Lombard"
# 2,5,5,"Eagles",11,5,5,"Arch"
# 3,8,NULL,"76ers",NULL,NULL,NULL,NULL
# 4,NULL,1,"Flyers",NULL,NULL,NULL,NULL
# 5,10,1,"Union",NULL,NULL,NULL,NULL
# 6,NULL,4,"Wings",NULL,NULL,NULL,NULL
# 7,-1,22,"Fusion",NULL,NULL,NULL,NULL
# 8,11,9,"Fight",NULL,NULL,NULL,NULL
# NULL,NULL,NULL,NULL,1,10,4,"Chestnut"
# NULL,NULL,NULL,NULL,2,10,NULL,"Pine"
# NULL,NULL,NULL,NULL,3,8,NULL,"Walnut"
# NULL,NULL,NULL,NULL,4,NULL,3,"Locust"
# NULL,NULL,NULL,NULL,5,NULL,NULL,"Cherry"
# NULL,NULL,NULL,NULL,6,NULL,NULL,"Spruce"
# NULL,NULL,NULL,NULL,7,NULL,1,"Cypress"
# NULL,NULL,NULL,NULL,9,5,NULL,"Sansom"
# NULL,NULL,NULL,NULL,10,14,9,"Market"
# NULL,NULL,NULL,NULL,12,-15,13,"Vine"
inv = rt.int32.inv
# Intersection cols (the 'on' cols)
assert_array_equal(result.col1, rt.FA([5, 5, 5, 8, inv, 10, inv, -1, 11, 10, 10, 8, inv, inv, inv, inv, 5, 14, -15], dtype=np.int32))
assert_array_equal(result.col2, rt.FA([inv, 5, 5, inv, 1, 1, 4, 22, 9, 4, inv, inv, 3, inv, inv, 1, inv, 9, 13], dtype=np.int32))
# Cols from the left Dataset.
assert_array_equal(result.id_foo, rt.FA([1, 2, 2, 3, 4, 5, 6, 7, 8, inv, inv, inv, inv, inv, inv, inv, inv, inv, inv], dtype=np.int32))
assert_array_equal(
result.team_name,
rt.FA([b'Phillies', b'Eagles', b'Eagles', b'76ers', b'Flyers', b'Union', b'Wings', b'Fusion', b'Fight', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'']))
# Cols from the right Dataset.
assert_array_equal(
result.id_bar, rt.FA([inv, 8, 11, inv, inv, inv, inv, inv, inv, 1, 2, 3, 4, 5, 6, 7, 9, 10, 12], dtype=np.int32)
)
assert_array_equal(
result.strcol,
        rt.FA([b'', b'Lombard', b'Arch', b'', b'', b'', b'', b'', b'', b'Chestnut', b'Pine', b'Walnut', b'Locust', b'Cherry', b'Spruce', b'Cypress', b'Sansom', b'Market', b'Vine']))
def xmlbuildmanual() -> __xml_etree:
    """
    Returns an empty xml ElementTree handle to build/work with xml data.
    Assign the output to a var.
    This uses the native xml library via etree, shipped with the python standard library.
    For more information on the xml.etree api, visit: https://docs.python.org/3/library/xml.etree.elementtree.html#module-xml.etree.ElementTree
    """
    return __xml_etree
def graclus_cluster(row, col, weight=None, num_nodes=None):
"""A greedy clustering algorithm of picking an unmarked vertex and matching
it with one its unmarked neighbors (that maximizes its edge weight).
Args:
row (LongTensor): Source nodes.
col (LongTensor): Target nodes.
weight (Tensor, optional): Edge weights. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes. (default: :obj:`None`)
Examples::
>>> row = torch.LongTensor([0, 1, 1, 2])
>>> col = torch.LongTensor([1, 0, 2, 1])
>>> weight = torch.Tensor([1, 1, 1, 1])
>>> cluster = graclus_cluster(row, col, weight)
"""
num_nodes = row.max().item() + 1 if num_nodes is None else num_nodes
if row.is_cuda: # pragma: no cover
row, col = sort_row(row, col)
else:
row, col = randperm(row, col)
row, col = randperm_sort_row(row, col, num_nodes)
row, col = remove_self_loops(row, col)
cluster = row.new_empty((num_nodes, ))
graclus(cluster, row, col, weight)
    return cluster
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
    return serve_tf_examples_fn
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
    If prices stay relatively still, we don't buy or sell: the change is 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
    return decimal.Decimal(change).quantize(TWO_PLACES)
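
# Worked sketch with a hypothetical stand-in for candles.Candle (the real class
# is not shown here). With multiplier 10000 (a 4-decimal pair), a rise from
# 1.1002 (open ask) to 1.1050 (close bid) is a profitable change of 48.00 pips.
from collections import namedtuple

_Instrument = namedtuple("_Instrument", "multiplier")
_Candle = namedtuple("_Candle", "instrument open_bid open_ask close_bid close_ask")
candle = _Candle(_Instrument(10000), 1.1000, 1.1002, 1.1050, 1.1052)
# get_profitable_change(candle) -> Decimal('48.00'), i.e. (1.1050 - 1.1002) * 10000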
def load_all_functions(path, tool, factorize=True, agents_quantities=False, rewards_only=False, f_only=False):
""" Loads all results of parameter synthesis from *path* folder into two maps - f list of rational functions for each property, and rewards list of rational functions for each reward
Args:
path (string): file name regex
factorize (bool): if true it will factorise polynomial results
rewards_only (bool): if true it parse only rewards
f_only (bool): if true it will parse only standard properties
agents_quantities (list of numbers or False): of population sizes to be used, if False, the whole path used
tool (string): a tool of which is the output from (PRISM/STORM)
Returns:
(f,reward), where
f: dictionary N -> list of rational functions for each property
rewards: dictionary N -> list of rational functions for each reward
"""
## Setting the current directory
default_directory = os.getcwd()
if not Path(path).is_absolute():
if tool.lower().startswith("p"):
os.chdir(prism_results)
elif tool.lower().startswith("s"):
os.chdir(storm_results)
else:
print("Selected tool unsupported.")
return ({}, {})
f = {}
rewards = {}
# print(str(path))
new_dir = os.getcwd()
if not glob.glob(str(path)):
if not Path(path).is_absolute():
os.chdir(default_directory)
print("No files match the pattern " + os.path.join(new_dir, path))
return ({}, {})
no_files = True
## Choosing files with the given pattern
for functions_file in glob.glob(str(path)):
try:
population_size = int(re.findall(r'\d+', functions_file)[0])
except IndexError:
population_size = 0
## Parsing only selected agents quantities
if agents_quantities:
if population_size not in agents_quantities:
continue
else:
no_files = False
print("parsing ", os.path.join(os.getcwd(), functions_file))
# print(os.getcwd(), file)
with open(functions_file, "r") as file:
i = -1
here = ""
f[population_size] = []
rewards[population_size] = []
            ## PARSING PRISM/STORM OUTPUT
            line_index = 0
            for line in file:
                ## Detect the tool from the first line of the output
                if line_index == 0 and tool == "unknown":
                    if line.lower().startswith("prism"):
                        tool = "prism"
                    elif line.lower().startswith("storm"):
                        tool = "storm"
                    else:
                        print("Tool not recognised!!")
                if line.startswith('Parametric model checking:') or line.startswith('Model checking property'):
i = i + 1
here = ""
## STORM check if rewards
if "R[exp]" in line:
here = "r"
## PRISM check if rewards
if line.startswith('Parametric model checking: R'):
here = "r"
if i >= 0 and line.startswith('Result'):
                    ## PARSE THE EXPRESSION
# print("line:", line)
if tool.lower().startswith("p"):
line = line.split(":")[2]
elif tool.lower().startswith("s"):
line = line.split(":")[1]
## CONVERT THE EXPRESSION TO PYTHON FORMAT
line = line.replace("{", "")
line = line.replace("}", "")
## PUTS "* " BEFORE EVERY WORD (VARIABLE)
line = re.sub(r'([a-z|A-Z]+)', r'* \1', line)
# line = line.replace("p", "* p")
# line = line.replace("q", "* q")
line = line.replace("**", "*")
line = line.replace("* *", "*")
line = line.replace("* *", "*")
line = line.replace("+ *", "+")
line = line.replace("^", "**")
line = line.replace(" ", "")
line = line.replace("*|", "|")
line = line.replace("|*", "|")
line = line.replace("|", "/")
line = line.replace("(*", "(")
line = line.replace("+*", "+")
line = line.replace("-*", "-")
if line.startswith('*'):
line = line[1:]
if line[-1] == "\n":
line = line[:-1]
if here == "r" and not f_only:
# print(f"pop: {N}, formula: {i+1}", line)
if factorize:
try:
rewards[population_size].append(str(factor(line)))
except TypeError:
print("Error while factorising rewards, used not factorised instead")
rewards[population_size].append(line)
# os.chdir(cwd)
else:
rewards[population_size].append(line)
elif not here == "r" and not rewards_only:
# print(f"pop: {N}, formula: {i+1}", line[:-1])
if factorize:
try:
f[population_size].append(str(factor(line)))
except TypeError:
print(f"Error while factorising polynomial f[{population_size}][{i + 1}], used not factorised instead")
f[population_size].append(line)
# os.chdir(cwd)
else:
f[population_size].append(line)
line_index = line_index + 1
os.chdir(default_directory)
if no_files and agents_quantities:
print("No files match the pattern " + os.path.join(new_dir, path) + " and restriction " + str(agents_quantities))
    return (f, rewards)
def run_epoch():
"""Runs one epoch and returns reward averaged over test episodes"""
rewards = []
for _ in range(NUM_EPIS_TRAIN):
run_episode(for_training=True)
for _ in range(NUM_EPIS_TEST):
rewards.append(run_episode(for_training=False))
    return np.mean(np.array(rewards))
def _max_pool(heat, kernel=3):
"""
NCHW
do max pooling operation
"""
# print("heat.shape: ", heat.shape) # default: torch.Size([1, 1, 152, 272])
pad = (kernel - 1) // 2
h_max = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
# print("h_max.shape: ", h_max.shape) # default: torch.Size([1, 1, 152, 272])
keep = (h_max == heat).float() # 将boolean类型的Tensor转换成Float类型的Tensor
# print("keep.shape: ", keep.shape, "keep:\n", keep)
return heat * keep | 5,353,039 |
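
# Usage sketch: the CenterNet-style NMS trick -- keypoints are the heatmap
# cells that survive 3x3 max pooling (local maxima); everything else is zeroed.
# Assumes torch.nn is imported as nn at module level, as the function requires.
import torch

heat = torch.rand(1, 1, 152, 272)   # NCHW heatmap of detection scores
peaks = _max_pool(heat, kernel=3)   # same shape, non-maxima suppressed to zero
scores, flat_inds = torch.topk(peaks.view(1, -1), k=100)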
def getBool(string):
"""
Stub function, set PshellServer.py softlink to PshellServer-full.py for full functionality
"""
    return (True)
def multi_dists(
continuous,
categorical,
count_cutoff,
summary_type,
ax=None,
stripplot=False,
order="ascending",
newline_counts=False,
xtick_rotation=45,
xtick_ha="right",
seaborn_kwargs={},
stripplot_kwargs={},
):
"""
Compare the distributions of a continuous variable when grouped
by a categorical one.
Parameters
----------
continuous : Series
continuous values to plot
categorical : Series
categorical values (groups) to plot
    count_cutoff : int
        minimum number of samples per group to include
summary_type : string, "box" or "violin"
type of summary plot to make
ax : MatPlotLib axis
axis to plot in (will create new one if not provided)
stripplot : boolean
whether or not to plot the raw values
order : "ascending", "descending", or list of categories
how to sort categories in the plot
newline_counts : boolean
whether to add category counts as a separate line
in the axis labels
    xtick_rotation : float
        how much to rotate the xtick labels by (in degrees)
xtick_ha : string
horizontal alignment of the xtick labels
seaborn_kwargs : dictionary
additional arguments to pass to Seaborn boxplot/violinplot
stripplot_kwargs : dictionary
additional arguments to pass to Seaborn stripplot (if stripplot=True)
Returns
-------
ax : MatPlotLib axis
axis with plot data
"""
if ax is None:
ax = plt.subplot(111)
# remove NaNs and convert continuous
continuous = pd.Series(continuous).dropna()
categorical = pd.Series(categorical).dropna().astype(str)
# series names
continuous_name = str(continuous.name)
categorical_name = str(categorical.name)
# handle cases where series names are missing or identical
if continuous_name is None:
continuous_name = "continuous"
if categorical_name is None:
categorical_name = "categorical"
if continuous_name == categorical_name:
continuous_name += "_continuous"
categorical_name += "_categorical"
merged = pd.concat([continuous, categorical], axis=1, join="inner")
merged.columns = [continuous_name, categorical_name]
# counts per category, with cutoff
categorical_counts = Counter(merged[categorical_name])
merged["count"] = merged[categorical_name].apply(categorical_counts.get)
merged = merged[merged["count"] >= count_cutoff]
merged_sorted = (
merged.groupby([categorical_name])[continuous_name]
.aggregate(np.median)
.reset_index()
)
# sort categories by mean
if order == "ascending":
merged_sorted = merged_sorted.sort_values(
continuous_name, ascending=True
)
order = merged_sorted[continuous_name]
elif order == "descending":
merged_sorted = merged_sorted.sort_values(
continuous_name, ascending=False
)
order = merged_sorted[continuous_name]
else:
merged_sorted["continuous_idx"] = merged_sorted[
categorical_name
].apply(order.index)
merged_sorted = merged_sorted.sort_values(
"continuous_idx", ascending=True
)
# recompute category counts after applying cutoff
counts = merged_sorted[categorical_name].apply(categorical_counts.get)
counts = counts.astype(str)
# x-axis labels with counts
if newline_counts:
x_labels = merged_sorted[categorical_name] + "\n(" + counts + ")"
else:
x_labels = merged_sorted[categorical_name] + " (" + counts + ")"
if summary_type == "violin":
sns.violinplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
inner=None,
ax=ax,
**seaborn_kwargs,
)
elif summary_type == "box":
sns.boxplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
notch=True,
ax=ax,
**seaborn_kwargs,
)
if stripplot:
sns.stripplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
size=2,
alpha=0.5,
linewidth=1,
jitter=0.1,
edgecolor="black",
ax=ax,
**stripplot_kwargs,
)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_xticklabels(x_labels, rotation=xtick_rotation, ha=xtick_ha)
    return ax
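
# Usage sketch: compare a numeric column across groups, dropping groups with
# fewer than 5 samples (synthetic data for illustration; assumes seaborn and
# Counter are imported at module level, as the function requires).
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

values = pd.Series(np.random.normal(size=300), name="expression")
groups = pd.Series(np.random.choice(["A", "B", "C"], size=300), name="tissue")
ax = multi_dists(values, groups, count_cutoff=5, summary_type="box", stripplot=True)
plt.show()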
def expand_xdg(xdg_var: str, path: str) -> PurePath:
"""Return the value of an XDG variable prepended to path.
This function expands an XDG variable, and then concatenates to it the
given path. The XDG variable name can be passed both uppercase or
lowercase, and either with or without the 'XDG_' prefix.
"""
xdg_var = xdg_var if xdg_var.startswith('XDG_') else 'XDG_' + xdg_var
    return getattr(xdg, xdg_var.upper()) / path
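
# Usage sketch (all three spellings resolve to the same XDG variable):
# expand_xdg("XDG_CONFIG_HOME", "myapp/settings.toml")
# expand_xdg("config_home", "myapp/settings.toml")
# expand_xdg("CONFIG_HOME", "myapp/settings.toml")
# With the default layout each returns a path like ~/.config/myapp/settings.toml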
def follow_index(request):
    """View the current user's subscriptions."""
    users = request.user.follower.all()
    paginator = Paginator(users, 3)
    page_number = request.GET.get('page')
    page = paginator.get_page(page_number)
    return render(request, 'recipes/follow_index.html',
                  {'page': page, 'paginator': paginator})
def test_sample_multi_sync_1():
"""
Tests the multi-threaded sample submission
:return:
"""
t = ThunderstormAPI(host=THOR_THUNDERSTORM_HOST, port=THOR_THUNDERSTORM_PORT)
status1 = t.get_status()
results = t.scan_multi(SAMPLES_1)
status2 = t.get_status()
assert results
assert len(results) > 0
assert int(status1['scanned_samples']) < int(status2['scanned_samples'])
for r in results:
        assert r != {}
def main():
"""
Main function :)
:return:
"""
config = Config()
config.read()
level = logging.INFO
if config.enforce_type(bool, config.DEFAULT.Debug):
level = logging.DEBUG
set_sql_debug(True)
# our logger
logger = setup_logger('mesh', level)
# meshtastic logger
logging.basicConfig(level=level,
format=LOGFORMAT)
#
telegram_connection = TelegramConnection(config.Telegram.Token, logger)
meshtastic_connection = MeshtasticConnection(config.Meshtastic.Device, logger)
meshtastic_connection.connect()
database = MeshtasticDB(config.Meshtastic.DatabaseFile, meshtastic_connection, logger)
#
aprs_streamer = APRSStreamer(config)
call_sign_filter = CallSignFilter(database, config, meshtastic_connection, logger)
aprs_streamer.set_filter(call_sign_filter)
aprs_streamer.set_logger(logger)
#
telegram_bot = TelegramBot(config, meshtastic_connection, telegram_connection)
telegram_filter = TelegramFilter(database, config, meshtastic_connection, logger)
telegram_bot.set_filter(telegram_filter)
telegram_bot.set_logger(logger)
#
meshtastic_bot = MeshtasticBot(database, config, meshtastic_connection, telegram_connection)
meshtastic_filter = MeshtasticFilter(database, config, meshtastic_connection, logger)
meshtastic_bot.set_filter(meshtastic_filter)
meshtastic_bot.set_logger(logger)
meshtastic_bot.subscribe()
#
web_server = WebServer(config, meshtastic_connection, logger)
# non-blocking
aprs_streamer.run()
web_server.run()
telegram_bot.run()
# blocking
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
web_server.shutdown()
logger.info('Exit requested...')
            sys.exit(0)
def test_drawcounties_cornbelt():
"""draw counties on the map"""
mp = MapPlot(sector="cornbelt", title="Counties", nocaption=True)
mp.drawcounties()
    return mp.fig
def calculate_kde_cli(
ascending=True,
evaluate=False,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
source_units=None,
target_units=None,
names=None,
tablefmt="csv",
):
"""Return the kernel density estimation (KDE) curve.
Returns a time-series or the KDE curve depending on the `evaluate`
keyword.
Parameters
----------
ascending : bool
[optional, defaults to True, input filter]
Sort order.
evaluate : bool
[optional, defaults to False, transformation]
Whether or not to return a time-series of KDE density values or
the KDE curve. Defaults to False, which would return the KDE
curve.
{input_ts}
{columns}
{start_date}
{end_date}
{skiprows}
{index_type}
{names}
{source_units}
{target_units}
{clean}
{tablefmt}
"""
tsutils.printiso(
calculate_kde(
ascending=ascending,
evaluate=evaluate,
input_ts=input_ts,
columns=columns,
start_date=start_date,
end_date=end_date,
clean=clean,
skiprows=skiprows,
index_type=index_type,
source_units=source_units,
target_units=target_units,
names=names,
),
tablefmt=tablefmt,
    )
def convert_remoteResources_fields(asset):
"""
"remoteResources" : [ {
"@class": ".XRemoteResource",
"dataSource": null,
"keywords": null,
"label": "testresource",
"lastModifiedTimestamp": 1472138644728,
"remoteResourceId": 8446,
"resourceNumber": "1258.1548.58756.098",
"status": "active",
"url": null
}
],
"""
try:
remoteResources = None
if 'remoteResources' in asset:
remoteResources = asset['remoteResources']
if remoteResources is None:
asset['remoteResources'] = []
return
for remote in remoteResources:
            # Convert to int (epoch milliseconds timestamp); the Python 2
            # long() builtin no longer exists in Python 3
            if 'lastModifiedTimestamp' in remote:
                if remote['lastModifiedTimestamp']:
                    try:
                        tmp = int(remote['lastModifiedTimestamp'])
                        remote['lastModifiedTimestamp'] = tmp
                    except (TypeError, ValueError):
                        message = 'Failed to convert remoteResources lastModifiedTimestamp field to int. '
                        raise Exception(message)
# Convert to int
if 'remoteResourceId' in remote:
if remote['remoteResourceId']:
try:
tmp = int(remote['remoteResourceId'])
remote['remoteResourceId'] = tmp
                    except (TypeError, ValueError):
message = 'Failed to convert remoteResources remoteResourceId field to int. '
raise Exception(message)
return
except Exception as err:
        raise Exception(str(err))
def install(name, dst, capture_error=False):
"""Install the user provided entry point to be executed as follow:
- add the path to sys path
- if the user entry point is a command, gives exec permissions to the script
Args:
name (str): name of the script or module.
dst (str): path to directory with the script or module.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
"""
if dst not in sys.path:
sys.path.insert(0, dst)
entrypoint_type = entry_point_type(dst, name)
if entrypoint_type is EntryPointType.PYTHON_PACKAGE:
_modules.install(dst, capture_error)
if entrypoint_type is EntryPointType.COMMAND:
        os.chmod(os.path.join(dst, name), 0o777)  # 511 decimal == 0o777
def atomic_degrees(mol: IndigoObject) -> dict:
"""Get the number of atoms direct neighbors (except implicit hydrogens) in a molecule.
Args:
IndigoObject: molecule object
Returns:
dict: key - feature name, value - torch.tensor of atomic degrees
"""
degrees = []
for atom in mol.iterateAtoms():
degrees.append(atom.degree())
return {"degrees": torch.tensor(degrees).unsqueeze(1)} | 5,353,050 |
def hotspots(raster, kernel, x='x', y='y'):
"""Identify statistically significant hot spots and cold spots in an input
raster. To be a statistically significant hot spot, a feature will have a
high value and be surrounded by other features with high values as well.
    Neighborhood of a feature is defined by the input kernel, which currently
    supports circle, annulus, or custom kernel shapes.
The result should be a raster with the following 7 values:
90 for 90% confidence high value cluster
95 for 95% confidence high value cluster
99 for 99% confidence high value cluster
-90 for 90% confidence low value cluster
-95 for 95% confidence low value cluster
-99 for 99% confidence low value cluster
0 for no significance
Parameters
----------
raster: xarray.DataArray
Input raster image with shape=(height, width)
kernel: Kernel
Returns
-------
hotspots: xarray.DataArray
"""
# validate raster
if not isinstance(raster, DataArray):
raise TypeError("`raster` must be instance of DataArray")
if raster.ndim != 2:
raise ValueError("`raster` must be 2D")
if not (issubclass(raster.values.dtype.type, np.integer) or
issubclass(raster.values.dtype.type, np.floating)):
raise ValueError(
"`raster` must be an array of integers or float")
raster_dims = raster.dims
if raster_dims != (y, x):
raise ValueError("raster.coords should be named as coordinates:"
"(%s, %s)".format(y, x))
# apply kernel to raster values
mean_array = convolve_2d(raster.values, kernel / kernel.sum(), pad=True)
# calculate z-scores
global_mean = np.nanmean(raster.values)
global_std = np.nanstd(raster.values)
if global_std == 0:
raise ZeroDivisionError("Standard deviation "
"of the input raster values is 0.")
z_array = (mean_array - global_mean) / global_std
out = _hotspots(z_array)
result = DataArray(out,
coords=raster.coords,
dims=raster.dims,
attrs=raster.attrs)
    return result
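
# Usage sketch with a uniform 5x5 kernel standing in for the circle/annulus
# kernels mentioned above (assumes the module-level convolve_2d and _hotspots
# helpers the function relies on).
import numpy as np
import xarray as xr

raster = xr.DataArray(np.random.normal(size=(100, 100)), dims=("y", "x"))
kernel = np.ones((5, 5))        # hypothetical square kernel for illustration
hs = hotspots(raster, kernel)   # values in {0, +/-90, +/-95, +/-99}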
def create_from_image(input_path, output_path=None,
fitimage="FITDEF",
compress="NORMAL",
zoom=0, # %; 0=100%
size=Point(0, 0), # Point (in mm), int or str; 1,2..10=A3R,A3..B5
align=("CENTER", "CENTER"), # LEFT/CENTER/RIGHT, TOP/CENTER/BOTTOM
maxpapersize="DEFAULT",
):
"""XDW generator from image file.
fitimage 'FITDEF' | 'FIT' | 'FITDEF_DIVIDEBMP' |
'USERDEF' | 'USERDEF_FIT'
compress 'NORMAL' | 'LOSSLESS' | 'NOCOMPRESS' |
'HIGHQUALITY' | 'HIGHCOMPRESS' |
'JPEG' | 'JPEG_TTN2' | 'PACKBITS' | 'G4' |
'MRC_NORMAL' | 'MRC_HIGHQUALITY' | 'MRC_HIGHCOMPRESS'
zoom (float) in percent; 0 means 100%. < 1/1000 is ignored.
    size (Point) in mm; for fitimage 'userdef' or 'userdef_fit'
(int) 1=A3R, 2=A3, 3=A4R, 4=A4, 5=A5R, 6=A5,
7=B4R, 8=B4, 9=B5R, 10=B5
align (horiz, vert) where:
horiz 'CENTER' | 'LEFT' | 'RIGHT'
vert 'CENTER' | 'TOP' | 'BOTTOM'
maxpapersize 'DEFAULT' | 'A3' | '2A0'
Returns actual pathname of generated document, which may be different
from `output_path' argument.
"""
input_path = adjust_path(input_path)
root, ext = os.path.splitext(input_path)
output_path = adjust_path(output_path or root, ext=".xdw")
output_path = derivative_path(output_path)
opt = XDW_CREATE_OPTION_EX2()
opt.nFitImage = XDW_CREATE_FITIMAGE.normalize(fitimage)
opt.nCompress = XDW_COMPRESS.normalize(compress)
#opt.nZoom = int(zoom)
opt.nZoomDetail = int(zoom * 1000) # .3f
# NB. Width and height are valid only for XDW_CREATE_USERDEF(_FIT).
if not isinstance(size, Point):
size = XDW_SIZE.normalize(size)
size = XDW_SIZE_MM[size or 3] # default=A4R
size = Point(*size)
opt.nWidth = int(size.x * 100) # .2f
opt.nHeight = int(size.y * 100) # .2f;
opt.nHorPos = XDW_CREATE_HPOS.normalize(align[0])
opt.nVerPos = XDW_CREATE_VPOS.normalize(align[1])
opt.nMaxPaperSize = XDW_CREATE_MAXPAPERSIZE.normalize(maxpapersize)
if XDWVER < 8:
XDW_CreateXdwFromImageFile(cp(input_path), cp(output_path), opt)
else:
XDW_CreateXdwFromImageFileW(input_path, output_path, opt)
    return output_path
def _read_config(filename):
"""Reads configuration file.
Returns DysonLinkCredentials or None on error.
"""
config = configparser.ConfigParser()
logging.info('Reading "%s"', filename)
try:
config.read(filename)
except configparser.Error as ex:
logging.critical('Could not read "%s": %s', filename, ex)
return None
try:
username = config['Dyson Link']['username']
password = config['Dyson Link']['password']
country = config['Dyson Link']['country']
return DysonLinkCredentials(username, password, country)
except KeyError as ex:
logging.critical('Required key missing in "%s": %s', filename, ex)
        return None
def view_share_link(request, token):
"""
Translate a given sharelink to a proposal-detailpage.
:param request:
:param token: sharelink token, which includes the pk of the proposal
:return: proposal detail render
"""
try:
pk = signing.loads(token, max_age=settings.MAXAGESHARELINK)
except signing.SignatureExpired:
return render(request, "base.html", {
"Message": "Share link has expired!"
})
except signing.BadSignature:
return render(request, "base.html", {
"Message": "Invalid token in share link!"
})
obj = get_object_or_404(Proposal, pk=pk)
return render(request, "proposals/detail_project.html", {
"proposal": obj,
"project": obj
    })
def create(pdef):
"""Scikit-learn Pipelines objects creation (deprecated).
This function creates a list of sklearn Pipeline objects starting from the
list of list of tuples given in input that could be created using the
adenine.core.define_pipeline module.
Parameters
-----------
pdef : list of list of tuples
        This argument contains the specification needed by sklearn in order
to create a working Pipeline object.
Returns
-----------
    pipes : list of sklearn.pipeline.Pipeline objects
        The list of Pipelines, each of which can be fitted and transformed
        with some data.
    """
    from sklearn.pipeline import Pipeline
    return [Pipeline(p) for p in pdef]
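
# Usage sketch: each pdef entry is a list of (name, transformer) steps, e.g.
# two alternative preprocessing pipelines built from scikit-learn estimators.
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

pdef = [
    [('scale', StandardScaler()), ('pca', PCA(n_components=2))],
    [('pca', PCA(n_components=2))],
]
pipes = create(pdef)  # two Pipeline objects, ready for fit/transform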
def pressure_differentiable(altitude):
"""
Computes the pressure at a given altitude with a differentiable model.
Args:
altitude: Geopotential altitude [m]
Returns: Pressure [Pa]
"""
    return np.exp(interpolated_log_pressure(altitude))
def write_pmd_field(h5, data, name=None):
"""
Data is a dict with:
attrs: flat dict of attributes.
components: flat dict of components
See inverse routine:
.readers.load_field_data
"""
if name:
g = h5.create_group(name)
else:
g = h5
# Validate attrs
attrs, other = load_field_attrs(data['attrs'])
# Encode and write required and optional
attrs = encode_attrs(attrs)
for k, v in attrs.items():
g.attrs[k] = v
# All other attributes (don't change them)
for k, v in other.items():
g.attrs[k] = v
# write components (datasets)
for key, val in data['components'].items():
# Units
u = pg_units(key)
        # Ensure complex (np.complex was removed from NumPy; use np.complex128)
        val = val.astype(np.complex128)
        # Write
        g2 = write_component_data(g, key, val, unit=u)
def create_optimizer(hparams, global_step, use_tpu=False):
"""Creates a TensorFlow Optimizer.
Args:
hparams: ConfigDict containing the optimizer configuration.
global_step: The global step Tensor.
use_tpu: If True, the returned optimizer is wrapped in a
CrossShardOptimizer.
Returns:
A TensorFlow optimizer.
Raises:
ValueError: If hparams.optimizer is unrecognized.
"""
optimizer_name = hparams.optimizer.lower()
optimizer_params = {}
if optimizer_name == "momentum":
optimizer_class = tf.train.MomentumOptimizer
optimizer_params["momentum"] = hparams.get("momentum", 0.9)
optimizer_params["use_nesterov"] = hparams.get("use_nesterov", False)
elif optimizer_name == "sgd":
optimizer_class = tf.train.GradientDescentOptimizer
elif optimizer_name == "adagrad":
optimizer_class = tf.train.AdagradOptimizer
elif optimizer_name == "adam":
optimizer_class = tf.train.AdamOptimizer
elif optimizer_name == "rmsprop":
        optimizer_class = tf.train.RMSPropOptimizer
else:
raise ValueError("Unknown optimizer: {}".format(hparams.optimizer))
# Apply weight decay wrapper.
optimizer_class = (
tf.contrib.opt.extend_with_decoupled_weight_decay(optimizer_class))
# Create optimizer.
learning_rate, weight_decay = create_learning_rate_and_weight_decay(
hparams, global_step)
optimizer = optimizer_class(
weight_decay=weight_decay,
learning_rate=learning_rate,
**optimizer_params)
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    return optimizer
def compare_dirs_ignore_words(dir1, dir2, ignore_words, ignore_files=None):
"""Same as compare_dirs but ignores lines with words in ignore_words.
"""
return compare_dirs(
dir1,
dir2,
ignore=ignore_files,
function=lambda file1, file2:
compare_text_files_ignore_lines(file1, file2, ignore_words)
    )
def escalon(num, den):
    """
    Step function: generates the step response of a transfer function.
    Example:
        escalon(num, den)
        num = list containing the values of the numerator of the
              transfer function.
        den = list containing the values of the denominator of the
              transfer function.
    """
    # Import the required modules.
    import pylab
    from scipy import signal
    import matplotlib.pyplot as plt
    # Set the figure size.
    pylab.rcParams['figure.figsize'] = (9, 6.5)
    # Declare the transfer function, then generate the time vector (t)
    # and the step response y(t).
    sistema = signal.TransferFunction(num, den)
    t, y = signal.step(sistema)
    # Plot the step response.
    plt.plot(t, y, 'r')
    plt.title('Step response')
    plt.xlabel('Time $(s)$')
    plt.ylabel('Amplitude')
    plt.grid()
    # Display the plot.
    plt.show()
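
# Usage sketch: plot the step response of the critically damped second-order
# system H(s) = 1 / (s^2 + 2s + 1).
escalon([1], [1, 2, 1])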
def test_env_dirs_correct():
"""Test that the .envrc values are correct for each directory environment variable"""
    assert dict_envrc_dir == dict(zip(ENV_DIRS_EXPECTED, DIRS_EXPECTED))
def toggle_ascii_filter():
    """ Toggle ASCII filter (Y) """
    global ascii_filter
    ascii_filter = not ascii_filter
    display_textual_content()
def test_robot_depends_on(
robot_with_mount_and_modules_services: Dict[str, Any]
) -> None:
"""Confirm that modules depend on emulator proxy."""
    assert robot_with_mount_and_modules_services[OT2_ID].depends_on is None
def is_zero(actual: Union[int, float]):
"""
Checks if an object is equal to zero.
:param actual: object to evaluate
:return: None
:raise AssertionError: if object is not equal to zero
"""
_check_argument_is_number(actual, 'is_zero')
if actual != 0:
raise AssertionError(f"'{short(actual)}' <{type(actual).__name__}> is not equal to zero!") | 5,353,064 |
def float_to_wazn(value):
"""Converts a float value to an integer in the WAZN notation.
The float format has a maxium of 6 decimal digits.
:param value: value to convert from float to WAZN notation
:returns: converted value in WAZN notation
"""
return int(Decimal(value) / MICRO_WAZN) | 5,353,065 |
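# Hedged usage sketch for float_to_wazn() above. MICRO_WAZN is defined
# elsewhere in the original module; the value below is an assumption
# consistent with the six-decimal-digit float format the docstring describes.
from decimal import Decimal

MICRO_WAZN = Decimal('1e-6')  # assumed value for this sketch only
print(float_to_wazn(1.5))  # 1500000 atomic units under the assumed constant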
def timer(func):
""" Decorator to measure execution time """
import time
def wrapper(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
elapsed = time.time() - start_time
print('{:s}: {:4f} sec'.format(func.__name__, elapsed))
return ret
return wrapper | 5,353,066 |
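# Usage sketch for the timer() decorator above; `slow_sum` is a hypothetical
# function, not from the original source. Each call prints its wall-clock
# duration and the return value is passed through unchanged.
@timer
def slow_sum(n):
    return sum(range(n))

slow_sum(10_000_000)  # prints something like "slow_sum: 0.214099 sec"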
def test_wf_3nd_st_3(plugin):
""" workflow with three tasks, third one connected to two previous tasks,
splitter and partial combiner (from the second task) on the workflow level
"""
wf = Workflow(name="wf_st_9", input_spec=["x", "y"])
wf.add(add2(name="add2x", x=wf.lzin.x))
wf.add(add2(name="add2y", x=wf.lzin.y))
wf.add(multiply(name="mult", x=wf.add2x.lzout.out, y=wf.add2y.lzout.out))
wf.split(["x", "y"], x=[1, 2, 3], y=[11, 12]).combine("y")
wf.set_output([("out", wf.mult.lzout.out)])
with Submitter(plugin=plugin) as sub:
sub(wf)
results = wf.result()
assert len(results) == 3
assert results[0][0].output.out == 39
assert results[0][1].output.out == 42
assert results[1][0].output.out == 52
assert results[1][1].output.out == 56
assert results[2][0].output.out == 65
assert results[2][1].output.out == 70
# checking all directories
assert wf.output_dir
for odir in wf.output_dir:
assert odir.exists() | 5,353,067 |
def validate_assessments(url):
"""
Validate tests inside of the file referenced by the URL argument.
:param url: URL to the tests file
"""
valid = True
for test in tests_loader.load_all_tests(url):
assessment = assessment_loader.load_assessment_from_urls(test.questions_url, test.answers_url, test.grades_url)
if assessment.validate_grades():
print('Test valid')
else:
valid = False
print('Test not valid')
    print('All tests OK' if valid else 'Tests failed') | 5,353,068
def _create_keyword_plan_campaign(client, customer_id, keyword_plan):
"""Adds a keyword plan campaign to the given keyword plan.
Args:
client: An initialized instance of GoogleAdsClient
customer_id: A str of the customer_id to use in requests.
        keyword_plan: A str of the keyword plan resource_name this keyword plan
            campaign should be attributed to.
Returns:
A str of the resource_name for the newly created keyword plan campaign.
Raises:
GoogleAdsException: If an error is returned from the API.
"""
keyword_plan_campaign_service = client.get_service(
"KeywordPlanCampaignService"
)
operation = client.get_type("KeywordPlanCampaignOperation")
keyword_plan_campaign = operation.create
keyword_plan_campaign.name = f"Keyword plan campaign {uuid.uuid4()}"
keyword_plan_campaign.cpc_bid_micros = 1000000
keyword_plan_campaign.keyword_plan = keyword_plan
network = client.enums.KeywordPlanNetworkEnum.GOOGLE_SEARCH
keyword_plan_campaign.keyword_plan_network = network
geo_target = client.get_type("KeywordPlanGeoTarget")
# Constant for U.S. Other geo target constants can be referenced here:
# https://developers.google.com/google-ads/api/reference/data/geotargets
geo_target.geo_target_constant = "geoTargetConstants/2840"
keyword_plan_campaign.geo_targets.append(geo_target)
# Constant for English
language = "languageConstants/1000"
keyword_plan_campaign.language_constants.append(language)
response = keyword_plan_campaign_service.mutate_keyword_plan_campaigns(
customer_id=customer_id, operations=[operation]
)
resource_name = response.results[0].resource_name
print(f"Created keyword plan campaign with resource name: {resource_name}")
return resource_name | 5,353,069 |
def word2vec(sentences, year):
"""
Creates a word2vec model.
    @param sentences: list of lists of words in each sentence (title + abstract)
    @param year: first year of the decade covered by the model (used in the filename)
    @return word2vec model
"""
print("Creating word2vec model")
model = Word2Vec(sentences, size=500, window=5, min_count=1, workers=4)
model.save(f"models/decades/word2vec_{str(year)}-{str(year+9)}.model")
print("Saved word2vec model")
return model | 5,353,070 |
def true_rjust(string, width, fillchar=' '):
""" Justify the string to the right, using printable length as the width. """
return fillchar * (width - true_len(string)) + string | 5,353,071 |
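# Minimal demo of true_rjust() above, assuming the module's true_len() helper
# returns the printable width of a string (e.g. ignoring ANSI escapes); with
# plain ASCII input it behaves exactly like str.rjust.
print(true_rjust("abc", 6, '.'))  # "...abc"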
def test_execute_list_collection_all(mocker, capsys, mock_collection_objects, tmp_path_factory):
"""Test listing all collections from multiple paths"""
cliargs()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('os.path.isdir', return_value=True)
gc = GalaxyCLI(['ansible-galaxy', 'collection', 'list'])
tmp_path = tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
gc.execute_list_collection(artifacts_manager=concrete_artifact_cm)
out, err = capsys.readouterr()
out_lines = out.splitlines()
assert len(out_lines) == 12
assert out_lines[0] == ''
assert out_lines[1] == '# /root/.ansible/collections/ansible_collections'
assert out_lines[2] == 'Collection Version'
assert out_lines[3] == '----------------- -------'
assert out_lines[4] == 'sandwiches.pbj 1.5.0 '
assert out_lines[5] == 'sandwiches.reuben 2.5.0 '
assert out_lines[6] == ''
assert out_lines[7] == '# /usr/share/ansible/collections/ansible_collections'
assert out_lines[8] == 'Collection Version'
assert out_lines[9] == '-------------- -------'
assert out_lines[10] == 'sandwiches.ham 1.0.0 '
assert out_lines[11] == 'sandwiches.pbj 1.0.0 ' | 5,353,072 |
def threshold_generator_with_values(values, duration, num_classes):
"""
Args:
values: A Tensor with shape (-1,)
Values = strictly positive, float thresholds.
duration: An int.
num_classes: An int.
Returns:
thresh: A Tensor with shape
(len(list_values), duration, num_classes, num_classes).
In each matrix,
diag = 0, and off-diag shares a single value > 0.
Matrices are sorted in ascending order of the values
w.r.t. axis=0.
"""
num_thresh = values.shape[0]
thresh = tf.reshape(values, [num_thresh, 1, 1, 1])
thresh = tf.tile(thresh, [1, duration, num_classes, num_classes])
# (num thresh, num cls, num cls)
mask = tf.linalg.tensor_diag([-1.] * num_classes) + 1
thresh *= mask
# Now diag = 0.
thresh += mask * 1e-11
    # Avoids 0 thresholds, which may occur
# when logits for different classes have the same value,
# e.g., 0, due to loss of significance.
# This operation may cause sparsity of SAT curve
# if llr_min is << 1e-11, but such a case is ignorable
# in practice, according to my own experience.
return thresh | 5,353,073 |
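# Hedged shape check for threshold_generator_with_values() above, assuming
# TF 2.x eager execution. Three thresholds with duration 2 and two classes
# should yield a (3, 2, 2, 2) tensor whose class-by-class diagonals are zero.
import tensorflow as tf

vals = tf.constant([0.1, 0.5, 1.0])
thresh = threshold_generator_with_values(vals, duration=2, num_classes=2)
print(thresh.shape)  # (3, 2, 2, 2)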
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
        Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False.
"""
(m, n) = mx.shape
for i in range(m):
if abs(mx[i, i].imag) > tol: return False
for j in range(i + 1, n):
if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False
return True | 5,353,074 |
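# Quick self-check for is_hermitian() above using NumPy arrays: a known
# Hermitian matrix passes, and one with non-conjugate off-diagonals fails.
import numpy as np

h = np.array([[2.0, 1 - 1j], [1 + 1j, 3.0]])
print(is_hermitian(h))                             # True
print(is_hermitian(np.array([[0, 1j], [1j, 0]])))  # False: 1j != conj(1j)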
def argmax(a, b, axis=1, init_value=-1, name="argmax"):
""" sort in axis with ascending order """
assert axis<len(a.shape) and len(a.shape)<=2, "invalid axis"
assert b.shape[axis] == 2, "shape mismatch"
    size = a.shape[axis] # length of the reduction axis
def argmax2d(A, B):
init = hcl.compute((2,), lambda x: init_value)
r = hcl.reduce_axis(0, size, name="rdx")
# Y as reducer tensor
def sreduce(x, Y):
with hcl.if_(x > Y[1]):
Y[0] = r
Y[1] = x
my_argmax = hcl.reducer(init, sreduce)
if axis == 1:
return hcl.update(B,
lambda x, _y: my_argmax(A[x, r], axis=r), name=name)
else: # reduce in y axis
return hcl.update(B,
lambda _x, y: my_argmax(A[r, y], axis=r), name=name)
# return decorated function
mod = hcl.def_([a.shape, b.shape], name=name)(argmax2d)
mod(a, b) | 5,353,075 |
def morphology(src, operation="open", kernel_shape=(3, 3), kernel_type="ones"):
"""Dynamic calls different morphological operations
("open", "close", "dilate" and "erode") with the given parameters
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
operation (str, optional) : name of a morphological operation:
``("open", "close", "dilate", "erode")``
Defaults to ``"open"``.
kernel_shape (tuple, optional) : shape of the kernel (rows, cols).
Defaults to (3,3).
kernel_type (str, optional) : type of kernel.
``("ones", "upper_triangle", "lower_triangle", "x", "plus", "ellipse")``
Defaults to ``"ones"``.
Returns:
numpy.ndarray: a copy of the source image after apply the effect.
"""
kernel = create_2D_kernel(kernel_shape, kernel_type)
if operation == "open":
return open(src, kernel)
elif operation == "close":
return close(src, kernel)
elif operation == "dilate":
return dilate(src, kernel)
elif operation == "erode":
return erode(src, kernel)
else:
valid_operations = ["open", "close", "dilate", "erode"]
raise ValueError(
f"Invalid morphology operation '{operation}'. Valid morphological operations are {valid_operations}"
) | 5,353,076 |
def msd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=None):
"""Compute the mean displacement and mean squared displacement of one
trajectory over a range of time intervals.
Parameters
----------
traj : DataFrame with one trajectory, including columns frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : See below. Default False.
Returns
-------
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
If detail is True, the DataFrame also contains a column N,
the estimated number of statistically independent measurements
that comprise the result at each lagtime.
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
See also
--------
imsd
emsd
"""
if traj['frame'].max() - traj['frame'].min() + 1 == len(traj):
# no gaps: use fourier-transform algorithm
return _msd_fft(traj, mpp, fps, max_lagtime, detail, pos_columns)
else:
# there are gaps in the trajectory: use slower algorithm
return _msd_gaps(traj, mpp, fps, max_lagtime, detail, pos_columns) | 5,353,077 |
def build_rnd_graph(golden, rel, seed=None):
"""Build a random graph for testing."""
def add_word(word):
if word not in words:
words.add(word)
def add_edge(rel, word1, word2):
data.append((rel, word1, word2))
random.seed(seed)
m, _ = golden.shape
words = set()
for i in range(m):
if golden['relation'][i] != rel:
continue
add_word(golden['word1_id'][i])
add_word(golden['word2_id'][i])
data = []
for word1 in words:
for word2 in words:
if word1 >= word2:
continue
if random.randint(0, 1):
add_edge(rel, word1, word2)
add_edge(rel, word2, word1)
df = pd.DataFrame(data, columns=('relation', 'word1_id', 'word2_id'),
index=range(len(data)))
return df | 5,353,078 |
def iree_build_test(name, targets):
"""Dummy rule to ensure that targets build.
This is currently undefined in bazel and is preserved for compatibility.
"""
pass | 5,353,079 |
def main() -> None:
"""Main program.
"""
utils_io.find_or_create_dir(DATA_FOLDER)
utils_io.find_or_create_dir(SPECTROGRAMS_FOLDER)
utils_io.find_or_create_dir(PLOTS_FOLDER)
preprocess_data(method='download')
classify_bands_different_genres()
classify_bands_same_genre()
classify_genres() | 5,353,080 |
def _make_parser_func(sep):
"""
Create a parser function from the given sep.
Parameters
----------
sep: str
The separator default to use for the parser.
Returns
-------
A function object.
"""
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyStr]],
sep=sep,
delimiter=None,
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
iterator=False,
chunksize=None,
compression="infer",
thousands=None,
decimal: str = ".",
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
doublequote=True,
delim_whitespace=False,
low_memory=True,
memory_map=False,
float_precision=None,
):
# ISSUE #2408: parse parameter shared with pandas read_csv and read_table and update with provided args
_pd_read_csv_signature = {
val.name for val in inspect.signature(pandas.read_csv).parameters.values()
}
_, _, _, f_locals = inspect.getargvalues(inspect.currentframe())
if f_locals.get("sep", sep) is False:
f_locals["sep"] = "\t"
kwargs = {k: v for k, v in f_locals.items() if k in _pd_read_csv_signature}
return _read(**kwargs)
return parser_func | 5,353,081 |
def load_and_join(LC_DIR):
"""
load and join quarters together.
Takes a list of fits file names for a given star.
Returns the concatenated arrays of time, flux and flux_err
"""
fnames = sorted(glob.glob(os.path.join(LC_DIR, "*fits")))
hdulist = fits.open(fnames[0])
t = hdulist[1].data
time = t["TIME"]
flux = t["PDCSAP_FLUX"]
flux_err = t["PDCSAP_FLUX_ERR"]
q = t["SAP_QUALITY"]
m = np.isfinite(time) * np.isfinite(flux) * np.isfinite(flux_err) * \
(q == 0)
x = time[m]
med = np.median(flux[m])
y = flux[m]/med - 1
yerr = flux_err[m]/med
for fname in fnames[1:]:
hdulist = fits.open(fname)
t = hdulist[1].data
time = t["TIME"]
flux = t["PDCSAP_FLUX"]
flux_err = t["PDCSAP_FLUX_ERR"]
q = t["SAP_QUALITY"]
m = np.isfinite(time) * np.isfinite(flux) * np.isfinite(flux_err) * \
(q == 0)
x = np.concatenate((x, time[m]))
med = np.median(flux[m])
y = np.concatenate((y, flux[m]/med - 1))
yerr = np.concatenate((yerr, flux_err[m]/med))
return x, y, yerr | 5,353,082 |
def dcm_to_pil_image_gray(file_path):
"""Read a DICOM file and return it as a gray scale PIL image"""
ds = dcmread(file_path)
# Get the image after apply clahe
img_filtered = Image.fromarray(apply_clahe(ds.pixel_array).astype("uint8"))
# Normalize original image to the interval [0, 255]
img = cv.normalize(ds.pixel_array, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
img = Image.fromarray(img.astype("uint8"))
return [img, img_filtered] | 5,353,083 |
def get_object_unique_name(obj: Any) -> str:
"""Return a unique string associated with the given object.
That string is constructed as follows: <object class name>_<object_hex_id>
"""
return f"{type(obj).__name__}_{hex(id(obj))}" | 5,353,084 |
def create_voting_dict(strlist):
"""
Input: a list of strings. Each string represents the voting record of a senator.
The string consists of
- the senator's last name,
- a letter indicating the senator's party,
- a couple of letters indicating the senator's home state, and
- a sequence of numbers (0's, 1's, and negative 1's) indicating the senator's
votes on bills
all separated by spaces.
Output: A dictionary that maps the last name of a senator
to a list of numbers representing the senator's voting record.
Example:
>>> vd = create_voting_dict(['Kennedy D MA -1 -1 1 1', 'Snowe R ME 1 1 1 1'])
>>> vd == {'Snowe': [1, 1, 1, 1], 'Kennedy': [-1, -1, 1, 1]}
True
You can use the .split() method to split each string in the
strlist into a list; the first element of the list will be the senator's
name, the second will be his/her party affiliation (R or D), the
third will be his/her home state, and the remaining elements of
the list will be that senator's voting record on a collection of bills.
You can use the built-in procedure int() to convert a string
representation of an integer (e.g. '1') to the actual integer
(e.g. 1).
The lists for each senator should preserve the order listed in voting data.
In case you're feeling clever, this can be done in one line.
"""
voting_dic = {}
    for s in strlist:
s = s.strip()
items = s.split(' ')
voting_dic[items[0]] = [int(v) for v in items[3:]]
return voting_dic | 5,353,085 |
def areFriends(profile1, profile2):
"""Checks wether profile1 is connected to profile2 and profile2 is connected to profile1"""
def check(p1, p2):
if p1.isServiceIdentity:
fsic = get_friend_serviceidentity_connection(p2.user, p1.user)
return fsic is not None and not fsic.deleted
else:
friend_map = get_friends_map(p1.user)
return friend_map is not None and remove_slash_default(p2.user) in friend_map.friends
return check(profile1, profile2) and check(profile2, profile1) | 5,353,086 |
def filter_dict(regex_dict, request_keys):
"""
filter regular expression dictionary by request_keys
:param regex_dict: a dictionary of regular expressions that
follows the following format:
{
"name": "sigma_aldrich",
"regexes": {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
"product_name": {
"regex": "\\s[P|p]roduct\\s(?P\u003cdata\u003e.{80})",
"flags": "is"
},
...
}
returns
{
'sigma_aldrich': {
"manufacturer": {
"regex": "[C|c]ompany(?P\u003cdata\u003e.{80})",
"flags": "is"
},
}
:param request_keys: a list of dictionary keys that correspond to valid
regex lookups i.e. ['manufacturer', 'product_name']
"""
out_dict = dict()
nested_regexes = regex_dict['regexes']
for request_key in request_keys:
if request_key in nested_regexes:
out_dict[request_key] = nested_regexes[request_key]
return {'name': regex_dict['name'], 'regexes': out_dict} | 5,353,087 |
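# Usage sketch for filter_dict() above, mirroring the docstring's structure;
# request keys missing from the regex dict are silently dropped.
sample = {
    "name": "sigma_aldrich",
    "regexes": {
        "manufacturer": {"regex": "[C|c]ompany(?P<data>.{80})", "flags": "is"},
        "product_name": {"regex": r"\s[P|p]roduct\s(?P<data>.{80})", "flags": "is"},
    },
}
result = filter_dict(sample, ["manufacturer", "no_such_key"])
print(sorted(result["regexes"]))  # ['manufacturer']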
def calculate_partition_movement(prev_assignment, curr_assignment):
"""Calculate the partition movements from initial to current assignment.
Algorithm:
For each partition in initial assignment
# If replica set different in current assignment:
# Get Difference in sets
:rtype: tuple
dict((partition, (from_broker_set, to_broker_set)), total_movements
"""
total_movements = 0
movements = {}
for prev_partition, prev_replicas in six.iteritems(prev_assignment):
curr_replicas = curr_assignment[prev_partition]
diff = len(set(curr_replicas) - set(prev_replicas))
if diff:
total_movements += diff
movements[prev_partition] = (
(set(prev_replicas) - set(curr_replicas)),
(set(curr_replicas) - set(prev_replicas)),
)
return movements, total_movements | 5,353,088 |
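# Worked example for calculate_partition_movement() above: partition p0 moves
# one replica from broker 3 to broker 4, while p1 is unchanged. six.iteritems
# accepts plain dicts (the module's own `six` import is assumed available).
prev = {'p0': [1, 2, 3], 'p1': [1, 2]}
curr = {'p0': [1, 2, 4], 'p1': [1, 2]}
movements, total = calculate_partition_movement(prev, curr)
print(movements)  # {'p0': ({3}, {4})}
print(total)      # 1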
def benchmark(part, methods, p=0.9, n_shots=1000, randseed=None):
"""
Benchmark different count-correction methods with qubits partitioned into groups.
part: list of positive integers, representing partition of qubits
p: probability of any bit having its expected value when generating fake calibration results
"""
print(
f"Benchmarking with partition {part}, simulation fidelity {p}, {n_shots} shots, random seed = {randseed}"
)
seed(randseed)
n_qbs = sum(part)
subs = []
i = 0
for x in part:
subs.append([Node("x", j) for j in range(i, i + x)])
i += x
spam = SpamCorrecter(subs)
spam.calibration_circuits()
prepared_states = [si[0] for si in spam.state_infos]
calib_results = fake_calib_results(part, prepared_states, p, n_shots)
spam.calculate_matrices(calib_results)
my_result = fake_counts(n_qbs, n_shots)
res_map = [{Node("x", i): Bit(i) for i in range(n_qbs)}]
for method in methods:
print(f"Method '{method}'...")
t0 = perf_counter()
spam.correct_counts(my_result, res_map, method=method)
t1 = perf_counter()
print(f"Time: {t1-t0} s")
print()
print() | 5,353,089 |
def correct_crop_centers(
centers: List[Union[int, torch.Tensor]],
spatial_size: Union[Sequence[int], int],
label_spatial_shape: Sequence[int],
) -> List[int]:
"""
Utility to correct the crop center if the crop size is bigger than the image size.
Args:
        centers: pre-computed crop centers, will correct based on the valid region.
spatial_size: spatial size of the ROIs to be sampled.
label_spatial_shape: spatial shape of the original label data to compare with ROI.
"""
spatial_size = fall_back_tuple(spatial_size, default=label_spatial_shape)
if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():
raise ValueError("The size of the proposed random crop ROI is larger than the image size.")
# Select subregion to assure valid roi
valid_start = np.floor_divide(spatial_size, 2)
# add 1 for random
valid_end = np.subtract(label_spatial_shape + np.array(1), spatial_size / np.array(2)).astype(np.uint16)
# int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range
# from being too high
for i, valid_s in enumerate(valid_start):
# need this because np.random.randint does not work with same start and end
if valid_s == valid_end[i]:
valid_end[i] += 1
for i, c in enumerate(centers):
center_i = c
if c < valid_start[i]:
center_i = valid_start[i]
if c >= valid_end[i]:
center_i = valid_end[i] - 1
centers[i] = center_i
corrected_centers: List[int] = [c.item() if isinstance(c, torch.Tensor) else c for c in centers] # type: ignore
return corrected_centers | 5,353,090 |
def contrast_normalize(data, centered=False):
"""Normalizes image data to have variance of 1
Parameters
----------
data : array-like
data to be normalized
centered : boolean
When False (the default), centers the data first
Returns
-------
data : array-like
normalized data
"""
if not centered:
data = center(data)
data = np.divide(data, np.sqrt(np.var(data)))
return data | 5,353,091 |
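# Sanity check for contrast_normalize() above: the output variance is 1.
# The input is pre-centered here so the sketch does not depend on the
# module's own `center` helper (assumed to subtract the mean).
import numpy as np

rng = np.random.default_rng(0)
img = rng.normal(loc=5.0, scale=3.0, size=(64, 64))
img -= img.mean()  # pre-center manually
print(np.isclose(np.var(contrast_normalize(img, centered=True)), 1.0))  # True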
def create_gdrive_folders(website_short_id: str) -> bool:
"""Create gdrive folder for website if it doesn't already exist"""
folder_created = False
service = get_drive_service()
base_query = "mimeType = 'application/vnd.google-apps.folder' and not trashed and "
query = f"{base_query}name = '{website_short_id}'"
fields = "nextPageToken, files(id, name, parents)"
folders = list(query_files(query=query, fields=fields))
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID:
filtered_folders = []
for folder in folders:
ancestors = get_parent_tree(folder["parents"])
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID in [
ancestor["id"] for ancestor in ancestors
]:
filtered_folders.append(folder)
else:
filtered_folders = folders
if len(filtered_folders) == 0:
folder_metadata = {
"name": website_short_id,
"mimeType": DRIVE_MIMETYPE_FOLDER,
}
if settings.DRIVE_UPLOADS_PARENT_FOLDER_ID:
folder_metadata["parents"] = [settings.DRIVE_UPLOADS_PARENT_FOLDER_ID]
else:
folder_metadata["parents"] = [settings.DRIVE_SHARED_ID]
folder = (
service.files()
.create(supportsAllDrives=True, body=folder_metadata, fields="id")
.execute()
)
folder_created = True
else:
folder = filtered_folders[0]
Website.objects.filter(short_id=website_short_id).update(gdrive_folder=folder["id"])
for subfolder in [
DRIVE_FOLDER_FILES,
DRIVE_FOLDER_FILES_FINAL,
DRIVE_FOLDER_VIDEOS_FINAL,
]:
query = f"{base_query}name = '{subfolder}' and parents = '{folder['id']}'"
folders = list(query_files(query=query, fields=fields))
if len(folders) == 0:
folder_metadata = {
"name": subfolder,
"mimeType": DRIVE_MIMETYPE_FOLDER,
"parents": [folder["id"]],
}
service.files().create(
supportsAllDrives=True, body=folder_metadata, fields="id"
).execute()
folder_created = True
return folder_created | 5,353,092 |
def filter_for_recognized_pumas(df):
"""Written for income restricted indicator but can be used for many other
indicators that have rows by puma but include some non-PUMA rows. Sometimes
we set nrows in read csv/excel but this approach is more flexible"""
return df[df["puma"].isin(get_all_NYC_PUMAs())] | 5,353,093 |
def hotspots2006(path):
"""Hawaian island chain hotspot Argon-Argon ages
Ar-Ar Ages (millions of years) and distances (km) from Kilauea along the
    trend of the chain of Hawaiian volcanic islands and other seamounts that
are believed to have been created by a moving "hot spot".
A data frame with 10 observations on the following 6 variables.
`age`
Ar-Ar age
`CI95lim`
Measurement error; 95% CI
`geoErr`
Geological Uncertainty
`totplus`
Total uncertainty (+)
`totminus`
Total uncertainty (-)
`distance`
Distance in kilometers
Warren D. Sharp and David A. Clague, 50-Ma initiation of
Hawaiian-Emperor bend records major change in Pacific Plate motion.
Science 313: 1281-1284 (2006).
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `hotspots2006.csv`.
Returns:
Tuple of np.ndarray `x_train` with 10 rows and 6 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'hotspots2006.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/hotspots2006.csv'
maybe_download_and_extract(path, url,
save_file_name='hotspots2006.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | 5,353,094 |
def school_booking_cancel(request, pk_booking):
"""Render the school booking cancel page for a school representative.
:param request: httprequest received
:type request: HttpRequest
:param pk_booking: Primary Key of a Booking
:type pk_booking: int
:return: Return a HttpResponse whose content is filled with the result of the passed arguments.
:rtype: HttpResponse
"""
booking = Booking.objects.get(id=pk_booking)
if request.method == "POST":
booking.status = "Cancelled"
booking.reason_cancellation = request.POST["reason_cancellation"]
booking.save()
admin_email = ADMIN_EMAIL
send_email_booking_cancellation(admin_email, booking)
return redirect("school-dashboard")
data = {"booking": booking}
return render(request, "schoolApp/school-booking-cancel.html", data) | 5,353,095 |
def draw_beam_figure():
"""Draw a simple astigmatic beam ellipse with labels."""
theta = np.radians(30)
xc = 0
yc = 0
dx = 50
dy = 25
plt.subplots(1, 1, figsize=(6, 6))
# If the aspect ratio is not `equal` then the major and minor radii
# do not appear to be orthogonal to each other!
plt.axes().set_aspect('equal')
xp, yp = ellipse_arrays(xc, yc, dx, dy, theta)
plt.plot(xp, yp, 'k', lw=2)
xp, yp = rotated_rect_arrays(xc, yc, dx, dy, theta)
plt.plot(xp, yp, ':b', lw=2)
sint = np.sin(theta) / 2
cost = np.cos(theta) / 2
plt.plot([xc - dx * cost, xc + dx * cost], [yc + dx * sint, yc - dx * sint], ':b')
plt.plot([xc + dy * sint, xc - dy * sint], [yc + dy * cost, yc - dy * cost], ':r')
# draw axes
plt.annotate("x'", xy=(-25, 0), xytext=(25, 0),
arrowprops=dict(arrowstyle="<-"), va='center', fontsize=16)
plt.annotate("y'", xy=(0, 25), xytext=(0, -25),
arrowprops=dict(arrowstyle="<-"), ha='center', fontsize=16)
plt.annotate(r'$\phi$', xy=(13, -2.5), fontsize=16)
plt.annotate('', xy=(15.5, 0), xytext=(
14, -8.0), arrowprops=dict(arrowstyle="<-", connectionstyle="arc3, rad=-0.2"))
plt.annotate(r'$d_x$', xy=(-17, 7), color='blue', fontsize=16)
plt.annotate(r'$d_y$', xy=(-4, -8), color='red', fontsize=16)
plt.xlim(-30, 30)
plt.ylim(30, -30) # inverted to match image coordinates!
plt.axis('off') | 5,353,096 |
def bandpass_filter(df, spiky_var):
"""Detect outliers according to a passband filter specific to each variable.
Parameters
----------
df: pandas DataFrame that contains the spiky variable
spiky_var: string that designate the spiky variable
Returns
-------
id_outlier: index of outliers"""
if spiky_var == 'LE':
id_bandpass = ( df[spiky_var] < -35 ) | ( df[spiky_var] > 300 ) # in [W+1m-2]
elif spiky_var == 'H':
id_bandpass = ( df[spiky_var] < -100 ) | ( df[spiky_var] > 400 ) # in [W+1m-2]
elif spiky_var == 'CO2_flux':
id_bandpass = ( df[spiky_var] < -10 ) | ( df[spiky_var] > 20 ) # in [µmol+1s-1m-2]
elif spiky_var == 'CH4_flux':
id_bandpass = ( df[spiky_var] < -0.1 ) | ( df[spiky_var] > 0.25 ) # in [µmol+1s-1m-2]
return id_bandpass | 5,353,097 |
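# Hedged demo for bandpass_filter() above: flag latent heat flux (LE) values
# falling outside the [-35, 300] W m-2 passband.
import pandas as pd

df = pd.DataFrame({'LE': [-50.0, 10.0, 250.0, 400.0]})
print(bandpass_filter(df, 'LE').tolist())  # [True, False, False, True]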
def load_energy():
"""Loads the energy file, skipping all useluss information and returns it as a dataframe"""
energy = pd.read_excel("Energy Indicators.xls", skiprows=17, header=0,
skip_footer=53-15, na_values="...", usecols=[2,3,4,5])
# Rename columns
energy.columns = ["Country", "Energy Supply [Petajoules]", "Energy Supply per Capita [Gigajoules]", "% Renewable"]
# Exclude numbers from country names
energy["Country"] = energy["Country"].str.replace("\d+", "")
# Delete the parentheses
energy["Country"] = energy["Country"].str.replace("\(.*\)", "")
return energy | 5,353,098 |
def list_list_to_string(list_lists,data_delimiter=None,row_formatter_string=None,line_begin=None,line_end=None):
"""Repeatedly calls list to string on each element of a list and string adds the result
. ie coverts a list of lists to a string. If line end is None the value defaults to "\n", for no seperator use ''
"""
if line_end is None:
line_end="\n"
check_arg_type(list_lists,ListType)
string_out=""
for index,row in enumerate(list_lists):
if index==len(list_lists)-1:
if line_end is "\n":
last_end=""
else:
last_end=re.sub("\n","",line_end,count=1)
string_out=string_out+list_to_string(row,data_delimiter=data_delimiter,
row_formatter_string=row_formatter_string,
begin=line_begin,end=last_end)
else:
string_out=string_out+list_to_string(row,data_delimiter=data_delimiter,
row_formatter_string=row_formatter_string,
begin=line_begin,end=line_end)
return string_out | 5,353,099 |