content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def base_checkout_total(
subtotal: TaxedMoney,
shipping_price: TaxedMoney,
discount: Money,
currency: str,
) -> TaxedMoney:
"""Return the total cost of the checkout."""
zero = zero_taxed_money(currency)
total = subtotal + shipping_price - discount
# Discount is subtracted from both gross and net values, which may cause a negative
# net value if the discount covers the whole price.
# Comparing TaxedMoney objects works only on gross values. That is why we
# explicitly return zero_taxed_money if total.gross is less than or equal to zero.
if total.gross <= zero.gross:
return zero
return total | 5,353,600 |
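A quick numeric sketch of the clamping rule described in the comment above; plain floats stand in for the Money / TaxedMoney types:
subtotal_gross, shipping_gross, discount = 10.0, 2.0, 15.0
total_gross = subtotal_gross + shipping_gross - discount   # -3.0
checkout_total = total_gross if total_gross > 0 else 0.0   # clamp to zero, as the function does
print(checkout_total)  # 0.0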
def if_statement(lhs='x', op='is', rhs=0, _then=None, _else=None):
"""Celery Script if statement.
Kind:
_if
Arguments:
lhs (left-hand side)
op (operator)
rhs (right-hand side)
_then (id of sequence to execute on `then`)
_else (id of sequence to execute on `else`)
"""
args = {}
args['lhs'] = lhs
args['op'] = op
args['rhs'] = rhs
if _then is None:
_then_kind = 'nothing'
_then_args = {}
else:
_then_kind = 'execute'
_then_args = {"sequence_id": _then}
if _else is None:
_else_kind = 'nothing'
_else_args = {}
else:
_else_kind = 'execute'
_else_args = {"sequence_id": _else}
args['_then'] = create_node(kind=_then_kind, args=_then_args)
args['_else'] = create_node(kind=_else_kind, args=_else_args)
_if_statement = create_node(kind='_if', args=args)
return _if_statement | 5,353,601 |
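An illustrative sketch of the nested node this builds, using a hypothetical stand-in for create_node (the real helper lives elsewhere in the module and may add extra fields):
def create_node(kind=None, args=None):
    # hypothetical stand-in for the module's create_node helper
    return {'kind': kind, 'args': args or {}}

node = create_node(kind='_if', args={
    'lhs': 'x',
    'op': 'is',
    'rhs': 0,
    '_then': create_node(kind='execute', args={'sequence_id': 1}),
    '_else': create_node(kind='nothing', args={}),
})
print(node['args']['_then'])  # {'kind': 'execute', 'args': {'sequence_id': 1}}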
def add_next_open(df, col='next_open'):
"""
Return the open price of the next candle (K-line).
"""
df[col] = df[CANDLE_OPEN_COLUMN].shift(-1)
df[col].fillna(value=df[CANDLE_CLOSE_COLUMN], inplace=True)
return df | 5,353,602 |
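A quick pandas illustration of the same shift-and-fill logic, assuming CANDLE_OPEN_COLUMN / CANDLE_CLOSE_COLUMN are simply the 'open' / 'close' column names used by the surrounding module:
import pandas as pd

candles = pd.DataFrame({'open': [10.0, 11.0, 12.0], 'close': [10.5, 11.5, 12.5]})
candles['next_open'] = candles['open'].shift(-1)
# the last candle has no successor, so fall back to its own close price
candles['next_open'] = candles['next_open'].fillna(candles['close'])
print(candles['next_open'].tolist())  # [11.0, 12.0, 12.5]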
def validate_args(args):
"""Validate command line arguments."""
if args.target == 'druid_deployment':
assert args.master_desired_count is not None, \
"Druid master desired count must be specified"
assert args.zookeeper_desired_count is not None, \
"Druid zookeeper desired count must be specified"
assert args.middlemanager_regular_desired_count is not None, \
"Druid regular MM node desired count must be specified"
assert args.middlemanager_crosscluster_desired_count is not None, \
"Druid crosscluster MM node desired count must be specified"
assert args.historical_desired_count is not None, \
"Druid historical node desired count must be specified"
assert args.query_desired_count is not None, \
"Druid query node desired count must be specified"
assert args.query_cc_desired_count is not None, \
"Druid query node for crosscluster desired count must be specified"
if args.target == 'influx_metric_consumer':
assert args.mode == 'turbo', \
"Currently we support Influx consumer for only Turbo metrics"
if args.target in ['influx_metric_consumer',
'master_graphite_consumer',
'prod_graphite_consumer',
'prod_no_agg_graphite_consumer',
'v4_live_influx_consumer',
'internal_graphite_consumer',
'prod_pickle_graphite_consumer',
'v2_live_pickle_graphite_consumer']:
assert args.consumer_mode is not None, \
"Must specify consumer_mode for metric consumers" | 5,353,603 |
def connectToSerial(dev):
"""connectToSerial("/path/to/device") connects to specific serial port"""
console.connectToSerial(dev) | 5,353,604 |
def seasons_used(parameters):
"""
Get a list of the seasons used for this set of parameters.
"""
seasons_used = set([s for p in parameters for s in p.seasons])
# Make sure this list is ordered by SEASONS.
return [season for season in SEASONS if season in seasons_used] | 5,353,605 |
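An illustration with an assumed SEASONS ordering and simple parameter objects carrying a `seasons` attribute:
from types import SimpleNamespace

SEASONS = ['ANN', 'DJF', 'MAM', 'JJA', 'SON']  # assumed canonical ordering
parameters = [SimpleNamespace(seasons=['JJA', 'DJF']), SimpleNamespace(seasons=['ANN'])]
used = set(s for p in parameters for s in p.seasons)
print([season for season in SEASONS if season in used])  # ['ANN', 'DJF', 'JJA']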
def get_polyphyletic(cons):
"""get polyphyletic groups and a representative tip"""
tips, taxonstrings = unzip(cons.items())
tree, lookup = make_consensus_tree(taxonstrings, False, tips=tips)
cache_tipnames(tree)
names = {}
for n in tree.non_tips():
if n.name is None:
continue
if (n.name, n.Rank) not in names:
names[(n.name, n.Rank)] = {}
if n.parent is not None:
names[(n.name, n.Rank)][n.parent.name] = n.tip_names[0]
return names | 5,353,606 |
def _Install(launcher_vm, booter_template_vm):
"""Installs benchmark scripts and packages on the launcher vm."""
launcher_vm.InstallCli()
# Render boot script on launcher server VM(s)
context = _BuildContext(launcher_vm, booter_template_vm)
launcher_vm.RenderTemplate(data.ResourcePath(_BOOT_TEMPLATE), _BOOT_PATH,
context)
launcher_vm.RenderTemplate(data.ResourcePath(_STATUS_TEMPLATE), _STATUS_PATH,
context)
# Install and start the listener server on launcher VM(s).
launcher_vm.InstallPackages('netcat')
launcher_vm.PushDataFile(_LISTENER_SERVER, _REMOTE_DIR)
client_port = _SSH_PORT if _IsLinux() else _RDP_PORT
launcher_vm.RemoteCommand('touch log')
launcher_vm.RemoteCommand(_GetServerStartCommand(client_port, launcher_vm))
# Render clean up script on launcher server VM(s).
launcher_vm.RenderTemplate(data.ResourcePath(_CLEAN_UP_TEMPLATE),
_CLEAN_UP_SCRIPT_PATH, context) | 5,353,607 |
def find_closest_integer_in_ref_arr(query_int: int, ref_arr: NDArrayInt) -> Tuple[int, int]:
"""Find the closest integer to any integer inside a reference array, and the corresponding difference.
In our use case, the query integer represents a nanosecond-discretized timestamp, and the
reference array represents a numpy array of nanosecond-discretized timestamps.
Instead of sorting the whole array of timestamp differences, we just
take the minimum value (to speed up this function).
Args:
query_int: query integer,
ref_arr: Numpy array of integers
Returns:
integer, representing the closest integer found in a reference array to a query
integer, representing the integer difference between the match and query integers
"""
closest_ind = np.argmin(np.absolute(ref_arr - query_int))
closest_int = cast(int, ref_arr[closest_ind]) # mypy does not understand numpy arrays
int_diff = np.absolute(query_int - closest_int)
return closest_int, int_diff | 5,353,608 |
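A worked example with plain numpy (NDArrayInt is assumed to be an integer ndarray alias):
import numpy as np

ref_arr = np.array([100, 200, 300])
query_int = 260
closest_ind = np.argmin(np.absolute(ref_arr - query_int))
closest_int = int(ref_arr[closest_ind])
print(closest_int, abs(query_int - closest_int))  # 300 40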
def saver_for_file(filename):
"""
Returns a Saver that can save the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the saver for
:type filename: str
:return: the associated saver instance or None if none found
:rtype: Saver
"""
saver = javabridge.static_call(
"weka/core/converters/ConverterUtils", "getSaverForFile",
"(Ljava/lang/String;)Lweka/core/converters/AbstractFileSaver;", filename)
if saver is None:
return None
else:
return Saver(jobject=saver) | 5,353,609 |
def test_create_model_custom_folds(load_pos_and_neg_data):
"""test custom fold in create_model"""
exp = TimeSeriesExperiment()
setup_fold = 3
exp.setup(
data=load_pos_and_neg_data,
fold=setup_fold,
fh=12,
fold_strategy="sliding",
verbose=False,
)
#########################################
## Test Create Model with custom folds ##
#########################################
_ = exp.create_model("naive")
metrics1 = exp.pull()
custom_fold = 5
_ = exp.create_model("naive", fold=custom_fold)
metrics2 = exp.pull()
assert len(metrics1) == setup_fold + 2 # + 2 for Mean and SD
assert len(metrics2) == custom_fold + 2 | 5,353,610 |
def prepare_axes(ax, naxes):
""" set up the axes """
ax.set_frame_on(False)
ax.set_ylim(-0.15, 1.1)
xmax = 1.02*(naxes-1)
ax.set_xlim((-0.30, xmax))
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_xticks([]) | 5,353,611 |
def make(context, name):
"""Create an object in a registered table class.
This function will be stored in that object, so that the new table object
is able to create new table objects in its class.
!!! hint
This is needed when the user wants to insert new records in the table.
Parameters
----------
context: object
The context singleton in which this very function will be stored
under attribute `mkTable`.
name: string
The registered name of the derived table class.
"""
tableObj = factory(name)(context, name)
tableObj.mkTable = make
return tableObj | 5,353,612 |
def cofilter(function, iterator):
"""
Return items in iterator for which `function(item)` returns True.
"""
results = []
def checkFilter(notfiltered, item):
if notfiltered == True:
results.append(item)
def dofilter(item):
d = maybeDeferred(function, item)
d.addCallback(checkFilter, item)
return d
d = _CoFunCaller(resultCollector=dofilter).coiterate(iterator)
d.addCallback(lambda _: results)
return d | 5,353,613 |
def download_dataset(dataset_name='mnist'):
"""
Load MNIST dataset using keras convenience function
Args:
dataset_name (str): which of the keras datasets to download
Returns tuple[np.array[float]]:
(train images, train labels), (test images, test labels)
"""
if dataset_name == 'mnist':
return tf.keras.datasets.mnist.load_data()
elif dataset_name == 'binarised_mnist':
return load_binarised_mnist_data() | 5,353,614 |
def __listen_for_requests_events(node_id, success, measurement: str = 'locust_requests') -> Callable:
"""
Persist request information to influxdb.
:param node_id: The id of the node reporting the event.
:param measurement: The measurement where to save this point.
:param success: Flag marking the request as successful or not.
"""
def event_handler(request_type=None, name=None, response_time=None, response_length=None, exception=None,
**_) -> None:
time = datetime.utcnow()
tags = {
'node_id': node_id,
'request_type': request_type,
'name': name,
'success': success,
'exception': repr(exception),
}
fields = {
'response_time': response_time,
'response_length': response_length,
'counter': 1, # TODO: Review the need of this field
}
point = __make_data_point(measurement, tags, fields, time)
cache.append(point)
return event_handler | 5,353,615 |
def test_find_next_htr(routing_grid: PyRoutingGrid, lay: int, coord: int, w_ntr: int, mode: Union[RoundMode, int],
even: bool, expect: int) -> None:
"""Check that find_next_htr() works properly."""
ans = routing_grid.find_next_htr(lay, coord, w_ntr, mode, even)
assert ans == expect | 5,353,616 |
def create_trigger_function_sql(
*,
audit_logged_model: Type[Model],
context_model: Type[Model],
log_entry_model: Type[Model],
) -> str:
"""
Generate the SQL that creates the trigger function used to log changes.
"""
trigger_function_name = f"{ audit_logged_model._meta.db_table }_log_change"
context_table_name = context_model._meta.db_table # noqa
context_fields = ", ".join(
field.column
for field in context_model._meta.get_fields() # noqa
if isinstance(field, Field) and not isinstance(field, AutoField)
)
log_entry_table_name = log_entry_model._meta.db_table
return dedent(
f"""
CREATE FUNCTION { trigger_function_name }()
RETURNS TRIGGER AS $$
DECLARE
-- Id of the inserted row, used to ensure exactly one row is inserted
entry_id int;
content_type_id int;
BEGIN
SELECT id INTO STRICT content_type_id
FROM django_content_type WHERE
app_label = '{ audit_logged_model._meta.app_label }'
AND model = '{ audit_logged_model._meta.model_name }';
IF (TG_OP = 'INSERT') THEN
INSERT INTO { log_entry_table_name } (
{ context_fields },
action,
at,
changes,
content_type_id,
object_id
) SELECT
{ context_fields },
TG_OP as action,
now() as at,
to_jsonb(NEW.*) as changes,
content_type_id,
NEW.id as object_id
-- We rely on this table being created by our Django middleware
FROM { context_table_name }
-- We return the id into the variable to make postgresql check
-- that exactly one row is inserted.
RETURNING id INTO STRICT entry_id;
RETURN NEW;
ELSIF (TG_OP = 'UPDATE') THEN
INSERT INTO { log_entry_table_name } (
{ context_fields },
action,
at,
changes,
content_type_id,
object_id
) SELECT
{ context_fields },
TG_OP as action,
now() as at,
(
SELECT
-- Aggregate back to a single jsonb object, with
-- column name as key and the two values in an array.
jsonb_object_agg(
COALESCE(old_row.key, new_row.key),
ARRAY[old_row.value, new_row.value]
)
FROM
-- Select key value pairs from the old and the new
-- row, and then join them on the key. This gives
-- us rows with the same key and values from both
-- the old row and the new row.
jsonb_each(to_jsonb(OLD.*)) old_row
FULL OUTER JOIN
jsonb_each(to_jsonb(NEW.*)) new_row
ON old_row.key = new_row.key
WHERE
-- Only select rows that have actually changed
old_row.* IS DISTINCT FROM new_row.*
) as changes,
content_type_id,
NEW.id as object_id
-- We rely on this table being created by our Django middleware
FROM { context_table_name }
-- We return the id into the variable to make postgresql check
-- that exactly one row is inserted.
RETURNING id INTO STRICT entry_id;
RETURN NEW;
ELSIF (TG_OP = 'DELETE') THEN
INSERT INTO { log_entry_table_name } (
{ context_fields },
action,
at,
changes,
content_type_id,
object_id
) SELECT
{ context_fields },
TG_OP as action,
now() as at,
to_jsonb(OLD.*) as changes,
content_type_id,
OLD.id as object_id
-- We rely on this table being created by our Django middleware
FROM { context_table_name }
-- We return the id into the variable to make postgresql check
-- that exactly one row is inserted.
RETURNING id INTO STRICT entry_id;
RETURN NEW;
END IF;
END;
$$ language 'plpgsql';
"""
) | 5,353,617 |
def p_relop_neq(p: yacc.YaccProduction):
"""REL_OP : NEQ_COMPARISON"""
p[0] = {'code': '!='} | 5,353,618 |
def add_nop_conv_after(g, value_names):
"""Add do-nothing depthwise Conv nodes after the given value info. It will\\
take the given names as the inputs of the new node and replace the inputs\\
of the following nodes.
:param g: the graph\\
:param value_names: a list of string which are the names of value_info.
"""
for value_name in value_names:
# Find the value first
value = helper.find_value_by_name(g, value_name)
if value is None:
value = helper.find_input_by_name(g, value_name)
if value is None:
value = helper.find_output_by_name(g, value_name)
if value is None:
print("Cannot find an value_info named {}".format(value_name))
continue
# Get the channel number from value info
shape = helper.get_shape_from_value_info(value)
channel = shape[1]
# Construct the depthwise weight tensor (all ones)
node_name = value_name + "_nop_conv"
ones = [1.0] * channel
weight_node = helper.list_to_constant(node_name + "_weight", [channel, 1, 1, 1], ones)
# Construct the depthwise Conv node
conv_node = onnx.helper.make_node(
"Conv",
[value_name,
weight_node.output[0]],
[node_name],
name = node_name,
dilations = [1, 1],
group = channel,
kernel_shape = [1, 1],
pads = [0, 0, 0, 0],
strides = [1, 1]
)
# Reconnect the graph
following_nodes = helper.find_following_nodes_by_input_value_name(g, value_name)
if len(following_nodes) > 0:
for following_node in following_nodes:
replace_node_input(following_node, value_name, node_name)
else:
# If the node is the output, replace the output with the previous input.
new_value = onnx.helper.make_tensor_value_info(
node_name,
value.type.tensor_type.elem_type,
shape
)
output_values = []
while len(g.output):
output_values.append(g.output.pop())
while output_values:
output_value = output_values.pop()
if output_value.name == value_name:
g.output.extend([new_value])
else:
g.output.extend([output_value])
# Add node to the graph
g.node.extend([conv_node, weight_node])
topological_sort(g) | 5,353,619 |
def reorder_by_first(*arrays):
"""
Applies the same permutation to all passed arrays,
permutation sorts the first passed array
"""
arrays = check_arrays(*arrays)
order = np.argsort(arrays[0])
return [arr[order] for arr in arrays] | 5,353,620 |
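A worked example of the argsort-based permutation (check_arrays is assumed to only validate and convert the inputs to numpy arrays):
import numpy as np

first = np.array([3, 1, 2])
second = np.array([30, 10, 20])
order = np.argsort(first)
print([arr[order] for arr in (first, second)])  # [array([1, 2, 3]), array([10, 20, 30])]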
def machine_save_handler(sender, **kwargs):
"""
Send the serialized Machine to the WebSocket server whenever
a Machine is saved.
"""
from api.serializers import MachineSerializer
instance = kwargs.pop('instance')
serialized = MachineSerializer(instance)
body = serialized.data
url = 'http://localhost:1984/machines/'
headers = {
'Content-Type': 'application/json',
}
try:
response = requests.post(url, headers=headers, data=json.dumps(body))
logger.info("POSTed APIRequest to Websocket Server")
except Exception as e:
# If cannot send notification to WebSocket server,
# it is likely not running
# Gracefully fail, log failure, and continue save
logger.info("Could not POST to Websocket Server")
logger.debug(e) | 5,353,621 |
def embedding_weights(mesh,
vocab_dim,
output_dim,
variable_dtype,
name="embedding",
ensemble_dim=None,
initializer=None):
"""Embedding weights."""
if not ensemble_dim:
ensemble_dim = []
elif not isinstance(ensemble_dim, list):
ensemble_dim = [ensemble_dim]
shape = mtf.Shape(ensemble_dim) + [vocab_dim, output_dim]
if initializer is None:
initializer = tf.random_normal_initializer()
ret = mtf.get_variable(
mesh, name, shape, dtype=variable_dtype, initializer=initializer)
return ret | 5,353,622 |
def get_db() -> Generator:
"""
Create a db session via Depends when an endpoint is accessed.
Commit if no error occurs.
Roll back on error; in either case the session is finally closed.
"""
db = None
try:
db = SessionLocal()
yield db
db.commit()
except Exception:
if db:
db.rollback()
finally:
if db:
db.close() | 5,353,623 |
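A minimal usage sketch of the get_db dependency above with FastAPI, assuming SessionLocal is a SQLAlchemy sessionmaker; the endpoint name is illustrative:
from fastapi import Depends, FastAPI
from sqlalchemy.orm import Session

app = FastAPI()

@app.get("/health")
def health(db: Session = Depends(get_db)):
    # the yielded session is committed after this handler returns and closed either way
    return {"db_active": db.is_active}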
def generiraj_emso(zenska):
"""Funkcija generira emso stevilko"""
rojstvo = random_date_generator(julijana_zakrajsek)
# Drop the first digit of the year
emso_stevke = rojstvo[:4] + rojstvo[5:]
if zenska:
# A rather crude simplification of the last three digits: the same number can come up twice, and numbers with a leading zero are ignored, ...
return (emso_stevke + '505' + str(np.random.randint(100, 999)))
else:
return (emso_stevke + '500' + str(np.random.randint(100, 999))) | 5,353,624 |
def send_metric(name, distance, points):
"""Send person's stats to Datadog over HTTP API"""
metrics = [{
'metric': 'sportid.workout.distance',
'points': [float(distance)],
'tags': ['name:' + name],
'host': environ.get('HOST', ''),
'type': 'gauge'
},
{
'metric': 'sportid.workout.points',
'points': [float(points)],
'tags': ['name:' + name],
'host': environ.get('HOST', ''),
'type': 'gauge'
}]
r = api.Metric.send(metrics)
logging.info("Sending metrics to Datadog... %s" % r['status'])
logging.info(metrics) | 5,353,625 |
def setup_logging(config_path):
"""Setup logging configuration
"""
if os.path.isfile(config_path):
with open(config_path, "rt") as f:
config = yaml.safe_load(f.read())
for handler_name, handler_conf in config["handlers"].items():
if "filename" not in handler_conf.keys():
continue
log_path = os.path.dirname(handler_conf["filename"])
if not os.path.exists(log_path):
os.mkdir(log_path)
logging.config.dictConfig(config)
logging.info("Loaded config from yml file.")
else:
logging.basicConfig(level=logging.DEBUG)
logging.info("Loaded default config.") | 5,353,626 |
def update_bad_replicas_history(dids, rse_id, session=None):
"""
Update the bad file replicas history. Method only used by necromancer
:param dids: The list of DIDs.
:param rse_id: The rse_id.
:param session: The database session in use.
"""
for did in dids:
# Check if the replica is still there
try:
result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one()
state = result.state
if state == ReplicaState.AVAILABLE:
# If yes, and replica state is AVAILABLE, update BadReplicas
query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False)
elif state != ReplicaState.BAD:
# If the replica state is not AVAILABLE check if other replicas for the same file are still there.
try:
session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one()
except NoResultFound:
# No replicas are available for this file. Reset the replica state to BAD
update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session)
session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False)
else:
# Here, that means the file has not yet been processed by the necromancer. Just pass
pass
except NoResultFound:
# We end-up here if the replica is not registered anymore on the RSE
try:
result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one()
# If yes, the final state depends on DIDAvailability
state = result.availability
final_state = None
if state == DIDAvailability.LOST:
final_state = BadFilesStatus.LOST
elif state == DIDAvailability.DELETED:
final_state = BadFilesStatus.DELETED
elif state == DIDAvailability.AVAILABLE:
final_state = BadFilesStatus.DELETED
else:
# For completeness; it shouldn't happen.
print('Houston we have a problem.')
final_state = BadFilesStatus.DELETED
query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False)
except NoResultFound:
# If no, the replica is marked as LOST in BadFilesStatus
query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) | 5,353,627 |
def process_message(data):
"""
@keyword *prod change:* `^kevin.*prod change*(\\n|\|){2}`
*kevin* _[]_ *prod change* _[]_ * _|[user]|[summary title]|[|more details]_
@summary instead of switching context to jira just to make a silly change to ST2, have kevin make the prod change
@see *prod change:* _*kevin* create *prod change* ticket *|steve|Rolling Latest PHP Core to ST2
"""
# IF ALL CHECKS PASS SEND JOB TO DB
if 'text' in data:
text = data['text'].encode('ascii', 'ignore')
else:
text = ''
if check_channel(data['channel']) and check_text("^kevin.*prod change.*", text):
# EXTRACT TICKET DATA FROM STATEMENT
print "Prod CHange"
n_text = text.split("\n")
p_text = text.split("|")
if len(n_text) >= 3 or len(p_text) >= 3:
# ASSIGN THE TEXT
split_text = p_text if len(p_text) >= 3 else n_text
# user_id = data['user']
# user_name = get_culprit_name(user_id)
user_name = split_text[1]
summary = "{} - {}".format(user_name, split_text[2])
# desc = "\n".join(split_text[3:])
conn = sqlite3.connect('kevin.db')
with conn:
conn.execute("INSERT INTO JOBS (channel, name, parms, status) VALUES (?, 'prod_change', ?, 0)",
(data['channel'], '{}|{}'.format(user_name, summary)))
conn.commit()
outputs.append([data['channel'], 'Summary: {}'.format(summary)])
else:
print "NOT ENOUGH FIELDS: {}".format(text)
outputs.append([data['channel'], "SQUAAAWWKK Not enough Fields for Prod change"]) | 5,353,628 |
def pdf_markov2(x, y, y_offset=1, nlevels=3):
"""
Compute the empirical joint PDF for two processes of Markov order 2. This
version is a bit quicker than the more general pdf() function.
See the docstring for pdf for more info.
"""
y_offset = np.bool(y_offset)
# out = np.ones((nlevels,)*6, np.uint32)
out = np.zeros((nlevels,) * 5, np.float64)
n = x.size
for tt in xrange(2, x.size):
# out[x[tt], x[tt - 1], x[tt - 2], y[tt], y[tt - 1], y[tt - 2]] += 1
# offset signal y by +1 if we want to allow same-timebin interactions
out[x[tt], x[tt - 1], x[tt - 2],
y[tt - 1 + y_offset], y[tt - 2 + y_offset]] += 1
return out / (n - 2.) | 5,353,629 |
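A standalone sketch of the same joint-count idea (kept separate because the snippet above relies on Python 2's xrange):
import numpy as np

nlevels = 3
x = np.array([0, 1, 2, 1, 0, 2, 1, 1])
y = np.array([1, 1, 0, 2, 2, 0, 1, 0])
out = np.zeros((nlevels,) * 5)
for tt in range(2, x.size):
    # y_offset=1 shifts y forward by one step to allow same-timebin interactions
    out[x[tt], x[tt - 1], x[tt - 2], y[tt], y[tt - 1]] += 1
pdf = out / (x.size - 2.0)
print(pdf.shape, np.isclose(pdf.sum(), 1.0))  # (3, 3, 3, 3, 3) True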
def add_vertex(graph, vertex):
"""
Raise an exception if something goes wrong
and the vertex is not added.
"""
assert isinstance(vertex, str)
gr = PillowGraph(StringToAdjListDict(graph.AdjList))
gr.add_vertex(vertex)
graph.AdjList = str(gr.AdjList)
graph.save() | 5,353,630 |
def cvt_dotted_as_name(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""dotted_as_name: dotted_name ['as' NAME]"""
assert ctx.is_REF, [node]
dotted_name = xcast(ast_cooked.DottedNameNode, cvt(node.children[0], ctx.to_BARE()))
if len(node.children) == 1:
# `import os.path` creates a binding for `os`.
return ast_cooked.ImportDottedAsNameNode(dotted_name=dotted_name, as_name=None)
else:
as_name = typing.cast(Union[ast_cooked.NameBindsNode, ast_cooked.NameBindsGlobalNode],
cvt(node.children[2], ctx.to_BINDING()))
assert isinstance(
as_name,
(ast_cooked.NameBindsNode, ast_cooked.NameBindsGlobalNode)) # TODO: delete
return ast_cooked.ImportDottedAsNameNode(dotted_name=dotted_name, as_name=as_name) | 5,353,631 |
def train(
model,
data,
epochs=10,
batch_size=100,
lr=0.001,
lr_decay_mul=0.9,
lam_recon=0.392,
save_dir=None,
weights_save_dir=None,
save_freq=100,
):
"""Train a given Capsule Network model.
Args:
model: The CapsuleNet model to train.
data: The dataset that you want to train: ((x_train, y_train), (x_test, y_test)).
epochs: Number of epochs for the training.
batch_size: Size of the batch used for the training.
lr: Initial learning rate value.
lr_decay_mul: The value multiplied by lr at each epoch. Set a larger value for larger epochs.
lam_recon: The coefficient for the loss of decoder (if present).
save_dir: Directory that will contain the logs of the training. `None` if you don't want to save the logs.
weights_save_dir: Directory that will contain the weights saved. `None` if you don't want to save the weights.
save_freq: The number of batches after which weights are saved.
Returns:
The trained model.
"""
# Unpack data
(x_train, y_train), (x_test, y_test) = data
# Understand if the model uses the decoder or not
n_output = len(model.outputs)
# Compile the model
model.compile(
optimizer=optimizers.Adam(lr=lr),
loss=[margin_loss, "mse"] if n_output == 2 else [margin_loss],
loss_weights=[1.0, lam_recon] if n_output == 2 else [1.0],
metrics=["accuracy"],
)
# Define a callback to reduce learning rate
cbacks = [
callbacks.LearningRateScheduler(
schedule=lambda epoch: lr * (lr_decay_mul ** epoch)
)
]
# Define a callback to save training datas
if save_dir:
cbacks.append(callbacks.CSVLogger(os.path.join(save_dir, "training.csv")))
# Define a callback to save weights during the training
if weights_save_dir:
cbacks.append(WeightsSaver(weights_save_dir, save_freq))
# Simple training without data augmentation
model.fit(
x=(x_train, y_train) if n_output == 2 else x_train,
y=(y_train, x_train) if n_output == 2 else y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=((x_test, y_test), (y_test, x_test))
if n_output == 2
else (x_test, y_test),
callbacks=cbacks,
)
# Save final weights at the end of the training
if weights_save_dir:
model.save_weights(os.path.join(weights_save_dir, "trained.h5"))
return model | 5,353,632 |
def test_actor_for_edge_expiration(setup):
# type: (SetupTest) -> None
"""Test choice of actor ID when expiring an edge.
Our current audit log model has no concept of a system-generated change and has to map every
change to a user ID that performed that change. We previously had a bug where we would try to
grab the first owner of the group and use them as the actor when someone expired out of a
group, which caused uncaught exceptions if the group somehow ended up in a state with no
owners. Test that we do something sane when expiring edges if possible.
Everything we're testing here is a workaround for a bug. Once the audit log has been fixed so
that we can log entries for system actions without attributing them to some user in the system,
this test and all of the logic it's testing can go away.
"""
settings = Settings()
now_minus_one_second = datetime.utcfromtimestamp(int(time() - 1))
audit_log_service = setup.service_factory.create_audit_log_service()
# An expiring individual user should be logged with an actor ID of the user.
with setup.transaction():
setup.add_user_to_group("[email protected]", "some-group", expiration=now_minus_one_second)
edge = setup.session.query(GroupEdge).filter_by(expiration=now_minus_one_second).one()
notify_edge_expiration(settings, setup.session, edge)
log_entries = audit_log_service.entries_affecting_user("[email protected]", 1)
assert log_entries
assert log_entries[0].actor == "[email protected]"
assert log_entries[0].action == "expired_from_group"
assert log_entries[0].on_user == "[email protected]"
with setup.transaction():
edge.delete(setup.session)
# An expiring group should be logged with an actor ID of the owner of the parent group.
with setup.transaction():
setup.add_user_to_group("[email protected]", "parent-group", role="owner")
setup.add_user_to_group("[email protected]", "child-group", role="owner")
setup.add_group_to_group("child-group", "parent-group", expiration=now_minus_one_second)
edge = setup.session.query(GroupEdge).filter_by(expiration=now_minus_one_second).one()
notify_edge_expiration(settings, setup.session, edge)
log_entries = audit_log_service.entries_affecting_group("child-group", 1)
assert log_entries
assert log_entries[0].actor == "[email protected]"
assert log_entries[0].action == "expired_from_group"
assert log_entries[0].on_group == "child-group"
log_entries = audit_log_service.entries_affecting_group("parent-group", 1)
assert log_entries
assert log_entries[0].actor == "[email protected]"
assert log_entries[0].action == "expired_from_group"
assert log_entries[0].on_group == "parent-group"
with setup.transaction():
edge.delete(setup.session)
# If the parent group has no owner, it should be logged with an actor ID of the owner of the
# child group.
with setup.transaction():
setup.add_user_to_group("[email protected]", "a-group", role="owner")
setup.add_group_to_group("a-group", "ownerless-group", expiration=now_minus_one_second)
edge = setup.session.query(GroupEdge).filter_by(expiration=now_minus_one_second).one()
notify_edge_expiration(settings, setup.session, edge)
log_entries = audit_log_service.entries_affecting_group("a-group", 1)
assert log_entries
assert log_entries[0].actor == "[email protected]"
assert log_entries[0].action == "expired_from_group"
assert log_entries[0].on_group == "a-group"
log_entries = audit_log_service.entries_affecting_group("ownerless-group", 1)
assert log_entries
assert log_entries[0].actor == "[email protected]"
assert log_entries[0].action == "expired_from_group"
assert log_entries[0].on_group == "ownerless-group"
with setup.transaction():
edge.delete(setup.session)
# If neither group has an owner, raise an exception.
with setup.transaction():
setup.add_group_to_group("other-group", "ownerless-group", expiration=now_minus_one_second)
edge = setup.session.query(GroupEdge).filter_by(expiration=now_minus_one_second).one()
with pytest.raises(UnknownActorDuringExpirationException):
notify_edge_expiration(settings, setup.session, edge) | 5,353,633 |
def txn_replay(session_filename, txn, proxy, result_queue, request_session):
""" Replays a single transaction
:param request_session: has to be a valid requests session"""
req = txn.getRequest()
resp = txn.getResponse()
# Construct HTTP request & fire it off
txn_req_headers = req.getHeaders()
txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers)
txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier
if 'body' in txn_req_headers_dict:
del txn_req_headers_dict['body']
#print("Replaying session")
try:
# response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers),
# 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers),
# headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me
method = extractHeader.extract_txn_req_method(txn_req_headers)
response = None
body = None
content = None
if 'Transfer-Encoding' in txn_req_headers_dict:
# deleting the host key, since the STUPID post/get functions are going to add host field anyway, so there will be multiple host fields in the header
# This confuses the ATS and it returns 400 "Invalid HTTP request". I don't believe this
# BUT, this is not a problem if the data is not chunked encoded.. Strange, huh?
del txn_req_headers_dict['Host']
if 'Content-Length' in txn_req_headers_dict:
#print("ewww !")
del txn_req_headers_dict['Content-Length']
body = gen()
if 'Content-Length' in txn_req_headers_dict:
nBytes = int(txn_req_headers_dict['Content-Length'])
body = createDummyBodywithLength(nBytes)
#print("request session is",id(request_session))
if method == 'GET':
r1 = request_session.request('GET', extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content # byte array
#print("len: {0} received {1}".format(responseHeaders['Content-Length'], responseContent))
elif method == 'POST':
r1 = request_session.request('POST', extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
#print("len: {0} received {1}".format(responseHeaders['Content-Length'], responseContent))
elif method == 'HEAD':
r1 = request_session.request('HEAD', extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
else: # EXPERIMENTAL
r1 = request_session.request(method, extractHeader.extract_GET_path(
txn_req_headers), headers=txn_req_headers_dict, data=body)
responseHeaders = r1.headers
responseContent = r1.content
#gzip_file = gzip.GzipFile(fileobj=responseContent)
#shutil.copyfileobj(gzip_file, f)
expected = extractHeader.responseHeader_to_dict(resp.getHeaders())
# print("------------EXPECTED-----------")
# print(expected)
# print("------------RESP--------------")
# print(responseHeaders)
# print()
if mainProcess.verbose:
expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2)
expected_output = (int(expected_output_split[1]), str(expected_output_split[2]))
r = result.Result(session_filename, expected_output[0], r1.status_code, responseContent)
b_res, res = r.getResult(responseHeaders, expected, colorize=True)
print(res)
if not b_res:
print("Received response")
print(responseHeaders)
print("Expected response")
print(expected)
# result_queue.put(r)
except UnicodeEncodeError as e:
# these unicode errors are due to the interaction between Requests and our wiretrace data.
# TODO fix
print("UnicodeEncodeError exception")
except requests.exceptions.ContentDecodingError as e:
print("ContentDecodingError", e)
except:
e = sys.exc_info()
print("ERROR in NonSSLReplay: ", e, response, session_filename) | 5,353,634 |
def check_if_recipe_skippable(recipe, channels, repodata_dict, actualname_to_idname):
"""
check_if_recipe_skippable
=========================
Method used to check if a recipe should be skipped or not.
Skip criteria include:
- If the version of the recipe in the channel repodata is greater than or equal to the query recipe.
- If the query recipe's version and build are equal to or less than the recipe in the repodata
Non-Skip Criteria include:
- Opposite of skip criteria
- If the recipe is not in any channel
Parameters:
-----------
1) recipe: (str) The directory path to the query recipe
2) channels: (list) A list of channels to check against
3) repodata_dict: (dict) A dictionary of repodata by channel (From get_repodata() method)
4) actualname_to_idname: (dict) Dict mapping recipe names to id names in the repodata_dict. (From get_repodata() method)
Returns:
++++++++
- Return True if recipe building is skippable
- Return False if recipe building cannot be skipped
"""
platform, metas = load_platform_metas(recipe, finalize=False)
# The recipe likely defined skip: True
if not metas:
return True
## Get each packages name, version, and build number
packages = set(
(meta.name(), float(meta.version()), float(meta.build_number() or 0))
for meta in metas
)
for name, version, build_num in packages:
present = False
for c in channels:
## Check for the recipe in one of the channel's repodata
if name in actualname_to_idname[c].keys():
## Find the newest/highest versioned and build package
present = True
cur_version = -1.0
cur_build = -1.0
for pkg_tar in actualname_to_idname[c][name]:
repo_version = float(repodata_dict[c][pkg_tar]["version"])
repo_build_number = float(repodata_dict[c][pkg_tar]["build_number"])
## If version is greater than the previous version, reset values with this package
if repo_version > cur_version:
cur_version = repo_version
cur_build = repo_build_number
## If version is the same but the build number is greater, reset values with this package
elif repo_version == cur_version and repo_build_number > cur_build:
cur_build = repo_build_number
## Check whether the query package is newer than what is reported in the repodata
## If the query package's version is greater than the best in the repodata, update recipe
if cur_version < version:
return False
## If the query package has the same version but a greater build number than the best in the repodata, update the recipe
elif cur_version == version and cur_build < build_num:
return False
## If package not already in the repodata
if not present:
return False
print(
":ggd:build recipes: FILTER: not building recipe {} because the version and/or build number match what is already in the channel and not forced".format(
recipe
)
)
return True | 5,353,635 |
def ensure_csv_detections_file(
folder: types.GirderModel, detection_item: Item, user: types.GirderUserModel
) -> types.GirderModel:
"""
Ensures that the detection item has a file which is a csv.
Attach the newly created .csv to the existing detection_item.
:returns: the file document.
TODO: move this to the training job code instead of keeping it
in the request thread
"""
filename, gen = crud.get_annotation_csv_generator(folder, user, excludeBelowThreshold=True)
filename = slugify(filename)
csv_bytes = ("".join([line for line in gen()])).encode()
new_file = File().createFile(
user,
detection_item,
filename,
len(csv_bytes),
Assetstore().getCurrent(),
reuseExisting=True,
)
upload = Upload().createUploadToFile(new_file, user, len(csv_bytes))
new_file = Upload().handleChunk(upload, csv_bytes)
return new_file | 5,353,636 |
def hello(name=None):
"""Assuming that name is a String and it checks for user typos to return a name with a first capital letter (Xxxx).
Args:
name (str): A persons name.
Returns:
str: "Hello, Name!" to a given name, or says Hello, World! if name is not given (or passed as an empty String).
"""
return "Hello, World!" if name is None or not name else "Hello, {}!".format(name.title()) | 5,353,637 |
def main():
"""
"""
#file = open( "jbc.p", "rb" )
#jbc = pickle.load(file)
#file.close()
#
file = open( "stffmtx.p", "rb" )
neq = pickle.load(file)
iband = pickle.load(file)
#stf = pickle.load(file)
file.close()
#
elements = pickle.load(open( "memb.p", "rb" ))
jbcc = pickle.load(open( "jbc.p", "rb" ))
jbc = list( chain.from_iterable( jbcc ) )
#for memb in elements:
# idof, jdof = memb[0]
# a = memb[1]
# assemble(idof, jdof, jbc, a, stf)
#
#aa = loop_members(elements)
#
#with mp.Manager() as manager:
#freeze_support()
#manager = Manager()
#d = manager.list(elements)
#l = manager.list(jbc)
#st = manager.list(stf)
#st = Array('f', stf)
qout = mp.Queue()
p = mp.Process(target=loop_members,
args=(elements, jbc, neq, iband, qout))
p.start()
try:
stf = qout.get(False)
except Exception:  # queue.Empty is raised by qout.get(False) when no result is available
pass
p.join()
#
#print(stf)
#p = Pool(processes=2)
#stiff = p.starmap_async(loop_members, elements)
#p.close()
#print(stiff.get())
#
#
#file = open( "stffmtx.p", "ab" )
#pickle.dump( p.st, file )
#file.close()
#
file = open( "stffmtx.p", "ab" )
pickle.dump( neq, file )
pickle.dump( iband, file )
pickle.dump( stf, file )
file.close()
print('** End multiprocessing Matrix') | 5,353,638 |
def computePCA(inputMatrix, n_components=None):
"""Compute Principle Component Analysis (PCA) on feature space. n_components specifies the number of dimensions in the transformed basis to keep."""
pca_ = PCA(n_components)
pca_.fit(inputMatrix)
return pca_ | 5,353,639 |
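A quick usage sketch with scikit-learn's PCA (rows are samples, columns are features):
import numpy as np
from sklearn.decomposition import PCA

X = np.random.default_rng(0).normal(size=(100, 5))
pca_ = PCA(n_components=2).fit(X)
print(pca_.explained_variance_ratio_.shape)  # (2,)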
def tag(repo, subset, x):
"""The specified tag by name, or all tagged revisions if no name is given.
Pattern matching is supported for `name`. See
:hg:`help revisions.patterns`.
"""
# i18n: "tag" is a keyword
args = getargs(x, 0, 1, _("tag takes one or no arguments"))
cl = repo.changelog
if args:
pattern = getstring(args[0],
# i18n: "tag" is a keyword
_('the argument to tag must be a string'))
kind, pattern, matcher = stringutil.stringmatcher(pattern)
if kind == 'literal':
# avoid resolving all tags
tn = repo._tagscache.tags.get(pattern, None)
if tn is None:
raise error.RepoLookupError(_("tag '%s' does not exist")
% pattern)
s = {repo[tn].rev()}
else:
s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
else:
s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
return subset & s | 5,353,640 |
def save_imgs(output_image_dir, dataloader):
"""Saves a grid of generated imagenet pictures with captions"""
target_dir = os.path.join(output_image_dir, "imgs/")
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
i = 0
for (imgs, _, _) in dataloader:
imgs = imgs.cpu().numpy()
imgs = np.clip(imgs, 0, 1)
imgs = np.split(imgs, imgs.shape[0])
for img in imgs:
img = np.squeeze(img)
img = np.transpose(img, (1, 2, 0))
save_file = os.path.join(target_dir, "{:013d}.png".format(i))
matplotlib.image.imsave(save_file, img)
print("saved {}".format(save_file))
i += 1 | 5,353,641 |
def _embed_json(service, targetid):
"""
Returns oEmbed JSON for a given URL and service
"""
return d.http_get(_OEMBED_MAP[service] % (urlquote(targetid),)).json() | 5,353,642 |
def test_helling():
"""Test de functie helling in bereken.py."""
assert bereken.helling(np.array([0, 0]), np.array([1, 0])) == 0.0
assert bereken.helling(np.array([0, 0]), np.array([1, 1])) == 45.0
assert bereken.helling(np.array([0, 0]), np.array([-1, -1])) == 45.0
assert bereken.helling(np.array([0, 0]), np.array([-1, 1])) == -45.0
assert bereken.helling(np.array([0, 0]), np.array([1, -1])) == -45.0
assert bereken.helling(np.array([0, 0]), np.array([0, 0])) == -90
assert bereken.helling(np.array([0, 0]), np.array([0, 1])) == -90 | 5,353,643 |
def is_blank(s):
"""Returns True if string contains only space characters."""
return re.search(reNonSpace, s) is None | 5,353,644 |
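A small illustration, assuming reNonSpace is a pattern matching any non-whitespace character:
import re

reNonSpace = re.compile(r'\S')  # assumed definition
print(re.search(reNonSpace, "   \t") is None)  # True  -> blank
print(re.search(reNonSpace, "  a  ") is None)  # False -> not blank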
def persistTeamData(teamData):
""" Creates a dataframe containing the project ids and the members matched to that project """
if connection.is_connected():
cursor = connection.cursor()
for row in teamData.index:
sql = "INSERT INTO Team(ProjectId, ProjectName, \
MemberId, MemberName)VALUES(%s,%s,%s,%s);"
cursor.execute(
sql,
(
str(teamData.loc[row, "ProjectId"]),
str(teamData.loc[row, "ProjectName"]),
str(teamData.loc[row, "MemberId"]),
str(teamData.loc[row, "MemberName"]),
),
)
connection.commit() | 5,353,645 |
def remove_invalid_chars_from_passage(passage_text):
"""
Return a cleaned passage if the passage is invalid.
If the passage is valid, return None
"""
# Check if any of the characters are invalid
bad_chars = [c for c in passage_text if c in INVALID_PASSAGE_CHARACTERS]
if bad_chars:
for b in set(bad_chars):
passage_text = passage_text.replace(b, '')
return passage_text | 5,353,646 |
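A hypothetical illustration; the real INVALID_PASSAGE_CHARACTERS set is defined elsewhere in the module:
INVALID_PASSAGE_CHARACTERS = {'\x00', '\x0b'}  # assumed contents
passage = "hello\x00world"
bad_chars = [c for c in passage if c in INVALID_PASSAGE_CHARACTERS]
for b in set(bad_chars):
    passage = passage.replace(b, '')
print(passage)  # helloworld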
def fallback_humanize(date, fallback_format=None, use_fallback=False):
"""
Format date with arrow and a fallback format
"""
# Convert to local timezone
date = arrow.get(date).to('local')
# Set default fallback format
if not fallback_format:
fallback_format = '%Y/%m/%d %H:%M:%S'
# Decide whether to use the fallback format based on the flag
if use_fallback:
return date.datetime.strftime(fallback_format)
try:
# Use Arrow's humanize function
lang, encode = locale.getdefaultlocale()
clock = date.humanize(locale=lang)
except:
# Notify only the first time
if not dg['humanize_unsupported']:
dg['humanize_unsupported'] = True
printNicely(
light_magenta('Humanized date display method does not support your $LC_ALL.'))
# Fallback when LC_ALL is not supported
clock = date.datetime.strftime(fallback_format)
return clock | 5,353,647 |
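An illustration of the two formatting paths using arrow directly (the dg / printNicely globals from the original module are not needed here):
import arrow

ts = arrow.utcnow().shift(minutes=-5).to('local')
print(ts.humanize())                              # e.g. "5 minutes ago"
print(ts.datetime.strftime('%Y/%m/%d %H:%M:%S'))  # the fallback format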
def questbackup(cfg, server):
"""Silly solution to a silly problem."""
if server not in cfg['servers']:
log.warning(f'{server} has been misspelled or not configured!')
elif 'worldname' not in cfg['servers'][server]:
log.warning(f'{server} has no world directory specified!')
elif 'questing' not in cfg['servers'][server]:
log.warning(f'{server} is not set up for questing backup!')
else:
bpath = cfg['backupspath']
world = cfg['servers'][server]['worldname']
quests = cfg['servers'][server]['questing']
log.info(f'Starting backup for {server}\'s quests...')
if isUp(server):
log.info(f'{server} is running, don\'t care, just want {quests.upper()}!')
sbpath = f'{bpath}/{server}/questing/{quests}'
try:
os.makedirs(sbpath, exist_ok=True)
except Exception as e:
log.error(f'{e}\nBackup aborted, DANGER! might lose quests!')
return False
else:
log.info('Created missing directories! (if they were missing)')
log.info('Deleting old quest backups...')
now = time()
with os.scandir(sbpath) as d:
for entry in d:
if not entry.name.startswith('.') and entry.is_file():
stats = entry.stat()
if stats.st_mtime < now - (10080 * 60):
try:
os.remove(entry.path)
except OSError as e:
log.error(e)
else:
log.info(f'Deleted {entry.path} for being too old!')
log.info('Creating quest backup...')
bname = datetime.now().strftime('%Y.%m.%d-%H-%M-%S') + f'-{server}-{world}-{quests.replace("/", "_")}.tar.gz'
os.chdir(sbpath)
serverpath = cfg['serverspath']
with tarfile.open(bname, 'w:gz') as tf:
tf.add(f'{serverpath}/{server}/{world}/{quests}', quests)
log.info('Quest backup created!')
if isUp(server):
log.info(f'{server} is running, STILL DON\'T CARE!')
log.info('DONE!') | 5,353,648 |
def find_file_in_pythonpath( filename, subfolder='miri',
walkdir=False, path_only=False):
"""
Find a file matching the given name within the PYTHONPATH.
:Parameters:
filename: str
The name of the file to be located.
subfolder: str, optional (default='miri')
If specified, a preferred subfolder within the PYTHONPATH which,
if it exists, will be searched before the top level directory.
walkdir: bool, optional (default=False)
Set to True if the entire directory tree under each of the
directories in the search path should be searched as well.
By default, only the specific directories listed in the search
path are searched.
N.B. Specifying walkdir=True can slow down a file search
significantly, especially if there are a lot of utilities
installed in PYTHONPATH. The search can be speeded up by
specifying a preferred subfolder.
path_only: bool, optional (default=False)
Set to True to return the path to the folder containing the file
rather than the path to the file itself.
:Returns:
filepath: str
The full path (and name) of the matching file.
If no file is found an empty string is returned.
"""
# Step through each path in the PYTHONPATH
for path in sys.path:
# If there is a preferred subfolder, and it exists
# search this first.
if subfolder:
sfname = os.path.join(path, subfolder)
if os.path.isdir(sfname):
result = _check_folder(sfname, filename, walkdir=walkdir,
path_only=path_only)
if result:
return result
result = _check_folder(path, filename, walkdir=walkdir,
path_only=path_only)
if result:
return result
# No file found - return an empty string.
return '' | 5,353,649 |
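A hedged usage sketch of the function above (the file name is hypothetical):
path = find_file_in_pythonpath('example_data.fits', subfolder='miri', walkdir=True)
if path:
    print('Found at', path)
else:
    print('Not found anywhere on the PYTHONPATH')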
def driveForward(self, distance : float, speed : float = 50):
"""Moves Cozmo forward or backwards.
Arguments:
distance: A float representing the distance in millimeters for cozmo to travel.
- Positive: Moves Cozmo forward.
- Negative: Moves Cozmo backward.
speed: A float representing the speed, in millimeters per seconds, for Cozmo to travel at.
- 50 (default)
.. code-block:: python
robot.driveForward(100, 20)
"""
#Convert parameters into Cozmo classes that represent the units
realDistance = distance_mm(distance)
realSpeed = speed_mmps(speed)
#Log action for debugging
self.debug("Driving forward %s millimeters at a rate of %s mm/s." % (distance, speed))
#Execute movement
self.robot.drive_straight(realDistance, realSpeed).wait_for_completed() | 5,353,650 |
def set_tpu_info(params):
"""Docs."""
logging.info('Retrieve TPU information')
tpu_init = tf.tpu.initialize_system()
tpu_shutdown = tf.tpu.shutdown_system()
with common_utils.get_session(params, isolate_session_state=True) as sess:
topology_proto = sess.run(tpu_init)
topology = tpu_lib.topology.Topology(serialized=topology_proto)
sess.run(tpu_shutdown)
# Note: we use the name `worker` instead of `replica`
# TPU terminologies are extremely confusing.
num_cores_per_replica = params.num_cores_per_replica
num_workers = topology.num_tasks
num_replicas = (topology.num_tpus_per_task * topology.num_tasks //
num_cores_per_replica)
params.add_hparam('num_workers', num_workers)
params.add_hparam('num_replicas', num_replicas)
params.add_hparam('num_cores_per_worker', topology.num_tpus_per_task)
logging.info('-' * 80)
logging.info(f'num_workers={num_workers}')
logging.info(f'num_cores_per_worker={topology.num_tpus_per_task}')
logging.info(f'num_cores_per_replica={num_cores_per_replica}')
logging.info(f'num_replicas={num_replicas}')
params.topology = topology
num_cores_to_shape = {
1: [1, 1, 1, 1],
2: [1, 1, 1, 2],
4: [1, 2, 1, 2],
8: [2, 2, 1, 2],
16: [4, 2, 1, 2],
32: [4, 4, 1, 2],
64: [4, 8, 1, 2],
128: [8, 8, 1, 2],
256: [8, 16, 1, 2],
512: [16, 16, 1, 2],
2048: [32, 32, 1, 2],
}
if params.num_cores_per_replica not in num_cores_to_shape:
raise ValueError(f'Unknown num_cores {params.num_cores_per_replica}')
computation_shape = num_cores_to_shape[params.num_cores_per_replica]
params.device_assignment = tpu_lib.device_assignment.device_assignment(
topology=params.topology,
computation_shape=computation_shape,
num_replicas=params.num_replicas)
if (params.batch_norm_batch_size is not None and
params.batch_norm_batch_size < params.train_batch_size // num_workers):
logging.warning(f'batch_norm_batch_size={FLAGS.batch_norm_batch_size} '
f'changed into {FLAGS.train_batch_size}')
params['batch_norm_batch_size'] = params.train_batch_size // num_workers | 5,353,651 |
def min_column_widths(rows):
"""Computes the minimum column width for the table of strings.
>>> min_column_widths([["some", "fields"], ["other", "line"]])
[5, 6]
"""
def lengths(row): return map(len, row)
def maximums(row1, row2) : return map(max, row1, row2)
return reduce(maximums, map(lengths, rows)) | 5,353,652 |
def entities(address_book):
"""Get the entities utility."""
return zope.component.getUtility(IEntities) | 5,353,653 |
def generate_dataset_file_url(client, filepath):
"""Generate url for DatasetFile."""
if not client:
return
try:
if not client.project:
return
project = client.project
except ValueError:
from renku.core.management.migrations.models.v9 import Project
metadata_path = client.renku_path.joinpath(OLD_METADATA_PATH)
project = Project.from_yaml(metadata_path)
project_id = urlparse(project._id)
else:
project_id = urlparse(project.id)
filepath = quote(filepath, safe="/")
path = pathlib.posixpath.join(project_id.path, "files", "blob", filepath)
project_id = project_id._replace(path=path)
return project_id.geturl() | 5,353,654 |
def _check_attrs(obj):
"""Checks that a periodic function/method has all the expected attributes.
This will return the expected attributes that were **not** found.
"""
missing_attrs = []
for attr_name in _REQUIRED_ATTRS:
if not hasattr(obj, attr_name):
missing_attrs.append(attr_name)
return missing_attrs | 5,353,655 |
def stats(human=False):
"""
Print repository statistics.
@param human (bool) Whether to output the data in human-readable
format.
"""
stat_data = admin_api.stats()
if human:
click.echo(
'This option is not supported yet. Sorry.\nUse the `/admin/stats`'
' endpoint in the web UI for a pretty printout.')
else:
click.echo(json.dumps(stat_data)) | 5,353,656 |
def _chunk_noise(noise):
"""Chunk input noise data into valid Touchstone file rows."""
data = zip(
noise["freq"],
noise["nf"],
np.abs(noise["rc"]),
np.angle(noise["rc"]),
noise["res"],
)
for freq, nf, rcmag, rcangle, res in data:
yield freq, nf, rcmag, rcangle, res | 5,353,657 |
def test_TSclassifier():
"""Test TS Classifier"""
covset = generate_cov(40, 3)
labels = np.array([0, 1]).repeat(20)
assert_raises(TypeError, TSclassifier, clf='666')
clf = TSclassifier()
clf.fit(covset, labels)
assert_array_equal(clf.classes_, np.array([0, 1]))
clf.predict(covset)
clf.predict_proba(covset) | 5,353,658 |
def create_table():
"""
If the table does not exist, it is created.
If it already exists, an error is raised,
which is caught by the try/except block.
:return:
"""
sql_command = """
CREATE TABLE IF NOT EXISTS data_table (
postId INTEGER,
id INTEGER PRIMARY KEY,
name VARCHAR(1000),
email VARCHAR(200),
body CHAR(1000));"""
try:
cursor.execute(sql_command)
connection.commit()
except:
pass | 5,353,659 |
def test_dataset_info():
"""Read raster metadata and return spatial info."""
info = utils.get_dataset_info(asset1)
assert info["geometry"]
assert info["properties"]["path"]
assert info["properties"]["bounds"]
assert info["properties"]["datatype"]
assert info["properties"]["minzoom"] == 7
assert info["properties"]["maxzoom"] == 9 | 5,353,660 |
def archive(context: Context, images: List[ImageName], archive: str):
"""Operates on docker-save produced archives."""
ctx = get_context_object(context)
ctx["images"] = images
ctx["imagesource"] = ArchiveImageSource(archive=Path(archive))
verify(context) | 5,353,661 |
def _create_admin_user(keystone, admin_email, admin_password):
"""Create admin user in Keystone.
:param keystone: keystone v2 client
:param admin_email: admin user's e-mail address to be set
:param admin_password: admin user's password to be set
"""
admin_tenant = keystone.tenants.find(name='admin')
try:
keystone.users.find(name='admin')
LOG.info('Admin user already exists, skip creation')
except exceptions.NotFound:
LOG.info('Creating admin user.')
keystone.users.create('admin', email=admin_email,
password=admin_password,
tenant_id=admin_tenant.id) | 5,353,662 |
def TokenStartBlockElement(block):
"""
`TokenStartBlockElement` is used to denote that we are starting a new block element.
Under most circumstances, this token will not render anything.
"""
return {
"type": "SpaceCharacters",
"data": "",
"_md_type": mdTokenTypes["TokenStartBlockElement"],
"_md_block": block,
} | 5,353,663 |
def parse_json(json_path):
"""
Parse training params json file to python dictionary
:param json_path: path to training params json file
:return: python dict
"""
with open(json_path) as f:
d = json.load(f)
return d | 5,353,664 |
def compute_statistic(specify_db_path=None):
"""
"""
# Initialize the redis instance
RedisCtx.get_instance().host = settings.Redis_Host
RedisCtx.get_instance().port = settings.Redis_Port
MetricsAgent.get_instance().initialize_by_dict(metrics_dict)
# Get the directory containing the log files
db_path, logs_path = Index.get_log_paths(specify_db_path)
work_hour = db_path.rsplit('/', 1)[-1]
t = datetime.strptime(work_hour, settings.LogPath_Format)
settings.Working_TS = time.mktime((t.year, t.month, t.day, t.hour, t.minute, t.second, 0, 0, 0))
settings.Working_DAY = int(time.mktime((t.year, t.month, t.day, 0, 0, 0, 0, 0, 0)))
logger.debug(DEBUG_PREFIX+ 'working_hour:%s working_ts:%s, len:%s', work_hour, settings.Working_TS, len(str(settings.Working_TS)))
# Load the record schema && record header from each hourly log folder
utils.load_record_settings(db_path)
# Regenerate the python_index index directory
Index.regen_index(db_path)
# Fetch the weights of all strategies
utils.get_strategies_weigh()
# Fetch the compute variables
compute_variables = utils.fetch_compute_variables()
logger.debug(DEBUG_PREFIX+'fetched compute variables: %s', [ _['name'] for _ in compute_variables if _['name']])
# Local variable file for debugging newly added variables; it can hold just a single variable tree to avoid long waits
# import json
# with open('/home/threathunter/nebula/nebula_web/venv/lib/python2.7/site-packages/nebula_utils/unittests/offline.json', 'r') as f:
# compute_variables = json.load(f)
# cvs = [ ComputeVariableHandler.get_compute_variable(**_) for _ in compute_variables]
# dag.add_nodes(cvs)
# Walk the logs and compute offline statistics
compute_dag(compute_variables, logs_path)
# Register the callback that generates risk incidents
Hook_Functions.append(gen_risk_incidents)
# Counter for click statistics
Hook_Functions.append(gen_click_counter)
# Hook_Functions.append(gen_related_counter)
# Register the callback that computes user-profile statistics
Hook_Functions.append(gen_visit_profile)
# Compute notice statistics for the past hour
ioloop.IOLoop.current().run_sync(gen_notice_statistics)
# Aggregate notice metrics for the past hour
last = millis_now()
logger.info("start merging history metrics")
merge_history_metrics('default', 'web.notices', 'sum',
group_tags=['test', 'strategy', 'location', 'url'])
now = millis_now()
logger.info("时间消耗:{}ms".format(now - last))
last = now
# 清理统计数据目录
logger.info("开始清理统计数据目录")
stat_db_tmp_path, stat_db_path = get_stat_db_path(db_path)
now = millis_now()
logger.info("时间消耗:{}ms".format(now - last))
last = now
# 持久化统计数据
logger.info("开始持久化统计数据")
write_statistic_data(stat_db_tmp_path)
now = millis_now()
logger.info("时间消耗:{}ms".format(now - last))
last = now
# 统计完成, 临时统计目录改名为正式, 提供查询服务
logger.info("开始移动目录")
shutil.move(stat_db_tmp_path, stat_db_path)
now = millis_now()
logger.info("时间消耗:{}ms".format(now - last))
last = now
# 定时脚本统计完成后,调用web API清除缓存,刷新数据
utils.clean_cache()
now = millis_now()
logger.info("时间消耗:{}ms".format(now - last)) | 5,353,665 |
def test_encode_decode_uuid_should_succeed(n: int) -> None:
"""Tests andom uuid encoding for n times"""
for seq in range(1, n+1):
x = uuid.uuid4()
encoded = encode(x)
decoded = decode(encoded)
assert x == decoded | 5,353,666 |
def cut_tails(fastq, out_dir, trimm_adapter, trimm_primer, hangF, hangR):
"""
The function ...
Parameters
----------
reads : str
path to ...
out_dir : str
path to ...
hang1 : str
Sequence ...
hang2 : str
Sequence ...
Returns
-------
-
"""
output = fastq
# cut barcodes
if trimm_adapter == True:
call('porechop -i {} --verbosity 0 -t 100 --require_two_barcodes --extra_end_trim 0 -o {}/trimmed_barcode.fastq'.format(fastq, out_dir), shell=True)
fastq = out_dir + "/trimmed_barcode.fastq"
output = out_dir + "/trimmed_barcode.fastq"
# cut primers
if trimm_primer == True:
opn_fastq = parse(fastq, 'fastq')
# cut primers
with open('{}/trimmed_primer.fastq'.format(out_dir), 'w') as trimmed_fasta:
for record in opn_fastq:
for idx in range(4):
if idx not in (1, 3):
trimmed_fasta.write(record.format('fastq').split('\n')[idx] + '\n')
else:
trimmed_fasta.write(record.format('fastq').split('\n')[idx][len(hangF): -len(hangR)] + '\n')
output = '{}/trimmed_primer.fastq'.format(out_dir)
return output | 5,353,667 |
def incidentReports(
draw: Callable[..., Any],
new: bool = False,
event: Optional[Event] = None,
maxNumber: Optional[int] = None,
beforeNow: bool = False,
fromNow: bool = False,
) -> IncidentReport:
"""
Strategy that generates :class:`IncidentReport` values.
"""
automatic: Optional[bool]
if new:
number = 0
automatic = False
else:
number = draw(incidentNumbers(max=maxNumber))
automatic = None
if event is None:
event = draw(events())
return IncidentReport(
event=event,
number=number,
created=draw(dateTimes(beforeNow=beforeNow, fromNow=fromNow)),
summary=draw(incidentReportSummaries()),
incidentNumber=None, # FIXME: May allow some to be passed in?
reportEntries=draw(
lists(
reportEntries(
automatic=automatic, beforeNow=beforeNow, fromNow=fromNow
)
)
),
) | 5,353,668 |
def define_vectorized_funcs():
"""
Defines vectorized versions of functions from uncertainties.umath_core.
Some functions have their name translated, so as to follow NumPy's
convention (example: math.acos -> numpy.arccos).
"""
this_module = sys.modules[__name__]
# NumPy does not always use the same function names as the math
# module:
func_name_translations = dict([
(f_name, 'arc'+f_name[1:])
for f_name in ['acos', 'acosh', 'asin', 'atan', 'atan2', 'atanh']])
new_func_names = [
func_name_translations.get(function_name, function_name)
# The functions from umath_core.non_std_wrapped_funcs
# (available from umath) are normally not in
# NumPy, so they are not included here:
for function_name in umath_core.many_scalars_to_scalar_funcs]
for (function_name, unumpy_name) in zip(
umath_core.many_scalars_to_scalar_funcs, new_func_names):
# ! The newly defined functions (uncertainties.unumpy.cos, etc.)
# do not behave exactly like their NumPy equivalent (numpy.cos,
# etc.): cos(0) gives an array() and not a
# numpy.float... (equality tests succeed, though).
func = getattr(umath_core, function_name)
# Data type of the result of the unumpy function:
otypes = (
# It is much more convenient to preserve the type of
# functions that return a number without
# uncertainty. Thus, for example, unumpy.isnan() can
# return an array with a boolean data type (instead of
# object), which allows the result to be used with NumPy's
# boolean indexing.
{} if function_name in umath_core.locally_cst_funcs
# If by any chance a function returns, in a particular
# case, an integer instead of a number with uncertainty,
# side-effects in vectorize() would fix the resulting
# dtype to integer, which is not what is wanted (as
# vectorize(), at least in NumPy around 2010 maybe,
# decided about the output data type by looking at the
# type of first element only).
else {'otypes': [object]})
setattr(
this_module, unumpy_name,
#!!!! For umath_core.locally_cst_funcs, would it make sense
# to optimize this by using instead the equivalent (? see
# above) vectorized NumPy function on the nominal values?
numpy.vectorize(func,
doc="""\
Vectorized version of umath.%s.
Original documentation:
%s""" % (function_name, func.__doc__),
**otypes))
__all__.append(unumpy_name) | 5,353,669 |
def load_spelling(spell_file=SPELLING_FILE):
"""
Load the term_freq from spell_file
"""
with open(spell_file, encoding="utf-8") as f:
tokens = f.read().split('\n')
size = len(tokens)
term_freq = {token: size - i for i, token in enumerate(tokens)}
return term_freq | 5,353,670 |
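A small usage sketch for load_spelling, writing a throwaway word list first; the file name is hypothetical.
with open("spelling_demo.txt", "w", encoding="utf-8") as f:
    f.write("the\nof\nand")
# Earlier tokens get higher ranks: {'the': 3, 'of': 2, 'and': 1}
print(load_spelling("spelling_demo.txt"))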
def NotP8():
"""
Return the matroid ``NotP8``.
This is a matroid that is not `P_8`, found on page 512 of [Oxl1992]_ (the
first edition).
EXAMPLES::
sage: M = matroids.named_matroids.P8()
sage: N = matroids.named_matroids.NotP8()
sage: M.is_isomorphic(N)
False
sage: M.is_valid()
True
"""
A = Matrix(GF(3), [
[1, 0, 0, 0, 0, 1, 1, -1],
[0, 1, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 0, 1],
[0, 0, 0, 1, -1, 1, 1, 1]
])
M = TernaryMatroid(A, 'abcdefgh')
M.rename('NotP8: ' + repr(M))
return M | 5,353,671 |
def kill(proc):
"""Kills |proc| and ignores exceptions thrown for non-existent processes."""
try:
if proc and proc.pid:
os.kill(proc.pid, signal.SIGKILL)
except OSError:
pass | 5,353,672 |
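A usage sketch for kill on a POSIX system (SIGKILL and the sleep command are not available on Windows).
import subprocess

proc = subprocess.Popen(["sleep", "60"])
kill(proc)    # terminates the child immediately
proc.wait()   # reap the child so it does not linger as a zombie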
def eqv(var_inp):
"""Returns the von-mises stress of a Field or FieldContainer
Returns
-------
field : ansys.dpf.core.Field, ansys.dpf.core.FieldContainer
The von-mises stress of this field. Output type will match input type.
"""
if isinstance(var_inp, dpf.core.Field):
return _eqv(var_inp)
elif isinstance(var_inp, dpf.core.FieldsContainer):
return _eqv_fc(var_inp)
# elif isinstance(var_inp, dpf.core.Operator):
# return _eqv_op(var_inp)
else:
raise TypeError('Input type must be a Field or FieldsContainer') | 5,353,673 |
def sort(array: list[int]) -> list[int]:
"""Counting sort implementation.
"""
result: list[int] = [0, ] * len(array)
low: int = min(array)
high: int = max(array)
count_array: list[int] = [0 for i in range(low, high + 1)]
for i in array:
count_array[i - low] += 1
for j in range(1, len(count_array)):
count_array[j] += count_array[j - 1]
for k in reversed(array):
result[count_array[k - low] - 1] = k
count_array[k - low] -= 1
return result | 5,353,674 |
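A quick usage sketch for the counting sort above; because the counts are offset by the minimum element, negative values are handled as well.
print(sort([4, 2, 2, 8, 3, 3, 1]))   # [1, 2, 2, 3, 3, 4, 8]
print(sort([0, -5, 3, -5, 2]))       # [-5, -5, 0, 2, 3]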
def build_updated_figures(
df, colorscale_name
):
"""
Build all figures for dashboard
Args:
- df: census 2010 dataset (cudf.DataFrame)
- colorscale_name
Returns:
tuple of figures in the following order
(education_histogram, income_histogram, cow_histogram, age_histogram)
"""
colorscale_transform = 'linear'
education_histogram = build_histogram_default_bins(
df, 'education', 'v', colorscale_name, colorscale_transform
)
income_histogram = build_histogram_default_bins(
df, 'income', 'v', colorscale_name, colorscale_transform
)
cow_histogram = build_histogram_default_bins(
df, 'cow', 'v', colorscale_name, colorscale_transform
)
age_histogram = build_histogram_default_bins(
df, 'age', 'v', colorscale_name, colorscale_transform
)
return (
education_histogram, income_histogram,
cow_histogram, age_histogram,
) | 5,353,675 |
def authorization(self, name, pasw, pages, screen, edu_lects, std_lects, dt):
"""
This method checks if user credentials are valid through server.
:param self: It is for handling class structure.
:param name: It is username.
:param pasw: It is password.
:param pages: It is list of pages.
:param screen: It is screen manager.
:param edu_lects: It is class of lectures page for educators.
:param std_lects: It is class of lectures page for students.
:param dt: It is for handling callback input.
:return:
"""
Clock.schedule_once(partial(validation,
self,
pages,
screen,
edu_lects,
std_lects
),
3
)
try:
self.data = database_api.signIn(name,
pasw
)
except:
self.data = None | 5,353,676 |
def create_tcp(sip, pjsip, nmapped):
"""
Creates a 'transport-tcp' section in the pjsip.conf file based
on the following settings from sip.conf:
tcpenable
tcpbindaddr (or bindaddr)
"""
protocol = 'tcp'
bind, section = get_bind(sip, pjsip, protocol)
if not bind:
return
set_value('protocol', protocol, section, pjsip, nmapped, 'transport')
set_value('bind', bind, section, pjsip, nmapped, 'transport')
set_transport_common(section, sip, pjsip, protocol, nmapped) | 5,353,677 |
def sacct():
"""
Wrapper around the slurm "sacct" command. Returns an object, and each
property is the (unformatted) value according to sacct.
Would also work with .e.g:
# with open("/home/Desktop/sacct.txt", "r") as file:
:return: SacctWrapper object, with attributes based on the output of sacct
"""
with os.popen("sacct -j $SLURM_JOB_ID -l --parsable2") as file:
return SacctWrapper(file) | 5,353,678 |
def get_or_else_optional(optional: Optional[_T], alt_value: _T) -> _T:
"""
General-purpose getter for `Optional`. If it's `None`, returns the `alt_value`.
Otherwise, returns the contents of `optional`.
"""
if optional is None:
return alt_value
return optional | 5,353,679 |
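Usage sketch: the alternative is returned only when the optional is actually None.
assert get_or_else_optional(None, 42) == 42
assert get_or_else_optional(7, 42) == 7
assert get_or_else_optional("", "fallback") == ""  # falsy values other than None are kept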
def main():
"""
Starts the crawler with user-provided CLI arguments
"""
parser = argparse.ArgumentParser(
description="A crawler for gathering Maven coordinates and put them in a Kafka topic.")
parser.add_argument("--m", default=MVN_URL, type=str, help="The URL of Maven repositories")
parser.add_argument("--p", required=True, type=str, help="A path to save the POM files on the disk")
parser.add_argument("--q", required=True, type=str, help="The file of queue items")
parser.add_argument("--c", default=5.0, type=float,
help="How long the crawler waits before sending a request (in sec.)")
parser.add_argument("--no-kafka", dest='no_kafka', action='store_true',
help="Stores the extracted Maven coordinates in a JSON file rather than in a Kafka topic.")
parser.add_argument("--t", default="fasten.mvn.pkg", type=str,
help="The name of a Kafka topic to put Maven coordinates into.")
parser.add_argument("--h", default="localhost:9092", type=str, help="The address of Kafka server")
parser.add_argument("--l", default=-1, type=int,
help="The number of POM files to be extracted. -1 means unlimited.")
parser.set_defaults(no_kafka=False)
args = parser.parse_args()
MVN_PATH = args.p
extract_pom_files(args.m, MVN_PATH, args.q, args.c,
None if args.no_kafka else MavenCoordProducer(args.h, args.t), args.l) | 5,353,680 |
def create_dic(udic):
"""
Create a glue dictionary from a universal dictionary
"""
return udic | 5,353,681 |
def information_gain_w_posterior():
"""
Make a table/plot which estimates the mutual information (K-L divergence) between two distributions.
The distributions are approximated by the MCMC samples.
The information gain H = /int P(X) log_2( P(X) / pi(X) )
where P(X) is the posterior, and pi(X) is the prior
"""
#this sets the number of bins for the samples of the distribution
#it thereby controls the integration mesh for estimating the mutual information
nbins=70
#factor to thin chains
thin=1
chain0 = Chain(path=workdir/'mcmc'/'chain-idf-0_RHIC_PTEMCEE.hdf')
data0 = chain0.load_wo_reshape(thin=thin)
data0 = data0.reshape(-1, 18).T
chain1 = Chain(path=workdir/'mcmc'/'chain-idf-0_LHC_RHIC_PTEMCEE.hdf')
data1 = chain1.load_wo_reshape(thin=thin)
data1 = data1.reshape(-1, 19).T
labels = chain1.labels
ranges = np.array(chain0.range)
#indices0 = [1, 2, 3, 4, 5, 6, 16] # TRENTo, FS and T_sw
#indices1 = [2, 3, 4, 5, 6, 7, 17] # TRENTo, FS and T_sw
indices0 = np.arange(1, 17)
indices1 = np.arange(2, 18)
data0 = np.take(data0, indices0, axis=0)
data1 = np.take(data1, indices1, axis=0)
labels = np.take(labels, indices1)
ranges = np.take(ranges, indices1, axis=0)
ndims, nsamples = data0.shape
fig, axes = plt.subplots( nrows=ndims, ncols=ndims, figsize=(1.*ndims, 1.*ndims) )
cmap0 = plt.get_cmap('Purples')
cmap1 = plt.get_cmap('Oranges')
color0 = 'purple'
color1 = 'orange'
cmap0.set_bad('white')
cmap1.set_bad('white')
for i, row in enumerate(axes):
for j, ax in enumerate(row):
x0 = data0[j]
y0 = data0[i]
x1 = data1[j]
y1 = data1[i]
xlabel = labels[j]
ylabel = labels[i]
if i==j:
H0, bins, _ = ax.hist(x0, bins=nbins, histtype='step', range=ranges[i, :], normed=True, color=color0)
H1, _, _ = ax.hist(x1, bins=nbins, histtype='step', range=ranges[i, :], normed=True, color=color1)
sns.kdeplot(x0, color=color0, shade=True, ax=ax)
sns.kdeplot(x1, color=color1, shade=True, ax=ax)
bin_width = bins[1] - bins[0]
info = calculate_information_gain(H0, H1, bin_width)
info = "{:.1e}".format(info)
info_str = "IG = " + info
ax.annotate(info_str, xy=(0.5, 0.9), xycoords="axes fraction", ha='center', va='bottom', fontsize=4, weight='bold')
ax.set_ylim(0, max( H0.max()*1.2, H1.max()*1.2 ))
if i>j:
H0, xbins, ybins, _ = ax.hist2d(x0, y0, bins=nbins, range=[ ranges[j, :], ranges[i,:] ], alpha=1.0, zorder=1, cmap=cmap0)
if i<j:
#ax.set_visible(False)
H0, xbins, ybins, _ = ax.hist2d(x0, y0, bins=nbins, range=[ ranges[j, :], ranges[i,:] ], alpha=0., zorder=1, cmap=cmap0)
H1, xbins, ybins, _ = ax.hist2d(x1, y1, bins=nbins, range=[ ranges[j, :], ranges[i,:] ], alpha=1.0, zorder=1, cmap=cmap1)
dx = xbins[1] - xbins[0]
dy = ybins[1] - ybins[0]
bin_width = [dx, dy]
info = calculate_information_gain(H0, H1, bin_width)
info = "{:.1e}".format(info)
info_str = "IG = " + info
ax.annotate(info_str, xy=(0.5, 0.9), xycoords="axes fraction", ha='center', va='bottom', fontsize=4, weight='bold')
if ax.is_first_col():
ax.set_ylabel(ylabel, fontsize=5)
if ax.is_first_col() and i!=0:
pass
else:
ax.set_yticks([])
if ax.is_last_row():
ax.set_xlabel(xlabel, fontsize=5)
else:
ax.set_xticks([])
plt.subplots_adjust(wspace=0., hspace=0.)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(5)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(5)
fig.align_ylabels()
set_tight(pad=.0, h_pad=.0, w_pad=.0, rect=(.01, 0, 1, 1)) | 5,353,682 |
def get_isotopes(loop):
"""
Given a text data loop, usually the very last one in the save frame, find and return the isotopes for naming.
Example output: [ '15N', '1H' ]
"""
# = = = Catch random error in giving the saveFrame instead
loop = loop_assert(loop, 'get_isotopes')
# For entries like hetNOE, the tags will be duplicated with a numeric suffix, one per nucleus
colsElement = get_col_tag_startswith(loop, 'Atom_type')
if np.any( [ loop[0][x] == '.' for x in colsElement] ):
# = = Entry is incomplete. Get name as backup.
print("= = WARNING: Entry does not contain Atom_type information. Using Atom_ID as backup.")
colsElement = get_col_tag_startswith(loop, 'Atom_ID')
listElement = [ loop[0][x] for x in colsElement]
#print( listElement )
for c in colsElement:
if not is_column_identical(loop,c):
print("= = ERROR in get isotopes(): the column entries for the isotopes are not identical!", file=sys.stderr)
return None
colsIsotopeNum = get_col_tag_startswith(loop,'Atom_isotope_number')
for c in colsIsotopeNum:
if not is_column_identical(loop,c):
print("= = ERROR in get isotopes(): the column entries for the isotopes are not identical!", file=sys.stderr)
return None
listIsotopeNum = [ loop[0][x] for x in colsIsotopeNum ]
if np.any( np.array(listIsotopeNum) == '.' ):
# = = Entry is incomplete. Get IDs
print("= = WARNING: Entry does not contain Atom_isotope_number information. Will guess using atom type information.")
listIsotopeNum = []
for x in listElement:
if x == 'H':
listIsotopeNum.append('1')
elif x == 'C':
listIsotopeNum.append('13')
elif x == 'N':
listIsotopeNum.append('15')
elif x == 'O':
listIsotopeNum.append('17')
else:
print("= = ERROR: Atom types is not H C, N, or O. Will bail.", file=sys.stderr)
sys.exit(1)
out=[]
for a,b in zip(listIsotopeNum, listElement):
out.append( "%s%s" % (a, b) )
return out | 5,353,683 |
def select_user(query_message, mydb):
"""
Prompt the user to select from a list of all database users.
Args:
query_message - The messages to display in the prompt
mydb - A connected MySQL connection
"""
questions = [
inquirer.List('u',
message=query_message,
choices=list_users(mydb)
)
]
return inquirer.prompt(questions)['u'] | 5,353,684 |
def runTool(plugin_name, config_dict=None, user=None, scheduled_id=None,
caption=None, unique_output=True):
"""Runs a tool and stores this "run" in the :class:`evaluation_system.model.db.UserDB`.
:type plugin_name: str
:param plugin_name: name of the referred plugin.
:type config_dict: dict or metadict
:param config_dict: The configuration used for running the tool. If it is None, the default configuration will be stored;
this might be incomplete.
:type user: :class:`evaluation_system.model.user.User`
:param user: The user starting the tool
:type scheduled_id: int
:param scheduled_id: if the process is already scheduled then put the row id here
:type caption: str
:param caption: the caption to set.
"""
plugin_name = plugin_name.lower()
if user is None:
user = User()
p = getPluginInstance(plugin_name, user)
complete_conf = None
# check whether a scheduled id is given
if scheduled_id:
config_dict = loadScheduledConf(plugin_name, scheduled_id, user)
if config_dict is None:
conf_file = user.getUserToolConfig(plugin_name)
if os.path.isfile(conf_file):
log.debug('Loading config file %s', conf_file)
with open(conf_file, 'r') as f:
complete_conf = p.readConfiguration(f)
else:
log.debug('No config file was found in %s', conf_file)
if complete_conf is None:
# at this stage we want to resolve our tokens and perform some kind of sanity check before going further
complete_conf = p.setupConfiguration(config_dict=config_dict, recursion=True)
log.debug('Running %s with %s', plugin_name, complete_conf)
rowid = 0
if scheduled_id:
user.getUserDB().upgradeStatus(scheduled_id,
user.getName(),
History.processStatus.running)
rowid = scheduled_id
elif user:
version_details = getVersion(plugin_name)
rowid = user.getUserDB().storeHistory(p,
complete_conf,
user.getName(),
History.processStatus.running,
version_details=version_details,
caption=caption)
# follow the notes
followHistoryTag(rowid, user.getName(), 'Owner')
try:
# we want that the rowid is visible to the tool
p.rowid = rowid
# In any case we have now a complete setup in complete_conf
result = p._runTool(config_dict=complete_conf,
unique_output=unique_output)
# save results when existing
if result is None:
user.getUserDB().upgradeStatus(rowid,
user.getName(),
History.processStatus.finished_no_output)
else:
# create the preview
preview_path = config.get(config.PREVIEW_PATH, None)
if preview_path:
logging.debug('Converting....')
_preview_create(plugin_name, result)
logging.debug('finished')
# write the created files to the database
logging.debug('Storing results into data base....')
user.getUserDB().storeResults(rowid, result)
logging.debug('finished')
# temporarily set all processes to finished
user.getUserDB().upgradeStatus(rowid,
user.getName(),
History.processStatus.finished)
except:
user.getUserDB().upgradeStatus(rowid,
user.getName(),
History.processStatus.broken)
raise
return result | 5,353,685 |
def generate_abl_contract_for_lateral_stage(
lateral_stage: LateralProgressionStage,
parent_blinding_xkey: CCoinExtKey,
start_block_num: int,
creditor_control_asset: CreditorAsset,
debtor_control_asset: DebtorAsset,
bitcoin_asset: BitcoinAsset,
first_stage_input_descriptor: Optional[BlindingInputDescriptor] = None
) -> int:
"""
Generate the main contract code and accompanying data,
and store all the info in vertical stage objects
"""
assert start_block_num > 0
lstage = lateral_stage
plan = lstage.plan
lstage_blinding_xkey = safe_derive(
parent_blinding_xkey, STAGE_NEXT_LEVEL_PATH
)
# Need blinding factors and input descriptors ready
# before we can generate the scripts
for vstage in lstage.vertical_stages:
blinding_xkey = safe_derive(
lstage_blinding_xkey, f'{vstage.index_m}h')
blinding_factor = hashlib.sha256(
safe_derive(blinding_xkey, STAGE_BLINDING_FACTOR_PATH)
).digest()
asset_blinding_factor = hashlib.sha256(
safe_derive(blinding_xkey, STAGE_BLINDING_ASSET_FACTOR_PATH)
).digest()
if lstage.level_n == 0 and vstage.index_m == 0:
assert first_stage_input_descriptor is not None
contract_input_descriptor = first_stage_input_descriptor
first_stage_input_descriptor = None
else:
assert first_stage_input_descriptor is None
contract_input_descriptor = BlindingInputDescriptor(
asset=plan.collateral.asset,
amount=plan.collateral.amount,
blinding_factor=Uint256(blinding_factor),
asset_blinding_factor=Uint256(asset_blinding_factor),
)
vstage.blinding_data = VerticalProgressionStageBlindingData(
blinding_xkey, contract_input_descriptor
)
collateral_grab_outs_hash = \
get_hash_of_collateral_forfeiture_checked_outs(
lstage.vertical_stages[-1],
creditor_control_asset, debtor_control_asset, bitcoin_asset)
total_vstages = 0
# Need to process in reverse, because scripts in earlier stages
# depend on scripts in later stages
for vstage in reversed(lstage.vertical_stages):
total_vstages += 1
if vstage.next_lateral_stage:
total_vstages += generate_abl_contract_for_lateral_stage(
vstage.next_lateral_stage,
vstage.blinding_data.blinding_xkey,
start_block_num,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset
)
full_repayment_cod = get_full_repayment_checked_outs_data(
vstage,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset,
)
partial_repayment_cod = get_partial_repayment_checked_outs_data(
vstage,
creditor_control_asset,
debtor_control_asset,
bitcoin_asset,
)
revoc_cod = get_revocation_tx_checked_outs_data(
vstage,
creditor_control_asset,
bitcoin_asset
)
stage_script, checked_outs_hashes = \
generate_script_and_checked_outs_hashes(
vstage,
creditor_control_asset,
debtor_control_asset,
start_block_num,
full_repayment_checked_outs_data=full_repayment_cod,
partial_repayment_checked_outs_data=partial_repayment_cod,
revoc_checked_outs_data=revoc_cod,
hash_of_collateral_grab_outputs_data=collateral_grab_outs_hash,
)
vstage.script_data = VerticalProgressionStageScriptData(
stage_script, checked_outs_hashes
)
return total_vstages | 5,353,686 |
def setUpModule():
"""Stub in get_db and reset_db for testing the simple db api."""
base.db_api = qonos.db.sqlalchemy.api
base.db_api.configure_db() | 5,353,687 |
def create_connection(language):
"""
Create a sqlite3 connection to the API database, retrying up to 100 times if the connection raises an error.
Args:
language: language used for the warning message on failure
Returns:
sqlite3 connection on success, otherwise False
"""
try:
# retries
for i in range(0, 100):
try:
return sqlite3.connect(os.path.join(os.path.dirname(os.path.dirname(__file__)),
_builder(_core_config(), _core_default_config())["api_db_name"]))
except:
time.sleep(0.01)
except:
warn(messages(language, 168))
return False | 5,353,688 |
def force_norm():
"""perform normalization simulation"""
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
resolution=resolution)
norm.init_fields()
source(norm)
flux_inc = meep_ext.add_flux_plane(norm, fcen, df, nfreq, [0,0,0], [W, W, 0])
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, decay,
pt=meep.Vector3(0,0,0), decay_by=1e-3))
return {'frequency': np.array(meep.get_flux_freqs(flux_inc)), 'area': (W)**2,
'incident': np.asarray(meep.get_fluxes(flux_inc))} | 5,353,689 |
def search(coordinates):
"""Search for closest known locations to these coordinates
"""
gd = GeocodeData()
return gd.query(coordinates) | 5,353,690 |
def get_all_apis_router(_type: str, root_path: str) -> list:
"""Return the API spec files or the schema definition files found in the swagger folder."""
swagger_path = Path(root_path)
all_files = list(x.name for x in swagger_path.glob("**/*.yaml"))
schemas_files = [x for x in all_files if "schemas" in x]
api_files = [x for x in all_files if "schemas" not in x and "main" not in x]
return api_files if _type == "api" else schemas_files | 5,353,691 |
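Usage sketch, assuming a ./swagger directory containing *.yaml files (the file names below, e.g. users_api.yaml and users_schemas.yaml, are hypothetical).
api_files = get_all_apis_router("api", "swagger")         # e.g. ['users_api.yaml']
schema_files = get_all_apis_router("schemas", "swagger")  # e.g. ['users_schemas.yaml']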
def get_cached_patches(dataset_dir=None):
"""
Finds the cached patches (stored as images) from disk and returns their paths as a list of tuples
:param dataset_dir: Path to the dataset folder
:return: List of paths to patches as tuples (path_to_left, path_to_middle, path_to_right)
"""
if dataset_dir is None:
dataset_dir = config.DATASET_DIR
cache_dir = join(dataset_dir, 'cache')
frame_paths = [join(cache_dir, x) for x in listdir(cache_dir)]
frame_paths = [x for x in frame_paths if is_image(x)]
frame_paths.sort()
tuples = []
for i in range(len(frame_paths) // config.MAX_SEQUENCE_LENGTH):
foo = (frame_paths[i * config.MAX_SEQUENCE_LENGTH + ix] for ix in range(config.MAX_SEQUENCE_LENGTH))
tuples.append(list(foo))
return tuples | 5,353,692 |
def complex_mse(y_true: tf.Tensor, y_pred: tf.Tensor):
"""
Args:
y_true: The true labels, :math:`V \in \mathbb{C}^{B \\times N}`
y_pred: The true labels, :math:`\\widehat{V} \in \mathbb{C}^{B \\times N}`
Returns:
The complex mean squared error :math:`\\boldsymbol{e} \in \mathbb{R}^B`,
where given example :math:`\\widehat{V}_i \in \mathbb{C}^N`,
we have :math:`e_i = \\frac{\|V_i - \\widehat{V}_i\|^2}{N}`.
"""
real_loss = tf.losses.mse(tf.math.real(y_true), tf.math.real(y_pred))
imag_loss = tf.losses.mse(tf.math.imag(y_true), tf.math.imag(y_pred))
return (real_loss + imag_loss) / 2 | 5,353,693 |
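A usage sketch, assuming TensorFlow 2.x, with a batch of B=2 examples and N=3 complex entries each.
import tensorflow as tf

y_true = tf.constant([[1 + 2j, 0 + 0j, 3 - 1j],
                      [2 + 0j, 1 + 1j, 0 - 2j]], dtype=tf.complex64)
y_pred = tf.constant([[1 + 1j, 0 + 0j, 3 + 0j],
                      [2 + 0j, 1 + 0j, 0 - 1j]], dtype=tf.complex64)
print(complex_mse(y_true, y_pred))  # tensor of shape (2,), one error per example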
def expand_not(tweets):
"""
DESCRIPTION:
In informal speech, which is widely used in social media, it is common to use contractions of words
(e.g., don't instead of do not).
This may result in misinterpreting the meaning of a phrase, especially in the case of negations.
This function expands these contractions and other similar ones (e.g., it's --> it is, etc.).
INPUT:
tweets: Series of a set of tweets as a python strings
OUTPUT:
Series of filtered tweets
"""
tweets = tweets.str.replace('n\'t', ' not', case=False)
tweets = tweets.str.replace('i\'m', 'i am', case=False)
tweets = tweets.str.replace('\'re', ' are', case=False)
tweets = tweets.str.replace('it\'s', 'it is', case=False)
tweets = tweets.str.replace('that\'s', 'that is', case=False)
tweets = tweets.str.replace('\'ll', ' will', case=False)
tweets = tweets.str.replace('\'l', ' will', case=False)
tweets = tweets.str.replace('\'ve', ' have', case=False)
tweets = tweets.str.replace('\'d', ' would', case=False)
tweets = tweets.str.replace('he\'s', 'he is', case=False)
tweets = tweets.str.replace('what\'s', 'what is', case=False)
tweets = tweets.str.replace('who\'s', 'who is', case=False)
tweets = tweets.str.replace('\'s', '', case=False)
for punct in ['!', '?', '.']:
regex = "(\\"+punct+"( *)){2,}"
tweets = tweets.str.replace(regex, punct+' <repeat> ', case=False)
return tweets | 5,353,694 |
def parse_filter_kw(filter_kw):
"""
Return a parsed filter keyword and boolean indicating if filter is a hashtag
Args:
:filter_kw: (str) filter keyword
Returns:
:is_hashtag: (bool) True, if 'filter_kw' is hashtag
:parsed_kw: (str) parsed 'filter_kw' (lowercase, without '#', ...)
"""
filter_kw = filter_kw.strip()
is_hashtag = filter_kw.startswith('#')
parsed_kw = parse_string(filter_kw, remove=('#', "'")).lower()
return (is_hashtag, parsed_kw) | 5,353,695 |
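Usage sketch; parse_string is assumed to be this module's own helper that strips the characters passed in remove.
is_hashtag, kw = parse_filter_kw("  #BreakingNews ")
# is_hashtag is True; kw is expected to be "breakingnews" after parsing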
def get_branch_index(BRANCHES, branch_name):
"""
Get the position of the branch name in the BRANCHES array, so the caller knows which branch to merge into next (the next one in the array).
"""
i = 0
for branch in BRANCHES:
if branch_name == branch:
return i
else:
i = i + 1 | 5,353,696 |
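Usage sketch: the returned index points at the branch itself, so the merge target is the next entry in the list; an unknown branch yields None because the loop falls through.
BRANCHES = ["develop", "staging", "master"]
assert get_branch_index(BRANCHES, "staging") == 1
assert get_branch_index(BRANCHES, "release") is None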
def prettify_save(soup_objects_list, output_file_name):
"""
Saves the results of get_soup() function to a text file.
Parameters:
-----------
soup_object_list:
list of BeautifulSoup objects to be saved to the text file
output_file_name:
entered as string with quotations and with extension .txt , used to name the output text file
This function can work independent of the rest of the library.
Note:
Unique to Windows, open() needs argument: encoding = 'utf8' for it to work.
"""
prettified_soup = [BeautifulSoup.prettify(k) for k in soup_objects_list]
custom_word_added = [m + 'BREAKHERE' for m in prettified_soup]
one_string = "".join(custom_word_added)
# unique to Windows, open() needs argument: encoding = "utf8"
with open(output_file_name, 'w', encoding='utf8') as file:
file.write(one_string)
return None | 5,353,697 |
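Usage sketch, assuming beautifulsoup4 is installed; the output file name is arbitrary.
from bs4 import BeautifulSoup

soups = [BeautifulSoup("<p>hello</p>", "html.parser"),
         BeautifulSoup("<p>world</p>", "html.parser")]
prettify_save(soups, "soups.txt")  # entries are separated by the BREAKHERE marker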
def determine_required_bytes_signed_integer(value: int) -> int:
"""
Determines the number of bytes that are required to store value
:param value: a SIGNED integer
:return: 1, 2, 4, or 8
"""
value = ensure_int(value)
if value < 0:
value *= -1
value -= 1
if (value >> 7) == 0:
return 1
if (value >> 15) == 0:
return 2
if (value >> 31) == 0:
return 4
if (value >> 63) == 0:
return 8
raise IntegerLargerThan64BitsException | 5,353,698 |
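Usage sketch; ensure_int and IntegerLargerThan64BitsException are assumed to be defined elsewhere in the same module.
assert determine_required_bytes_signed_integer(127) == 1    # fits in a signed byte
assert determine_required_bytes_signed_integer(-128) == 1   # most negative signed byte
assert determine_required_bytes_signed_integer(128) == 2
assert determine_required_bytes_signed_integer(2**31 - 1) == 4
assert determine_required_bytes_signed_integer(2**31) == 8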
def scrape_cvs():
"""Scrape and return CVS data."""
page_headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"}
page = get_resource(CVS_ROOT + CVS_VACCINE_PAGE, page_headers)
soup = BeautifulSoup(page.content, 'html.parser')
modals = [elem for elem in soup.find_all(
class_='modal__box') if elem.get('id').startswith('vaccineinfo')]
state_urls = {}
for modal in modals:
state = modal.get('id').split('-')[-1]
state_urls[state] = CVS_ROOT + \
modal.find(class_='covid-status').get('data-url')
state_dfs = []
state_headers = {
'authority': 'www.cvs.com',
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36",
'accept': '*/*',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.cvs.com/immunizations/covid-19-vaccine',
'accept-language': 'en-US,en;q=0.9',
'referrerPolicy': 'strict-origin-when-cross-origin',
'mode': 'cors',
'credentials': 'include'
}
for state, url in state_urls.items():
print(url)
state_response = get_resource(url, state_headers)
state_df = cvs_json_to_df(state, state_response.json())
state_dfs.append(state_df)
return pd.concat(state_dfs) | 5,353,699 |