content (string, 35-762k) | sha1 (string, 40) | id (int64, 0-3.66M)
---|---|---|
def get_empath_scores(text):
"""
Obtains empath analysis on the text. Takes the dictionary mapping categories to
scores, which is produced by passing the text to empath, and returns the scores.
Args:
text: string containing text to perform empath analysis on
Returns:
A list of empath scores, such that there is a score in the list for each
of empath's pre-built categories
"""
empath_dict = lexicon.analyze(text, normalize=True)
empath_scores = list(empath_dict.values())
return empath_scores | f68a55a2dc4ba98696e9df3f88aaacf73d81ff2d | 3,369 |
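A minimal usage sketch for the record above, assuming the module-level `lexicon` is an `Empath` instance from the `empath` package (the record does not show that setup):

from empath import Empath

lexicon = Empath()
scores = get_empath_scores("The team celebrated the victory at the stadium.")
print(len(scores))  # one score per built-in Empath category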
import json
import logging
def sqlite_insert(engine, table_name, data):
"""
Inserts data into a table - either one row or in bulk.
Creates the table if it does not exist.
Parameters
----------
engine: sqlalchemy engine for sqlite
table_name: string
data: dict or list of dicts
Returns
-------
bool
"""
dtype = type(data)
try:
with session_scope(engine) as session:
try:
conditionally_create_generic_table(engine, table_name)
except TableCreationException:
pass # most likely because it already exists, ignore
if dtype is list:
for row in data:
session.execute('insert into ' + table_name + ' (data) values (:values)',
{'values': json.dumps(row)})
elif dtype is dict:
# investigate: http://docs.sqlalchemy.org/en/latest/faq/performance.html
# Bulk_insert_mappings or use raw sqlite3
row = data
session.execute('insert into ' + table_name + ' (data) values (:values)',
{'values': json.dumps(row)})
return True
except IntegrityError as e:
logging.error(e)
raise DuplicateRowException
except (OperationalError, StatementError) as e:
logging.error(e)
raise InsertException
except Exception as e:
logging.error(e)
raise Exception('not sure what went wrong - could not insert data') | 430e3109d233119043bc6c5f10ba966aa08992c1 | 3,371 |
def _fit_solver(solver):
"""
Call ``fit`` on the solver. Needed for multiprocessing.
"""
return solver.fit() | 7007752777445d2cc6d476d7af1f83d6cdfe236b | 3,372 |
import types
def flatten(l):
"""
recursively turns any nested list into a regular list (using a DFS)
"""
res = []
for x in l:
if isinstance(x, list):  # types.ListType is Python 2 only; plain list works on Python 3
res += flatten(x)
else:
res.append(x)
return res | 5947566b7dfd1d03204c2a39f1e853ce812e18fe | 3,373 |
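A quick usage example for `flatten`:

nested = [1, [2, [3, 4]], 5]
print(flatten(nested))  # [1, 2, 3, 4, 5]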
def build_column_hierarchy(param_list, level_names, ts_columns, hide_levels=[]):
"""For each parameter in `param_list`, create a new column level with parameter values.
Combine this level with columns `ts_columns` using Cartesian product."""
checks.assert_same_shape(param_list, level_names, axis=0)
param_indexes = []
for i in range(len(param_list)):
if level_names[i] not in hide_levels:
param_index = index_fns.index_from_values(param_list[i], name=level_names[i])
param_indexes.append(param_index)
if len(param_indexes) > 1:
param_columns = index_fns.stack_indexes(*param_indexes)
elif len(param_indexes) == 1:
param_columns = param_indexes[0]
else:
param_columns = None
if param_columns is not None:
return index_fns.combine_indexes(param_columns, ts_columns)
return ts_columns | f19d4f93a0cfbc6ebe700285c8761c78ca5f9b1a | 3,374 |
def gradient_descent(f, xk, delta = 0.01, plot=False, F = None, axlim = 10):
"""
f: multivariable function with 1 array as parameter
xk : a vector to start descent
delta : precision of search
plot : option to plot the results or not
F : the function f expressed with 2 arrays (X, Y) as arguments, representing the columns xk[0] and xk[1]; used for plotting only (when plot == True)
axlim : limit of the plot 3 axis (x,y,z)
"""
if plot : ax = plt.axes(projection='3d')
A = []
t = perf_counter()
dk = nd.Gradient(f)(xk)
while la.norm(dk) > delta :
if plot and len(A) < 10 : A.append(xk)
xt = xk
phi = lambda s : f(xk - s * dk)
alpha = op.newton(phi, 1)
xk -= alpha * dk
if plot and len(A) < 10 : A.append(xk)
dk = nd.Gradient(f)(xk)
if la.norm(xk - xt) < delta : break
t = perf_counter() - t
print("execution time: ",t)
if plot :
for u in A:
ax.scatter(u[0], u[1], f(u), c = 'b', s = 50)
ax.scatter(xk[0], xk[1], f(xk), c = 'r', s = 50,label="optimum")
x = np.arange(-axlim, axlim, axlim/100)
y = np.arange(-axlim, axlim, axlim/100)
X, Y = np.meshgrid(x, y)
Z = F(X,Y)
ax.set_xlabel('x', labelpad=20)
ax.set_ylabel('y', labelpad=20)
ax.set_zlabel('z', labelpad=20)
surf = ax.plot_surface(X, Y, Z, cmap = plt.cm.cividis)
plt.legend()
plt.title("optimizition with Gradient Descent")
plt.show()
return xk | 99b8da92c6df296c2d02b2f0d14f38b94ea87aef | 3,375 |
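A usage sketch for `gradient_descent` under the imports the record assumes (numpy as np, numpy.linalg as la, numdifftools as nd, scipy.optimize as op, matplotlib.pyplot as plt, and perf_counter from time); the quadratic below is just a hypothetical test function:

import numpy as np

f = lambda x: x[0] ** 2 + x[1] ** 2          # convex bowl with minimum at the origin
x0 = np.array([3.0, -2.0])
x_opt = gradient_descent(f, x0, delta=1e-3)  # converges towards [0, 0]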
def _get_cells(obj):
"""Extract cells and cell_data from a vtkDataSet and sort it by types."""
cells, cell_data = {}, {}
data = _get_data(obj.GetCellData())
arr = vtk2np(obj.GetCells().GetData())
loc = vtk2np(obj.GetCellLocationsArray())
types = vtk2np(obj.GetCellTypesArray())
for typ in VTK_TYP:
if not isinstance(typ, int):
continue
cell_name = VTK_TYP[typ]
n_no = NODE_NO[cell_name]
cell_loc_i = np.where(types == typ)[0]
loc_i = loc[cell_loc_i]
# if there are no cells of the actual type continue
if len(loc_i) == 0:
# if not loc_i:
continue
arr_i = np.empty((len(loc_i), n_no), dtype=int)
for i in range(n_no):
arr_i[:, i] = arr[loc_i + i + 1]
cells[cell_name] = arr_i
cell_data_i = {}
for data_i in data:
cell_data_i[data_i] = data[data_i][cell_loc_i]
if cell_data_i != {}:
cell_data[cell_name] = cell_data_i
return cells, cell_data | 84f603d92e1548b6d9ebe33b31ac4277bed49281 | 3,376 |
def check_X(X, enforce_univariate=False, enforce_min_instances=1):
"""Validate input data.
Parameters
----------
X : pd.DataFrame
enforce_univariate : bool, optional (default=False)
Enforce that X is univariate.
enforce_min_instances : int, optional (default=1)
Enforce minimum number of instances.
Returns
-------
X : pd.DataFrame
Raises
------
ValueError
If X is an invalid input
"""
if not isinstance(X, pd.DataFrame):
raise ValueError(f"X must be a pd.DataFrame, but found: "
f"{(type(X))}")
if enforce_univariate:
_enforce_X_univariate(X)
if enforce_min_instances > 0:
_enforce_min_instances(X, min_instances=enforce_min_instances)
return X | 2022cbccfaec72cc68e2d9692d96fb3241d9991a | 3,377 |
def parse_amount(value: int) -> Decimal:
"""Return a scaled down amount."""
return Decimal(value) / Decimal(AMOUNT_SCALE_FACTOR) | 66e7668ed5da3d451644de00dc98bfb2bf8745f0 | 3,378 |
def svn_ra_do_update2(*args):
"""
svn_ra_do_update2(svn_ra_session_t session, svn_ra_reporter3_t reporter,
void report_baton, svn_revnum_t revision_to_update_to,
char update_target, svn_depth_t depth,
svn_boolean_t send_copyfrom_args, svn_delta_editor_t update_editor,
void update_baton,
apr_pool_t pool) -> svn_error_t
"""
return apply(_ra.svn_ra_do_update2, args) | 139fd5b8ea5b86ad70f056d5b53a86cc01ce3952 | 3,379 |
from ._sentiwords import tag
def sentiwords_tag(doc, output="bag"):
"""Tag doc with SentiWords polarity priors.
Performs left-to-right, longest-match annotation of token spans with
polarities from SentiWords.
Uses no part-of-speech information; when a span has multiple possible
taggings in SentiWords, the mean is returned.
Parameters
----------
doc : document or list of strings
output : string, optional
Output format. Either "bag" for a histogram (dict) of annotated token
span frequencies, or "tokens" a mixed list of strings and (list of
strings, polarity) pairs.
"""
doc = _tokenize_if_needed(fetch(doc))
tagged = tag(doc)
if output == "bag":
d = {}
for ngram, polarity in tagged:
if polarity == 0:
continue
if ngram in d:
d[ngram][1] += 1
else:
d[ngram] = [polarity, 1]
return d
elif output == "tokens":
return [ngram if polarity == 0 else (ngram, polarity)
for ngram, polarity in tagged]
else:
raise ValueError("unknown output format %r" % output) | c4769e82d9b9aff55f7d6e3de08188f5ba6501bb | 3,381 |
def _GetCommandTaskIds(command):
"""Get a command's task ids."""
# A task count is the number of tasks we put in the command queue for this
# command. We cap this number to avoid a single command with large run count
# dominating an entire cluster. If a task count is smaller than a run count,
# completed tasks will be rescheduled as needed.
task_count = min(command.run_count, MAX_TASK_COUNT)
_, request_id, _, command_id = command.key.flat()
return ["%s-%s-%s" % (request_id, command_id, i) for i in range(task_count)] | 98d6ac89e4f5569968740475eba924840170464f | 3,382 |
import uuid
import json
def save_orchestrator_response(url, jsonresponse, dryrun):
"""Given a URL and JSON response create/update the corresponding mockfile."""
endpoint = url.split("/api/")[1].rstrip("/")
try:
path, identifier = endpoint.rsplit("/", maxsplit=1)
except ValueError:
path, identifier = None, endpoint
if any(char in identifier for char in "?&="):
# Skip urls with query parameters for now (can be normalized if it's needed)
print(f"Unsupported URL parameters: {url}")
return
if any(pattern in url for pattern in TO_EXCLUDE):
print(f"Excluding URL {url}")
return
def get_id(string):
"""Defines how final URL component can be used as identifier"""
try:
parsed = uuid.UUID(string)
return str(parsed)[:8]
except ValueError:
if string.isnumeric():
return string
return None
try:
response = json.loads(jsonresponse)
except json.JSONDecodeError as e:
print(f"Invalid JSON response: {url} ({e})")
return
if (parsed_id := get_id(identifier)) is None:
# URL ends on a word "products" or "organisations"
filename = f"{identifier}.json"
else:
# URL ends on UUID or integer
if "/domain-model/" in url:
filename_prefix = "".join(c for c in response["product"]["tag"].lower() if c.isalpha())
else:
filename_prefix = ""
filename = f"{filename_prefix}-{parsed_id}.json" if filename_prefix else f"{parsed_id}.json"
if not path:
# Store in data/
fpath = DATA_ROOT / filename
print(
f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' in root directory"
)
else:
# Store in data/<subfolder>/
dpath = DATA_ROOT / path
fpath = dpath / filename
print(
f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' "
f"in {'new' if not dpath.is_dir() else 'existing'} directory '{path}'"
)
if not dpath.is_dir() and not dryrun:
dpath.mkdir(parents=True)
if not dryrun:
with fpath.open(mode="w") as handle:
json.dump(response, handle, sort_keys=True, indent=4) | aab7d3e925d7b4d695832ba7aa45bd93b3824fb1 | 3,383 |
import math
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in range(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu) | e0978e3b05513c32ac55f72b298e8752eb887fb1 | 3,385 |
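The record above relies on `numpy as np` and a helper `_get_ngrams_with_counter` that it does not include. The sketch below shows a plausible version of that helper (as found in common BLEU implementations) together with a small call, purely as an illustration:

import collections
import numpy as np

def _get_ngrams_with_counter(segment, max_order):
    """Count all n-grams up to max_order in a list of tokens."""
    ngram_counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(len(segment) - order + 1):
            ngram_counts[tuple(segment[i:i + order])] += 1
    return ngram_counts

reference = [["the", "cat", "sat", "on", "the", "mat"]]
translation = [["the", "cat", "sat", "on", "mat"]]
print(compute_bleu(reference, translation))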
def flip(inputDirection: direction) -> direction:
"""
Chooses what part of the general pointer to flip, by DP%2 == CC rule, providing the following flow:
(0,0) -> (0,1)
(0,1) -> (1,1)
(1,1) -> (1,0)
(1,0) -> (2,0)
(2,0) -> (2,1)
(2,1) -> (3,1)
(3,1) -> (3,0)
(3,0) -> (0,0)
:param inputDirection: Original state of the pointers
:return: Tuple of ints containing new pointers
"""
if inputDirection.pointers[0] % 2 == inputDirection.pointers[1]:
return direction((inputDirection.pointers[0], flipCC(inputDirection.pointers[1])))
return direction((flipDP(inputDirection.pointers[0]), inputDirection.pointers[1])) | 44797849c14736de7380c5169e29bc9095f11a45 | 3,386 |
def save(request):
"""Update the column levels in campaign_tree table with the user's input from the data warehouse frontend."""
if any(request["changes"]):
query = 'UPDATE campaign_tree SET '
query += ', '.join([f"""levels[{index + 1}] = trim(regexp_replace(%s, '\s+', ' ', 'g'))"""
for index, change in enumerate(request["changes"])
if change])
where_clause, variables = _build_where_clause(request)
query += ' ' + where_clause
with mara_db.postgresql.postgres_cursor_context('mara') as cursor: # type: psycopg2.extensions.cursor
cursor.execute(query, tuple([change for change in request['changes'] if change] + variables))
return f'Successfully updated {cursor.rowcount} rows: <tt>{str(cursor.query.decode("utf-8"))}</tt>'
else:
return 'No changes to be made' | 7cc75024db3de8596bc685439d02a1023bfbae25 | 3,387 |
def _print_model(server, user_key, device_type_model):
"""
Print the model for a given device type
:param device_type_model: Device type ID to print the model for
"""
name = None
model = []
parameters = _get_parameters(server, user_key)
parameters = parameters['deviceParams']
try:
device_type_model = int(device_type_model)
except:
print(Color.RED + 'Please provide an integer device type.' + Color.END + '\n')
return 0
if device_type_model == 22 or device_type_model == 23 or device_type_model == 24:
if device_type_model == 22:
name = 'Web Camera'
elif device_type_model == 23:
name = 'Android Camera'
elif device_type_model == 24:
name = 'iOS Camera'
model = ['accessCameraSettings', 'audioStreaming', 'videoStreaming', 'ppc.hdStatus', 'ppc.rapidMotionStatus', 'batteryLevel', 'ppc.charging', 'motionStatus', 'selectedCamera', 'ppc.autoFocus', 'ppc.recordSeconds', 'ppc.motionSensitivity', 'version', 'ppc.robotConnected', 'ppc.robotMotionDirection', 'ppc.robotOrientation', 'ppc.robotVantageSphericalCoordinates', 'ppc.robotVantageTimer', 'ppc.robotVantageConfigurationStatus', 'ppc.robotVantageName', 'ppc.robotVantageSequence', 'ppc.robotVantageMoveToIndex', 'ppc.availableBytes', 'twitterAutoShare', 'twitterDescription', 'ppc.twitterReminder', 'ppc.twitterStatus', 'ppc.motionCountDownTime', 'ppc.blackoutScreenOn', 'ppc.warningStatus', 'ppc.warningText', 'ppc.recordFullDuration', 'ppc.flashOn', 'streamError', 'ppc.streamStatus', 'model', 'timeZoneId', 'ppc.motionActivity', 'ppc.outputVolume', 'ppc.captureImage', 'recordStatus', 'ppc.alarm', 'ppc.countdown', 'ppc.playSound', 'ppc.motionAlarm', 'ppc.cameraName', 'ppc.throttleStatus']
elif device_type_model == 31:
name = 'Gateway'
model = ['firmware', 'ipAddress', 'manufacturer', 'model', 'numberOfChildren', 'permitJoining', 'zbChannel', 'reboot', 'cloud', 'firmwareUpdateStatus', 'firmwareUrl', 'firmwareChecksum']
elif device_type_model == 130:
name = 'LintAlert PRO Plus'
model = ['sig.led', 'sig.pressure', 'sig.wciPressure', 'sig.status', 'sig.runtime', 'sig.maxled', 'sig.curMaxLed', 'sig.type', 'sig.table', 'sig.clean', 'waterLeak', 'version', 'rssi']
elif device_type_model == 4200:
name = 'Netatmo Healthy Home Coach'
model = ['degC', 'co2', 'relativeHumidity', 'noise', 'firmware', 'wifiSignal', 'pressure', 'nam.healthIdx']
elif device_type_model == 4201:
name = 'Netatmo Weather Station Indoor Module'
model = ['degC', 'co2', 'relativeHumidity', 'noise', 'pressure', 'firmware', 'wifiSignal']
elif device_type_model == 4202:
name = 'Netatmo Weather Station Outdoor Module'
model = ['degC', 'relativeHumidity', 'firmware', 'signalStrength', 'batteryLevel']
elif device_type_model == 4204:
name = 'Netatmo Welcome'
model = ['status', 'ipc.sdStatus', 'ppc.charging', 'ipc.mainVideoUrl']
elif device_type_model == 4220:
name = 'Sensibo'
model = ['degC', 'relativeHumidity', 'powerStatus', 'systemMode', 'coolingSetpoint', 'fanMode', 'swingMode', 'systemModeValues', 'fanModeValues', 'swingValues', 'tempValues']
elif device_type_model == 9001:
name = 'GE Dimmer Switch'
model = ['currentLevel', 'state', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9002:
name = 'Siren'
model = ['ppc.alarmWarn', 'ppc.alarmDuration', 'ppc.alarmStrobe', 'ppc.alarmSquawk', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9003:
name = 'Temperature & Humidity Sensor'
model = ['relativeHumidity', 'degC', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9006:
name = 'Fire Alarm'
model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9007:
name = 'Smoke Detector'
model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9008:
name = 'Heat Detector'
model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9010:
name = 'Smart Lock'
model = ['degC', 'lockStatus', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10014:
name = 'Entry Sensor'
model = ['doorStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10017:
name = 'Water Sensor'
model = ['waterLeak', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10019:
name = 'Touch Sensor'
model = ['vibrationStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10031:
name = 'Gateway'
model = ['firmware', 'ipAddress', 'model', 'numberOfChildren', 'permitJoining', 'zbChannel']
elif device_type_model == 10033:
name = 'Temperature Sensor'
model = ['degC', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10034:
name = 'Humidity Sensor'
model = ['relativeHumidity', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10035:
name = 'Smart Plug'
model = ['power', 'energy', 'outletStatus', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10036:
name = 'Smart Bulb'
model = ['currentLevel', 'state', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10037:
name = 'Thermostat'
model = ['degC', 'fanModeSequence', 'systemMode', 'controlSequenceOfOperation', 'coolingSetpoint', 'heatingSetpoint', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10038:
name = 'Motion Sensor'
model = ['motionStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
if len(model) > 0:
print(Color.GREEN + name + Color.END)
for m in model:
description = ''
for p in parameters:
if p['name'] == m:
description = '('
if 'systemUnit' in p:
description += p['systemUnit'] + ', '
if p['numeric']:
description += 'numeric'
else:
description += 'non-numeric'
if 'description' in p:
description += ', ' + p['description']
description += ')'
print(' ' + Color.BOLD + m + Color.END + ' ' + description)
else:
print(Color.RED + 'This device type does not yet have a model defined.' + Color.END)
return | b901cb21d39ac0fce1d8a60c3926d8b274ce5189 | 3,388 |
def parse_ipv6_addresses(text):
"""."""
addresses = ioc_grammars.ipv6_address.searchString(text)
return _listify(addresses) | 6177bc5fcb3b6613e945a7c63931d88a12d372cd | 3,390 |
def as_jenks_caspall_sampled(*args, **kwargs):
"""
Generate Jenks-Caspall Sampled classes from the provided queryset. If the queryset
is empty, no class breaks are returned. For more information on the Jenks
Caspall Sampled classifier, please visit:
U{http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Jenks_Caspall_Sampled}
@type queryset: QuerySet
@param queryset: The query set that contains the entire distribution of
data values.
@type field: string
@param field: The name of the field on the model in the queryset that
contains the data values.
@type nclasses: integer
@param nclasses: The number of class breaks desired.
@type geofield: string
@param geofield: The name of the geometry field. Defaults to 'geom'.
@rtype: L{sld.StyledLayerDescriptor}
@returns: An SLD object that represents the class breaks.
"""
return _as_classification(Jenks_Caspall_Sampled, *args, **kwargs) | 7a271d3c48b6d813cacc9502f214d2045d8accfe | 3,391 |
def positive_dice_parse(dice: str) -> str:
"""
:param dice: Formatted string, where each line is blank or matches
t: [(t, )*t]
t = (0|T|2A|SA|2S|S|A)
(note: T stands for Triumph here)
:return: Formatted string matching above, except tokens are replaced
with their corresponding values in the 4-tuple system,
(successes, advantages, triumphs, despairs)
"""
return dice.replace("0", "(0, 0, 0, 0)")\
.replace("T", "(1, 0, 1, 0)")\
.replace("2A", "(0, 2, 0, 0)")\
.replace("SA", "(1, 1, 0, 0)")\
.replace("2S", "(2, 0, 0, 0)")\
.replace("S", "(1, 0, 0, 0)")\
.replace("A", "(0, 1, 0, 0)") | 5b266a4025706bfc8f4deabe67735a32f4b0785d | 3,392 |
def fmt_title(text):
"""Article title formatter.
Except functional words, first letter uppercase. Example:
"Google Killing Annoying Browsing Feature"
**中文文档**
文章标题的格式, 除了虚词, 每个英文单词的第一个字母大写。
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk for chunk in text.split(" ") if len(chunk) >= 1]
new_chunks = list()
for chunk in chunks:
if chunk not in _function_words:
chunk = chunk[0].upper() + chunk[1:]
new_chunks.append(chunk)
new_chunks[0] = new_chunks[0][0].upper() + new_chunks[0][1:]
return " ".join(new_chunks) | 44474f8f92888904a56f63bdcf1031f2d7c472e1 | 3,393 |
def insn_add_off_drefs(*args):
"""
insn_add_off_drefs(insn, x, type, outf) -> ea_t
"""
return _ida_ua.insn_add_off_drefs(*args) | 684af1cea2deafffc33b007f064a67fb89ffd54f | 3,394 |
def lambda_handler(event, context):
"""Lambda function that responds to changes in labeling job status, updating
the corresponding dynamo db tables and publishing to sns after a job is cancelled.
Parameters
----------
event: dict, required API gateway request with an input SQS arn, output SQS arn
context: object, required Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
Lambda Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
log.log_request_and_context(event, context)
job_status = event["status"]
job_arns = event["job_arns"]
if len(job_arns) != 1:
raise ValueError("incorrect number of job arns in event: ", job_arns)
job_arn = job_arns[0]
# We received a new status for the job_arn.
process_new_status(job_arn, job_status, context.invoked_function_arn)
return "success" | 3640bded7316a573d0740084e8006a876bb7300c | 3,395 |
from typing import Union
def get_answer(question: Union[dns.message.Message, bytes],
server: Union[IPv4Address, IPv6Address],
port: int = 53,
tcp: bool = False,
timeout: int = pydnstest.mock_client.SOCKET_OPERATION_TIMEOUT) -> dns.message.Message:
"""Get an DNS message with answer with specific query"""
sock = pydnstest.mock_client.setup_socket(str(server), port, tcp=tcp)
with sock:
pydnstest.mock_client.send_query(sock, question)
return pydnstest.mock_client.get_dns_message(sock, timeout=timeout) | 95493396393ce09e4610fb32c063d0d9676b14ea | 3,396 |
def intersectionPoint(line1, line2):
"""
Determines the intersection point between two lines of the form r = x*cos(theta) + y*sin(theta)
"""
y = (line2[0][0]*np.cos(line1[0][1]) - line1[0][0]*np.cos(line2[0][1]))/(np.sin(line2[0][1])*np.cos(line1[0][1]) - np.sin(line1[0][1])*np.cos(line2[0][1]))
x = (line1[0][0] - y*np.sin(line1[0][1]))/np.cos(line1[0][1])
return [x,y] | f23ec1960de85b72724388747ec8157925eefae1 | 3,397 |
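A small check of `intersectionPoint` (numpy as np is assumed, as in the record); both lines are given in Hough form (rho, theta):

import numpy as np

line1 = [[1.0, 0.0]]           # x = 1
line2 = [[2.0, np.pi / 2.0]]   # y = 2
print(intersectionPoint(line1, line2))  # approximately [1.0, 2.0]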
def _remove_keywords(d):
"""
Copy the dict, filtering out reserved keywords.
Parameters
----------
d : dict
"""
return { k:v for k, v in iteritems(d) if k not in RESERVED } | 0eb7ed59898c3ec323574d0068af745250aef63b | 3,398 |
def build_trib_exp(trib_identifier, trib_key_field):
"""Establishes a SQL query expresion associating a given tributary id"""
return '"{0}"'.format(trib_key_field) + " LIKE '%{0}%'".format(trib_identifier) | 792d5e4237268410f050323ff1748246a5cdee5d | 3,399 |
import torch
def train_epoch(loader, vae, optimizer, device, epoch_idx, log_interval,
loss_weights, stats_logger, clip_gradients=None):
"""Train VAE for an epoch"""
vae.train()
train_losses = {}
train_total_loss = 0
for batch_idx, data in enumerate(loader):
data = data.to(device).float()
target = data
optimizer.zero_grad()
decoder_output, z, mu, logvar = vae(data)
losses = vae.loss(decoder_output, target, z, mu, logvar)
total_loss = sum(loss_weights.get(loss_name, 1) * loss
for loss_name, loss in losses.items()
if '_unweighted' not in loss_name)
total_loss.backward()
if clip_gradients is not None:
torch.nn.utils.clip_grad_value_(vae.parameters(), clip_gradients)
optimizer.step()
train_total_loss += total_loss.item() * len(data)
for name, loss in losses.items():
train_loss = train_losses.setdefault(name, 0)
train_losses[name] = train_loss + loss.item() * len(data)
if batch_idx % log_interval == 0:
s = ('Train Epoch: {} [{}/{} ({:.0f}%)]\t'
.format(epoch_idx,
batch_idx * len(data),
len(loader.dataset),
100. * batch_idx / len(loader)))
s += ', '.join('Loss {}: {:.7f}'.format(name, loss.item())
for name, loss in losses.items())
print(s)
stats = {name: loss / len(loader.dataset)
for name, loss in train_losses.items()}
stats['total_loss'] = train_total_loss / len(loader.dataset)
s = ('====> Epoch: {} Avg. total loss: {:.7f}, '
.format(epoch_idx, stats['total_loss']))
s += ', '.join('{} loss: {:.7f}'.format(name, loss)
for name, loss in stats.items() if name != 'total_loss')
print(s)
# Add weighted losses for logging
for name, loss in train_losses.items():
weight = loss_weights.get(name, 1)
stats['weighted_' + name] = weight * loss / len(loader.dataset)
return stats | e846987844933359a67f7b6581a8429ef88bfb0b | 3,400 |
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
def phased_multi_axes(times, data, std, ephemeris, thin=1,
colours='midnightblue', ylim_shrink=0.8,
subplot_kw=None, gridspec_kw=None, **kws):
"""
Parameters
----------
times
data
std
ephemeris
thin
colours
subplot_kw
gridspec_kw
Returns
-------
"""
# sharex=True, # not sharing x since it shares
# all the ticks which is NOT desired here.
# instead set range for all
# NOTE: could try:
# for tck in ax.xaxis.get_major_ticks():
# tck.label1.set_visible(True)
n = len(times)
fig, axes = plt.subplots(n, 1,
sharey=True,
subplot_kw=subplot_kw,
gridspec_kw=gridspec_kw
)
# hack to get dual axes on topmost
pos = axes[0].get_position()
axes[0].remove()
ax = fig.axes[0] = axes[0] = SubplotHost(fig, n, 1, 1, **subplot_kw)
axp = make_twin(ax, 45, ephemeris.P)
fig.add_subplot(ax)
ax.set_position(pos)
# get colours
if not isinstance(colours, (list, tuple, np.ndarray)):
colours = [colours] * n
# plot options
opts = dict(fmt='o', ms=1, alpha=0.75, clip_on=False)
opts.update(**kws)
# do plotting
s = np.s_[::thin]
xlim = [np.inf, -np.inf]
ylim = [np.inf, -np.inf]
for i, (ax, t, y, u) in enumerate(zip(axes, times, data, std)):
first = (i == 0)
last = (i == n - 1)
#
phase = ephemeris.phase(t)
phase -= max(np.floor(phase[0]) + 1, 0)
if np.all(phase < 0):
phase += 1
ebc = ax.errorbar(phase[s], y[s], u if u is None else u[s],
color=colours[i], **opts)
xlim = [min(xlim[0], phase[0]),
max(xlim[1], phase[-1])]
ylim = [min(ylim[0], y.min()),
max(ylim[1], y.max())]
# ticks
ax.tick_params('y', which='minor', length=2.5, left=True, right=True)
ax.tick_params('y', which='major', length=5, left=True, right=True)
ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
if last:
ax.tick_params('x', which='minor', length=2.5, bottom=(not first),
top=(not last))
ax.tick_params('x', which='major', length=5, bottom=(not first),
top=(not last))
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
else:
ax.tick_params('x', length=0)
# remove top & bottom spines
if not first:
ax.spines['top'].set_visible(False)
if not last:
ax.spines['bottom'].set_visible(False)
ax.xaxis.set_ticklabels([])
ax.tick_params(labelright=True, labelleft=True)
ax.grid(True)
# axes limits
stretch = np.ptp(xlim) * 0.025
xlim = np.add(xlim, [-stretch, stretch])
ylim[1] *= ylim_shrink
for ax in axes:
ax.set(xlim=xlim, ylim=ylim)
# axes[0].set_ylim(-0.15, 1.65)
# x label
axes_label_font_spec = dict(weight='bold', size=14)
ax.set_xlabel('Orbital Phase', fontdict=axes_label_font_spec)
# y label
y_middle = 0.5 # (fig.subplotpars.top - fig.subplotpars.bottom) / 2
for x, va in zip((0.01, 1), ('top', 'bottom')):
fig.text(x, y_middle, 'Relative Flux', axes_label_font_spec,
rotation=90, rotation_mode='anchor',
ha='center', va=va)
# top ticks
# axp.xaxis.set_ticks(np.r_[-2.5:3.5:0.5])
axp.set_xlabel('Time (hours)', fontdict=dict(weight='bold'))
axp.tick_params('x', which='minor', length=2.5, bottom=False,
top=True)
return fig | 32d2da62acde2a2424c310e1bd0196dbac9309cf | 3,401 |
def get_delta(K):
"""This function returns the delta matrix needed calculting Pj = delta*S + (1-delta)*(1-S)
Args:
inputs:
K: Integers below 2^K will be considered
outputs:
delta: Matrix containing the binary codes of the numbers 1 to 2^K - 1, each arranged row-wise. shape [(2^K - 1) x K]
one_minus_delta: Matrix containing complements of those binary codes, each arranged row-wise. shape [(2^K - 1) x K]
"""
delta = np.arange(1, 2 ** K)[:, np.newaxis] >> np.arange(K)[::-1] & 1
# all_ones = np.array(
# [list(np.binary_repr(2 ** int(np.ceil(np.log2(1 + x))) - 1, K)) for x in
# range(1, 2 ** K)], dtype=int)
all_ones = np.array([[1 for _ in range(K)] for _ in range(2**K-1)])
one_minus_delta = all_ones - delta
return delta, one_minus_delta | 84e72790024c7294e715dd5efc03f001a7ab887d | 3,402 |
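A quick check of `get_delta` for K = 2 (numpy as np assumed, as in the record):

import numpy as np

delta, one_minus_delta = get_delta(2)
print(delta)            # [[0 1] [1 0] [1 1]] -- the binary codes of 1..3
print(one_minus_delta)  # [[1 0] [0 1] [0 0]]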
def string_split_readable(inp, length):
"""
Convenience function to chunk a string into parts of a certain length,
whilst being wary of spaces.
This means that chunks will only be split on spaces, which means some
chunks will be shorter, but it also means that the resulting list will
only contain readable strings.
ValueError is thrown if there's a word that's longer than the max chunk
size.
:param inp: The string to be split
:param length: Maximum length of the chunks to return
:return: List containing the split chunks
"""
done = []
current = ""
for word in inp.split():
if len(current) == length:
done.append(current)
current = ""
if len(word) > length:
raise ValueError(_("Word %s is longer than %s characters") %
(word, length))
else:
if len(current + word) > length:
done.append(current)
current = ""
current += word
if len(current) <= (length - 1):
current += " "
if len(current):
done.append(current)
return done | 1f1d3641cc293754c174d32d397dab252c009eca | 3,403 |
import torch
def get_similarity_transform_matrix(
from_pts: torch.Tensor, to_pts: torch.Tensor) -> torch.Tensor:
"""
Args:
from_pts, to_pts: b x n x 2
Returns:
torch.Tensor: b x 3 x 3
"""
mfrom = from_pts.mean(dim=1, keepdim=True) # b x 1 x 2
mto = to_pts.mean(dim=1, keepdim=True) # b x 1 x 2
a1 = (from_pts - mfrom).square().sum([1, 2], keepdim=False) # b
c1 = ((to_pts - mto) * (from_pts - mfrom)).sum([1, 2], keepdim=False) # b
to_delta = to_pts - mto
from_delta = from_pts - mfrom
c2 = (to_delta[:, :, 0] * from_delta[:, :, 1] - to_delta[:,
:, 1] * from_delta[:, :, 0]).sum([1], keepdim=False) # b
a = c1 / a1
b = c2 / a1
dx = mto[:, 0, 0] - a * mfrom[:, 0, 0] - b * mfrom[:, 0, 1] # b
dy = mto[:, 0, 1] + b * mfrom[:, 0, 0] - a * mfrom[:, 0, 1] # b
ones_pl = torch.ones_like(a1)
zeros_pl = torch.zeros_like(a1)
return torch.stack([
a, b, dx,
-b, a, dy,
zeros_pl, zeros_pl, ones_pl,
], dim=-1).reshape(-1, 3, 3) | 76524a1f85644cfedfda9dd60497768614a058b0 | 3,404 |
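A sanity-check sketch for `get_similarity_transform_matrix`: for a pure translation the result should be an identity rotation/scale with the shift in the last column:

import torch

from_pts = torch.tensor([[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]])  # b=1, n=3
to_pts = from_pts + torch.tensor([1.0, 2.0])                     # shift by (1, 2)
M = get_similarity_transform_matrix(from_pts, to_pts)
print(M[0])  # approximately [[1, 0, 1], [0, 1, 2], [0, 0, 1]]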
def get_current_daily_puzzle(**kwargs) -> ChessDotComResponse:
"""
:returns: ``ChessDotComResponse`` object containing
information about the daily puzzle found in www.chess.com.
"""
return Resource(
uri = "/puzzle",
top_level_attr = "puzzle",
**kwargs
) | 733ce2eaa45b773cdfc04395ceb4dbe101ae8b78 | 3,405 |
def stroke_negative():
"""
render template if user is predicted negative for stroke
"""
return render_template("negative.html") | 2f1a07b57b19143e6755f3067c4923bb7231fb89 | 3,406 |
import sirepo.sim_data
def default_data(sim_type):
"""New simulation base data
Args:
sim_type (str): simulation type
Returns:
dict: simulation data
"""
return open_json_file(
sim_type,
path=sirepo.sim_data.get_class(sim_type).resource_path(f'default-data{sirepo.const.JSON_SUFFIX}')
) | 47791f63a6b6c636d8e0a5513e47ab10bd2db209 | 3,407 |
def get_instance_name_to_id_map(instance_info):
"""
generate instance_name to instance_id map.
Every instance without a Name tag will be given a key 'unnamedN', where N is an incrementing count of unnamed instances.
"""
instance_name_to_id = {}
unknown_instance_count = 0
for instance_id in instance_info:
instance = instance_info[instance_id]
instance_name = "unnamed" + str(unknown_instance_count)
if "Tags" in instance:
for tag in instance["Tags"]:
if tag["Key"] == "Name":
instance_name = tag["Value"]
if instance_name == "unnamed" + str(unknown_instance_count):
unknown_instance_count = unknown_instance_count + 1
instance_name_to_id[instance_name] = instance["InstanceId"]
return instance_name_to_id | 293923476a19362fbbc2b3bb0b34bc35523bdfa1 | 3,408 |
def log_get_stdio_record(log):
"""
Returns a darshan log record for STDIO.
Args:
log: handle returned by darshan.open
Returns:
dict: log record
"""
return log_get_generic_record(log, "STDIO", "struct darshan_stdio_file **") | 6438d1ca88357cb3928492ddb89c4beab643f9fb | 3,409 |
def generate_spiral2d(nspiral=1000,
ntotal=500,
nsample=100,
start=0.,
stop=1, # approximately equal to 6pi
noise_std=.1,
a=0.,
b=1.,
savefig=True):
"""Parametric formula for 2d spiral is `r = a + b * theta`.
Args:
nspiral: number of spirals, i.e. batch dimension
ntotal: total number of datapoints per spiral
nsample: number of sampled datapoints for model fitting per spiral
start: spiral starting theta value
stop: spiral ending theta value
noise_std: observation noise standard deviation
a, b: parameters of the Archimedean spiral
savefig: plot the ground truth for sanity check
Returns:
Tuple where first element is true trajectory of size (nspiral, ntotal, 2),
second element is noisy observations of size (nspiral, nsample, 2),
third element is timestamps of size (ntotal,),
and fourth element is timestamps of size (nsample,)
"""
# offset timestamps by 1 (in zs_cw below) to avoid division by 0
orig_ts = np.linspace(start, stop, num=ntotal)
samp_ts = orig_ts[:nsample]
# generate clock-wise and counter clock-wise spirals in observation space
# with two sets of time-invariant latent dynamics
zs_cw = stop + 1. - orig_ts
rs_cw = a + b * 50. / zs_cw
xs, ys = rs_cw * np.cos(zs_cw) - 5., rs_cw * np.sin(zs_cw)
orig_traj_cw = np.stack((xs, ys), axis=1)
zs_cc = orig_ts
rw_cc = a + b * zs_cc
xs, ys = rw_cc * np.cos(zs_cc) + 5., rw_cc * np.sin(zs_cc)
orig_traj_cc = np.stack((xs, ys), axis=1)
if savefig:
plt.figure()
plt.plot(orig_traj_cw[:, 0], orig_traj_cw[:, 1], label='clock')
plt.plot(orig_traj_cc[:, 0], orig_traj_cc[:, 1], label='counter clock')
plt.legend()
plt.savefig('./ground_truth.png', dpi=500)
print('Saved ground truth spiral at {}'.format('./ground_truth.png'))
# sample starting timestamps
orig_trajs = []
samp_trajs = []
for _ in range(nspiral):
# don't sample t0 very near the start or the end
t0_idx = npr.multinomial(
1, [1. / (ntotal - 2. * nsample)] * (ntotal - int(2 * nsample)))
t0_idx = np.argmax(t0_idx) + nsample
cc = bool(npr.rand() > .5) # uniformly select rotation
orig_traj = orig_traj_cc if cc else orig_traj_cw
orig_trajs.append(orig_traj)
samp_traj = orig_traj[t0_idx:t0_idx + nsample, :].copy()
samp_traj += npr.randn(*samp_traj.shape) * noise_std
samp_trajs.append(samp_traj)
# batching for sample trajectories is good for RNN; batching for original
# trajectories only for ease of indexing
orig_trajs = np.stack(orig_trajs, axis=0)
samp_trajs = np.stack(samp_trajs, axis=0)
return orig_trajs, samp_trajs, orig_ts, samp_ts | 4d5129f651fd3a817c9be3beb9c2358895dd3654 | 3,411 |
import math
from scipy.stats import norm
def AUC_confidence(auc_value, num, interval=0.95):
"""
Calculate upper and lower 95% CI for area under the roc curve
Inspired by https://stats.stackexchange.com/questions/18887
:param auc_value: area under the ROC curve
:param num: number of data points
:param interval: confidence interval (0-1.0)
:return: lower bound, upper bound
"""
stderr = 1.0 / math.sqrt(num - 3)
z_score = norm.ppf(interval)
delta = z_score * stderr
lower = math.tanh(math.atanh(auc_value) - delta)
upper = math.tanh(math.atanh(auc_value) + delta)
return lower, upper | 5beab0e62171d49dcfb0fbd126243e4906787273 | 3,412 |
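A usage example for `AUC_confidence`:

lower, upper = AUC_confidence(0.85, 100)
print("AUC 0.85, n=100: 95% CI [{:.3f}, {:.3f}]".format(lower, upper))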
def add_data(data):
""" This adds data """
item = data
db.insert(data)
return 'chain updated' | 52efd328097c95768de7f049335dbad9761e5715 | 3,413 |
from typing import Counter
def build_node_to_name_map(head):
"""
:type head: DecisionGraphNode
:return:
"""
node_to_name_map = {}
name_to_next_idx_map = Counter()
def add_node_name(node):
assert node not in node_to_name_map
node_type_name = node.get_node_type_name()
idx = name_to_next_idx_map[node_type_name]
name_to_next_idx_map[node_type_name] += 1
name = "{}_{}".format(node_type_name, idx)
node_to_name_map[node] = name
bfs(head, add_node_name)
return node_to_name_map | 9d4b21317030c30539a5ec5947e574e3bd4fdd60 | 3,415 |
def ReduceFloat(f, op=None):
"""Reduce a single float value over MPI"""
if not hasMPI:
raise Exception("mpi4py required for Reduce operations: not found")
if op is None:
op = MPI.SUM
fa = np.array([f]) # can only reduce over numpy arrays
MPI.COMM_WORLD.Allreduce(MPI.IN_PLACE,
fa,
op=op)
return fa[0] | 12ca088e19a20eed145e1a90d8d88941f5d249ac | 3,416 |
def GetVerificationStepsKeyName(name):
"""Returns a str used to uniquely identify a verification steps."""
return 'VerificationSteps_' + name | e50e9bd7b586d8bbfaf8902ce343d35d752948a4 | 3,417 |
def annotate_ms1_peaks(ms1_data, ms2_data, analyte_list):
"""Interpolate MS1 intensities for the time points for the MS2 scans for the largest mass peak in each analyte.
Use relative changes in intensity between interpolated MS1 data and real MS2 data to find MS2 peaks that go with
each analyte. """
ms2_data["analyte_id"] = None
# Extract list of unique scan numbers and corresponding retention times
ms2_scans = ms2_data[["scan", "rt"]].drop_duplicates().sort_values(by=["scan"])
for analyte in analyte_list:
max_peak_data = ms1_data[ms1_data["peak_id"] == analyte.max_peak_id][["scan", "rt", "intensity"]].sort_values(by=["scan"])
interpolated_range = ms2_scans[ms2_scans["scan"].between(max_peak_data["scan"].min(), max_peak_data["scan"].max())].copy()
if len(interpolated_range.index) >= config.matched_scan_minimum:
if len(max_peak_data.index) > 3:
tck = interpolate.splrep(max_peak_data["rt"].to_numpy(), max_peak_data["intensity"].to_numpy(), s=0)
elif len(max_peak_data.index) == 3:
tck = interpolate.splrep(max_peak_data["rt"].to_numpy(), max_peak_data["intensity"].to_numpy(), s=0, k=2)
else:
continue
interpolated_intensities = interpolate.splev(interpolated_range["rt"].to_numpy(), tck, der=0)
interpolated_range["intensity"] = interpolated_intensities
ms2_data = ms2_to_analyte_vectorized(ms2_data,
interpolated_range[["scan", "intensity"]],
analyte.analyte_id)
else:
continue
return ms2_data | 32e0712ed27d802d99290cf01ba1f5f0dc07bae2 | 3,418 |
def split_tblastn_hits_into_separate_genes(query_res_obj, max_gap):
"""Take a SearchIO QueryResult object and return a new object with hits
split into groups of HSPs that represent distinct genes. This is important,
because there may be multiple paralogous genes present in a single
nucleotide subject sequence (such as a chromosome or scaffold).
"""
# Print message.
print('\n\tSearch program was tblastn.\n\tChecking number of distinct genes represented by HSPs.\n')
# Copy the query result object.
#query_res_obj2 = copy.deepcopy(query_res_obj)
# Compile a list of all HSP clusters.
# Display a simple visualization of HSP location.
# List hits and HSPs in original object.
num_dots = 150
all_hsp_clusters = []
hit_num = 0
for hit in query_res_obj:
hit_num += 1
print('\tQuery: ' + hit.query_id)
print('\tHit '+ str(hit_num) + ': ' + hit.id + ' ' + hit.description)
print('\t' + 'HSP positions in subject sequence (1 dot = ' +\
str(int(hit.seq_len / num_dots)) + ' bp):')
print('\t ' + '0' + ' ' * (num_dots -2) + str(hit.seq_len))
print('\t ' + 'v' + ' ' * (num_dots -2) + 'v')
print('\t ' + '.' * num_dots + ' ' + 'Query range:')
# Make a list of hsps.
hsps = []
for hsp in hit:
hsps.append(hsp)
# Sort the HSPs.
hsps2 = sorted(hsps, key=lambda x: x.hit_start)
# Display the HSPs.
for hsp in hsps2:
string = '\t'
sign = None
if hsp.hit_frame > 0:
sign = '+'
elif hsp.hit_frame < 0:
sign = '-'
prepend_dots = '.' * int((hsp.hit_start*num_dots)/(hit.seq_len))
string = string + sign + prepend_dots
span_string = str(hsp.hit_start) + ', ' + str(hsp.hit_end)
string = string + span_string
string = string + '.' * max([0, num_dots - len(prepend_dots) - len(span_string)])
string = string + ' ' + str(hsp.query_range) #+ ' ' + str(hsp.evalue)
print(string)
#print(hsp.hit.seq)
print('\n')
# Generate an expanded list of hit objects.
# Recursively find clusters of HSPs that likely represent different
# genes, and return as a list of lists.
hsp_clusters = get_hsp_clusters(hit, max_gap)
all_hsp_clusters = all_hsp_clusters + hsp_clusters
# Display HSPs in each cluster.
cluster_num = 0
for clusterplus in hsp_clusters:
cluster = clusterplus[0]
cluster_num += 1
# Call function for printing visualization.
print_cluster(clusterplus, hit_num, cluster_num, num_dots) #***
## ***Redundant?:
## Check that the clusters do not overlap with each other on the subject
## sequence.
#for cluster1 in hsp_clusters:
# for cluster2 in hsp_clusters:
# if cluster1[0] != cluster2[0]:
# if clusters_overlap(cluster1[0], cluster2[0]):
# # Visualize overlapping clusters (for troubleshooting).
# startend = get_cluster_range(cluster1[0] + cluster2[0])
# print('Overlapping clusters:')
# print_cluster(cluster1,\
# str(get_cluster_range(cluster1[0])),\
# cluster_num, num_dots, startend)
# print_cluster(cluster2,\
# str(get_cluster_range(cluster2[0])),\
# cluster_num, num_dots, startend)
# ## Assert no overlap.
# #assert not clusters_overlap(cluster1[0], cluster2[0]),\
# #"""Clusters overlap: %s and %s""" %\
# #(cluster1[0][0].hit_id + str(get_cluster_range(cluster1[0])),\
# # cluster2[0][0].hit_id + str(get_cluster_range(cluster2[0])))
## Check that the clusters do not overlap with each other on the subject
## sequence.
#for cluster1 in all_hsp_clusters:
# for cluster2 in all_hsp_clusters:
# if cluster1[0] != cluster2[0]:
# assert not clusters_overlap(cluster1[0], cluster2[0]),\
# """Clusters overlap: %s and %s""" %\
# (cluster1[0][0].hit_id + str(get_cluster_range(cluster1[0])),\
# cluster2[0][0].hit_id + str(get_cluster_range(cluster2[0])))
# Sort HSPs according to E-value (the ranking may change because when
# TBLASTN HSPs for the same scaffold sequence are split into those
# representing potentially separate genes, then some may have higher
# E-values).
all_hsp_clusters.sort(key=lambda x: min([y.evalue for y in x[0]]))
# Return the list of SearchIO HSP (not Hit) object clusters/lists.
return all_hsp_clusters | f33bc8ed36343cb4e0c00c186546d6f979885c92 | 3,419 |
def to_entity_values(entity_group):
""" Parse current entity group content into a CreateEntity[]
"""
values = []
for _, row in entity_group.iterrows():
value = row[ENTITY_VALUE_COLUMN]
if not value: # Handle reserved entities
continue
synonyms = []
patterns = []
# Drop first two item and iterate the rest items (synonym or pattern)
for _, val in row.drop([ENTITY_COLUMN, ENTITY_VALUE_COLUMN]) \
.iteritems():
if not pd.isnull(val):
if val.startswith('/'): # is pattern?
patterns.append(val[:-1][1:])
else:
synonyms.append(val)
# Construct CreateValue[]
if len(patterns) != 0:
values.append({'value': value, 'patterns': patterns,
'type': 'patterns'})
else:
values.append({'value': value, 'synonyms': synonyms,
'type': 'synonyms'})
return values | 278f9d5a7c8294338d83ba025c67fe23f36a8ac2 | 3,420 |
import codecs
import logging
def read_file(file_path):
"""
Read the contents of a file using utf-8 encoding, or return an empty string
if it does not exist
:param file_path: str: path to the file to read
:return: str: contents of file
"""
try:
with codecs.open(file_path, 'r', encoding='utf-8', errors='xmlcharrefreplace') as infile:
return infile.read()
except OSError as e:
logging.exception('Error opening {}'.format(file_path))
return '' | 13a72bc939021e3046243ed9afc7014cb403652a | 3,421 |
def scrub(data):
"""
Reads a CSV file and organizes it neatly into a DataFrame.
Arguments:
data {.csv} -- the csv file to be read and scrubbed
Returns:
DataFrame -- the logarithmic returns of selected ticker symbols
"""
df = pd.read_csv(data, header=0, index_col=0, parse_dates=True)
df.dropna(axis=1, inplace=True)
logret = np.log(df).diff().iloc[1:]
return logret | cce082da1d1f4c4308b4f30df918750f91de3f3f | 3,422 |
def _get_lto_level():
"""
Returns the user-specific LTO parallelism level.
"""
default = 32 if config.get_lto_type() else 0
return read_int("cxx", "lto", default) | 1d0279d363aaa02dcf820f3a064e9b2023ae36a4 | 3,424 |
from typing import List
from typing import Any
def slice_label_rows(labeldf: pd.DataFrame, label: str, sample_list: List[str],
row_mask: NDArray[Any]) -> NDArray[Any]:
"""
Selects rows from the Pandas DataFrame of labels corresponding to the samples in a particular sample_block.
Args:
labeldf : Pandas DataFrame containing the labels
label : Header for the particular label to slice. Can be 'all' if all labels are desired.
sample_list : List of sample ids corresponding to the sample_block to be sliced out.
row_mask : 1D numpy array of size n_rows containing booleans used to mask samples from the rows sliced from
labeldf.
Returns:
Matrix of [number of samples in sample_block - number of samples masked] x [number of labels to slice]
"""
if row_mask.size == 0:
row_mask = np.full(len(sample_list), True)
if label == 'all':
return labeldf.loc[sample_list, :].to_numpy()[row_mask, :]
else:
return labeldf[label].loc[sample_list].to_numpy().reshape(-1, 1)[row_mask, :] | 859bac2e577b534592a3428cd163f123608c9d72 | 3,425 |
def rollback(var_list, ckpt_folder, ckpt_file=None):
""" This function provides a shortcut for reloading a model and calculating a list of variables
:param var_list:
:param ckpt_folder:
:param ckpt_file: in case an older ckpt file is needed, provide it here, e.g. 'cifar.ckpt-6284'
:return:
"""
global_step = global_step_config()
# register a session
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False))
# initialization
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
# load the training graph
saver = tf.compat.v1.train.Saver(max_to_keep=2)
ckpt = get_ckpt(ckpt_folder, ckpt_file=ckpt_file)
if ckpt is None:
raise FileNotFoundError('No ckpt Model found at {}.'.format(ckpt_folder))
saver.restore(sess, ckpt.model_checkpoint_path)
FLAGS.print('Model reloaded.')
# run the session
coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
var_value, global_step_value = sess.run([var_list, global_step])
coord.request_stop()
# coord.join(threads)
sess.close()
FLAGS.print('Variable calculated.')
return var_value, global_step_value | e434ba292b842ee29ca5e61e33b24089a34b52a8 | 3,426 |
import numpy
def read_interaction_file_mat(file):
"""
Returns the adjacency matrix associated with the protein-interaction graph, together with the
ordered list of vertices
:param file: table containing a graph
:type file: dataframe
:return: an adjacency matrix of this graph and an ordered list of vertices
:rtype: tuple
"""
list_sommets = pd.concat([file.Sommet, file.Interaction])
list_sommets = sorted(list(dict.fromkeys(list_sommets)))
res_mat = numpy.zeros((len(list_sommets), len(list_sommets)), dtype=int)
res_list = read_interaction_file_list(file)
for interaction in res_list:
res_mat[list_sommets.index(interaction[0])][list_sommets.index(interaction[1])] = 1
res_mat[list_sommets.index(interaction[1])][list_sommets.index(interaction[0])] = 1
return res_mat, list_sommets | de62b45810ada6a69b779f42c39b589092d95428 | 3,427 |
def load_figures(fig_names):
"""
Uses a list of the figure names to load them into a list
@param fig_names:
@type fig_names:
@return: A list containing all the figures
@rtype: list
"""
fig_list = []
for i, name in enumerate(fig_names):
fig_list.append(pl.load(open(f"{name}.pickle", "rb")))
return fig_list | 6e90a2c9c7fbbbb89d793b8e0c8e7b521f797f64 | 3,428 |
def define_mimonet_layers(input_shape, classes, regularized=False):
"""
Use the functional API to define the model
https://keras.io/getting-started/functional-api-guide/
params: input_shape (h,w,channels)
"""
layers = { 'inputs' : None,
'down_path' : {},
'bottle_neck' : None,
'up_path' : {},
'outputs' : None }
layers['inputs'] = [Input(input_shape[0],name='in1'),Input(input_shape[1],name='in2'),Input(input_shape[2],name='in3')]
layers['down_path'][4] = cnv3x3Relu(64,regularized=regularized)(layers['inputs'][0])
layers['down_path'][4] = cnv3x3Relu(64,regularized=regularized)(layers['down_path'][4])
layers['down_path'][3] = crop_concatenate(layers['inputs'][1],
new_down_level(128,layers['down_path'][4],regularized=regularized))
layers['down_path'][2] = crop_concatenate(layers['inputs'][2],
new_down_level(256,layers['down_path'][3],regularized=regularized))
layers['down_path'][1] = new_down_level(512,layers['down_path'][2],regularized=regularized)
layers['bottle_neck'] = new_down_level(1024,layers['down_path'][1],regularized=regularized)
layers['up_path'][1] = new_up_level(512,layers['bottle_neck'],layers['down_path'][1],regularized=regularized)
layers['up_path'][2] = new_up_level(256,layers['up_path'][1],layers['down_path'][2],padding='same',regularized=regularized)
layers['up_path'][3] = new_up_level(128,layers['up_path'][2],layers['down_path'][3],padding='same',regularized=regularized)
layers['up_path'][4] = new_up_level(64,layers['up_path'][3],layers['down_path'][4],regularized=regularized)
auxla1, la1 = feature_mask(4,256,64,classes,layers['up_path'][2],'la1')
auxla2, la2 = feature_mask(2,128,64,classes,layers['up_path'][3],'la2')
auxla3 = layers['up_path'][4]
layers['outputs'] = [ la1,la2 ]
layers['outputs'] += [ Conv2D(classes, (1, 1), activation='softmax', name='la3')(auxla3) ]
l0 = crop_concatenate(auxla1, auxla2)
l0 = crop_concatenate(l0,auxla3)
l0 = cnv3x3Relu(64,regularized=regularized, padding='same')(l0)
l0 = cnv3x3Relu(32,regularized=regularized, padding='same')(l0)
layers['outputs'] += [ Conv2D(classes, (1, 1), activation='softmax', name='l0')(l0) ]
return layers | e3151f29590cbd523063e13fffc29290a19d071a | 3,429 |
def _list_subclasses(cls):
"""
Recursively lists all subclasses of `cls`.
"""
subclasses = cls.__subclasses__()
for subclass in cls.__subclasses__():
subclasses += _list_subclasses(subclass)
return subclasses | 4cebf48916c64f32fcd5dfff28ecde7a155edb90 | 3,431 |
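A small demonstration of `_list_subclasses`:

class Base: pass
class Child(Base): pass
class GrandChild(Child): pass

print(_list_subclasses(Base))  # [Child, GrandChild] (as class objects)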
import logging
def main(from_json: bool = True, filename: str = DEFAULT_ARGS['pipeline_config_save_path']):
"""
Calls the specified pipeline.
:param filename: json filename
:param from_json: whether to run pipeline from json file or not
:return: pipeline call function
"""
# Parsing arguments
parser = HfArgumentParser((ModelArguments, DatabuilderArguments, TrainingArguments, PipelineArguments))
model_args, databuilder_args, training_args, pipeline_args = parser.parse_json_file(
json_file=filename) if from_json else parser.parse_args_into_dataclasses()
# Asserting specified pipeline does exist
assert pipeline_args.pipeline in PIPELINES, \
"Unknown pipeline {}, available pipelines are {}".format(pipeline_args.pipeline, list(PIPELINES.keys()))
# Logging session informations
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
# Loading model & tokenizer
model = AutoModelForSeq2SeqLM.from_pretrained(training_args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(training_args.output_dir)
# Getting specified pipeline
task_pipeline = PIPELINES[pipeline_args.pipeline]["impl"]
logger.info(f'Pipeline has been loaded and is ready for inference. ')
return task_pipeline(model=model, tokenizer=tokenizer) | ccd975889d639f3a642e820e4d7ce5e2ef583609 | 3,432 |
def put(url, **kwargs):
"""PUT to a URL."""
return session.put(url, **kwargs) | 64618fc239164a73fa90f2348de8402c5a593394 | 3,433 |
def cod_records(mocker, cod_records_json):
"""Fixture for COD records metric instance."""
mocker.patch.object(RecordsMetric, 'collect',
new=records_collect(cod_records_json))
return metrics.records('cod_records', 'http://www.google.com') | d4ac73421f3fcef9b175aa42c02354ff437581ad | 3,434 |
def get_lite_addons():
"""Load the lite addons file as a set."""
return set_from_file('validations/lite-addons.txt') | 68f26084b5e7e13492f61fc65fe504d1b5d53384 | 3,436 |
def GetApexPlayerStatus_TRN(api_key, platform, playerName):
"""
Get the status of a player on Apex Legends.
:param api_key: The API key to use.
:param platform: The platform to use.
:param playerName: The player name to use.
"""
platform = _fixplatform(platform)
if _checkplatform(platform):
url = f'https://public-api.tracker.gg/{API_VER}/apex/standard/profile/{platform}/{playerName}'
try:
res = get_request(url, {'TRN-Api-Key': api_key})
response = res[0]
if response.status_code == 200:
r = response.json()
list_legends_data = []
my_append = list_legends_data.append
for d in r['data']['segments']:
if d["type"] == "overview":
continue
else:
my_append(d)
res = ApexTrackerPy.Apexclass.TRN_PlayerStatus(
row_json=r,
elapsed_time=res[1],
platformUserId=r['data']['platformInfo']['platformUserId'],
activelegend=r['data']['metadata']['activeLegend'],
userlevel=r['data']['segments'][0]['stats']['level']['value'],
totalkill=r['data']['segments'][0]['stats']['kills']['value'],
totaldamage=r['data']['segments'][0]['stats']['damage']['value'],
totalheadshots=r['data']['segments'][0]['stats']['headshots']['value'],
CurrentRank=r['data']['segments'][0]['stats']['rankScore']['metadata']['rankName'],
CurrentRankScore=r['data']['segments'][0]['stats']['rankScore']['value'],
ArenaRankedName=r['data']['segments'][0]['stats']['arenaRankScore']['metadata']['rankName'],
ArenaRankedScore=r['data']['segments'][0]['stats']['arenaRankScore']['value'],
legends_json=list_legends_data,
)
return res
else:
raise Exception('HttpError!:The API returned status code '+str(response.status_code))
except Exception as e:
raise Exception('HttpError!:An error has occurred during the API call.\n'+str(e))
else:
raise Exception('Invalid platform!') | 296f9900e3e95afa24a0e643ed45563b57fb172a | 3,437 |
def subFactoryGet(fixture, **kwargs):
"""
To be used in a fixture definition (or in the kwargs of the fixture constructor) to reference another
fixture using the :meth:`.BaseFix.get` method.
:param fixture: Desired fixture
:param kwargs: *Optional:* key words to overwrite properties of this fixture
:return: Proxy object for the desired fixture including the altered properties
"""
return SubFactory(fixture, METHOD_GET, **kwargs) | 480db102897a3edd682acef6ee95a42b6f937b03 | 3,438 |
def hello():
"""Return the dashboard homepage."""
return render_template('index.html') | adac182b3c8dd2ae0f17425205203c5493499f19 | 3,439 |
from pathlib import Path
def test_data_dir():
"""
Returns the path of test data such as Excel files.
Used for tests or notebooks.
"""
path = Path(__file__).parent.parent / 'testdata'
return path | f410f26276797204dd100d884b162f893b5ce4aa | 3,441 |
def is_leap_year(year: int) -> bool:
"""Returns whether the given year is a leap year"""
if year % 100 == 0:
return year % 400 == 0
else:
return year % 4 == 0 | fccaa3de6378e62b937748c671a21aa5427781e8 | 3,443 |
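A few checks for `is_leap_year`:

assert is_leap_year(2000)      # divisible by 400
assert not is_leap_year(1900)  # divisible by 100 but not 400
assert is_leap_year(2024)
assert not is_leap_year(2023)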
def is_valid_distribution(qk: np.ndarray, axis: int) -> bool:
"""valid is e.g.: [], [1.0], [0.5, 0.5]"""
"""not valid is e.g.: [-1.0], [0.6, 0.6], [np.nan], [np.nan, 0.6], [1.2]"""
assert 0 <= axis < len(qk.shape)
if qk.shape[axis] == 0:
return True
if np.any(qk < 0.0):
return False
if np.any(qk > 1.0):
return False
result = np.all(np.sum(qk, axis=axis) == 1)
return result | fdbb1ac82f2d5cf93843f3d8d1f4f4d02a3ab408 | 3,445 |
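A quick, hypothetical check of the validator above, treating each row of a 2-D array as a distribution along axis=1 (note the function compares the sums to 1 exactly, with no floating-point tolerance).

import numpy as np

probs = np.array([[0.5, 0.5],
                  [1.0, 0.0]])
print(is_valid_distribution(probs, axis=1))  # True: every row sums to exactly 1
bad = np.array([[0.6, 0.6]])
print(is_valid_distribution(bad, axis=1))    # False: the row sums to 1.2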
def srt(data, cube, **kwargs):
"""
Define Solar Rotational Tomography model with optional masking of
data and map areas. Can also define priors.
Parameters
----------
data: InfoArray
data cube
cube: FitsArray
map cube
obj_rmin: float
Object minimal radius. Areas below obj_rmin are masked out.
obj_rmax: float
Object maximal radius. Areas above obj_rmax are masked out.
data_rmin: float
Data minimal radius. Areas below data_rmin are masked out.
data_rmax: float
Data maximal radius. Areas above data_rmax are masked out.
mask_negative: boolean
If true, negative values in the data are masked out.
Returns
-------
P : The projector with masking
D : Smoothness priors
obj_mask : object mask array
data_mask : data mask array
"""
# Model : it is Solar rotational tomography, so obstacle="sun".
data_mask = solar.define_data_mask(data, **kwargs)
P = siddon_lo(data.header, cube.header, mask=data_mask, obstacle="sun")
D = smoothness_prior(cube, kwargs.get("height_prior", False))
P, D, obj_mask = _apply_object_mask(P, D, cube, **kwargs)
return P, D, obj_mask, data_mask | e0af1f5d0d00e8651c3668091165beaf0aaa6f55 | 3,446 |
def get(status_id):
"""Fetches a status of previously submitted PushFunds request.
Returns a status of :func:`~pyvdp.visadirect.fundstransfer.MultiPushFundsTransactionsModel` request by transaction
identifier, returned with 202 response.
:param str status_id: **Required**. Transaction status identifier.
:return: Dictionary with VDP API response.
**Usage:**
.. code:: python
from pyvdp.visadirect.fundstransfer import multipushfundstransactions
status_id = "1491819372_186_81_l73c003_VDP_ARM"
        result = multipushfundstransactions.get(status_id)
print(result)
"""
query_string = '/' + status_id
c = VisaDirectDispatcher(resource='visadirect',
api='fundstransfer',
method='multipushfundstransactions',
http_verb='GET',
query_string=query_string)
return c.send() | fb8951355f342405e93f44747e670afcaf094322 | 3,447 |
def eval_pop_thread(args):
"""
Evaluates solutions, returns a list of floats, between 0 and 1
(probabilities of survival and reproduction).
"""
m_solutions, m_state_hash_table, id_mi = args[0], args[1], args[2]
step = int(N_POP/N_PROC)
prob_surv = np.zeros(step)
for index_sol in range(len(m_solutions)):
print("Solution ", index_sol, " Id: ", id_mi)
sol = m_solutions[index_sol]
tmp_points = 0
max_sol = np.max(sol)
for state_key in m_state_hash_table:
state = m_state_hash_table[state_key]
tmp_w = compute_heuristic(state_key, 'WHITE', sol)
tmp_b = compute_heuristic(state_key, 'BLACK', sol)
if tmp_w < 0 and state['value']['white'] / state['games'] > 0.5:
tmp_points += 1
elif tmp_w > 0 and state['value']['black'] / state['games'] > 0.5:
tmp_points += 1
elif 0+ERROR_ZERO * max_sol >= tmp_w >= 0-ERROR_ZERO * max_sol and \
state['value']['black'] / state['games'] < 0.5 and state['value']['white'] / state['games'] < 0.5:
tmp_points += 1
if tmp_b < 0 and state['value']['black'] / state['games'] > 0.5:
tmp_points += 1
elif tmp_b > 0 and state['value']['white'] / state['games'] > 0.5:
tmp_points += 1
elif 0 + ERROR_ZERO * max_sol >= tmp_b >= 0-ERROR_ZERO * max_sol and \
state['value']['black'] / state['games'] < 0.5 and state['value']['white'] / state['games'] < 0.5:
tmp_points += 1
tmp_points /= 2
prob_surv[index_sol] = tmp_points
return prob_surv | 8acdb0acae737a8bf48578ec48c3dcc1b66c7adb | 3,449 |
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False | 701488e530913bfc2e5d382a544679315dc1f013 | 3,450 |
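A hypothetical driver sketch showing how the early-stopping helper above would be called from a mini-batch k-means loop; the `model` stand-in only needs the `batch_size` and `max_no_improvement` attributes the function reads, and the numbers are made up.

from types import SimpleNamespace

model = SimpleNamespace(batch_size=100, max_no_improvement=10)
context = {}  # carries the EWA state between successive calls
for it in range(5):
    converged = _mini_batch_convergence(
        model, iteration_idx=it, n_iter=5, tol=1e-4, n_samples=10_000,
        centers_squared_diff=1e-5 / (it + 1),  # pretend the centers move less each step
        batch_inertia=50.0, context=context, verbose=0)
    if converged:  # with these toy numbers the squared center change is already below tol
        break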
def rho_MC(delta, rhoeq=4.39e-38):
"""
returns the characteristic density of an
axion minicluster in [solar masses/km^3]
forming from an overdensity with
overdensity parameter delta.
rhoeq is the matter density at matter
radiation equality in [solar masses/km^3]
"""
return 140 * (1 + delta) * delta**3 * rhoeq | f28e382cfcf661199728363b3ebe86f25e92760c | 3,451 |
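A small numeric example, using the default value of rhoeq: for a unit overdensity (delta = 1) the characteristic density is 140 * 2 * 1**3 * 4.39e-38.

rho = rho_MC(1.0)
print(f"{rho:.3e} M_sun / km^3")  # ≈ 1.229e-35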
from typing import Dict
from typing import Union
from typing import List
from typing import Optional
def _parse_parameter_from_value(
string: str,
parameter_to_wordlist_mapping: Dict[Union[TimeResolution, PeriodType, Parameter], List[List[str]]]
) -> Optional[Union[TimeResolution, PeriodType, Parameter]]:
"""
Function to parse a parameter from a given string based on a list of parameter enumerations and corresponding list
of words.
Args:
        string: string containing the approximate name of the parameter
parameter_to_wordlist_mapping: mapping of parameter and list of words
Returns:
None or one of the found enumerations
"""
string_split = string.split("_")
for parameter, wordlist in parameter_to_wordlist_mapping.items():
cond1 = len(wordlist) == len(string_split)
cond2 = _find_any_one_word_from_wordlist(string_split, wordlist)
if cond1 and cond2:
return parameter
return None | 0fa1e2f7edf5e6be31e0e2ae514ecc22a512e8f7 | 3,452 |
def render(scene):
"""
:param scene: Scene description
:return: [H, W, 3] image
"""
# Construct rays from the camera's eye position through the screen coordinates
camera = scene['camera']
eye, ray_dir, H, W = generate_rays(camera)
# Ray-object intersections
scene_objects = scene['objects']
obj_intersections, ray_dist, normals, material_idx = ray_object_intersections(eye, ray_dir, scene_objects)
# Valid distances
pixel_dist = ray_dist
valid_pixels = (camera['near'] <= ray_dist) & (ray_dist <= camera['far'])
pixel_dist[~valid_pixels] = np.inf # Will have to use gather operation for TF and pytorch
# Nearest object needs to be compared for valid regions only
nearest_obj = np.argmin(pixel_dist, axis=0)
C = np.arange(0, nearest_obj.size) # pixel idx
# Create depth image for visualization
# use nearest_obj for gather/select the pixel color
im_depth = pixel_dist[nearest_obj, C].reshape(H, W)
##############################
# Fragment processing
##############################
# Lighting
color_table = scene['colors']
light_pos = scene['lights']['pos']
light_clr_idx = scene['lights']['color_idx']
light_colors = color_table[light_clr_idx]
# Generate the fragments
"""
Get the normal and material for the visible objects.
"""
frag_normals = normals[nearest_obj, C]
frag_pos = obj_intersections[nearest_obj, C]
frag_albedo = scene['materials']['albedo'][material_idx[nearest_obj]]
# Fragment shading
light_dir = light_pos[np.newaxis, :] - frag_pos[:, np.newaxis, :]
light_dir_norm = np.sqrt(np.sum(light_dir ** 2, axis=-1))[..., np.newaxis]
    light_dir_norm[(light_dir_norm <= 0) | np.isinf(light_dir_norm)] = 1  # parentheses needed: | binds tighter than <=
light_dir = ops.nonzero_divide(light_dir, light_dir_norm)
im_color = np.sum(frag_normals[:, np.newaxis, :] * light_dir, axis=-1)[..., np.newaxis] * \
light_colors[np.newaxis, ...] * frag_albedo[:, np.newaxis, :]
im = np.sum(im_color, axis=1).reshape(H, W, 3)
im[(im_depth < camera['near']) | (im_depth > camera['far'])] = 0
# clip negative values
im[im < 0] = 0
# Tonemapping
if 'tonemap' in scene:
im = tonemap(im, **scene['tonemap'])
return {'image': im,
'depth': im_depth,
'ray_dist': ray_dist,
'obj_dist': pixel_dist,
'nearest': nearest_obj.reshape(H, W),
'ray_dir': ray_dir,
'valid_pixels': valid_pixels
} | 35f8cf34fea266034a76f3857213fcb83e334174 | 3,454 |
def state_fidelity(state1, state2):
"""Return the state fidelity between two quantum states.
Either input may be a state vector, or a density matrix. The state
fidelity (F) for two density matrices is defined as::
F(rho1, rho2) = Tr[sqrt(sqrt(rho1).rho2.sqrt(rho1))] ^ 2
For a pure state and mixed state the fidelity is given by::
F(|psi1>, rho2) = <psi1|rho2|psi1>
For two pure states the fidelity is given by::
F(|psi1>, |psi2>) = |<psi1|psi2>|^2
Args:
state1 (array_like): a quantum state vector or density matrix.
state2 (array_like): a quantum state vector or density matrix.
Returns:
array_like: The state fidelity F(state1, state2).
"""
# convert input to numpy arrays
s1 = np.array(state1)
s2 = np.array(state2)
# fidelity of two state vectors
if s1.ndim == 1 and s2.ndim == 1:
return np.abs(s2.conj().dot(s1)) ** 2
# fidelity of vector and density matrix
elif s1.ndim == 1:
# psi = s1, rho = s2
return np.abs(s1.conj().dot(s2).dot(s1))
elif s2.ndim == 1:
# psi = s2, rho = s1
return np.abs(s2.conj().dot(s1).dot(s2))
# fidelity of two density matrices
s1sq = _funm_svd(s1, np.sqrt)
s2sq = _funm_svd(s2, np.sqrt)
return np.linalg.norm(s1sq.dot(s2sq), ord='nuc') ** 2 | 9df10584ce9376df5690ebaccaa07046778b097c | 3,455 |
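A short usage sketch for two pure single-qubit states, |0> and |+> = (|0> + |1>)/sqrt(2); the same call also accepts a density matrix for either argument.

import numpy as np

psi0 = np.array([1.0, 0.0])
plus = np.array([1.0, 1.0]) / np.sqrt(2)
print(state_fidelity(psi0, plus))       # |<0|+>|^2 ≈ 0.5
rho_plus = np.outer(plus, plus.conj())  # mixed-input form: vector vs. density matrix
print(state_fidelity(psi0, rho_plus))   # <0|rho|0> ≈ 0.5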
def process_state(request):
"""Procesa una request GET o POST para consultar datos de provincias.
En caso de ocurrir un error de parseo, se retorna una respuesta HTTP 400.
Args:
request (flask.Request): Request GET o POST de flask.
Returns:
flask.Response: respuesta HTTP
"""
return _process_entity(request, N.STATES, params.PARAMS_STATES, {
N.ID: 'ids',
N.NAME: 'name',
N.INTERSECTION: 'geo_shape_ids',
N.EXACT: 'exact',
N.ORDER: 'order',
N.FIELDS: 'fields',
N.OFFSET: 'offset',
N.MAX: 'size'
}) | 8e748dd73845438f768ecd34730a94c2e8696387 | 3,456 |
def is_ascii(string):
"""Return True is string contains only is us-ascii encoded characters."""
def is_ascii_char(char):
return 0 <= ord(char) <= 127
return all(is_ascii_char(char) for char in string) | cd3aeddcad7610de83af6ec5a67ecbac95f11fd8 | 3,457 |
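Two quick checks of the predicate above: plain ASCII passes, anything outside code points 0-127 does not.

assert is_ascii("hello, world")
assert not is_ascii("café")  # 'é' is outside US-ASCII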
from typing import Union
from typing import Tuple
from typing import Optional
from typing import List
def _get_predictions_from_data(
model: Union[Model, SKLEARN_MODELS],
data: Union[
tf.data.Dataset,
Tuple[Inputs, Outputs],
Tuple[Inputs, Outputs, Paths],
],
batch_size: Optional[int],
tensor_maps_in: Optional[List[TensorMap]],
tensor_maps_out: Optional[List[TensorMap]],
) -> Tuple[Predictions, Outputs, Optional[Paths]]:
"""
Get model predictions, output data, and paths from data source. Data must not
be infinite.
:param model: Model
:param data: finite tensorflow Dataset or tuple of inputs, outputs, and
optionally paths
:param batch_size: Number of samples to use in a batch, required if data is a
tuple input and output numpy arrays
:return: Tuple of predictions as a list of numpy arrays, a dictionary of
output data, and optionally paths
"""
if isinstance(data, tuple):
if len(data) == 2:
input_data, output_data = data
paths = None
elif len(data) == 3:
input_data, output_data, paths = data
else:
raise ValueError(
f"Expected 2 or 3 elements to dataset tuple, got {len(data)}",
)
if batch_size is None:
raise ValueError(
"When providing dataset as tuple of inputs and outputs, batch_size "
"is required, got {batch_size}",
)
y_predictions = model.predict(x=input_data, batch_size=batch_size)
elif isinstance(data, tf.data.Dataset):
y_prediction_batches = defaultdict(list)
output_data_batches = defaultdict(list)
id_batches = []
if isinstance(model, Model):
for batch in data:
output_data_batch = batch[BATCH_OUTPUT_INDEX]
for output_name, output_tensor in output_data_batch.items():
output_data_batches[output_name].append(output_tensor.numpy())
batch_y_predictions = model.predict(batch[BATCH_INPUT_INDEX])
if not isinstance(batch_y_predictions, list):
batch_y_predictions = [batch_y_predictions]
for prediction_idx, batch_y_prediction in enumerate(
batch_y_predictions,
):
y_prediction_batches[prediction_idx].append(batch_y_prediction)
if len(batch) == 3:
id_batches.append(batch[BATCH_IDS_INDEX].numpy().astype(str))
y_predictions = [
np.concatenate(y_prediction_batches[prediction_idx])
for prediction_idx in sorted(y_prediction_batches)
]
elif isinstance(model, SKLEARN_MODELS.__args__):
data = get_dicts_of_arrays_from_dataset(dataset=data)
assert all(tm.axes == 1 for tm in tensor_maps_in + tensor_maps_out)
assert len(tensor_maps_out) == 1
# Isolate arrays from datasets for desired tensor maps
X = get_array_from_dict_of_arrays(
tensor_maps=tensor_maps_in,
data=data[BATCH_INPUT_INDEX],
drop_redundant_columns=False,
)
y_predictions = model.predict_proba(X)
for output_name, output_tensor in data[BATCH_OUTPUT_INDEX].items():
output_data_batches[output_name].append(output_tensor)
if len(data) == 3:
id_batches.append(data[BATCH_IDS_INDEX])
else:
raise NotImplementedError(
f"Cannot perform inference on model of type {type(model).__name}",
)
# Iterate over batches and concatenate into dict of arrays
output_data = {
output_name: np.concatenate(output_data_batches[output_name])
for output_name in output_data_batches
}
paths = None if len(id_batches) == 0 else np.concatenate(id_batches).tolist()
else:
raise NotImplementedError(
"Cannot get data for inference from data of type "
"{type(data).__name__}: {data}",
)
if not isinstance(y_predictions, list):
y_predictions = [y_predictions]
return y_predictions, output_data, paths | 29a91481989d283ac1dddd831a9746ada5971a5a | 3,458 |
import pickle
def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height):
"""
Read and return the viewport data
"""
VIEW_PATH = '../../Viewport/'
view_info = pickle.load(open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb'), encoding='latin1')
if dataset == 1:
max_frame = int(view_info[-1][0]*1.0*fps/milisec)
for i in range(len(view_info)-1):
frame = int(view_info[i][0]*1.0*fps/milisec)
frame += int(offset*1.0*fps/milisec)
frame_nos.append(frame)
if(frame > max_frame):
break
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
elif dataset == 2:
for k in range(len(view_info)-1):
if view_info[k][0]<=offset+60 and view_info[k+1][0]>offset+60:
max_frame = int(view_info[k][0]*1.0*fps/milisec)
break
for k in range(len(view_info)-1):
if view_info[k][0]<=offset and view_info[k+1][0]>offset:
min_index = k+1
break
prev_frame = 0
for i in range(min_index,len(view_info)-1):
frame = int((view_info[i][0])*1.0*fps/milisec)
if frame == prev_frame:
continue
if(frame > max_frame):
break
frame_nos.append(frame)
X={}
X['VIEWPORT_x']=int(view_info[i][1][1]*width/view_width)
X['VIEWPORT_y']=int(view_info[i][1][0]*height/view_height)
data.append((X, int(view_info[i+1][1][1]*width/view_width),int(view_info[i+1][1][0]*height/view_height)))
prev_frame = frame
return data, frame_nos, max_frame | f78f3b7505b3ca5ab2cac67f2634b71cfa383707 | 3,459 |
import copy
def call(args, version):
"""Converts callList into functionString."""
# Find keyword
keywords = [i for i in args if i in Variables.keywords(version)]
# Too many keywords is a syntax error.
if len(keywords) > 1:
raise UdebsSyntaxError("CallList contains to many keywords '{}'".format(args))
# No keywords creates a tuple object.
elif len(keywords) == 0:
return "(" + ",".join(formatS(i, version) for i in args) + ")"
keyword = keywords[0]
# Get and fix data for this keyword.
data = copy.copy(Variables.default)
data.update(Variables.keywords(version)[keyword])
# Create dict of values
current = args.index(keyword)
nodes = copy.copy(data["default"])
for index in range(len(args)):
value = "$" if index >= current else "-$"
value += str(abs(index - current))
if args[index] != keyword:
nodes[value] = args[index]
# Force strings into quoted arguments.
for string in data["string"]:
nodes[string] = "'" + str(nodes[string]).replace("'", "\\'") + "'"
# Claim keyword arguments.
kwargs = {}
for key, value in data["kwargs"].items():
if value in nodes:
new_value = nodes[value]
del nodes[value]
else:
new_value = value
kwargs[key] = formatS(new_value, version)
arguments = []
# Insert positional arguments
for key in data["args"]:
if key in nodes:
arguments.append(formatS(nodes[key], version))
del nodes[key]
else:
arguments.append(formatS(key, version))
# Insert ... arguments.
if data["all"]:
for key in sorted(nodes.keys(), key=lambda x: int(x.replace("$", ""))):
arguments.append(formatS(nodes[key], version))
del nodes[key]
if len(nodes) > 0:
raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(" ".join(args)))
# Insert keyword arguments.
for key in sorted(kwargs.keys()):
arguments.append(str(key) + "=" + str(kwargs[key]))
return data["f"] + "(" + ",".join(arguments) + ")" | 0f5be8582903973ec3ae4077e51a11e084bcc2f8 | 3,461 |
from typing import List
from typing import Dict
from typing import Any
import ray
def get_object_locations(obj_refs: List[ObjectRef], timeout_ms: int = -1
) -> Dict[ObjectRef, Dict[str, Any]]:
"""Lookup the locations for a list of objects.
It returns a dict maps from an object to its location. The dict excludes
those objects whose location lookup failed.
Args:
        obj_refs (List[ObjectRef]): List of object refs.
        timeout_ms (int): The maximum amount of time in milliseconds to wait
before returning. Wait infinitely if it's negative.
Returns:
A dict maps from an object to its location. The dict excludes those
objects whose location lookup failed.
The location is stored as a dict with following attributes:
- node_ids (List[str]): The hex IDs of the nodes that have a
copy of this object.
- object_size (int): The size of data + metadata in bytes.
Raises:
RuntimeError: if the processes were not started by ray.init().
ray.exceptions.GetTimeoutError: if it couldn't finish the
request in time.
"""
if not ray.is_initialized():
raise RuntimeError("Ray hasn't been initialized.")
return ray.worker.global_worker.core_worker.get_object_locations(
obj_refs, timeout_ms) | c7b4aa6761024853468e09f846af0ada8f7ebbba | 3,462 |
from conf.hosts import getPlateformObject
from core.exceptions import EnvironmentDoesNotExist
def remove_host(plateform=None, name=None, environment=None):
""" Remove Host Object from Platform Object attribute hosts and return updated Platform Object.
:param: plateform: host's plateform (same as type yaml file) passed by user
:param: name: host's name passed by user
    :param: environment: host's environment passed by user
:type: plateform: list of one str
:type: name: list of one str
:type: environment: list of one str
:return: Updated Plateform
:rtype: Plateform Object
.. seealso:: heimdall.conf.hosts.getPlateformObject(), heimdall.core.plateform.Plateform
"""
p = getPlateformObject(plateform[0])
try:
if not p.check_environment(environment[0]):
raise EnvironmentDoesNotExist('Environment %s in plateform %s does not exists!' % (environment[0], p.name),
p.name)
except EnvironmentDoesNotExist as ede:
        print(ede)
exit(ede.code)
if name[0] == -1: # remove all
p.environment[environment[0]] = []
else:
[p.remove_host(host) for host in p.environment[environment[0]] for n in name if host.name == n]
return p | bc8e8681718f763c382230297087b9ce27a37e20 | 3,463 |
def convert_to_float_if_possible(x, elsevalue=MISSING):
"""
    Return the float version of value x, or elsevalue (MISSING or another specified value)
    if the conversion fails.
"""
if isnonnumeric(x):
return elsevalue
else:
return float(x) | 74b1ca5d4ed63758ef9d56fb2be94cbbdec00b56 | 3,466 |
from typing import Union
import requests
def resolve(
names: Union[list, pd.Series, str],
data_source_ids: list = None,
resolve_once: bool = False,
best_match_only: bool = False,
with_context: bool = False,
with_vernaculars: bool = False,
with_canonical_ranks: bool = False
) -> pd.DataFrame:
"""
Receives a list of names and resolves each against the entire resolver
database or against specific data sources using the Global Names
Resolver (GNR) API. Underlying resolving and scoring algorithms are
described at: http://resolver.globalnames.org/about
Parameters
----------
names
List of species names to resolve.
data_source_ids
List of specific data sources IDs to resolve against. A list of
all the available data sources and their IDs can be found at:
http://resolver.globalnames.org/data_sources.
resolve_once
Find the first available match instead of matches across all data
sources with all possible renderings of a name.
best_match_only
Returns just one result with the highest score.
with_context
Reduce the likelihood of matches to taxonomic homonyms. When True,
a common taxonomic context is calculated for all supplied names
from matches in data sources that have classification tree paths.
Names out of determined context are penalized during score
calculation.
with_vernaculars
Return 'vernacular' field to present common names provided by a
data source for a particular match.
with_canonical_ranks
Returns 'canonical_form' with infraspecific ranks, if they are
present.
Returns
-------
pd.DataFrame
DataFrame where rows are the result for each match.
"""
if isinstance(names, str):
names = [names]
if data_source_ids is None:
data_source_ids = []
# Apparently, the GNR API does not accept Booleans so they need to be
# converted to lowercase strings first.
params = {
"data": "\n".join(names),
"data_source_ids": "|".join(data_source_ids),
"resolve_once": str(resolve_once).lower(),
"best_match_only": str(best_match_only).lower(),
"with_context": str(with_context).lower(),
"with_vernaculars": str(with_vernaculars).lower(),
"with_canonical_ranks": str(with_canonical_ranks).lower()
}
try:
response = requests.post(API_URL, json=params)
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise Exception(f"Error calling Global Name Resolver API. {err}")
data = response.json()["data"]
# The pd.json_normalize() function does not work when record_path
# is not found in every single item inside the list of elements
# passed. In some cases, the GNR API returns items without this key,
# so it needs to be added (including an empty dictionary) before
# normalizing the result.
for item in data:
if "results" not in item:
item["results"] = [{}]
return pd.json_normalize(data, record_path="results", meta="supplied_name_string") | a25bd275e8222058e5926bf9a8b53de7a1cb3ccc | 3,467 |
import numpy
def polarisation_frame_from_wcs(wcs, shape) -> PolarisationFrame:
"""Convert wcs to polarisation_frame
See FITS definition in Table 29 of https://fits.gsfc.nasa.gov/standard40/fits_standard40draft1.pdf
or subsequent revision
1 I Standard Stokes unpolarized
2 Q Standard Stokes linear
3 U Standard Stokes linear
4 V Standard Stokes circular
−1 RR Right-right circular
−2 LL Left-left circular
−3 RL Right-left cross-circular
−4 LR Left-right cross-circular
−5 XX X parallel linear
−6 YY Y parallel linear
−7 XY XY cross linear
−8 YX YX cross linear
stokesI [1]
stokesIQUV [1,2,3,4]
circular [-1,-2,-3,-4]
linear [-5,-6,-7,-8]
For example::
pol_frame = polarisation_frame_from_wcs(im.wcs, im.shape)
:param wcs: World Coordinate System
:param shape: Shape corresponding to wcs
:returns: Polarisation_Frame object
"""
# The third axis should be stokes:
polarisation_frame = None
if len(shape) == 2:
polarisation_frame = PolarisationFrame("stokesI")
else:
npol = shape[1]
pol = wcs.sub(['stokes']).wcs_pix2world(range(npol), 0)[0]
pol = numpy.array(pol, dtype='int')
for key in PolarisationFrame.fits_codes.keys():
keypol = numpy.array(PolarisationFrame.fits_codes[key])
if numpy.array_equal(pol, keypol):
polarisation_frame = PolarisationFrame(key)
return polarisation_frame
if polarisation_frame is None:
raise ValueError("Cannot determine polarisation code")
assert isinstance(polarisation_frame, PolarisationFrame)
return polarisation_frame | a2ed057be23add9a6c2041a243286bf06519306f | 3,468 |
import random
import json
import logging
def _update_traffic_class(class_name, class_type, **kwargs):
"""
Perform a PUT call to version-up a traffic class. This is required whenever entries of a traffic class are changed
in any way.
:param class_name: Alphanumeric name of the traffic class
:param class_type: Class type should be one of "ipv4," "ipv6," or "mac"
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: True if successful, False otherwise
"""
traffic_class_data = _get_traffic_class(class_name, class_type, **kwargs)
# # must remove these fields from the data since they can't be modified
# traffic_class_data.pop('origin', None)
# traffic_class_data.pop('name', None)
# traffic_class_data.pop('type', None)
traffic_class_data['cfg_version'] = random.randrange(9007199254740991)
target_url = kwargs["url"] + "system/classes/%s,%s" % (class_name, class_type)
put_data = json.dumps(traffic_class_data, sort_keys=True, indent=4)
response = kwargs["s"].put(target_url, data=put_data, verify=False)
if not common_ops._response_ok(response, "PUT"):
logging.warning("FAIL: Updating %s traffic class '%s' failed with status code %d: %s"
% (class_type, class_name, response.status_code, response.text))
return False
else:
logging.info("SUCCESS: Updating %s traffic class '%s' succeeded" % (class_type, class_name))
return True | 8a19fedcce20a94a3e5c8f06f7fb1ee901dcc6dd | 3,469 |
def eff_w_error(n_before, n_after):
"""
n_before = entries before
n_after = entries after
"""
eff = n_after/n_before
eff_error = np.sqrt(eff*(1-eff)/n_before)
return (eff, eff_error) | 307945af0acc2eb04686b5453f2905be1111944a | 3,470 |
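A minimal numeric example (numpy is assumed to be imported as np in the surrounding module): 800 of 1000 entries survive a cut.

eff, err = eff_w_error(1000, 800)
print(eff, err)  # 0.8, sqrt(0.8 * 0.2 / 1000) ≈ 0.0126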
import scipy.signal
def hurst(x):
"""Estimate Hurst exponent on a timeseries.
The estimation is based on the second order discrete derivative.
Parameters
----------
x : 1D numpy array
The timeseries to estimate the Hurst exponent for.
Returns
-------
h : float
The estimation of the Hurst exponent for the given timeseries.
"""
y = np.cumsum(np.diff(x, axis=1), axis=1)
b1 = [1, -2, 1]
b2 = [1, 0, -2, 0, 1]
# second order derivative
y1 = scipy.signal.lfilter(b1, 1, y, axis=1)
y1 = y1[:, len(b1) - 1:-1] # first values contain filter artifacts
# wider second order derivative
y2 = scipy.signal.lfilter(b2, 1, y, axis=1)
y2 = y2[:, len(b2) - 1:-1] # first values contain filter artifacts
s1 = np.mean(y1 ** 2, axis=1)
s2 = np.mean(y2 ** 2, axis=1)
return 0.5 * np.log2(s2 / s1) | 0632f0e4c5912410568c25774c1da66c160ff78e | 3,471 |
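A rough sanity check on simulated data, where each row of the input is one timeseries; for Brownian-motion-like signals the estimate should land roughly around H = 0.5.

import numpy as np

rng = np.random.default_rng(0)
increments = rng.standard_normal((4, 10_000))
walks = np.cumsum(increments, axis=1)  # Brownian-motion-like rows
print(hurst(walks))  # expect values near 0.5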
import yaml
def explode_on_matched_columns(df, safe_columns, other_columns):
"""Given the name of multiple columns where each entry is a string encoding
a list, and where for each row the lists in all columns are the same length,
return a dataframe where the each row is transformed into len(list)
rows, each of which contains one entry of the various lists and the
remaining columns are identical.
The columns are split into 'safe_columns', which must always contain strings
that encode lists and 'other_columns' which can sometimes be np.nan. If
a column from other_columns has a np.nan entry in some row, it will be
replaced with a list of np.nan values, with the list the same length
as the lists in safe_columns for that row.
Lists from different rows need not have the same number of elements."""
stringlist_columns = safe_columns + other_columns
copied_df = df.copy()
# Only keep rows where at least one of the stringlist columns is present
copied_df = copied_df.dropna(subset=stringlist_columns, how='all')
# Map the safe columns from strings (strings encoding lists) to lists
for stringlist_column in safe_columns:
copied_df[stringlist_column] = copied_df[stringlist_column].map(yaml.safe_load)
for column in other_columns:
# Replace any nan values with an empty list, matching the list lengths
# from one of the safe columns
copied_df[column] = replace_nan_with_empty_list(column,
safe_columns[0],
copied_df)
exploded = pd.DataFrame({
col:np.repeat(copied_df[col].values, copied_df[stringlist_columns[0]].str.len())
for col in copied_df.columns.drop(stringlist_columns)}
)
exploded_with_col = exploded.assign(**{column_to_expand:np.concatenate(copied_df[column_to_expand].values)
for column_to_expand in stringlist_columns})[df.columns]
return exploded_with_col | 4f38310e563c8081ee7297ec2af2211ca8084504 | 3,472 |
import networkx
def plot_time_series_graph(val_matrix,
var_names=None,
fig_ax=None,
figsize=None,
sig_thres=None,
link_matrix=None,
link_colorbar_label='MCI',
save_name=None,
link_width=None,
arrow_linewidth=20.,
vmin_edges=-1,
vmax_edges=1.,
edge_ticks=.4,
cmap_edges='RdBu_r',
order=None,
node_size=10,
arrowhead_size=20,
curved_radius=.2,
label_fontsize=10,
alpha=1.,
node_label_size=10,
label_space_left=0.1,
label_space_top=0.,
network_lower_bound=0.2,
undirected_style='dashed'
):
"""Creates a time series graph.
This is still in beta. The time series graph's links are colored by
val_matrix.
Parameters
----------
val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing test statistic values.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
fig_ax : tuple of figure and axis object, optional (default: None)
Figure and axes instance. If None they are created.
figsize : tuple
Size of figure.
sig_thres : array-like, optional (default: None)
Matrix of significance thresholds. Must be of same shape as val_matrix.
Either sig_thres or link_matrix has to be provided.
link_matrix : bool array-like, optional (default: None)
Matrix of significant links. Must be of same shape as val_matrix. Either
sig_thres or link_matrix has to be provided.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
link_colorbar_label : str, optional (default: 'MCI')
Test statistic label.
link_width : array-like, optional (default: None)
Array of val_matrix.shape specifying relative link width with maximum
given by arrow_linewidth. If None, all links have same width.
order : list, optional (default: None)
order of variables from top to bottom.
    arrow_linewidth : float, optional (default: 20)
Linewidth.
vmin_edges : float, optional (default: -1)
Link colorbar scale lower bound.
vmax_edges : float, optional (default: 1)
Link colorbar scale upper bound.
edge_ticks : float, optional (default: 0.4)
Link tick mark interval.
cmap_edges : str, optional (default: 'RdBu_r')
Colormap for links.
    node_size : int, optional (default: 10)
Node size.
arrowhead_size : int, optional (default: 20)
Size of link arrow head. Passed on to FancyArrowPatch object.
curved_radius, float, optional (default: 0.2)
Curvature of links. Passed on to FancyArrowPatch object.
label_fontsize : int, optional (default: 10)
Fontsize of colorbar labels.
alpha : float, optional (default: 1.)
Opacity.
node_label_size : int, optional (default: 10)
Fontsize of node labels.
link_label_fontsize : int, optional (default: 6)
Fontsize of link labels.
label_space_left : float, optional (default: 0.1)
Fraction of horizontal figure space to allocate left of plot for labels.
label_space_top : float, optional (default: 0.)
Fraction of vertical figure space to allocate top of plot for labels.
network_lower_bound : float, optional (default: 0.2)
Fraction of vertical space below graph plot.
undirected_style : string, optional (default: 'dashed')
Style of undirected contemporaneous links.
"""
if fig_ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
else:
fig, ax = fig_ax
if sig_thres is None and link_matrix is None:
raise ValueError("Need to specify either sig_thres or link_matrix")
elif sig_thres is not None and link_matrix is None:
link_matrix = np.abs(val_matrix) >= sig_thres
if link_width is not None and not np.all(link_width >= 0.):
raise ValueError("link_width must be non-negative")
N, N, dummy = val_matrix.shape
tau_max = dummy - 1
max_lag = tau_max + 1
if var_names is None:
var_names = range(N)
if order is None:
order = range(N)
if set(order) != set(range(N)):
raise ValueError("order must be a permutation of range(N)")
def translate(row, lag):
return row * max_lag + lag
# Define graph links by absolute maximum (positive or negative like for
# partial correlation)
tsg = np.zeros((N * max_lag, N * max_lag))
tsg_attr = np.zeros((N * max_lag, N * max_lag))
for i, j, tau in np.column_stack(np.where(link_matrix)):
# print '\n',i, j, tau
# print np.where(nonmasked[:,j])[0]
for t in range(max_lag):
if (0 <= translate(i, t - tau) and
translate(i, t - tau) % max_lag <= translate(j, t) % max_lag):
# print translate(i, t-tau), translate(j, t), val_matrix[i,j,tau]
tsg[translate(i, t - tau), translate(j, t)
] = val_matrix[i, j, tau]
tsg_attr[translate(i, t - tau), translate(j, t)
] = val_matrix[i, j, tau]
G = networkx.DiGraph(tsg)
# node_color = np.zeros(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and directed links are handled separately
for (u, v, dic) in G.edges(data=True):
dic['directed_attribute'] = None
if u != v:
if u % max_lag == v % max_lag:
dic['undirected'] = True
dic['directed'] = False
else:
dic['undirected'] = False
dic['directed'] = True
dic['undirected_alpha'] = alpha
dic['undirected_color'] = _get_absmax(
np.array([[[tsg_attr[u, v],
tsg_attr[v, u]]]])
).squeeze()
dic['undirected_width'] = arrow_linewidth
all_strengths.append(dic['undirected_color'])
dic['directed_alpha'] = alpha
dic['directed_width'] = arrow_linewidth
# value at argmax of average
dic['directed_color'] = tsg_attr[u, v]
all_strengths.append(dic['directed_color'])
dic['label'] = None
dic['directed_edge'] = False
dic['directed_edgecolor'] = None
dic['undirected_edge'] = False
dic['undirected_edgecolor'] = None
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.]
posarray = np.zeros((N * max_lag, 2))
for i in range(N * max_lag):
posarray[i] = np.array([(i % max_lag), (1. - i // max_lag)])
pos_tmp = {}
for i in range(N * max_lag):
# for n in range(N):
# for tau in range(max_lag):
# i = n*N + tau
pos_tmp[i] = np.array([((i % max_lag) - posarray.min(axis=0)[0]) /
(posarray.max(axis=0)[0] -
posarray.min(axis=0)[0]),
((1. - i // max_lag) -
posarray.min(axis=0)[1]) /
(posarray.max(axis=0)[1] -
posarray.min(axis=0)[1])])
pos = {}
for n in range(N):
for tau in range(max_lag):
pos[n * max_lag + tau] = pos_tmp[order[n] * max_lag + tau]
node_rings = {0: {'sizes': None, 'color_array': None,
'label': '', 'colorbar': False,
}
}
# ] for v in range(max_lag)]
node_labels = ['' for i in range(N * max_lag)]
_draw_network_with_curved_edges(
fig=fig, ax=ax,
G=deepcopy(G), pos=pos,
# dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array
# or None, 'cmap':string,
node_rings=node_rings,
# 'vmin':float or None, 'vmax':float or None, 'label':string or None}}
node_labels=node_labels, node_label_size=node_label_size,
node_alpha=alpha, standard_size=node_size,
standard_cmap='OrRd', standard_color='grey',
log_sizes=False,
cmap_links=cmap_edges, links_vmin=vmin_edges,
links_vmax=vmax_edges, links_ticks=edge_ticks,
cmap_links_edges='YlOrRd', links_edges_vmin=-1., links_edges_vmax=1.,
links_edges_ticks=.2, link_edge_colorbar_label='link_edge',
arrowstyle='simple', arrowhead_size=arrowhead_size,
curved_radius=curved_radius, label_fontsize=label_fontsize,
label_fraction=.5,
link_colorbar_label=link_colorbar_label, undirected_curved=True,
network_lower_bound=network_lower_bound,
undirected_style=undirected_style
)
for i in range(N):
trans = transforms.blended_transform_factory(
fig.transFigure, ax.transData)
ax.text(label_space_left, pos[order[i] * max_lag][1],
'%s' % str(var_names[order[i]]), fontsize=label_fontsize,
horizontalalignment='left', verticalalignment='center',
transform=trans)
for tau in np.arange(max_lag - 1, -1, -1):
trans = transforms.blended_transform_factory(
ax.transData, fig.transFigure)
if tau == max_lag - 1:
ax.text(pos[tau][0], 1.-label_space_top, r'$t$',
fontsize=label_fontsize,
horizontalalignment='center',
verticalalignment='top', transform=trans)
else:
ax.text(pos[tau][0], 1.-label_space_top,
r'$t-%s$' % str(max_lag - tau - 1),
fontsize=label_fontsize,
horizontalalignment='center', verticalalignment='top',
transform=trans)
# fig.subplots_adjust(left=0.1, right=.98, bottom=.25, top=.9)
# savestring = os.path.expanduser(save_name)
if save_name is not None:
pyplot.savefig(save_name)
else:
pyplot.show() | e4acb78dbb8809f3b1604b4a44437c775c0cdfb7 | 3,473 |
def get_configuration_docname(doctype=None, txt=None, searchfield=None, start=None, page_len=None, filters=None):
"""get relevant fields of the configuration doctype"""
return frappe.db.sql("""select soi.configuration_docname, so.name, so.customer from `tabSales Order Item` soi
inner join `tabSales Order` so on soi.parent=so.name where
soi.configuration_doctype = %(configuration_doctype)s and soi.configuration_docname is not null
and (soi.configuration_docname like %(txt)s or so.name like %(txt)s)""",
{'configuration_doctype':filters.get('configuration_doctype'),
'txt': "%%%s%%" % txt}) | fb9494aacfbff6ec77f0e512daab35ffcd9c7fb9 | 3,474 |
import requests
import re
def skymapper_search(searchrad,waveband,targetra,targetdec):
""" Search for stars within search radius of target in Skymapper
catalogue
"""
# set up arrays and url
star_ra = []
star_dec = []
star_mag = []
star_magerr = []
sky_ra = []
sky_dec = []
sky_u_petro = []
sky_u_petro_err = []
sky_u_psf = []
sky_u_psf_err = []
sky_v_petro = []
sky_v_petro_err = []
sky_v_psf = []
sky_v_psf_err = []
sky_g_petro = []
sky_g_petro_err = []
sky_g_psf = []
sky_g_psf_err = []
sky_r_petro = []
sky_r_petro_err = []
sky_r_psf = []
sky_r_psf_err = []
sky_i_petro = []
sky_i_petro_err = []
sky_i_psf = []
sky_i_psf_err = []
sky_z_petro = []
sky_z_petro_err = []
sky_z_psf = []
sky_z_psf_err = []
sr_deg = float(searchrad*0.0166667)
sky_url = "http://skymapper.anu.edu.au/sm-cone/query?RA={0}&DEC={1}&SR={2}"
sky_url = sky_url.format(targetra,targetdec,sr_deg)
# Attempt to parse url to find stars within search radius of filter
try:
skytable = requests.get(sky_url,timeout=30).text
sc = 0
for lines in skytable.split('<TR>'):
sc += 1
if sc >= 2:
columns = re.split("<TD>|</TD>|\n",lines)
sky_ra.append(columns[5])
sky_dec.append(columns[7])
sky_u_petro.append(columns[33])
sky_u_petro_err.append(columns[35])
sky_u_psf.append(columns[29])
sky_u_psf_err.append(columns[31])
sky_v_petro.append(columns[41])
sky_v_petro_err.append(columns[43])
sky_v_psf.append(columns[37])
sky_v_psf_err.append(columns[39])
sky_g_petro.append(columns[49])
sky_g_petro_err.append(columns[51])
sky_g_psf.append(columns[45])
sky_g_psf_err.append(columns[47])
sky_r_petro.append(columns[57])
sky_r_petro_err.append(columns[59])
sky_r_psf.append(columns[53])
sky_r_psf_err.append(columns[55])
sky_i_petro.append(columns[65])
sky_i_petro_err.append(columns[67])
sky_i_psf.append(columns[61])
sky_i_psf_err.append(columns[63])
sky_z_petro.append(columns[73])
sky_z_petro_err.append(columns[75])
sky_z_psf.append(columns[69])
sky_z_psf_err.append(columns[71])
# Raise error if something goes wrong
except requests.exceptions.RequestException as e:
print ('\nException raised for Skymapper url!!')
print (e)
print ('')
# Save parsed star properties for a given filter and remove extended
# shaped sources
for i in range(len(sky_ra)):
if (sky_g_psf[i] != '' and sky_g_petro[i] != '' and
sky_r_psf[i] != '' and sky_r_petro[i] != ''):
if (np.abs(float(sky_g_psf[i]) - float(sky_g_petro[i])) < 0.25
and np.abs(float(sky_r_psf[i]) - float(sky_r_petro[i]))
< 0.25):
if waveband == 'V':
V_mag = float(sky_g_psf[i])-0.0038
V_mag = (V_mag-0.5784*(float(sky_g_psf[i])
-float(sky_r_psf[i])))
gerr = float(sky_g_psf_err[i])**2
rerr = float(sky_r_psf_err[i])**2
V_magerr = np.sqrt((0.5784*rerr)**2+(0.4216*gerr)**2)
star_mag.append(V_mag)
star_magerr.append(V_magerr)
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'B':
B_mag = float(sky_g_psf[i])+0.2271
B_mag = (B_mag+0.3130*(float(sky_g_psf[i])-
float(sky_r_psf[i])))
gerr = float(sky_g_psf_err[i])**2
rerr = float(sky_r_psf_err[i])**2
B_magerr = np.sqrt((0.3130*rerr)**2+(1.3130*gerr)**2)
star_mag.append(B_mag)
star_magerr.append(B_magerr)
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'R':
R_mag = float(sky_r_psf[i])-0.0971
R_mag = (R_mag-0.1837*(float(sky_g_psf[i])-
float(sky_r_psf[i])))
gerr = float(sky_g_psf_err[i])**2
rerr = float(sky_r_psf_err[i])**2
R_magerr = np.sqrt((1.1837*rerr)**2+(0.1837*gerr)**2)
star_mag.append(R_mag)
star_magerr.append(R_magerr)
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'u':
if (sky_u_psf[i] != '' and sky_u_petro[i] != ''):
if (np.abs(float(sky_u_psf[i]) - float(sky_u_petro[i]))<0.25):
star_mag.append(float(sky_u_psf[i]))
star_magerr.append(float(sky_u_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'g':
if (sky_g_psf[i] != '' and sky_g_petro[i] != ''):
if (np.abs(float(sky_g_psf[i]) - float(sky_g_petro[i]))<0.25):
star_mag.append(float(sky_g_psf[i]))
star_magerr.append(float(sky_g_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'r':
if (sky_r_psf[i] != '' and sky_r_petro[i] != ''):
if (np.abs(float(sky_r_psf[i]) - float(sky_r_petro[i]))<0.25):
star_mag.append(float(sky_r_psf[i]))
star_magerr.append(float(sky_r_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'i' :
if (sky_i_psf[i] != '' and sky_i_petro[i] != ''):
if (np.abs(float(sky_i_psf[i]) - float(sky_i_petro[i]))<0.25):
star_mag.append(float(sky_i_psf[i]))
star_magerr.append(float(sky_i_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
if waveband == 'z' :
if (sky_z_psf[i] != '' and sky_z_petro[i] != ''):
if (np.abs(float(sky_z_psf[i]) - float(sky_z_petro[i]))<0.25):
star_mag.append(float(sky_z_psf[i]))
star_magerr.append(float(sky_z_psf_err[i]))
star_ra.append(float(sky_ra[i]))
star_dec.append(float(sky_dec[i]))
# Create list with catalogue name
star_cat = ['SkyMapper'] * len(star_ra)
return star_ra,star_dec,star_mag,star_magerr,star_cat | 3ebed23f2ec73f6a8e859e645a2c3b5f936ac674 | 3,476 |
import random
def Decimal_to_Hexadecimal(x : str) -> str:
"""
    Converts the given decimal number (supplied as a `str`) into the hexadecimal number system (base `16`).
    Args:
        x `(str)` : Positional argument holding the decimal input from the user.
    Returns (str) : The hexadecimal conversion of the input, returned as a `str`.
"""
""" For Recognising the Dot """
list1 = list(x)
left = []
right = []
flag = False
for val in range(len(list1)):
if list1[val] == "." or flag == True:
if list1[val] != ".":
right.append(list1[val])
else:
flag = True
continue
else:
num = int(list1[val])
left.append(num)
""" For Shifting the left elements in list into a variable """
leftmost = 0
for val in left:
leftmost = leftmost*10 + val
""" For Shifting the right elements in list into a variable """
rightmost = ''
for val in right:
rightmost = rightmost + val
dict = {10: "A", 11 : "B", 12 : "C", 13 : "D", 14 : "E", 15 : "F"}
""" Calculation of the left part """
cur = 0
rem = 0
next = leftmost
list_of_numbers = []
while next != 0:
rem = next%16
if rem > 9:
if rem in dict:
rem = dict[rem]
list_of_numbers.append(rem)
else:
pass
else:
list_of_numbers.append(rem)
cur = next//16
next = cur
list_of_numbers.reverse()
numbers = ''
for val in range(len(list_of_numbers)):
string = str(list_of_numbers[val])
numbers = numbers + string
""" Calculation of the right part """
zeros = '1' + len(rightmost)*'0'
length = int(zeros)
    next = int(rightmost)/length if rightmost else 0.0  # guard: the input may have no fractional part
list_of_numbers = []
length = 0
while length <= 20:
if next * 16< 1:
list_of_numbers.append(0)
next = (next * 16)
else:
next = (next * 16)
num2 = int(next)
if num2 > 9:
if num2 in dict:
alter = dict[num2]
list_of_numbers.append(alter)
else:
pass
else:
list_of_numbers.append(num2)
num = int(next)
next = next - num
pass
length += 1
numbers2 = ''
for val in range(len(list_of_numbers)):
number = str(list_of_numbers[val])
numbers2 = numbers2 + number
# print(f"The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')}")
color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN])
return f" {BOLD} {color} The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')} {RESET}" | ffe5050a834a9111a50f28c425f1bd21f60605ff | 3,477 |
def hardcorenas_d(pretrained=False, **kwargs):
""" hardcorenas_D """
arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
'ir_r1_k3_s1_e3_c80_se0.25'],
['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
'ir_r1_k5_s1_e3_c112_se0.25'],
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
return model | ff9be560a0061101fd672bd115fbfd8920537177 | 3,478 |
import tqdm
def refine(weights, trees, X, Y, epochs, lr, batch_size, optimizer, verbose):
"""Performs SGD using the MSE loss over the leaf nodes of the given trees on the given data. The weights of each tree are respected during optimization but not optimized.
Args:
weights (np.array): The weights of the trees.
trees (list of Tree): The trees.
X (2d np.array): The data.
Y (np.array): The targe.
epochs (int): The number of epochs SGD is performed.
lr (float): The learning rate of SGD.
batch_size (int): The batch size of SGD
        optimizer (str): The optimizer used for optimization. Can be {"sgd", "adam"}.
verbose (bool): If True outputs the loss during optimization.
Returns:
list of trees: The refined trees.
"""
n_classes = trees[0].n_classes
if batch_size > X.shape[0]:
if verbose:
print("WARNING: The batch size for SGD is larger than the dataset supplied: batch_size = {} > X.shape[0] = {}. Using batch_size = X.shape[0]".format(batch_size, X.shape[0]))
batch_size = X.shape[0]
# To make the following SGD somewhat efficient this code extracts all the leaf nodes and gathers them in an array. To do so it iterates over all trees and all nodes in the trees. Each leaf node is added to the leafs array and the corresponding node.id is stored in mappings. For scikit-learn trees this would be much simpler as they already offer a dedicated leaf field:
# leafs = []
# for tree in trees:
# tmp = tree.tree_.value / tree.tree_.value.sum(axis=(1,2))[:,np.newaxis,np.newaxis]
# leafs.append(tmp.squeeze(1))
mappings = []
leafs = []
for t, w in zip(trees, weights):
leaf_mapping = {}
l = []
for i, n in enumerate(t.nodes):
if n.prediction is not None:
leaf_mapping[n.id] = len(l)
# Normalize the values in the leaf nodes for SGD. This is usually a better initialization
pred = np.array(n.prediction) / sum(n.prediction)
l.append(pred)
mappings.append(leaf_mapping)
leafs.append(np.array(l))
if optimizer == "adam":
m = []
v = []
t = 1
for l in leafs:
m.append(np.zeros_like(l))
v.append(np.zeros_like(l))
for epoch in range(epochs):
mini_batches = create_mini_batches(X, Y, batch_size, True)
batch_cnt = 0
loss_sum = 0
accuracy_sum = 0
with tqdm(total=X.shape[0], ncols=150, disable = not verbose) as pbar:
for x,y in mini_batches:
# Prepare the target and apply all trees
target_one_hot = np.array( [ [1.0 if yi == i else 0.0 for i in range(n_classes)] for yi in y] )
indices = [apply(t, m, x) for t,m in zip(trees, mappings)]
pred = []
for i, idx, w in zip(range(len(trees)), indices, weights):
pred.append(w * leafs[i][idx])
pred = np.array(pred)
fbar = pred.sum(axis=0)
# SGD
if optimizer == "sgd":
deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes #* 1.0 / len(trees)
for i, idx in zip(range(len(trees)), indices):
np.add.at(leafs[i], idx, - lr * deriv)
else:
# Adam
deriv = 2 * (fbar - target_one_hot) * 1.0 / x.shape[0] * 1.0 / n_classes #* 1.0 / len(trees)
beta1 = 0.9
beta2 = 0.999
for i, idx in zip(range(len(trees)), indices):
grad = np.zeros_like(leafs[i])
np.add.at(grad, idx, deriv)
m[i] = beta1 * m[i] + (1-beta1) * grad
v[i] = beta2 * v[i] + (1-beta2) * (grad ** 2)
m_corrected = m[i] / (1-beta1**t)
v_corrected = v[i] / (1-beta2**t)
leafs[i] += - lr * m_corrected / (np.sqrt(v_corrected) + 1e-8)
t += 1
# compute some statistics
loss_sum += ((fbar - target_one_hot)**2).mean()
accuracy_sum += (fbar.argmax(axis=1) == y).mean() * 100.0
batch_cnt += 1
pbar.update(x.shape[0])
desc = '[{}/{}] loss {:2.4f} accuracy {:2.4f}'.format(
epoch,
epochs-1,
loss_sum / batch_cnt,
accuracy_sum / batch_cnt,
)
pbar.set_description(desc)
# Copy the optimized leafs back into the trees with the pre-computed mapping
for t, m, l in zip(trees, mappings, leafs):
for nid, i in m.items():
t.nodes[nid].prediction = l[i].tolist()
return trees | 6704e36b61ac9bda65ba0e118590aa2b627c8e2a | 3,479 |
from typing import ClassVar
from typing import Any
from typing import Dict
def fetch_db_object(cls: ClassVar, body: Any):
"""Fetch a database object via SQLAlchemy.
:param cls: the class of object to fetch.
:param body: the body of the object. If the body is None then None is returned (for the case where no object
exists), if the body is already of type cls then the body is returned as the object and if the body is a dictionary
with the key 'id' a query is made to fetch the given object.
:return: the object.
"""
if body is None:
item = None
elif isinstance(body, cls):
item = body
elif isinstance(body, Dict):
if "id" not in body:
raise AttributeError(f"id not found in {body}")
id = body["id"]
item = session_.query(cls).filter(cls.id == id).one_or_none()
if item is None:
raise ValueError(f"{item} with id {id} not found")
else:
raise ValueError(f"Unknown item type {body}")
return item | ae4a96ac9875d5b936df1d9c05f8a022a9a4b51e | 3,480 |
def should_skip_cred_test():
"""
Returns `True` if a test requiring credentials should be skipped.
Otherwise returns `False`
"""
if username is None or password is None:
return True
return False | c5f45a20f7febc100a2f2eb950697c91837e0281 | 3,481 |
from typing import List
from pathlib import Path
def list_input_images(img_dir_or_csv: str,
bucket_name: str = None,
glob_patterns: List = None):
"""
Create list of images from given directory or csv file.
:param img_dir_or_csv: (str) directory containing input images or csv with list of images
:param bucket_name: (str, optional) name of aws s3 bucket
:param glob_patterns: (list of str) if directory is given as input (not csv), these are the glob patterns that will be used
to find desired images
returns list of dictionaries where keys are "tif" and values are paths to found images. "meta" key is also added
if input is csv and second column contains a metadata file. Then, value is path to metadata file.
"""
if bucket_name:
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
if img_dir_or_csv.endswith('.csv'):
bucket.download_file(img_dir_or_csv, 'img_csv_file.csv')
list_img = read_csv('img_csv_file.csv')
else:
raise NotImplementedError(
'Specify a csv file containing images for inference. Directory input not implemented yet')
else:
if img_dir_or_csv.endswith('.csv'):
list_img = read_csv(img_dir_or_csv)
elif is_url(img_dir_or_csv):
list_img = []
img_path = Path(img_dir_or_csv)
img = {}
img['tif'] = img_path
list_img.append(img)
else:
img_dir = Path(img_dir_or_csv)
assert img_dir.is_dir() or img_dir.is_file(), f'Could not find directory/file "{img_dir_or_csv}"'
list_img_paths = set()
if img_dir.is_dir():
for glob_pattern in glob_patterns:
assert isinstance(glob_pattern, str), f'Invalid glob pattern: "{glob_pattern}"'
list_img_paths.update(sorted(img_dir.glob(glob_pattern)))
else:
                list_img_paths.update([img_dir])
list_img = []
for img_path in list_img_paths:
img = {}
img['tif'] = img_path
list_img.append(img)
        assert len(list_img) > 0, f'No .tif files found in {img_dir_or_csv}'
return list_img | 0dccd2d0356b8f89991a1ab1f8a621e696918ab5 | 3,482 |
def get_insta_links(L: Instaloader, url: str) -> tuple:
"""
    Resolve an Instagram post URL to its Post object.
    :param url: URL of the post
    :return: success status and the resolved Post object (an empty list on failure)
"""
try:
shortcode = get_insta_shortcode(url)
post = Post.from_shortcode(L.context, shortcode)
return True, post
except Exception as e:
print(str(e))
return False, [] | 6ee9eac712d4603d1b7cffedd11cf07e4345ec0a | 3,483 |
async def http_request_callback(_request: HttpRequest) -> HttpResponse:
"""A response handler which returns some text"""
with open(__file__, 'rb') as file_pointer:
buf = file_pointer.read()
headers = [
(b'content-type', b'text/plain'),
(b'content-length', str(len(buf)).encode('ascii'))
]
return HttpResponse(200, headers, bytes_writer(buf, chunk_size=-1)) | 2c5bdf2e4617c7780fe9c8d0b4a65b363e05babc | 3,485 |
from typing import Tuple
from typing import List
from typing import Union
def item_coverage(
possible_users_items: Tuple[List[Union[int, str]], List[Union[int, str]]],
recommendations: List[Tuple[Union[int, str], Union[int, str]]],
) -> float:
"""
Calculates the coverage value for items in possible_users_items[1] given the collection of recommendations.
Recommendations over users/items not in possible_users_items are discarded.
Args:
possible_users_items (Tuple[List[Union[int, str]], List[Union[int, str]]]): contains exactly TWO sub-lists,
first one with users, second with items
recommendations (List[Tuple[Union[int, str], Union[int, str]]]): contains user-item recommendation tuples,
e.g. [(user1, item1),(user2, item2),]
Returns: item coverage (float): a metric showing the fraction of items which got recommended at least once.
"""
if len(possible_users_items) != 2:
raise ValueError("possible_users_items must be of length 2: [users, items]")
if np.any([len(x) == 0 for x in possible_users_items]):
raise ValueError("possible_users_items cannot hold empty lists!")
possible_items = set(possible_users_items[1])
items_with_recommendations = set([x[1] for x in recommendations])
items_without_recommendations = possible_items.difference(items_with_recommendations)
item_cov = 1 - len(items_without_recommendations) / len(possible_items)
return round(item_cov, 3) | f3eb59e0146561c8a18f74c548539b8cc9dcbb5b | 3,487 |
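A tiny worked example (numpy is assumed to be imported as np in the surrounding module): three catalogue items, of which the recommendations only ever surface two.

users_items = ([1, 2, 3], [10, 20, 30])
recs = [(1, 10), (2, 20), (3, 10)]
print(item_coverage(users_items, recs))  # 1 - 1/3 = 0.667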
def calc_area(img_it, contours, conv_sq, list_save):
"""
    Sum the areas of all contours for one image, converted to physical units.
    Parameters
    ----------
    img_it : identifier of the image being processed.
    contours : contours as returned by cv2.findContours.
    conv_sq : conversion factor from squared pixels to the desired squared unit.
    list_save : list collecting [image identifier, total area] pairs.
    Returns
    -------
    list
        list_save with the new [img_it, total area] entry appended.
"""
# Calculate areas
sum_file = 0
for c in contours:
M = cv2.moments(c)
area = M['m00']
area_conv = area * conv_sq
sum_file = sum_file + area_conv
# print(sum_file)
list_save.append([img_it, sum_file])
return(list_save) | f3bdba8892041edfe5ba0497c927f846fd8110d9 | 3,488 |
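A hypothetical driver sketch for the area routine above: one white square on a black mask and a made-up pixel-to-mm^2 factor; the two-value return of cv2.findContours assumes OpenCV >= 4.

import numpy as np
import cv2

mask = np.zeros((100, 100), dtype=np.uint8)
mask[20:60, 20:60] = 255
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
results = calc_area("img_001.png", contours, conv_sq=0.25, list_save=[])
print(results)  # [["img_001.png", total contour area in mm^2]]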