content (string, lengths 22–815k) | id (int64, 0–4.91M) |
---|---|
def threaded_main():
"""
Run non-blocking task.
Use daemon because we must finish writing image even after UI quits.
:return:
"""
thread = threading.Thread(target=main,
args=([sys.argv[0], '-c'],),
daemon=True)
thread.start() | 5,355,600 |
def delete_user():
""" Deletes the current user's account. """
DB.session.delete(current_user)
DB.session.commit()
flash("Account deleted", 'success')
return redirect('/login') | 5,355,601 |
def setup_code_gen(no_of_accessories):
""" Generate setup code
"""
try:
invalid_setup_codes = ['00000000','11111111','22222222','33333333','44444444','55555555',\
'66666666','77777777','88888888','99999999','12345678','87654321']
setup_code_created = []
for _ in range(no_of_accessories):
setup_code = ''
# random generate setup_code
for _ in range(8):
random_num = str(random.randint(0,9))
setup_code += random_num
# generate again till valid
while setup_code in invalid_setup_codes:
setup_code = ''
for _ in range(8):
                random_num = str(random.randint(0,9))
setup_code += random_num
# Check if the setup code has valid format
if (len(setup_code) != 8) or (not setup_code.isdigit()):
print "\nSetup code generated should be 8 numbers without any '-' in between. Eg. 11122333 \n"
raise SystemExit(1)
# Add the hyphen (-) in the PIN for salt-verifier generation. So, 11122333 will become 111-22-333
setup_code = setup_code[:3] + '-' + setup_code[3:5] + '-' + setup_code[5:]
setup_code_created.append(setup_code)
return setup_code_created
except StandardError as std_err:
print std_err
except:
raise | 5,355,602 |
def letter_difference(letter_1: str, letter_2: str) -> int:
"""
Return the difference in value between letter_1 and letter_2
"""
assert len(letter_1) == 1
assert len(letter_2) == 1
diff = letter_to_value[letter_2] - letter_to_value[letter_1]
if diff > 13:
diff -= 27
return diff | 5,355,603 |
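A quick usage sketch for the function above. `letter_to_value` is not shown in this snippet, so a hypothetical 27-symbol mapping is assumed here purely for illustration:
letter_to_value = {chr(ord('a') + i): i for i in range(26)}  # hypothetical mapping, 'a'..'z' -> 0..25
letter_to_value['_'] = 26  # hypothetical 27th symbol
print(letter_difference('a', 'c'))  # 2
print(letter_difference('a', 'y'))  # 24 wraps around to -3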
def refresh_access_token(request):
"""Updates `accessToken` in request cookies (not in browser cookies) using `refreshToken`. """
try:
refresh_token = request.COOKIES['refreshToken']
url = urljoin(settings.TIT_API_HOST, '/api/auth/token/refresh/')
        response = requests.post(url, {'refresh': refresh_token})
        response.raise_for_status()
        result = response.json()
request.COOKIES['accessToken'] = result['access']
return True
except (KeyError, requests.HTTPError, ):
"""Refresh token doesn't exist in cookies or response from TIT API
returned error status code.
"""
return False | 5,355,604 |
def ngrams(string, n=3, punctuation=PUNCTUATION, **kwargs):
""" Returns a list of n-grams (tuples of n successive words) from the given string.
Punctuation marks are stripped from words.
"""
s = string
s = s.replace(".", " .")
s = s.replace("?", " ?")
s = s.replace("!", " !")
s = [w.strip(punctuation) for w in s.split()]
s = [w.strip() for w in s if w.strip()]
return [tuple(s[i:i + n]) for i in range(len(s) - n + 1)] | 5,355,605 |
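A brief usage sketch, assuming `PUNCTUATION` is a string of punctuation characters similar to `string.punctuation`:
ngrams("The cat sat on the mat.", n=3)
# -> [('The', 'cat', 'sat'), ('cat', 'sat', 'on'), ('sat', 'on', 'the'), ('on', 'the', 'mat')]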
def binstringToBitList(binstring):
"""Converts a string of '0's and '1's to a list of 0's and 1's"""
bitList = []
for bit in binstring:
bitList.append(int(bit))
return bitList | 5,355,606 |
def test_reconstructed_plane():
"""
Test the reconstruction of a fitted plane from orientation
of the plane expressed in spherical coordinates
"""
fit = random_pca()
sdr = fit.strike_dip_rake()
ang = fit.angular_errors()
reconstructed = ReconstructedPlane(*sdr, *ang)
# Test that the nominal recovered orientation is the same
assert N.allclose(fit.normal, reconstructed.normal)
assert N.allclose(fit.angular_errors(), reconstructed.angular_errors())
assert_array_almost_equal(fit.strike_dip_rake(), reconstructed.strike_dip_rake())
fax = fit.axes
rax = reconstructed.axes
if fax[-1,-1] < 0:
fax *= -1
# Tolerance should be zero
assert_array_almost_equal(fax, rax)
cov = N.diag(fit.covariance_matrix / fit.covariance_matrix[-1,-1])
rcov = N.diag(reconstructed.covariance_matrix)
assert N.allclose(cov, rcov) | 5,355,607 |
def file_revisions(request, repo_id):
"""List file revisions in file version history page.
"""
repo = get_repo(repo_id)
if not repo:
raise Http404
# perm check
if check_folder_permission(request, repo_id, '/') is None:
raise Http404
return render_file_revisions(request, repo_id) | 5,355,608 |
def textctrl_info_t_get_tabsize(*args):
"""
textctrl_info_t_get_tabsize(self) -> unsigned int
"""
return _ida_kernwin.textctrl_info_t_get_tabsize(*args) | 5,355,609 |
def node_is_hidden(node_name):
"""
Returns whether or not given node is hidden
:param node_name: str
:return: bool
"""
if python.is_string(node_name):
return not maya.cmds.getAttr('{}.visibility'.format(node_name))
return not maya.cmds.getAttr('{}.visibility'.format(node.get_name(node_name))) | 5,355,610 |
def cargo_raze_transitive_deps():
"""Loads all dependnecies from repositories required for cargo-raze"""
rules_foreign_cc_dependencies()
rust_repositories() | 5,355,611 |
def generate_patch_grid_from_normalized_LAF(img: torch.Tensor, LAF: torch.Tensor, PS: int = 32) -> torch.Tensor:
"""Helper function for affine grid generation.
Args:
img: image tensor of shape :math:`(B, CH, H, W)`.
LAF: laf with shape :math:`(B, N, 2, 3)`.
PS: patch size to be extracted.
Returns:
grid
"""
raise_error_if_laf_is_not_valid(LAF)
B, N, _, _ = LAF.size()
num, ch, h, w = img.size()
# norm, then renorm is needed for allowing detection on one resolution
# and extraction at arbitrary other
LAF_renorm = denormalize_laf(LAF, img)
grid = F.affine_grid(LAF_renorm.view(B * N, 2, 3), [B * N, ch, PS, PS], align_corners=False) # type: ignore
grid[..., :, 0] = 2.0 * grid[..., :, 0].clone() / float(w) - 1.0
grid[..., :, 1] = 2.0 * grid[..., :, 1].clone() / float(h) - 1.0
return grid | 5,355,612 |
def _resolve_condition_operands(
left_operand: Union[str, pipeline_channel.PipelineChannel],
right_operand: Union[str, pipeline_channel.PipelineChannel],
) -> Tuple[str, str]:
"""Resolves values and PipelineChannels for condition operands.
Args:
left_operand: The left operand of a condition expression.
right_operand: The right operand of a condition expression.
Returns:
A tuple of the resolved operands values:
(left_operand_value, right_operand_value).
"""
# Pre-scan the operand to get the type of constant value if there's any.
# The value_type can be used to backfill missing PipelineChannel.channel_type.
value_type = None
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
if parameter_type in [
pipeline_spec_pb2.ParameterType.STRUCT,
pipeline_spec_pb2.ParameterType.LIST,
pipeline_spec_pb2.ParameterType
.PARAMETER_TYPE_ENUM_UNSPECIFIED,
]:
input_name = _additional_input_name_for_pipeline_channel(
value_or_reference)
raise ValueError('Conditional requires scalar parameter values'
' for comparison. Found input "{}" of type {}'
' in pipeline definition instead.'.format(
input_name,
value_or_reference.channel_type))
parameter_types = set()
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
else:
parameter_type = type_utils.get_parameter_type(
type(value_or_reference).__name__)
parameter_types.add(parameter_type)
if len(parameter_types) == 2:
# Two different types being compared. The only possible types are
# String, Boolean, Double and Integer. We'll promote the other type
# using the following precedence:
# String > Boolean > Double > Integer
if pipeline_spec_pb2.ParameterType.STRING in parameter_types:
canonical_parameter_type = pipeline_spec_pb2.ParameterType.STRING
elif pipeline_spec_pb2.ParameterType.BOOLEAN in parameter_types:
canonical_parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
else:
# Must be a double and int, promote to double.
assert pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE in parameter_types, \
'Types: {} [{} {}]'.format(
parameter_types, left_operand, right_operand)
assert pipeline_spec_pb2.ParameterType.NUMBER_INTEGER in parameter_types, \
'Types: {} [{} {}]'.format(
parameter_types, left_operand, right_operand)
canonical_parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
elif len(parameter_types) == 1: # Both operands are the same type.
canonical_parameter_type = parameter_types.pop()
else:
# Probably shouldn't happen.
raise ValueError('Unable to determine operand types for'
' "{}" and "{}"'.format(left_operand, right_operand))
operand_values = []
for value_or_reference in [left_operand, right_operand]:
if isinstance(value_or_reference, pipeline_channel.PipelineChannel):
input_name = _additional_input_name_for_pipeline_channel(
value_or_reference)
operand_value = "inputs.parameter_values['{input_name}']".format(
input_name=input_name)
parameter_type = type_utils.get_parameter_type(
value_or_reference.channel_type)
if parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER:
operand_value = 'int({})'.format(operand_value)
elif isinstance(value_or_reference, str):
operand_value = "'{}'".format(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.STRING
elif isinstance(value_or_reference, bool):
# Booleans need to be compared as 'true' or 'false' in CEL.
operand_value = str(value_or_reference).lower()
parameter_type = pipeline_spec_pb2.ParameterType.BOOLEAN
elif isinstance(value_or_reference, int):
operand_value = str(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
else:
assert isinstance(value_or_reference, float), value_or_reference
operand_value = str(value_or_reference)
parameter_type = pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
if parameter_type != canonical_parameter_type:
            # Type-cast so CEL does not complain.
if canonical_parameter_type == pipeline_spec_pb2.ParameterType.STRING:
assert parameter_type in [
pipeline_spec_pb2.ParameterType.BOOLEAN,
pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
]
operand_value = "'{}'".format(operand_value)
elif canonical_parameter_type == pipeline_spec_pb2.ParameterType.BOOLEAN:
assert parameter_type in [
pipeline_spec_pb2.ParameterType.NUMBER_INTEGER,
pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE,
]
                operand_value = 'true' if int(operand_value) != 0 else 'false'
else:
assert canonical_parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_DOUBLE
assert parameter_type == pipeline_spec_pb2.ParameterType.NUMBER_INTEGER
operand_value = 'double({})'.format(operand_value)
operand_values.append(operand_value)
return tuple(operand_values) | 5,355,613 |
def SanityCheck(help_provider, help_name_map):
"""Helper for checking that a HelpProvider has minimally adequate content."""
# Sanity check the content.
assert (len(help_provider.help_spec.help_name) > 1
and len(help_provider.help_spec.help_name) < MAX_HELP_NAME_LEN)
for hna in help_provider.help_spec.help_name_aliases:
assert hna
one_line_summary_len = len(help_provider.help_spec.help_one_line_summary)
assert (one_line_summary_len > MIN_ONE_LINE_SUMMARY_LEN
and one_line_summary_len < MAX_ONE_LINE_SUMMARY_LEN)
assert len(help_provider.help_spec.help_text) > 10
# Ensure there are no dupe help names or aliases across commands.
name_check_list = [help_provider.help_spec.help_name]
name_check_list.extend(help_provider.help_spec.help_name_aliases)
for name_or_alias in name_check_list:
        if name_or_alias in help_name_map:
raise CommandException(
'Duplicate help name/alias "%s" found while loading help from %s. '
'That name/alias was already taken by %s' % (
name_or_alias, help_provider.__module__,
help_name_map[name_or_alias].__module__)) | 5,355,614 |
def linkQuantFilesForDabs(cfg):
"""
Create a per-DAB file link to the quant file. This is needed later by Data2DB.
"""
# link to the new quant file
if cfg.configVerified is False:
raise ValueError('Please run checkConfig(cfg) prior to calling linkQuantFiles')
print("Linking quant files for each DAB file\n")
# copy quant file to the output dir
baseName = os.path.basename(cfg.quantFile)
targetQuantFile = os.path.join(cfg.outDir, baseName)
os.system(f'cp {cfg.quantFile} {targetQuantFile}')
datasets = readDatasetList(cfg.datasetsFile)
for (pclFile, dsetPlat, platform) in datasets:
m = re.match("(.*).pcl", pclFile)
datasetName = m.group(1)
quantFile = os.path.join(cfg.dabDir, f'{datasetName}.quant')
if os.path.exists(quantFile):
os.system(f"rm -rf {quantFile}")
cmd = f"ln -s {targetQuantFile} {quantFile}"
# print(cmd)
os.system(cmd) | 5,355,615 |
def format_record(test_record):
"""Create a properly formatted Kinesis, S3, or SNS record.
Supports a dictionary or string based data record. Reads in
event templates from the test/integration/templates folder.
Args:
test_record: Test record metadata dict with the following structure:
data - string or dict of the raw data
description - a string describing the test that is being performed
            trigger - bool indicating whether the record should produce an alert
source - which stream/s3 bucket originated the data
service - which aws service originated the data
compress (optional) - if the payload needs to be gzip compressed or not
Returns:
dict in the format of the specific service
"""
service = test_record['service']
source = test_record['source']
compress = test_record.get('compress')
data_type = type(test_record['data'])
if data_type == dict:
data = json.dumps(test_record['data'])
elif data_type in (unicode, str):
data = test_record['data']
else:
LOGGER_CLI.info('Invalid data type: %s', type(test_record['data']))
return
# Get the template file for this particular service
template_path = os.path.join(DIR_TEMPLATES, '{}.json'.format(service))
with open(template_path, 'r') as service_template:
try:
template = json.load(service_template)
except ValueError as err:
LOGGER_CLI.error('Error loading %s.json: %s', service, err)
return
if service == 's3':
# Set the S3 object key to a random value for testing
test_record['key'] = ('{:032X}'.format(random.randrange(16**32)))
template['s3']['object']['key'] = test_record['key']
template['s3']['object']['size'] = len(data)
template['s3']['bucket']['arn'] = 'arn:aws:s3:::{}'.format(source)
template['s3']['bucket']['name'] = source
# Create the mocked s3 object in the designated bucket with the random key
put_mocked_s3_object(source, test_record['key'], data)
elif service == 'kinesis':
if compress:
kinesis_data = base64.b64encode(zlib.compress(data))
else:
kinesis_data = base64.b64encode(data)
template['kinesis']['data'] = kinesis_data
template['eventSourceARN'] = 'arn:aws:kinesis:us-east-1:111222333:stream/{}'.format(source)
elif service == 'sns':
template['Sns']['Message'] = data
template['EventSubscriptionArn'] = 'arn:aws:sns:us-east-1:111222333:{}'.format(source)
else:
LOGGER_CLI.info('Invalid service %s', service)
return template | 5,355,616 |
def int_to_bitstr(int_value: int) -> str:
"""
A function which returns its bit representation as a string.
Arguments:
int_value (int) - The int value we want to get the bit representation for.
Return:
str - The string representation of the bits required to form the int.
"""
return bin(int_value)[2:] | 5,355,617 |
def target_reached(effect):
"""target amount has been reached (100% or more)"""
if not effect.instance.target:
return False
return effect.instance.amount_raised >= effect.instance.target | 5,355,618 |
def resid_mask(ints, wfs_map=read_map(wfs_file), act_map=read_map(act_file), num_aps=236):
"""
    Returns a mask of the valid actuators in the actuator array, selected
    from the num_aps sub-apertures with the greatest median intensity.
    ints: Nx304 intensity array (any units)
    N: Number of timestamps
"""
# Check inputs
N = ints.shape[0] # Num timestamps
# Aggregate intensities over all timestamps
med_ints = np.median(ints, axis=0)
# Fill WFS map with aggregated intensities
int_map = wfs_map.copy()
int_map[np.where(int_map==1)] = med_ints
# Find lenslets with greatest intensity
idxs = np.flip(np.argsort(int_map, axis=None))[:num_aps] # flat idxs of sort
idxs = np.unravel_index(idxs, wfs_map.shape) # 2D idxs of sort
# Mask for good sub-ap values
good_aps = np.zeros(wfs_map.shape, dtype=int)
good_aps[idxs] = 1
good_aps = good_aps * wfs_map # Just in case
# Mask for good actuator values
good_acts = np.pad(good_aps, ((1,1),(1,1)))
good_acts = (good_acts[1:,1:] | good_acts[1:,:-1]
| good_acts[:-1,:-1] | good_acts[:-1,1:]) * act_map
return good_acts | 5,355,619 |
def session():
"""Sets up a HTTP session with a retry policy."""
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.5)
s.mount("http://", HTTPAdapter(max_retries=retries))
return s | 5,355,620 |
def fit(df, methodtype='hc', scoretype='bic', black_list=None, white_list=None, bw_list_method='enforce', max_indegree=None, epsilon=1e-4, max_iter=1e6, verbose=3):
"""Structure learning fit model.
Description
-----------
Search strategies for structure learning
The search space of DAGs is super-exponential in the number of variables and the above scoring functions allow for local maxima.
To learn model structure (a DAG) from a data set, there are three broad techniques:
1. Score-based structure learning (BIC/BDeu/K2 score; exhaustive search, hill climb/tabu search)
a. exhaustivesearch
b. hillclimbsearch
2. Constraint-based structure learning (PC)
a. chi-square test
3. Hybrid structure learning (The combination of both techniques) (MMHC)
Score-based Structure Learning
This approach construes model selection as an optimization task. It has two building blocks:
    A scoring function s_D : M -> R that maps models to a numerical score, based on how well they fit to a given data set D.
A search strategy to traverse the search space of possible models M and select a model with optimal score.
Commonly used scoring functions to measure the fit between model and data are Bayesian Dirichlet scores such as BDeu or K2
and the Bayesian Information Criterion (BIC, also called MDL).
As before, BDeu is dependent on an equivalent sample size.
Parameters
----------
df : pd.DataFrame()
Input dataframe.
methodtype : str, (default : 'hc')
String Search strategy for structure_learning.
'hc' or 'hillclimbsearch' (default)
'ex' or 'exhaustivesearch'
'cs' or 'constraintsearch'
scoretype : str, (default : 'bic')
Scoring function for the search spaces.
'bic', 'k2', 'bdeu'
black_list : List or None, (default : None)
If a list of edges is provided as black_list, they are excluded from the search and the resulting model will not contain any of those edges. The default is None.
        Works only in case of methodtype='hc'. See also parameter: `bw_list_method`
white_list : List or None, (default : None)
If a list of edges is provided as white_list, the search is limited to those edges. The resulting model will then only contain edges that are in white_list. The default is None.
        Works only in case of methodtype='hc'. See also parameter: `bw_list_method`
bw_list_method : str, (default : 'enforce')
'enforce' : A list of edges can optionally be passed as `black_list` or `white_list` to exclude those edges or to limit the search. This option is limited to only methodtype='hc'
'filter' : Filter the dataframe based on `black_list` or `white_list`. Filtering can be done for every methodtype/scoretype.
max_indegree : int, (default : None)
If provided and unequal None, the procedure only searches among models where all nodes have at most max_indegree parents. (only in case of methodtype='hc')
epsilon: float (default: 1e-4)
Defines the exit condition. If the improvement in score is less than `epsilon`, the learned model is returned. (only in case of methodtype='hc')
max_iter: int (default: 1e6)
The maximum number of iterations allowed. Returns the learned model when the number of iterations is greater than `max_iter`. (only in case of methodtype='hc')
verbose : int, (default : 3)
Print progress to screen.
0: NONE
1: ERROR
2: WARNING
3: INFO (default)
4: DEBUG
5: TRACE
Returns
-------
dict with model.
Examples
--------
>>> import bnlearn as bn
>>>
>>> # Load asia DAG
>>> model = bn.import_DAG('asia')
>>>
>>> # plot ground truth
>>> G = bn.plot(model)
>>>
>>> # Sampling
>>> df = bn.sampling(model, n=10000)
>>>
>>> # Structure learning of sampled dataset
>>> model_sl = bn.structure_learning.fit(df, methodtype='hc', scoretype='bic')
>>>
>>> # Plot based on structure learning of sampled data
>>> bn.plot(model_sl, pos=G['pos'])
>>>
>>> # Compare networks and make plot
>>> bn.compare_networks(model, model_sl, pos=G['pos'])
"""
    assert isinstance(df, pd.DataFrame), 'df must be of type pd.DataFrame()'
assert (scoretype=='bic') | (scoretype=='k2') | (scoretype=='bdeu'), 'scoretype must be string: "bic", "k2" or "bdeu"'
assert (methodtype=='hc') | (methodtype=='ex')| (methodtype=='cs') | (methodtype=='exhaustivesearch')| (methodtype=='hillclimbsearch')| (methodtype=='constraintsearch'), 'Methodtype string is invalid' # noqa
if isinstance(white_list, str): white_list = [white_list]
if isinstance(black_list, str): black_list = [black_list]
if (white_list is not None) and len(white_list)==0: white_list = None
if (black_list is not None) and len(black_list)==0: black_list = None
if (bw_list_method is None) : bw_list_method='enforce'
config = {}
config['verbose'] = verbose
config['method'] = methodtype
config['scoring'] = scoretype
config['black_list'] = black_list
config['white_list'] = white_list
config['bw_list_method'] = bw_list_method
config['max_indegree'] = max_indegree
config['epsilon'] = epsilon
config['max_iter'] = max_iter
# Show warnings
# PGMPY_VER = version.parse(pgmpy.__version__)>version.parse("0.1.9") # Can be be removed if pgmpy >v0.1.9
# if (not PGMPY_VER) and ((black_list is not None) or (white_list is not None)):
# if config['verbose']>=2: print('[bnlearn] >Warning: black_list and white_list only works for pgmpy > v0.1.9') # Can be be removed if pgmpy >v0.1.9
if df.shape[1]>10 and df.shape[1]<15:
if config['verbose']>=2: print('[bnlearn] >Warning: Computing DAG with %d nodes can take a very long time!' %(df.shape[1]))
if (black_list is not None) and methodtype!='hc':
if config['verbose']>=2: print('[bnlearn] >Warning: blacklist only works in case of methodtype="hc"')
if (white_list is not None) and methodtype!='hc':
if config['verbose']>=2: print('[bnlearn] >Warning: white_list only works in case of methodtype="hc"')
if (max_indegree is not None) and methodtype!='hc':
if config['verbose']>=2: print('[bnlearn] >Warning: max_indegree only works in case of methodtype="hc"')
if config['verbose']>=3: print('[bnlearn] >Computing best DAG using [%s]' %(config['method']))
# Make sure columns are of type string
df.columns = df.columns.astype(str)
# Filter on white_list and black_list
df = _white_black_list(df, white_list, black_list, bw_list_method=config['bw_list_method'], verbose=verbose)
# ExhaustiveSearch can be used to compute the score for every DAG and returns the best-scoring one:
if config['method']=='ex' or config['method']=='exhaustivesearch':
"""The first property makes exhaustive search intractable for all but very small networks,
the second prohibits efficient local optimization algorithms to always find the optimal structure.
        Thus, identifying the ideal structure is often not tractable.
        Despite this, heuristic search strategies often yield good results
        if only a few nodes are involved (read: less than 5)."""
if (df.shape[1]>15) and (config['verbose']>=3):
            print('[bnlearn] >Warning: Structure learning with more than 15 nodes is not computationally feasible with exhaustivesearch. Use hillclimbsearch or constraintsearch instead!!')  # noqa
out = _exhaustivesearch(df, scoretype=config['scoring'], verbose=config['verbose'])
# HillClimbSearch
if config['method']=='hc' or config['method']=='hillclimbsearch':
out = _hillclimbsearch(df,
scoretype=config['scoring'],
black_list=config['black_list'],
white_list=config['white_list'],
max_indegree=config['max_indegree'],
bw_list_method=config['bw_list_method'],
epsilon=config['epsilon'],
max_iter=config['max_iter'],
verbose=config['verbose'],
)
# Constraint-based Structure Learning
if config['method']=='cs' or config['method']=='constraintsearch':
"""Constraint-based Structure Learning
A different, but quite straightforward approach to build a DAG from data is this:
Identify independencies in the data set using hypothesis tests
Construct DAG (pattern) according to identified independencies (Conditional) Independence Tests
Independencies in the data can be identified using chi2 conditional independence tests."""
out = _constraintsearch(df, verbose=config['verbose'])
    # Set up adjacency matrix
adjmat = _dag2adjmat(out['model'])
# adjmat = pd.DataFrame(data=False, index=out['model'].nodes(), columns=out['model'].nodes()).astype('Bool')
# # Fill adjmat with edges
# edges = out['model'].edges()
# for edge in edges:
# adjmat.loc[edge[0],edge[1]]=True
# adjmat.index.name = 'source'
# adjmat.columns.name = 'target'
# Store
out['adjmat'] = adjmat
out['config'] = config
# return
return(out) | 5,355,621 |
def handle_index():
"""
    Handles the index page: prepares the HTML and returns it to the client.
:return:
"""
return render_template("index.html") | 5,355,622 |
def save_pkl(obj, file_name):
"""Save an object to the given file name as pickle."""
with open(file_name, "wb") as f:
pickle.dump(obj, f) | 5,355,623 |
def get_polynomial_coefficients(degree=5):
"""
Return a list with coefficient names,
[1 x y x^2 xy y^2 x^3 ...]
"""
names = ["1"]
    for exp in range(1, degree + 1):  # 1, ..., degree
for x_exp in range(exp, -1, -1):
y_exp = exp - x_exp
if x_exp == 0:
x_str = ""
elif x_exp == 1:
x_str = r"$x$"
else:
x_str = rf"$x^{x_exp}$"
if y_exp == 0:
y_str = ""
elif y_exp == 1:
y_str = r"$y$"
else:
y_str = rf"$y^{y_exp}$"
names.append(x_str + y_str)
return names | 5,355,624 |
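For example, the nested loop above produces the following names for degree=2 (derived directly from the code):
get_polynomial_coefficients(degree=2)
# -> ['1', '$x$', '$y$', '$x^2$', '$x$$y$', '$y^2$']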
def cluster_stats(all_cluster_labels, draw = False):
"""
    Derive the median and variance of the cluster sizes for a given clustering.
    :param all_cluster_labels: cluster label assigned to each sample
    :param draw: if True, show a bar chart of the sorted cluster sizes
    :return:
"""
cluster_count = np.zeros(max(all_cluster_labels) + 1)
dictionary = dict()
dictionary['size'] = cluster_count
for cluster_label in all_cluster_labels:
cluster_count[cluster_label] += 1
print(f"Median: {np.median(cluster_count)}, Variance: {np.var(cluster_count)}")
print(f"Normalized Median: {np.median(cluster_count) / len(cluster_count)}, "
f"Normalized Var: {np.var(cluster_count) / len(cluster_count)}")
if draw:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
x_axis = [i for i in range(len(cluster_count))]
## let's sort the y_axis by cluster_count
cluster_count = np.sort(cluster_count)
y_axis = cluster_count
ax.bar(x_axis, y_axis)
plt.show() | 5,355,625 |
def _get_configured_credentials() -> Dict[str, bytes]:
"""
    Get the encrypted credentials stored on disk
"""
path = get_credentials_path()
credentials: Dict[str, bytes]
with open(path, "rb") as file_handle:
credentials = pickle.load(file_handle)
if len(credentials) == 0:
raise ConfigurationError(
"You have not setup your credentials yet. "
"Please do so by using 'omigami credentials-helper' CLI functionality and try again."
)
if not all(key in ["k", "u", "p"] for key in credentials.keys()):
raise ConfigurationError(
"Something seems wrong with your credentials. "
"Please, run 'omigami credentials-helper --unset' to remove them and then set them again."
)
return credentials | 5,355,626 |
def open(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Create a temporary project from a single source.
This view allows for all users, including anonymous users, to create a
temporary project which they can later save as a permanent project
if they wish. It aims to be a quick way to start a project and preview
publishing of a file.
TODO: See https://github.com/stencila/hub/pull/552 for more todos
"""
if request.method == "GET":
# TODO: If a GET request attempt to get source from end of URL or a query parameter
return render(request, "projects/open.html")
if request.method == "POST":
viewset = ProjectsViewSet.init("create", request, args, kwargs)
serializer = viewset.get_serializer(data=dict(temporary=True, public=True))
serializer.is_valid(raise_exception=True)
project = serializer.create(serializer.validated_data)
url = request.POST.get("url")
if url:
Source.from_address(url, project=project, path="main")
file = request.FILES.get("file")
if file:
UploadSource.objects.create(project=project, path=file.name, file=file)
# TODO: Make the source the project's main file. How to do before pulling it?
# TODO: Create a newer simpler job preview page, that is visible to
# anon users and redirect to that instead of to the project overview page
# job = source.pull()
return redir("ui-projects-retrieve", "temp", project.name)
raise Http404 | 5,355,627 |
def __Logout(si):
"""
Disconnect (logout) service instance
@param si: Service instance (returned from Connect)
"""
try:
if si:
content = si.RetrieveContent()
content.sessionManager.Logout()
except Exception as e:
pass | 5,355,628 |
def kabsch_rotate(P, Q):
"""
Rotate matrix P unto matrix Q using Kabsch algorithm
"""
U = kabsch(P, Q)
# Rotate P
P = np.dot(P, U)
return P | 5,355,629 |
def matrix(
odoo=ODOO_VERSIONS, pg=PG_VERSIONS, odoo_skip=frozenset(), pg_skip=frozenset()
):
"""All possible combinations.
We compute the variable matrix here instead of in ``.travis.yml`` because
this generates faster builds, given the scripts found in ``hooks``
directory are already multi-version-build aware.
"""
return map(
dict,
product(
product(("ODOO_MINOR",), ODOO_VERSIONS & odoo - odoo_skip),
product(("DB_VERSION",), PG_VERSIONS & pg - pg_skip),
),
) | 5,355,630 |
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Initialize a dict with {"page": 1/n} for all pages in corpus
new_dist = dict([(page, 1 / len(corpus)) for page in corpus])
finished = False
while not finished:
# Make copy before changing
prev_dist = copy.deepcopy(new_dist)
for page in corpus:
# Run the iterative algorithm on each page
new_dist[page] = iter_algorithm(damping_factor, len(corpus), page, corpus, new_dist)
# If any page has a difference over .001 from the previous run, the while loop will continue
for pg in new_dist:
finished = True
if abs(prev_dist[pg] - new_dist[pg]) > 0.001:
finished = False
break
return new_dist | 5,355,631 |
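A minimal usage sketch, assuming the corpus is a dict mapping each page name to the set of pages it links to (the helper `iter_algorithm` is not shown in this snippet):
corpus = {
    "1.html": {"2.html"},
    "2.html": {"1.html", "3.html"},
    "3.html": {"2.html"},
}
ranks = iterate_pagerank(corpus, damping_factor=0.85)
assert abs(sum(ranks.values()) - 1.0) < 1e-6  # per the docstring, the ranks sum to 1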
def user_enabled(inst, opt):
"""
Check whether the option is enabled.
:param inst: instance from content object init
:param url: Option to be checked
:return: True if enabled, False if disabled or non present
"""
return opt in inst.settings and inst.settings[opt] | 5,355,632 |
async def get_buttons_data(client: Client, message: Message):
"""
Get callback_data and urls of all the inline buttons of the message you replied to.
"""
reply_message = message.reply_to_message
if reply_message and reply_message.reply_markup:
if reply_message.reply_markup.inline_keyboard:
row_lines = []
for i, row in enumerate(reply_message.reply_markup.inline_keyboard):
row_buttons = []
for button in row:
if button.callback_data:
data = button.callback_data
elif button.url:
data = button.url
else:
continue
row_buttons.append(f"<i>{quote_html(button.text)}:</i> <code>{quote_html(data)}</code>")
buttons = "\n".join(row_buttons)
row_lines.append(f"<b>Row {i + 1}:</b>\n{buttons}")
if row_lines:
clean_time = 20
await message.edit_text("\n\n".join(row_lines))
else:
clean_time = 4
await message.edit_text("There is no any callback_data or url button inside this keyboard.")
return await clean_up(client, message.chat.id, message.message_id, clear_after=clean_time)
await message.edit_text("Reply to a message containing an inline keyboard to extract callback_data and urls.")
await clean_up(client, message.chat.id, message.message_id, clear_after=4) | 5,355,633 |
def send_notification(*, subsystem, recipients, subject, body_html, body_text):
"""Method to send a notification. A plugin may use only part of the information, but all fields are required.
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipients (`list` of :obj:`NotificationContact`): List of recipients
subject (`str`): Subject / title of the notification
        body_html (`str`): HTML formatted version of the message
body_text (`str`): Text formatted version of the message
Returns:
`None`
"""
from cloud_inquisitor import CINQ_PLUGINS
if not body_html and not body_text:
raise ValueError('body_html or body_text must be provided')
# Make sure that we don't have any duplicate recipients
recipients = list(set(recipients))
notifiers = map(lambda plugin: plugin.load(), CINQ_PLUGINS['cloud_inquisitor.plugins.notifiers']['plugins'])
for cls in filter(lambda x: x.enabled(), notifiers):
for recipient in recipients:
if isinstance(recipient, NotificationContact):
if recipient.type == cls.notifier_type:
try:
notifier = cls()
notifier.notify(subsystem, recipient.value, subject, body_html, body_text)
except Exception:
log.exception('Failed sending notification for {}/{}'.format(
recipient.type,
recipient.value
))
else:
log.warning('Unexpected recipient {}'.format(recipient)) | 5,355,634 |
def convert_img_k_to_jy(imagename, outfile):
"""
Calculated from flux density / brightness temp conversion page:
https://science.nrao.edu/facilities/vla/proposing/TBconv
NOTE the implicit conversion at the top for (beam/omega) into [ster]
Image must have units:
restfreq -> Hz
bmaj -> arcsec
bmin -> arcsec
cdelt1 -> rad
Parameters
----------
imagename : str
outfile : str
    Notes
    -----
    Two output images are written: ``outfile + '.jybm'`` in units of
    "Jy/beam" and ``outfile + '.jypix'`` in units of "Jy/pixel".
"""
# rest frequency for K to Jy conversion
freq_d = imhead(imagename, mode='get', hdkey='restfreq')
assert freq_d['unit'] == 'Hz'
nu_ghz = freq_d['value'] / 1e9 # to GHz
# beam major FWHM
bmaj_d = imhead(imagename, mode='get', hdkey='bmaj')
assert bmaj_d['unit'] == 'arcsec'
thetamaj_as = bmaj_d['value']
# beam minor FWHM
bmin_d = imhead(imagename, mode='get', hdkey='bmin')
assert bmin_d['unit'] == 'arcsec'
thetamin_as = bmin_d['value']
# pixel size, square pixels, from coordinate delta
cdelt1 = imhead(imagename, mode='get', hdkey='cdelt1')
assert cdelt1['unit'] == 'rad'
pixsize_as = abs(cdelt1['value']) * 206264.8 # to arcsec
# beam to pixel_size, prefactor = pi / (4 log(2))
beamsize_as2 = 1.3309004 * thetamaj_as * thetamin_as
pixperbeam = beamsize_as2 / pixsize_as**2
# compute image in units of Jy/beam
jybmimage = outfile + '.jybm'
if_exists_remove(jybmimage)
immath(
imagename=imagename,
expr='8.18249739e-7*{0:.6f}*{0:.6f}*IM0*{1:.6f}*{2:.6f}'.format(
nu_ghz, thetamaj_as, thetamin_as),
outfile=jybmimage,
)
imhead(jybmimage, mode='put', hdkey='bunit', hdvalue='Jy/beam')
# compute image in units of Jy/pix
jypiximage = outfile + '.jypix'
if_exists_remove(jypiximage)
immath(
imagename=jybmimage,
expr='IM0/{0:.6f}'.format(pixperbeam),
outfile=jypiximage,
)
imhead(jypiximage, mode='put', hdkey='bunit', hdvalue='Jy/pixel') | 5,355,635 |
def update_document(
*, db_session: Session = Depends(get_db), document_id: PrimaryKey, document_in: DocumentUpdate
):
"""Update a document."""
document = get(db_session=db_session, document_id=document_id)
if not document:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=[{"msg": "The document with this id does not exist."}],
)
document = update(db_session=db_session, document=document, document_in=document_in)
return document | 5,355,636 |
def from_jabsorb(request, seems_raw=False):
"""
Transforms a jabsorb request into a more Python data model (converts maps
and lists)
:param request: Data coming from Jabsorb
:param seems_raw: Set it to True if the given data seems to already have
been parsed (no Java class hint). If True, the lists will
be kept as lists instead of being converted to tuples.
:return: A Python representation of the given data
"""
if isinstance(request, (tuple, set, frozenset)):
# Special case : JSON arrays (Python lists)
return type(request)(from_jabsorb(element) for element in request)
elif isinstance(request, list):
# Check if we were a list or a tuple
if seems_raw:
return list(from_jabsorb(element) for element in request)
else:
return tuple(from_jabsorb(element) for element in request)
elif isinstance(request, dict):
# Dictionary
java_class = request.get(JAVA_CLASS)
json_class = request.get(JSON_CLASS)
seems_raw = not java_class and not json_class
if java_class:
# Java Map ?
if JAVA_MAPS_PATTERN.match(java_class) is not None:
return HashableDict((from_jabsorb(key), from_jabsorb(value))
for key, value in request["map"].items())
# Java List ?
elif JAVA_LISTS_PATTERN.match(java_class) is not None:
return HashableList(from_jabsorb(element)
for element in request["list"])
# Java Set ?
elif JAVA_SETS_PATTERN.match(java_class) is not None:
return HashableSet(from_jabsorb(element)
for element in request["set"])
# Any other case
result = AttributeMap((from_jabsorb(key),
from_jabsorb(value, seems_raw))
for key, value in request.items())
# Keep JSON class information as is
if json_class:
result[JSON_CLASS] = json_class
return result
elif not _is_builtin(request):
# Bean
for attr in dir(request):
# Only convert public fields
if not attr[0] == '_':
# Field conversion
setattr(request, attr, from_jabsorb(getattr(request, attr)))
return request
else:
# Any other case
return request | 5,355,637 |
def write_board_to_svg_file(board, file_name, hex_edge=50, hex_offset=0,
board_padding=None, pointy_top=True, trim_board=True, style=None):
"""
Writes given board to a svg file of given name.
:param board: 2 dimensional list of fields, each represented as a number
    :param file_name: name of the output file
:param hex_edge: length of hexagon's side (in pixels)
:param hex_offset: distance between side of one hexagon and its neighbour (in pixels)
    :param board_padding: padding of the board (in pixels)
:param pointy_top: specifies if hexagons should be pointy topped or flat topped
:param trim_board: if True, fields with a value 0 will be removed during transformation
:param style css style (as string)
"""
if board_padding is None:
board_padding = hex_edge
styles = ['.board { fill: white } .hex-field { fill: white; stroke: black } .hex-field-0 { fill: black }']
if style is not None:
styles.append(style)
hexagons = transform_board_into_hexagons(board, hex_edge, hex_offset, pointy_top, trim_board)
min_x, min_y, max_x, max_y = calculate_bounding_box(hexagons)
offset = (board_padding - min_x, board_padding - min_y)
hexagons = move_hexagons_by_offset(hexagons, offset)
board_size = (2 * board_padding + max_x - min_x, 2 * board_padding + max_y - min_y)
svg_image = create_svg_image(styles, board_size, hexagons)
svg_image.saveas(file_name)
return svg_image | 5,355,638 |
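A hypothetical usage sketch (the file name and board values are made up; fields with value 0 are trimmed by default):
board = [
    [1, 2, 0],
    [0, 1, 1],
    [2, 0, 1],
]
write_board_to_svg_file(board, "board.svg", hex_edge=40, hex_offset=2, pointy_top=False)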
def ensure_conf(app):
"""
    Ensure that for the given app the redbeat_conf
attribute is set to an instance of the RedBeatConfig
class.
"""
name = 'redbeat_conf'
app = app_or_default(app)
try:
config = getattr(app, name)
except AttributeError:
config = RedBeatConfig(app)
setattr(app, name, config)
return config | 5,355,639 |
def check_if_process_present(string_to_find):
"""Checks if process runs on machine
Parameters:
string_to_find (string): process we want to find
Returns:
found (bool): True if found process running
"""
output = check_output(["ps", "-ax"], universal_newlines=True)
if string_to_find in output:
return True
else:
return False | 5,355,640 |
def test_boussole_compile_auto(tests_settings, temp_builds_dir, manifest_name):
"""
Testing everything:
* Sass helpers correctly generate CSS;
* Manifest is correctly serialized to expected datas;
    * Built CSS is the same as the one stored in the data fixtures;
"""
manifest_css = manifest_name + ".css"
manifest_json = os.path.join(
tests_settings.fixtures_path,
'json',
manifest_name + ".json",
)
# Open JSON fixture for expected serialized data from parsed manifest
with open(manifest_json, "r") as fp:
expected = json.load(fp)
basepath = temp_builds_dir.join(
'sass_helper_boussole_compile_{}'.format(manifest_css)
)
basedir = basepath.strpath
template_sassdir = os.path.join(tests_settings.fixtures_path, 'sass')
test_sassdir = os.path.join(basedir, 'sass')
test_config_filepath = os.path.join(test_sassdir, 'settings.json')
# Copy Sass sources to compile from template
shutil.copytree(template_sassdir, test_sassdir)
# Get expected CSS content from file in fixture
expected_css_filepath = os.path.join(
tests_settings.fixtures_path,
"sass",
"css",
manifest_css
)
with io.open(expected_css_filepath, 'r') as fp:
expected_css_content = fp.read()
# Load boussole settings and search for compilable files
project = ProjectBase(backend_name="json")
settings = project.backend_engine.load(filepath=test_config_filepath)
compilable_files = ScssFinder().mirror_sources(
settings.SOURCES_PATH,
targetdir=settings.TARGET_PATH,
excludes=settings.EXCLUDES
)
# Since Boussole list every compilable Sass source, we select only the entry
# corresponding to the manifest we seek for (from "manifest_css")
source_css_filename = None
source_sass_filename = None
for k, v in compilable_files:
if v.endswith(manifest_css):
source_sass_filename = k
source_css_filename = v
break
# Compile only the source we target from "manifest_css"
compiler = SassCompileHelper()
success, message = compiler.safe_compile(
settings,
source_sass_filename,
source_css_filename
)
# Output error to ease debug
if not success:
print(u"Compile error with: {}".format(source_sass_filename))
print(message)
else:
        # Built CSS is identical to the expected one from the fixture
with io.open(source_css_filename, 'r') as fp:
compiled_content = fp.read()
assert expected_css_content == compiled_content
# Described manifest is the same as expected payload from fixture
manifest = Manifest()
manifest.load(compiled_content)
dump = json.loads(manifest.to_json())
assert expected == dump | 5,355,641 |
def consulta_dicionario(nivel):
"""
    Input: the selected level parameter ('facil', 'medio', 'dificil')
    Task: determine which dictionary to consult
    Output: dictionary parameters (texto, lacunas, gabarito)
"""
nivel_dicionario = nivel
if nivel_dicionario == 'facil':
texto = dicionario_nivel_facil['texto']
lacunas = dicionario_nivel_facil['lacunas']
gabarito = dicionario_nivel_facil['gabarito']
elif nivel_dicionario == 'medio':
texto = dicionario_nivel_medio['texto']
lacunas = dicionario_nivel_medio['lacunas']
gabarito = dicionario_nivel_medio['gabarito']
elif nivel_dicionario == 'dificil':
texto = dicionario_nivel_dificil['texto']
lacunas = dicionario_nivel_dificil['lacunas']
gabarito = dicionario_nivel_dificil['gabarito']
return texto, lacunas, gabarito | 5,355,642 |
def stats_check(
main_table: Table,
compare_table: Table,
checks: List[OutlierCheck] = [],
max_rows_returned: int = 100,
):
"""
:param main_table: main table
:type main_table: table object
:param compare_table: table to be compared
:type compare_table: table object
:param checks: check class object, which represent boolean expression
:type checks: Check
    :param max_rows_returned: number of rows returned if the check fails.
:type max_rows_returned: int
"""
return AgnosticStatsCheck(
main_table=main_table,
compare_table=compare_table,
checks=checks,
max_rows_returned=max_rows_returned,
) | 5,355,643 |
async def get_collectible_name(collectible_id: int, db: AsyncSession = Depends(get_db_session)):
"""Gets the collectible name"""
result = await destiny_items.get_collectible(db=db, collectible_id=collectible_id)
return NameModel(name=result.name) if result else NameModel(name=None) | 5,355,644 |
def test_pyflann_searches():
"""
"""
try:
num_neighbors = 3
pts = testdata_points(nPts=5743, nDims=2)
qpts = testdata_points(nPts=7, nDims=2)
import vtool_ibeis as vt
# sample a radius
radius = vt.L2(pts[0:1], qpts[0:1])[0] * 2 + 1
flann = FLANN_CLS()
print('NN_OnTheFly')
# build nn_index on the fly
indices1, dists1 = flann.nn(pts, qpts, num_neighbors, algorithm='hierarchical')
print(ub.hzcat('indices1, dists1 = ', indices1, dists1))
_build_params = flann.build_index(pts, algorithm='kmeans')
del _build_params
print('NN_Index')
indices2, dists2 = flann.nn_index(qpts, num_neighbors=num_neighbors)
print(ub.hzcat('indices2, dists2 = ', indices2, dists2))
# this can only be called on one query point at a time
# because the output size is unknown
print('NN_Radius, radius=%r' % (radius,))
indices3, dists3 = flann.nn_radius(pts[0], radius)
print('indices3 = %r ' % (indices3,))
print('dists3 = %r ' % (dists3,))
assert np.all(dists3 < radius)
except Exception as ex:
import utool as ut
ut.printex(ex, key_list=[
'query',
'query.shape',
'pts.shape',
], pad_stdout=True)
#utool.embed()
raise | 5,355,645 |
def prime_factors(n):
    """
    Return the set of prime factors of n
    :param n: int
    :return: set
    """
    all_factors = set()
    t = n
    # divide out all factors of 2 first
    while t % 2 == 0:
        t //= 2
        all_factors.add(2)
    # check the odd divisors greater than 2
    d = 3
    while d * d <= t:
        while t % d == 0:
            t //= d
            all_factors.add(d)
        d += 2
    # whatever remains (greater than 1) is itself prime
    if t > 1:
        all_factors.add(t)
    return all_factors | 5,355,646 |
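A couple of quick checks of the intended behaviour of the function above:
assert prime_factors(360) == {2, 3, 5}
assert prime_factors(10) == {2, 5}
assert prime_factors(13) == {13}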
def calculate_central_age(Ns, Ni, zeta, seZeta, rhod, Nd, sigma=0.15):
"""Function to calculate central age."""
Ns = np.array(Ns)
Ni = np.array(Ni)
# We just replace 0 counts with a low value, the age will be rounded to
# 2 decimals. That should take care of the zero count issue.
Ns = np.where(Ns == 0, 1e-10, Ns) # Do this to account for 0 track counts
Ni = np.where(Ni == 0, 1e-10, Ni) # Do this to account for 0 track counts
# Calculate mj
LAMBDA = 1.55125e-4
G = 0.5
m = Ns + Ni
p = Ns / m
theta = np.sum(Ns) / np.sum(m)
for i in range(0, 30):
w = m / (theta * (1 - theta) + (m - 1) * theta**2 * (1 - theta)**2 * sigma**2)
sigma = sigma * np.sqrt(np.sum(w**2 * (p - theta)**2) / np.sum(w))
theta = np.sum(w * p) / np.sum(w)
t = (1.0 / LAMBDA) * np.log( 1.0 + G * LAMBDA * zeta * rhod * (theta) / (1.0 - theta))
se = np.sqrt(1 / (theta**2 * (1.0 - theta)**2 * np.sum(w)) + 1.0 / Nd + (seZeta / zeta)**2) * t
return {"Central": np.round(t, 2), "se": np.round(se, 2), "sigma": np.round(sigma, 2)} | 5,355,647 |
def _get_cross_reference_token(auth_cookie: str) -> str:
"""Gets a new cross reference token affiliated with the Roblox auth cookie.
:param auth_cookie: Your Roblox authentication cookie.
:return: A fresh cross reference token.
"""
session: requests.Session = _get_session(auth_cookie)
response: requests.Response = session.post("https://auth.roblox.com/v2/logout")
try:
token = response.headers["x-csrf-token"]
except KeyError:
raise Exception("Please specify a valid auth cookie")
return token | 5,355,648 |
def erase_not_displayed(client):
"""Erase all non-displayed models from memory.
Args:
client (obj): creopyson Client.
Returns:
None
"""
return client._creoson_post("file", "erase_not_displayed") | 5,355,649 |
def reset_position_for_friends_image_details_from_voter(voter, twitter_profile_image_url_https,
facebook_profile_image_url_https):
"""
Reset all position image urls in PositionForFriends from we vote image details
:param voter:
:param twitter_profile_image_url_https:
:param facebook_profile_image_url_https:
:return:
"""
position_list_manager = PositionListManager()
position_manager = PositionManager()
stance_we_are_looking_for = ANY_STANCE
friends_vs_public = FRIENDS_ONLY
speaker_image_url_https = None
reset_all_position_image_urls_results = []
if positive_value_exists(twitter_profile_image_url_https):
speaker_image_url_https = twitter_profile_image_url_https
elif positive_value_exists(facebook_profile_image_url_https):
speaker_image_url_https = facebook_profile_image_url_https
positions_for_voter_results = position_list_manager.retrieve_all_positions_for_voter(
voter.id, voter.we_vote_id, stance_we_are_looking_for, friends_vs_public)
if positions_for_voter_results['position_list_found']:
friends_position_list = positions_for_voter_results['position_list']
for position_object in friends_position_list:
reset_position_image_urls_results = position_manager.reset_position_image_details(
position_object, speaker_image_url_https=speaker_image_url_https)
reset_all_position_image_urls_results.append(reset_position_image_urls_results)
results = {
'success': True,
'reset_all_position_results': reset_all_position_image_urls_results
}
return results | 5,355,650 |
def get_analysis(panda_data):
"""
Get Analysis of CSV Data
    :param panda_data: pandas DataFrame with a 'text' column
    :return: pandas DataFrame with the analysis columns added
    """
    # Create objects for analysis
    sentiment_object = SentimentConfig.sentiment_object
    ner_object = SentimentConfig.ner_object
    # Get list of sentences
    sentences = panda_data['text'].to_list()
    sentiment_result = np.array([sentiment_object.get_sentiment(i) for i in sentences])
panda_data["Positive Score"] = sentiment_result[:, 2]
panda_data["Negative Score"] = sentiment_result[:, 0]
panda_data["Neutral Score"] = sentiment_result[:, 1]
panda_data["Sentiment Result"] = sentiment_result[:, 3]
# NER Data Analysis Added
    ner_result = np.array([ner_object.get_ner(i) for i in sentences])
panda_data["Entity Result"] = ner_result
# Adjective Analysis Added
    adjective_result = np.array([ner_object.get_adjectives(i) for i in sentences])
panda_data["Adjective Result"] = adjective_result
return panda_data | 5,355,651 |
def create_coordinate_string_dict():
"""31パターンのヒモ。"""
w = 120
h = 120
return {
47: (0, 0),
57: (1*-w, 0),
58: (2*-w, 0),
16: (4*-w, 0),
35: (5*-w, 0),
36: (6*-w, 0),
38: (0, 1*-h),
13: (1*-w, 1*-h),
14: (2*-w, 1*-h),
15: (3*-w, 1*-h),
25: (4*-w, 1*-h),
17: (5*-w, 1*-h),
27: (6*-w, 1*-h),
37: (7*-w, 1*-h),
1357: (0, 2*-h),
1571: (1*-w, 2*-h),
7135: (2*-w, 2*-h),
3583: (4*-w, 2*-h),
274: (5*-w, 2*-h),
1361: (6*-w, 2*-h),
1371: (0, 3*-h),
15037: (1*-w, 3*-h),
3573: (2*-w, 3*-h),
416: (4*-w, 3*-h),
258: (6*-w, 3*-h),
1753: (0, 4*-h),
1351: (1*-w, 4*-h),
3175: (2*-w, 4*-h),
2572: (4*-w, 4*-h),
638: (5*-w, 4*-h),
1471: (6*-w, 4*-h),
} | 5,355,652 |
def election_flink_cluster(cluster_group=None):
"""
    Depending on the value of cluster_group, run the election for a single group or for all groups.
    A priority of -1 means the cluster has been blacklisted and does not take part in the election;
    election results do not update blacklisted clusters either.
    Only after a cluster's priority is manually changed from -1 to 0 can it take part in elections again.
    Debug clusters are not registered with the resource system.
@param cluster_group:
@return:
"""
cluster_label = "standard"
result_list = get_flink_session_std_cluster(resource_group_id=cluster_group, active=1)
    # Run the election per (cluster_group, cluster_label, version), or start a new cluster
flink_cluster_overview = []
func_info = []
jobnavi_cluster_info = {}
for result in result_list:
resource_group_id = result["resource_group_id"]
cluster_name = result["cluster_name"]
geog_area_code = result["geog_area_code"]
flink_cluster_overview.append(
{
"cluster_group": resource_group_id,
"cluster_label": cluster_label,
"cluster_name": cluster_name,
"geog_area_code": geog_area_code,
"version": FLINK_VERSION,
"overview": {},
}
)
if geog_area_code not in jobnavi_cluster_info:
jobnavi_cluster_id = JobNaviHelper.get_jobnavi_cluster("stream")
jobnavi_cluster_info[geog_area_code] = jobnavi_cluster_id
jobnavi_cluster_id = jobnavi_cluster_info[geog_area_code]
params = {
"geog_area_code": geog_area_code,
"jobnavi_cluster_id": jobnavi_cluster_id,
"cluster_name": cluster_name,
}
func_info.append([_get_flink_cluster_overview, params])
    # Limit the maximum concurrency to COMMON_MAX_COROUTINE_NUM
threads_res = []
for segment in [
func_info[i : i + COMMON_MAX_COROUTINE_NUM] for i in range(0, len(func_info), COMMON_MAX_COROUTINE_NUM)
]:
threads_res.extend(concurrent_call_func(segment))
    # Group by (cluster_group, cluster_label, version); the values of each group form one election set
grouped_flink_cluster_info = {}
error_clusters = []
for idx, flink_cluster in enumerate(flink_cluster_overview):
        # If the threaded call failed, threads_res[idx] is None
overview = threads_res[idx] or {}
if not overview:
            # Failed to fetch information for the current cluster
error_clusters.append(
{
"cluster_group": flink_cluster["resource_group_id"],
"cluster_label": cluster_label,
"cluster_name": flink_cluster["cluster_name"],
"version": FLINK_VERSION,
}
)
continue
flink_clusters_to_elect = grouped_flink_cluster_info.setdefault(
(
flink_cluster["cluster_group"],
flink_cluster["cluster_label"],
flink_cluster["version"],
),
[],
)
flink_clusters_to_elect.append(
{
"cluster_name": flink_cluster["cluster_name"],
"geog_area_code": flink_cluster["geog_area_code"],
"overview": overview,
}
)
    # Log the yarn-session clusters whose information could not be fetched
if error_clusters:
content = "<br>".join(
map(
lambda _cluster: "资源组: {}, 集群名称: {}".format(_cluster["cluster_group"], _cluster["cluster_name"]),
error_clusters,
)
)
logger.exception("get yarn-session schedule info error: %s\n" % content)
        send_message(SYSTEM_ADMINISTRATORS, "[Failed to fetch yarn-session cluster info]", content)
max_priority_clusters = []
apply_flink_clusters = []
for group_key, _flink_clusters_info in list(grouped_flink_cluster_info.items()):
        # Within _flink_clusters_info, set the priority of the least-loaded cluster to 1 and all others to 0.
        # If every cluster in _flink_clusters_info is in an abnormal (not low-load) state, apply for a new cluster for this group_key.
max_yarn_session_num = -1
max_priority_cluster_name = None
min_cluster_load_state = -1
        # Flag whether a new cluster needs to be started for the current cluster_group / cluster_label
has_available_cluster = False
        # Each group_key maps to exactly one geog_area_code
geog_area_code = None
for _flink_cluster_info in _flink_clusters_info:
_load_state = get_flink_cluster_load_state(_flink_cluster_info["overview"])
_yarn_session_num = _get_yarn_session_num(group_key[0], group_key[1], _flink_cluster_info["cluster_name"])
if _yarn_session_num > max_yarn_session_num:
max_yarn_session_num = _yarn_session_num
            # This standard (or high-spec) cluster group still has available resources, so do not apply for a new cluster
has_available_cluster = _load_state["available"] | has_available_cluster
if min_cluster_load_state == -1 or _load_state["value"] <= min_cluster_load_state:
max_priority_cluster_name = _flink_cluster_info["cluster_name"]
min_cluster_load_state = _load_state["value"]
geog_area_code = _flink_cluster_info["geog_area_code"]
if not has_available_cluster:
apply_flink_clusters.append(
{
"cluster_group": group_key[0],
"cluster_label": group_key[1],
"version": group_key[2],
"cluster_name": "{}_{}{}".format(group_key[0], group_key[1], max_yarn_session_num + 1),
"geog_area_code": geog_area_code,
}
)
        # For the current group_key, give the record matching cluster_name the highest priority
max_priority_clusters.append(
{
"cluster_group": group_key[0],
"cluster_label": group_key[1],
"version": group_key[2],
"cluster_name": max_priority_cluster_name,
"geog_area_code": geog_area_code,
}
)
    # Reset priorities
_update_flink_cluster_priority(max_priority_clusters)
    # Apply for new clusters
if apply_flink_clusters:
_apply_yarn_session(apply_flink_clusters) | 5,355,653 |
def all_but_ast(check):
"""Only passes AST to check."""
def _check_wrapper(contents, ast, **kwargs):
"""Wrap check and passes the AST to it."""
del contents
del kwargs
return check(ast)
return _check_wrapper | 5,355,654 |
def get_history_items():
"""
Get all history item
"""
return [
readline.get_history_item(i)
for i in xrange(1, readline.get_current_history_length() + 1)
] | 5,355,655 |
def generate_nucmer_commands(
filenames: List[Path],
outdir: Path = Path("."),
nucmer_exe: Path = pyani_config.NUCMER_DEFAULT,
filter_exe: Path = pyani_config.FILTER_DEFAULT,
maxmatch: bool = False,
) -> Tuple[List, List]:
"""Return list of NUCmer command-lines for ANIm.
:param filenames: a list of paths to input FASTA files
:param outdir: path to output directory
:param nucmer_exe: location of the nucmer binary
:param maxmatch: Boolean flag indicating to use NUCmer's -maxmatch option
The first element returned is a list of NUCmer commands, and the
second a corresponding list of delta_filter_wrapper.py commands.
The NUCmer commands should each be run before the corresponding
delta-filter command.
TODO: This return value needs to be reworked as a collection.
Loop over all FASTA files generating NUCmer command lines for each
pairwise comparison.
"""
nucmer_cmdlines, delta_filter_cmdlines = [], []
filenames = sorted(filenames) # enforce ordering of filenames
for idx, fname1 in enumerate(filenames[:-1]):
for fname2 in filenames[idx + 1 :]:
ncmd, dcmd = construct_nucmer_cmdline(
fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
)
nucmer_cmdlines.append(ncmd)
delta_filter_cmdlines.append(dcmd)
return (nucmer_cmdlines, delta_filter_cmdlines) | 5,355,656 |
def normalize(*args):
"""Scale a sequence of occurrences into probabilities that sum up to 1."""
total = sum(args)
return [arg / total for arg in args] | 5,355,657 |
def get_CZI_zstack(filename,frame,channel,filepath=None,img_info=None):
"""
Obtains a single z-stack from a 3D imaging time-series for a specified time and channel.
Parameters
----------
filename : str
Name of the file from which to retrieve the z-stack.
frame : int
The temporal slice of the image series from which to retrieve the z-stack.
channel : int
The channel from which to retrieve the z-stack.
filepath : str, optional
Path to the file.
img_info : tuple of ints, optional
5-tuple containing lengths of the `X`, `Y`, `Z` (spatial), `T` (temporal) dimensions
of the image series, and the number of channels, `num_channels`.
E.g. (sizeX,sizeY,sizeZ,sizeT,num_channels). See output of get_CZI_metadata().
Pass these pre-computed values for increased speed in batch processing.
Returns
-------
zstack : numpy.ndarray, or None
Z-stack of the image series specified by the desired `frame`; contains 3 spatial
dimensions. If loading is unsuccessful, `None` is returned.
"""
# prepare file name, check that file exists
if not (filepath is None):
czi_image = os.path.join(filepath,filename)
else:
czi_image = filename
if not os.path.exists(czi_image):
return None
# retrieve image dimensions, and number of channels
if img_info is None:
(sizeX,sizeY,sizeZ,sizeT,num_channels), _ = get_CZI_metadata(filename,filepath=filepath)
else:
assert len(img_info) == 5
(sizeX,sizeY,sizeZ,sizeT,num_channels) = img_info
# make sure frame and channel are in bounds
assert frame < sizeT
assert channel < num_channels
#initialize array and load z-stack
zstack = np.zeros((sizeZ, sizeY,sizeX))
with bioformats.ImageReader(czi_image) as reader:
for z in range(sizeZ):
zstack[z,:,:] = reader.read(t=frame,z=z,c=channel)
return zstack | 5,355,658 |
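# A hedged usage sketch ('experiment.czi' and the path are placeholders; requires
# the javabridge/bioformats JVM to be running, as elsewhere in this module):
# zstack = get_CZI_zstack('experiment.czi', frame=0, channel=1, filepath='/data/imaging')
# zstack.shape  # -> (sizeZ, sizeY, sizeX)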
def test_game():
"""
    Run through a few games and make sure there are
    no exceptions.
"""
for i in range(10):
for n in range(2, 5):
g = Game(n)
while not g.winner():
g.act(random.choice(g.options())) | 5,355,659 |
def _make_request(
resource: str,
from_currency_code: str,
to_currency_code: str,
timestamp: int,
access_token: str,
exchange_code: str,
num_records: int,
api_version: str
) -> requests.Response:
"""
API documentation for cryptocompare can be found at https://min-api.cryptocompare.com/documentation
"""
base_url = f"https://min-api.cryptocompare.com/data/{api_version}/{resource}"
params = {
"fsym": from_currency_code,
"tsym": to_currency_code,
"e": exchange_code,
"limit": num_records,
"toTs": timestamp,
"api_key": access_token
}
return requests.get(base_url, params=params) | 5,355,660 |
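# A hedged usage sketch; "histohour" and "CCCAGG" are assumed cryptocompare
# values and the access token is a placeholder, so this stays commented out:
# response = _make_request(
#     resource="histohour", from_currency_code="BTC", to_currency_code="USD",
#     timestamp=1600000000, access_token="YOUR_API_KEY", exchange_code="CCCAGG",
#     num_records=24, api_version="v2",
# )
# response.json()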
def dependencies_order_of_build(target_contract, dependencies_map):
""" Return an ordered list of contracts that is sufficient to sucessfully
deploys the target contract.
Note:
This function assumes that the `dependencies_map` is an acyclic graph.
"""
if len(dependencies_map) == 0:
return [target_contract]
if target_contract not in dependencies_map:
raise ValueError('no dependencies defined for {}'.format(target_contract))
order = [target_contract]
todo = list(dependencies_map[target_contract])
while len(todo):
target_contract = todo.pop(0)
target_pos = len(order)
for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all its dependencies
if dependency in order:
target_pos = order.index(dependency)
else:
todo.append(dependency)
order.insert(target_pos, target_contract)
order.reverse()
return order | 5,355,661 |
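# A small worked example (hypothetical contract names): 'Exchange' depends on
# 'Token', which has no dependencies, so 'Token' must be deployed first.
deps = {'Exchange': ['Token'], 'Token': []}
assert dependencies_order_of_build('Exchange', deps) == ['Token', 'Exchange']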
def projective_error_function(params, args):
"""
    :param params: flat parameter vector: intrinsic parameters (f, cx, cy, k0, k1)
        followed by the transform parameters
    :param args: (model, image) pair of 3D model points and observed 2D image points
    :return: flattened residual between the observed and reprojected image points
    """
    # f cx cy k0 k1
    project_params = params[0:5]
    f, cx, cy, k0, k1 = project_params
    K = eye(3, 3)
    K[0, 0] = f
    K[1, 1] = f
    # the principal point (cx, cy) fills the last column of K; k0/k1 are
    # distortion coefficients and do not belong in the intrinsic matrix
    K[0, 2] = cx
    K[1, 2] = cy
model, image = args
tp = params[5:]
_, R, t = transform(tp, model)
Rt = np.c_[R, t.transpose()]
# Reconstruct camera matrix
P = K @ Rt
# Project
X = np.zeros((4, len(model[0])))
X[0:3] = model
X[3] = 1
PX = P @ X
image_star = PX[0:2] / PX[2]
dataShape = image.shape
nData = dataShape[0] * dataShape[1]
imagevec = image.reshape(1, nData)[0]
image_star_vec = image_star.reshape(1, nData)[0]
return imagevec - image_star_vec | 5,355,662 |
def _transform_masks(y, transform, data_format=None, **kwargs):
"""Based on the transform key, apply a transform function to the masks.
Refer to :mod:`deepcell.utils.transform_utils` for more information about
    available transforms. An unknown transform key raises a ValueError.
Args:
y (numpy.array): Labels of ``ndim`` 4 or 5
transform (str): Name of the transform, one of
``{"deepcell", "disc", "watershed", None}``.
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
inputs. ``channels_last`` corresponds to inputs with shape
``(batch, height, width, channels)`` while ``channels_first``
corresponds to inputs with shape
``(batch, channels, height, width)``.
kwargs (dict): Optional transform keyword arguments.
Returns:
numpy.array: the output of the given transform function on ``y``.
Raises:
ValueError: Rank of ``y`` is not 4 or 5.
ValueError: Channel dimension of ``y`` is not 1.
ValueError: ``transform`` is invalid value.
"""
valid_transforms = {
'deepcell', # deprecated for "pixelwise"
'pixelwise',
'disc',
'watershed', # deprecated for "outer-distance"
'watershed-cont', # deprecated for "outer-distance"
'inner-distance',
'outer-distance',
'centroid', # deprecated for "inner-distance"
'fgbg'
}
if data_format is None:
data_format = K.image_data_format()
if y.ndim not in {4, 5}:
raise ValueError('`labels` data must be of ndim 4 or 5. Got', y.ndim)
channel_axis = 1 if data_format == 'channels_first' else -1
if y.shape[channel_axis] != 1:
raise ValueError('Expected channel axis to be 1 dimension. Got',
y.shape[1 if data_format == 'channels_first' else -1])
if isinstance(transform, str):
transform = transform.lower()
if transform not in valid_transforms and transform is not None:
raise ValueError('`{}` is not a valid transform'.format(transform))
if transform in {'pixelwise', 'deepcell'}:
if transform == 'deepcell':
warnings.warn('The `{}` transform is deprecated. Please use the '
'`pixelwise` transform instead.'.format(transform),
DeprecationWarning)
dilation_radius = kwargs.pop('dilation_radius', None)
separate_edge_classes = kwargs.pop('separate_edge_classes', False)
edge_class_shape = 4 if separate_edge_classes else 3
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + [edge_class_shape] + list(y.shape[2:]))
else:
shape = tuple(list(y.shape[0:-1]) + [edge_class_shape])
# using uint8 since should only be 4 unique values.
y_transform = np.zeros(shape, dtype=np.uint8)
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = transform_utils.pixelwise_transform(
mask, dilation_radius, data_format=data_format,
separate_edge_classes=separate_edge_classes)
elif transform in {'outer-distance', 'watershed', 'watershed-cont'}:
if transform in {'watershed', 'watershed-cont'}:
warnings.warn('The `{}` transform is deprecated. Please use the '
'`outer-distance` transform instead.'.format(transform),
DeprecationWarning)
by_frame = kwargs.pop('by_frame', True)
bins = kwargs.pop('distance_bins', None)
distance_kwargs = {
'bins': bins,
'erosion_width': kwargs.pop('erosion_width', 0),
}
# If using 3d transform, pass in scale arg
if y.ndim == 5 and not by_frame:
distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + list(y.shape[2:]))
else:
shape = y.shape[0:-1]
y_transform = np.zeros(shape, dtype=K.floatx())
if y.ndim == 5:
if by_frame:
_distance_transform = transform_utils.outer_distance_transform_movie
else:
_distance_transform = transform_utils.outer_distance_transform_3d
else:
_distance_transform = transform_utils.outer_distance_transform_2d
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = _distance_transform(mask, **distance_kwargs)
y_transform = np.expand_dims(y_transform, axis=-1)
if bins is not None:
# convert to one hot notation
            # uint8's max value of 255 seems like a generous limit for binning.
y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform in {'inner-distance', 'centroid'}:
if transform == 'centroid':
warnings.warn('The `{}` transform is deprecated. Please use the '
'`inner-distance` transform instead.'.format(transform),
DeprecationWarning)
by_frame = kwargs.pop('by_frame', True)
bins = kwargs.pop('distance_bins', None)
distance_kwargs = {
'bins': bins,
'erosion_width': kwargs.pop('erosion_width', 0),
'alpha': kwargs.pop('alpha', 0.1),
'beta': kwargs.pop('beta', 1)
}
# If using 3d transform, pass in scale arg
if y.ndim == 5 and not by_frame:
distance_kwargs['sampling'] = kwargs.pop('sampling', [0.5, 0.217, 0.217])
if data_format == 'channels_first':
shape = tuple([y.shape[0]] + list(y.shape[2:]))
else:
shape = y.shape[0:-1]
y_transform = np.zeros(shape, dtype=K.floatx())
if y.ndim == 5:
if by_frame:
_distance_transform = transform_utils.inner_distance_transform_movie
else:
_distance_transform = transform_utils.inner_distance_transform_3d
else:
_distance_transform = transform_utils.inner_distance_transform_2d
for batch in range(y_transform.shape[0]):
if data_format == 'channels_first':
mask = y[batch, 0, ...]
else:
mask = y[batch, ..., 0]
y_transform[batch] = _distance_transform(mask, **distance_kwargs)
y_transform = np.expand_dims(y_transform, axis=-1)
if distance_kwargs['bins'] is not None:
# convert to one hot notation
            # uint8's max value of 255 seems like a generous limit for binning.
y_transform = to_categorical(y_transform, num_classes=bins, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform == 'disc' or transform is None:
dtype = K.floatx() if transform == 'disc' else np.int32
y_transform = to_categorical(y.squeeze(channel_axis), dtype=dtype)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
elif transform == 'fgbg':
y_transform = np.where(y > 1, 1, y)
# convert to one hot notation
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, 1, y.ndim)
# using uint8 since should only be 2 unique values.
y_transform = to_categorical(y_transform, dtype=np.uint8)
if data_format == 'channels_first':
y_transform = np.rollaxis(y_transform, y.ndim - 1, 1)
return y_transform | 5,355,663 |
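# A hedged sketch of the 'fgbg' branch (shapes are illustrative; assumes the
# channels_last default and a Keras-style to_categorical that drops the trailing
# singleton label axis, giving roughly (batch, H, W, 2)):
# y = np.random.randint(0, 3, size=(1, 32, 32, 1))
# y_fgbg = _transform_masks(y, 'fgbg')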
def root_dir():
""" Returns root director for this project """
return os.path.dirname(os.path.realpath(__file__ + '/..')) | 5,355,664 |
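# Worked illustration (hypothetical path): with __file__ == '/home/user/project/scripts/util.py',
# os.path.realpath(__file__ + '/..') -> '/home/user/project/scripts'
# os.path.dirname(...)               -> '/home/user/project'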
def query_subgraph(seeds, genes_top, output_path): # pylint: disable=too-many-locals
"""
This function queries the data, writes the resulting subgraph and returns a
dictionary containing the number of nodes and edges.
seeds: list
genes_top: dict whose keys are genes and values are their ranks
"""
genes_list = list(genes_top.keys())
genes = set(seeds + genes_list)
seeds_set = frozenset(seeds)
# Produce the induced subgraph of genes in all networks.
nodes_raw, edges_raw = query_sqlite(genes)
# Only keep the nodes of interest.
nodes = [node for node in nodes_raw if normalized_node_id(node["_id"]) in genes]
edges = [
edge
for edge in edges_raw
if (
normalized_node_id(edge["_from"]) in genes
and normalized_node_id(edge["_to"]) in genes
)
]
def node_is_seed(node_id):
return normalized_node_id(node_id) in seeds_set
# graph data
cytoscape_nodes = [
dict(
data=cytoscape_node(
node,
genes_top.get(normalized_node_id(node["_id"])),
seed=node_is_seed(node["_id"]),
)
)
for node in nodes
]
cytoscape_edges = [dict(data=cytoscape_edge(edge)) for edge in edges]
cytoscape_data = dict(
nodes=cytoscape_nodes,
edges=cytoscape_edges,
)
cytoscape_path = os.path.join(output_path, "graph.json")
with open(cytoscape_path, "w") as cytoscape_json:
cytoscape_json.write(json.dumps(cytoscape_data))
# graph metadata
return dict(
nodes=len(nodes),
edges=len(edges),
) | 5,355,665 |
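# A hedged usage sketch (gene symbols and the output path are placeholders;
# relies on the query_sqlite helper used above):
# meta = query_subgraph(seeds=["TP53"], genes_top={"MDM2": 1, "CDKN1A": 2},
#                       output_path="/tmp/subgraph_out")
# meta  # -> {"nodes": <int>, "edges": <int>}; graph.json is written to output_path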
def get_cur_version():
"""
Get current apk version string
"""
pkg_name = cur_activity.getPackageName()
return str(
cur_activity.getPackageManager().getPackageInfo(
pkg_name, 0).versionName) | 5,355,666 |
def get_item(TableName=None, Key=None, AttributesToGet=None, ConsistentRead=None, ReturnConsumedCapacity=None, ProjectionExpression=None, ExpressionAttributeNames=None):
"""
The GetItem operation returns a set of attributes for the item with the given primary key. If there is no matching item, GetItem does not return any data and there will be no Item element in the response.
See also: AWS API Documentation
Exceptions
Examples
This example retrieves an item from the Music table. The table has a partition key and a sort key (Artist and SongTitle), so you must specify both of these attributes.
Expected Output:
:example: response = client.get_item(
TableName='string',
Key={
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
AttributesToGet=[
'string',
],
ConsistentRead=True|False,
ReturnConsumedCapacity='INDEXES'|'TOTAL'|'NONE',
ProjectionExpression='string',
ExpressionAttributeNames={
'string': 'string'
}
)
:type TableName: string
:param TableName: [REQUIRED]\nThe name of the table containing the requested item.\n
:type Key: dict
:param Key: [REQUIRED]\nA map of attribute names to AttributeValue objects, representing the primary key of the item to retrieve.\nFor the primary key, you must provide all of the attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for both the partition key and the sort key.\n\n(string) --\n(dict) --Represents the data for an attribute.\nEach attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\nFor more information, see Data Types in the Amazon DynamoDB Developer Guide .\n\nS (string) --An attribute of type String. For example:\n\n'S': 'Hello'\n\nN (string) --An attribute of type Number. For example:\n\n'N': '123.45'\nNumbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n\nB (bytes) --An attribute of type Binary. For example:\n\n'B': 'dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk'\n\nSS (list) --An attribute of type String Set. For example:\n\n'SS': ['Giraffe', 'Hippo' ,'Zebra']\n\n(string) --\n\n\nNS (list) --An attribute of type Number Set. For example:\n\n'NS': ['42.2', '-19', '7.5', '3.14']\nNumbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.\n\n(string) --\n\n\nBS (list) --An attribute of type Binary Set. For example:\n\n'BS': ['U3Vubnk=', 'UmFpbnk=', 'U25vd3k=']\n\n(bytes) --\n\n\nM (dict) --An attribute of type Map. For example:\n\n'M': {'Name': {'S': 'Joe'}, 'Age': {'N': '35'}}\n\n(string) --\n(dict) --Represents the data for an attribute.\nEach attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\nFor more information, see Data Types in the Amazon DynamoDB Developer Guide .\n\n\n\n\n\nL (list) --An attribute of type List. For example:\n\n'L': [ {'S': 'Cookies'} , {'S': 'Coffee'}, {'N', '3.14159'}]\n\n(dict) --Represents the data for an attribute.\nEach attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.\nFor more information, see Data Types in the Amazon DynamoDB Developer Guide .\n\n\n\nNULL (boolean) --An attribute of type Null. For example:\n\n'NULL': true\n\nBOOL (boolean) --An attribute of type Boolean. For example:\n\n'BOOL': true\n\n\n\n\n\n\n
:type AttributesToGet: list
:param AttributesToGet: This is a legacy parameter. Use ProjectionExpression instead. For more information, see AttributesToGet in the Amazon DynamoDB Developer Guide .\n\n(string) --\n\n
:type ConsistentRead: boolean
:param ConsistentRead: Determines the read consistency model: If set to true , then the operation uses strongly consistent reads; otherwise, the operation uses eventually consistent reads.
:type ReturnConsumedCapacity: string
:param ReturnConsumedCapacity: Determines the level of detail about provisioned throughput consumption that is returned in the response:\n\nINDEXES - The response includes the aggregate ConsumedCapacity for the operation, together with ConsumedCapacity for each table and secondary index that was accessed. Note that some operations, such as GetItem and BatchGetItem , do not access any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity information for table(s).\nTOTAL - The response includes only the aggregate ConsumedCapacity for the operation.\nNONE - No ConsumedCapacity details are included in the response.\n\n
:type ProjectionExpression: string
:param ProjectionExpression: A string that identifies one or more attributes to retrieve from the table. These attributes can include scalars, sets, or elements of a JSON document. The attributes in the expression must be separated by commas.\nIf no attribute names are specified, then all attributes are returned. If any of the requested attributes are not found, they do not appear in the result.\nFor more information, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide .\n
:type ExpressionAttributeNames: dict
:param ExpressionAttributeNames: One or more substitution tokens for attribute names in an expression. The following are some use cases for using ExpressionAttributeNames :\n\nTo access an attribute whose name conflicts with a DynamoDB reserved word.\nTo create a placeholder for repeating occurrences of an attribute name in an expression.\nTo prevent special characters in an attribute name from being misinterpreted in an expression.\n\nUse the # character in an expression to dereference an attribute name. For example, consider the following attribute name:\n\nPercentile\n\nThe name of this attribute conflicts with a reserved word, so it cannot be used directly in an expression. (For the complete list of reserved words, see Reserved Words in the Amazon DynamoDB Developer Guide ). To work around this, you could specify the following for ExpressionAttributeNames :\n\n{'#P':'Percentile'}\n\nYou could then use this substitution in an expression, as in this example:\n\n#P = :val\n\n\nNote\nTokens that begin with the : character are expression attribute values , which are placeholders for the actual value at runtime.\n\nFor more information on expression attribute names, see Specifying Item Attributes in the Amazon DynamoDB Developer Guide .\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Item': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'Table': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
}
}
}
}
Response Structure
(dict) --
Represents the output of a GetItem operation.
Item (dict) --
A map of attribute names to AttributeValue objects, as specified by ProjectionExpression .
(string) --
(dict) --
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
S (string) --
An attribute of type String. For example:
"S": "Hello"
N (string) --
An attribute of type Number. For example:
"N": "123.45"
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
B (bytes) --
An attribute of type Binary. For example:
"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
SS (list) --
An attribute of type String Set. For example:
"SS": ["Giraffe", "Hippo" ,"Zebra"]
(string) --
NS (list) --
An attribute of type Number Set. For example:
"NS": ["42.2", "-19", "7.5", "3.14"]
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
(string) --
BS (list) --
An attribute of type Binary Set. For example:
"BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
(bytes) --
M (dict) --
An attribute of type Map. For example:
"M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
(string) --
(dict) --
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
L (list) --
An attribute of type List. For example:
"L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N", "3.14159"}]
(dict) --
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see Data Types in the Amazon DynamoDB Developer Guide .
NULL (boolean) --
An attribute of type Null. For example:
"NULL": true
BOOL (boolean) --
An attribute of type Boolean. For example:
"BOOL": true
ConsumedCapacity (dict) --
The capacity units consumed by the GetItem operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ConsumedCapacity is only returned if the ReturnConsumedCapacity parameter was specified. For more information, see Read/Write Capacity Mode in the Amazon DynamoDB Developer Guide .
TableName (string) --
The name of the table that was affected by the operation.
CapacityUnits (float) --
The total number of capacity units consumed by the operation.
ReadCapacityUnits (float) --
The total number of read capacity units consumed by the operation.
WriteCapacityUnits (float) --
The total number of write capacity units consumed by the operation.
Table (dict) --
The amount of throughput consumed on the table affected by the operation.
ReadCapacityUnits (float) --
The total number of read capacity units consumed on a table or an index.
WriteCapacityUnits (float) --
The total number of write capacity units consumed on a table or an index.
CapacityUnits (float) --
The total number of capacity units consumed on a table or an index.
LocalSecondaryIndexes (dict) --
The amount of throughput consumed on each local index affected by the operation.
(string) --
(dict) --
Represents the amount of provisioned throughput capacity consumed on a table or an index.
ReadCapacityUnits (float) --
The total number of read capacity units consumed on a table or an index.
WriteCapacityUnits (float) --
The total number of write capacity units consumed on a table or an index.
CapacityUnits (float) --
The total number of capacity units consumed on a table or an index.
GlobalSecondaryIndexes (dict) --
The amount of throughput consumed on each global index affected by the operation.
(string) --
(dict) --
Represents the amount of provisioned throughput capacity consumed on a table or an index.
ReadCapacityUnits (float) --
The total number of read capacity units consumed on a table or an index.
WriteCapacityUnits (float) --
The total number of write capacity units consumed on a table or an index.
CapacityUnits (float) --
The total number of capacity units consumed on a table or an index.
Exceptions
DynamoDB.Client.exceptions.ProvisionedThroughputExceededException
DynamoDB.Client.exceptions.ResourceNotFoundException
DynamoDB.Client.exceptions.RequestLimitExceeded
DynamoDB.Client.exceptions.InternalServerError
Examples
This example retrieves an item from the Music table. The table has a partition key and a sort key (Artist and SongTitle), so you must specify both of these attributes.
response = client.get_item(
Key={
'Artist': {
'S': 'Acme Band',
},
'SongTitle': {
'S': 'Happy Day',
},
},
TableName='Music',
)
print(response)
Expected Output:
{
'Item': {
'AlbumTitle': {
'S': 'Songs About Life',
},
'Artist': {
'S': 'Acme Band',
},
'SongTitle': {
'S': 'Happy Day',
},
},
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'Item': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {'... recursive ...'}
},
'L': [
{'... recursive ...'},
],
'NULL': True|False,
'BOOL': True|False
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'Table': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
}
}
}
}
:returns:
(string) --
"""
pass | 5,355,667 |
def main(arguments=None):
"""Parse args and run
"""
parser = parse_arguments()
args = parser.parse_args(arguments)
smif.cli.log.setup_logging(args.verbose)
def exception_handler(exception_type, exception, traceback, debug_hook=sys.excepthook):
if args.verbose:
debug_hook(exception_type, exception, traceback)
else:
print("{}: {}".format(exception_type.__name__, exception))
sys.excepthook = exception_handler
if 'func' in args:
args.func(args)
else:
parser.print_help() | 5,355,668 |
def checkpointload(checkpointfile):
"""Loads an hyperoptimizer checkpoint from file
Returns a list of tuples (params, loss) referring to previous hyperoptimization trials
"""
try:
with open(checkpointfile, "rb") as f:
return pkl.load(f)
except (FileNotFoundError, EOFError):
return [] | 5,355,669 |
def encrypt_password(password, key):
"""
Encrypts the password using the given key.
Args:
password (str): password to be encrypted
key (str): key to be used to encrypt the password
"""
from time import time
from array import array
import hmac
import base64
h_hash = get_sha_hash
uhash = h_hash(','.join(str(x) for x in
[repr(time()), repr(os.getpid()),
repr(len(password)),
password, key]))[:16]
k_enc, k_auth = h_hash('enc' + key + uhash), h_hash('auth' + key + uhash)
pwd_len = len(password)
password_stream = array('L', password + '0000'[pwd_len & 3:])
x_key = expand_key(k_enc, pwd_len + 4)
for i_cnt in range(len(password_stream)):
password_stream[i_cnt] = password_stream[i_cnt] ^ x_key[i_cnt]
cipher_t = uhash + password_stream.tostring()[:pwd_len]
auth = hmac.new(cipher_t, k_auth).digest()
encrypt_str = cipher_t + auth[:8]
encoded_str = base64.encodestring(encrypt_str)
encrypted_password = encoded_str.rstrip('\n')
return encrypted_password | 5,355,670 |
def prepare_test_data(datapath):
""" Wrapper function to load the test dataset """
print("Loading and encoding the test dataset")
depth_test = np.array(pd.read_csv(os.path.join(datapath,'test_depth.txt'),sep="\t", header = None))
depth_test = depth_test.reshape(depth_test.shape[0],depth_test.shape[1], 1)
exp_test = np.array(pd.read_csv(os.path.join(datapath,'test_expression.txt'),sep="\t", header = None))
exp_test = exp_test.reshape(exp_test.shape[0], exp_test.shape[1],1)
time_test = np.array(pd.read_csv(os.path.join(datapath,'test_ref.txt'),sep="\t", header = None))
time_test = time_test.reshape(time_test.shape[0], time_test.shape[1], 1)
foldchange_test = np.array(pd.read_csv(os.path.join(datapath,'test_foldchange.txt'),sep="\t", header = None))
foldchange_test = foldchange_test.reshape(foldchange_test.shape[0], foldchange_test.shape[1], 1)
weight_test = time_test*foldchange_test
seq_test, y_test = load_sequence_data(datapath, 'test_sequences.csv')
test_bed= pr.read_bed(os.path.join(datapath,"test_tiles.bed"),
as_df=True)
print('Test labels shape:', y_test.shape)
print('Test features shape:', depth_test.shape, seq_test.shape, exp_test.shape, weight_test.shape)
return depth_test, exp_test, weight_test, seq_test, y_test, test_bed | 5,355,671 |
def get_case_color_marker(case):
"""Get color and marker based on case."""
black_o = ("#000000", "o")
teal_D = ("#469990", "D")
orange_s = ("#de9f16", "s")
purple_v = ("#802f99", "v")
bs = case["batch_size"]
sub = case["subsampling"]
mc = case["mc_samples"]
if sub is None and mc == 0: # only bs
mapping = {2: purple_v, 8: orange_s, 32: teal_D, 128: black_o}
try:
return mapping[bs]
except KeyError:
warn(f"Could not map bs={bs} to color-marker-pair. Returning (black, o)")
return black_o
if sub is not None and mc == 0: # only bs & sub
return teal_D
if sub is None and mc != 0: # only bs & mc
return orange_s
if sub is not None and mc != 0: # bs, sub & mc
return purple_v | 5,355,672 |
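# Example: a batch-size-only case maps straight through the bs -> style table.
assert get_case_color_marker(
    {"batch_size": 32, "subsampling": None, "mc_samples": 0}
) == ("#469990", "D")  # teal diamond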
def clean_integer_score(x):
"""Converts x from potentially a float or string into a clean integer, and replace NA and NP values with one string character"""
try:
x = str(int(float(x)))
except Exception as exc:
if isinstance(x, basestring):
pass
else:
raise
x = x.lower().strip()
return 'A' if x == 'na (not assesible)' else 'P' if x == 'np (not performed)' else x | 5,355,673 |
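# Behaviour sketch (left commented out: the function relies on Python 2's
# `basestring`, and the comparison keeps the original "assesible" spelling):
# clean_integer_score('3.0')                  # -> '3'
# clean_integer_score('NA (not assesible)')   # -> 'A'
# clean_integer_score('NP (not performed)')   # -> 'P'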
def do_stuff2():
"""This is not right."""
(first, second) = 1, 2, 3
return first + second | 5,355,674 |
def rule_block_distributor(rule_param, src_cortical_area, dst_cortical_area, src_neuron_id, z_offset):
"""
This rule helps to take a set of unique inputs from one cortical area and develop synaptic projections that can
lead to a comprehensive set of unique connections that covers all the combinations of the input values.
Note: This function is designed for the corner case of the destination cortical area being 1 dimensional in z
direction
"""
# todo: generalize this function so it takes the direction of the source and destination cortical areas as input
candidate_list = list()
block_list = blocks.z_block_refs(cortical_area=dst_cortical_area, x_ref=0, y_ref=0)
source_x_depth = runtime_data.genome['blueprint'][src_cortical_area]['neuron_params']['block_boundaries'][0]
for offset in range(source_x_depth):
for block_ref in block_list:
if blocks.block_ref_2_id(block_ref)[2] // (2 ** offset) % 2 == 0:
for neuron in blocks.neurons_in_the_block(cortical_area=dst_cortical_area, block_ref=block_ref):
candidate_list.append(neuron)
return candidate_list | 5,355,675 |
def get_extra(item_container):
""" liefert die erste passende image_url """
if item_container.item.extra != '':
return get_extra_data(item_container)
item_container = item_container.get_parent()
while item_container.item.app.name == 'dmsEduFolder':
if item_container.item.extra != '':
return get_extra_data(item_container)
item_container = item_container.get_parent()
if item_container.item.app.name != 'dmsEduFolder':
return None | 5,355,676 |
def abort_if_requests_doesnt_exists(id):
"""Checks if given id exists in the database"""
if not Requests.get_one_by_field('id', value=id):
        api.abort(404, "Request with id {} doesn't exist or you provided an id that does not belong to you".format(id)) | 5,355,677 |
def pt_accuracy(output, target, topk=(1,)):
"""Compute the accuracy over the k top predictions for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape: the slice of a transposed tensor is non-contiguous
res.append(correct_k.mul_(100.0 / batch_size))
return res | 5,355,678 |
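# A hedged sketch with random logits (shapes are illustrative):
# output = torch.randn(8, 10)            # batch of 8, 10 classes
# target = torch.randint(0, 10, (8,))
# top1, top5 = pt_accuracy(output, target, topk=(1, 5))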
def compute_phasing_counts(variant_to_read_names_dict):
"""
Parameters
----------
    variant_to_read_names_dict : dict
Dictionary mapping varcode.Variant to set of read names
Returns
-------
Dictionary from variant to Counter(Variant)
"""
read_names_to_variants = defaultdict(set)
for variant, read_names in variant_to_read_names_dict.items():
for read_name in read_names:
read_names_to_variants[read_name].add(variant)
# now count up how many reads are shared between pairs of variants
phasing_counts = defaultdict(Counter)
for variant, read_names in variant_to_read_names_dict.items():
for read_name in read_names:
for other_variant in read_names_to_variants[read_name]:
if variant != other_variant:
phasing_counts[variant][other_variant] += 1
return phasing_counts | 5,355,679 |
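# A small illustration (plain strings stand in for varcode.Variant objects;
# the counting logic is identical):
reads = {"varA": {"r1", "r2"}, "varB": {"r2", "r3"}, "varC": {"r3"}}
counts = compute_phasing_counts(reads)
assert counts["varA"]["varB"] == 1  # varA and varB share read r2
assert counts["varB"]["varC"] == 1  # varB and varC share read r3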
def run_bar(
bar,
sort_by=["sample_num"],
dryrun=0,
rev=[False],
delete_as_complete=True,
retract_when_done=False,
save_as_complete="",
):
"""
run all sample dictionaries stored in the list bar
@param bar: a list of sample dictionaries
@param sort_by: list of strings determining the sorting of scans
strings include project, configuration, sample_id, plan, plan_args, spriority, apriority
within which all of one acquisition, etc
@param dryrun: Print out the list of plans instead of actually doing anything - safe to do during setup
    @param rev: list of booleans, the same length as sort_by, whether to reverse each sort
@param delete_as_complete: remove the acquisitions from the bar as we go, so we can automatically start back up
@param retract_when_done: go to throughstation mode at the end of all runs.
@param save_as_complete: if a valid path, will save the running bar to this position in case of failure
@return:
"""
config_change_time = 120 # time to change between configurations, in seconds.
save_to_file = False
try:
open(save_as_complete, "w")
except OSError:
save_to_file = False
pass
else:
save_to_file = True
list_out = []
for samp_num, s in enumerate(bar):
sample = s
sample_id = s["sample_id"]
sample_project = s["project_name"]
for acq_num, a in enumerate(s["acquisitions"]):
if "priority" not in a.keys():
a["priority"] = 50
list_out.append(
[
sample_id, # 0 X
sample_project, # 1 X
a["configuration"], # 2 X
a["plan_name"], # 3
avg_scan_time(a["plan_name"], 2), # 4 calculated plan time
sample, # 5 full sample dict
a, # 6 full acquisition dict
samp_num, # 7 sample index
acq_num, # 8 acq index
a["args"], # 9 X
s["density"], # 10
s["proposal_id"], # 11 X
s["sample_priority"], # 12 X
a["priority"],
]
) # 13 X
switcher = {
"sample_id": 0,
"project": 1,
"config": 2,
"plan": 3,
"plan_args": 9,
"proposal": 11,
"spriority": 12,
"apriority": 13,
"sample_num": 7,
}
# add anything to the above list, and make a key in the above dictionary,
# using that element to sort by something else
try:
sort_by.reverse()
rev.reverse()
except AttributeError:
if isinstance(sort_by, str):
sort_by = [sort_by]
rev = [rev]
else:
print(
"sort_by needs to be a list of strings\n"
"such as project, configuration, sample_id, plan, plan_args, spriority, apriority"
)
return
try:
for k, r in zip(sort_by, rev):
list_out = sorted(list_out, key=itemgetter(switcher[k]), reverse=r)
except KeyError:
print(
"sort_by needs to be a list of strings\n"
"such as project, configuration, sample_id, plan, plan_args, spriority, apriority"
)
return
if dryrun:
text = ""
total_time = 0
for i, step in enumerate(list_out):
# check configuration
# check sample position
# check acquisition
text += "load {} from {}, config {}, run {} (p {} a {}), starts @ {} takes {}\n".format(
step[5]["sample_name"],
step[1],
step[2],
step[3],
step[12],
step[13],
time_sec(total_time),
time_sec(step[4]),
)
total_time += step[4]
if step[2] != list_out[i - 1][2]:
total_time += config_change_time
text += (
f"\n\nTotal estimated time including config changes {time_sec(total_time)}"
)
boxed_text("Dry Run", text, "lightblue", width=120, shrink=True)
else:
run_start_time = datetime.datetime.now()
for i, step in enumerate(list_out):
time_remaining = sum([avg_scan_time(row[3],nscans=2) for row in list_out[i:]])
this_step_time = avg_scan_time(step[3],nscans=2)
start_time = datetime.datetime.now()
total_time = datetime.datetime.now() - run_start_time
boxed_text(
"Scan Status",
"\nTime so far: {}".format(str(total_time))
+ "\nStarting scan {} out of {}".format(
colored(f"#{i + 1}", "blue"), len(list_out)
)
+ "{} of {} in project {} Proposal # {}\n which should take {}\n".format(
colored(step[3], "blue"), # plan
colored(step[0], "blue"), # sample_id
colored(step[1], "blue"), # project
colored(step[11], "blue"), # proposal
time_sec(this_step_time),
)
+ f"time remaining approx {time_sec(time_remaining)} \n\n",
"red",
width=120,
shrink=True,
)
rsoxs_bot.send_message(
f"Starting scan {i + 1} out of {len(list_out)}\n"
+ f"{step[3]} of {step[0]} in project {step[1]} Proposal # {step[11]}"
f"\nwhich should take {time_sec(this_step_time)}"
+ f"\nTime so far: {str(total_time)}"
f"time remaining approx {time_sec(time_remaining)}"
)
yield from load_configuration(step[2]) # move to configuration
yield from load_sample(step[5]) # move to sample / load sample metadata
yield from do_acquisitions(
[step[6]]
) # run acquisition (will load configuration again)
uid = db[-1].uid
print(f"acq uid = {uid}")
scan_id = db[uid].start["scan_id"]
timestamp = db[uid].start["time"]
success = db[uid].stop["exit_status"]
bar[step[7]].setdefault("acq_history", []).append(
{
"uid": uid,
"scan_id": scan_id,
"acq": step[6],
"time": timestamp,
"status": success,
}
)
if delete_as_complete:
bar[step[7]]["acquisitions"].remove(step[6])
if save_to_file:
save_samplesxls(bar, save_as_complete)
elapsed_time = datetime.datetime.now() - start_time
rsoxs_bot.send_message(
f"Acquisition {scan_id} complete. Actual time : {str(elapsed_time)},"
)
rsoxs_bot.send_message("All scans complete!")
if retract_when_done:
yield from all_out() | 5,355,680 |
def test_domain_command_failure(client, mocker):
"""
When there is a invalid response then ValueError should be raised with valid message
"""
from GoogleChronicleBackstory import domain_command
dummy_response = "{ \"error\": { \"code\": 400, \"message\": \"Invalid JSON payload received. Unknown name " \
"\'artifact.domai_name\': Cannot bind query parameter. Field \'domai_name\' could not be found " \
"in request message.\", \"status\": \"INVALID_ARGUMENT\", \"details\": [ { } ] } } "
mock_response = (
Response(dict(status=400)),
dummy_response
)
client.http_client.request.return_value = mock_response
mocker.patch('GoogleChronicleBackstory.return_error', new=return_error)
with pytest.raises(ValueError) as error:
domain_command(client, ARGS['domain'])
expected_message = "Status code: 400\nError: Invalid JSON payload received. Unknown name \'artifact.domai_name\': " \
"Cannot bind query parameter. Field \'domai_name\' could not be found in request message."
assert str(error.value) == expected_message | 5,355,681 |
def _inputs_and_vae(hparams):
"""Constructs a VAE."""
obs_encoder = codec.MLPObsEncoder(hparams)
obs_decoder = codec.MLPObsDecoder(
hparams,
codec.BernoulliDecoder(squeeze_input=True),
param_size=1)
inputs = context_mod.EncodeObserved(obs_encoder)
vae = vae_mod.make(hparams, obs_encoder, obs_decoder)
return inputs, vae | 5,355,682 |
def main():
"""Starts the bot."""
# TODO: Handle network errors, see:
# https://github.com/python-telegram-bot/python-telegram-bot/wiki/Handling-network-errors
# TODO: Multithreading can be implemented for performance. See:
# https://github.com/python-telegram-bot/python-telegram-bot/wiki/Performance-Optimizations
# Increase the connection pool size to 8 (Check telegram/ext/updater.py for
# pool size requirements):
req = Request(con_pool_size=8)
# Set a limit of 29 messages per second (30 is the max. allowed, 29 should
    # ensure safety) for all chats, 19 per minute for groups (20 is maximum):
msgq = mq.MessageQueue(all_burst_limit=29, group_burst_limit=19)
skiwobot = SanalkiwoBot(TOKEN, request=req, msg_queue=msgq)
updater = UpdaterBotStop(bot=skiwobot, use_context=True)
dp = updater.dispatcher
jobq = updater.job_queue
global BOT_ID
BOT_ID = skiwobot.id
# On command messages: #
dp.add_handler(CommandHandler({"start", "basla", "baslat"}, start))
dp.add_handler(CommandHandler({"help", "yardim", "info"}, help_info))
dp.add_handler(
CommandHandler({"corona", "covid", "covid19", "korona"}, corona)
)
dp.add_handler(
CommandHandler({"abonelik", "subscription"}, annc_subscription)
)
dp.add_handler(CommandHandler({"db_backup"}, db_backup))
dp.add_handler(CommandHandler({"duyur", "announce"}, announce))
dp.add_handler(CommandHandler({"iptal", "abort"}, abort_state))
dp.add_handler(
CommandHandler({"duyurusil", "revokeannc"}, revoke_announcement)
)
# On non-command updates: #
# Text messages which do not solely consist of commands and ignores
# edits:
dp.add_handler(
MessageHandler(
(Filters.text & (~Filters.update.edited_message)),
read_incoming
)
)
# (use different Filters attributes to associate message edits and other
# types of messages with different func.s)
# Log all errors:
dp.add_error_handler(error_log)
# Clean leftover files once a day with db_cleanup func.:
# TODO: JobQueue might benefit from a better implementation. see:
# https://github.com/python-telegram-bot/python-telegram-bot/wiki/Extensions-%E2%80%93-JobQueue
jobq.run_repeating(db_cleanup, interval=dt.timedelta(days=1), first=0)
# Start the bot:
if DEPLOYED:
updater.start_webhook(
listen="0.0.0.0",
port=PORT,
url_path=TOKEN
)
updater.bot.set_webhook("https://sanalkiwobot.herokuapp.com/" + TOKEN)
else:
updater.start_polling()
logger.info("Waiting for input...")
# Run the bot until the process receives SIGINT, SIGTERM or SIGABRT
updater.idle() | 5,355,683 |
def read_grid_hdf5(filepath, name):
"""Read a grid from HDF5 file.
Parameters
----------
filepath : string or pathlib.Path object
Path of the HDF5 file.
name : string
Name of the grid.
Returns
-------
x : numpy.ndarray
The x-coordinates along a gridline in the x-direction.
y : numpy.ndarray
The y-coordinates along a gridline in the y-direction.
z : numpy.ndarray
The z-coordinates along a gridline in the z-direction.
"""
f = h5py.File(str(filepath), 'r')
dim = len(f[name])
x, y, z = f[name]['x'][:], f[name]['y'][:], None
if dim == 3:
z = f[name]['z'][:]
f.close()
if z is None or len(z) == 1:
return x, y
return x, y, z | 5,355,684 |
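# A hedged usage sketch (file and grid names are placeholders):
# x, y = read_grid_hdf5('grid2d.h5', 'vertex')      # 2D grid
# x, y, z = read_grid_hdf5('grid3d.h5', 'vertex')   # 3D grid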
def string_to_version(verstring):
"""
Return a tuple of (epoch, version, release) from a version string
This function replaces rpmUtils.miscutils.stringToVersion, see
https://bugzilla.redhat.com/1364504
"""
# is there an epoch?
components = verstring.split(':')
if len(components) > 1:
epoch = components[0]
else:
epoch = 0
    # the version-release portion is the last component (skips the epoch when present)
    remaining = components[-1].split('-')
version = remaining[0]
release = remaining[1]
return (epoch, version, release) | 5,355,685 |
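# Examples of the expected EVR parsing (note the epoch comes back as a string
# when present and as the integer 0 when absent):
# string_to_version('1:2.4.6-7.el7')  # -> ('1', '2.4.6', '7.el7')
# string_to_version('2.4.6-7.el7')    # -> (0, '2.4.6', '7.el7')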
def cff2provn(filename):
"""Parse cml xml file and return a prov bundle object"""
#filename = "/Users/fariba/Desktop/UCI/freesurfer/scripts/meta-MC-SCA-023_tp1.cml"
tree = xml.dom.minidom.parse(filename)
collections = tree.documentElement
g = prov.ProvBundle()
g.add_namespace(xsd)
g.add_namespace(dcterms)
g.add_namespace(cml)
url_entity = g.entity(cml[get_id()])
url_entity.add_extra_attributes({prov.PROV['type']: nidm['nidm:ConnectomeFileFormat'],
prov.PROV['location']: prov.Literal(filename, prov.XSD['String'])})
cml_collection = g.collection(cml[get_id()])
cml_collection.add_extra_attributes(
{prov.PROV['type']: cml['connectome'],
prov.PROV['label']: filename})
g.wasDerivedFrom(cml_collection, url_entity)
# get species, subject_name, and subject_timepoint
species = tree.getElementsByTagName('cml:species')[0].toxml()
species = species.replace('<cml:species>', '').replace('</cml:species>', '')
tp = ''
sub = ''
tags = collections.getElementsByTagName("cml:tag")
for t in tags:
if t.attributes['key'].value == 'subject_name':
sub = t.toxml()
if t.attributes['key'].value == 'subject_timepoint':
tp = t.toxml()
sub = sub.replace('<cml:tag key="subject_name">', '').replace('</cml:tag>', '')
tp = tp.replace('<cml:tag key="subject_timepoint">', '').replace('</cml:tag>', '')
#print species + " " + sub + " " + tp
cml_meta = g.entity(cml[get_id()])
cml_meta.add_extra_attributes(
{prov.PROV['type']: cml['connectome-meta'], cml['species']: species, cml['timepoint']: tp,
cml['subject_name']: sub})
g.hadMember(cml_collection, cml_meta)
volumes = collections.getElementsByTagName("cml:connectome-volume")
c = 0
for v in volumes:
c = c + 1
#print v.getAttribute("src") + " " + v.getAttribute("dtype") + " " + v.getAttribute("name") + " " + v.getAttribute("fileformat")
#print v.attributes['fileformat'].value
dtype = v.getAttribute('dtype')
src = v.getAttribute('src')
name = v.getAttribute('name')
fileformat = v.getAttribute('fileformat')
cml_volume = g.entity(cml[get_id()])
cml_volume.add_extra_attributes(
{prov.PROV['type']: cml['connectome-volume'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_volume)
tracks = collections.getElementsByTagName("cml:connectome-track")
c = 0
for t in tracks:
c = c + 1
#print t.getAttribute("src") + " " + t.getAttribute("dtype") + " " + t.getAttribute("name") + " " + t.getAttribute("fileformat")
dtype = t.getAttribute('dtype')
src = t.getAttribute('src')
name = t.getAttribute('name')
fileformat = t.getAttribute('fileformat')
cml_track = g.entity(cml[get_id()])
cml_track.add_extra_attributes(
{prov.PROV['type']: cml['connectome-track'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_track)
networks = collections.getElementsByTagName("cml:connectome-network")
c = 0
for n in networks:
c = c + 1
#print n.getAttribute("src") + " " + n.getAttribute("dtype") + " " + n.getAttribute("name") + " " + n.getAttribute("fileformat")
dtype = n.getAttribute('dtype')
src = n.getAttribute('src')
name = n.getAttribute('name')
fileformat = n.getAttribute('fileformat')
cml_network = g.entity(cml[get_id()])
cml_network.add_extra_attributes(
{prov.PROV['type']: cml['connectome-network'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_network)
surfaces = collections.getElementsByTagName("cml:connectome-surface")
c = 0
for s in surfaces:
c = c + 1
#print s.getAttribute("src") + " " + s.getAttribute("dtype") + " " + s.getAttribute("name") + " " + s.getAttribute("fileformat")
dtype = s.getAttribute('dtype')
src = s.getAttribute('src')
name = s.getAttribute('name')
fileformat = s.getAttribute('fileformat')
cml_surface = g.entity(cml[get_id()])
cml_surface.add_extra_attributes(
{prov.PROV['type']: cml['connectome-surface'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_surface)
data = collections.getElementsByTagName("cml:connectome-data")
c = 0
for d in data:
c = c + 1
#print d.getAttribute("src") + " " + d.getAttribute("dtype") + " " + d.getAttribute("name") + " " + d.getAttribute("fileformat")
dtype = d.getAttribute('dtype')
src = d.getAttribute('src')
        name = d.getAttribute('name')
        fileformat = d.getAttribute('fileformat')
cml_data = g.entity(cml[get_id()])
cml_data.add_extra_attributes(
{prov.PROV['type']: cml['connectome-data'], cml['dtype']: dtype, cml['src']: src, cml['name']: name,
cml['fileformat']: fileformat})
g.hadMember(cml_collection, cml_data)
return g | 5,355,686 |
def optional(idx, *args):
"""A converter for functions having optional arguments.
The index to the last non-optional parameter is specified and a list of types for optional arguments follows.
"""
return lambda ctx, typespecs: _optional_imp(ctx, typespecs[idx], args) | 5,355,687 |
def analysis():
"""Work out numbers of patients done this year and whether on target"""
def report_number_this_week():
try:
with open('d:\\JOHN TILLET\\episode_data\\'
'jtdata\\weekly_data.py', 'rb') as pf:
weekly = pickle.load(pf)
print('Number this week: {}'.format(str(weekly['number'])))
except IOError:
print('Cant find weekly_data file')
sys.exit(1)
print()
desired_weekly = int(input('Weekly target: '))
print(' **********')
print('This period starts 1-7-2017')
first_date = datetime.datetime(2017, 7, 1)
today = datetime.datetime.today()
days_diff = (today - first_date).days
print('Days this period %d' % days_diff)
first_invoice = 5057
csvfile = 'd:\\JOHN TILLET\\episode_data\\jtdata\\patients.csv'
with open(csvfile, 'r') as file_handle:
reader = csv.reader(file_handle)
first_bill = next(reader)
first_bill_invoice = int(first_bill[15])
with open('d:\\JOHN TILLET\\episode_data\\'
'jtdata\\invoice_store.py', 'rb') as handle:
last_invoice = pickle.load(handle)
invoice_diff = int(last_invoice - first_invoice)
desired_number = int(days_diff * desired_weekly / 7)
weekly_number = int((7 * invoice_diff / days_diff))
number_to_give_away = invoice_diff - desired_number
print('Weekly target %d' % desired_weekly)
print('Number done this period %d' % invoice_diff)
report_number_this_week()
print('Number done this print run %d' % (int(last_invoice) - first_bill_invoice))
print('Weekly no. patients %d' % weekly_number)
    print('Number available to give away %d to average %d per week.'
% (number_to_give_away, desired_weekly))
print(' **********') | 5,355,688 |
def calc_plot_ROC(y1, y2):
"""
Take two distributions and plot the ROC curve if you used the difference
in those distributions as a binary classifier.
:param y1:
:param y2:
:return:
"""
y_score = np.concatenate([y1, y2])
y_true = np.concatenate([np.zeros(len(y1)), np.ones(len(y2))])
return plot_ROC(y_true, y_score) | 5,355,689 |
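# A hedged sketch: two overlapping Gaussian score distributions (plot_ROC is
# defined elsewhere in the original module):
# y1 = np.random.normal(0.0, 1.0, size=500)
# y2 = np.random.normal(1.0, 1.0, size=500)
# calc_plot_ROC(y1, y2)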
def custom_leastsq(obj_fn, jac_fn, x0, f_norm2_tol=1e-6, jac_norm_tol=1e-6,
rel_ftol=1e-6, rel_xtol=1e-6, max_iter=100, num_fd_iters=0,
max_dx_scale=1.0, damping_mode="identity", damping_basis="diagonal_values",
damping_clip=None, use_acceleration=False, uphill_step_threshold=0.0,
init_munu="auto", oob_check_interval=0, oob_action="reject", oob_check_mode=0,
resource_alloc=None, arrays_interface=None, serial_solve_proc_threshold=100,
x_limits=None, verbosity=0, profiler=None):
"""
An implementation of the Levenberg-Marquardt least-squares optimization algorithm customized for use within pyGSTi.
    This general purpose routine mimics, to a large extent, the interface used by
    `scipy.optimize.leastsq`, though it implements a newer (and more robust) version
of the algorithm.
Parameters
----------
obj_fn : function
The objective function. Must accept and return 1D numpy ndarrays of
length N and M respectively. Same form as scipy.optimize.leastsq.
jac_fn : function
The jacobian function (not optional!). Accepts a 1D array of length N
and returns an array of shape (M,N).
x0 : numpy.ndarray
Initial evaluation point.
f_norm2_tol : float, optional
        Tolerance for `F^2` where `F = norm(sum(obj_fn(x)**2))` is the
least-squares residual. If `F**2 < f_norm2_tol`, then mark converged.
jac_norm_tol : float, optional
Tolerance for jacobian norm, namely if `infn(dot(J.T,f)) < jac_norm_tol`
then mark converged, where `infn` is the infinity-norm and
`f = obj_fn(x)`.
rel_ftol : float, optional
Tolerance on the relative reduction in `F^2`, that is, if
`d(F^2)/F^2 < rel_ftol` then mark converged.
rel_xtol : float, optional
Tolerance on the relative value of `|x|`, so that if
`d(|x|)/|x| < rel_xtol` then mark converged.
max_iter : int, optional
        The maximum number of (outer) iterations.
num_fd_iters : int optional
Internally compute the Jacobian using a finite-difference method
for the first `num_fd_iters` iterations. This is useful when `x0`
lies at a special or singular point where the analytic Jacobian is
misleading.
max_dx_scale : float, optional
If not None, impose a limit on the magnitude of the step, so that
`|dx|^2 < max_dx_scale^2 * len(dx)` (so elements of `dx` should be,
roughly, less than `max_dx_scale`).
damping_mode : {'identity', 'JTJ', 'invJTJ', 'adaptive'}
How damping is applied. `'identity'` means that the damping parameter mu
multiplies the identity matrix. `'JTJ'` means that mu multiplies the
diagonal or singular values (depending on `scaling_mode`) of the JTJ
        (Fisher information and approximate Hessian) matrix, whereas `'invJTJ'`
means mu multiplies the reciprocals of these values instead. The
`'adaptive'` mode adaptively chooses a damping strategy.
damping_basis : {'diagonal_values', 'singular_values'}
        Whether the diagonal or singular values of the JTJ matrix are used
during damping. If `'singular_values'` is selected, then a SVD of the
Jacobian (J) matrix is performed and damping is performed in the basis
of (right) singular vectors. If `'diagonal_values'` is selected, the
diagonal values of relevant matrices are used as a proxy for the the
singular values (saving the cost of performing a SVD).
damping_clip : tuple, optional
A 2-tuple giving upper and lower bounds for the values that mu multiplies.
If `damping_mode == "identity"` then this argument is ignored, as mu always
        multiplies a 1.0 on the diagonal of the identity matrix. If None, then no
clipping is applied.
use_acceleration : bool, optional
Whether to include a geodesic acceleration term as suggested in
arXiv:1201.5885. This is supposed to increase the rate of
convergence with very little overhead. In practice we've seen
mixed results.
uphill_step_threshold : float, optional
Allows uphill steps when taking two consecutive steps in nearly
the same direction. The condition for accepting an uphill step
is that `(uphill_step_threshold-beta)*new_objective < old_objective`,
where `beta` is the cosine of the angle between successive steps.
If `uphill_step_threshold == 0` then no uphill steps are allowed,
otherwise it should take a value between 1.0 and 2.0, with 1.0 being
the most permissive to uphill steps.
init_munu : tuple, optional
If not None, a (mu, nu) tuple of 2 floats giving the initial values
for mu and nu.
oob_check_interval : int, optional
Every `oob_check_interval` outer iterations, the objective function
(`obj_fn`) is called with a second argument 'oob_check', set to True.
In this case, `obj_fn` can raise a ValueError exception to indicate
that it is Out Of Bounds. If `oob_check_interval` is 0 then this
check is never performed; if 1 then it is always performed.
oob_action : {"reject","stop"}
        What to do when the objective function indicates an out-of-bounds point (by raising
        a ValueError as described above). `"reject"` means the step is rejected but the
optimization proceeds; `"stop"` means the optimization stops and returns
as converged at the last known-in-bounds point.
oob_check_mode : int, optional
An advanced option, expert use only. If 0 then the optimization is
halted as soon as an *attempt* is made to evaluate the function out of bounds.
If 1 then the optimization is halted only when a would-be *accepted* step
is out of bounds.
resource_alloc : ResourceAllocation, optional
        When not None, a resource allocation object used for distributing the computation
across multiple processors.
arrays_interface : ArraysInterface
An object that provides an interface for creating and manipulating data arrays.
serial_solve_proc_threshold : int optional
When there are fewer than this many processors, the optimizer will solve linear
systems serially, using SciPy on a single processor, rather than using a parallelized
Gaussian Elimination (with partial pivoting) algorithm coded in Python. Since SciPy's
implementation is more efficient, it's not worth using the parallel version until there
are many processors to spread the work among.
x_limits : numpy.ndarray, optional
A (num_params, 2)-shaped array, holding on each row the (min, max) values for the corresponding
parameter (element of the "x" vector). If `None`, then no limits are imposed.
verbosity : int, optional
Amount of detail to print to stdout.
profiler : Profiler, optional
        A profiler object used to track timing and memory usage.
Returns
-------
x : numpy.ndarray
The optimal solution.
converged : bool
Whether the solution converged.
msg : str
A message indicating why the solution converged (or didn't).
"""
resource_alloc = _ResourceAllocation.cast(resource_alloc)
comm = resource_alloc.comm
printer = _VerbosityPrinter.create_printer(verbosity, comm)
ari = arrays_interface # shorthand
# MEM from ..baseobjs.profiler import Profiler
# MEM debug_prof = Profiler(comm, True)
# MEM profiler = debug_prof
msg = ""
converged = False
global_x = x0.copy()
f = obj_fn(global_x) # 'E'-type array
norm_f = ari.norm2_f(f) # _np.linalg.norm(f)**2
half_max_nu = 2**62 # what should this be??
tau = 1e-3
alpha = 0.5 # for acceleration
nu = 2
mu = 1 # just a guess - initialized on 1st iter and only used if rejected
#Allocate potentially shared memory used in loop
JTJ = ari.allocate_jtj()
JTf = ari.allocate_jtf()
x = ari.allocate_jtf()
#x_for_jac = ari.allocate_x_for_jac()
if num_fd_iters > 0:
fdJac = ari.allocate_jac()
ari.allscatter_x(global_x, x)
if x_limits is not None:
x_lower_limits = ari.allocate_jtf()
x_upper_limits = ari.allocate_jtf()
ari.allscatter_x(x_limits[:, 0], x_lower_limits)
ari.allscatter_x(x_limits[:, 1], x_upper_limits)
if damping_basis == "singular_values":
Jac_V = ari.allocate_jtj()
if damping_mode == 'adaptive':
dx_lst = [ari.allocate_jtf(), ari.allocate_jtf(), ari.allocate_jtf()]
new_x_lst = [ari.allocate_jtf(), ari.allocate_jtf(), ari.allocate_jtf()]
global_new_x_lst = [global_x.copy() for i in range(3)]
else:
dx = ari.allocate_jtf()
new_x = ari.allocate_jtf()
global_new_x = global_x.copy()
if use_acceleration:
dx1 = ari.allocate_jtf()
dx2 = ari.allocate_jtf()
df2_x = ari.allocate_jtf()
JTdf2 = ari.allocate_jtf()
global_accel_x = global_x.copy()
# don't let any component change by more than ~max_dx_scale
if max_dx_scale:
max_norm_dx = (max_dx_scale**2) * len(global_x)
else: max_norm_dx = None
if not _np.isfinite(norm_f):
msg = "Infinite norm of objective function at initial point!"
if len(global_x) == 0: # a model with 0 parameters - nothing to optimize
msg = "No parameters to optimize"; converged = True
# DB: from ..tools import matrixtools as _mt
# DB: print("DB F0 (%s)=" % str(f.shape)); _mt.print_mx(f,prec=0,width=4)
#num_fd_iters = 1000000 # DEBUG: use finite difference iterations instead
# print("DEBUG: setting num_fd_iters == 0!"); num_fd_iters = 0 # DEBUG
last_accepted_dx = None
min_norm_f = 1e100 # sentinel
best_x = ari.allocate_jtf()
best_x[:] = x[:] # like x.copy() -the x-value corresponding to min_norm_f ('P'-type)
spow = 0.0 # for damping_mode == 'adaptive'
if damping_clip is not None:
def dclip(ar): return _np.clip(ar, damping_clip[0], damping_clip[1])
else:
def dclip(ar): return ar
if init_munu != "auto":
mu, nu = init_munu
best_x_state = (mu, nu, norm_f, f.copy(), spow, None) # need f.copy() b/c f is objfn mem
rawJTJ_scratch = None
jtj_buf = ari.allocate_jtj_shared_mem_buf()
try:
for k in range(max_iter): # outer loop
# assume global_x, x, f, fnorm hold valid values
if len(msg) > 0:
break # exit outer loop if an exit-message has been set
if norm_f < f_norm2_tol:
if oob_check_interval <= 1:
msg = "Sum of squares is at most %g" % f_norm2_tol
converged = True; break
else:
printer.log(("** Converged with out-of-bounds with check interval=%d, reverting to last "
"know in-bounds point and setting interval=1 **") % oob_check_interval, 2)
oob_check_interval = 1
x[:] = best_x[:]
mu, nu, norm_f, f[:], spow, _ = best_x_state
continue # can't make use of saved JTJ yet - recompute on next iter
#printer.log("--- Outer Iter %d: norm_f = %g, mu=%g" % (k,norm_f,mu))
if profiler: profiler.memory_check("custom_leastsq: begin outer iter *before de-alloc*")
Jac = None
if profiler: profiler.memory_check("custom_leastsq: begin outer iter")
# unnecessary b/c global_x is already valid: ari.allgather_x(x, global_x)
if k >= num_fd_iters:
Jac = jac_fn(global_x) # 'EP'-type, but doesn't actually allocate any more mem (!)
else:
# Note: x holds only number of "fine"-division params - need to use global_x, and
# Jac only holds a subset of the derivative and element columns and rows, respectively.
f_fixed = f.copy() # a static part of the distributed `f` returned by obj_fn - MUST copy this.
pslice = ari.jac_param_slice(only_if_leader=True)
eps = 1e-7
#Don't do this: for ii, i in enumerate(range(pslice.start, pslice.stop)): (must keep procs in sync)
for i in range(len(global_x)):
x_plus_dx = global_x.copy()
x_plus_dx[i] += eps
fd = (obj_fn(x_plus_dx) - f_fixed) / eps
if pslice.start <= i < pslice.stop:
fdJac[:, i - pslice.start] = fd
#if comm is not None: comm.barrier() # overkill for shared memory leader host barrier
Jac = fdJac
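# Note: the loop above forms a forward-difference Jacobian column by column,
#   Jac[:, i] ~= (obj_fn(x + eps*e_i) - obj_fn(x)) / eps   with eps = 1e-7,
# where every processor walks the full parameter list (to stay in sync) but only
# stores the columns belonging to its own parameter slice.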
#DEBUG: compare with analytic jacobian (need to uncomment num_fd_iters DEBUG line above too)
#Jac_analytic = jac_fn(x)
#if _np.linalg.norm(Jac_analytic-Jac) > 1e-6:
# print("JACDIFF = ",_np.linalg.norm(Jac_analytic-Jac)," per el=",
# _np.linalg.norm(Jac_analytic-Jac)/Jac.size," sz=",Jac.size)
# DB: from ..tools import matrixtools as _mt
# DB: print("DB JAC (%s)=" % str(Jac.shape)); _mt.print_mx(Jac,prec=0,width=4); assert(False)
if profiler: profiler.memory_check("custom_leastsq: after jacobian:"
+ "shape=%s, GB=%.2f" % (str(Jac.shape),
Jac.nbytes / (1024.0**3)))
Jnorm = _np.sqrt(ari.norm2_jac(Jac))
xnorm = _np.sqrt(ari.norm2_x(x))
printer.log("--- Outer Iter %d: norm_f = %g, mu=%g, |x|=%g, |J|=%g" % (k, norm_f, mu, xnorm, Jnorm))
#assert(_np.isfinite(Jac).all()), "Non-finite Jacobian!" # NaNs tracking
#assert(_np.isfinite(_np.linalg.norm(Jac))), "Finite Jacobian has inf norm!" # NaNs tracking
tm = _time.time()
#OLD MPI-enabled JTJ computation
##if my_mpidot_qtys is None:
## my_mpidot_qtys = _mpit.distribute_for_dot(Jac.T.shape, Jac.shape, resource_alloc)
#JTJ, JTJ_shm = _mpit.mpidot(Jac.T, Jac, my_mpidot_qtys[0], my_mpidot_qtys[1],
# my_mpidot_qtys[2], resource_alloc, JTJ, JTJ_shm) # _np.dot(Jac.T,Jac) 'PP'
ari.fill_jtj(Jac, JTJ, jtj_buf)
ari.fill_jtf(Jac, f, JTf) # 'P'-type
if profiler: profiler.add_time("custom_leastsq: dotprods", tm)
#assert(not _np.isnan(JTJ).any()), "NaN in JTJ!" # NaNs tracking
#assert(not _np.isinf(JTJ).any()), "inf in JTJ! norm Jac = %g" % _np.linalg.norm(Jac) # NaNs tracking
#assert(_np.isfinite(JTJ).all()), "Non-finite JTJ!" # NaNs tracking
#assert(_np.isfinite(JTf).all()), "Non-finite JTf!" # NaNs tracking
idiag = ari.jtj_diag_indices(JTJ)
norm_JTf = ari.infnorm_x(JTf)
norm_x = ari.norm2_x(x) # _np.linalg.norm(x)**2
undamped_JTJ_diag = JTJ[idiag].copy() # 'P'-type
#max_JTJ_diag = JTJ.diagonal().copy()
JTf *= -1.0; minus_JTf = JTf # use the same memory for -JTf below (shouldn't use JTf anymore)
#Maybe just have a minus_JTf variable?
# FUTURE TODO: keep tallying allocated memory, i.e. array_types (stopped here)
if damping_basis == "singular_values":
# Jac = U * s * Vh; J.T * J = conj(V) * s * U.T * U * s * Vh = conj(V) * s^2 * Vh
# Jac_U, Jac_s, Jac_Vh = _np.linalg.svd(Jac, full_matrices=False)
# Jac_V = _np.conjugate(Jac_Vh.T)
global_JTJ = ari.gather_jtj(JTJ)
if comm is None or comm.rank == 0:
global_Jac_s2, global_Jac_V = _np.linalg.eigh(global_JTJ)
ari.scatter_jtj(global_Jac_V, Jac_V)
comm.bcast(global_Jac_s2, root=0)
else:
ari.scatter_jtj(None, Jac_V)
global_Jac_s2 = comm.bcast(None, root=0)
#print("Rank %d: min s2 = %g" % (comm.rank, min(global_Jac_s2)))
#if min(global_Jac_s2) < -1e-4 and (comm is None or comm.rank == 0):
# print("WARNING: min Jac s^2 = %g (max = %g)" % (min(global_Jac_s2), max(global_Jac_s2)))
assert(min(global_Jac_s2) / abs(max(global_Jac_s2)) > -1e-6), "JTJ should be positive!"
global_Jac_s = _np.sqrt(_np.clip(global_Jac_s2, 1e-12, None)) # eigvals of JTJ must be >= 0
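# Note: since J^T J = V s^2 V^T (eigendecomposition above), the eigenvalues are the
# squared singular values of Jac; taking sqrt (clipped at 1e-12) recovers s, which the
# "singular_values" damping basis regularizes directly below.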
global_Jac_VT_mJTf = ari.global_svd_dot(Jac_V, minus_JTf) # = dot(Jac_V.T, minus_JTf)
#DEBUG
#num_large_svals = _np.count_nonzero(Jac_s > _np.max(Jac_s) / 1e2)
#Jac_Uproj = Jac_U[:,0:num_large_svals]
#JTJ_evals, JTJ_U = _np.linalg.eig(JTJ)
#printer.log("JTJ (dim=%d) eval min/max=%g, %g; %d large svals (of %d)" % (
# JTJ.shape[0], _np.min(_np.abs(JTJ_evals)), _np.max(_np.abs(JTJ_evals)),
# num_large_svals, len(Jac_s)))
if norm_JTf < jac_norm_tol:
if oob_check_interval <= 1:
msg = "norm(jacobian) is at most %g" % jac_norm_tol
converged = True; break
else:
printer.log(("** Converged with out-of-bounds with check interval=%d, reverting to last "
"know in-bounds point and setting interval=1 **") % oob_check_interval, 2)
oob_check_interval = 1
x[:] = best_x[:]
mu, nu, norm_f, f[:], spow, _ = best_x_state
continue # can't make use of saved JTJ yet - recompute on next iter
if k == 0:
if init_munu == "auto":
if damping_mode == 'identity':
mu = tau * ari.max_x(undamped_JTJ_diag) # initial damping element
#mu = min(mu, MU_TOL1)
else:
# initial multiplicative damping element
#mu = tau # initial damping element - but this seems too low, at least for termgap...
mu = min(1.0e5, ari.max_x(undamped_JTJ_diag) / norm_JTf) # Erik's heuristic
#tries to avoid making mu so large that dx is tiny and we declare victory prematurely
else:
mu, nu = init_munu
rawJTJ_scratch = JTJ.copy() # allocates the memory for a copy of JTJ so only update mem elsewhere
best_x_state = mu, nu, norm_f, f.copy(), spow, rawJTJ_scratch # update mu,nu,JTJ of initial best state
else:
#on all other iterations, update JTJ of best_x_state if best_x == x, i.e. if we've just evaluated
# a previously accepted step that was deemed the best we've seen so far
if _np.allclose(x, best_x):
rawJTJ_scratch[:, :] = JTJ[:, :] # use pre-allocated memory
rawJTJ_scratch[idiag] = undamped_JTJ_diag # no damping; the "raw" JTJ
best_x_state = best_x_state[0:5] + (rawJTJ_scratch,) # update mu,nu,JTJ of initial "best state"
#determine increment using adaptive damping
while True: # inner loop
if profiler: profiler.memory_check("custom_leastsq: begin inner iter")
#print("DB: Pre-damping JTJ diag = [",_np.min(_np.abs(JTJ[idiag])),_np.max(_np.abs(JTJ[idiag])),"]")
if damping_mode == 'identity':
assert(damping_clip is None), "damping_clip cannot be used with damping_mode == 'identity'"
if damping_basis == "singular_values":
reg_Jac_s = global_Jac_s + mu
#Notes:
#Previously we computed inv_JTJ here and below computed dx:
#inv_JTJ = _np.dot(Jac_V, _np.dot(_np.diag(1 / reg_Jac_s**2), Jac_V.T))
# dx = _np.dot(Jac_V, _np.diag(1 / reg_Jac_s**2), global_Jac_VT_mJTf
#But now we just compute reg_Jac_s here and do the rest below.
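# Note: in this basis the damping is applied to the singular values themselves
# (s -> s + mu) and dx is later formed as V * diag(1/(s+mu)^2) * V^T * (-J^T f),
# rather than by adding mu to the diagonal of J^T J.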
else:
# ok if assume fine-param-proc.size == 1 (otherwise need to sync setting local JTJ)
JTJ[idiag] = undamped_JTJ_diag + mu # augment normal equations
elif damping_mode == 'JTJ':
if damping_basis == "singular_values":
reg_Jac_s = global_Jac_s + mu * dclip(global_Jac_s)
else:
add_to_diag = mu * dclip(undamped_JTJ_diag)
JTJ[idiag] = undamped_JTJ_diag + add_to_diag # ok if assume fine-param-proc.size == 1
elif damping_mode == 'invJTJ':
if damping_basis == "singular_values":
reg_Jac_s = global_Jac_s + mu * dclip(1.0 / global_Jac_s)
else:
add_to_diag = mu * dclip(1.0 / undamped_JTJ_diag)
JTJ[idiag] = undamped_JTJ_diag + add_to_diag # ok if assume fine-param-proc.size == 1
elif damping_mode == 'adaptive':
if damping_basis == "singular_values":
reg_Jac_s_lst = [global_Jac_s + mu * dclip(global_Jac_s**(spow + 0.1)),
global_Jac_s + mu * dclip(global_Jac_s**spow),
global_Jac_s + mu * dclip(global_Jac_s**(spow - 0.1))]
else:
add_to_diag_lst = [mu * dclip(undamped_JTJ_diag**(spow + 0.1)),
mu * dclip(undamped_JTJ_diag**spow),
mu * dclip(undamped_JTJ_diag**(spow - 0.1))]
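# Note: 'adaptive' mode prepares three damped systems using exponents spow+0.1, spow,
# and spow-0.1 of the chosen quantity; after solving all three, the candidate with the
# best gain ratio is kept and spow is nudged toward the winning exponent.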
else:
raise ValueError("Invalid damping mode: %s" % damping_mode)
#assert(_np.isfinite(JTJ).all()), "Non-finite JTJ (inner)!" # NaNs tracking
#assert(_np.isfinite(JTf).all()), "Non-finite JTf (inner)!" # NaNs tracking
try:
if profiler: profiler.memory_check("custom_leastsq: before linsolve")
tm = _time.time()
success = True
if damping_basis == 'diagonal_values':
if damping_mode == 'adaptive':
for ii, add_to_diag in enumerate(add_to_diag_lst):
JTJ[idiag] = undamped_JTJ_diag + add_to_diag # ok if assume fine-param-proc.size == 1
#dx_lst.append(_scipy.linalg.solve(JTJ, -JTf, sym_pos=True))
#dx_lst.append(custom_solve(JTJ, -JTf, resource_alloc))
_custom_solve(JTJ, minus_JTf, dx_lst[ii], ari, resource_alloc,
serial_solve_proc_threshold)
else:
#dx = _scipy.linalg.solve(JTJ, -JTf, sym_pos=True)
_custom_solve(JTJ, minus_JTf, dx, ari, resource_alloc, serial_solve_proc_threshold)
elif damping_basis == 'singular_values':
#Note: above solves JTJ*x = -JTf => x = inv_JTJ * (-JTf)
# but: J = U*s*Vh => JTJ = (VhT*s*UT)(U*s*Vh) = VhT*s^2*Vh, and inv_Vh = V b/c V is unitary
# so inv_JTJ = inv_Vh * 1/s^2 * inv_VhT = V * 1/s^2 * VT = (N,K)*(K,K)*(K,N) if use pseudoinv
if damping_mode == 'adaptive':
#dx_lst = [_np.dot(ijtj, minus_JTf) for ijtj in inv_JTJ_lst] # special case
for ii, s in enumerate(reg_Jac_s_lst):
ari.fill_dx_svd(Jac_V, (1 / s**2) * global_Jac_VT_mJTf, dx_lst[ii])
else:
# dx = _np.dot(inv_JTJ, minus_JTf)
ari.fill_dx_svd(Jac_V, (1 / reg_Jac_s**2) * global_Jac_VT_mJTf, dx)
else:
raise ValueError("Invalid damping_basis = '%s'" % damping_basis)
if profiler: profiler.add_time("custom_leastsq: linsolve", tm)
#except _np.linalg.LinAlgError:
except _scipy.linalg.LinAlgError: # DIST TODO - a different kind of exception caught?
success = False
if success and use_acceleration: # Find acceleration term:
assert(damping_mode != 'adaptive'), "Cannot use acceleration in adaptive mode (yet)"
assert(damping_basis != 'singular_values'), "Cannot use acceleration w/singular-value basis (yet)"
df2_eps = 1.0
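# Note: the block below estimates the second directional derivative of f along dx via a
# central difference, d2f ~= (f(x + h*dx) + f(x - h*dx) - 2*f(x)) / h^2 with h = df2_eps,
# then solves J^T J * dx2 = -0.5 * J^T d2f and adds dx2 to dx (a geodesic-acceleration-style
# correction to the Levenberg-Marquardt step).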
try:
#df2 = (obj_fn(x + df2_dx) + obj_fn(x - df2_dx) - 2 * f) / \
# df2_eps**2 # 2nd deriv of f along dx direction
# Above line expanded to reuse shared memory
df2 = -2 * f
df2_x[:] = x + df2_eps * dx
ari.allgather_x(df2_x, global_accel_x)
df2 += obj_fn(global_accel_x)
df2_x[:] = x - df2_eps * dx
ari.allgather_x(df2_x, global_accel_x)
df2 += obj_fn(global_accel_x)
df2 /= df2_eps**2
f[:] = df2; df2 = f # use `f` as an appropriate shared-mem object for fill_jtf below
ari.fill_jtf(Jac, df2, JTdf2)
JTdf2 *= -0.5 # keep using JTdf2 memory in solve call below
#dx2 = _scipy.linalg.solve(JTJ, -0.5 * JTdf2, sym_pos=True) # Note: JTJ not init w/'adaptive'
_custom_solve(JTJ, JTdf2, dx2, ari, resource_alloc, serial_solve_proc_threshold)
dx1[:] = dx[:]
dx += dx2 # add acceleration term to dx
except _scipy.linalg.LinAlgError:
print("WARNING - linear solve failed for acceleration term!")
# but ok to continue - just stick with first order term
except ValueError:
print("WARNING - value error during computation of acceleration term!")
reject_msg = ""
if profiler: profiler.memory_check("custom_leastsq: after linsolve")
if success: # linear solve succeeded
#dx = _hack_dx(obj_fn, x, dx, Jac, JTJ, JTf, f, norm_f)
if damping_mode != 'adaptive':
new_x[:] = x + dx
norm_dx = ari.norm2_x(dx) # _np.linalg.norm(dx)**2
#ensure dx isn't too large - don't let any component change by more than ~max_dx_scale
if max_norm_dx and norm_dx > max_norm_dx:
dx *= _np.sqrt(max_norm_dx / norm_dx)
new_x[:] = x + dx
norm_dx = ari.norm2_x(dx) # _np.linalg.norm(dx)**2
#apply x limits (bounds)
if x_limits is not None:
# Approach 1: project x into valid space by simply clipping out-of-bounds values
for i, (x_el, lower, upper) in enumerate(zip(x, x_lower_limits, x_upper_limits)):
if new_x[i] < lower:
new_x[i] = lower
dx[i] = lower - x_el
elif new_x[i] > upper:
new_x[i] = upper
dx[i] = upper - x_el
norm_dx = ari.norm2_x(dx) # _np.linalg.norm(dx)**2
# Approach 2: by scaling back dx (seems less good, but here in case we want it later)
# # minimally reduce dx s.t. new_x = x + dx so that x_lower_limits <= x+dx <= x_upper_limits
# # x_lower_limits - x <= dx <= x_upper_limits - x. Note: use potentially updated dx from
# # max_norm_dx block above. For 0 <= scale <= 1,
# # 1) require x + scale*dx - x_upper_limits <= 0 => scale <= (x_upper_limits - x) / dx
# # [Note: above assumes dx > 0 b/c if not it moves x away from bound and scale < 0]
# # so if scale >= 0, then scale = min((x_upper_limits - x) / dx, 1.0)
# scale = None
# new_x[:] = (x_upper_limits - x) / dx
# new_x_min = ari.min_x(new_x)
# if 0 <= new_x_min < 1.0:
# scale = new_x_min
#
# # 2) require x + scale*dx - x_lower_limits <= 0 => scale <= (x - x_lower_limits) / (-dx)
# new_x[:] = (x_lower_limits - x) / dx
# new_x_min = ari.min_x(new_x)
# if 0 <= new_x_min < 1.0:
# scale = new_x_min if (scale is None) else min(new_x_min, scale)
#
# if scale is not None:
# dx *= scale
# new_x[:] = x + dx
# norm_dx = ari.norm2_x(dx) # _np.linalg.norm(dx)**2
else:
for dx, new_x in zip(dx_lst, new_x_lst):
new_x[:] = x + dx
norm_dx_lst = [ari.norm2_x(dx) for dx in dx_lst]
#ensure dx isn't too large - don't let any component change by more than ~max_dx_scale
if max_norm_dx:
for i, norm_dx in enumerate(norm_dx_lst):
if norm_dx > max_norm_dx:
dx_lst[i] *= _np.sqrt(max_norm_dx / norm_dx)
new_x_lst[i][:] = x + dx_lst[i]
norm_dx_lst[i] = ari.norm2_x(dx_lst[i])
#apply x limits (bounds)
if x_limits is not None:
for i, (dx, new_x) in enumerate(zip(dx_lst, new_x_lst)):
# Do same thing as above for each possible dx in dx_lst
# Approach 1:
for ii, (x_el, lower, upper) in enumerate(zip(x, x_lower_limits, x_upper_limits)):
if new_x[ii] < lower:
new_x[ii] = lower
dx[ii] = lower - x_el
elif new_x[ii] > upper:
new_x[ii] = upper
dx[ii] = upper - x_el
norm_dx_lst[i] = ari.norm2_x(dx) # _np.linalg.norm(dx)**2
# Approach 2:
# scale = None
# new_x[:] = (x_upper_limits - x) / dx
# new_x_min = ari.min_x(new_x)
# if 0 <= new_x_min < 1.0:
# scale = new_x_min
#
# new_x[:] = (x_lower_limits - x) / dx
# new_x_min = ari.min_x(new_x)
# if 0 <= new_x_min < 1.0:
# scale = new_x_min if (scale is None) else min(new_x_min, scale)
#
# if scale is not None:
# dx *= scale
# new_x[:] = x + dx
# norm_dx_lst[i] = ari.norm2_x(dx)
norm_dx = norm_dx_lst[1] # just use center value for printing & checks below
printer.log(" - Inner Loop: mu=%g, norm_dx=%g" % (mu, norm_dx), 2)
#MEM if profiler: profiler.memory_check("custom_leastsq: mid inner loop")
#print("DB: new_x = ", new_x)
if norm_dx < (rel_xtol**2) * norm_x: # and mu < MU_TOL2:
if oob_check_interval <= 1:
msg = "Relative change, |dx|/|x|, is at most %g" % rel_xtol
converged = True; break
else:
printer.log(("** Converged with out-of-bounds with check interval=%d, reverting to last "
"know in-bounds point and setting interval=1 **") % oob_check_interval, 2)
oob_check_interval = 1
x[:] = best_x[:]
mu, nu, norm_f, f[:], spow, _ = best_x_state
break
if norm_dx > (norm_x + rel_xtol) / (_MACH_PRECISION**2):
msg = "(near-)singular linear system"; break
if oob_check_interval > 0 and oob_check_mode == 0:
if k % oob_check_interval == 0:
#Check to see if objective function is out of bounds
in_bounds = []
if damping_mode == 'adaptive':
new_f_lst = []
for new_x, global_new_x in zip(new_x_lst, global_new_x_lst):
ari.allgather_x(new_x, global_new_x)
try:
new_f = obj_fn(global_new_x, oob_check=True)
except ValueError: # Use this to mean - "not allowed, but don't stop"
in_bounds.append(False)
new_f_lst.append(None) # marks OOB attempts that shouldn't be considered
else: # no exception raised
in_bounds.append(True)
new_f_lst.append(new_f.copy())
else:
#print("DB: Trying |x| = ", _np.linalg.norm(new_x), " |x|^2=", _np.dot(new_x,new_x))
# MEM if profiler: profiler.memory_check("custom_leastsq: before oob_check obj_fn")
ari.allgather_x(new_x, global_new_x)
try:
new_f = obj_fn(global_new_x, oob_check=True)
except ValueError: # Use this to mean - "not allowed, but don't stop"
in_bounds.append(False)
else:
in_bounds.append(True)
if any(in_bounds): # In adaptive mode, proceed if *any* cases are in-bounds
new_x_is_allowed = True
new_x_is_known_inbounds = True
else:
MIN_STOP_ITER = 1 # the minimum iteration where an OOB objective stops the optimization
if oob_action == "reject" or k < MIN_STOP_ITER:
new_x_is_allowed = False # (and also not in bounds)
elif oob_action == "stop":
if oob_check_interval == 1:
msg = "Objective function out-of-bounds! STOP"
converged = True; break
else: # reset to last known in-bounds point and now do oob check every step
printer.log(
("** Hit out-of-bounds with check interval=%d, reverting to last "
"know in-bounds point and setting interval=1 **") % oob_check_interval, 2)
oob_check_interval = 1
x[:] = best_x[:]
mu, nu, norm_f, f[:], spow, _ = best_x_state # can't make use of saved JTJ yet
break # restart next outer loop
else:
raise ValueError("Invalid `oob_action`: '%s'" % oob_action)
else: # don't check this time
if damping_mode == 'adaptive':
new_f_lst = []
for new_x, global_new_x in zip(new_x_lst, global_new_x_lst):
ari.allgather_x(new_x, global_new_x)
new_f_lst.append(obj_fn(global_new_x).copy())
else:
ari.allgather_x(new_x, global_new_x)
new_f = obj_fn(global_new_x, oob_check=False)
new_x_is_allowed = True
new_x_is_known_inbounds = False
else:
#Just evaluate objective function normally; never check for in-bounds condition
if damping_mode == 'adaptive':
new_f_lst = []
for new_x, global_new_x in zip(new_x_lst, global_new_x_lst):
ari.allgather_x(new_x, global_new_x)
new_f_lst.append(obj_fn(global_new_x).copy())
else:
ari.allgather_x(new_x, global_new_x)
new_f = obj_fn(global_new_x)
new_x_is_allowed = True
new_x_is_known_inbounds = bool(oob_check_interval == 0) # consider "in bounds" if not checking
if new_x_is_allowed:
# MEM if profiler: profiler.memory_check("custom_leastsq: after obj_fn")
if damping_mode == 'adaptive':
norm_new_f_lst = [ari.norm2_f(new_f) if (new_f is not None) else 1e100
for new_f in new_f_lst] # 1e100 so we don't choose OOB adaptive cases
if any([not _np.isfinite(norm_new_f) for norm_new_f in norm_new_f_lst]): # avoid inf loop
msg = "Infinite norm of objective function!"; break
#iMin = _np.argmin(norm_new_f_lst) # pick lowest (best) objective
gain_ratio_lst = [(norm_f - nnf) / ari.dot_x(dx, mu * dx + minus_JTf)
for (nnf, dx) in zip(norm_new_f_lst, dx_lst)]
iMin = _np.argmax(gain_ratio_lst) # pick highest (best) gain ratio
# Note: the gain ratio compares the actual decrease in |f|^2 with the decrease
# predicted by the linear model, dL = dx . (mu*dx - J^T f); the J^T f term arises
# because grad(f^T f) = 2 J^T f.
norm_new_f = norm_new_f_lst[iMin]
new_f = new_f_lst[iMin]
new_x = new_x_lst[iMin]
global_new_x = global_new_x_lst[iMin]
dx = dx_lst[iMin]
if iMin == 0: spow = min(1.0, spow + 0.1)
elif iMin == 2: spow = max(-1.0, spow - 0.1)
printer.log("ADAPTIVE damping => i=%d b/c fs=[%s] gains=[%s] => spow=%g" % (
iMin, ", ".join(["%.3g" % v for v in norm_new_f_lst]),
", ".join(["%.3g" % v for v in gain_ratio_lst]), spow))
else:
norm_new_f = ari.norm2_f(new_f) # _np.linalg.norm(new_f)**2
if not _np.isfinite(norm_new_f): # avoid infinite loop...
msg = "Infinite norm of objective function!"; break
# dL = expected decrease in ||F||^2 from linear model
dL = ari.dot_x(dx, mu * dx + minus_JTf)
dF = norm_f - norm_new_f # actual decrease in ||F||^2
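# Note: dF/dL is the Levenberg-Marquardt gain ratio; a step is accepted below only when
# both dL and dF are positive (or when the uphill-step criterion allows it), and the
# gain ratio of an accepted step also controls how much mu is reduced.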
#DEBUG - see if cos_phi < 0.001, say, might work as a convergence criterion
#if damping_basis == 'singular_values':
# # projection of new_f onto solution tangent plane
# new_f_proj = _np.dot(Jac_Uproj, _np.dot(Jac_Uproj.T, new_f))
# # angle between residual vec and tangent plane
# cos_phi = _np.sqrt(_np.dot(new_f_proj, new_f_proj) / norm_new_f)
# #grad_f_norm = _np.linalg.norm(mu * dx - JTf)
#else:
# cos_phi = 0
if dF <= 0 and uphill_step_threshold > 0:
beta = 0 if last_accepted_dx is None else \
(ari.dot_x(dx, last_accepted_dx)
/ _np.sqrt(ari.norm2_x(dx) * ari.norm2_x(last_accepted_dx)))
uphill_ok = (uphill_step_threshold - beta) * norm_new_f < min(min_norm_f, norm_f)
else:
uphill_ok = False
if use_acceleration:
accel_ratio = 2 * _np.sqrt(ari.norm2_x(dx2) / ari.norm2_x(dx1))
printer.log(" (cont): norm_new_f=%g, dL=%g, dF=%g, reldL=%g, reldF=%g aC=%g" %
(norm_new_f, dL, dF, dL / norm_f, dF / norm_f, accel_ratio), 2)
else:
printer.log(" (cont): norm_new_f=%g, dL=%g, dF=%g, reldL=%g, reldF=%g" %
(norm_new_f, dL, dF, dL / norm_f, dF / norm_f), 2)
accel_ratio = 0.0
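# Note: accel_ratio = 2*|dx2|/|dx1| measures the size of the acceleration correction
# relative to the first-order step; steps are only treated as converged/accepted when
# accel_ratio <= alpha (0.5), a common safeguard when an acceleration term is used.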
if dL / norm_f < rel_ftol and dF >= 0 and dF / norm_f < rel_ftol \
and dF / dL < 2.0 and accel_ratio <= alpha:
if oob_check_interval <= 1: # (if 0 then no oob checking is done)
msg = "Both actual and predicted relative reductions in the" + \
" sum of squares are at most %g" % rel_ftol
converged = True; break
else:
printer.log(("** Converged with out-of-bounds with check interval=%d, "
"reverting to last know in-bounds point and setting "
"interval=1 **") % oob_check_interval, 2)
oob_check_interval = 1
x[:] = best_x[:]
mu, nu, norm_f, f[:], spow, _ = best_x_state # can't make use of saved JTJ yet
break
# MEM if profiler: profiler.memory_check("custom_leastsq: before success")
if (dL > 0 and dF > 0 and accel_ratio <= alpha) or uphill_ok:
#Check whether an otherwise acceptable solution is in-bounds
if oob_check_mode == 1 and oob_check_interval > 0 and k % oob_check_interval == 0:
#Check to see if objective function is out of bounds
try:
#print("DB: Trying |x| = ", _np.linalg.norm(new_x), " |x|^2=", _np.dot(new_x,new_x))
# MEM if profiler:
# MEM profiler.memory_check("custom_leastsq: before oob_check obj_fn mode 1")
obj_fn(global_new_x, oob_check=True) # don't actually need return val (== new_f)
new_f_is_allowed = True
new_x_is_known_inbounds = True
except ValueError: # Use this to mean - "not allowed, but don't stop"
MIN_STOP_ITER = 1 # the minimum iteration where an OOB objective can stop the optimization
if oob_action == "reject" or k < MIN_STOP_ITER:
new_f_is_allowed = False # (and also not in bounds)
elif oob_action == "stop":
if oob_check_interval == 1:
msg = "Objective function out-of-bounds! STOP"
converged = True; break
else: # reset to last known in-bounds point and now do oob check every step
printer.log(
("** Hit out-of-bounds with check interval=%d, reverting to last "
"know in-bounds point and setting interval=1 **") % oob_check_interval,
2)
oob_check_interval = 1
x[:] = best_x[:]
mu, nu, norm_f, f[:], spow, _ = best_x_state # can't make use of saved JTJ yet
break # restart next outer loop
else:
raise ValueError("Invalid `oob_action`: '%s'" % oob_action)
else:
new_f_is_allowed = True
if new_f_is_allowed:
# reduction in error: increment accepted!
t = 1.0 - (2 * dF / dL - 1.0)**3 # dF/dL == gain ratio
# always reduce mu for accepted step when |dx| is small
mu_factor = max(t, 1.0 / 3.0) if norm_dx > 1e-8 else 0.3
mu *= mu_factor
nu = 2
x[:] = new_x[:]; f[:] = new_f[:]; norm_f = norm_new_f
global_x[:] = global_new_x[:]
printer.log(" Accepted%s! gain ratio=%g mu * %g => %g"
% (" UPHILL" if uphill_ok else "", dF / dL, mu_factor, mu), 2)
last_accepted_dx = dx.copy()
if new_x_is_known_inbounds and norm_f < min_norm_f:
min_norm_f = norm_f
best_x[:] = x[:]
best_x_state = (mu, nu, norm_f, f.copy(), spow, None)
#Note: we use rawJTJ=None above because the current `JTJ` was evaluated
# at the *last* x-value -- we need to wait for the next outer loop
# to compute the JTJ for this best_x_state
#assert(_np.isfinite(x).all()), "Non-finite x!" # NaNs tracking
#assert(_np.isfinite(f).all()), "Non-finite f!" # NaNs tracking
##Check to see if we *would* switch to Q-N method in a hybrid algorithm
#new_Jac = jac_fn(new_x)
#new_JTf = _np.dot(new_Jac.T,new_f)
#print(" CHECK: %g < %g ?" % (_np.linalg.norm(new_JTf,
# ord=_np.inf),0.02 * _np.linalg.norm(new_f)))
break # exit inner loop normally
else:
reject_msg = " (out-of-bounds)"
else:
reject_msg = " (out-of-bounds)"
else:
reject_msg = " (LinSolve Failure)"
# if this point is reached, either the linear solve failed
# or the error did not reduce. In either case, reject increment.
#Increase damping (mu), then increase damping factor to
# accelerate further damping increases.
mu *= nu
if nu > half_max_nu: # watch for nu getting too large (&overflow)
msg = "Stopping after nu overflow!"; break
nu = 2 * nu
printer.log(" Rejected%s! mu => mu*nu = %g, nu => 2*nu = %g"
% (reject_msg, mu, nu), 2)
#end of inner loop
#end of outer loop
else:
#if no break stmt hit, then we've exceeded max_iter
msg = "Maximum iterations (%d) exceeded" % max_iter
converged = True # call result "converged" even in this case, but issue warning:
printer.warning("Treating result as *converged* after maximum iterations (%d) were exceeded." % max_iter)
except KeyboardInterrupt:
if comm is not None:
# ensure all procs agree on what best_x is (in case the interrupt occurred around x being updated)
comm.Bcast(best_x, root=0)
printer.log("Rank %d caught keyboard interrupt! Returning the current solution as being *converged*."
% comm.Get_rank())
else:
printer.log("Caught keyboard interrupt! Returning the current solution as being *converged*.")
msg = "Keyboard interrupt!"
converged = True
if comm is not None:
comm.barrier() # Just to be safe, so procs stay synchronized and we don't free anything too soon
ari.deallocate_jtj(JTJ)
ari.deallocate_jtf(JTf)
ari.deallocate_jtf(x)
ari.deallocate_jtj_shared_mem_buf(jtj_buf)
#ari.deallocate_x_for_jac(x_for_jac)
if x_limits is not None:
ari.deallocate_jtf(x_lower_limits)
ari.deallocate_jtf(x_upper_limits)
if damping_basis == "singular_values":
ari.deallocate_jtj(Jac_V)
if damping_mode == 'adaptive':
for xx in dx_lst: ari.deallocate_jtf(xx)
for xx in new_x_lst: ari.deallocate_jtf(xx)
else:
ari.deallocate_jtf(dx)
ari.deallocate_jtf(new_x)
if use_acceleration:
ari.deallocate_jtf(dx1)
ari.deallocate_jtf(dx2)
ari.deallocate_jtf(df2_x)
ari.deallocate_jtf(JTdf2)
if num_fd_iters > 0:
ari.deallocate_jac(fdJac)
ari.allgather_x(best_x, global_x)
ari.deallocate_jtf(best_x)
#JTJ[idiag] = undamped_JTJ_diag #restore diagonal
mu, nu, norm_f, f[:], spow, rawJTJ = best_x_state
global_f = _np.empty(ari.global_num_elements(), 'd')
ari.allgather_f(f, global_f)
return global_x, converged, msg, mu, nu, norm_f, global_f, rawJTJ
#solution = _optResult()
#solution.x = x; solution.fun = f
#solution.success = converged
#solution.message = msg
#return solution | 5,355,690 |
def degrees(x):
"""Converts angle x from radians to degrees.
:type x: numbers.Real
:rtype: float
"""
import math  # local import so the snippet stays self-contained
return math.degrees(x)
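# Example usage (values only illustrative):
#   degrees(3.141592653589793)   # -> 180.0
#   degrees(0.5235987755982988)  # -> ~30.0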
def account_credit(account=None,
asset=None,
date=None,
tp=None,
order_by=['tp', 'account', 'asset'],
hide_empty=False):
"""
Get credit operations for the account
Args:
account: filter by account code
asset: filter by asset code
date: get balance for specified date/time
tp: filter by account type
order_by: field or list of sorting fields
hide_empty: don't return zero balances
Returns:
generator object
"""
return _account_summary('credit',
account=account,
asset=asset,
date=date,
tp=tp,
order_by=order_by,
hide_empty=hide_empty) | 5,355,692 |
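# Example usage (the account and asset codes below are hypothetical):
#   for entry in account_credit(account='1001', asset='USD', hide_empty=True):
#       print(entry)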
def get_logger(log_file=None):
"""
Initialize logger configuration.
Returns:
logger.
"""
formatter = logging.Formatter(
'%(asctime)s %(name)s.%(funcName)s +%(lineno)s: '
'%(levelname)-8s [%(process)d] %(message)s'
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
if log_file:
file_handler = handlers.RotatingFileHandler(log_file, backupCount=10)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger | 5,355,693 |
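# Example usage (the log path below is hypothetical):
#   logger = get_logger('/tmp/app.log')
#   logger.info('service started')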
def verify(model):
"""
Evaluate the model on the test data set.
:param model: the network model (including its parameters)
:return res: a list of per-sample result dicts (label, correctness, BvSB margin)
"""
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
if device == 'cuda':
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
res = []
for idx, data in enumerate(test_loader):
img, label = data
img, label = img.to(device), label.to(device)
label2 = label.cpu().numpy()[0]  # move to CPU before converting to numpy
img = img.view(img.size(0), -1)
out = model(img)
all_output = []
for i in out.data:
all_output.append(i.cpu().numpy())  # move to CPU before converting to numpy
all_output = all_output[0]
if max(all_output) == all_output[label2]:
correct = True
else:
correct = False
all_output = sorted(all_output, reverse=True)
bvsb = all_output[0] - all_output[1]
obj = {
"label": int(label2),
"correct": correct,
"bvsb": float(bvsb)
}
res.append(obj)
if idx >= test_num - 1:
break
return res | 5,355,694 |
def parse_solution_file(solution_file):
"""Parse a solution file."""
ids = []
classes = []
with open(solution_file) as file_handle:
solution_reader = csv.reader(file_handle)
header = next(solution_reader, None)
if header != HEADER:
raise ValueError(
'Incorrect header found: {}, should be: {}'.format(
header, HEADER))
solution = sorted(list(solution_reader), key=lambda x: x[0])
for row in solution:
if len(row) < 2:
raise ValueError(
'Bad row length: {}, '
'should be at least {} for row {}'.format(
len(row), len(HEADER), row))
row_classes = row[1:]
if any(class_ not in POSSIBLE_CLASSES for class_ in row_classes):
raise ValueError(
'Unknown class found among: {}'.format(row_classes))
ids.append(row[0])
classes.append(row_classes)
return ids, classes | 5,355,695 |
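# Example usage (the file name is hypothetical):
#   ids, classes = parse_solution_file('solution.csv')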
def fibonacci_modulo(number, modulo):
"""
Calculating (n-th Fibonacci number) mod m
Args:
number: fibonacci number
modulo: modulo
Returns:
(n-th Fibonacci number) mod m
Examples:
>>> fibonacci_modulo(11527523930876953, 26673)
10552
"""
period = _pisano_period_len(modulo)
answer = _fib(number - number // period * period) % modulo
return answer | 5,355,696 |
def _str_unusual_grades(df: pd.DataFrame) -> Union[str, None]:
"""Print the number of unusual grades."""
grades = np.arange(0, 10.5, 0.5).astype(float)
catch_grades = []
for item in df["grade"]:
try:
if float(item) not in grades:
catch_grades.append(item)
except ValueError:
catch_grades.append(item)
if catch_grades == []:
return None
else:
return (
f"– Over all grades, {len(catch_grades)} of {len(df)} cards do not receive"
f" standard grades. These grades are in {set(catch_grades)}"
) | 5,355,697 |
def _adjust_estimator_options(estimator: Any, est_options: Dict[str, Any], **kwargs) -> Dict[str, Any]:
"""
Adds specific required estimator options to the `est_options` dictionary.
Parameters
----------
estimator : Any
The estimator class for which the options have to be added
est_options : Dict[str, Any]
Dictionary, where the additional estimator options should be added to
kwargs :
Additional estimator options as keyword arguments
Returns
-------
Dict[str, Any]
The input `est_options` dictionary containing the additional estimator options
"""
if estimator.__name__ == 'XGBClassifier':
est_options['num_class'] = kwargs['n_categories']
elif estimator.__name__ == 'DNNClassifier':
est_options['n_classes'] = kwargs['n_categories']
est_options['n_features'] = kwargs['n_features']
est_options['random_state'] = kwargs['random_seed']
return est_options | 5,355,698 |
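# Example usage (assumes xgboost is installed; keyword values are hypothetical):
#   from xgboost import XGBClassifier
#   opts = _adjust_estimator_options(XGBClassifier, {}, n_categories=3)
#   # opts -> {'num_class': 3}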
def run(args):
"""
Create all registered downloads (locally).
"""
if args.domain:
args.env['request'].environ['HTTP_HOST'] = args.domain
for name, download in args.env['registry'].getUtilitiesFor(IDownload):
args.log.info('creating download %s' % name)
if not args.list:
download.create(args.env['request']) | 5,355,699 |