the-stack_0_12607
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import copy
import itertools
import math
import os
import pytest
import random
import shutil
import tempfile
import time
from subprocess import check_call
from tests.common.test_dimensions import create_exec_option_dimension_from_dict
from tests.common.impala_test_suite import ImpalaTestSuite, LOG
from tests.util.filesystem_utils import WAREHOUSE, get_fs_path
from tests.util.test_file_parser import QueryTestSectionReader
# Random fuzz testing of HDFS scanners. Existing tables for any HDFS file format
# are corrupted in random ways to flush out bugs with handling of corrupted data.
class TestScannersFuzzing(ImpalaTestSuite):
# Use abort_on_error = False to ensure we scan all the files.
ABORT_ON_ERROR_VALUES = [False]
# Only run on all nodes - num_nodes=1 would not provide additional coverage.
NUM_NODES_VALUES = [0]
# Limit memory to avoid causing other concurrent tests to fail.
MEM_LIMITS = ['512m']
# Test the codegen and non-codegen paths.
DISABLE_CODEGEN_VALUES = [True, False]
# Test a range of batch sizes to exercise different corner cases.
BATCH_SIZES = [0, 1, 16, 10000]
# Test with denial of reservations at varying frequency. This will affect the number
# of scanner threads that can be spun up.
  DEBUG_ACTION_VALUES = [None,
    '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.5',
    '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@1.0']
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestScannersFuzzing, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension_from_dict({
'abort_on_error' : cls.ABORT_ON_ERROR_VALUES,
'num_nodes' : cls.NUM_NODES_VALUES,
'mem_limit' : cls.MEM_LIMITS,
'debug_action' : cls.DEBUG_ACTION_VALUES}))
# TODO: enable for more table formats once they consistently pass the fuzz test.
# TODO(IMPALA-6772): enable for ORC formats once a new version after release-1.4.3
# of ORC library is released.
cls.ImpalaTestMatrix.add_constraint(lambda v:
v.get_value('table_format').file_format in ('avro', 'parquet') or
(v.get_value('table_format').file_format == 'text' and
v.get_value('table_format').compression_codec in ('none', 'lzo')))
def test_fuzz_alltypes(self, vector, unique_database):
table_format = vector.get_value('table_format')
src_db = QueryTestSectionReader.get_db_name(table_format)
table_name = "alltypes"
self.run_fuzz_test(vector, src_db, table_name, unique_database, table_name)
def test_fuzz_decimal_tbl(self, vector, unique_database):
table_format = vector.get_value('table_format')
table_name = "decimal_tbl"
if table_format.file_format == 'avro':
table_name = "avro_decimal_tbl"
if table_format.compression_codec != 'snap' or \
table_format.compression_type != 'block':
pytest.skip()
elif table_format.file_format == 'rc' or \
table_format.file_format == 'seq':
pytest.skip()
elif table_format.file_format == 'text' and \
table_format.compression_codec != 'none':
# decimal_tbl is not present for these file formats
pytest.skip()
src_db = QueryTestSectionReader.get_db_name(table_format)
self.run_fuzz_test(vector, src_db, table_name, unique_database, table_name, 10)
def test_fuzz_nested_types(self, vector, unique_database):
table_format = vector.get_value('table_format')
table_name = "complextypestbl"
src_db = QueryTestSectionReader.get_db_name(table_format)
if table_format.file_format != 'parquet': pytest.skip()
self.run_fuzz_test(vector, src_db, table_name, unique_database, table_name, 10)
def test_fuzz_uncompressed_parquet(self, vector, unique_database):
"""Parquet tables in default schema are compressed, so in order
to do the fuzz_test on an uncompressed parquet table, this test
clones from an existing parquet table into a new table with
no compression.
"""
table_format = vector.get_value('table_format')
if vector.get_value('table_format').compression_codec != 'none': pytest.skip()
if table_format.file_format != 'parquet': pytest.skip()
"""Even when the compression_codec is none, the default compression type is snappy
so compression codec is changed explicitly to be none.
"""
self.execute_query("set compression_codec=none")
tbl_list = ["alltypes", "decimal_tbl"]
for orig_tbl_name in tbl_list:
src_table_name = "parquet_uncomp_src_" + orig_tbl_name
fuzz_table_name = "parquet_uncomp_dst_" + orig_tbl_name
fq_tbl_name = unique_database + "." + src_table_name
create_tbl = ("create table {0} stored as parquet as select * from"
" functional_parquet.{1}".format(fq_tbl_name, orig_tbl_name))
self.execute_query(create_tbl)
self.run_fuzz_test(vector, unique_database, src_table_name, unique_database,
fuzz_table_name, 10)
# TODO: add test coverage for additional data types like char and varchar
def run_fuzz_test(self, vector, src_db, src_table, fuzz_db, fuzz_table, num_copies=1):
""" Do some basic fuzz testing: create a copy of an existing table with randomly
corrupted files and make sure that we don't crash or behave in an unexpected way.
'unique_database' is used for the table, so it will be cleaned up automatically.
If 'num_copies' is set, create that many corrupted copies of each input file.
SCANNER_FUZZ_SEED can be set in the environment to reproduce the result (assuming that
input files are the same).
SCANNER_FUZZ_KEEP_FILES can be set in the environment to keep the generated files.
"""
# Create and seed a new random number generator for reproducibility.
rng = random.Random()
random_seed = os.environ.get("SCANNER_FUZZ_SEED") or time.time()
LOG.info("Using random seed %d", random_seed)
rng.seed(long(random_seed))
tmp_table_dir = tempfile.mkdtemp(prefix="tmp-scanner-fuzz-%s" % fuzz_table,
dir=os.path.join(os.environ['IMPALA_HOME'], "testdata"))
self.execute_query("create table %s.%s like %s.%s" % (fuzz_db, fuzz_table,
src_db, src_table))
fuzz_table_location = get_fs_path("/test-warehouse/{0}.db/{1}".format(
fuzz_db, fuzz_table))
LOG.info("Generating corrupted version of %s in %s. Local working directory is %s",
fuzz_table, fuzz_db, tmp_table_dir)
# Find the location of the existing table and get the full table directory structure.
fq_table_name = src_db + "." + src_table
table_loc = self._get_table_location(fq_table_name, vector)
check_call(['hdfs', 'dfs', '-copyToLocal', table_loc + "/*", tmp_table_dir])
partitions = self.walk_and_corrupt_table_data(tmp_table_dir, num_copies, rng)
for partition in partitions:
self.execute_query('alter table {0}.{1} add partition ({2})'.format(
fuzz_db, fuzz_table, ','.join(partition)))
# Copy all of the local files and directories to hdfs.
to_copy = ["%s/%s" % (tmp_table_dir, file_or_dir)
for file_or_dir in os.listdir(tmp_table_dir)]
self.filesystem_client.copy_from_local(to_copy, fuzz_table_location)
if "SCANNER_FUZZ_KEEP_FILES" not in os.environ:
shutil.rmtree(tmp_table_dir)
# Querying the corrupted files should not DCHECK or crash.
self.execute_query("refresh %s.%s" % (fuzz_db, fuzz_table))
# Execute a query that tries to read all the columns and rows in the file.
# Also execute a count(*) that materializes no columns, since different code
# paths are exercised.
queries = [
'select count(*) from (select distinct * from {0}.{1}) q'.format(
fuzz_db, fuzz_table),
'select count(*) from {0}.{1} q'.format(fuzz_db, fuzz_table)]
for query, batch_size, disable_codegen in \
itertools.product(queries, self.BATCH_SIZES, self.DISABLE_CODEGEN_VALUES):
query_options = copy(vector.get_value('exec_option'))
query_options['batch_size'] = batch_size
query_options['disable_codegen'] = disable_codegen
query_options['disable_codegen_rows_threshold'] = 0
try:
result = self.execute_query(query, query_options = query_options)
LOG.info('\n'.join(result.log))
except Exception as e:
if 'memory limit exceeded' in str(e).lower():
# Memory limit error should fail query.
continue
msg = "Should not throw error when abort_on_error=0: '{0}'".format(e)
LOG.error(msg)
# Parquet and compressed text can fail the query for some parse errors.
# E.g. corrupt Parquet footer (IMPALA-3773) or a corrupt LZO index file
# (IMPALA-4013).
table_format = vector.get_value('table_format')
if table_format.file_format != 'parquet' \
and not (table_format.file_format == 'text' and \
table_format.compression_codec != 'none') \
and not table_format.file_format == 'rc' \
and not table_format.file_format == 'seq':
raise
def walk_and_corrupt_table_data(self, tmp_table_dir, num_copies, rng):
""" Walks a local copy of a HDFS table directory. Returns a list of partitions, each
as a list of "key=val" pairs. Ensures there is 'num_copies' copies of each file,
and corrupts each of the copies.
"""
partitions = []
# Iterate over the partitions and files we downloaded.
for subdir, dirs, files in os.walk(tmp_table_dir):
if '_impala_insert_staging' in subdir: continue
if len(dirs) != 0: continue # Skip non-leaf directories
rel_subdir = os.path.relpath(subdir, tmp_table_dir)
if rel_subdir != ".":
# Create metadata for any directory partitions.
partitions.append(self.partitions_from_path(rel_subdir))
# Corrupt all of the files that we find.
for filename in files:
filepath = os.path.join(subdir, filename)
copies = [filepath]
for copy_num in range(1, num_copies):
copypath = os.path.join(subdir, "copy{0}_{1}".format(copy_num, filename))
shutil.copyfile(filepath, copypath)
copies.append(copypath)
for filepath in copies:
self.corrupt_file(filepath, rng)
return partitions
def partitions_from_path(self, relpath):
""" Return a list of "key=val" parts from partitions inferred from the directory path.
"""
reversed_partitions = []
while relpath != '':
relpath, suffix = os.path.split(relpath)
reversed_partitions.append(suffix)
return reversed(reversed_partitions)
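    # For example, a relative path such as "year=2010/month=1" yields the parts
    # "year=2010" and "month=1", outermost partition key first.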
def corrupt_file(self, path, rng):
""" Corrupt the file at 'path' in the local file system in a randomised way using the
random number generator 'rng'. Rewrites the file in-place.
Logs a message to describe how the file was corrupted, so the error is reproducible.
"""
with open(path, "rb") as f:
data = bytearray(f.read())
num_corruptions = rng.randint(0, int(math.log(len(data))))
for _ in xrange(num_corruptions):
flip_offset = rng.randint(0, len(data) - 1)
flip_val = rng.randint(0, 255)
LOG.info("corrupt file: Flip byte in {0} at {1} from {2} to {3}".format(
path, flip_offset, data[flip_offset], flip_val))
data[flip_offset] = flip_val
if rng.random() < 0.4:
truncation = rng.randint(0, len(data))
LOG.info("corrupt file: Truncate {0} to {1}".format(path, truncation))
data = data[:truncation]
with open(path, "wb") as f:
f.write(data)
the-stack_0_12608
import os
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import gym
import numpy as np
from stable_baselines3.common import base_class
from stable_baselines3.common.callbacks import EvalCallback, BaseCallback
from stable_baselines3.common.vec_env import VecEnv, sync_envs_normalization
from controller.helpers.logging import merge_dicts, log_dict, get_done_or_dones
# we're adapting stable_baseline's eval function to also return averaged info dict
def evaluate_policy_with_info(
model: "base_class.BaseAlgorithm",
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 10,
deterministic: bool = True,
render: bool = False,
callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
This is made to work only with one env.
.. note::
If environment has not been wrapped with ``Monitor`` wrapper, reward and
episode lengths are counted as it appears with ``env.step`` calls. If
the environment contains wrappers that modify rewards or episode lengths
(e.g. reward scaling, early episode reset), these will affect the evaluation
results as well. You can avoid this by wrapping environment with ``Monitor``
wrapper before anything else.
:param model: The RL agent you want to evaluate.
:param env: The gym environment. In the case of a ``VecEnv``
this must contain only one environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterministic: Whether to use deterministic or stochastic actions
:param render: Whether to render the environment or not
:param callback: callback function to do additional checks,
called after each step. Gets locals() and globals() passed as parameters.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
    :param return_episode_rewards: If True, a list of rewards and episode lengths
per episode will be returned instead of the mean.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
    :return: Mean reward per episode, std of reward per episode, and a dict of
        merged per-step info values.
        Returns ([float], [int], dict) when ``return_episode_rewards`` is True, first
        list containing per-episode rewards and second containing per-episode lengths
        (in number of steps).
"""
is_monitor_wrapped = False
# Avoid circular import
from stable_baselines3.common.env_util import is_wrapped
from stable_baselines3.common.monitor import Monitor
if isinstance(env, VecEnv):
assert env.num_envs == 1, "You must pass only one environment when using this function"
is_monitor_wrapped = env.env_is_wrapped(Monitor)[0]
else:
is_monitor_wrapped = is_wrapped(env, Monitor)
if not is_monitor_wrapped and warn:
warnings.warn(
"Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
"Consider wrapping environment first with ``Monitor`` wrapper.",
UserWarning,
)
episode_rewards, episode_lengths = [], []
not_reseted = True
all_infos = {}
while len(episode_rewards) < n_eval_episodes:
# Number of loops here might differ from true episodes
# played, if underlying wrappers modify episode lengths.
# Avoid double reset, as VecEnv are reset automatically.
if not isinstance(env, VecEnv) or not_reseted:
obs = env.reset()
not_reseted = False
done, state = False, None
episode_reward = 0.0
episode_length = 0
while not done:
action, state = model.predict(obs, state=state, deterministic=deterministic)
obs, reward, done, info = env.step(action)
episode_reward += reward
if callback is not None:
callback(locals(), globals())
episode_length += 1
if render:
env.render()
info = info[0] # access dict within list
all_infos = merge_dicts(info, all_infos)
if is_monitor_wrapped:
# Do not trust "done" with episode endings.
# Remove vecenv stacking (if any)
if isinstance(env, VecEnv):
info = info[0]
if "episode" in info.keys():
# Monitor wrapper includes "episode" key in info if environment
# has been wrapped with it. Use those rewards instead.
episode_rewards.append(info["episode"]["r"])
episode_lengths.append(info["episode"]["l"])
else:
episode_rewards.append(episode_reward)
episode_lengths.append(episode_length)
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
if return_episode_rewards:
return episode_rewards, episode_lengths, all_infos
return mean_reward, std_reward, all_infos
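# Minimal usage sketch (``model`` and ``eval_env`` are assumed to be an already
# trained stable-baselines3 algorithm and a single, ideally Monitor-wrapped, env):
#   mean_reward, std_reward, infos = evaluate_policy_with_info(
#       model, eval_env, n_eval_episodes=10, deterministic=True)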
class EvalCallbackWithInfo(EvalCallback):
def __init__(
self,
eval_env: Union[gym.Env, VecEnv],
callback_on_new_best: Optional[BaseCallback] = None,
n_eval_episodes: int = 5,
eval_freq: int = 10000,
log_path: str = None,
best_model_save_path: str = None,
deterministic: bool = True,
render: bool = False,
verbose: int = 1,
warn: bool = True,
exclude_infos_from_logging=["terminal_observation"],
eval_at_init=False,
eval_after_episode=True,
):
super(EvalCallbackWithInfo, self).__init__(
eval_env, callback_on_new_best, n_eval_episodes, eval_freq, log_path, best_model_save_path, deterministic, render, verbose, warn
)
self.exclude_infos_from_logging = exclude_infos_from_logging
self.eval_at_init = eval_at_init
self.eval_after_episode = eval_after_episode
        self.episode_counter = 0
def _init_callback(self) -> None:
# Does not work in some corner cases, where the wrapper is not the same
if not isinstance(self.training_env, type(self.eval_env)):
warnings.warn("Training and eval env are not of the same type" f"{self.training_env} != {self.eval_env}")
# Create folders if needed
if self.best_model_save_path is not None:
os.makedirs(self.best_model_save_path, exist_ok=True)
if self.log_path is not None:
os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
# test performance right at the beginning to see how well random policy does
if self.eval_at_init:
self.eval_with_info()
def eval_with_info(self):
# Sync training and eval env if there is VecNormalize
sync_envs_normalization(self.training_env, self.eval_env)
# Reset success rate buffer
self._is_success_buffer = []
episode_rewards, episode_lengths, all_infos = evaluate_policy_with_info(
self.model,
self.eval_env,
n_eval_episodes=self.n_eval_episodes,
render=self.render,
deterministic=self.deterministic,
return_episode_rewards=True,
warn=self.warn,
callback=self._log_success_callback,
)
if self.log_path is not None:
self.evaluations_timesteps.append(self.num_timesteps)
self.evaluations_results.append(episode_rewards)
self.evaluations_length.append(episode_lengths)
kwargs = {}
# Save success log if present
if len(self._is_success_buffer) > 0:
self.evaluations_successes.append(self._is_success_buffer)
kwargs = dict(successes=self.evaluations_successes)
np.savez(
self.log_path,
timesteps=self.evaluations_timesteps,
results=self.evaluations_results,
ep_lengths=self.evaluations_length,
**kwargs,
)
mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)
mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
self.last_mean_reward = mean_reward
if self.verbose > 0:
print(f"Eval num_timesteps={self.num_timesteps}, " f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
# log mean infos from evaluation runs
log_dict(all_infos, self.logger, "eval/mean_", "mean", self.exclude_infos_from_logging)
# Add to current Logger
self.logger.record("eval/mean_reward", float(mean_reward))
self.logger.record("eval/mean_ep_length", mean_ep_length)
if len(self._is_success_buffer) > 0:
success_rate = np.mean(self._is_success_buffer)
if self.verbose > 0:
print(f"Success rate: {100 * success_rate:.2f}%")
self.logger.record("eval/success_rate", success_rate)
if mean_reward > self.best_mean_reward:
if self.verbose > 0:
print("New best mean reward!")
if self.best_model_save_path is not None:
self.model.save(os.path.join(self.best_model_save_path, "best_model"))
self.best_mean_reward = mean_reward
# Trigger callback if needed
if self.callback is not None:
return self._on_event()
def _on_step(self) -> bool:
if get_done_or_dones(self):
            self.episode_counter += 1
eval_after_step = self.n_calls % self.eval_freq == 0 and not self.eval_after_episode
        eval_after_episode = self.episode_counter % self.eval_freq == 0 and self.eval_after_episode
is_final_step = self.num_timesteps == self.model._total_timesteps
if self.eval_freq > 0 and (eval_after_step or eval_after_episode or is_final_step):
self.eval_with_info()
return True
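# Typical wiring (sketch; ``model`` and ``eval_env`` are assumed to be an SB3
# algorithm and a single evaluation environment created elsewhere):
#   eval_callback = EvalCallbackWithInfo(eval_env, eval_freq=10, eval_after_episode=True)
#   model.learn(total_timesteps=100_000, callback=eval_callback)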
the-stack_0_12609
# -*- coding: utf-8 -*-
countries = {
"ad" : "Andorra",
"ae" : "the United Arab Emirates",
"af" : "Afghanistan",
"ag" : "Antigua and Barbuda",
"ai" : "Anguilla",
"al" : "Albania",
"am" : "Armenia",
"an" : "the Netherlands Antilles",
"ao" : "Angola",
"aq" : "Antarctica",
"ar" : "Argentina",
"as" : "American Samoa",
"at" : "Austria",
"au" : "Australia",
"aw" : "Aruba",
"ax" : "the Aland Islands",
"az" : "Azerbaijan",
"ba" : "Bosnia and Herzegovina",
"bb" : "Barbados",
"bd" : "Bangladesh",
"be" : "Belgium",
"bf" : "Burkina Faso",
"bg" : "Bulgaria",
"bh" : "Bahrain",
"bi" : "Burundi",
"bj" : "Benin",
"bl" : "Saint Bartelemey",
"bm" : "Bermuda",
"bn" : "Brunei",
"bo" : "Bolivia",
"bq" : "Bonaire, Sint Eustatius and Saba",
"br" : "Brazil",
"bs" : "the Bahamas",
"bt" : "Bhutan",
"bv" : "the Bouvet Island",
"bw" : "Botswana",
"by" : "Belarus",
"bz" : "Belize",
"ca" : "Canada",
"cc" : "the Cocos (Keeling) Islands",
"cd" : "the Democratic Republic of the Congo",
"cf" : "Central African Republic",
"cg" : "Congo",
"ch" : "Switzerland",
"ci" : u"Côte d'Ivoire",
"ck" : "the Cook Islands",
"cl" : "Chile",
"cm" : "Cameroon",
"cn" : "China",
"co" : "Colombia",
"cr" : "Costa Rica",
"cu" : "Cuba",
"cv" : "Cape Verde",
"cw" : u"Curaçao",
"cx" : "the Christmas Island",
"cy" : "Cyprus",
"cz" : "the Czech Republic",
"de" : "Germany",
"dj" : "Djibouti",
"dk" : "Denmark",
"dm" : "Dominica",
"do" : "the Dominican Republic",
"dz" : "Algeria",
"ec" : "Ecuador",
"ee" : "Estonia",
"eg" : "Egypt",
"eh" : "the Western Sahara",
"er" : "Eritrea",
"es" : "Spain",
"et" : "Ethiopia",
"fi" : "Finland",
"fj" : "Fiji",
"fk" : "the Falkland Islands (Malvinas)",
"fm" : "the Federated States of Micronesia",
"fo" : "the Faroe Islands",
"fr" : "France",
"ga" : "Gabon",
"gb" : "the United Kingdom",
"gd" : "Grenada",
"ge" : "Georgia",
"gf" : "French Guiana",
"gg" : "Guernsey",
"gh" : "Ghana",
"gi" : "Gibraltar",
"gl" : "Greenland",
"gm" : "Gambia",
"gn" : "Guinea",
"gp" : "Guadeloupe",
"gq" : "Equatorial Guinea",
"gr" : "Greece",
"gs" : "South Georgia and the South Sandwich Islands",
"gt" : "Guatemala",
"gu" : "Guam",
"gw" : "Guinea-Bissau",
"gy" : "Guyana",
"hk" : "Hong Kong",
"hm" : "Heard Island and McDonald Islands",
"hn" : "Honduras",
"hr" : "Croatia",
"ht" : "Haiti",
"hu" : "Hungary",
"id" : "Indonesia",
"ie" : "Ireland",
"il" : "Israel",
"im" : "the Isle of Man",
"in" : "India",
"io" : "the British Indian Ocean Territory",
"iq" : "Iraq",
"ir" : "Iran",
"is" : "Iceland",
"it" : "Italy",
"je" : "Jersey",
"jm" : "Jamaica",
"jo" : "Jordan",
"jp" : "Japan",
"ke" : "Kenya",
"kg" : "Kyrgyzstan",
"kh" : "Cambodia",
"ki" : "Kiribati",
"km" : "Comoros",
"kn" : "Saint Kitts and Nevis",
"kp" : "North Korea",
"kr" : "the Republic of Korea",
"kw" : "Kuwait",
"ky" : "the Cayman Islands",
"kz" : "Kazakhstan",
"la" : "Laos",
"lb" : "Lebanon",
"lc" : "Saint Lucia",
"li" : "Liechtenstein",
"lk" : "Sri Lanka",
"lr" : "Liberia",
"ls" : "Lesotho",
"lt" : "Lithuania",
"lu" : "Luxembourg",
"lv" : "Latvia",
"ly" : "Libya",
"ma" : "Morocco",
"mc" : "Monaco",
"md" : "the Republic of Moldova",
"me" : "Montenegro",
"mf" : "Saint Martin",
"mg" : "Madagascar",
"mh" : "the Marshall Islands",
"mk" : "Macedonia",
"ml" : "Mali",
"mm" : "Burma",
"mn" : "Mongolia",
"mo" : "Macau",
"mp" : "the Northern Mariana Islands",
"mq" : "Martinique",
"mr" : "Mauritania",
"ms" : "Montserrat",
"mt" : "Malta",
"mu" : "Mauritius",
"mv" : "the Maldives",
"mw" : "Malawi",
"mx" : "Mexico",
"my" : "Malaysia",
"mz" : "Mozambique",
"na" : "Namibia",
"nc" : "New Caledonia",
"ne" : "Niger",
"nf" : "Norfolk Island",
"ng" : "Nigeria",
"ni" : "Nicaragua",
"nl" : "the Netherlands",
"no" : "Norway",
"np" : "Nepal",
"nr" : "Nauru",
"nu" : "Niue",
"nz" : "New Zealand",
"om" : "Oman",
"pa" : "Panama",
"pe" : "Peru",
"pf" : "French Polynesia",
"pg" : "Papua New Guinea",
"ph" : "the Philippines",
"pk" : "Pakistan",
"pl" : "Poland",
"pm" : "Saint Pierre and Miquelon",
"pn" : "the Pitcairn Islands",
"pr" : "Puerto Rico",
"ps" : "the Palestinian Territory",
"pt" : "Portugal",
"pw" : "Palau",
"py" : "Paraguay",
"qa" : "Qatar",
"re" : "Reunion",
"ro" : "Romania",
"rs" : "Serbia",
"ru" : "Russia",
"rw" : "Rwanda",
"sa" : "Saudi Arabia",
"sb" : "the Solomon Islands",
"sc" : "the Seychelles",
"sd" : "Sudan",
"se" : "Sweden",
"sg" : "Singapore",
"sh" : "Saint Helena",
"si" : "Slovenia",
"sj" : "Svalbard and Jan Mayen",
"sk" : "Slovakia",
"sl" : "Sierra Leone",
"sm" : "San Marino",
"sn" : "Senegal",
"so" : "Somalia",
"sr" : "Suriname",
"ss" : "South Sudan",
"st" : u"São Tomé and Príncipe",
"sv" : "El Salvador",
"sx" : "Sint Maarten",
"sy" : "the Syrian Arab Republic",
"sz" : "Swaziland",
"tc" : "Turks and Caicos Islands",
"td" : "Chad",
"tf" : "the French Southern Territories",
"tg" : "Togo",
"th" : "Thailand",
"tj" : "Tajikistan",
"tk" : "Tokelau",
"tl" : "East Timor",
"tm" : "Turkmenistan",
"tn" : "Tunisia",
"to" : "Tonga",
"tr" : "Turkey",
"tt" : "Trinidad and Tobago",
"tv" : "Tuvalu",
"tw" : "Taiwan",
"tz" : "the United Republic of Tanzania",
"ua" : "Ukraine",
"ug" : "Uganda",
"um" : "the United States Minor Outlying Islands",
"us" : "the United States",
"uy" : "Uruguay",
"uz" : "Uzbekistan",
"va" : "Vatican City",
"vc" : "Saint Vincent and the Grenadines",
"ve" : "Venezuela",
"vg" : "the British Virgin Islands",
"vi" : "the United States Virgin Islands",
"vn" : "Vietnam",
"vu" : "Vanuatu",
"wf" : "Wallis and Futuna",
"ws" : "Samoa",
"xk" : "Kosovo",
"ye" : "Yemen",
"yt" : "Mayotte",
"za" : "South Africa",
"zm" : "Zambia",
"zw" : "Zimbabwe"
}
the-stack_0_12610
"""
======================================================================
A demo of structured Ward hierarchical clustering on an image of coins
======================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
# %%
# Generate data
# -------------
from skimage.data import coins
orig_coins = coins()
# %%
# Resize it to 20% of the original size to speed up the processing
# Applying a Gaussian filter for smoothing prior to down-scaling
# reduces aliasing artifacts.
import numpy as np
from scipy.ndimage import gaussian_filter
from skimage.transform import rescale
smoothened_coins = gaussian_filter(orig_coins, sigma=2)
rescaled_coins = rescale(
smoothened_coins,
0.2,
mode="reflect",
anti_aliasing=False,
)
X = np.reshape(rescaled_coins, (-1, 1))
# %%
# Define structure of the data
# ----------------------------
#
# Pixels are connected to their neighbors.
from sklearn.feature_extraction.image import grid_to_graph
connectivity = grid_to_graph(*rescaled_coins.shape)
# %%
# Compute clustering
# ------------------
import time as time
from sklearn.cluster import AgglomerativeClustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 27 # number of regions
ward = AgglomerativeClustering(
n_clusters=n_clusters, linkage="ward", connectivity=connectivity
)
ward.fit(X)
label = np.reshape(ward.labels_, rescaled_coins.shape)
print(f"Elapsed time: {time.time() - st:.3f}s")
print(f"Number of pixels: {label.size}")
print(f"Number of clusters: {np.unique(label).size}")
# %%
# Plot the results on an image
# ----------------------------
#
# Agglomerative clustering is able to segment each coin; however, we have had to
# use an ``n_clusters`` value larger than the number of coins because the
# segmentation also finds a large region in the background.
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 5))
plt.imshow(rescaled_coins, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(
label == l,
colors=[
plt.cm.nipy_spectral(l / float(n_clusters)),
],
)
plt.axis("off")
plt.show()
the-stack_0_12611
from Tkinter import *
class Test(Frame):
def printit(self):
print(self.hi_there["command"])
def createWidgets(self):
# a hello button
self.QUIT = Button(self, text='QUIT', foreground='red',
command=self.quit)
self.QUIT.pack(side=LEFT, fill=BOTH)
self.hi_there = Button(self, text='Hello',
command=self.printit)
self.hi_there.pack(side=LEFT)
# note how Packer defaults to side=TOP
self.guy2 = Button(self, text='button 2')
self.guy2.pack()
self.guy3 = Button(self, text='button 3')
self.guy3.pack()
def __init__(self, master=None):
Frame.__init__(self, master)
Pack.config(self)
self.createWidgets()
test = Test()
test.mainloop()
the-stack_0_12612
# -*- coding: utf-8 -*-
import time
from common.base_test import BaseTest
from project import INIT0_PK, INIT1_PK, INIT2_PK, INIT3_PK, INIT4_PK
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, not_equal_to
SUITE = {
"description": "Operation 'committee_member_deactivate'"
}
@lcc.prop("main", "type")
@lcc.tags("operations", "committee_member_operations", "committee_member_deactivate")
@lcc.suite("Check work of operation 'committee_member_deactivate'", rank=1)
class CommitteeMemberDeactivate(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.init0 = None
self.init1 = None
self.init2 = None
self.init3 = None
self.init4 = None
def setup_suite(self):
super().setup_suite()
self._connect_to_ethereum()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.committee_members_info = self.get_active_committee_members_info(self.__database_api_identifier)
self.init0 = self.committee_members_info[0]["account_id"]
self.init1 = self.committee_members_info[1]["account_id"]
self.init2 = self.committee_members_info[2]["account_id"]
self.init3 = self.committee_members_info[3]["account_id"]
self.init4 = self.committee_members_info[4]["account_id"]
lcc.log_info(
"Echo initial accounts: {}, {}, {}, {}, {}".format(
self.init0, self.init1, self.init2, self.init3, self.init4
)
)
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of operation 'committee_member_deactivate'")
@lcc.depends_on("Operations.CommitteeMember.CommitteeMemberActivate.CommitteeMemberActivate.method_main_check")
def method_main_check(self):
operation = self.echo_ops.get_committee_member_deactivate_operation(
echo=self.echo,
committee_member_account=self.init0,
committee_to_deactivate=self.committee_members_info[-1]["committee_id"],
signer=INIT0_PK
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
lcc.log_info("Collected successfully")
lcc.set_step("Make proposal of deactivating new account")
operation = self.echo_ops.get_proposal_create_operation(
echo=self.echo,
fee_paying_account=self.init0,
proposed_ops=collected_operation,
expiration_time=self.get_expiration_time(15),
review_period_seconds=10,
signer=INIT0_PK
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=operation)
if not self.is_operation_completed(broadcast_result, expected_static_variant=1):
raise Exception("Operation 'proposal_created' failed while broadcast")
proposal_id = broadcast_result["trx"]["operation_results"][0][1]
lcc.set_step("Make voting of deactivating new account")
operation = self.echo_ops.get_proposal_update_operation(
echo=self.echo,
fee_paying_account=self.init0,
proposal=proposal_id,
active_approvals_to_add=[self.init0, self.init1, self.init2, self.init3, self.init4],
active_approvals_to_remove=[],
key_approvals_to_add=[],
key_approvals_to_remove=[],
signer=[INIT0_PK, INIT1_PK, INIT2_PK, INIT3_PK, INIT4_PK]
)
collected_operation = self.collect_operations(operation, self.__database_api_identifier)
broadcast_result = self.echo_ops.broadcast(echo=self.echo, list_operations=collected_operation)
if not self.is_operation_completed(broadcast_result, expected_static_variant=0):
raise Exception("Operation 'proposal_update' failed while broadcast")
lcc.log_info("All committee member has voted")
lcc.set_step(
"Waiting for maintenance and release of two blocks and check that new committee member were deactivated"
)
self.produce_block(self.__database_api_identifier)
time.sleep(15)
self.produce_block(self.__database_api_identifier)
check_that(
"acitve committee member",
self.committee_members_info[-1]["account_id"],
not_equal_to(self.get_active_committee_members_info(self.__database_api_identifier)[-1]["account_id"]),
quiet=True
)
the-stack_0_12615
from flaskapp.models import Question
from test.main.base_classes import BaseUnit
from test.main.utils import test_post_request
class AddQuestionTestCase(BaseUnit):
def test_add_sub_question(self):
# Test valid data
new_question = dict(
question="Is it okay?",
mark=8,
difficulty="Easy",
cognitive_level="Application",
imp=True,
submit="submit",
)
_, question = test_post_request(self,
"/course/1/unit/1/question/sub/new/",
new_question, Question, 1)
# Testing if repr method is working
self.assertEqual(
str(question),
"Question(Is it okay?, 8, Easy, Application, sub, True)",
)
# Test invalid data
new_question = dict(
question="Isn't it okay?",
mark=None,
imp=False,
difficulty="Easy",
cognitive_level="Application",
submit="submit",
)
self.assertRaises(
AttributeError,
test_post_request,
self,
"/course/1/unit/1/question/sub/new/",
new_question,
Question,
2,
)
def test_add_mcq_question(self):
# test valid data
new_mcq = dict(
question="Rate it",
mark=8,
difficulty="Easy",
cognitive_level="Application",
imp=None,
option1="10",
option2="9",
option3="8",
option4="7",
)
_, mcq = test_post_request(self, "/course/1/unit/1/question/mcq/new/",
new_mcq, Question, 1)
# test repr method
self.assertEqual(
str(mcq),
"Question(Rate it, 8, Easy, Application, mcq, False)",
)
# test invalid data
new_mcq = dict(
question=None,
mark=8,
difficulty="Easy",
cognitive_level="Application",
imp=True,
submit="submit",
option1="A",
option2="B",
option3="C",
option4="D",
)
self.assertRaises(
AttributeError,
test_post_request,
self,
"/course/1/unit/1/question/mcq/new/",
new_mcq,
Question,
2,
)
the-stack_0_12616
import keras.metrics
import tensorflow as tf
def weighted_crossentropy(y_true, y_pred):
class_weights = tf.constant([[[[1., 1., 10.]]]])
unweighted_losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_pred)
weights = tf.reduce_sum(class_weights * y_true, axis=-1)
weighted_losses = weights * unweighted_losses
loss = tf.reduce_mean(weighted_losses)
return loss
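# A minimal wiring sketch (the model itself is hypothetical and not defined here).
# The loss expects one-hot labels with three classes, the last weighted 10x, and a
# final layer that outputs logits, since softmax is applied inside the loss:
#   model.compile(optimizer="adam", loss=weighted_crossentropy,
#                 metrics=[keras.metrics.categorical_accuracy])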
the-stack_0_12617
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
from scipy.integrate._ivp.ivp import OdeResult
import matplotlib.pyplot as plt
plt.style.use('seaborn')
def solve_ivp(fun, t_span, y0, t_eval=None, dt=0.01):
t0, tf = float(t_span[0]), float(t_span[-1])
if t_eval is not None:
assert t0 == t_eval[0]
assert tf == t_eval[-1]
# these variables are only needed if t_eval is not None
i = 1
tp = t0
yp = y0
t = t0
y = y0
ts = [t]
ys = [y]
while t < tf :
y = y + dt*fun(t,y)
t = t + dt
if t_eval is not None:
while i < len(t_eval) and t >= t_eval[i]:
if t == t_eval[i]:
ts.append(t)
ys.append(y)
i += 1
elif t > t_eval[i]:
yint = yp + (t_eval[i]-tp)*(y-yp)/(t-tp)
ts.append(t_eval[i])
ys.append(yint)
i += 1
tp = t
yp = y
else:
ts.append(t)
ys.append(y)
ts = np.hstack(ts)
ys = np.vstack(ys).T
return OdeResult(t=ts, y=ys)
if __name__ == "__main__":
# stability region for Euler forward for this problem is h<2/50=0.04
@np.vectorize
def func(t,y):
return -50*y
# t_span = (0,1)
# y0 = np.array([1,1])
#
# sol = solve_ivp(func, t_span, y0 )
#
# plt.figure()
# plt.plot(sol.t, sol.y)
t_eval = np.linspace(0,1,10)
y0 = np.array([1])
sol = solve_ivp(func, [t_eval[0], t_eval[-1]], y0, t_eval=t_eval)
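    # Sanity check (a sketch): y' = -50*y with y(0) = 1 has exact solution exp(-50*t),
    # so the forward-Euler result at the requested points can be compared directly.
    exact = np.exp(-50 * t_eval)
    print("max abs error vs exact solution:", np.max(np.abs(sol.y[0] - exact)))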
the-stack_0_12618
#!/usr/bin/env python3
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from test.helper import FakeYDL
from yt_dlp.extractor import YoutubeIE
from yt_dlp.compat import compat_str, compat_urlretrieve
_TESTS = [
(
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
86,
'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
85,
'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
90,
']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
84,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
84,
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
83,
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
'312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
'112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
)
]
class TestPlayerInfo(unittest.TestCase):
def test_youtube_extract_player_info(self):
PLAYER_URLS = (
('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/en_US/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player_ias.vflset/fr_FR/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
# obsolete
('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
('https://www.youtube.com/yts/jsbin/player_ias-vflCPQUIL/en_US/base.js', 'vflCPQUIL'),
('https://www.youtube.com/yts/jsbin/player-vflzQZbt7/en_US/base.js', 'vflzQZbt7'),
('https://www.youtube.com/yts/jsbin/player-en_US-vflaxXRn1/base.js', 'vflaxXRn1'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
)
for player_url, expected_player_id in PLAYER_URLS:
player_id = YoutubeIE._extract_player_info(player_url)
self.assertEqual(player_id, expected_player_id)
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
if not os.path.exists(self.TESTDATA_DIR):
os.mkdir(self.TESTDATA_DIR)
def make_tfunc(url, sig_input, expected_sig):
m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
assert m, '%r should follow URL format' % url
test_id = m.group(1)
def test_func(self):
basename = 'player-%s.js' % test_id
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
compat_urlretrieve(url, fn)
ydl = FakeYDL()
ie = YoutubeIE(ydl)
with io.open(fn, encoding='utf-8') as testf:
jscode = testf.read()
func = ie._parse_sig_js(jscode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
got_sig = func(src_sig)
self.assertEqual(got_sig, expected_sig)
test_func.__name__ = str('test_signature_js_' + test_id)
setattr(TestSignature, test_func.__name__, test_func)
for test_spec in _TESTS:
make_tfunc(*test_spec)
if __name__ == '__main__':
unittest.main()
the-stack_0_12619
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import time
import warnings
from oauthlib.common import generate_token
from oauthlib.oauth2.rfc6749 import tokens
from oauthlib.oauth2.rfc6749.errors import (InsecureTransportError,
TokenExpiredError)
from oauthlib.oauth2.rfc6749.parameters import (parse_token_response,
prepare_token_request,
prepare_token_revocation_request)
from oauthlib.oauth2.rfc6749.utils import is_secure_transport
AUTH_HEADER = 'auth_header'
URI_QUERY = 'query'
BODY = 'body'
FORM_ENC_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded'
}
class Client(object):
"""Base OAuth2 client responsible for access token management.
This class also acts as a generic interface providing methods common to all
client types such as ``prepare_authorization_request`` and
``prepare_token_revocation_request``. The ``prepare_x_request`` methods are
the recommended way of interacting with clients (as opposed to the abstract
prepare uri/body/etc methods). They are recommended over the older set
because they are easier to use (more consistent) and add a few additional
security checks, such as HTTPS and state checking.
Some of these methods require further implementation only provided by the
specific purpose clients such as
:py:class:`oauthlib.oauth2.MobileApplicationClient` and thus you should always
seek to use the client class matching the OAuth workflow you need. For
Python, this is usually :py:class:`oauthlib.oauth2.WebApplicationClient`.
"""
refresh_token_key = 'refresh_token'
def __init__(self, client_id,
default_token_placement=AUTH_HEADER,
token_type='Bearer',
access_token=None,
refresh_token=None,
mac_key=None,
mac_algorithm=None,
token=None,
scope=None,
state=None,
redirect_url=None,
state_generator=generate_token,
**kwargs):
"""Initialize a client with commonly used attributes.
:param client_id: Client identifier given by the OAuth provider upon
registration.
:param default_token_placement: Tokens can be supplied in the Authorization
header (default), the URL query component (``query``) or the request
body (``body``).
:param token_type: OAuth 2 token type. Defaults to Bearer. Change this
if you specify the ``access_token`` parameter and know it is of a
different token type, such as a MAC, JWT or SAML token. Can
also be supplied as ``token_type`` inside the ``token`` dict parameter.
:param access_token: An access token (string) used to authenticate
requests to protected resources. Can also be supplied inside the
``token`` dict parameter.
:param refresh_token: A refresh token (string) used to refresh expired
tokens. Can also be supplied inside the ``token`` dict parameter.
:param mac_key: Encryption key used with MAC tokens.
:param mac_algorithm: Hashing algorithm for MAC tokens.
:param token: A dict of token attributes such as ``access_token``,
``token_type`` and ``expires_at``.
:param scope: A list of default scopes to request authorization for.
:param state: A CSRF protection string used during authorization.
:param redirect_url: The redirection endpoint on the client side to which
the user returns after authorization.
:param state_generator: A no argument state generation callable. Defaults
to :py:meth:`oauthlib.common.generate_token`.
"""
self.client_id = client_id
self.default_token_placement = default_token_placement
self.token_type = token_type
self.access_token = access_token
self.refresh_token = refresh_token
self.mac_key = mac_key
self.mac_algorithm = mac_algorithm
self.token = token or {}
self.scope = scope
self.state_generator = state_generator
self.state = state
self.redirect_url = redirect_url
self.code = None
self.expires_in = None
self._expires_at = None
self.populate_token_attributes(self.token)
@property
def token_types(self):
"""Supported token types and their respective methods
Additional tokens can be supported by extending this dictionary.
The Bearer token spec is stable and safe to use.
The MAC token spec is not yet stable and support for MAC tokens
is experimental and currently matching version 00 of the spec.
"""
return {
'Bearer': self._add_bearer_token,
'MAC': self._add_mac_token
}
def prepare_request_uri(self, *args, **kwargs):
"""Abstract method used to create request URIs."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def prepare_request_body(self, *args, **kwargs):
"""Abstract method used to create request bodies."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def parse_request_uri_response(self, *args, **kwargs):
"""Abstract method used to parse redirection responses."""
raise NotImplementedError("Must be implemented by inheriting classes.")
def add_token(self, uri, http_method='GET', body=None, headers=None,
token_placement=None, **kwargs):
"""Add token to the request uri, body or authorization header.
The access token type provides the client with the information
required to successfully utilize the access token to make a protected
resource request (along with type-specific attributes). The client
MUST NOT use an access token if it does not understand the token
type.
For example, the "bearer" token type defined in
[`I-D.ietf-oauth-v2-bearer`_] is utilized by simply including the access
token string in the request:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: Bearer mF_9.B5f-4.1JqM
while the "mac" token type defined in [`I-D.ietf-oauth-v2-http-mac`_] is
utilized by issuing a MAC key together with the access token which is
used to sign certain components of the HTTP requests:
.. code-block:: http
GET /resource/1 HTTP/1.1
Host: example.com
Authorization: MAC id="h480djs93hd8",
nonce="274312:dj83hs9s",
mac="kDZvddkndxvhGRXZhvuDjEWhGeE="
.. _`I-D.ietf-oauth-v2-bearer`: https://tools.ietf.org/html/rfc6749#section-12.2
.. _`I-D.ietf-oauth-v2-http-mac`: https://tools.ietf.org/html/rfc6749#section-12.2
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
token_placement = token_placement or self.default_token_placement
case_insensitive_token_types = dict(
(k.lower(), v) for k, v in self.token_types.items())
if not self.token_type.lower() in case_insensitive_token_types:
raise ValueError("Unsupported token type: %s" % self.token_type)
if not (self.access_token or self.token.get('access_token')):
raise ValueError("Missing access token.")
if self._expires_at and self._expires_at < time.time():
raise TokenExpiredError()
return case_insensitive_token_types[self.token_type.lower()](uri, http_method, body,
headers, token_placement, **kwargs)
def prepare_authorization_request(self, authorization_url, state=None,
redirect_url=None, scope=None, **kwargs):
"""Prepare the authorization request.
This is the first step in many OAuth flows in which the user is
redirected to a certain authorization URL. This method adds
required parameters to the authorization URL.
:param authorization_url: Provider authorization endpoint URL.
:param state: CSRF protection string. Will be automatically created if
not provided. The generated state is available via the ``state``
attribute. Clients should verify that the state is unchanged and
present in the authorization response. This verification is done
automatically if using the ``authorization_response`` parameter
with ``prepare_token_request``.
:param redirect_url: Redirect URL to which the user will be returned
after authorization. Must be provided unless previously setup with
the provider. If provided then it must also be provided in the
token request.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(authorization_url):
raise InsecureTransportError()
self.state = state or self.state_generator()
self.redirect_url = redirect_url or self.redirect_url
self.scope = scope or self.scope
auth_url = self.prepare_request_uri(
authorization_url, redirect_uri=self.redirect_url,
scope=self.scope, state=self.state, **kwargs)
return auth_url, FORM_ENC_HEADERS, ''
def prepare_token_request(self, token_url, authorization_response=None,
redirect_url=None, state=None, body='', **kwargs):
"""Prepare a token creation request.
Note that these requests usually require client authentication, either
by including client_id or a set of provider specific authentication
credentials.
:param token_url: Provider token creation endpoint URL.
:param authorization_response: The full redirection URL string, i.e.
            the location to which the user was redirected after successful
authorization. Used to mine credentials needed to obtain a token
in this step, such as authorization code.
:param redirect_url: The redirect_url supplied with the authorization
request (if there was one).
:param body: Existing request body (URL encoded string) to embed parameters
            into. This may contain extra parameters. Default ''.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
state = state or self.state
if authorization_response:
self.parse_request_uri_response(
authorization_response, state=state)
self.redirect_url = redirect_url or self.redirect_url
body = self.prepare_request_body(body=body,
redirect_uri=self.redirect_url, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_refresh_token_request(self, token_url, refresh_token=None,
body='', scope=None, **kwargs):
"""Prepare an access token refresh request.
Expired access tokens can be replaced by new access tokens without
going through the OAuth dance if the client obtained a refresh token.
This refresh token and authentication credentials can be used to
obtain a new access token, and possibly a new refresh token.
:param token_url: Provider token refresh endpoint URL.
:param refresh_token: Refresh token string.
:param body: Existing request body (URL encoded string) to embed parameters
            into. This may contain extra parameters. Default ''.
:param scope: List of scopes to request. Must be equal to
or a subset of the scopes granted when obtaining the refresh
token.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
self.scope = scope or self.scope
body = self.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
return token_url, FORM_ENC_HEADERS, body
def prepare_token_revocation_request(self, revocation_url, token,
token_type_hint="access_token", body='', callback=None, **kwargs):
"""Prepare a token revocation request.
:param revocation_url: Provider token revocation endpoint URL.
:param token: The access or refresh token to be revoked (string).
:param token_type_hint: ``"access_token"`` (default) or
``"refresh_token"``. This is optional and if you wish to not pass it you
must provide ``token_type_hint=None``.
:param callback: A jsonp callback such as ``package.callback`` to be invoked
            upon receiving the response. Note that it should not include a () suffix.
:param kwargs: Additional parameters to included in the request.
:returns: The prepared request tuple with (url, headers, body).
        Note that JSONP requests may use GET requests as the parameters will
be added to the request URL query as opposed to the request body.
An example of a revocation request
.. code-block: http
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
An example of a jsonp revocation request
.. code-block: http
GET /revoke?token=agabcdefddddafdd&callback=package.myCallback HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
and an error response
.. code-block: http
package.myCallback({"error":"unsupported_token_type"});
Note that these requests usually require client credentials, client_id in
the case for public clients and provider specific authentication
credentials for confidential clients.
"""
if not is_secure_transport(revocation_url):
raise InsecureTransportError()
return prepare_token_revocation_request(revocation_url, token,
token_type_hint=token_type_hint, body=body, callback=callback,
**kwargs)
def parse_request_body_response(self, body, scope=None, **kwargs):
"""Parse the JSON response body.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
:param body: The response body from the token request.
:param scope: Scopes originally requested.
:return: Dictionary of token parameters.
:raises: Warning if scope has changed. OAuth2Error if response is invalid.
These response are json encoded and could easily be parsed without
the assistance of OAuthLib. However, there are a few subtle issues
to be aware of regarding the response which are helpfully addressed
through the raising of various errors.
A successful response should always contain
**access_token**
The access token issued by the authorization server. Often
a random string.
**token_type**
The type of the token issued as described in `Section 7.1`_.
Commonly ``Bearer``.
While it is not mandated it is recommended that the provider include
**expires_in**
The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
Providers may supply this in all responses but are required to only
if it has changed since the authorization request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
"""
self.token = parse_token_response(body, scope=scope)
self.populate_token_attributes(self.token)
return self.token
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
"""
refresh_token = refresh_token or self.refresh_token
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs)
def _add_bearer_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=None):
"""Add a bearer token to the request uri, body or authorization header."""
if token_placement == AUTH_HEADER:
headers = tokens.prepare_bearer_headers(self.access_token, headers)
elif token_placement == URI_QUERY:
uri = tokens.prepare_bearer_uri(self.access_token, uri)
elif token_placement == BODY:
body = tokens.prepare_bearer_body(self.access_token, body)
else:
raise ValueError("Invalid token placement.")
return uri, headers, body
def _add_mac_token(self, uri, http_method='GET', body=None,
headers=None, token_placement=AUTH_HEADER, ext=None, **kwargs):
"""Add a MAC token to the request authorization header.
Warning: MAC token support is experimental as the spec is not yet stable.
"""
headers = tokens.prepare_mac_header(self.access_token, uri,
self.mac_key, http_method, headers=headers, body=body, ext=ext,
hash_algorithm=self.mac_algorithm, **kwargs)
return uri, headers, body
def _populate_attributes(self, response):
warnings.warn("Please switch to the public method "
"populate_token_attributes.", DeprecationWarning)
return self.populate_token_attributes(response)
def populate_code_attributes(self, response):
"""Add attributes from an auth code response to self."""
if 'code' in response:
self.code = response.get('code')
def populate_token_attributes(self, response):
"""Add attributes from a token exchange response to self."""
if 'access_token' in response:
self.access_token = response.get('access_token')
if 'refresh_token' in response:
self.refresh_token = response.get('refresh_token')
if 'token_type' in response:
self.token_type = response.get('token_type')
if 'expires_in' in response:
self.expires_in = response.get('expires_in')
self._expires_at = time.time() + int(self.expires_in)
if 'expires_at' in response:
self._expires_at = int(response.get('expires_at'))
if 'mac_key' in response:
self.mac_key = response.get('mac_key')
if 'mac_algorithm' in response:
self.mac_algorithm = response.get('mac_algorithm')
|
the-stack_0_12621 | from typing import TYPE_CHECKING, List
from django.conf import settings
from saleor.plugins.base_plugin import BasePlugin, ConfigurationTypeField
from . import (
GatewayConfig,
authorize,
capture,
get_client_token,
list_client_sources,
process_payment,
refund,
void,
)
GATEWAY_NAME = "Braintree"
if TYPE_CHECKING:
# flake8: noqa
from . import GatewayResponse, PaymentData, TokenConfig
from ...interface import CustomerSource
def require_active_plugin(fn):
def wrapped(self, *args, **kwargs):
previous = kwargs.get("previous_value", None)
if not self.active:
return previous
return fn(self, *args, **kwargs)
return wrapped
class BraintreeGatewayPlugin(BasePlugin):
PLUGIN_ID = "mirumee.payments.braintree"
PLUGIN_NAME = GATEWAY_NAME
DEFAULT_ACTIVE = settings.BRAINTREE_PLUGIN_ACTIVE
DEFAULT_CONFIGURATION = [
{"name": "Public API key", "value": settings.BRAINTREE_PUBLIC_KEY},
{"name": "Secret API key", "value": settings.BRAINTREE_PRIVATE_KEY},
{"name": "Use sandbox", "value": settings.BRAINTREE_SANDBOX_MODE},
{"name": "Merchant ID", "value": settings.BRAINTREE_MERCHANT_ID},
{"name": "Store customers card", "value": False},
{"name": "Automatic payment capture", "value": True},
{"name": "Require 3D secure", "value": False},
]
CONFIG_STRUCTURE = {
"Public API key": {
"type": ConfigurationTypeField.SECRET,
"help_text": "Provide Braintree public API key",
"label": "Public API key",
},
"Secret API key": {
"type": ConfigurationTypeField.SECRET,
"help_text": "Provide Braintree secret API key",
"label": "Secret API key",
},
"Merchant ID": {
"type": ConfigurationTypeField.SECRET,
"help_text": "Provide Braintree merchant ID",
"label": "Merchant ID",
},
"Use sandbox": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should use Braintree sandbox API.",
"label": "Use sandbox",
},
"Store customers card": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should store cards on payments"
" in Braintree customer.",
"label": "Store customers card",
},
"Automatic payment capture": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should automaticaly capture payments.",
"label": "Automatic payment capture",
},
"Require 3D secure": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": "Determines if Saleor should enforce 3D secure during payment.",
"label": "Require 3D secure",
},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
configuration = {item["name"]: item["value"] for item in self.configuration}
self.config = GatewayConfig(
gateway_name=GATEWAY_NAME,
auto_capture=configuration["Automatic payment capture"],
connection_params={
"sandbox_mode": configuration["Use sandbox"],
"merchant_id": configuration["Merchant ID"],
"public_key": configuration["Public API key"],
"private_key": configuration["Secret API key"],
},
store_customer=configuration["Store customers card"],
require_3d_secure=configuration["Require 3D secure"],
)
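# Illustrative note (not part of the plugin): the list-of-dicts configuration is
# flattened above with a dict comprehension before being packed into GatewayConfig.
#
#   >>> configuration = [{"name": "Use sandbox", "value": True}, {"name": "Merchant ID", "value": "abc"}]
#   >>> {item["name"]: item["value"] for item in configuration}
#   {'Use sandbox': True, 'Merchant ID': 'abc'}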
def _get_gateway_config(self) -> GatewayConfig:
return self.config
@require_active_plugin
def authorize_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return authorize(payment_information, self._get_gateway_config())
@require_active_plugin
def capture_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return capture(payment_information, self._get_gateway_config())
@require_active_plugin
def refund_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return refund(payment_information, self._get_gateway_config())
@require_active_plugin
def void_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return void(payment_information, self._get_gateway_config())
@require_active_plugin
def process_payment(
self, payment_information: "PaymentData", previous_value
) -> "GatewayResponse":
return process_payment(payment_information, self._get_gateway_config())
@require_active_plugin
def list_payment_sources(
self, customer_id: str, previous_value
) -> List["CustomerSource"]:
sources = list_client_sources(self._get_gateway_config(), customer_id)
previous_value.extend(sources)
return previous_value
@require_active_plugin
def get_client_token(self, token_config: "TokenConfig", previous_value):
return get_client_token(self._get_gateway_config(), token_config)
@require_active_plugin
def get_payment_config(self, previous_value):
config = self._get_gateway_config()
return [
{"field": "store_customer_card", "value": config.store_customer},
{"field": "client_token", "value": get_client_token(config=config)},
]
|
the-stack_0_12623 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import inspect
import sys
from math import trunc
def get_locale(name):
"""Returns an appropriate :class:`Locale <arrow.locales.Locale>`
corresponding to an input locale name.
:param name: the name of the locale.
"""
locale_cls = _locales.get(name.lower())
if locale_cls is None:
raise ValueError("Unsupported locale '{}'".format(name))
return locale_cls()
# base locale type.
class Locale(object):
""" Represents locale-specific data and functionality. """
names = []
timeframes = {
"now": "",
"seconds": "",
"minute": "",
"minutes": "",
"hour": "",
"hours": "",
"day": "",
"days": "",
"week": "",
"weeks": "",
"month": "",
"months": "",
"year": "",
"years": "",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
past = None
future = None
month_names = []
month_abbreviations = []
day_names = []
day_abbreviations = []
ordinal_day_re = r"(\d+)"
def __init__(self):
self._month_name_to_ordinal = None
def describe(self, timeframe, delta=0, only_distance=False):
""" Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
:param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords
"""
humanized = self._format_timeframe(timeframe, delta)
if not only_distance:
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
def day_name(self, day):
""" Returns the day name for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
"""
return self.day_names[day]
def day_abbreviation(self, day):
""" Returns the day abbreviation for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
"""
return self.day_abbreviations[day]
def month_name(self, month):
""" Returns the month name for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
"""
return self.month_names[month]
def month_abbreviation(self, month):
""" Returns the month abbreviation for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
"""
return self.month_abbreviations[month]
def month_number(self, name):
""" Returns the month number for a month specified by name or abbreviation.
:param name: the month name or abbreviation.
"""
if self._month_name_to_ordinal is None:
self._month_name_to_ordinal = self._name_to_ordinal(self.month_names)
self._month_name_to_ordinal.update(
self._name_to_ordinal(self.month_abbreviations)
)
return self._month_name_to_ordinal.get(name)
def year_full(self, year):
""" Returns the year for specific locale if available
:param name: the ``int`` year (4-digit)
"""
return "{:04d}".format(year)
def year_abbreviation(self, year):
""" Returns the year for specific locale if available
:param name: the ``int`` year (4-digit)
"""
return "{:04d}".format(year)[2:]
def meridian(self, hour, token):
""" Returns the meridian indicator for a specified hour and format token.
:param hour: the ``int`` hour of the day.
:param token: the format token.
"""
if token == "a":
return self.meridians["am"] if hour < 12 else self.meridians["pm"]
if token == "A":
return self.meridians["AM"] if hour < 12 else self.meridians["PM"]
def ordinal_number(self, n):
""" Returns the ordinal format of a given integer
:param n: an integer
"""
return self._ordinal_number(n)
def _ordinal_number(self, n):
return "{}".format(n)
def _name_to_ordinal(self, lst):
return dict(map(lambda i: (i[1].lower(), i[0] + 1), enumerate(lst[1:])))
def _format_timeframe(self, timeframe, delta):
return self.timeframes[timeframe].format(trunc(abs(delta)))
def _format_relative(self, humanized, timeframe, delta):
if timeframe == "now":
return humanized
direction = self.past if delta < 0 else self.future
return direction.format(humanized)
# base locale type implementations.
class EnglishLocale(Locale):
names = [
"en",
"en_us",
"en_gb",
"en_au",
"en_be",
"en_jp",
"en_za",
"en_ca",
"en_ph",
]
past = "{0} ago"
future = "in {0}"
timeframes = {
"now": "just now",
"seconds": "seconds",
"minute": "a minute",
"minutes": "{0} minutes",
"hour": "an hour",
"hours": "{0} hours",
"day": "a day",
"days": "{0} days",
"week": "a week",
"weeks": "{0} weeks",
"month": "a month",
"months": "{0} months",
"year": "a year",
"years": "{0} years",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
ordinal_day_re = r"((?P<value>[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))"
def _ordinal_number(self, n):
if n % 100 not in (11, 12, 13):
remainder = abs(n) % 10
if remainder == 1:
return "{}st".format(n)
elif remainder == 2:
return "{}nd".format(n)
elif remainder == 3:
return "{}rd".format(n)
return "{}th".format(n)
def describe(self, timeframe, delta=0, only_distance=False):
""" Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
:param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords
"""
humanized = super(EnglishLocale, self).describe(timeframe, delta, only_distance)
if only_distance and timeframe == "now":
humanized = "instantly"
return humanized
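# Illustrative usage sketch (assumption: the module builds a ``_locales`` registry
# further down so that get_locale() can resolve names).
#
#   >>> locale = get_locale("en_us")
#   >>> locale.describe("hours", -2)
#   '2 hours ago'
#   >>> locale.describe("hours", 2, only_distance=True)
#   '2 hours'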
class ItalianLocale(Locale):
names = ["it", "it_it"]
past = "{0} fa"
future = "tra {0}"
timeframes = {
"now": "adesso",
"seconds": "qualche secondo",
"minute": "un minuto",
"minutes": "{0} minuti",
"hour": "un'ora",
"hours": "{0} ore",
"day": "un giorno",
"days": "{0} giorni",
"month": "un mese",
"months": "{0} mesi",
"year": "un anno",
"years": "{0} anni",
}
month_names = [
"",
"gennaio",
"febbraio",
"marzo",
"aprile",
"maggio",
"giugno",
"luglio",
"agosto",
"settembre",
"ottobre",
"novembre",
"dicembre",
]
month_abbreviations = [
"",
"gen",
"feb",
"mar",
"apr",
"mag",
"giu",
"lug",
"ago",
"set",
"ott",
"nov",
"dic",
]
day_names = [
"",
"lunedì",
"martedì",
"mercoledì",
"giovedì",
"venerdì",
"sabato",
"domenica",
]
day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n):
return "{}º".format(n)
class SpanishLocale(Locale):
names = ["es", "es_es"]
past = "hace {0}"
future = "en {0}"
timeframes = {
"now": "ahora",
"seconds": "segundos",
"minute": "un minuto",
"minutes": "{0} minutos",
"hour": "una hora",
"hours": "{0} horas",
"day": "un día",
"days": "{0} días",
"week": "una semana",
"weeks": "{0} semanas",
"month": "un mes",
"months": "{0} meses",
"year": "un año",
"years": "{0} años",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"enero",
"febrero",
"marzo",
"abril",
"mayo",
"junio",
"julio",
"agosto",
"septiembre",
"octubre",
"noviembre",
"diciembre",
]
month_abbreviations = [
"",
"ene",
"feb",
"mar",
"abr",
"may",
"jun",
"jul",
"ago",
"sep",
"oct",
"nov",
"dic",
]
day_names = [
"",
"lunes",
"martes",
"miércoles",
"jueves",
"viernes",
"sábado",
"domingo",
]
day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n):
return "{}º".format(n)
class FrenchLocale(Locale):
names = ["fr", "fr_fr"]
past = "il y a {0}"
future = "dans {0}"
timeframes = {
"now": "maintenant",
"seconds": "quelques secondes",
"minute": "une minute",
"minutes": "{0} minutes",
"hour": "une heure",
"hours": "{0} heures",
"day": "un jour",
"days": "{0} jours",
"week": "une semaine",
"weeks": "{0} semaines",
"month": "un mois",
"months": "{0} mois",
"year": "un an",
"years": "{0} ans",
}
month_names = [
"",
"janvier",
"février",
"mars",
"avril",
"mai",
"juin",
"juillet",
"août",
"septembre",
"octobre",
"novembre",
"décembre",
]
month_abbreviations = [
"",
"janv",
"févr",
"mars",
"avr",
"mai",
"juin",
"juil",
"août",
"sept",
"oct",
"nov",
"déc",
]
day_names = [
"",
"lundi",
"mardi",
"mercredi",
"jeudi",
"vendredi",
"samedi",
"dimanche",
]
day_abbreviations = ["", "lun", "mar", "mer", "jeu", "ven", "sam", "dim"]
ordinal_day_re = (
r"((?P<value>\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)"
)
def _ordinal_number(self, n):
if abs(n) == 1:
return "{}er".format(n)
return "{}e".format(n)
class GreekLocale(Locale):
names = ["el", "el_gr"]
past = "{0} πριν"
future = "σε {0}"
timeframes = {
"now": "τώρα",
"seconds": "δευτερόλεπτα",
"minute": "ένα λεπτό",
"minutes": "{0} λεπτά",
"hour": "μία ώρα",
"hours": "{0} ώρες",
"day": "μία μέρα",
"days": "{0} μέρες",
"month": "ένα μήνα",
"months": "{0} μήνες",
"year": "ένα χρόνο",
"years": "{0} χρόνια",
}
month_names = [
"",
"Ιανουαρίου",
"Φεβρουαρίου",
"Μαρτίου",
"Απριλίου",
"Μαΐου",
"Ιουνίου",
"Ιουλίου",
"Αυγούστου",
"Σεπτεμβρίου",
"Οκτωβρίου",
"Νοεμβρίου",
"Δεκεμβρίου",
]
month_abbreviations = [
"",
"Ιαν",
"Φεβ",
"Μαρ",
"Απρ",
"Μαϊ",
"Ιον",
"Ιολ",
"Αυγ",
"Σεπ",
"Οκτ",
"Νοε",
"Δεκ",
]
day_names = [
"",
"Δευτέρα",
"Τρίτη",
"Τετάρτη",
"Πέμπτη",
"Παρασκευή",
"Σάββατο",
"Κυριακή",
]
day_abbreviations = ["", "Δευ", "Τρι", "Τετ", "Πεμ", "Παρ", "Σαβ", "Κυρ"]
class JapaneseLocale(Locale):
names = ["ja", "ja_jp"]
past = "{0}前"
future = "{0}後"
timeframes = {
"now": "現在",
"seconds": "数秒",
"minute": "1分",
"minutes": "{0}分",
"hour": "1時間",
"hours": "{0}時間",
"day": "1日",
"days": "{0}日",
"week": "1週間",
"weeks": "{0}週間",
"month": "1ヶ月",
"months": "{0}ヶ月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日", "日曜日"]
day_abbreviations = ["", "月", "火", "水", "木", "金", "土", "日"]
class SwedishLocale(Locale):
names = ["sv", "sv_se"]
past = "för {0} sen"
future = "om {0}"
timeframes = {
"now": "just nu",
"seconds": "några sekunder",
"minute": "en minut",
"minutes": "{0} minuter",
"hour": "en timme",
"hours": "{0} timmar",
"day": "en dag",
"days": "{0} dagar",
"month": "en månad",
"months": "{0} månader",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januari",
"februari",
"mars",
"april",
"maj",
"juni",
"juli",
"augusti",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"måndag",
"tisdag",
"onsdag",
"torsdag",
"fredag",
"lördag",
"söndag",
]
day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"]
class FinnishLocale(Locale):
names = ["fi", "fi_fi"]
# Finnish grammar is very complex, and it's hard to convert
# 1-to-1 to something like English.
past = "{0} sitten"
future = "{0} kuluttua"
timeframes = {
"now": ["juuri nyt", "juuri nyt"],
"seconds": ["muutama sekunti", "muutaman sekunnin"],
"minute": ["minuutti", "minuutin"],
"minutes": ["{0} minuuttia", "{0} minuutin"],
"hour": ["tunti", "tunnin"],
"hours": ["{0} tuntia", "{0} tunnin"],
"day": ["päivä", "päivä"],
"days": ["{0} päivää", "{0} päivän"],
"month": ["kuukausi", "kuukauden"],
"months": ["{0} kuukautta", "{0} kuukauden"],
"year": ["vuosi", "vuoden"],
"years": ["{0} vuotta", "{0} vuoden"],
}
# Months and days are lowercase in Finnish
month_names = [
"",
"tammikuu",
"helmikuu",
"maaliskuu",
"huhtikuu",
"toukokuu",
"kesäkuu",
"heinäkuu",
"elokuu",
"syyskuu",
"lokakuu",
"marraskuu",
"joulukuu",
]
month_abbreviations = [
"",
"tammi",
"helmi",
"maalis",
"huhti",
"touko",
"kesä",
"heinä",
"elo",
"syys",
"loka",
"marras",
"joulu",
]
day_names = [
"",
"maanantai",
"tiistai",
"keskiviikko",
"torstai",
"perjantai",
"lauantai",
"sunnuntai",
]
day_abbreviations = ["", "ma", "ti", "ke", "to", "pe", "la", "su"]
def _format_timeframe(self, timeframe, delta):
return (
self.timeframes[timeframe][0].format(abs(delta)),
self.timeframes[timeframe][1].format(abs(delta)),
)
def _format_relative(self, humanized, timeframe, delta):
if timeframe == "now":
return humanized[0]
direction = self.past if delta < 0 else self.future
which = 0 if delta < 0 else 1
return direction.format(humanized[which])
def _ordinal_number(self, n):
return "{}.".format(n)
class ChineseCNLocale(Locale):
names = ["zh", "zh_cn"]
past = "{0}前"
future = "{0}后"
timeframes = {
"now": "刚才",
"seconds": "几秒",
"minute": "1分钟",
"minutes": "{0}分钟",
"hour": "1小时",
"hours": "{0}小时",
"day": "1天",
"days": "{0}天",
"week": "一周",
"weeks": "{0}周",
"month": "1个月",
"months": "{0}个月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"一月",
"二月",
"三月",
"四月",
"五月",
"六月",
"七月",
"八月",
"九月",
"十月",
"十一月",
"十二月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class ChineseTWLocale(Locale):
names = ["zh_tw"]
past = "{0}前"
future = "{0}後"
timeframes = {
"now": "剛才",
"seconds": "幾秒",
"minute": "1分鐘",
"minutes": "{0}分鐘",
"hour": "1小時",
"hours": "{0}小時",
"day": "1天",
"days": "{0}天",
"month": "1個月",
"months": "{0}個月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "周一", "周二", "周三", "周四", "周五", "周六", "周日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class KoreanLocale(Locale):
names = ["ko", "ko_kr"]
past = "{0} 전"
future = "{0} 후"
timeframes = {
"now": "지금",
"seconds": "몇 초",
"minute": "1분",
"minutes": "{0}분",
"hour": "1시간",
"hours": "{0}시간",
"day": "1일",
"days": "{0}일",
"month": "1개월",
"months": "{0}개월",
"year": "1년",
"years": "{0}년",
}
month_names = [
"",
"1월",
"2월",
"3월",
"4월",
"5월",
"6월",
"7월",
"8월",
"9월",
"10월",
"11월",
"12월",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"]
day_abbreviations = ["", "월", "화", "수", "목", "금", "토", "일"]
# derived locale types & implementations.
class DutchLocale(Locale):
names = ["nl", "nl_nl"]
past = "{0} geleden"
future = "over {0}"
timeframes = {
"now": "nu",
"seconds": "seconden",
"minute": "een minuut",
"minutes": "{0} minuten",
"hour": "een uur",
"hours": "{0} uur",
"day": "een dag",
"days": "{0} dagen",
"month": "een maand",
"months": "{0} maanden",
"year": "een jaar",
"years": "{0} jaar",
}
# In Dutch, the names of months and days do not start with a capital letter,
# unlike in English.
month_names = [
"",
"januari",
"februari",
"maart",
"april",
"mei",
"juni",
"juli",
"augustus",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mrt",
"apr",
"mei",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"maandag",
"dinsdag",
"woensdag",
"donderdag",
"vrijdag",
"zaterdag",
"zondag",
]
day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"]
class SlavicBaseLocale(Locale):
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, list):
if delta % 10 == 1 and delta % 100 != 11:
form = form[0]
elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[1]
else:
form = form[2]
return form.format(delta)
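# Illustrative check (not part of the original module): the Slavic plural rule
# above picks form[0] for counts ending in 1 (but not 11), form[1] for 2-4
# (excluding 12-14), and form[2] otherwise; RussianLocale below inherits it.
#
#   >>> ru = RussianLocale()
#   >>> [ru._format_timeframe("minutes", n) for n in (1, 3, 5, 11, 21)]
#   ['1 минуту', '3 минуты', '5 минут', '11 минут', '21 минуту']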
class BelarusianLocale(SlavicBaseLocale):
names = ["be", "be_by"]
past = "{0} таму"
future = "праз {0}"
timeframes = {
"now": "зараз",
"seconds": "некалькі секунд",
"minute": "хвіліну",
"minutes": ["{0} хвіліну", "{0} хвіліны", "{0} хвілін"],
"hour": "гадзіну",
"hours": ["{0} гадзіну", "{0} гадзіны", "{0} гадзін"],
"day": "дзень",
"days": ["{0} дзень", "{0} дні", "{0} дзён"],
"month": "месяц",
"months": ["{0} месяц", "{0} месяцы", "{0} месяцаў"],
"year": "год",
"years": ["{0} год", "{0} гады", "{0} гадоў"],
}
month_names = [
"",
"студзеня",
"лютага",
"сакавіка",
"красавіка",
"траўня",
"чэрвеня",
"ліпеня",
"жніўня",
"верасня",
"кастрычніка",
"лістапада",
"снежня",
]
month_abbreviations = [
"",
"студ",
"лют",
"сак",
"крас",
"трав",
"чэрв",
"ліп",
"жнів",
"вер",
"каст",
"ліст",
"снеж",
]
day_names = [
"",
"панядзелак",
"аўторак",
"серада",
"чацвер",
"пятніца",
"субота",
"нядзеля",
]
day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"]
class PolishLocale(SlavicBaseLocale):
names = ["pl", "pl_pl"]
past = "{0} temu"
future = "za {0}"
timeframes = {
"now": "teraz",
"seconds": "kilka sekund",
"minute": "minutę",
"minutes": ["{0} minut", "{0} minuty", "{0} minut"],
"hour": "godzina",
"hours": ["{0} godzin", "{0} godziny", "{0} godzin"],
"day": "dzień",
"days": ["{0} dzień", "{0} dni", "{0} dni"],
"month": "miesiąc",
"months": ["{0} miesiąc", "{0} miesiące", "{0} miesięcy"],
"year": "rok",
"years": ["{0} rok", "{0} lata", "{0} lat"],
}
month_names = [
"",
"styczeń",
"luty",
"marzec",
"kwiecień",
"maj",
"czerwiec",
"lipiec",
"sierpień",
"wrzesień",
"październik",
"listopad",
"grudzień",
]
month_abbreviations = [
"",
"sty",
"lut",
"mar",
"kwi",
"maj",
"cze",
"lip",
"sie",
"wrz",
"paź",
"lis",
"gru",
]
day_names = [
"",
"poniedziałek",
"wtorek",
"środa",
"czwartek",
"piątek",
"sobota",
"niedziela",
]
day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"]
class RussianLocale(SlavicBaseLocale):
names = ["ru", "ru_ru"]
past = "{0} назад"
future = "через {0}"
timeframes = {
"now": "сейчас",
"seconds": "несколько секунд",
"minute": "минуту",
"minutes": ["{0} минуту", "{0} минуты", "{0} минут"],
"hour": "час",
"hours": ["{0} час", "{0} часа", "{0} часов"],
"day": "день",
"days": ["{0} день", "{0} дня", "{0} дней"],
"week": "неделю",
"weeks": ["{0} неделю", "{0} недели", "{0} недель"],
"month": "месяц",
"months": ["{0} месяц", "{0} месяца", "{0} месяцев"],
"year": "год",
"years": ["{0} год", "{0} года", "{0} лет"],
}
month_names = [
"",
"января",
"февраля",
"марта",
"апреля",
"мая",
"июня",
"июля",
"августа",
"сентября",
"октября",
"ноября",
"декабря",
]
month_abbreviations = [
"",
"янв",
"фев",
"мар",
"апр",
"май",
"июн",
"июл",
"авг",
"сен",
"окт",
"ноя",
"дек",
]
day_names = [
"",
"понедельник",
"вторник",
"среда",
"четверг",
"пятница",
"суббота",
"воскресенье",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "вс"]
class AfrikaansLocale(Locale):
names = ["af", "af_nl"]
past = "{0} gelede"
future = "in {0}"
timeframes = {
"now": "nou",
"seconds": "sekondes",
"minute": "minuut",
"minutes": "{0} minute",
"hour": "uur",
"hours": "{0} ure",
"day": "een dag",
"days": "{0} dae",
"month": "een maand",
"months": "{0} maande",
"year": "een jaar",
"years": "{0} jaar",
}
month_names = [
"",
"Januarie",
"Februarie",
"Maart",
"April",
"Mei",
"Junie",
"Julie",
"Augustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mrt",
"Apr",
"Mei",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Des",
]
day_names = [
"",
"Maandag",
"Dinsdag",
"Woensdag",
"Donderdag",
"Vrydag",
"Saterdag",
"Sondag",
]
day_abbreviations = ["", "Ma", "Di", "Wo", "Do", "Vr", "Za", "So"]
class BulgarianLocale(SlavicBaseLocale):
names = ["bg", "bg_BG"]
past = "{0} назад"
future = "напред {0}"
timeframes = {
"now": "сега",
"seconds": "няколко секунди",
"minute": "минута",
"minutes": ["{0} минута", "{0} минути", "{0} минути"],
"hour": "час",
"hours": ["{0} час", "{0} часа", "{0} часа"],
"day": "ден",
"days": ["{0} ден", "{0} дни", "{0} дни"],
"month": "месец",
"months": ["{0} месец", "{0} месеца", "{0} месеца"],
"year": "година",
"years": ["{0} година", "{0} години", "{0} години"],
}
month_names = [
"",
"януари",
"февруари",
"март",
"април",
"май",
"юни",
"юли",
"август",
"септември",
"октомври",
"ноември",
"декември",
]
month_abbreviations = [
"",
"ян",
"февр",
"март",
"апр",
"май",
"юни",
"юли",
"авг",
"септ",
"окт",
"ноем",
"дек",
]
day_names = [
"",
"понеделник",
"вторник",
"сряда",
"четвъртък",
"петък",
"събота",
"неделя",
]
day_abbreviations = ["", "пон", "вт", "ср", "четв", "пет", "съб", "нед"]
class UkrainianLocale(SlavicBaseLocale):
names = ["ua", "uk_ua"]
past = "{0} тому"
future = "за {0}"
timeframes = {
"now": "зараз",
"seconds": "кілька секунд",
"minute": "хвилину",
"minutes": ["{0} хвилину", "{0} хвилини", "{0} хвилин"],
"hour": "годину",
"hours": ["{0} годину", "{0} години", "{0} годин"],
"day": "день",
"days": ["{0} день", "{0} дні", "{0} днів"],
"month": "місяць",
"months": ["{0} місяць", "{0} місяці", "{0} місяців"],
"year": "рік",
"years": ["{0} рік", "{0} роки", "{0} років"],
}
month_names = [
"",
"січня",
"лютого",
"березня",
"квітня",
"травня",
"червня",
"липня",
"серпня",
"вересня",
"жовтня",
"листопада",
"грудня",
]
month_abbreviations = [
"",
"січ",
"лют",
"бер",
"квіт",
"трав",
"черв",
"лип",
"серп",
"вер",
"жовт",
"лист",
"груд",
]
day_names = [
"",
"понеділок",
"вівторок",
"середа",
"четвер",
"п’ятниця",
"субота",
"неділя",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"]
class MacedonianLocale(SlavicBaseLocale):
names = ["mk", "mk_mk"]
past = "пред {0}"
future = "за {0}"
timeframes = {
"now": "сега",
"seconds": "секунди",
"minute": "една минута",
"minutes": ["{0} минута", "{0} минути", "{0} минути"],
"hour": "еден саат",
"hours": ["{0} саат", "{0} саати", "{0} саати"],
"day": "еден ден",
"days": ["{0} ден", "{0} дена", "{0} дена"],
"month": "еден месец",
"months": ["{0} месец", "{0} месеци", "{0} месеци"],
"year": "една година",
"years": ["{0} година", "{0} години", "{0} години"],
}
meridians = {"am": "дп", "pm": "пп", "AM": "претпладне", "PM": "попладне"}
month_names = [
"",
"Јануари",
"Февруари",
"Март",
"Април",
"Мај",
"Јуни",
"Јули",
"Август",
"Септември",
"Октомври",
"Ноември",
"Декември",
]
month_abbreviations = [
"",
"Јан.",
" Фев.",
" Мар.",
" Апр.",
" Мај",
" Јун.",
" Јул.",
" Авг.",
" Септ.",
" Окт.",
" Ноем.",
" Декем.",
]
day_names = [
"",
"Понеделник",
" Вторник",
" Среда",
" Четврток",
" Петок",
" Сабота",
" Недела",
]
day_abbreviations = [
"",
"Пон.",
" Вт.",
" Сре.",
" Чет.",
" Пет.",
" Саб.",
" Нед.",
]
class DeutschBaseLocale(Locale):
past = "vor {0}"
future = "in {0}"
timeframes = {
"now": "gerade eben",
"seconds": "Sekunden",
"minute": "einer Minute",
"minutes": "{0} Minuten",
"hour": "einer Stunde",
"hours": "{0} Stunden",
"day": "einem Tag",
"days": "{0} Tagen",
"month": "einem Monat",
"months": "{0} Monaten",
"year": "einem Jahr",
"years": "{0} Jahren",
}
timeframes_only_distance = timeframes.copy()
timeframes_only_distance["minute"] = "eine Minute"
timeframes_only_distance["hour"] = "eine Stunde"
timeframes_only_distance["day"] = "ein Tag"
timeframes_only_distance["month"] = "ein Monat"
timeframes_only_distance["year"] = "ein Jahr"
month_names = [
"",
"Januar",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dez",
]
day_names = [
"",
"Montag",
"Dienstag",
"Mittwoch",
"Donnerstag",
"Freitag",
"Samstag",
"Sonntag",
]
day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"]
def _ordinal_number(self, n):
return "{}.".format(n)
def describe(self, timeframe, delta=0, only_distance=False):
""" Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
:param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords
"""
humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
if not only_distance:
humanized = self._format_timeframe(timeframe, delta)
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
class GermanLocale(DeutschBaseLocale, Locale):
names = ["de", "de_de"]
class AustrianLocale(DeutschBaseLocale, Locale):
names = ["de_at"]
month_names = [
"",
"Jänner",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
class NorwegianLocale(Locale):
names = ["nb", "nb_no"]
past = "for {0} siden"
future = "om {0}"
timeframes = {
"now": "nå nettopp",
"seconds": "noen sekunder",
"minute": "ett minutt",
"minutes": "{0} minutter",
"hour": "en time",
"hours": "{0} timer",
"day": "en dag",
"days": "{0} dager",
"month": "en måned",
"months": "{0} måneder",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"mars",
"april",
"mai",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"mai",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"des",
]
day_names = [
"",
"mandag",
"tirsdag",
"onsdag",
"torsdag",
"fredag",
"lørdag",
"søndag",
]
day_abbreviations = ["", "ma", "ti", "on", "to", "fr", "lø", "sø"]
class NewNorwegianLocale(Locale):
names = ["nn", "nn_no"]
past = "for {0} sidan"
future = "om {0}"
timeframes = {
"now": "no nettopp",
"seconds": "nokre sekund",
"minute": "ett minutt",
"minutes": "{0} minutt",
"hour": "ein time",
"hours": "{0} timar",
"day": "ein dag",
"days": "{0} dagar",
"month": "en månad",
"months": "{0} månader",
"year": "eit år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"mars",
"april",
"mai",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"mai",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"des",
]
day_names = [
"",
"måndag",
"tysdag",
"onsdag",
"torsdag",
"fredag",
"laurdag",
"sundag",
]
day_abbreviations = ["", "må", "ty", "on", "to", "fr", "la", "su"]
class PortugueseLocale(Locale):
names = ["pt", "pt_pt"]
past = "há {0}"
future = "em {0}"
timeframes = {
"now": "agora",
"second": "um segundo",
"seconds": "{0} segundos",
"minute": "um minuto",
"minutes": "{0} minutos",
"hour": "uma hora",
"hours": "{0} horas",
"day": "um dia",
"days": "{0} dias",
"week": "uma semana",
"weeks": "{0} semanas",
"month": "um mês",
"months": "{0} meses",
"year": "um ano",
"years": "{0} anos",
}
month_names = [
"",
"janeiro",
"fevereiro",
"março",
"abril",
"maio",
"junho",
"julho",
"agosto",
"setembro",
"outubro",
"novembro",
"dezembro",
]
month_abbreviations = [
"",
"jan",
"fev",
"mar",
"abr",
"maio",
"jun",
"jul",
"ago",
"set",
"out",
"nov",
"dez",
]
day_names = [
"",
"segunda-feira",
"terça-feira",
"quarta-feira",
"quinta-feira",
"sexta-feira",
"sábado",
"domingo",
]
day_abbreviations = ["", "seg", "ter", "qua", "qui", "sex", "sab", "dom"]
class BrazilianPortugueseLocale(PortugueseLocale):
names = ["pt_br"]
past = "faz {0}"
future = "em {0}"
timeframes = {
"now": "agora",
"second": "um segundo",
"seconds": "{0} segundos",
"minute": "um minuto",
"minutes": "{0} minutos",
"hour": "uma hora",
"hours": "{0} horas",
"day": "um dia",
"days": "{0} dias",
"week": "uma semana",
"weeks": "{0} semanas",
"month": "um mês",
"months": "{0} meses",
"year": "um ano",
"years": "{0} anos",
}
month_names = [
"",
"Janeiro",
"Fevereiro",
"Março",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro",
]
month_abbreviations = [
"",
"Jan",
"Fev",
"Mar",
"Abr",
"Mai",
"Jun",
"Jul",
"Ago",
"Set",
"Out",
"Nov",
"Dez",
]
day_names = [
"",
"Segunda-feira",
"Terça-feira",
"Quarta-feira",
"Quinta-feira",
"Sexta-feira",
"Sábado",
"Domingo",
]
day_abbreviations = ["", "Seg", "Ter", "Qua", "Qui", "Sex", "Sab", "Dom"]
class TagalogLocale(Locale):
names = ["tl", "tl_ph"]
past = "nakaraang {0}"
future = "{0} mula ngayon"
timeframes = {
"now": "ngayon lang",
"seconds": "segundo",
"minute": "isang minuto",
"minutes": "{0} minuto",
"hour": "isang oras",
"hours": "{0} oras",
"day": "isang araw",
"days": "{0} araw",
"month": "isang buwan",
"months": "{0} buwan",
"year": "isang taon",
"years": "{0} taon",
}
month_names = [
"",
"Enero",
"Pebrero",
"Marso",
"Abril",
"Mayo",
"Hunyo",
"Hulyo",
"Agosto",
"Setyembre",
"Oktubre",
"Nobyembre",
"Disyembre",
]
month_abbreviations = [
"",
"Ene",
"Peb",
"Mar",
"Abr",
"May",
"Hun",
"Hul",
"Ago",
"Set",
"Okt",
"Nob",
"Dis",
]
day_names = [
"",
"Lunes",
"Martes",
"Miyerkules",
"Huwebes",
"Biyernes",
"Sabado",
"Linggo",
]
day_abbreviations = ["", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab", "Lin"]
def _ordinal_number(self, n):
return "ika-{}".format(n)
class VietnameseLocale(Locale):
names = ["vi", "vi_vn"]
past = "{0} trước"
future = "{0} nữa"
timeframes = {
"now": "hiện tại",
"seconds": "giây",
"minute": "một phút",
"minutes": "{0} phút",
"hour": "một giờ",
"hours": "{0} giờ",
"day": "một ngày",
"days": "{0} ngày",
"week": "một tuần",
"weeks": "{0} tuần",
"month": "một tháng",
"months": "{0} tháng",
"year": "một năm",
"years": "{0} năm",
}
month_names = [
"",
"Tháng Một",
"Tháng Hai",
"Tháng Ba",
"Tháng Tư",
"Tháng Năm",
"Tháng Sáu",
"Tháng Bảy",
"Tháng Tám",
"Tháng Chín",
"Tháng Mười",
"Tháng Mười Một",
"Tháng Mười Hai",
]
month_abbreviations = [
"",
"Tháng 1",
"Tháng 2",
"Tháng 3",
"Tháng 4",
"Tháng 5",
"Tháng 6",
"Tháng 7",
"Tháng 8",
"Tháng 9",
"Tháng 10",
"Tháng 11",
"Tháng 12",
]
day_names = [
"",
"Thứ Hai",
"Thứ Ba",
"Thứ Tư",
"Thứ Năm",
"Thứ Sáu",
"Thứ Bảy",
"Chủ Nhật",
]
day_abbreviations = ["", "Thứ 2", "Thứ 3", "Thứ 4", "Thứ 5", "Thứ 6", "Thứ 7", "CN"]
class TurkishLocale(Locale):
names = ["tr", "tr_tr"]
past = "{0} önce"
future = "{0} sonra"
timeframes = {
"now": "şimdi",
"seconds": "saniye",
"minute": "bir dakika",
"minutes": "{0} dakika",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"month": "bir ay",
"months": "{0} ay",
"year": "yıl",
"years": "{0} yıl",
}
month_names = [
"",
"Ocak",
"Şubat",
"Mart",
"Nisan",
"Mayıs",
"Haziran",
"Temmuz",
"Ağustos",
"Eylül",
"Ekim",
"Kasım",
"Aralık",
]
month_abbreviations = [
"",
"Oca",
"Şub",
"Mar",
"Nis",
"May",
"Haz",
"Tem",
"Ağu",
"Eyl",
"Eki",
"Kas",
"Ara",
]
day_names = [
"",
"Pazartesi",
"Salı",
"Çarşamba",
"Perşembe",
"Cuma",
"Cumartesi",
"Pazar",
]
day_abbreviations = ["", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt", "Paz"]
class AzerbaijaniLocale(Locale):
names = ["az", "az_az"]
past = "{0} əvvəl"
future = "{0} sonra"
timeframes = {
"now": "indi",
"seconds": "saniyə",
"minute": "bir dəqiqə",
"minutes": "{0} dəqiqə",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"month": "bir ay",
"months": "{0} ay",
"year": "il",
"years": "{0} il",
}
month_names = [
"",
"Yanvar",
"Fevral",
"Mart",
"Aprel",
"May",
"İyun",
"İyul",
"Avqust",
"Sentyabr",
"Oktyabr",
"Noyabr",
"Dekabr",
]
month_abbreviations = [
"",
"Yan",
"Fev",
"Mar",
"Apr",
"May",
"İyn",
"İyl",
"Avq",
"Sen",
"Okt",
"Noy",
"Dek",
]
day_names = [
"",
"Bazar ertəsi",
"Çərşənbə axşamı",
"Çərşənbə",
"Cümə axşamı",
"Cümə",
"Şənbə",
"Bazar",
]
day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"]
class ArabicLocale(Locale):
names = [
"ar",
"ar_ae",
"ar_bh",
"ar_dj",
"ar_eg",
"ar_eh",
"ar_er",
"ar_km",
"ar_kw",
"ar_ly",
"ar_om",
"ar_qa",
"ar_sa",
"ar_sd",
"ar_so",
"ar_ss",
"ar_td",
"ar_ye",
]
past = "منذ {0}"
future = "خلال {0}"
timeframes = {
"now": "الآن",
"seconds": {"double": "ثانيتين", "ten": "{0} ثوان", "higher": "{0} ثانية"},
"minute": "دقيقة",
"minutes": {"double": "دقيقتين", "ten": "{0} دقائق", "higher": "{0} دقيقة"},
"hour": "ساعة",
"hours": {"double": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"},
"day": "يوم",
"days": {"double": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"},
"month": "شهر",
"months": {"double": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"},
"year": "سنة",
"years": {"double": "سنتين", "ten": "{0} سنوات", "higher": "{0} سنة"},
}
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"مايو",
"يونيو",
"يوليو",
"أغسطس",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"مايو",
"يونيو",
"يوليو",
"أغسطس",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
day_names = [
"",
"الإثنين",
"الثلاثاء",
"الأربعاء",
"الخميس",
"الجمعة",
"السبت",
"الأحد",
]
day_abbreviations = ["", "إثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "سبت", "أحد"]
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, dict):
if delta == 2:
form = form["double"]
elif delta > 2 and delta <= 10:
form = form["ten"]
else:
form = form["higher"]
return form.format(delta)
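# Illustrative check (not part of the original module) of the Arabic form
# selection above: exactly 2 uses the dual form, 3-10 the "ten" form, and
# anything else the "higher" form.
#
#   >>> ar = ArabicLocale()
#   >>> ar._format_timeframe("days", 2)
#   'يومين'
#   >>> ar._format_timeframe("days", 5)
#   '5 أيام'
#   >>> ar._format_timeframe("days", 11)
#   '11 يوم'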
class LevantArabicLocale(ArabicLocale):
names = ["ar_iq", "ar_jo", "ar_lb", "ar_ps", "ar_sy"]
month_names = [
"",
"كانون الثاني",
"شباط",
"آذار",
"نيسان",
"أيار",
"حزيران",
"تموز",
"آب",
"أيلول",
"تشرين الأول",
"تشرين الثاني",
"كانون الأول",
]
month_abbreviations = [
"",
"كانون الثاني",
"شباط",
"آذار",
"نيسان",
"أيار",
"حزيران",
"تموز",
"آب",
"أيلول",
"تشرين الأول",
"تشرين الثاني",
"كانون الأول",
]
class AlgeriaTunisiaArabicLocale(ArabicLocale):
names = ["ar_tn", "ar_dz"]
month_names = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
class MauritaniaArabicLocale(ArabicLocale):
names = ["ar_mr"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"إبريل",
"مايو",
"يونيو",
"يوليو",
"أغشت",
"شتمبر",
"أكتوبر",
"نوفمبر",
"دجمبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"إبريل",
"مايو",
"يونيو",
"يوليو",
"أغشت",
"شتمبر",
"أكتوبر",
"نوفمبر",
"دجمبر",
]
class MoroccoArabicLocale(ArabicLocale):
names = ["ar_ma"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
class IcelandicLocale(Locale):
def _format_timeframe(self, timeframe, delta):
timeframe = self.timeframes[timeframe]
if delta < 0:
timeframe = timeframe[0]
elif delta > 0:
timeframe = timeframe[1]
return timeframe.format(abs(delta))
names = ["is", "is_is"]
past = "fyrir {0} síðan"
future = "eftir {0}"
timeframes = {
"now": "rétt í þessu",
"seconds": ("nokkrum sekúndum", "nokkrar sekúndur"),
"minute": ("einni mínútu", "eina mínútu"),
"minutes": ("{0} mínútum", "{0} mínútur"),
"hour": ("einum tíma", "einn tíma"),
"hours": ("{0} tímum", "{0} tíma"),
"day": ("einum degi", "einn dag"),
"days": ("{0} dögum", "{0} daga"),
"month": ("einum mánuði", "einn mánuð"),
"months": ("{0} mánuðum", "{0} mánuði"),
"year": ("einu ári", "eitt ár"),
"years": ("{0} árum", "{0} ár"),
}
meridians = {"am": "f.h.", "pm": "e.h.", "AM": "f.h.", "PM": "e.h."}
month_names = [
"",
"janúar",
"febrúar",
"mars",
"apríl",
"maí",
"júní",
"júlí",
"ágúst",
"september",
"október",
"nóvember",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maí",
"jún",
"júl",
"ágú",
"sep",
"okt",
"nóv",
"des",
]
day_names = [
"",
"mánudagur",
"þriðjudagur",
"miðvikudagur",
"fimmtudagur",
"föstudagur",
"laugardagur",
"sunnudagur",
]
day_abbreviations = ["", "mán", "þri", "mið", "fim", "fös", "lau", "sun"]
class DanishLocale(Locale):
names = ["da", "da_dk"]
past = "for {0} siden"
future = "efter {0}"
timeframes = {
"now": "lige nu",
"seconds": "et par sekunder",
"minute": "et minut",
"minutes": "{0} minutter",
"hour": "en time",
"hours": "{0} timer",
"day": "en dag",
"days": "{0} dage",
"month": "en måned",
"months": "{0} måneder",
"year": "et år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"marts",
"april",
"maj",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"mandag",
"tirsdag",
"onsdag",
"torsdag",
"fredag",
"lørdag",
"søndag",
]
day_abbreviations = ["", "man", "tir", "ons", "tor", "fre", "lør", "søn"]
class MalayalamLocale(Locale):
names = ["ml"]
past = "{0} മുമ്പ്"
future = "{0} ശേഷം"
timeframes = {
"now": "ഇപ്പോൾ",
"seconds": "സെക്കന്റ്",
"minute": "ഒരു മിനിറ്റ്",
"minutes": "{0} മിനിറ്റ്",
"hour": "ഒരു മണിക്കൂർ",
"hours": "{0} മണിക്കൂർ",
"day": "ഒരു ദിവസം ",
"days": "{0} ദിവസം ",
"month": "ഒരു മാസം ",
"months": "{0} മാസം ",
"year": "ഒരു വർഷം ",
"years": "{0} വർഷം ",
}
meridians = {
"am": "രാവിലെ",
"pm": "ഉച്ചക്ക് ശേഷം",
"AM": "രാവിലെ",
"PM": "ഉച്ചക്ക് ശേഷം",
}
month_names = [
"",
"ജനുവരി",
"ഫെബ്രുവരി",
"മാർച്ച്",
"ഏപ്രിൽ ",
"മെയ് ",
"ജൂണ്",
"ജൂലൈ",
"ഓഗസ്റ്റ്",
"സെപ്റ്റംബർ",
"ഒക്ടോബർ",
"നവംബർ",
"ഡിസംബർ",
]
month_abbreviations = [
"",
"ജനു",
"ഫെബ് ",
"മാർ",
"ഏപ്രിൽ",
"മേയ്",
"ജൂണ്",
"ജൂലൈ",
"ഓഗസ്റ",
"സെപ്റ്റ",
"ഒക്ടോ",
"നവം",
"ഡിസം",
]
day_names = ["", "തിങ്കള്", "ചൊവ്വ", "ബുധന്", "വ്യാഴം", "വെള്ളി", "ശനി", "ഞായര്"]
day_abbreviations = [
"",
"തിങ്കള്",
"ചൊവ്വ",
"ബുധന്",
"വ്യാഴം",
"വെള്ളി",
"ശനി",
"ഞായര്",
]
class HindiLocale(Locale):
names = ["hi"]
past = "{0} पहले"
future = "{0} बाद"
timeframes = {
"now": "अभी",
"seconds": "सेकंड्",
"minute": "एक मिनट ",
"minutes": "{0} मिनट ",
"hour": "एक घंटा",
"hours": "{0} घंटे",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक माह ",
"months": "{0} महीने ",
"year": "एक वर्ष ",
"years": "{0} साल ",
}
meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"}
month_names = [
"",
"जनवरी",
"फरवरी",
"मार्च",
"अप्रैल ",
"मई",
"जून",
"जुलाई",
"अगस्त",
"सितंबर",
"अक्टूबर",
"नवंबर",
"दिसंबर",
]
month_abbreviations = [
"",
"जन",
"फ़र",
"मार्च",
"अप्रै",
"मई",
"जून",
"जुलाई",
"आग",
"सित",
"अकत",
"नवे",
"दिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"]
class CzechLocale(Locale):
names = ["cs", "cs_cz"]
timeframes = {
"now": "Teď",
"seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekund"]},
"minute": {"past": "minutou", "future": "minutu", "zero": "{0} minut"},
"minutes": {"past": "{0} minutami", "future": ["{0} minuty", "{0} minut"]},
"hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodin"},
"hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodin"]},
"day": {"past": "dnem", "future": "den", "zero": "{0} dnů"},
"days": {"past": "{0} dny", "future": ["{0} dny", "{0} dnů"]},
"month": {"past": "měsícem", "future": "měsíc", "zero": "{0} měsíců"},
"months": {"past": "{0} měsíci", "future": ["{0} měsíce", "{0} měsíců"]},
"year": {"past": "rokem", "future": "rok", "zero": "{0} let"},
"years": {"past": "{0} lety", "future": ["{0} roky", "{0} let"]},
}
past = "Před {0}"
future = "Za {0}"
month_names = [
"",
"leden",
"únor",
"březen",
"duben",
"květen",
"červen",
"červenec",
"srpen",
"září",
"říjen",
"listopad",
"prosinec",
]
month_abbreviations = [
"",
"led",
"úno",
"bře",
"dub",
"kvě",
"čvn",
"čvc",
"srp",
"zář",
"říj",
"lis",
"pro",
]
day_names = [
"",
"pondělí",
"úterý",
"středa",
"čtvrtek",
"pátek",
"sobota",
"neděle",
]
day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"]
def _format_timeframe(self, timeframe, delta):
"""Czech aware time frame format function, takes into account
the differences between past and future forms."""
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta == 0:
form = form["zero"] # And *never* use 0 in the singular!
elif delta > 0:
form = form["future"]
else:
form = form["past"]
delta = abs(delta)
if isinstance(form, list):
if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[0]
else:
form = form[1]
return form.format(delta)
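# Illustrative check (not part of the original module): the sign of ``delta``
# selects the Czech case above, and the plural rule then picks between the two
# future forms.
#
#   >>> cs = CzechLocale()
#   >>> cs._format_timeframe("hours", -2)
#   '2 hodinami'
#   >>> cs._format_timeframe("hours", 2)
#   '2 hodiny'
#   >>> cs._format_timeframe("hours", 5)
#   '5 hodin'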
class SlovakLocale(Locale):
names = ["sk", "sk_sk"]
timeframes = {
"now": "Teraz",
"seconds": {"past": "pár sekundami", "future": ["{0} sekundy", "{0} sekúnd"]},
"minute": {"past": "minútou", "future": "minútu", "zero": "{0} minút"},
"minutes": {"past": "{0} minútami", "future": ["{0} minúty", "{0} minút"]},
"hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodín"},
"hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodín"]},
"day": {"past": "dňom", "future": "deň", "zero": "{0} dní"},
"days": {"past": "{0} dňami", "future": ["{0} dni", "{0} dní"]},
"month": {"past": "mesiacom", "future": "mesiac", "zero": "{0} mesiacov"},
"months": {"past": "{0} mesiacmi", "future": ["{0} mesiace", "{0} mesiacov"]},
"year": {"past": "rokom", "future": "rok", "zero": "{0} rokov"},
"years": {"past": "{0} rokmi", "future": ["{0} roky", "{0} rokov"]},
}
past = "Pred {0}"
future = "O {0}"
month_names = [
"",
"január",
"február",
"marec",
"apríl",
"máj",
"jún",
"júl",
"august",
"september",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"máj",
"jún",
"júl",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"pondelok",
"utorok",
"streda",
"štvrtok",
"piatok",
"sobota",
"nedeľa",
]
day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"]
def _format_timeframe(self, timeframe, delta):
"""Slovak aware time frame format function, takes into account
the differences between past and future forms."""
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta == 0:
form = form["zero"] # And *never* use 0 in the singular!
elif delta > 0:
form = form["future"]
else:
form = form["past"]
delta = abs(delta)
if isinstance(form, list):
if 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[0]
else:
form = form[1]
return form.format(delta)
class FarsiLocale(Locale):
names = ["fa", "fa_ir"]
past = "{0} قبل"
future = "در {0}"
timeframes = {
"now": "اکنون",
"seconds": "ثانیه",
"minute": "یک دقیقه",
"minutes": "{0} دقیقه",
"hour": "یک ساعت",
"hours": "{0} ساعت",
"day": "یک روز",
"days": "{0} روز",
"month": "یک ماه",
"months": "{0} ماه",
"year": "یک سال",
"years": "{0} سال",
}
meridians = {
"am": "قبل از ظهر",
"pm": "بعد از ظهر",
"AM": "قبل از ظهر",
"PM": "بعد از ظهر",
}
month_names = [
"",
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"دو شنبه",
"سه شنبه",
"چهارشنبه",
"پنجشنبه",
"جمعه",
"شنبه",
"یکشنبه",
]
day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
class HebrewLocale(Locale):
names = ["he", "he_IL"]
past = "לפני {0}"
future = "בעוד {0}"
timeframes = {
"now": "הרגע",
"seconds": "שניות",
"minute": "דקה",
"minutes": "{0} דקות",
"hour": "שעה",
"hours": "{0} שעות",
"2-hours": "שעתיים",
"day": "יום",
"days": "{0} ימים",
"2-days": "יומיים",
"month": "חודש",
"months": "{0} חודשים",
"2-months": "חודשיים",
"year": "שנה",
"years": "{0} שנים",
"2-years": "שנתיים",
}
meridians = {
"am": 'לפנ"צ',
"pm": 'אחר"צ',
"AM": "לפני הצהריים",
"PM": "אחרי הצהריים",
}
month_names = [
"",
"ינואר",
"פברואר",
"מרץ",
"אפריל",
"מאי",
"יוני",
"יולי",
"אוגוסט",
"ספטמבר",
"אוקטובר",
"נובמבר",
"דצמבר",
]
month_abbreviations = [
"",
"ינו׳",
"פבר׳",
"מרץ",
"אפר׳",
"מאי",
"יוני",
"יולי",
"אוג׳",
"ספט׳",
"אוק׳",
"נוב׳",
"דצמ׳",
]
day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"]
day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"]
def _format_timeframe(self, timeframe, delta):
"""Hebrew couple of <timeframe> aware"""
couple = "2-{}".format(timeframe)
if abs(delta) == 2 and couple in self.timeframes:
return self.timeframes[couple].format(abs(delta))
else:
return self.timeframes[timeframe].format(abs(delta))
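# Illustrative check (not part of the original module): a delta of exactly 2 uses
# the dedicated dual entry ("2-<timeframe>") when one exists, otherwise the
# regular plural is formatted.
#
#   >>> he = HebrewLocale()
#   >>> he._format_timeframe("hours", 2)
#   'שעתיים'
#   >>> he._format_timeframe("hours", 5)
#   '5 שעות'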
class MarathiLocale(Locale):
names = ["mr"]
past = "{0} आधी"
future = "{0} नंतर"
timeframes = {
"now": "सद्य",
"seconds": "सेकंद",
"minute": "एक मिनिट ",
"minutes": "{0} मिनिट ",
"hour": "एक तास",
"hours": "{0} तास",
"day": "एक दिवस",
"days": "{0} दिवस",
"month": "एक महिना ",
"months": "{0} महिने ",
"year": "एक वर्ष ",
"years": "{0} वर्ष ",
}
meridians = {"am": "सकाळ", "pm": "संध्याकाळ", "AM": "सकाळ", "PM": "संध्याकाळ"}
month_names = [
"",
"जानेवारी",
"फेब्रुवारी",
"मार्च",
"एप्रिल",
"मे",
"जून",
"जुलै",
"अॉगस्ट",
"सप्टेंबर",
"अॉक्टोबर",
"नोव्हेंबर",
"डिसेंबर",
]
month_abbreviations = [
"",
"जान",
"फेब्रु",
"मार्च",
"एप्रि",
"मे",
"जून",
"जुलै",
"अॉग",
"सप्टें",
"अॉक्टो",
"नोव्हें",
"डिसें",
]
day_names = [
"",
"सोमवार",
"मंगळवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि", "रवि"]
def _map_locales():
locales = {}
for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if issubclass(cls, Locale): # pragma: no branch
for name in cls.names:
locales[name.lower()] = cls
return locales
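# Illustrative note (assumption: the module assigns ``_locales = _map_locales()``
# at import time, which is what get_locale() above looks names up in).
#
#   >>> locales = _map_locales()
#   >>> locales["en_us"].__name__
#   'EnglishLocale'
#   >>> locales["pt_br"].__name__
#   'BrazilianPortugueseLocale'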
class CatalanLocale(Locale):
names = ["ca", "ca_es", "ca_ad", "ca_fr", "ca_it"]
past = "Fa {0}"
future = "En {0}"
timeframes = {
"now": "Ara mateix",
"seconds": "segons",
"minute": "1 minut",
"minutes": "{0} minuts",
"hour": "una hora",
"hours": "{0} hores",
"day": "un dia",
"days": "{0} dies",
"month": "un mes",
"months": "{0} mesos",
"year": "un any",
"years": "{0} anys",
}
month_names = [
"",
"Gener",
"Febrer",
"Març",
"Abril",
"Maig",
"Juny",
"Juliol",
"Agost",
"Setembre",
"Octubre",
"Novembre",
"Desembre",
]
month_abbreviations = [
"",
"Gener",
"Febrer",
"Març",
"Abril",
"Maig",
"Juny",
"Juliol",
"Agost",
"Setembre",
"Octubre",
"Novembre",
"Desembre",
]
day_names = [
"",
"Dilluns",
"Dimarts",
"Dimecres",
"Dijous",
"Divendres",
"Dissabte",
"Diumenge",
]
day_abbreviations = [
"",
"Dilluns",
"Dimarts",
"Dimecres",
"Dijous",
"Divendres",
"Dissabte",
"Diumenge",
]
class BasqueLocale(Locale):
names = ["eu", "eu_eu"]
past = "duela {0}"
future = "{0}" # I don't know what's the right phrase in Basque for the future.
timeframes = {
"now": "Orain",
"seconds": "segundu",
"minute": "minutu bat",
"minutes": "{0} minutu",
"hour": "ordu bat",
"hours": "{0} ordu",
"day": "egun bat",
"days": "{0} egun",
"month": "hilabete bat",
"months": "{0} hilabet",
"year": "urte bat",
"years": "{0} urte",
}
month_names = [
"",
"urtarrilak",
"otsailak",
"martxoak",
"apirilak",
"maiatzak",
"ekainak",
"uztailak",
"abuztuak",
"irailak",
"urriak",
"azaroak",
"abenduak",
]
month_abbreviations = [
"",
"urt",
"ots",
"mar",
"api",
"mai",
"eka",
"uzt",
"abu",
"ira",
"urr",
"aza",
"abe",
]
day_names = [
"",
"astelehena",
"asteartea",
"asteazkena",
"osteguna",
"ostirala",
"larunbata",
"igandea",
]
day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"]
class HungarianLocale(Locale):
names = ["hu", "hu_hu"]
past = "{0} ezelőtt"
future = "{0} múlva"
timeframes = {
"now": "éppen most",
"seconds": {"past": "másodpercekkel", "future": "pár másodperc"},
"minute": {"past": "egy perccel", "future": "egy perc"},
"minutes": {"past": "{0} perccel", "future": "{0} perc"},
"hour": {"past": "egy órával", "future": "egy óra"},
"hours": {"past": "{0} órával", "future": "{0} óra"},
"day": {"past": "egy nappal", "future": "egy nap"},
"days": {"past": "{0} nappal", "future": "{0} nap"},
"month": {"past": "egy hónappal", "future": "egy hónap"},
"months": {"past": "{0} hónappal", "future": "{0} hónap"},
"year": {"past": "egy évvel", "future": "egy év"},
"years": {"past": "{0} évvel", "future": "{0} év"},
}
month_names = [
"",
"január",
"február",
"március",
"április",
"május",
"június",
"július",
"augusztus",
"szeptember",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"febr",
"márc",
"ápr",
"máj",
"jún",
"júl",
"aug",
"szept",
"okt",
"nov",
"dec",
]
day_names = [
"",
"hétfő",
"kedd",
"szerda",
"csütörtök",
"péntek",
"szombat",
"vasárnap",
]
day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"]
meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"}
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
if isinstance(form, dict):
if delta > 0:
form = form["future"]
else:
form = form["past"]
return form.format(abs(delta))
class EsperantoLocale(Locale):
names = ["eo", "eo_xx"]
past = "antaŭ {0}"
future = "post {0}"
timeframes = {
"now": "nun",
"seconds": "kelkaj sekundoj",
"minute": "unu minuto",
"minutes": "{0} minutoj",
"hour": "un horo",
"hours": "{0} horoj",
"day": "unu tago",
"days": "{0} tagoj",
"month": "unu monato",
"months": "{0} monatoj",
"year": "unu jaro",
"years": "{0} jaroj",
}
month_names = [
"",
"januaro",
"februaro",
"marto",
"aprilo",
"majo",
"junio",
"julio",
"aŭgusto",
"septembro",
"oktobro",
"novembro",
"decembro",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aŭg",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"lundo",
"mardo",
"merkredo",
"ĵaŭdo",
"vendredo",
"sabato",
"dimanĉo",
]
day_abbreviations = ["", "lun", "mar", "mer", "ĵaŭ", "ven", "sab", "dim"]
meridians = {"am": "atm", "pm": "ptm", "AM": "ATM", "PM": "PTM"}
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=a))a)"
def _ordinal_number(self, n):
return "{}a".format(n)
class ThaiLocale(Locale):
names = ["th", "th_th"]
past = "{0}{1}ที่ผ่านมา"
future = "ในอีก{1}{0}"
timeframes = {
"now": "ขณะนี้",
"seconds": "ไม่กี่วินาที",
"minute": "1 นาที",
"minutes": "{0} นาที",
"hour": "1 ชั่วโมง",
"hours": "{0} ชั่วโมง",
"day": "1 วัน",
"days": "{0} วัน",
"month": "1 เดือน",
"months": "{0} เดือน",
"year": "1 ปี",
"years": "{0} ปี",
}
month_names = [
"",
"มกราคม",
"กุมภาพันธ์",
"มีนาคม",
"เมษายน",
"พฤษภาคม",
"มิถุนายน",
"กรกฎาคม",
"สิงหาคม",
"กันยายน",
"ตุลาคม",
"พฤศจิกายน",
"ธันวาคม",
]
month_abbreviations = [
"",
"ม.ค.",
"ก.พ.",
"มี.ค.",
"เม.ย.",
"พ.ค.",
"มิ.ย.",
"ก.ค.",
"ส.ค.",
"ก.ย.",
"ต.ค.",
"พ.ย.",
"ธ.ค.",
]
day_names = ["", "จันทร์", "อังคาร", "พุธ", "พฤหัสบดี", "ศุกร์", "เสาร์", "อาทิตย์"]
day_abbreviations = ["", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"]
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
BE_OFFSET = 543
def year_full(self, year):
"""Thai always use Buddhist Era (BE) which is CE + 543"""
year += self.BE_OFFSET
return "{:04d}".format(year)
def year_abbreviation(self, year):
"""Thai always use Buddhist Era (BE) which is CE + 543"""
year += self.BE_OFFSET
return "{:04d}".format(year)[2:]
def _format_relative(self, humanized, timeframe, delta):
"""Thai normally doesn't have any space between words"""
if timeframe == "now":
return humanized
space = "" if timeframe == "seconds" else " "
direction = self.past if delta < 0 else self.future
return direction.format(humanized, space)
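# Illustrative check (not part of the original module): years are shifted into
# the Buddhist Era, and relative phrases are joined without a space for the
# "seconds" timeframe.
#
#   >>> th = ThaiLocale()
#   >>> th.year_full(2019), th.year_abbreviation(2019)
#   ('2562', '62')
#   >>> th._format_relative("ไม่กี่วินาที", "seconds", 1)
#   'ในอีกไม่กี่วินาที'
#   >>> th._format_relative("1 ชั่วโมง", "hour", 1)
#   'ในอีก 1 ชั่วโมง'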
class BengaliLocale(Locale):
names = ["bn", "bn_bd", "bn_in"]
past = "{0} আগে"
future = "{0} পরে"
timeframes = {
"now": "এখন",
"seconds": "সেকেন্ড",
"minute": "এক মিনিট",
"minutes": "{0} মিনিট",
"hour": "এক ঘণ্টা",
"hours": "{0} ঘণ্টা",
"day": "এক দিন",
"days": "{0} দিন",
"month": "এক মাস",
"months": "{0} মাস ",
"year": "এক বছর",
"years": "{0} বছর",
}
meridians = {"am": "সকাল", "pm": "বিকাল", "AM": "সকাল", "PM": "বিকাল"}
month_names = [
"",
"জানুয়ারি",
"ফেব্রুয়ারি",
"মার্চ",
"এপ্রিল",
"মে",
"জুন",
"জুলাই",
"আগস্ট",
"সেপ্টেম্বর",
"অক্টোবর",
"নভেম্বর",
"ডিসেম্বর",
]
month_abbreviations = [
"",
"জানু",
"ফেব",
"মার্চ",
"এপ্রি",
"মে",
"জুন",
"জুল",
"অগা",
"সেপ্ট",
"অক্টো",
"নভে",
"ডিসে",
]
day_names = [
"",
"সোমবার",
"মঙ্গলবার",
"বুধবার",
"বৃহস্পতিবার",
"শুক্রবার",
"শনিবার",
"রবিবার",
]
day_abbreviations = ["", "সোম", "মঙ্গল", "বুধ", "বৃহঃ", "শুক্র", "শনি", "রবি"]
def _ordinal_number(self, n):
if n > 10 or n == 0:
return "{}তম".format(n)
if n in [1, 5, 7, 8, 9, 10]:
return "{}ম".format(n)
if n in [2, 3]:
return "{}য়".format(n)
if n == 4:
return "{}র্থ".format(n)
if n == 6:
return "{}ষ্ঠ".format(n)
class RomanshLocale(Locale):
names = ["rm", "rm_ch"]
past = "avant {0}"
future = "en {0}"
timeframes = {
"now": "en quest mument",
"seconds": "secundas",
"minute": "ina minuta",
"minutes": "{0} minutas",
"hour": "in'ura",
"hours": "{0} ura",
"day": "in di",
"days": "{0} dis",
"month": "in mais",
"months": "{0} mais",
"year": "in onn",
"years": "{0} onns",
}
month_names = [
"",
"schaner",
"favrer",
"mars",
"avrigl",
"matg",
"zercladur",
"fanadur",
"avust",
"settember",
"october",
"november",
"december",
]
month_abbreviations = [
"",
"schan",
"fav",
"mars",
"avr",
"matg",
"zer",
"fan",
"avu",
"set",
"oct",
"nov",
"dec",
]
day_names = [
"",
"glindesdi",
"mardi",
"mesemna",
"gievgia",
"venderdi",
"sonda",
"dumengia",
]
day_abbreviations = ["", "gli", "ma", "me", "gie", "ve", "so", "du"]
class SwissLocale(Locale):
names = ["de", "de_ch"]
past = "vor {0}"
future = "in {0}"
timeframes = {
"now": "gerade eben",
"seconds": "Sekunden",
"minute": "einer Minute",
"minutes": "{0} Minuten",
"hour": "einer Stunde",
"hours": "{0} Stunden",
"day": "einem Tag",
"days": "{0} Tagen",
"week": "einer Woche",
"weeks": "{0} Wochen",
"month": "einem Monat",
"months": "{0} Monaten",
"year": "einem Jahr",
"years": "{0} Jahren",
}
month_names = [
"",
"Januar",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dez",
]
day_names = [
"",
"Montag",
"Dienstag",
"Mittwoch",
"Donnerstag",
"Freitag",
"Samstag",
"Sonntag",
]
day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"]
class RomanianLocale(Locale):
names = ["ro", "ro_ro"]
past = "{0} în urmă"
future = "peste {0}"
timeframes = {
"now": "acum",
"seconds": "câteva secunde",
"minute": "un minut",
"minutes": "{0} minute",
"hour": "o oră",
"hours": "{0} ore",
"day": "o zi",
"days": "{0} zile",
"month": "o lună",
"months": "{0} luni",
"year": "un an",
"years": "{0} ani",
}
month_names = [
"",
"ianuarie",
"februarie",
"martie",
"aprilie",
"mai",
"iunie",
"iulie",
"august",
"septembrie",
"octombrie",
"noiembrie",
"decembrie",
]
month_abbreviations = [
"",
"ian",
"febr",
"mart",
"apr",
"mai",
"iun",
"iul",
"aug",
"sept",
"oct",
"nov",
"dec",
]
day_names = [
"",
"luni",
"marți",
"miercuri",
"joi",
"vineri",
"sâmbătă",
"duminică",
]
day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"]
class SlovenianLocale(Locale):
names = ["sl", "sl_si"]
past = "pred {0}"
future = "čez {0}"
timeframes = {
"now": "zdaj",
"seconds": "sekund",
"minute": "minuta",
"minutes": "{0} minutami",
"hour": "uro",
"hours": "{0} ur",
"day": "dan",
"days": "{0} dni",
"month": "mesec",
"months": "{0} mesecev",
"year": "leto",
"years": "{0} let",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januar",
"Februar",
"Marec",
"April",
"Maj",
"Junij",
"Julij",
"Avgust",
"September",
"Oktober",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Maj",
"Jun",
"Jul",
"Avg",
"Sep",
"Okt",
"Nov",
"Dec",
]
day_names = [
"",
"Ponedeljek",
"Torek",
"Sreda",
"Četrtek",
"Petek",
"Sobota",
"Nedelja",
]
day_abbreviations = ["", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob", "Ned"]
class IndonesianLocale(Locale):
names = ["id", "id_id"]
past = "{0} yang lalu"
future = "dalam {0}"
timeframes = {
"now": "baru saja",
"seconds": "detik",
"minute": "1 menit",
"minutes": "{0} menit",
"hour": "1 jam",
"hours": "{0} jam",
"day": "1 hari",
"days": "{0} hari",
"month": "1 bulan",
"months": "{0} bulan",
"year": "1 tahun",
"years": "{0} tahun",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januari",
"Februari",
"Maret",
"April",
"Mei",
"Juni",
"Juli",
"Agustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Mei",
"Jun",
"Jul",
"Ags",
"Sept",
"Okt",
"Nov",
"Des",
]
day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"]
day_abbreviations = [
"",
"Senin",
"Selasa",
"Rabu",
"Kamis",
"Jumat",
"Sabtu",
"Minggu",
]
class NepaliLocale(Locale):
names = ["ne", "ne_np"]
past = "{0} पहिले"
future = "{0} पछी"
timeframes = {
"now": "अहिले",
"seconds": "सेकण्ड",
"minute": "मिनेट",
"minutes": "{0} मिनेट",
"hour": "एक घण्टा",
"hours": "{0} घण्टा",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक महिना",
"months": "{0} महिना",
"year": "एक बर्ष",
"years": "बर्ष",
}
meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"}
month_names = [
"",
"जनवरी",
"फेब्रुअरी",
"मार्च",
"एप्रील",
"मे",
"जुन",
"जुलाई",
"अगष्ट",
"सेप्टेम्बर",
"अक्टोबर",
"नोवेम्बर",
"डिसेम्बर",
]
month_abbreviations = [
"",
"जन",
"फेब",
"मार्च",
"एप्रील",
"मे",
"जुन",
"जुलाई",
"अग",
"सेप",
"अक्ट",
"नोव",
"डिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"बिहिवार",
"शुक्रवार",
"शनिवार",
"आइतवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"]
class EstonianLocale(Locale):
names = ["ee", "et"]
past = "{0} tagasi"
future = "{0} pärast"
timeframes = {
"now": {"past": "just nüüd", "future": "just nüüd"},
"second": {"past": "üks sekund", "future": "ühe sekundi"},
"seconds": {"past": "{0} sekundit", "future": "{0} sekundi"},
"minute": {"past": "üks minut", "future": "ühe minuti"},
"minutes": {"past": "{0} minutit", "future": "{0} minuti"},
"hour": {"past": "tund aega", "future": "tunni aja"},
"hours": {"past": "{0} tundi", "future": "{0} tunni"},
"day": {"past": "üks päev", "future": "ühe päeva"},
"days": {"past": "{0} päeva", "future": "{0} päeva"},
"month": {"past": "üks kuu", "future": "ühe kuu"},
"months": {"past": "{0} kuud", "future": "{0} kuu"},
"year": {"past": "üks aasta", "future": "ühe aasta"},
"years": {"past": "{0} aastat", "future": "{0} aasta"},
}
month_names = [
"",
"Jaanuar",
"Veebruar",
"Märts",
"Aprill",
"Mai",
"Juuni",
"Juuli",
"August",
"September",
"Oktoober",
"November",
"Detsember",
]
month_abbreviations = [
"",
"Jan",
"Veb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dets",
]
day_names = [
"",
"Esmaspäev",
"Teisipäev",
"Kolmapäev",
"Neljapäev",
"Reede",
"Laupäev",
"Pühapäev",
]
day_abbreviations = ["", "Esm", "Teis", "Kolm", "Nelj", "Re", "Lau", "Püh"]
def _format_timeframe(self, timeframe, delta):
form = self.timeframes[timeframe]
if delta > 0:
form = form["future"]
else:
form = form["past"]
return form.format(abs(delta))
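# Illustrative note (not part of the original source): EstonianLocale keeps separate
# past/future wordings per timeframe, and _format_timeframe picks one by the sign of
# delta, e.g. _format_timeframe("hours", 2) -> "2 tunni" (future) and
# _format_timeframe("hours", -2) -> "2 tundi" (past).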
_locales = _map_locales()
|
the-stack_0_12624 | # Copyright 2020 The MuLT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from optimization import LightGBMOptimizer, SVMOptimizer, LogisticRegressionOptimizer
from optimization import KNNOptimizer, MLPOptimizer, RFOptimizer
from pipeline import SelectMarker
from lightgbm import LGBMModel
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import os
class SMLA(SelectMarker):
def __init__(self,
predictor,
optimizer_default_params=None,
model_default_params=None,
verbose=-1,
random_state=None,
use_gpu=False,
test_size=.2,
n_gene_limit=None,
output_path='.',
experiment_number=1,
number_of_experiments=1,
export_metadata=True
):
assert isinstance(optimizer_default_params, dict) or optimizer_default_params is None
assert isinstance(model_default_params, dict) or model_default_params is None
self.output_path = output_path
#
self.predictor = predictor
self.model_default_params = model_default_params
#
self.optimized_params = None
self.optimizer_default_params = optimizer_default_params
#
self.model = None
self.fitted_shape = None
#
self.random_state = random_state
self.verbose = verbose
self.use_gpu = use_gpu
#
self.test_size = test_size
#
self.scaler = MinMaxScaler()
#
self.n_gene_limit = n_gene_limit
self.selected_clinical = None
self.selected_genes = None
#
self.experiment_number = experiment_number
self.number_of_experiments = number_of_experiments
self.export_metadata = export_metadata
#
if self.predictor == 'lightgbm':
self.optimizer = LightGBMOptimizer(**self.optimizer_default_params)
elif self.predictor == 'svm':
self.optimizer = SVMOptimizer(**self.optimizer_default_params)
elif self.predictor == 'knn':
self.optimizer = KNNOptimizer(**self.optimizer_default_params)
elif self.predictor == 'lr':
self.optimizer = LogisticRegressionOptimizer(**self.optimizer_default_params)
elif self.predictor == 'mlp':
self.optimizer = MLPOptimizer(**self.optimizer_default_params)
elif self.predictor == 'rf':
self.optimizer = RFOptimizer(**self.optimizer_default_params)
else:
raise ValueError('predictor should be one of the following: '
                             'lightgbm, svm, knn, lr, mlp, or rf')
for subdir in ['selected_markers']:
path = os.path.join(self.output_path, subdir)
if not os.path.exists(path):
os.makedirs(path)
def fit(self,
genes,
outcome,
clinical_markers=None, treatments=None,
clinical_marker_selection_threshold=0.1,
genes_marker_selection_threshold=0.1,
early_stopping_rounds=100):
# feature selection
        x = None
        if clinical_markers is not None:
self.selected_clinical = self.select_markers(
clinical_markers, outcome, threshold=clinical_marker_selection_threshold)
x = clinical_markers.loc[:, self.selected_clinical[0]]
if treatments is not None:
x = x.join(treatments) if x is not None else treatments
self.selected_genes = self.select_markers(
genes, outcome, threshold=genes_marker_selection_threshold, random_state=self.random_state)
if self.export_metadata:
if self.selected_clinical is not None:
pd.DataFrame({'clinical_marker': self.selected_clinical[0],
'pvalue': self.selected_clinical[1],
'entropy': self.selected_clinical[2]}).to_csv(
os.path.join(
self.output_path, 'selected_markers',
'clinical_{0:03}_{1:03}.csv'.format(
self.experiment_number, self.number_of_experiments)),
index=False)
pd.DataFrame({'gene': self.selected_genes[0],
'pvalue': self.selected_genes[1],
'entropy': self.selected_genes[2]}).to_csv(
os.path.join(
self.output_path, 'selected_markers',
'genes_{0:03}_{1:03}.csv'.format(
self.experiment_number, self.number_of_experiments)),
index=False)
genes = genes.loc[:, self.selected_genes[0]]
# join data sets
x = x.join(genes, how='inner').fillna(0).values if x is not None else genes.fillna(0).values
y = outcome.values
x = self.scaler.fit_transform(x)
######
self.fitted_shape = x.shape
self.optimized_params = self.optimizer.optimize(x, y)
self.optimized_params['random_state'] = self.random_state
self.optimized_params['n_jobs'] = -1
if self.model_default_params is not None:
self.optimized_params.update(self.model_default_params)
if self.predictor == 'lightgbm':
self.fit_lightgbm(x, y, early_stopping_rounds)
elif self.predictor == 'svm':
self.fit_svm(x, y)
elif self.predictor == 'knn':
self.fit_knn(x, y)
elif self.predictor == 'lr':
self.fit_lr(x, y)
elif self.predictor == 'mlp':
self.fit_mlp(x, y, early_stopping_rounds)
elif self.predictor == 'rf':
self.fit_rf(x, y)
def fit_rf(self, x, y):
self.model = RandomForestClassifier(**self.optimized_params)
self.model.fit(x, y)
def fit_lightgbm(self, x, y, early_stopping_rounds):
self.model = LGBMModel(**self.optimized_params)
self.model.fit(x, y)
if early_stopping_rounds is not None and early_stopping_rounds > 0:
x_train, x_valid, y_train, y_valid = train_test_split(x, y, stratify=y, shuffle=True,
test_size=self.test_size, random_state=self.random_state)
self.model.fit(x_train, y_train, eval_set=[(x_valid, y_valid)], verbose=self.verbose)
else:
self.model.fit(x, y)
def fit_svm(self, x, y):
del self.optimized_params['n_jobs']
self.model = SVC(**self.optimized_params, probability=True)
self.model.fit(x, y)
def fit_lr(self, x, y):
self.model = LogisticRegression(**self.optimized_params)
self.model.fit(x, y)
def fit_mlp(self, x, y, early_stopping_rounds):
esr = early_stopping_rounds is not None and early_stopping_rounds > 0
del self.optimized_params['n_jobs']
self.model = MLPClassifier(**self.optimized_params,
early_stopping=esr,
validation_fraction=self.test_size)
self.model.fit(x, y)
def fit_knn(self, x, y):
del self.optimized_params['random_state']
self.model = KNeighborsClassifier(**self.optimized_params)
self.model.fit(x, y)
def predict(self, genes, clinical_markers=None, treatments=None):
assert isinstance(genes, pd.DataFrame), 'genes should a pd.DataFrame'
        x = None
        if clinical_markers is not None:
x = clinical_markers.loc[:, self.selected_clinical[0]]
if treatments is not None:
x = x.join(treatments) if x is not None else treatments
genes = genes.loc[:, self.selected_genes[0]]
x = x.join(genes, how='inner').fillna(0).values if x is not None else genes.fillna(0)
x = np.maximum(0, np.minimum(1, self.scaler.transform(x)))
assert x.shape[1] == self.fitted_shape[1], \
'new data should have same number of features used to fit model'
if self.predictor == 'lightgbm':
result = self.model.predict(x)
else:
result = self.model.predict_proba(x)
if len(result.shape) > 1:
result = result[:, -1]
return result
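# Hypothetical usage sketch (not part of the original module; the DataFrames, column
# layout and parameter values below are illustrative assumptions only):
#
#   smla = SMLA(predictor='lightgbm', optimizer_default_params={},
#               model_default_params={}, random_state=42, output_path='results')
#   smla.fit(genes=gene_df, outcome=outcome_series,
#            clinical_markers=clinical_df, treatments=treatment_df)
#   scores = smla.predict(gene_df, clinical_markers=clinical_df, treatments=treatment_df)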
|
the-stack_0_12625 | """
A program for plotting integral curves of a third-order differential equation
solved for its highest derivative, y''' = f(x, y, y', y'').
Left mouse button - fix an initial condition or fix an integral curve.
Right mouse button - change the initial conditions.
"""
import matplotlib
matplotlib.use('TkAgg')
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from scipy.integrate import ode
def f(x, y):
""" Правая часть дифференциального уравнения y'''=(x, y, y', y'')
Здесь y <--> y[0]; y' <--> y[1]; y'' <--> y[2]
"""
return [y[1], y[2], x + y[0] + y[1]+ y[2]]
def on_move(event):
global x0, y0, x1, y1, dy0
    if not event.xdata or not event.ydata:  # cursor left the axes area
line.set_data([], [])
dot.set_data([], [])
tang.set_data([], [])
circ.set_radius(0)
ax.set_title("")
fig.canvas.draw_idle()
return
    if x0 is None:  # set up the 1st initial condition
dot.set_data([event.xdata], [event.ydata])
ax.set_title(f"y({event.xdata:.2f})={event.ydata:.2f}")
if event.button == 1:
x0 = event.xdata
y0 = event.ydata
    elif x1 is None:  # set up the 2nd initial condition
        # restore the auxiliary constructions if they were removed when the
        # mouse left the axes area
dot.set_data([x0], [y0])
tang.set_data([2*x0 - event.xdata, event.xdata], [2*y0 - event.ydata, event.ydata])
delta_x = event.xdata - x0
delta_y = event.ydata - y0
        if delta_x == 0:  # division by zero is not allowed
return
ax.set_title(f"y({x0:.2f})={y0:.2f}, y'({x0:.2f})={delta_y/delta_x:.2f}")
if event.button == 1:
x1 = event.xdata
y1 = event.ydata
dy0 = delta_y / delta_x
    else:  # set up the 3rd initial condition and plot the integral curve
x2 = event.xdata
y2 = event.ydata
        # restore the auxiliary constructions if they were removed when the
        # mouse left the axes area
dot.set_data([x0], [y0])
tang.set_data([2*x0 - x1, x1], [2*y0 - y1, y1])
if dy0 == 0:
x_c = x0
y_c = 0.5*(y2+y0+(x2-x0)**2/(y2-y0))
R = abs(y0-y_c)
else:
            # Slope of the line on which the circle's centre must lie
k = -1/dy0
            # Coordinates of the circle's centre
x_c = .5 * (y2**2 - y0**2 + x2**2 - x0**2 + 2 * (y0 - k*x0) * (y0 - y2)) / (k*(y2 - y0) + x2 - x0)
y_c = k * (x_c - x0) + y0
            # Radius of the circle
R = np.hypot(y0 - y_c, x0 - x_c)
        # Draw the circle
circ.center = (x_c, y_c)
circ.set_radius(R)
        # Initial value of the 2nd derivative
d2y0 = (1+dy0**2)**(3/2)/R*np.sign(y_c-y0)
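        # This follows from the curvature of the circle through the picked points:
        # R = (1 + y'^2)**(3/2) / |y''|, hence y'' = (1 + y'^2)**(3/2) / R, with the
        # sign given by whether the circle's centre lies above or below the start point.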
ax.set_title(f"y({x0:.2f})={y0:.2f}, y'({x0:.2f})={dy0:.2f}, y''({x0:.2f})={d2y0:.2f}")
de = ode(f)
de.set_integrator('dop853')
# de.set_integrator('zvode', method='bdf')
dt = 0.05
sol = []
de.set_initial_value([y0, dy0, d2y0], x0)
while de.successful() and de.t <= xlim.end:
de.integrate(de.t + dt)
sol.append((de.t, de.y[0]))
de.set_initial_value([y0, dy0, d2y0], x0)
while de.successful() and de.t >= xlim.start:
de.integrate(de.t - dt)
sol.append((de.t, de.y[0]))
sol.sort(key=lambda x: x[0])
sol = list(zip(*sol))
        if event.button == 1:  # keep the integral curve
ax.plot(sol[0], sol[1], 'r')
        elif event.button == 3:  # change the starting point
x0 = event.xdata
y0 = event.ydata
x1 = None
y1 = None
dy0 = None
dot.set_data([x0], [y0])
tang.set_data([], [])
circ.set_radius(0)
line.set_data([], [])
ax.set_title(f"y({x0:.2f})={y0:.2f}")
        else:  # current (preview) integral curve
line.set_data(sol[0], sol[1])
print(f"y''({x0:.2f})={d2y0:.2f}")
fig.canvas.draw_idle()
Lim = namedtuple('Lim', ['start', 'end'])
xlim = Lim(-5, 5)
ylim = Lim(-5, 5)
x0 = None
y0 = None
x1 = None
y1 = None
dy0 = None
fig, ax = plt.subplots()
ax.grid()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal')
ax.hlines(0, *xlim, lw=0.5)
ax.vlines(0, *ylim, lw=0.5)
fig.canvas.mpl_connect('button_press_event', on_move)
fig.canvas.mpl_connect('motion_notify_event', on_move)
line, = ax.plot([], [], 'm')
dot, = ax.plot([], [], '.m')
tang, = ax.plot([], [], 'g', lw=0.5)
circ = Circle((0, 0), 0, color='g', lw=0.5, fill=False)
ax.add_patch(circ)
plt.show()
|
the-stack_0_12627 | from typing import List
from pyrep.objects.dummy import Dummy
from pyrep.objects.joint import Joint
from rlbench.backend.task import Task
from rlbench.backend.conditions import JointCondition
OPTIONS = ['left', 'right']
class TurnTap(Task):
def init_task(self) -> None:
self.left_start = Dummy('waypoint0')
self.left_end = Dummy('waypoint1')
self.right_start = Dummy('waypoint5')
self.right_end = Dummy('waypoint6')
self.left_joint = Joint('left_joint')
self.right_joint = Joint('right_joint')
def init_episode(self, index: int) -> List[str]:
option = OPTIONS[index]
if option == 'right':
self.left_start.set_position(self.right_start.get_position())
self.left_start.set_orientation(self.right_start.get_orientation())
self.left_end.set_position(self.right_end.get_position())
self.left_end.set_orientation(self.right_end.get_orientation())
self.register_success_conditions(
[JointCondition(self.right_joint, 1.57)])
else:
self.register_success_conditions(
[JointCondition(self.left_joint, 1.57)])
return ['turn %s tap' % option,
'rotate the %s tap' % option,
'grasp the %s tap and turn it' % option]
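    # Note: the 1.57 rad threshold used in the success conditions above corresponds
    # to roughly a quarter turn (about 90 degrees) of the tap joint, assuming a
    # revolute joint measured in radians.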
def variation_count(self) -> int:
return 2
|
the-stack_0_12628 | import glob
import os
import ast
import sys
import json
from collections import Counter
sys.setrecursionlimit(1000000)
CODE_DIR = "python_top_code"
OUT_DIR = "stats"
def make_dir_ignore_exists(d):
try:
return os.mkdir(d)
except FileExistsError as E:
pass
def decode_data(data):
try:
data = data.decode("utf8")
return data
except UnicodeDecodeError:
pass
data = data.decode("ISO-8859-1")
return data
def gen_repo_asts(repo):
ok = 0
bad = 0
for file in glob.glob(f"{CODE_DIR}/{repo}/*.py"):
data = open(file, "rb").read()
data = decode_data(data)
if "generated" in data[:1024]:
print(f"skipping file {file}: autogenerated")
continue
try:
yield ast.parse(data)
ok += 1
except Exception:
bad += 1
print(f"ast generation finished: ok {ok}, bad {bad}")
def gen_ast_subnodes(ast_node):
for child in ast.iter_child_nodes(ast_node):
yield child
yield from gen_ast_subnodes(child)
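# Illustrative sketch (hypothetical snippet, mirroring what the main loop below does
# for every parsed file): the generator yields every descendant node depth-first.
#
#   tree = ast.parse("def f():\n    return 1\n")
#   [type(node).__name__ for node in gen_ast_subnodes(tree)]
#   # -> ['FunctionDef', 'arguments', 'Return', 'Constant']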
def save_counter(repo, filename, counter):
make_dir_ignore_exists(OUT_DIR)
make_dir_ignore_exists(f"{OUT_DIR}/{repo}")
with open(f"{OUT_DIR}/{repo}/{filename}", "w") as f:
for k, v in counter.most_common():
print(v, k, file=f)
c = Counter()
class_keywords_c = Counter()
class_bases_c = Counter()
class_decorators_c = Counter()
function_decorators_c = Counter()
async_function_decorators_c = Counter()
exception_handlers_c = Counter()
attributes_c = Counter()
func_names_c = Counter()
async_func_names_c = Counter()
class_names_c = Counter()
module_names_c = Counter()
from_module_names_c = Counter()
repo = sys.argv[1].replace("/", "_")
for cur_ast in gen_repo_asts(repo):
for ast_node in gen_ast_subnodes(cur_ast):
name = type(ast_node).__name__
c[name] += 1
if isinstance(ast_node, ast.For):
if ast_node.orelse:
c["bay_for_with_else"] += 1
elif isinstance(ast_node, ast.While):
if ast_node.orelse:
c["bay_while_with_else"] += 1
elif isinstance(ast_node, ast.ClassDef):
has_metaclass = False
for keyword in ast_node.keywords:
if keyword.arg == "metaclass":
has_metaclass = True
class_keywords_c[f"{keyword.arg}={ast.unparse(keyword.value)}"] += 1
if has_metaclass:
c["bay_class_with_metaclass"] += 1
bases = [ast.unparse(b) for b in ast_node.bases]
if bases and bases != ["object"]:
c["bay_class_with_bases"] += 1
if bases:
class_bases_c.update(bases)
else:
class_bases_c["<no_base_class>"] += 1
decorators = [ast.unparse(b) for b in ast_node.decorator_list]
if decorators:
c["bay_class_with_decorators"] += 1
if decorators:
class_decorators_c.update(decorators)
else:
class_decorators_c["<no_decorators>"] += 1
if ast.get_docstring(ast_node):
c["bay_class_with_docstring"] += 1
class_names_c[ast_node.name] += 1
elif isinstance(ast_node, ast.Try):
has_handlers = ast_node.handlers
has_final = ast_node.finalbody
has_else = ast_node.orelse
except_type = "bay_try"
if has_handlers:
except_type += "_except"
if has_final:
except_type += "_finally"
if has_else:
except_type += "_else"
c[except_type] += 1
elif isinstance(ast_node, ast.FunctionDef):
decorators = [ast.unparse(b) for b in ast_node.decorator_list]
if decorators:
c["bay_functions_with_decorators"] += 1
if decorators:
function_decorators_c.update(decorators)
else:
function_decorators_c["<no_decorators>"] += 1
if ast.get_docstring(ast_node):
c["bay_functions_with_docstring"] += 1
func_names_c[ast_node.name] += 1
if ast_node.returns:
c["bay_functions_annotation_in_returns"] += 1
elif isinstance(ast_node, ast.AsyncFunctionDef):
decorators = [ast.unparse(b) for b in ast_node.decorator_list]
if decorators:
c["bay_async_functions_with_decorators"] += 1
if decorators:
async_function_decorators_c.update(decorators)
else:
async_function_decorators_c["<no_decorators>"] += 1
if ast.get_docstring(ast_node):
c["bay_async_functions_with_docstring"] += 1
async_func_names_c[ast_node.name] += 1
if ast_node.returns:
c["bay_async_functions_annotation_in_returns"] += 1
elif isinstance(ast_node, ast.Assign):
if isinstance(ast_node.value, ast.Yield):
c["bay_assign_yield"] += 1
if isinstance(ast_node.value, ast.YieldFrom):
c["bay_assign_yield_from"] += 1
elif isinstance(ast_node, ast.ExceptHandler):
except_type = ast_node.type
try:
exception_handlers_c[ast.unparse(except_type)] += 1
except Exception:
pass
elif isinstance(ast_node, ast.Attribute):
attributes_c[ast_node.attr] += 1
elif isinstance(ast_node, ast.Import):
modules = [ast.unparse(b) for b in ast_node.names]
module_names_c.update(modules)
elif isinstance(ast_node, ast.ImportFrom):
from_module_names_c[ast_node.module] += 1
elif isinstance(ast_node, ast.arg):
if ast_node.annotation:
c["bay_arg_annotation"] += 1
save_counter(repo, "stat_ast.txt", c)
save_counter(repo, "stat_class_keywords.txt", class_keywords_c)
save_counter(repo, "stat_class_bases.txt", class_bases_c)
save_counter(repo, "stat_class_decorators_c.txt", class_decorators_c)
save_counter(repo, "stat_function_decorators_c.txt", function_decorators_c)
save_counter(repo, "stat_async_function_decorators.txt", async_function_decorators_c)
save_counter(repo, "stat_exception_handlers.txt", exception_handlers_c)
save_counter(repo, "stat_attributes.txt", attributes_c)
save_counter(repo, "stat_func_names.txt", func_names_c)
save_counter(repo, "stat_async_func_names.txt", async_func_names_c)
save_counter(repo, "stat_class_names.txt", class_names_c)
save_counter(repo, "stat_module_names.txt", module_names_c)
save_counter(repo, "stat_from_module_names.txt", from_module_names_c)
|
the-stack_0_12629 | # qubit number=2
# total number=10
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
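# Example of the oracle construction (this mirrors the call made in make_circuit
# below): for n=2 and the balanced function f(rep) = rep[-1], the target qubit is
# flipped whenever the last input bit is "1".
#
#   oracle = build_oracle(2, lambda rep: rep[-1])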
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[0],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=5
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.x(input_qubit[0]) # number=3
prog.y(input_qubit[1]) # number=6
prog.x(input_qubit[0]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
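    # Each statevector amplitude a_i is converted to a measurement probability
    # |a_i|^2 = a_i * conj(a_i), keyed by the basis state's bitstring.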
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class150.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_0_12630 | import logging
import asyncio
from .http_utils import Request, Response
from .exceptions import (
BadRequestException,
NotFoundException,
TimeoutException,
)
TIMEOUT = 5
# An HTTPServer object needs a Router object and an http_parser module, and is initialized with them
class HTTPServer(object):
"""
Contains objects that are shared by HTTPConnections and schedules async
connections.
:param router: An object that must expose the 'get_handler' interface.
:param http_parser: An object that must expose the 'parse_into' interface,
which works with a Request object and a bytearray.
:param loop: An object that implements the 'asyncio.BaseEventLoop'
interface.
"""
# HTTPServer(self.router, self.http_parser, self.loop)
def __init__(self, router, http_parser, loop):
self.router = router
self.http_parser = http_parser
self.loop = loop
async def handle_connection(self, reader, writer):
"""
        Creates and schedules an HTTPConnection given a pair of (reader, writer)
objects.
:param reader: An object that implements the 'asyncio.StreamReader'
interface.
:param writer: An object that implements the 'asyncio.StreamWriter'
interface.
"""
connection = HTTPConnection(self, reader, writer)
asyncio.ensure_future(connection.handle_request(), loop=self.loop)
'''
An HTTPConnection object represents a single client HTTP connection and handles
its request-response cycle:
use the http_parser module to parse the received byte stream into a Request object;
use a Router instance to find and call the correct handler to build a response;
finally, send that response back to the client.
'''
class HTTPConnection(object):
"""
Takes care of whole life cycle of a single TCP connection with a
HTTP client. First reads incoming data, parses it with
'http_server.parser', generates as Response with 'http_server.router'
and sends data back to client.
:param http_server: An instance of HTTPServer.
:param reader: An object that implements the 'asyncio.StreamReader'
interface.
:param writer: An object that implements the 'asyncio.StreamWriter'
interface.
"""
def __init__(self, http_server, reader, writer):
        self.router = http_server.router  # Router instance that finds and calls the right handler to build a response
        self.http_parser = http_server.http_parser  # http_parser module that parses the incoming byte stream into a Request object
self.loop = http_server.loop
self._reader = reader
self._writer = writer
self._buffer = bytearray()
self._conn_timeout = None
self.request = Request()
async def handle_request(self):
"""
Reads bytes from a connection and attempts to parse them
incrementally until it can issue a Response and close the
connection.
Also handles resetting the timeout counter for a connection.
"""
try:
while not self.request.finished and not self._reader.at_eof():
data = await self._reader.read(1024)
if data:
self._reset_conn_timeout()
await self.process_data(data)
if self.request.finished:
await self.reply()
elif self._reader.at_eof():
raise BadRequestException()
except (NotFoundException,
BadRequestException) as e:
self.error_reply(e.code, body=Response.reason_phrases[e.code])
except Exception as e:
logging.error(e)
logging.error(e.__traceback__)
self.error_reply(500, body=Response.reason_phrases[500])
self.close_connection()
async def process_data(self, data):
"""
Accumulates data inside of _buffer and attempts to
parse the accumulated data.
:param data: A bytearray object.
"""
self._buffer.extend(data)
self._buffer = self.http_parser.parse_into(
self.request, self._buffer)
def close_connection(self):
"""
Cancels the timeout timer and closes the connection.
"""
logging.debug('Closing connection')
self._cancel_conn_timeout()
self._writer.close()
def error_reply(self, code, body=''):
"""
Generates a simple error response.
:param code: Integer signifying the HTTP error.
:param body: A string that contains an error message.
"""
response = Response(code=code, body=body)
self._writer.write(response.to_bytes())
self._writer.drain()
async def reply(self):
"""
Obtains and applies the correct handler from 'self.router'
and write the Response back to the client.
"""
logging.debug('Replying to request')
request = self.request
handler = self.router.get_handler(request.path)
response = await handler.handle(request)
if not isinstance(response, Response):
response = Response(code=200, body=response)
self._writer.write(response.to_bytes())
await self._writer.drain()
def _conn_timeout_close(self):
self.error_reply(500, 'timeout')
self.close_connection()
def _reset_conn_timeout(self, timeout=TIMEOUT):
self._cancel_conn_timeout()
self._conn_timeout = self.loop.call_later(
timeout, self._conn_timeout_close)
def _cancel_conn_timeout(self):
if self._conn_timeout:
self._conn_timeout.cancel()
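# Minimal wiring sketch (hypothetical; assumes 'router' and 'http_parser' objects
# that satisfy the interfaces documented above):
#
#   loop = asyncio.get_event_loop()
#   server = HTTPServer(router, http_parser, loop)
#   coro = asyncio.start_server(server.handle_connection, '127.0.0.1', 8080)
#   loop.run_until_complete(coro)
#   loop.run_forever()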
|
the-stack_0_12631 | # -*- coding: utf-8 -*-
import logging
from pyramid.interfaces import IRequest
from openregistry.assets.core.includeme import IContentConfigurator
from openregistry.assets.core.interfaces import IAssetManager
from openregistry.assets.basic.models import Asset, IBasicAsset
from openregistry.assets.basic.adapters import BasicAssetConfigurator, BasicAssetManagerAdapter
from openregistry.assets.basic.constants import (
DEFAULT_ASSET_BASIC_TYPE,
DEFAULT_LEVEL_OF_ACCREDITATION
)
LOGGER = logging.getLogger(__name__)
def includeme(config, plugin_config=None):
config.scan("openregistry.assets.basic.views")
config.scan("openregistry.assets.basic.subscribers")
config.registry.registerAdapter(BasicAssetConfigurator,
(IBasicAsset, IRequest),
IContentConfigurator)
config.registry.registerAdapter(BasicAssetManagerAdapter,
(IBasicAsset, ),
IAssetManager)
asset_types = plugin_config.get('aliases', [])
if plugin_config.get('use_default', False):
asset_types.append(DEFAULT_ASSET_BASIC_TYPE)
for at in asset_types:
config.add_assetType(Asset, at)
LOGGER.info("Included openregistry.assets.basic plugin", extra={'MESSAGE_ID': 'included_plugin'})
# add accreditation level
if not plugin_config.get('accreditation'):
config.registry.accreditation['asset'][Asset._internal_type] = DEFAULT_LEVEL_OF_ACCREDITATION
else:
config.registry.accreditation['asset'][Asset._internal_type] = plugin_config['accreditation']
|
the-stack_0_12632 | import base64
import os
import shutil
import string
import sys
import tempfile
import unittest
from datetime import timedelta
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import \
SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import \
SessionStore as CookieSession
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import (
JSONSerializer, PickleSerializer,
)
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import (
RequestFactory, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import patch_logger
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.six.moves import http_cookies
from .custom_db_backend import SessionStore as CustomDatabaseSession
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_save_doesnt_clear_data(self):
self.session['a'] = 'b'
self.session.save()
self.assertEqual(self.session['a'], 'b')
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail(
"The session object did not save properly. "
"Middleware may be saving cache items without namespaces."
)
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ''
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = '1234567'
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = '12345678'
self.assertEqual(self.session.session_key, '12345678')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertIn('corrupted', calls[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
if self.backend is CookieSession:
raise unittest.SkipTest("Cookie backend doesn't have an external store to create records in.")
session = self.backend('someunknownkey')
session.load()
self.assertFalse(session.exists(session.session_key))
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, 'someunknownkey')
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = 'django.contrib.sessions.backends.db'
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session['x'] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(force_text(s), session_key)
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = 'sessions_tests.custom_db_backend'
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session['_auth_user_id'] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop('_auth_user_id')
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, None)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS.
self.assertRaises(InvalidCacheBackendError, self.backend)
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(caches['default'].get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'session',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(caches['default'].get(self.session.cache_key), None)
self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None)
class SessionMiddlewareTests(TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn(http_cookies.Morsel._reserved['httponly'],
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
# Simulate a request the modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
def test_session_delete_on_end(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header looks like:
# Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; expires=Thu, 01-Jan-1970 00:00:00 GMT; '
'Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
@override_settings(SESSION_COOKIE_DOMAIN='.example.local')
def test_session_delete_on_end_with_custom_domain(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc'
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the cookie was deleted, not recreated.
# A deleted cookie header with a custom domain looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/
self.assertEqual(
'Set-Cookie: {}={}; Domain=.example.local; expires=Thu, '
'01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/'.format(
settings.SESSION_COOKIE_NAME,
'""' if sys.version_info >= (3, 5) else '',
),
str(response.cookies[settings.SESSION_COOKIE_NAME])
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Simulate a request that ends the session
middleware.process_request(request)
request.session.flush()
# Handle the response through the middleware
response = middleware.process_response(request, response)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response['Vary'], 'Cookie')
def test_empty_session_saved(self):
""""
If a session is emptied of data but still has a key, it should still
be updated.
"""
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
# Set a session key and some data.
middleware.process_request(request)
request.session['foo'] = 'bar'
# Handle the response through the middleware.
response = middleware.process_response(request, response)
self.assertEqual(tuple(request.session.items()), (('foo', 'bar'),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Empty the session data.
del request.session['foo']
# Handle the response through the middleware.
response = HttpResponse('Session test')
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
'Set-Cookie: sessionid=%s' % request.session.session_key,
str(response.cookies)
)
self.assertEqual(response['Vary'], 'Cookie')
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class CookieSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
self.session.serializer = PickleSerializer
self.session.load()
|
the-stack_0_12635 | def math():
i_put = int(input())
if 5 < i_put < 2000:
for i in range(1, i_put+1):
if i % 2 == 0:
print(str(i) + '^2 =', i*i)
if __name__ == '__main__':
math()
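# A quick illustrative run (assuming the value 10 is typed at the prompt):
#   2^2 = 4
#   4^2 = 16
#   6^2 = 36
#   8^2 = 64
#   10^2 = 100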
|
the-stack_0_12637 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
'''
------------------------------------------------------------
Main entry for ImgReSizer.
.. module:: `Main`
:platform: Unix
:synopsis: Takes configuration json with :py:class: imgresizer.CommandLine and runs image processing
.. moduleauthor:: Tumurtogtokh Davaakhuu <[email protected]>
------------------------------------------------------------
'''
# IMPORT STANDARD
import sys
import os
# IMPORT Local
from imgresizer import Img
from imgresizer import ImageSizerController
from imgresizer import CommandLine
# =============================================================================
# MAIN
def main():
cli = CommandLine()
config = cli.load_configuration()
if cli.exit:
sys.exit(0)
IMG_URLS = cli.process_img_url_file()
TARGET = config['targets']
MAX_THREADS = config['num_threads']
DATA = config['data']
INCOMING = config['input_dir']
OUTGOING = config['output_dir']
img_sizer = ImageSizerController(Img(DATA, INCOMING, OUTGOING, MAX_THREADS),
IMG_URLS, TARGET)
# img_sizer.perform_resizing()
img_sizer.make_imgs()
if __name__ == '__main__':
main()
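# For orientation, a sketch of the configuration dict that main() appears to
# expect from CommandLine.load_configuration(); the concrete values below are
# assumptions for illustration only, not taken from the project:
#
#   {
#       "targets": [32, 64, 128],     # requested output sizes
#       "num_threads": 4,
#       "data": "data",
#       "input_dir": "incoming",
#       "output_dir": "outgoing"
#   }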
|
the-stack_0_12640 | from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import re_path
from api.bookmarks import views as bookmark_views
from api.experiment_groups import views
from constants.urls import GROUP_ID_PATTERN, ID_PATTERN, NAME_PATTERN, USERNAME_PATTERN
groups_urlpatterns = [
re_path(r'^{}/{}/groups/{}/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
views.ExperimentGroupDetailView.as_view()),
re_path(r'^{}/{}/groups/{}/statuses/?$'.format(
USERNAME_PATTERN, NAME_PATTERN, GROUP_ID_PATTERN),
views.ExperimentGroupStatusListView.as_view()),
re_path(r'^{}/{}/groups/{}/stop/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
views.ExperimentGroupStopView.as_view()),
re_path(
r'^{}/{}/groups/{}/bookmark/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
bookmark_views.ExperimentGroupBookmarkCreateView.as_view()),
re_path(
r'^{}/{}/groups/{}/unbookmark/?$'.format(USERNAME_PATTERN, NAME_PATTERN, ID_PATTERN),
bookmark_views.ExperimentGroupBookmarkDeleteView.as_view()),
]
# Order is important, because the patterns could swallow other urls
urlpatterns = format_suffix_patterns(groups_urlpatterns)
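# For orientation: a request such as GET /alice/mnist/groups/42/ (hypothetical
# username/project names) matches the first pattern above and is dispatched to
# ExperimentGroupDetailView; format_suffix_patterns additionally allows
# suffixed forms such as /alice/mnist/groups/42.json.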
|
the-stack_0_12641 | # coding:utf-8
import collections
import csv
import os
from util.log import logger
logger = logger()
class Template(object):
def __init__(self,
base_dic, cmp_dic,
base_cost, cmp_cost,
base_call_times, cmp_call_times,
base_method_thread, cmp_method_thread,
base_theads_pid, cmp_theads_pid):
self.base = base_dic # base_sorted_dic, cmp_sorted_dic, base_cost, cmp_cost,base_call_times,cmp_call_times
self.cmp = cmp_dic
self.base_cost = base_cost
self.cmp_cost = cmp_cost
self.base_call_times = base_call_times
self.cmp_call_times = cmp_call_times
self.order_base_dic = collections.OrderedDict()
self.order_cmp_dic = collections.OrderedDict()
self.order_base_keys, self.order_base_values = self.initObjDatas(self.base, self.order_base_dic)
self.order_cmp_keys, self.order_cmp_values = self.initObjDatas(self.cmp, self.order_cmp_dic)
self.base_method_thread = base_method_thread
self.cmp_method_thread = cmp_method_thread
self.base_theads_pid = base_theads_pid
self.cmp_theads_pid = cmp_theads_pid
def initObjDatas(self, obj, init_obj):
_keys = []
_values = []
for each in obj:
init_obj[each[0]] = each[1]
for _k, _v in init_obj.items():
_keys.append(_k)
_values.append(_v)
return _keys, _values
def generateTable(self, path, rows, data):
if os.path.isfile(path):
os.remove(path)
csvfile = file(path, "wb")
writer = csv.writer(csvfile)
# writer.writerow(rows)
writer.writerows(data)
csvfile.close()
def searchDictList(self, orderDict):
keys = []
values = []
for k, v in orderDict.items():
keys.append(k)
values.append(v)
return keys, values
def generateTableData(self, path, rows):
''' ['called method', 'owning thread', 'thread PID', 'base-branch rank', 'comparison-branch rank', 'base-branch method cost', 'comparison-branch method cost',
'cost difference (comparison - base)', 'cost increase ratio (%)', 'base-branch call count', 'comparison-branch call count', 'change in cost rank'] '''
logger.debug("self.cmp_cost:\n" + str(self.cmp_cost))
logger.debug("self.base_cost:\n" + str(self.base_cost))
if self.base_cost != 0:
ratio = format(float(self.cmp_cost - self.base_cost) / float(self.base_cost), '.2%')
else:
ratio = self.cmp_cost
data = []
add_rows = rows
add_rows[0] = add_rows[0] + "- 系数: " + str(ratio)
add_flag = 0
for cmp_obj in self.order_cmp_keys:
''' Case: cmp_obj contains a newly added method (not present in the base branch) '''
if cmp_obj not in self.order_base_keys:
add_flag = 1
method = cmp_obj
base_index = "-"
cmp_index = self.order_cmp_keys.index(cmp_obj)
base_time = 0
cmp_time = self.order_cmp_values[cmp_index]
cmp_call_times = self.cmp_call_times[cmp_obj] if self.cmp_call_times.has_key(cmp_obj) else "-"
if self.cmp_method_thread.has_key(cmp_obj):
cmp_thread = self.cmp_method_thread[cmp_obj]
self.cmp_method_thread.pop(cmp_obj)
else:
cmp_thread = "-"
base_call_times = 0
diff = cmp_time
rate = format(float(1), '.2%')
rank_change = cmp_index
content = (
method, str(cmp_thread), str(base_index), str(cmp_index), str(base_time), str(cmp_time), str(diff),
str(rate), str(base_call_times), str(cmp_call_times), str(rank_change))
data.append(content)
if add_flag == 1:
data.insert(0, add_rows)
rows[0] = rows[0] + "- 系数: " + str(ratio)
data.append(rows)
for base_obj in self.order_base_keys:
method = base_obj
base_index = self.order_base_keys.index(base_obj) # get the rank of the base key
if base_obj in self.order_cmp_keys:
cmp_index = self.order_cmp_keys.index(base_obj) # the base_obj method still exists among the cmp_obj methods
base_call_times = self.base_call_times[base_obj] if self.base_call_times.has_key(base_obj) else "-"
cmp_call_times = self.cmp_call_times[base_obj] if self.cmp_call_times.has_key(base_obj) else "-"
else:
cmp_index = "-" # 当base_obj方法在cmp_obj已经删减
base_call_times = self.base_call_times[base_obj] if self.base_call_times.has_key(base_obj) else "-"
cmp_call_times = 0
if self.base_method_thread.has_key(base_obj):
base_thread = self.base_method_thread[base_obj]
self.base_method_thread.pop(base_obj)
else:
base_thread = "-"
base_time = self.order_base_values[base_index]
if cmp_index == "-":
cmp_time = 0
rank_change = base_index
else:
cmp_time = self.order_cmp_values[cmp_index]
rank_change = base_index - cmp_index
diff = cmp_time - base_time
try:
rate = format(float(diff) / float(base_time), '.2%') # -100% means the base_obj method has been removed in cmp_obj
except Exception as e:
rate = "error"
content = (
method, str(base_thread), str(base_index), str(cmp_index), str(base_time), str(cmp_time), str(diff),
str(rate), str(base_call_times), str(cmp_call_times), str(rank_change))
data.append(content)
self.generateTable(path, rows, data)
logger.debug("self.base_cost-self.cmp_cost:\n" + str(self.base_cost - self.cmp_cost))
logger.debug("self.base_method_thread:\n" + str(self.base_method_thread))
logger.debug("self.cmp_method_thread:\n" + str(self.cmp_method_thread))
|
the-stack_0_12643 | import re
import os
import nltk
import zlib
import codecs
import shutil
import logging
from unidecode import unidecode
from indra.literature.pmc_client import extract_text
from indra.resources.greek_alphabet import greek_alphabet
logger = logging.getLogger(__name__)
class IsiPreprocessor(object):
"""Preprocess a set of documents, one by one, and add the preprocessed
text to a temporary directory in a format suitable for the ISI reader.
The ISI reader requires plain text with one sentence per line.
Attributes
----------
preprocessed_dir : str
The directory holding the literature text preprocessed and sentence
tokenized in a format suitable for the ISI reader
next_file_id : int
The next file with preprocessed text will be named next_file_id.txt
pmids : dict
A dictionary mapping file ids to the pmid of the text corresponding
to that file, can be None if unknown
extra_annotations : dict
A dictionary mapping file ids to a (possibly empty) dictionary with
additional annotations to include for statements extracted from this
document
"""
def __init__(self, preprocessed_dir):
preprocessed_dir = os.path.abspath(preprocessed_dir)
self.preprocessed_dir = preprocessed_dir
self.next_file_id = 1
self.pmids = {}
self.extra_annotations = {}
# This directory should be empty
contents = os.listdir(preprocessed_dir)
if len(contents) != 0:
logger.warning('IsiPreprocessor should get an empty directory in' +
' which to store preprocessed files.')
def register_preprocessed_file(self, infile, pmid, extra_annotations):
"""Set up already preprocessed text file for reading with ISI reader.
This is essentially a mock function to "register" already preprocessed
files and get an IsiPreprocessor object that can be passed to
the IsiProcessor.
Parameters
----------
infile : str
Path to an already preprocessed text file (i.e. one ready to
be sent for reading to ISI reader).
pmid : str
The PMID corresponding to the file
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
infile_base = os.path.basename(infile)
outfile = os.path.join(self.preprocessed_dir, infile_base)
shutil.copyfile(infile, outfile)
infile_key = os.path.splitext(infile_base)[0]
self.pmids[infile_key] = pmid
self.extra_annotations[infile_key] = extra_annotations
def preprocess_plain_text_string(self, text, pmid, extra_annotations):
"""Preprocess plain text string for use by ISI reader.
Preprocessing is done by tokenizing into sentences and writing
each sentence on its own line in a plain text file. All other
preprocessing functions ultimately call this one.
Parameters
----------
text : str
The plain text of the article of abstract
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
output_file = '%s.txt' % self.next_file_id
output_file = os.path.join(self.preprocessed_dir, output_file)
# Replace greek characters with corresponding strings
for greek_letter, spelled_letter in greek_alphabet.items():
text = text.replace(greek_letter, spelled_letter)
# Replace all other unicode characters with nearest ascii equivalents
text = unidecode(text)
# Tokenize sentence
sentences = nltk.sent_tokenize(text)
# Write sentences to text file
first_sentence = True
with codecs.open(output_file, 'w', encoding='utf-8') as f:
for sentence in sentences:
if not first_sentence:
f.write('\n')
f.write(sentence.rstrip())
first_sentence = False
# Store annotations
self.pmids[str(self.next_file_id)] = pmid
self.extra_annotations[str(self.next_file_id)] = extra_annotations
# Increment file id
self.next_file_id += 1
def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
"""Preprocess a plain text file for use with ISI reder.
Preprocessing results in a new text file with one sentence
per line.
Parameters
----------
filename : str
The name of the plain text file
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
content = f.read()
self.preprocess_plain_text_string(content, pmid,
extra_annotations)
def preprocess_nxml_file(self, filename, pmid, extra_annotations):
"""Preprocess an NXML file for use with the ISI reader.
Preprocessing is done by extracting plain text from NXML and then
creating a text file with one sentence per line.
Parameters
----------
filename : str
Filename (more specifically the file path) of an nxml file to
process
pmid : str
The PMID from which it comes, or None if not specified
extra_annotations : dict
Extra annotations to be added to each statement, possibly including
metadata about the source (annotations with the key "interaction"
will be overridden)
"""
with open(filename, 'r') as fh:
txt_content = extract_text(fh.read())
# We need to remove some common LaTEX commands from the converted text
# or the reader will get confused
cmd1 = r'[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
cmd2 = r'[^ \{\}]+\{[^\{\}]+\}'
txt_content = re.sub(cmd1, '', txt_content)
txt_content = re.sub(cmd2, '', txt_content)
# Preprocess text extracted from nxml
self.preprocess_plain_text_string(txt_content, pmid, extra_annotations)
def preprocess_abstract_list(self, abstract_list):
"""Preprocess abstracts in database pickle dump format for ISI reader.
For each abstract, creates a plain text file with one sentence per
line, and stores metadata to be included with each statement from
that abstract.
Parameters
----------
abstract_list : list[dict]
Compressed abstracts with corresponding metadata in INDRA database
pickle dump format.
"""
for abstract_struct in abstract_list:
abs_format = abstract_struct['format']
content_type = abstract_struct['text_type']
content_zipped = abstract_struct['content']
tcid = abstract_struct['tcid']
trid = abstract_struct['trid']
assert(abs_format == 'text')
assert(content_type == 'abstract')
pmid = None # Don't worry about pmid for now
extra_annotations = {'tcid': tcid, 'trid': trid}
# Uncompress content
content = zlib.decompress(content_zipped,
zlib.MAX_WBITS+16).decode('utf-8')
self.preprocess_plain_text_string(content, pmid, extra_annotations)
def iter_outputs(self, output_dir):
"""Iterate over the outputs in a given directory using stored metadata.
For each of the output JSONs, retrieve the extra annotations for that
file, and link the file with its corresponding PMID.
Parameters
----------
output_dir : str
The path to the directory where the JSON outputs were dumped.
"""
for basename, pmid in self.pmids.items():
fname = os.path.join(output_dir, '%s.json' % basename)
extra_annotations = self.extra_annotations.get(basename, {})
yield fname, pmid, extra_annotations
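# Rough end-to-end sketch of how this preprocessor is typically driven before
# handing the directory to the ISI reader (the paths, pmid and sentences are
# placeholders):
#
#   import tempfile
#   work_dir = tempfile.mkdtemp()
#   pp = IsiPreprocessor(work_dir)
#   pp.preprocess_plain_text_string('MEK phosphorylates ERK. ERK is active.',
#                                   pmid='12345', extra_annotations={})
#   # ... run the ISI reader over work_dir, writing JSON files to out_dir ...
#   for fname, pmid, extra in pp.iter_outputs(out_dir):
#       print(fname, pmid, extra)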
|
the-stack_0_12644 | import numpy as np
import math
from instrument.geometry.pml import weave
from instrument.geometry import shapes, operations
import os, sys
class Clampcell(object):
def __init__(self, total_height=False):
self.sample_height=28.57 #mm
if total_height is True:
self.sample_height=95.758
###### OUTER BODY #############
def outer_body(self):
Al_OutDiameter = 32.05 # mm
Al_OutRadius=Al_OutDiameter/2
Al_Height=self.sample_height #28.57 #mm (total height 95.758 mm)
Al_InSmallestCone_Dia= 14.59 #mm (inner boundary is a tapered cylinder, bottom diameter)
Al_InSmallestCone_Rad=Al_InSmallestCone_Dia/2
Al_InconeAngle= 2
Al_InHeight=Al_Height+10 #mm (tapered cylinder height) (this should be the same as Al_Height, but in constructive geometry the inner height has to be larger for correct subtraction)
Al_InLargestCone_Dia= (2* np.tan(np.deg2rad(Al_InconeAngle/2))*Al_InHeight)+Al_InSmallestCone_Dia #(tapered cylinder top diameter)
Al_InLargestCone_Rad=Al_InLargestCone_Dia/2
Al_InSmallest_ConeHeight=Al_InSmallestCone_Dia/(2*np.tan(np.deg2rad(Al_InconeAngle/2)))
Al_InLargest_ConeHeight=Al_InSmallest_ConeHeight+Al_InHeight
Al_boxHeightToSubtract=Al_InSmallest_ConeHeight*2
Al_boxthisckness= Al_InSmallestCone_Dia+20
Al_HalfHeight=Al_InHeight/2
Al_moving_height=Al_InSmallest_ConeHeight+Al_HalfHeight
### Create the strings for OUTER BODY ######
Al_OutRadius_str=str(Al_OutRadius)+r'*mm'
Al_Height_str=str(Al_Height)+r'*mm'
Al_InLargestCone_Rad_str=str(Al_InLargestCone_Rad)+r'*mm'
Al_InLargest_ConeHeight_str=str(Al_InLargest_ConeHeight)+r'*mm'
Al_InSmallest_ConeHeight_str=str(Al_InSmallest_ConeHeight)+r'*mm'
Al_boxHeightToSubtract_str=str(Al_boxHeightToSubtract)+r'*mm'
Al_boxthisckness_str=str(Al_boxthisckness)+r'*mm'
Al_moving_height_str=str(-Al_moving_height)+r'*mm'
#create the inner Al largest cone
Al_largest_cone=shapes.cone(radius=Al_InLargestCone_Rad_str, height=Al_InLargest_ConeHeight_str) # upside down
#rotation to make top wider
Al_largest_cone_widertip=operations.rotate(Al_largest_cone, angle="180*deg",vertical="0",transversal="1",beam="0")
#make a tapered cylinder
Al_tapered_cylinder= operations.Difference(Al_largest_cone_widertip,
shapes.block(thickness=Al_boxthisckness_str,height=Al_boxHeightToSubtract_str,width=Al_boxthisckness_str) )
#moving the center of the cylinder to the center of the coordinate
Al_centered_taperedCylinder=operations.translate(Al_tapered_cylinder, vertical=Al_moving_height_str)
#Creating the outer Al body
outer_Al = operations.subtract(
shapes.cylinder(radius=Al_OutRadius_str, height=Al_Height_str),
Al_centered_taperedCylinder,
)
return(outer_Al)
######## INNER SLEEVE ##########
def inner_sleeve(self):
CuBe_InDiameter = 4.74 # mm
CuBe_InRadius=CuBe_InDiameter/2
CuBe_InHeight=self.sample_height+10 #mm (total height 95.758 mm)
CuBe_Height=self.sample_height
CuBe_OutSmallestCone_Dia=14.63 #(outer boundary is a tapered cylinder, bottom diameter)
CuBe_OutSmallestCone_Rad=CuBe_OutSmallestCone_Dia/2
CuBe_OutconeAngle= 2 # the taper angle
CuBe_OutLargestCone_Dia= (2* np.tan(np.deg2rad(CuBe_OutconeAngle/2))*CuBe_Height)+CuBe_OutSmallestCone_Dia #(tapered cylinder top diameter)
CuBe_OutLargestCone_Rad=CuBe_OutLargestCone_Dia/2
CuBe_OutSmallest_ConeHeight=CuBe_OutSmallestCone_Dia/(2*np.tan(np.deg2rad(CuBe_OutconeAngle/2)))
CuBe_OutLargest_ConeHeight=CuBe_OutSmallest_ConeHeight+CuBe_Height
CuBe_boxHeightToSubtract=CuBe_OutSmallest_ConeHeight*2
CuBe_boxthisckness= CuBe_OutSmallestCone_Dia+20
CuBe_HalfHeight=CuBe_Height/2
CuBe_moving_height=CuBe_OutSmallest_ConeHeight+CuBe_HalfHeight
### Create the strings for INNER SLEEVE ######
CuBe_InRadius_str=str(CuBe_InRadius)+r'*mm'
CuBe_InHeight_str=str(CuBe_InHeight)+r'*mm'
CuBe_Height_str=str(CuBe_Height)+r'*mm'
CuBe_OutLargestCone_Rad_str=str(CuBe_OutLargestCone_Rad)+r'*mm'
CuBe_OutLargest_ConeHeight_str=str(CuBe_OutLargest_ConeHeight)+r'*mm'
CuBe_boxHeightToSubtract_str=str(CuBe_boxHeightToSubtract)+r'*mm'
CuBe_boxthisckness_str=str(CuBe_boxthisckness)+r'*mm'
CuBe_moving_height_str=str(-CuBe_moving_height)+r'*mm'
#create the outer CuBe largest cone
CuBe_largest_cone=shapes.cone(radius=CuBe_OutLargestCone_Rad_str, height=CuBe_OutLargest_ConeHeight_str) # upside down
#rotation to make top wider
CuBe_largest_cone_widertip=operations.rotate(CuBe_largest_cone, angle="180*deg",vertical="0",transversal="1",beam="0")
#make a tapered cylinder
CuBe_tapered_cylinder= operations.Difference(CuBe_largest_cone_widertip,
shapes.block(thickness=CuBe_boxthisckness_str,height=CuBe_boxHeightToSubtract_str,width=CuBe_boxthisckness_str) )
#moving the center of the cylinder to the center of the coordinate
CuBe_centered_taperedCylinder=operations.translate(CuBe_tapered_cylinder, vertical=CuBe_moving_height_str)
#Creating the InnerSleeve
CuBe_innerSleeve = operations.subtract(
CuBe_centered_taperedCylinder,
shapes.cylinder(radius=CuBe_InRadius_str, height=CuBe_InHeight_str),
)
return(CuBe_innerSleeve)
####### SAMPLE ######### ( the sample is a cylinder)
def sample(self):
sample_Height=27.3 #mm
sample_Diameter=4.16 #mm
sample_Radius=sample_Diameter/2
##covert to string###
sample_Height_str=str(sample_Height)+r'*mm'
sample_Radius_str=str(sample_Radius)+r'*mm'
##cylindrical sample##
sample= shapes.cylinder(radius=sample_Radius_str, height=sample_Height_str)
return(sample)
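# Illustrative composition of the three pieces above (a sketch only; how the
# shapes are combined and rendered depends on the instrument.geometry tooling
# used downstream):
#
#   cell = Clampcell(total_height=False)
#   body = cell.outer_body()      # Al body with the tapered bore subtracted
#   sleeve = cell.inner_sleeve()  # CuBe sleeve between the bore and the sample
#   sample = cell.sample()        # cylindrical sample volume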
|
the-stack_0_12648 | import tensorflow as tf
import numpy as np
import os
import math
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
@tf.function
def one_hot(labels, class_size):
"""
Create one hot label matrix of size (N, C)
Inputs:
- labels: Labels Tensor of shape (N,) representing a ground-truth label
for each MNIST image
- class_size: Scalar representing the number of target classes in our dataset
Returns:
- targets: One-hot label matrix of (N, C), where targets[i, j] = 1 when
the ground truth label for image i is j, and targets[i, :j] &
targets[i, j + 1:] are equal to 0
"""
return tf.one_hot(labels, class_size)
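# For example, one_hot(tf.constant([2, 0]), 4) evaluates to
# [[0., 0., 1., 0.],
#  [1., 0., 0., 0.]]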
def save_model_weights(model, args):
"""
Save trained VAE model weights to model_ckpts/
Inputs:
- model: Trained VAE model.
- args: All arguments.
"""
model_flag = "cvae" if args.is_cvae else "vae"
output_dir = os.path.join("model_ckpts", model_flag)
output_path = os.path.join(output_dir, model_flag)
os.makedirs("model_ckpts", exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
model.save_weights(output_path)
def show_vae_images(model, latent_size):
"""
Call this only if the model is VAE!
Generate 10 images from random vectors.
Show the generated images from your trained VAE.
Image will be saved to outputs/show_vae_images.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
# Generated images from vectors of random values.
z = tf.random.normal(shape=[10, latent_size])
samples = model.decoder(z).numpy()
# Visualize
fig = plt.figure(figsize=(10, 1))
gspec = gridspec.GridSpec(1, 10)
gspec.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gspec[i])
plt.axis("off")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect("equal")
plt.imshow(sample.reshape(28, 28), cmap="Greys_r")
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_vae_images.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
def show_vae_interpolation(model, latent_size):
"""
Call this only if the model is VAE!
Generate interpolation between two random latent vectors.
Show the generated images from your trained VAE.
Image will be saved to outputs/show_vae_interpolation.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
def show_interpolation(images):
"""
A helper to visualize the interpolation.
"""
images = tf.reshape(images, [images.shape[0], -1]) # images reshape to (batch_size, D)
sqrtn = int(math.ceil(math.sqrt(images.shape[0])))
sqrtimg = int(math.ceil(math.sqrt(images.shape[1])))
fig = plt.figure(figsize=(sqrtn, sqrtn))
gs = gridspec.GridSpec(sqrtn, sqrtn)
gs.update(wspace=0.05, hspace=0.05)
for i, img in enumerate(images):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(tf.reshape(img, [sqrtimg, sqrtimg]))
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_vae_interpolation.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
S = 12
z0 = tf.random.normal(shape=[S, latent_size], dtype=tf.dtypes.float32) # [S, latent_size]
z1 = tf.random.normal(shape=[S, latent_size], dtype=tf.dtypes.float32)
w = tf.linspace(0, 1, S)
w = tf.cast(tf.reshape(w, (S, 1, 1)), dtype=tf.float32) # [S, 1, 1]
z = tf.transpose(w * z0 + (1 - w) * z1, perm=[1, 0, 2])
z = tf.reshape(z, (S * S, latent_size)) # [S, S, latent_size]
x = model.decoder(z) # [S*S, 1, 28, 28]
show_interpolation(x)
def show_cvae_images(model, latent_size):
"""
Call this only if the model is CVAE!
Conditionally generate 10 images for each digit.
Show the generated images from your trained CVAE.
Image will be saved to outputs/show_cvae_images.pdf
Inputs:
- model: Your trained model.
- latent_size: Latent size of your model.
"""
# Conditionally generated images from vectors of random values.
num_generation = 100
num_classes = 10
num_per_class = num_generation // num_classes
c = tf.eye(num_classes) # [one hot labels for 0-9]
z = []
labels = []
for label in range(num_classes):
curr_c = c[label]
curr_c = tf.broadcast_to(curr_c, [num_per_class, len(curr_c)])
curr_z = tf.random.normal(shape=[num_per_class, latent_size])
curr_z = tf.concat([curr_z, curr_c], axis=-1)
z.append(curr_z)
labels.append([label] * num_per_class)
z = np.concatenate(z)
labels = np.concatenate(labels)
samples = model.decoder(z).numpy()
# Visualize
rows = num_classes
cols = num_generation // rows
fig = plt.figure(figsize=(cols, rows))
gspec = gridspec.GridSpec(rows, cols)
gspec.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gspec[i])
plt.axis("off")
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect("equal")
plt.imshow(sample.reshape(28, 28), cmap="Greys_r")
# Save the generated images
os.makedirs("outputs", exist_ok=True)
output_path = os.path.join("outputs", "show_cvae_images.pdf")
plt.savefig(output_path, bbox_inches="tight")
plt.close(fig)
def load_weights(model, is_cvae):
"""
Load the trained model's weights.
Inputs:
- model: Your untrained model instance.
Returns:
- model: Trained model.
"""
num_classes = 10
inputs = tf.zeros([1, 1, 28, 28]) # Random data sample
labels = tf.constant([[0]])
if is_cvae:
weights_path = os.path.join("model_ckpts", "cvae", "cvae")
one_hot_vec = one_hot(labels, num_classes)
_ = model(inputs, one_hot_vec)
model.load_weights(weights_path)
else:
weights_path = os.path.join("model_ckpts", "vae", "vae")
_ = model(inputs)
model.load_weights(weights_path)
return model
|
the-stack_0_12649 | import numpy as np
import torch
from gym import spaces
from torch import nn as nn
from torch.nn import functional as F
def loss_function_factory(loss_function):
if loss_function == "l2":
return F.mse_loss
elif loss_function == "l1":
return F.l1_loss
elif loss_function == "smooth_l1":
return F.smooth_l1_loss
elif loss_function == "bce":
return F.binary_cross_entropy
else:
raise ValueError("Unknown loss function : {}".format(loss_function))
def optimizer_factory(params, optimizer_type="ADAM", **kwargs):
if optimizer_type == "ADAM":
return torch.optim.Adam(params=params, **kwargs)
elif optimizer_type == "RMS_PROP":
return torch.optim.RMSprop(params=params, **kwargs)
else:
raise ValueError("Unknown optimizer type: {}".format(optimizer_type))
def model_factory(type="MultiLayerPerceptron", **kwargs) -> nn.Module:
from rlberry.agents.torch.utils.attention_models import EgoAttentionNetwork
from rlberry.agents.torch.utils.models import (
MultiLayerPerceptron,
DuelingNetwork,
ConvolutionalNetwork,
Table,
)
if type == "MultiLayerPerceptron":
return MultiLayerPerceptron(**kwargs)
elif type == "DuelingNetwork":
return DuelingNetwork(**kwargs)
elif type == "ConvolutionalNetwork":
return ConvolutionalNetwork(**kwargs)
elif type == "EgoAttentionNetwork":
return EgoAttentionNetwork(**kwargs)
elif type == "Table":
return Table(**kwargs)
else:
raise ValueError("Unknown model type")
def model_factory_from_env(env, **kwargs):
kwargs = size_model_config(env, **kwargs)
return model_factory(**kwargs)
def size_model_config(env, **model_config):
"""
Update the configuration of a model depending on the environment
observation/action spaces.
Typically, the input/output sizes.
Parameters
----------
env : gym.Env
An environment.
model_config : dict
A model configuration.
"""
if isinstance(env.observation_space, spaces.Box):
obs_shape = env.observation_space.shape
elif isinstance(env.observation_space, spaces.Tuple):
obs_shape = env.observation_space.spaces[0].shape
elif isinstance(env.observation_space, spaces.Discrete):
return model_config
# Assume CHW observation space
if model_config["type"] == "ConvolutionalNetwork":
model_config["in_channels"] = int(obs_shape[0])
model_config["in_height"] = int(obs_shape[1])
model_config["in_width"] = int(obs_shape[2])
else:
model_config["in_size"] = int(np.prod(obs_shape))
if isinstance(env.action_space, spaces.Discrete):
model_config["out_size"] = env.action_space.n
elif isinstance(env.action_space, spaces.Tuple):
model_config["out_size"] = env.action_space.spaces[0].n
return model_config
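# Quick sketch of the effect for a flat Box observation space (the env and the
# extra config keys here are hypothetical):
#
#   # env.observation_space = Box(shape=(8,)), env.action_space = Discrete(4)
#   cfg = size_model_config(env, type="MultiLayerPerceptron", layer_sizes=(64, 64))
#   # -> {"type": "MultiLayerPerceptron", "layer_sizes": (64, 64),
#   #     "in_size": 8, "out_size": 4}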
def activation_factory(activation_type):
if activation_type == "RELU":
return F.relu
elif activation_type == "TANH":
return torch.tanh
elif activation_type == "ELU":
return nn.ELU()
else:
raise ValueError("Unknown activation_type: {}".format(activation_type))
def trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
the-stack_0_12651 | # Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import re
import types
import copy
import inspect
import traceback
from os.path import expanduser
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
import ansible.module_utils.six.moves.urllib.parse as urlparse
try:
from ansible.release import __version__ as ANSIBLE_VERSION
except ImportError:
ANSIBLE_VERSION = 'unknown'
AZURE_COMMON_ARGS = dict(
auth_source=dict(
type='str',
choices=['auto', 'cli', 'env', 'credential_file']
),
profile=dict(type='str'),
subscription_id=dict(type='str', no_log=True),
client_id=dict(type='str', no_log=True),
secret=dict(type='str', no_log=True),
tenant=dict(type='str', no_log=True),
ad_user=dict(type='str', no_log=True),
password=dict(type='str', no_log=True),
cloud_environment=dict(type='str'),
cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
api_profile=dict(type='str', default='latest')
# debug=dict(type='bool', default=False),
)
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD',
cloud_environment='AZURE_CLOUD_ENVIRONMENT',
cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
)
# FUTURE: this should come from the SDK or an external location.
# For now, we have to copy from azure-cli
AZURE_API_PROFILES = {
'latest': {
'ContainerInstanceManagementClient': '2018-02-01-preview',
'ComputeManagementClient': dict(
default_api_version='2017-12-01',
resource_skus='2017-09-01',
disks='2017-03-30',
snapshots='2017-03-30',
virtual_machine_run_commands='2017-03-30'
),
'NetworkManagementClient': '2017-11-01',
'ResourceManagementClient': '2017-05-10',
'StorageManagementClient': '2017-10-01'
},
'2017-03-09-profile': {
'ComputeManagementClient': '2016-03-30',
'NetworkManagementClient': '2015-06-15',
'ResourceManagementClient': '2016-02-01',
'StorageManagementClient': '2016-01-01'
}
}
AZURE_TAG_ARGS = dict(
tags=dict(type='dict'),
append_tags=dict(type='bool', default=True),
)
AZURE_COMMON_REQUIRED_IF = [
('log_mode', 'file', ['log_path'])
]
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_AZURE_CLI_CORE = True
HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
try:
import importlib
except ImportError:
# This passes the sanity import test, but does not provide a user friendly error message.
# Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
importlib = None
try:
from packaging.version import Version
HAS_PACKAGING_VERSION = True
HAS_PACKAGING_VERSION_EXC = None
except ImportError as exc:
Version = None
HAS_PACKAGING_VERSION = False
HAS_PACKAGING_VERSION_EXC = exc
# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
try:
from msrest.serialization import Serializer
except ImportError as exc:
HAS_MSRESTAZURE_EXC = exc
HAS_MSRESTAZURE = False
try:
from enum import Enum
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, is_valid_resource_id
from msrestazure import azure_cloud
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.version import VERSION as network_client_version
from azure.mgmt.storage.version import VERSION as storage_client_version
from azure.mgmt.compute.version import VERSION as compute_client_version
from azure.mgmt.resource.version import VERSION as resource_client_version
from azure.mgmt.dns.version import VERSION as dns_client_version
from azure.mgmt.web.version import VERSION as web_client_version
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.dns import DnsManagementClient
from azure.mgmt.web import WebSiteManagementClient
from azure.mgmt.containerservice import ContainerServiceClient
from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
try:
from azure.cli.core.util import CLIError
from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
from azure.common.cloud import get_cli_active_cloud
except ImportError:
HAS_AZURE_CLI_CORE = False
CLIError = Exception
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
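# Note that the index advances by one, so every path segment becomes a key whose
# value is the following segment. For a made-up resource id such as
#   '/subscriptions/xxxx/resourceGroups/myrg/providers/Microsoft.Network/publicIPAddresses/pip01'
# the result therefore contains, among other overlapping pairs,
#   {'resourceGroups': 'myrg', 'providers': 'Microsoft.Network', 'publicIPAddresses': 'pip01', ...}
# which is what callers typically look up.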
def format_resource_id(val, subscription_id, namespace, types, resource_group):
return resource_id(name=val,
resource_group=resource_group,
namespace=namespace,
type=types,
subscription=subscription_id) if not is_valid_resource_id(val) else val
# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
# or generate the requirements files from this so we only have one source of truth to maintain...
AZURE_PKG_VERSIONS = {
'StorageManagementClient': {
'package_name': 'storage',
'expected_version': '1.5.0'
},
'ComputeManagementClient': {
'package_name': 'compute',
'expected_version': '2.0.0'
},
'ContainerInstanceManagementClient': {
'package_name': 'containerinstance',
'expected_version': '0.3.1'
},
'NetworkManagementClient': {
'package_name': 'network',
'expected_version': '1.3.0'
},
'ResourceManagementClient': {
'package_name': 'resource',
'expected_version': '1.1.0'
},
'DnsManagementClient': {
'package_name': 'dns',
'expected_version': '1.0.1'
},
'WebSiteManagementClient': {
'package_name': 'web',
'expected_version': '0.32.0'
},
} if HAS_AZURE else {}
AZURE_MIN_RELEASE = '2.0.0'
class AzureRMModuleBase(object):
def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=None, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
merged_arg_spec = dict()
merged_arg_spec.update(AZURE_COMMON_ARGS)
if supports_tags:
merged_arg_spec.update(AZURE_TAG_ARGS)
if derived_arg_spec:
merged_arg_spec.update(derived_arg_spec)
merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
if required_if:
merged_required_if += required_if
self.module = AnsibleModule(argument_spec=merged_arg_spec,
bypass_checks=bypass_checks,
no_log=no_log,
check_invalid_arguments=check_invalid_arguments,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_one_of=required_one_of,
add_file_common_args=add_file_common_args,
supports_check_mode=supports_check_mode,
required_if=merged_required_if)
if not HAS_PACKAGING_VERSION:
self.fail("Do you have packaging installed? Try `pip install packaging`"
"- {0}".format(HAS_PACKAGING_VERSION_EXC))
if not HAS_MSRESTAZURE:
self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
"- {0}".format(HAS_MSRESTAZURE_EXC))
if not HAS_AZURE:
self.fail("Do you have azure>={1} installed? Try `pip install ansible[azure]`"
"- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))
self._cloud_environment = None
self._network_client = None
self._storage_client = None
self._resource_client = None
self._compute_client = None
self._dns_client = None
self._web_client = None
self._containerservice_client = None
self.check_mode = self.module.check_mode
self.api_profile = self.module.params.get('api_profile')
self.facts_module = facts_module
# self.debug = self.module.params.get('debug')
# authenticate
self.credentials = self._get_credentials(self.module.params)
if not self.credentials:
if HAS_AZURE_CLI_CORE:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
else:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
# cert validation mode precedence: module-arg, credential profile, env, "validate"
self._cert_validation_mode = self.module.params['cert_validation_mode'] or self.credentials.get('cert_validation_mode') or \
os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
if self._cert_validation_mode not in ['validate', 'ignore']:
self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
# if cloud_environment specified, look up/build Cloud object
raw_cloud_env = self.credentials.get('cloud_environment')
if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
self._cloud_environment = raw_cloud_env
elif not raw_cloud_env:
self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
else:
# try to look up "well-known" values via the name attribute on azure_cloud members
all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
if len(matched_clouds) == 1:
self._cloud_environment = matched_clouds[0]
elif len(matched_clouds) > 1:
self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
else:
if not urlparse.urlparse(raw_cloud_env).scheme:
self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
try:
self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
except Exception as e:
self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc(e))
if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
if self.credentials.get('credentials') is not None:
# AzureCLI credentials
self.azure_credentials = self.credentials['credentials']
elif self.credentials.get('client_id') is not None and \
self.credentials.get('secret') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'],
cloud_environment=self._cloud_environment,
verify=self._cert_validation_mode == 'validate')
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
tenant = self.credentials.get('tenant')
if not tenant:
tenant = 'common' # SDK default
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
self.credentials['password'],
tenant=tenant,
cloud_environment=self._cloud_environment,
verify=self._cert_validation_mode == 'validate')
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password or "
"be logged using AzureCLI.")
# common parameter validation
if self.module.params.get('tags'):
self.validate_tags(self.module.params['tags'])
if not skip_exec:
res = self.exec_module(**self.module.params)
self.module.exit_json(**res)
def check_client_version(self, client_type):
# Ensure Azure modules are at least 2.0.0rc5.
package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
if package_version is not None:
client_name = package_version.get('package_name')
try:
client_module = importlib.import_module(client_type.__module__)
client_version = client_module.VERSION
except RuntimeError:
# can't get at the module version for some reason, just fail silently...
return
expected_version = package_version.get('expected_version')
if Version(client_version) < Version(expected_version):
self.fail("Installed azure-mgmt-{0} client version is {1}. The supported version is {2}. Try "
"`pip install ansible[azure]`".format(client_name, client_version, expected_version))
def exec_module(self, **kwargs):
self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
def fail(self, msg, **kwargs):
'''
Shortcut for calling module.fail()
:param msg: Error message text.
:param kwargs: Any key=value pairs
:return: None
'''
self.module.fail_json(msg=msg, **kwargs)
def deprecate(self, msg, version=None):
self.module.deprecate(msg, version)
def log(self, msg, pretty_print=False):
pass
# Use only during module development
# if self.debug:
# log_file = open('azure_rm.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, indent=4, sort_keys=True))
# else:
# log_file.write(msg + u'\n')
def validate_tags(self, tags):
'''
Check if tags dictionary contains string:string pairs.
:param tags: dictionary of string:string pairs
:return: None
'''
if not self.facts_module:
if not isinstance(tags, dict):
self.fail("Tags must be a dictionary of string:string values.")
for key, value in tags.items():
if not isinstance(value, str):
self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
def update_tags(self, tags):
'''
Call from the module to update metadata tags. Returns tuple
with bool indicating if there was a change and dict of new
tags to assign to the object.
:param tags: metadata tags from the object
:return: bool, dict
'''
new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
changed = False
if isinstance(self.module.params.get('tags'), dict):
for key, value in self.module.params['tags'].items():
if not new_tags.get(key) or new_tags[key] != value:
changed = True
new_tags[key] = value
if isinstance(tags, dict):
for key, value in tags.items():
if not self.module.params['tags'].get(key):
new_tags.pop(key)
changed = True
return changed, new_tags
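# Small worked example (tag names and values are arbitrary placeholders):
#   existing object tags : {'env': 'dev'}
#   module param tags    : {'env': 'prod', 'owner': 'ops'}
#   -> update_tags returns (True, {'env': 'prod', 'owner': 'ops'})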
def has_tags(self, obj_tags, tag_list):
'''
Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
exists in object tags.
:param obj_tags: dictionary of tags from an Azure object.
:param tag_list: list of tag keys or tag key:value pairs
:return: bool
'''
if not obj_tags and tag_list:
return False
if not tag_list:
return True
matches = 0
result = False
for tag in tag_list:
tag_key = tag
tag_value = None
if ':' in tag:
tag_key, tag_value = tag.split(':')
if tag_value and obj_tags.get(tag_key) == tag_value:
matches += 1
elif not tag_value and obj_tags.get(tag_key):
matches += 1
if matches == len(tag_list):
result = True
return result
def get_resource_group(self, resource_group):
'''
Fetch a resource group.
:param resource_group: name of a resource group
:return: resource group object
'''
try:
return self.rm_client.resource_groups.get(resource_group)
except CloudError as cloud_error:
self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
except Exception as exc:
self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
def _get_profile(self, profile="default"):
path = expanduser("~/.azure/credentials")
try:
config = configparser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except:
pass
if credentials.get('subscription_id'):
return credentials
return None
def _get_azure_cli_credentials(self):
credentials, subscription_id = get_azure_cli_credentials()
cloud_environment = get_cli_active_cloud()
cli_credentials = {
'credentials': credentials,
'subscription_id': subscription_id,
'cloud_environment': cloud_environment
}
return cli_credentials
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile']:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials.get('subscription_id') is not None:
return env_credentials
return None
def _get_credentials(self, params):
# Get authentication credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = params.get(attribute, None)
auth_source = params.get('auth_source', None)
if not auth_source:
auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
if auth_source == 'cli':
if not HAS_AZURE_CLI_CORE:
self.fail("Azure auth_source is `cli`, but azure-cli package is not available. Try `pip install azure-cli --upgrade`")
try:
self.log('Retrieving credentials from Azure CLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as err:
self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
if auth_source == 'env':
self.log('Retrieving credentials from environment')
env_credentials = self._get_env_credentials()
return env_credentials
if auth_source == 'credential_file':
self.log("Retrieving credentials from credential file")
profile = params.get('profile', 'default')
default_credentials = self._get_profile(profile)
return default_credentials
# auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['subscription_id']:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~./azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
try:
if HAS_AZURE_CLI_CORE:
self.log('Retrieving credentials from AzureCLI profile')
cli_credentials = self._get_azure_cli_credentials()
return cli_credentials
except CLIError as ce:
self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
return None
def serialize_obj(self, obj, class_name, enum_modules=None):
'''
Return a JSON representation of an Azure object.
:param obj: Azure object
:param class_name: Name of the object's class
:param enum_modules: List of module names to build enum dependencies from.
:return: serialized result
'''
enum_modules = [] if enum_modules is None else enum_modules
dependencies = dict()
if enum_modules:
for module_name in enum_modules:
mod = importlib.import_module(module_name)
for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
dependencies[mod_class_name] = mod_class_obj
self.log("dependencies: ")
self.log(str(dependencies))
serializer = Serializer(classes=dependencies)
return serializer.body(obj, class_name, keep_readonly=True)
def get_poller_result(self, poller, wait=5):
'''
Consistent method of waiting on and retrieving results from Azure's long poller
:param poller Azure poller object
:return object resulting from the original request
'''
try:
delay = wait
while not poller.done():
self.log("Waiting for {0} sec".format(delay))
poller.wait(timeout=delay)
return poller.result()
except Exception as exc:
self.log(str(exc))
raise
def check_provisioning_state(self, azure_object, requested_state='present'):
'''
Check an Azure object's provisioning state. If something did not complete the provisioning
process, then we cannot operate on it.
:param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state
and name attributes.
:return None
'''
if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
hasattr(azure_object, 'name'):
# resource group object fits this model
if isinstance(azure_object.properties.provisioning_state, Enum):
if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
return
if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
return
if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
if isinstance(azure_object.provisioning_state, Enum):
if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
return
if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
keys = dict()
try:
# Get keys from the storage account
self.log('Getting keys')
account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
except Exception as exc:
self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
try:
self.log('Create blob service')
if storage_blob_type == 'page':
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_page_blob_service()
elif storage_blob_type == 'block':
return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
else:
raise Exception("Invalid storage blob type defined.")
except Exception as exc:
self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
str(exc)))
def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic'):
'''
Create a default public IP address <public_ip_name> to associate with a network interface.
If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.
:param resource_group: name of an existing resource group
:param location: a valid azure location
:param public_ip_name: base name to assign the public IP address
:param allocation_method: one of 'Static' or 'Dynamic'
:return: PIP object
'''
pip = None
self.log("Starting create_default_pip {0}".format(public_ip_name))
self.log("Check to see if public IP {0} exists".format(public_ip_name))
try:
pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
except CloudError:
pass
if pip:
self.log("Public ip {0} found.".format(public_ip_name))
self.check_provisioning_state(pip)
return pip
params = self.network_models.PublicIPAddress(
location=location,
public_ip_allocation_method=allocation_method,
)
self.log('Creating default public IP {0}'.format(public_ip_name))
try:
poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
except Exception as exc:
self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
return self.get_poller_result(poller)
def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
'''
Create a default security group <security_group_name> to associate with a network interface. If a security group matching
<security_group_name> exists, return it. Otherwise, create one.
:param resource_group: Resource group name
:param location: azure location name
:param security_group_name: base name to use for the security group
:param os_type: one of 'Windows' or 'Linux'. Determines any default rules added to the security group.
:param open_ports: list of custom ports to open. When empty, defaults based on os_type are used
(SSH on 22 for Linux; RDP on 3389 and WinRM HTTPS on 5986 for Windows).
:return: security_group object
'''
group = None
self.log("Create security group {0}".format(security_group_name))
self.log("Check to see if security group {0} exists".format(security_group_name))
try:
group = self.network_client.network_security_groups.get(resource_group, security_group_name)
except CloudError:
pass
if group:
self.log("Security group {0} found.".format(security_group_name))
self.check_provisioning_state(group)
return group
parameters = self.network_models.NetworkSecurityGroup()
parameters.location = location
if not open_ports:
# Open default ports based on OS type
if os_type == 'Linux':
# add an inbound SSH rule
parameters.security_rules = [
self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow SSH Access',
source_port_range='*', destination_port_range='22', priority=100, name='SSH')
]
parameters.location = location
else:
# for windows add inbound RDP and WinRM rules
parameters.security_rules = [
self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow RDP port 3389',
source_port_range='*', destination_port_range='3389', priority=100, name='RDP01'),
self.network_models.SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow WinRM HTTPS port 5986',
source_port_range='*', destination_port_range='5986', priority=101, name='WinRM01'),
]
else:
# Open custom ports
parameters.security_rules = []
priority = 100
for port in open_ports:
priority += 1
rule_name = "Rule_{0}".format(priority)
parameters.security_rules.append(
self.network_models.SecurityRule(protocol='Tcp',
source_address_prefix='*',
destination_address_prefix='*',
access='Allow',
direction='Inbound',
source_port_range='*',
destination_port_range=str(port),
priority=priority,
name=rule_name)
)
self.log('Creating default security group {0}'.format(security_group_name))
try:
poller = self.network_client.network_security_groups.create_or_update(resource_group,
security_group_name,
parameters)
except Exception as exc:
self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
return self.get_poller_result(poller)
@staticmethod
def _validation_ignore_callback(session, global_config, local_config, **kwargs):
session.verify = False
def get_api_profile(self, client_type_name, api_profile_name):
profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)
if not profile_all_clients:
raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))
profile_raw = profile_all_clients.get(client_type_name, None)
if not profile_raw:
self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))
if isinstance(profile_raw, dict):
if not profile_raw.get('default_api_version'):
raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
return profile_raw
# wrap basic strings in a dict that just defines the default
return dict(default_api_version=profile_raw)
def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
self.log('Getting management service client {0}'.format(client_type.__name__))
self.check_client_version(client_type)
client_argspec = inspect.getargspec(client_type.__init__)
client_kwargs = dict(credentials=self.azure_credentials, subscription_id=self.subscription_id, base_url=base_url)
api_profile_dict = {}
if self.api_profile:
api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)
if not base_url:
# most things are resource_manager, don't make everyone specify
base_url = self._cloud_environment.endpoints.resource_manager
# unversioned clients won't accept profile; only send it if necessary
# clients without a version specified in the profile will use the default
if api_profile_dict and 'profile' in client_argspec.args:
client_kwargs['profile'] = api_profile_dict
# If the client doesn't accept api_version, it's unversioned.
# If it does, favor explicitly-specified api_version, fall back to api_profile
if 'api_version' in client_argspec.args:
profile_default_version = api_profile_dict.get('default_api_version', None)
if api_version or profile_default_version:
client_kwargs['api_version'] = api_version or profile_default_version
client = client_type(**client_kwargs)
# FUTURE: remove this once everything exposes models directly (eg, containerinstance)
try:
getattr(client, "models")
except AttributeError:
def _ansible_get_models(self, *arg, **kwarg):
return self._ansible_models
setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
client.models = types.MethodType(_ansible_get_models, client)
# Add user agent for Ansible
client.config.add_user_agent(ANSIBLE_USER_AGENT)
# Add user agent when running from Cloud Shell
if CLOUDSHELL_USER_AGENT_KEY in os.environ:
client.config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
# Add user agent when running from VSCode extension
if VSCODEEXT_USER_AGENT_KEY in os.environ:
client.config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
if self._cert_validation_mode == 'ignore':
client.config.session_configuration_callback = self._validation_ignore_callback
return client
@property
def storage_client(self):
self.log('Getting storage client...')
if not self._storage_client:
self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-10-01')
return self._storage_client
@property
def storage_models(self):
self.log('Getting storage models...')
return StorageManagementClient.models("2017-10-01")
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-06-01')
return self._network_client
@property
def network_models(self):
self.log("Getting network models...")
return NetworkManagementClient.models("2017-06-01")
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-05-10')
return self._resource_client
@property
def rm_models(self):
self.log("Getting resource manager models")
return ResourceManagementClient.models("2017-05-10")
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2017-03-30')
return self._compute_client
@property
def compute_models(self):
self.log("Getting compute models")
return ComputeManagementClient.models("2017-03-30")
@property
def dns_client(self):
self.log('Getting dns client')
if not self._dns_client:
self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
return self._dns_client
@property
def web_client(self):
self.log('Getting web client')
if not self._web_client:
self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager)
return self._web_client
@property
def containerservice_client(self):
self.log('Getting container service client')
if not self._containerservice_client:
self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
base_url=self._cloud_environment.endpoints.resource_manager)
return self._containerservice_client
|
the-stack_0_12655 | """Example usage of BayesianDense layer on MNIST dataset (~1.5% test error)."""
import os
import logging
import logging.config
from sklearn.utils import shuffle
from keras.layers import Dense, Input
from keras.models import Model
from keras.datasets import mnist
from keras.optimizers import Adam
import numpy as np
import pickle
import keras.backend as K
from tqdm import tqdm
from bayesian_dense.bayesian_dense import BayesianDense, VariationalRegularizer
from keras.regularizers import WeightRegularizer
def accuracy(model, x, label_true, batch_size):
"""Calculate accuracy of a model"""
y_pred = model.predict(x, batch_size=batch_size)
label_pred = np.argmax(y_pred,axis=1)
correct = np.count_nonzero(label_true == label_pred)
return 1.0-(float(correct)/float(x.shape[0]))
def one_hot(labels, m):
"""Convert labels to one-hot representations"""
n = labels.shape[0]
y = np.zeros((n,m))
y[np.arange(n),labels.ravel()]=1
return y
def model(hidden_dim=512, input_dim=28*28, sigma_regularization=1e-3, mu_regularization=1e-5, k=10,
activation = lambda x: K.relu(x, 1.0 / 5.5)):
"""Create two layer MLP with softmax output"""
_x = Input(shape=(input_dim,))
layer = lambda output_dim, activation: BayesianDense(output_dim,
activation=activation,
W_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
b_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
W_regularizer=WeightRegularizer(l1=mu_regularization))
h1 = layer(hidden_dim, activation)
h2 = layer(hidden_dim, activation)
y = layer(k, 'softmax')
_y = y(h2(h1(_x)))
m = Model(_x, _y)
m.compile(Adam(1e-3),loss='categorical_crossentropy')
return m
def mnist_data():
"""Rescale and reshape MNIST data"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.
x_train = x_train.reshape((x_train.shape[0], -1))
x_test = x_test.reshape((x_test.shape[0], -1))
return (x_train, y_train, x_test, y_test)
if __name__ == "__main__":
logging.config.fileConfig('logging.conf')
path = "output/bayesian_dense/test"
if not os.path.exists(path):
os.makedirs(path)
x_train, y_train, x_test, y_test = mnist_data()
nb_epoch = 100
batch_size = 128
k = 10
decay = 0.96
lr = 1e-3
m=model()
m.summary()
log = []
for epoch in tqdm(range(nb_epoch)):
acc_train = accuracy(m, x_train, y_train, batch_size=batch_size)
acc_test = accuracy(m, x_test, y_test, batch_size=batch_size)
log.append([acc_train, acc_test])
m.optimizer.lr.set_value(np.float32(lr))
logging.info("Epoch: %i/%i, Train: %f, Test: %f, LR: %f"%(epoch, nb_epoch, acc_train, acc_test, lr))
x_train, y_train = shuffle(x_train, y_train)
m.fit(x_train, one_hot(y_train,k), nb_epoch=1, batch_size=batch_size, shuffle=True,
validation_data=(x_test, one_hot(y_test,k)))
lr *= decay
if epoch%10 == 0:
m.save_weights("%s/checkpoint-%03i.hd5"%(path,epoch))
m.save_weights('%s/model.hd5'%path)
with open("%s/log.pkl"%path, "w") as f:
pickle.dump(log, f)
|
the-stack_0_12656 | __author__ = 'yuxiang'
import datasets
import datasets.kitti_tracking
import os
import PIL
import datasets.imdb
import numpy as np
import scipy.sparse
from utils.cython_bbox import bbox_overlaps
from utils.boxes_grid import get_boxes_grid
import subprocess
import pickle as cPickle
from fast_rcnn.config import cfg
import math
from rpn_msr.generate_anchors import generate_anchors
class kitti_tracking(datasets.imdb):
def __init__(self, image_set, seq_name, kitti_tracking_path=None):
datasets.imdb.__init__(self, 'kitti_tracking_' + image_set + '_' + seq_name)
self._image_set = image_set
self._seq_name = seq_name
self._kitti_tracking_path = self._get_default_path() if kitti_tracking_path is None \
else kitti_tracking_path
self._data_path = os.path.join(self._kitti_tracking_path, image_set, 'image_02')
self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index()
# Default to roidb handler
if cfg.IS_RPN:
self._roidb_handler = self.gt_roidb
else:
self._roidb_handler = self.region_proposal_roidb
# num of subclasses
if image_set == 'training' and seq_name != 'trainval':
self._num_subclasses = 220 + 1
else:
self._num_subclasses = 472 + 1
# load the mapping from subclass to class
if image_set == 'training' and seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.int)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = self._class_to_ind[words[1]]
self._subclass_mapping = mapping
self.config = {'top_k': 100000}
# statistics for computing recall
self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_proposal = 0
assert os.path.exists(self._kitti_tracking_path), \
'kitti_tracking path does not exist: {}'.format(self._kitti_tracking_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
kitti_train_nums = [154, 447, 233, 144, 314, 297, 270, 800, 390, 803, 294, \
373, 78, 340, 106, 376, 209, 145, 339, 1059, 837]
kitti_test_nums = [465, 147, 243, 257, 421, 809, 114, 215, 165, 349, 1176, \
774, 694, 152, 850, 701, 510, 305, 180, 404, 173, 203, \
436, 430, 316, 176, 170, 85, 175]
if self._seq_name == 'train' or self._seq_name == 'trainval':
assert self._image_set == 'training', 'Use train set or trainval set in testing'
if self._seq_name == 'train':
seq_index = [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16]
else:
seq_index = range(0, 21)
# for each sequence
image_index = []
for i in xrange(len(seq_index)):
seq_idx = seq_index[i]
num = kitti_train_nums[seq_idx]
for j in xrange(num):
image_index.append('{:04d}/{:06d}'.format(seq_idx, j))
else:
# a single sequence
seq_num = int(self._seq_name)
if self._image_set == 'training':
num = kitti_train_nums[seq_num]
else:
num = kitti_test_nums[seq_num]
image_index = []
for i in xrange(num):
image_index.append('{:04d}/{:06d}'.format(seq_num, i))
return image_index
def _get_default_path(self):
"""
Return the default path where kitti_tracking is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'KITTI_Tracking')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_kitti_voxel_exemplar_annotation(index)
for index in self.image_index]
if cfg.IS_RPN:
# print out recall
for i in xrange(1, self.num_classes):
print('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
print('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
print('{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i])))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_kitti_voxel_exemplar_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI voxel exemplar format.
"""
if self._image_set == 'training' and self._seq_name != 'trainval':
prefix = 'train'
elif self._image_set == 'training':
prefix = 'trainval'
else:
prefix = ''
if prefix == '':
lines = []
lines_flipped = []
else:
filename = os.path.join(self._kitti_tracking_path, cfg.SUBCLS_NAME, prefix, index + '.txt')
if os.path.exists(filename):
print(filename)
# the annotation file contains flipped objects
lines = []
lines_flipped = []
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[1])
is_flip = int(words[2])
if subcls != -1:
if is_flip == 0:
lines.append(line)
else:
lines_flipped.append(line)
else:
lines = []
lines_flipped = []
num_objs = len(lines)
# store information of flipped objects
assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
for ix, line in enumerate(lines_flipped):
words = line.split()
subcls = int(words[1])
gt_subclasses_flipped[ix] = subcls
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
subcls = int(words[1])
boxes[ix, :] = [float(n) for n in words[3:7]]
gt_classes[ix] = cls
gt_subclasses[ix] = subcls
overlaps[ix, cls] = 1.0
subindexes[ix, cls] = subcls
subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]
overlaps = scipy.sparse.csr_matrix(overlaps)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
base_size = 16
ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
scales = 2**np.arange(1, 6, 0.5)
anchors = generate_anchors(base_size, ratios, scales)
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
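# (note) the rounding below approximates the feature map size after the assumed
# downsampling of the backbone: a stride-4 base reduction followed by two stride-2
# reductions, which together match the feat_stride of 16 used for the anchor shifts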
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2 + 1 + 0.5)
height = np.floor((height - 1) / 2 + 1 + 0.5)
width = np.round((image_width * scale - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
# gt boxes
gt_boxes = boxes * scale
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
# compute overlap
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
# check how many gt boxes are covered by anchors
if num_objs != 0:
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in xrange(1, self.num_classes):
fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
for i in xrange(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps': overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
def region_proposal_roidb(self):
"""
Return the database of regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_' + cfg.SUBCLS_NAME + '_' + cfg.REGION_PROPOSAL + '_region_proposal_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} roidb loaded from {}'.format(self.name, cache_file))
return roidb
if self._image_set != 'testing':
gt_roidb = self.gt_roidb()
print('Loading region proposal network boxes...')
if self._image_set == 'trainval':
model = cfg.REGION_PROPOSAL + '_trainval/'
else:
model = cfg.REGION_PROPOSAL + '_train/'
rpn_roidb = self._load_rpn_roidb(gt_roidb, model)
print('Region proposal network boxes loaded')
roidb = datasets.imdb.merge_roidbs(rpn_roidb, gt_roidb)
else:
print('Loading region proposal network boxes...')
model = cfg.REGION_PROPOSAL + '_trainval/'
roidb = self._load_rpn_roidb(None, model)
print('Region proposal network boxes loaded')
print('{} region proposals per image'.format(self._num_boxes_proposal / len(self.image_index)))
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote roidb to {}'.format(cache_file))
return roidb
def _load_rpn_roidb(self, gt_roidb, model):
# set the prefix
prefix = model
box_list = []
for index in self.image_index:
filename = os.path.join(self._kitti_tracking_path, 'region_proposals', prefix, self._image_set, index + '.txt')
assert os.path.exists(filename), \
'RPN data not found at: {}'.format(filename)
print(filename)
raw_data = np.loadtxt(filename, dtype=float)
if len(raw_data.shape) == 1:
if raw_data.size == 0:
raw_data = raw_data.reshape((0, 5))
else:
raw_data = raw_data.reshape((1, 5))
x1 = raw_data[:, 0]
y1 = raw_data[:, 1]
x2 = raw_data[:, 2]
y2 = raw_data[:, 3]
score = raw_data[:, 4]
inds = np.where((x2 > x1) & (y2 > y1))[0]
raw_data = raw_data[inds,:4]
self._num_boxes_proposal += raw_data.shape[0]
box_list.append(raw_data)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def evaluate_detections(self, all_boxes, output_dir):
# load the mapping from subclass to alpha (viewpoint)
if self._image_set == 'training' and self._seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index[5:] + '.txt')
print('Writing kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
f.write('{:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1 -1 -1 -1 {:.32f}\n'.format(\
cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
# write detection results into one file
def evaluate_detections_one_file(self, all_boxes, output_dir):
# load the mapping from subclass to alpha (viewpoint)
if self._image_set == 'training' and self._seq_name != 'trainval':
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'train', 'mapping.txt')
else:
filename = os.path.join(self._kitti_tracking_path, 'voxel_exemplars', 'trainval', 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.float)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = float(words[3])
# open results file
filename = os.path.join(output_dir, self._seq_name+'.txt')
print('Writing all kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
# for each image
for im_ind, index in enumerate(self.image_index):
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
subcls = int(dets[k, 5])
cls_name = self.classes[self.subclass_mapping[subcls]]
assert (cls_name == cls), 'subclass not in class'
alpha = mapping[subcls]
f.write('{:d} -1 {:s} -1 -1 {:f} {:f} {:f} {:f} {:f} -1 -1 -1 -1000 -1000 -1000 -10 {:f}\n'.format(\
im_ind, cls, alpha, dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index[5:] + '.txt')
print('Writing kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
# for each class
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(\
dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
def evaluate_proposals_msr(self, all_boxes, output_dir):
# for each image
for im_ind, index in enumerate(self.image_index):
filename = os.path.join(output_dir, index + '.txt')
print('Writing kitti_tracking results to file ' + filename)
with open(filename, 'wt') as f:
dets = all_boxes[im_ind]
if dets == []:
continue
for k in xrange(dets.shape[0]):
f.write('{:f} {:f} {:f} {:f} {:.32f}\n'.format(dets[k, 0], dets[k, 1], dets[k, 2], dets[k, 3], dets[k, 4]))
if __name__ == '__main__':
d = datasets.kitti_tracking('training', '0000')
res = d.roidb
from IPython import embed; embed()
|
the-stack_0_12658 | import DAO
def show_my_courses(student, course_list):
print('\nMy Courses:')
print('#\tCOURSE NAME\tINSTRUCTOR NAME')
attending_dao = DAO.AttendingDAO()
my_courses = attending_dao.get_student_courses(course_list, student.get_email())
i = 1
for course in my_courses:
print(f'{i}\t{course.get_name()}\t{course.get_instructor()}')
i+=1
def show_all_courses(course_list):
print('\nAll Courses:')
print('ID\tCOURSE NAME\tINSTRUCTOR NAME')
for course in course_list:
print(f'{course.get_id()}\t{course.get_name()}\t{course.get_instructor()}')
def main():
print('Welcome!')
entry=None
while entry!='2':
entry = input('\n1. Current Student\n2. New Student\n3. Quit\nPlease, enter 1, 2 or 3: ')
if entry=='1':
student_dao = DAO.StudentDAO()
email = input('\nEnter Your Email: ')
pw = input('Enter Your Password: ')
if student_dao.validate_user(email, pw):
course_dao = DAO.CourseDAO()
attending_dao = DAO.AttendingDAO()
student = student_dao.get_student_by_email(email)
course_list = course_dao.get_courses()
print(type(student))
show_my_courses(student, course_list)
print('\nWhat Would You Like To Do?')
while entry!='2':
entry = input('\n1. Register To Course\n2. Logout\nPlease, enter 1 or 2: ')
if entry=='1':
show_all_courses(course_list)
course_id = input('\nSelect Course By ID Number: ')
print("\nAttempting to Register...")
if attending_dao.register_student_to_course(email, course_id, course_list):
show_my_courses(student, course_list)
elif entry=='2':
print('\nYou Have Been Logged Out.')
else:
print('\nInvalid Option...')
else:
print('\nWrong Credentials!')
elif entry=='2':
print("Welcome to the school!")
student_dao = DAO.StudentDAO()
email = input('Please provide your email : ')
if not student_dao.get_student_by_email(email):
name = input("What is your full name? : ")
password = input("What would you like your password to be? : ")
student_dao.add_new_student(email, name, password)
entry = '-1'
continue
else:
print("That email is already taken")
elif entry=='3':
print("Programming is closing, ")
break;
else:
print('Invalid Option...')
print('\nClosing Program. Goodbye.')
if __name__=='__main__':
main()
|
the-stack_0_12661 | '''
Copyright 2017 The Regents of the University of Colorado
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
events_mapping.py
Python Version: 3.6.3
Queries the study by the events_mapping table and populates OHDSI tables Death, visit_occurrence and procedure_occurrence.
This is research code for demonstration purposes only.
croeder 8/2017 [email protected]
'''
import logging
from HeartData import migrate
#import datetime
#import sys
#import re
import psycopg2
from psycopg2.extras import RealDictCursor
from HeartData.person import BasePerson
from ui.models import Concept
logger = logging.getLogger(__name__)
NULL_PLACEHOLDER='no_column'
def _read_event_mappings(con, study_id):
event_mappings={}
cur = con.cursor(cursor_factory=RealDictCursor)
cur.execute( ("SELECT study_id, from_table, from_column, to_table, value_vocabulary_id, value_concept_code, addl_column, addl_value, from_date_column, where_clause"
" FROM events_mapping "
" WHERE study_id = %s"), (study_id,) )
rows = cur.fetchall()
cur.close()
return rows
def populate(con, person_id_list, study):
""" populate the ohdsi person table.
Be wary of the fact that the list of person_ids is a list of ohdsi_ids,
and that when you query study tables those ids need converted.
"""
personObj = BasePerson.factory(study)
id_col = personObj.get_id_field_name()
cur = con.cursor()
event_mappings = _read_event_mappings(con, study.study_id)
procedure_id=0
visit_id=0
for row in event_mappings:
logger.info("XX events_mapping.populate() %s", row)
from_table_name=row['from_table']
prefix = from_table_name.split('_')[0]
for person_id in person_id_list:
query=""
# QUERY FOR THE VALUES, BEST SPECIFIC? TODO
if (row['from_column'] != NULL_PLACEHOLDER):
# a value and a date, like the Death table
if (row['where_clause'] != NULL_PLACEHOLDER) :
query = ("SELECT {0}, {1} from {2} where " + id_col + " = %s and ( {3} )").format(row['from_date_column'], row['from_column'], row['from_table'], row['where_clause'])
#logger.debug("QUERY1:%s %s", query, person_id)
logger.info("QUERY1:%s %s", query, person_id)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
else:
query = ("SELECT {0}, {1} from {2} where " + id_col + " = %s").format(row['from_date_column'], row['from_column'], row['from_table'])
#logger.debug("QUERY2: %s, %s", query, row)
logger.info("QUERY2: %s, %s", query, row)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
else:
# just a date, like the Occurrence tables:
if (row['where_clause'] != NULL_PLACEHOLDER) :
query = ("SELECT {0} from {1} where " + id_col + " = %s and ( {2} )").format(row['from_date_column'], row['from_table'], row['where_clause'])
#logger.debug("QUERY3: %s %s", query, row)
logger.info("QUERY3: %s %s", query, row)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
else:
query = ("SELECT {0} from {1} where " + id_col + " = %s").format(row['from_date_column'], row['from_table'])
#logger.debug("QUERY4: %s %s", query, row)
logger.info("QUERY4: %s %s", query, row)
cur.execute(query, (personObj.convert_person_id_to_study(person_id),))
value_rows = cur.fetchall()
logger.debug("events.populate() from:%s to:%s rows:%d", from_table_name, row['to_table'], len(value_rows))
# LOOKUP the id (vocab, concept) from the mappings row
concept_id = Concept.objects.get(vocabulary_id=row['value_vocabulary_id'], concept_code=row['value_concept_code']).concept_id
# INSERT
if (len(value_rows) == 0):
logger.warn("no rows back from %s person:%s, with %s", query, person_id, row)
elif (concept_id == None) :
logger.error("No concept %s, %s", row['value_vocabulary_id'], row['value_concept_code'])
else:
for value_row in value_rows:
if value_row[0] != None :
logger.debug("VALUE ROWS pid:%s query:%s value:%s num-rows:%d", person_id, query, value_row, len(value_rows))
to_table_name=row['to_table']
# sometimes this is a date, sometimes a string. Use string, the lowest-common denominator, works for all sources
the_date_value=''
try:
date_time_string = str(value_row[0])
(year, month, day) = date_time_string.split(' ')[0].split('-')
the_date_value = "{0}/{1}/{2}".format(month, day, year)
except:
logger.error("populate raised on {}".format(date_time_string))
the_date_value = date_time_string
# INSERT DEATH
if to_table_name == 'Death':
statement = "INSERT into death (person_id, death_date, death_datetime, death_type_concept_id, cause_concept_id)" \
+ " values ( %s, %s, %s, %s, %s)"
logger.debug("death: %s, %s, %s, %s, %s %s %s %s); ",
statement, person_id, the_date_value, row['addl_value'], concept_id,
row['value_vocabulary_id'], row['value_concept_code'], value_row[0] )
cur.execute(statement, (person_id, the_date_value, the_date_value, row['addl_value'], concept_id))
# INSERT VISIT OCCURRENCE
elif to_table_name == 'visit_occurrence':
statement = ("INSERT into visit_occurrence "
"(visit_occurrence_id, person_id, visit_concept_id, visit_start_date, "
" visit_start_datetime, visit_end_date, visit_type_concept_id)"
" values ( %s, %s, %s, %s, %s, %s, %s)")
logger.debug("visit %s %s %s %s %s %s %s %s", statement, visit_id, person_id, concept_id, the_date_value,
row['addl_value'], row['value_vocabulary_id'], row['value_concept_code'])
cur.execute(statement, (visit_id, person_id, concept_id, the_date_value, the_date_value, the_date_value, row['addl_value']))
visit_id += 1
# INSERT PROCEDURE OCCURRENCE
elif to_table_name == 'procedure_occurrence':
statement = ("INSERT into procedure_occurrence"
" (procedure_occurrence_id, person_id, procedure_concept_id, "
" procedure_date, procedure_datetime, procedure_type_concept_id)"\
" values ( %s, %s, %s, %s, %s, %s)")
logger.debug("proc: %s %s %s %s *%s* %s %s %s %s", statement, procedure_id, person_id, concept_id,
the_date_value, row['addl_value'], row['value_vocabulary_id'], row['value_concept_code'], value_row[0] )
cur.execute(statement, (procedure_id, person_id, concept_id, the_date_value, the_date_value, row['addl_value']))
procedure_id += 1
else:
logger.error("unknown table name %s in events.populate() %s", to_table_name, row)
else:
logger.warn("None value in events_mapping.populate() with %s", value_row)
value_rows=None
cur.close()
con.commit()
|
the-stack_0_12662 | import unittest
import requests
class UnitTestsIbanAPI(unittest.TestCase):
# https://ibanapi.com/get-api
def test_get_get_api(self):
print('test_get_get_api')
params = (
('api_key', 'API_KEY'),
)
iban = "EE471000001020145685"
url = "https://api.ibanapi.com/v1/validate/" + iban
response = requests.get(url, params=params)
print(response.text)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_12663 | """ formatting.py """
import math
from enum import Enum, unique
from typing import Dict, Iterable, List
from .layer_info import LayerInfo
@unique
class Verbosity(Enum):
""" Contains verbosity levels. """
QUIET, DEFAULT, VERBOSE = 0, 1, 2
class FormattingOptions:
""" Class that holds information about formatting the table output. """
def __init__(
self,
max_depth: int,
verbose: int,
col_names: Iterable[str],
col_width: int,
):
self.max_depth = max_depth
self.verbose = verbose
self.col_names = col_names
self.col_width = col_width
self.layer_name_width = 40
def set_layer_name_width(
self, summary_list: List[LayerInfo], align_val: int = 5
) -> None:
"""
Set layer name width by taking the longest line length and rounding up to
the nearest multiple of align_val.
"""
max_length = 0
for info in summary_list:
depth_indent = info.depth * align_val + 1
max_length = max(max_length, len(str(info)) + depth_indent)
if max_length >= self.layer_name_width:
self.layer_name_width = math.ceil(max_length / align_val) * align_val
def get_total_width(self) -> int:
""" Calculate the total width of all lines in the table. """
return len(tuple(self.col_names)) * self.col_width + self.layer_name_width
def format_row(self, layer_name: str, row_values: Dict[str, str]) -> str:
""" Get the string representation of a single layer of the model. """
info_to_use = [row_values.get(row_type, "") for row_type in self.col_names]
new_line = f"{layer_name:<{self.layer_name_width}} "
for info in info_to_use:
new_line += f"{info:<{self.col_width}} "
return new_line.rstrip() + "\n"
|
the-stack_0_12664 | import time
import torch
import functools
import argparse
import pyaudio
import wave
import torch.nn.functional as F
from utils import data
from ctcdecode import CTCBeamDecoder
from data.utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
parser.add_argument("--model_path",
default="save_model/model.pth",
type=str,
help="trained model path. (default: %(default)s)")
parser.add_argument("--lm_path",
default="lm/zh_giga.no_cna_cmn.prune01244.klm",
type=str,
help="language model path. (default: %(default)s)")
parser.add_argument("--record_time",
default=5,
type=int,
help="record time for second. (default: %(default)s)")
args = parser.parse_args()
print_arguments(args)
alpha = 0.8
beta = 0.3
cutoff_top_n = 40
cutoff_prob = 1.0
beam_width = 32
num_processes = 4
blank_index = 0
model = torch.load(args.model_path)
model = model.cuda()
model.eval()
decoder = CTCBeamDecoder(model.vocabulary,
args.lm_path,
alpha,
beta,
cutoff_top_n,
cutoff_prob,
beam_width,
num_processes,
blank_index)
def translate(vocab, out, out_len):
return "".join([vocab[x] for x in out[0:out_len]])
def predict(wav_path):
wav = data.load_audio(wav_path)
spec = data.spectrogram(wav)
spec.unsqueeze_(0)
with torch.no_grad():
spec = spec.cuda()
y = model.cnn(spec)
y = F.softmax(y, 1)
y_len = torch.tensor([y.size(-1)])
y = y.permute(0, 2, 1) # B * T * V
print("decoding...")
out, score, offset, out_len = decoder.decode(y, y_len)
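# decode() returns the beam search results per utterance; the top beam of the single
# batch item is taken and its token ids are mapped back to characters via the vocabulary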
return translate(model.vocabulary, out[0][0], out_len[0][0])
def save_wave_file(filename, data):
wf = wave.open(filename, "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(SAMPWIDTH)
wf.setframerate(RATE)
wf.writeframes(b"".join(data))
wf.close()
def record(wav_path, time=5):
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
my_buf = []
print("录音中(%ds)" % time)
for i in range(0, int(RATE / CHUNK * time)):
data = stream.read(CHUNK)
my_buf.append(data)
print(".", end="", flush=True)
save_wave_file(wav_path, my_buf)
stream.close()
if __name__ == '__main__':
# Recording format
RATE = 16000
CHUNK = 1024
CHANNELS = 1
SAMPWIDTH = 2
# Temporary path for saving the recording
save_path = 'dataset/record.wav'
while True:
_ = input("按下回车键开机录音,录音%s秒中:" % args.record_time)
record(save_path, time=args.record_time)
start = time.time()
result_text = predict(save_path)
end = time.time()
print("识别时间:%dms,识别结果:%s" % (round((end - start) * 1000), result_text))
|
the-stack_0_12665 | """
Put files into the LeoShadow subfolder.
Usage:
1. convert.py <filename> LeoShadow x
This copy file <filename> into the subfolder leoShadow,
adds the prefix, and creates an empty file at the
current location.
After restarting Leo, <filename> will be re-created without
annotations.
2. convert -all LeoShadow x
Apply 'convert.py <filename> LeoShadow x' to all .py files.
Must be run in the directory with the .py files.
x is the prefix specified the for mod_shadow plugin.
"""
import os, sys, shutil
def convert(filename, leoFolder, prefix):
if not os.path.exists(leoFolder):
os.mkdir(leoFolder)
assert os.path.exists(leoFolder)
else:
assert os.path.isdir(leoFolder)
dir, name = os.path.split(filename)
newname = os.path.join(dir, leoFolder, prefix + name)
if os.path.exists(newname):
return
print("Putting", filename, "into the shadow folder", leoFolder)
os.rename(filename, newname)
f = open(filename, "w")
f.close()
if __name__ == '__main__':
scriptname, filename, leoFolder, prefix = sys.argv
if filename == '-all':
for filename in os.listdir("."):
rest, extension = os.path.splitext(filename)
if extension == '.py':
if (extension not in ['.leo', '.pyc'] and
not filename.startswith("convert")):
if os.path.isfile(filename):
convert(filename, leoFolder, prefix)
else:
convert(filename, leoFolder, prefix)
|
the-stack_0_12666 | """
Helpers for plugin app
"""
import os
import subprocess
import pathlib
import sysconfig
import traceback
import inspect
import pkgutil
from django.conf import settings
from django.core.exceptions import AppRegistryNotReady
# region logging / errors
class IntegrationPluginError(Exception):
"""
Error that encapsulates another error and adds the path / reference of the raising plugin
"""
def __init__(self, path, message):
self.path = path
self.message = message
def __str__(self):
return self.message # pragma: no cover
class MixinImplementationError(ValueError):
"""
Error if mixin was implemented wrong in plugin
Mostly raised if constant is missing
"""
pass
class MixinNotImplementedError(NotImplementedError):
"""
Error if necessary mixin function was not overwritten
"""
pass
def log_error(error, reference: str = 'general'):
"""
Log a plugin error
"""
from plugin import registry
# make sure the registry is set up
if reference not in registry.errors:
registry.errors[reference] = []
# add error to stack
registry.errors[reference].append(error)
def handle_error(error, do_raise: bool = True, do_log: bool = True, log_name: str = ''):
"""
Handles an error and casts it as an IntegrationPluginError
"""
package_path = traceback.extract_tb(error.__traceback__)[-1].filename
install_path = sysconfig.get_paths()["purelib"]
try:
package_name = pathlib.Path(package_path).relative_to(install_path).parts[0]
except ValueError:
# is file - loaded -> form a name for that
path_obj = pathlib.Path(package_path).relative_to(settings.BASE_DIR)
path_parts = [*path_obj.parts]
path_parts[-1] = path_parts[-1].replace(path_obj.suffix, '') # remove suffix
# remove path prefixes
if path_parts[0] == 'plugin':
path_parts.remove('plugin')
path_parts.pop(0)
else:
path_parts.remove('plugins')
package_name = '.'.join(path_parts)
if do_log:
log_kwargs = {}
if log_name:
log_kwargs['reference'] = log_name
log_error({package_name: str(error)}, **log_kwargs)
if do_raise:
raise IntegrationPluginError(package_name, str(error))
# endregion
# region git-helpers
def get_git_log(path):
"""
Get dict with info of the last commit to file named in path
"""
from plugin import registry
output = None
if registry.git_is_modern:
path = path.replace(os.path.dirname(settings.BASE_DIR), '')[1:]
command = ['git', 'log', '-n', '1', "--pretty=format:'%H%n%aN%n%aE%n%aI%n%f%n%G?%n%GK'", '--follow', '--', path]
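# the pretty format emits one field per line: hash (%H), author name (%aN), author mail (%aE),
# author date (%aI), sanitised subject (%f), signature status (%G?) and signing key (%GK),
# matching the keys of the dict returned below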
try:
output = str(subprocess.check_output(command, cwd=os.path.dirname(settings.BASE_DIR)), 'utf-8')[1:-1]
if output:
output = output.split('\n')
except subprocess.CalledProcessError: # pragma: no cover
pass
if not output:
output = 7 * [''] # pragma: no cover
return {'hash': output[0], 'author': output[1], 'mail': output[2], 'date': output[3], 'message': output[4], 'verified': output[5], 'key': output[6]}
def check_git_version():
"""returns if the current git version supports modern features"""
# get version string
try:
output = str(subprocess.check_output(['git', '--version'], cwd=os.path.dirname(settings.BASE_DIR)), 'utf-8')
except subprocess.CalledProcessError: # pragma: no cover
return False
# process version string
try:
version = output[12:-1].split(".")
if len(version) > 1 and version[0] == '2':
if len(version) > 2 and int(version[1]) >= 22:
return True
except ValueError: # pragma: no cover
pass
return False
class GitStatus:
"""
Class for resolving git gpg signing state
"""
class Definition:
"""
Definition of a git gpg signing state
"""
key: str = 'N'
status: int = 2
msg: str = ''
def __init__(self, key: str = 'N', status: int = 2, msg: str = '') -> None:
self.key = key
self.status = status
self.msg = msg
N = Definition(key='N', status=2, msg='no signature',)
G = Definition(key='G', status=0, msg='valid signature',)
B = Definition(key='B', status=2, msg='bad signature',)
U = Definition(key='U', status=1, msg='good signature, unknown validity',)
X = Definition(key='X', status=1, msg='good signature, expired',)
Y = Definition(key='Y', status=1, msg='good signature, expired key',)
R = Definition(key='R', status=2, msg='good signature, revoked key',)
E = Definition(key='E', status=1, msg='cannot be checked',)
# endregion
# region plugin finders
def get_modules(pkg):
"""get all modules in a package"""
context = {}
for loader, name, ispkg in pkgutil.walk_packages(pkg.__path__):
try:
module = loader.find_module(name).load_module(name)
pkg_names = getattr(module, '__all__', None)
for k, v in vars(module).items():
if not k.startswith('_') and (pkg_names is None or k in pkg_names):
context[k] = v
context[name] = module
except AppRegistryNotReady: # pragma: no cover
pass
except Exception as error:
# this 'protects' against malformed plugin modules by more or less silently failing
# log to stack
log_error({name: str(error)}, 'discovery')
return [v for k, v in context.items()]
def get_classes(module):
"""get all classes in a given module"""
return inspect.getmembers(module, inspect.isclass)
def get_plugins(pkg, baseclass):
"""
Return a list of all modules under a given package.
- Modules must be a subclass of the provided 'baseclass'
- Modules must have a non-empty PLUGIN_NAME parameter
"""
plugins = []
modules = get_modules(pkg)
# Iterate through each module in the package
for mod in modules:
# Iterate through each class in the module
for item in get_classes(mod):
plugin = item[1]
if issubclass(plugin, baseclass) and plugin.PLUGIN_NAME:
plugins.append(plugin)
return plugins
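# Illustrative usage sketch (package and base class below are placeholders, not real imports):
#   import my_plugin_package
#   discovered = get_plugins(my_plugin_package, MyPluginBase)
#   -> returns every MyPluginBase subclass in the package that sets a PLUGIN_NAME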
# endregion
|
the-stack_0_12667 | import logging
from botocore import exceptions
import json
import sys
from utils.utils import get_region_name, get_price1, get_price2, handle_limit_exceeded_exception
class Pricing:
"""For getting and returning the price of the Elastic IP's."""
#Filter for get_products pricing api call used to fetch EIP price.
eip_filter = '[{{"Field": "location", "Value": "{r}", "Type": "TERM_MATCH"}},' \
' {{"Field": "group", "Value": "ElasticIP:Address", "Type": "TERM_MATCH"}},' \
'{{"Field": "productFamily", "Value": "IP Address", "Type": "TERM_MATCH"}}]'
def __init__(self, pricing_client=None, region=None):
self.pricing_client = pricing_client
self.region = region
self.formatted_region = get_region_name(region)
logging.basicConfig(level=logging.WARNING)
self.logger = logging.getLogger()
def get_eip_price(self):
"""Returns EIP price."""
try:
f = self.eip_filter.format(r=self.formatted_region)
data = self.pricing_client.get_products(ServiceCode='AmazonEC2', Filters=json.loads(f))
if "eu-west-1" in self.region:
price = get_price2(data)
return float(price)
price = get_price1(data)
return float(price)
except exceptions.ClientError as error:
handle_limit_exceeded_exception(error, 'eip pricing.py')
sys.exit(1)
except Exception as e:
print("Error on line {} in eip pricing.py".format(sys.exc_info()[-1].tb_lineno) + " | Message: " + str(e))
sys.exit(1)
|
the-stack_0_12668 | from collections import deque
d = deque()
for _ in range(int(input())):
line = input().split()
if line[0] == 'append':
d.append(line[1])
elif line[0] == 'pop':
d.pop()
elif line[0] == 'popleft':
d.popleft()
elif line[0] == 'appendleft':
d.appendleft(line[1])
print(*d) |
the-stack_0_12669 | from typing import List, Optional
from enum import IntEnum
import numpy as np
import logging
from numpy import random
# The two following classes just make it convenient to select which mutation/recombination/selectoin to use with EA
class Recombination(IntEnum):
NONE = -1 # can be used when only mutation is required
UNIFORM = 0 # uniform crossover (only really makes sense for function dimension > 1)
INTERMEDIATE = 1 # intermediate recombination
class Mutation(IntEnum):
NONE = -1 # Can be used when only recombination is required
UNIFORM = 0 # Uniform mutation
GAUSSIAN = 1 # Gaussian mutation
class ParentSelection(IntEnum):
NEUTRAL = 0
FITNESS = 1
TOURNAMENT = 2
class Member:
"""
Class to simplify member handling.
"""
def __init__(self, initial_x: np.ndarray, target_function: callable, bounds: List[float],
mutation: Mutation, recombination: Recombination,
sigma: Optional[float] = None, recom_prob: Optional[float] = None) -> None:
"""
Init
:param initial_x: Initial coordinate of the member
:param target_function: The target function that determines the fitness value
:param bounds: Allowed bounds. For simplicity's sake we assume that all elements in initial_x have the same
bounds -> bounds[0] lower bound && bounds[1] upper bounds
:param mutation: hyperparameter that determines which mutation type use
:param recombination: hyperparameter that determines which recombination type to use
:param sigma: Optional hyperparameter that is only active if mutation is gaussian
:param recom_prob: Optional hyperparameter that is only active if recombination is uniform
"""
self._x = initial_x.astype(float) # astype is crucial here. Otherwise numpy might cast everything to int
self._f = target_function
self.__bounds = bounds
self._age = 0 # basically indicates how many offspring were generated from this member
self._mutation = mutation
self._recombination = recombination
self._x_changed = True
self._fit = None
self._sigma = sigma
self._recom_prob = recom_prob
self.logger = logging.getLogger(self.__class__.__name__)
@property # fitness can only be queried never set
def fitness(self):
if self._x_changed: # Only if the x_coordinate has changed we need to evaluate the fitness.
self._x_changed = False
self._fit = self._f(self._x)
return self._fit # otherwise we can return the cached value
@property # properties let us easily handle getting and setting without exposing our private variables
def x_coordinate(self):
return self._x
@x_coordinate.setter
def x_coordinate(self, value):
assert np.all((self.__bounds[0] <= value) & (value <= self.__bounds[1])), 'Member out of bounds'
self._x_changed = True
self._x = value
def mutate(self):
"""
Mutation which creates a new offspring
:return: new member who is based on this member
"""
new_x = self.x_coordinate.copy()
self.logger.debug('new point before mutation:')
self.logger.debug(new_x)
# modify new_x either through uniform or gaussian mutation
if self._mutation == Mutation.UNIFORM:
new_x = np.random.uniform(self.__bounds[0], self.__bounds[1], new_x.size)
elif self._mutation == Mutation.GAUSSIAN:
assert self._sigma, 'Sigma has to be set when gaussian mutation is used'
new_x = new_x + self._sigma*np.random.randn()
new_x[new_x < self.__bounds[0]] = self.__bounds[0]
new_x[new_x > self.__bounds[1]] = self.__bounds[1]
elif self._mutation != Mutation.NONE:
# We won't consider any other mutation types
raise NotImplementedError
self.logger.debug('new point after mutation:')
self.logger.debug(new_x)
child = Member(new_x, self._f, self.__bounds, self._mutation, self._recombination,
self._sigma, self._recom_prob)
self._age += 1
return child
def recombine(self, partner):
"""
Recombination of this member with a partner
:param partner: Member
:return: new offspring based on this member and partner
"""
if self._recombination == Recombination.INTERMEDIATE:
new_x = 0.5*(self.x_coordinate + partner.x_coordinate)
elif self._recombination == Recombination.UNIFORM:
assert self._recom_prob is not None, \
'for this recombination type you have to specify the recombination probability'
cross = np.random.binomial(1,self._recom_prob,self.x_coordinate.size)
new_x = self.x_coordinate*cross + partner.x_coordinate*(1-cross)
elif self._recombination == Recombination.NONE:
new_x = self.x_coordinate.copy() # copy is important here to not only get a reference
else:
raise NotImplementedError
self.logger.debug('new point after recombination:')
self.logger.debug(new_x)
child = Member(new_x, self._f, self.__bounds, self._mutation, self._recombination,
self._sigma, self._recom_prob)
self._age += 1
return child
def __str__(self):
"""Makes the class easily printable"""
str = "Population member: Age={}, x={}, f(x)={}".format(self._age, self.x_coordinate, self.fitness)
return str
def __repr__(self):
"""Will also make it printable if it is an entry in a list"""
return self.__str__() + '\n'
class EA:
def __init__(self, target_func: callable, population_size: int = 10, problem_dim: int = 2,
problem_bounds: List = [-30, 30], mutation_type: Mutation = Mutation.UNIFORM,
recombination_type: Recombination = Recombination.INTERMEDIATE,
sigma: float = 1., recom_proba: float = 0.5, selection_type: ParentSelection = ParentSelection.NEUTRAL,
total_number_of_function_evaluations: int = 200, children_per_step: int = 5,
fraction_mutation: float = .5
):
"""
Simple evolutionary algorithm
:param target_func: callable target function we optimize
:param population_size: int
:param problem_dim: int
:param problem_bounds: list[int] used to make sure population members are valid
:param mutation_type: hyperparameter to set mutation strategy
:param recombination_type: hyperparameter to set recombination strategy
:param sigma: conditional hyperparameter dependent on mutation_type GAUSSIAN
:param recom_proba: conditional hyperparameter dependent on recombination_type UNIFORM
:param selection_type: hyperparameter to set selection strategy
:param total_number_of_function_evaluations: maximum allowed function evaluations
:param children_per_step: how many children to produce per step
:param fraction_mutation: balance between sexual and asexual reproduction
"""
assert 0 <= fraction_mutation <= 1
assert 0 < children_per_step
assert 0 < total_number_of_function_evaluations
assert 0 < sigma
assert 0 < problem_dim
assert 0 < population_size
# Step 1: initialize Population
self.population = [
Member(np.random.uniform(*problem_bounds, problem_dim),
target_func, problem_bounds, mutation_type, recombination_type, sigma, recom_proba
) for _ in range(population_size)]
self.population.sort(key=lambda x: x.fitness) # sort population by fitness for easier handling downstream
self.pop_size = population_size
self.selection = selection_type
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info('Initial average fitness of population: %f', self.get_average_fitness())
self.max_func_evals = total_number_of_function_evaluations
self._func_evals = population_size
self.num_children = children_per_step
self.frac_mutants = fraction_mutation
# will store the optimization trajectory and lets you easily observe how often
self.trajectory = [self.population[0]]
# a new best member was generated
def get_average_fitness(self) -> float:
"""Helper to quickly access average population fitness"""
return np.mean(list(map(lambda x: x.fitness, self.population)))
def select_parents(self):
"""
Method that implements all selection mechanism.
For ease of computation we assume that the population members are sorted according to their fitness
:return: list of ids of selected parents.
"""
parent_ids = []
mu = self.num_children
if self.selection == ParentSelection.NEUTRAL:
for i in range(mu):
id = random.randint(0, self.pop_size)
parent_ids.append(id)
elif self.selection == ParentSelection.FITNESS:
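# fitness-proportional (roulette-wheel) selection over the raw fitness values; since the
# population is sorted best-first while fitness is being minimised, the drawn index is
# mirrored (pop_size - 1 - id) so that better members end up with higher selection pressure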
for i in range(mu):
max = sum([c.fitness for c in self.population])
pick = random.uniform(0, max)
current = 0
for id, member in zip(range(self.pop_size), self.population):
current += member.fitness
if current > pick:
parent_ids.append(self.pop_size-1-id)
break
elif self.selection == ParentSelection.TOURNAMENT:
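# note: self.population is kept sorted by fitness (best member first), so taking the
# smallest index among the randomly marked tournament entrants below is equivalent to
# picking the tournament winner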
for i in range(mu):
tournament_size = 5
if self.pop_size <= tournament_size:
parent_ids.append(0)
else:
arr = np.array([1]*tournament_size + [0] * (self.pop_size-tournament_size))
np.random.shuffle(arr)
one_idx = np.where(arr == 1)
parent_ids.append(one_idx[0][0])
else:
raise NotImplementedError
self.logger.debug('Selected parents:')
self.logger.debug(parent_ids)
return parent_ids
def step(self) -> float:
"""
Performs one step of parent selection -> offspring creation -> survival selection
:return: average population fittness
"""
# Step 2: Parent selection
parent_ids = self.select_parents()
# Step 3: Variation / create offspring
children = []
for id in parent_ids:
# for each parent create exactly one offspring (use the frac_mutants) parameter to determine
# if more recombination or mutation should be performed
parent = self.population[id]
new_pop = parent.mutate()
if np.random.uniform(0., 1., 1) < self.frac_mutants:
new_pop = parent.recombine(new_pop)
children.append(new_pop)
self._func_evals += 1
self.logger.debug('Children:')
self.logger.debug(children)
# Step 4: Survival selection
# (\mu + \lambda)-selection i.e. combine offspring and parents in one sorted list, keep the #pop_size best
self.population.extend(children)
self.population.sort(key=lambda x: x.fitness)
self.population = self.population[:self.pop_size]
self.trajectory.append(self.population[0])
return self.get_average_fitness()
def optimize(self):
"""
Simple optimization loop that stops after a predetermined number of function evaluations
:return:
"""
step = 1
while self._func_evals < self.max_func_evals:
avg_fitness = self.step()
self.logger.info(
'Step {:>3d} | Average fitness {:>10.7f} | Best fitness {:>10.7f} | #Func Evals: {:>4d}'.format(
step, avg_fitness, self.population[0].fitness, self._func_evals))
step += 1
return self.population[0]
if __name__ == '__main__':
"""
Simple main to give an example of how to use the EA
"""
from target_function import ackley
np.random.seed(0) # fix seed for comparisons sake
logging.basicConfig(level=logging.INFO)
dimensionality = 2
max_func_evals = 500 * dimensionality
pop_size = 20
ea = EA(ackley, pop_size, dimensionality, selection_type=ParentSelection.TOURNAMENT,
total_number_of_function_evaluations=max_func_evals)
optimum = ea.optimize()
# print(ea.trajectory)
print(optimum)
print('#' * 120)
ea = EA(ackley, pop_size, dimensionality, selection_type=ParentSelection.FITNESS,
total_number_of_function_evaluations=max_func_evals)
optimum = ea.optimize()
# print(ea.trajectory)
print(optimum)
print('#' * 120)
ea = EA(ackley, pop_size, dimensionality, selection_type=ParentSelection.NEUTRAL,
total_number_of_function_evaluations=max_func_evals)
optimum = ea.optimize()
# print(ea.trajectory)
print(optimum)
print('#' * 120)
|
the-stack_0_12670 | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the SageMaker TrainingJob API.
"""
import botocore
import pytest
import logging
from typing import Dict
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import (
service_marker,
create_sagemaker_resource,
wait_for_status,
sagemaker_client,
)
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources
from e2e.common import config as cfg
from time import sleep
RESOURCE_PLURAL = "trainingjobs"
@pytest.fixture(scope="function")
def xgboost_training_job_debugger():
resource_name = random_suffix_name("xgboost-trainingjob-debugger", 32)
replacements = REPLACEMENT_VALUES.copy()
replacements["TRAINING_JOB_NAME"] = resource_name
reference, _, resource = create_sagemaker_resource(
resource_plural=RESOURCE_PLURAL,
resource_name=resource_name,
spec_file="xgboost_trainingjob_debugger",
replacements=replacements,
)
assert resource is not None
assert k8s.get_resource_arn(resource) is not None
yield (reference, resource)
if k8s.get_resource_exists(reference):
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted
def get_sagemaker_training_job(training_job_name: str):
try:
training_job = sagemaker_client().describe_training_job(
TrainingJobName=training_job_name
)
return training_job
except botocore.exceptions.ClientError as error:
logging.error(
f"SageMaker could not find a training debugger job with the name {training_job_name}. Error {error}"
)
return None
# TODO: Move to __init__.py
def get_training_sagemaker_status(training_job_name: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc["TrainingJobStatus"]
def get_training_resource_status(reference: k8s.CustomResourceReference):
resource = k8s.get_resource(reference)
assert "trainingJobStatus" in resource["status"]
return resource["status"]["trainingJobStatus"]
def get_training_debugger_sagemaker_status(training_job_name: str):
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc["DebugRuleEvaluationStatuses"][0]["RuleEvaluationStatus"]
def get_training_debugger_resource_status(reference: k8s.CustomResourceReference):
resource = k8s.get_resource(reference)
resource_status = resource["status"]["debugRuleEvaluationStatuses"][0][
"ruleEvaluationStatus"
]
assert resource_status is not None
return resource_status
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_status(
self,
training_job_name,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_sagemaker_status,
training_job_name,
)
def _wait_resource_training_status(
self,
reference: k8s.CustomResourceReference,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_resource_status,
reference,
)
def _assert_training_status_in_sync(
self, training_job_name, reference, expected_status
):
assert (
self._wait_sagemaker_training_status(training_job_name, expected_status)
== self._wait_resource_training_status(reference, expected_status)
== expected_status
)
def _wait_sagemaker_training_debugger_status(
self,
training_job_name,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_debugger_sagemaker_status,
training_job_name,
)
def _wait_resource_training_debugger_status(
self,
reference: k8s.CustomResourceReference,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_debugger_resource_status,
reference,
)
def _assert_training_debugger_status_in_sync(
self, training_job_name, reference, expected_status
):
assert (
self._wait_sagemaker_training_debugger_status(
training_job_name, expected_status
)
== self._wait_resource_training_debugger_status(reference, expected_status)
== expected_status
)
def test_completed(self, xgboost_training_job_debugger):
(reference, resource) = xgboost_training_job_debugger
assert k8s.get_resource_exists(reference)
training_job_name = resource["spec"].get("trainingJobName", None)
assert training_job_name is not None
training_job_desc = get_sagemaker_training_job(training_job_name)
assert k8s.get_resource_arn(resource) == training_job_desc["TrainingJobArn"]
assert training_job_desc["TrainingJobStatus"] == cfg.JOB_STATUS_INPROGRESS
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")
self._assert_training_status_in_sync(
training_job_name, reference, cfg.JOB_STATUS_COMPLETED
)
# TODO: This test is failing
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")
self._assert_training_debugger_status_in_sync(
training_job_name, reference, cfg.DEBUGGERJOB_STATUS_COMPLETED
)
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True")
# Check that you can delete a completed resource from k8s
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted is True
|
the-stack_0_12671 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='scrapy-autounit',
version='0.0.22',
author='',
author_email='',
description='Automatic unit test generation for Scrapy.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/fcanobrash/scrapy-autounit',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
],
install_requires=[
'pathlib',
'datadiff==2.0.0',
],
entry_points = {
'console_scripts': ['autounit-inspect=scrapy_autounit.inspect:main'],
},
)
|
the-stack_0_12673 | # find the minimum number of coins needed to make up a given amount
# greedy version, not dynamic programming version
denominations = [1, 2, 5, 10, 20, 50, 100, 1000]
# add the largest coin that does not exceed the target amount to the total
def coins_required(amount):
total = 0
coins = []
for denomination in denominations[::-1]:
while total + denomination <= amount:
total += denomination
coins.append(denomination)
return coins
print(coins_required(2035))
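# For coin systems where the greedy choice is not optimal, a dynamic-programming
# variant is needed. A minimal sketch (assumes the same `denominations` list and
# returns the coin count rather than the coin list):
def coins_required_dp(amount):
    # best[a] holds the fewest coins needed to reach amount a (None if unreachable)
    best = [0] + [None] * amount
    for a in range(1, amount + 1):
        options = [best[a - d] for d in denominations if d <= a and best[a - d] is not None]
        best[a] = min(options) + 1 if options else None
    return best[amount]
print(coins_required_dp(2035))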
|
the-stack_0_12675 | from typing import List
from secrets import choice
from discord.ext import commands
from .. import config
keywords = ["dm"]
reply = (
"SCAM ALERT! Never accept any trade on DEVNET, SOL on this network are fake and unlimited.",
"SCAM ALERT! PLEASE ONLY DO BUSINESS ON MAGICEDEN OR SOLANART.",
"SCAM ALERT! TO STAY SAFE, PLEASE TURN OFF YOUR DMS!!.",
)
def check(word, list):
return word in list
class ScamAlert(commands.Cog):
bot: commands.Bot
allowed_channels: List[str]
def __init__(self, bot):
self.bot = bot
self.allowed_channels = config.allowed_check_scam_channels
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot:
return
if not message.content:
return
if message.channel.name in self.allowed_channels:
msg_to_list = message.content.split()
msg_to_list = [x.lower() for x in msg_to_list]
for word in keywords:
if check(word.lower(), msg_to_list):
await message.channel.send(choice(reply))
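# A cog only becomes active once an extension-level setup hook registers it with the
# bot. A minimal sketch, assuming this module is loaded via bot.load_extension()
# (discord.py 1.x-style extension entry point):
def setup(bot: commands.Bot):
    bot.add_cog(ScamAlert(bot))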
|
the-stack_0_12676 | # Copyright 2020 . All Rights Reserved.
# Author : Lei Sha
from Hyperparameters import args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g')
parser.add_argument('--modelarch', '-m')
parser.add_argument('--aspect', '-a')
parser.add_argument('--choose', '-c')
cmdargs = parser.parse_args()
print(cmdargs)
usegpu = True
if cmdargs.gpu is None:
usegpu = False
args['device'] = 'cpu'
else:
usegpu = True
args['device'] = 'cuda:' + str(cmdargs.gpu)
if cmdargs.modelarch is None:
args['model_arch'] = 'lstm'
else:
args['model_arch'] = cmdargs.modelarch
if cmdargs.aspect is None:
args['aspect'] = 0
else:
args['aspect'] = int(cmdargs.aspect)
if cmdargs.choose is None:
args['choose'] = 0
else:
args['choose'] = int(cmdargs.choose)
import functools
print = functools.partial(print, flush=True)
import os
from textdataBeer import TextDataBeer
import time, sys
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import time, datetime
import math, random
import nltk
import pickle
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
# import matplotlib.pyplot as plt
import numpy as np
import copy
from LanguageModel_beer import LanguageModel
import LSTM_IB_GAN_beer
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (%s)' % (asMinutes(s), datetime.datetime.now())
class Runner:
def __init__(self):
self.model_path = args['rootDir'] + '/chargemodel_' + args['model_arch'] + '.mdl'
def main(self):
if args['model_arch'] in ['lstmibgan']:
args['classify_type'] = 'single'
args['batchSize'] = 256
self.textData = TextDataBeer('beer')
# self.start_token = self.textData.word2index['START_TOKEN']
# self.end_token = self.textData.word2index['END_TOKEN']
args['vocabularySize'] = self.textData.getVocabularySize()
args['chargenum'] = 5
args['embeddingSize'] = self.textData.index2vector.shape[1]
print(self.textData.getVocabularySize())
args['model_arch'] = 'lstmibgan'
# args['aspect'] = 0
args['hiddenSize'] = 200
print(args)
if args['model_arch'] == 'lstmibgan':
print('Using LSTM information bottleneck GAN model for Beer.')
LM = torch.load(args['rootDir']+'/LMbeer.pkl', map_location=args['device'])
for param in LM.parameters():
param.requires_grad = False
ppl = self.CalPPL(LM)
print('PPL=',ppl)
# LM=0
LSTM_IB_GAN_beer.train(self.textData, LM, self.textData.index2vector)
def indexesFromSentence(self, sentence):
return [self.textData.word2index[word] if word in self.textData.word2index else self.textData.word2index['UNK']
for word in sentence]
def tensorFromSentence(self, sentence):
indexes = self.indexesFromSentence(sentence)
# indexes.append(self.textData.word2index['END_TOKEN'])
return torch.tensor(indexes, dtype=torch.long, device=args['device']).view(-1, 1)
def evaluate(self, sentence, correctlabel, max_length=20):
with torch.no_grad():
input_tensor = self.tensorFromSentence(sentence)
input_length = input_tensor.size()[0]
# encoder_hidden = encoder.initHidden()
# encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
x = {}
# print(input_tensor)
x['enc_input'] = torch.transpose(input_tensor, 0, 1)
x['enc_len'] = [input_length]
x['labels'] = [correctlabel]
# print(x['enc_input'], x['enc_len'])
# print(x['enc_input'].shape)
decoded_words, label, _ = self.model.predict(x, True)
return decoded_words, label
def evaluateRandomly(self, n=10):
for i in range(n):
sample = random.choice(self.textData.datasets['train'])
print('>', sample)
output_words, label = self.evaluate(sample[2], sample[1])
output_sentence = ' '.join(output_words[0]) # batch=1
print('<', output_sentence, label)
print('')
def CalPPL(self, LM):
batches = self.textData.getBatches('dev')
total = 0
loss_sum = 0
for index, batch in enumerate(batches):
x = {}
x['dec_input'] = autograd.Variable(torch.LongTensor(batch.decoderSeqs)).to(args['device'])
x['dec_len'] = batch.decoder_lens
x['dec_target'] = autograd.Variable(torch.LongTensor(batch.targetSeqs)).to(args['device'])
total += x['dec_input'].size()[0]
print(x['dec_input'].size())
embedding = nn.Embedding.from_pretrained(torch.FloatTensor(self.textData.index2vector))
decoderTargetsEmbeddings = embedding(x['dec_target'])
_, recon_loss = LM.getloss(x['dec_input'],decoderTargetsEmbeddings, x['dec_target'] )
loss_sum += recon_loss.sum()
loss_mean = loss_sum / total
return torch.exp(loss_mean)
if __name__ == '__main__':
r = Runner()
r.main() |
the-stack_0_12678 | import skimage.transform as st
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, feature
def ex_1(): # Hough Transform
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
h, theta, d = st.hough_line(image)
fig, (ax0, ax1) = plt.subplots(1, 2)
plt.tight_layout()
ax0.imshow(image, plt.cm.gray)
ax0.set_title('input')
ax0.set_axis_off()
ax1.imshow(np.log(1 + h))
ax1.set_title('Hough')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
plt.show()
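# Illustrative helper (a sketch, not used by the examples): a Hough peak (angle, dist)
# returned by hough_line_peaks describes the line x*cos(angle) + y*sin(angle) = dist,
# so the y coordinate at any column x can be recovered as below (valid when
# sin(angle) != 0, i.e. for non-vertical lines). ex_2 performs exactly this
# computation inline when drawing the detected lines.
def hough_line_y(angle, dist, x):
    """Return the y coordinate of the Hough line (angle, dist) at column x."""
    return (dist - x * np.cos(angle)) / np.sin(angle)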
def ex_2(): # Hough Transform Line Detection
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
h, theta, d = st.hough_line(image)
fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
plt.tight_layout()
ax0.imshow(image, plt.cm.gray)
ax0.set_title('input image')
ax0.set_axis_off()
ax1.imshow(np.log(1 + h))
ax1.set_title('Hough transform')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
ax2.imshow(image, plt.cm.gray)
row1, col1 = image.shape
for _, angle, dist in zip(*st.hough_line_peaks(h, theta, d)):
y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)
ax2.plot((0, col1), (y0, y1), '-r')
ax2.axis((0, col1, row1, 0))
ax2.set_title('Detected')
ax2.set_axis_off()
plt.show()
def ex_3(): # Probabilistic Hough Transform
image = data.camera()
edges = feature.canny(image, sigma=2, low_threshold=1, high_threshold=25)
lines = st.probabilistic_hough_line(edges, threshold=10, line_length=5, line_gap=3)
fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
plt.tight_layout()
ax0.imshow(image, plt.cm.gray)
ax0.set_title('input')
ax0.set_axis_off()
ax1.imshow(edges, plt.cm.gray)
ax1.set_title('canny edges')
ax1.set_axis_off()
ax2.imshow(edges * 0)
for line in lines:
p0, p1 = line
ax2.plot((p0[0], p1[0]), (p0[1], p1[1]))
row2, col2 = image.shape
ax2.axis((0, col2, row2, 0))
ax2.set_title('probabilistic')
ax2.set_axis_off()
plt.show()
if __name__ == '__main__':
ex_3()
|
the-stack_0_12682 | from django.utils.encoding import force_unicode
from django.forms.forms import BoundField
from django.utils.html import conditional_escape
def as_p(instance=None, cls=None):
"Returns this form rendered as HTML <p>s."
if not instance and not cls:
raise TypeError('as_p takes at least 1 argument (0 given)')
elif instance and cls:
raise TypeError('as_p takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row = u'%s',
row_ender = '</p>',
help_text_html = u' %s',
errors_on_separate_row = True)
def as_div(instance=None, cls=None):
"Returns this form rendered as HTML <div>s."
if not instance and not cls:
raise TypeError('as_div takes at least 1 argument (0 given)')
elif instance and cls:
raise TypeError('as_div takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<div%(html_class_attr)s>%(label)s %(field)s%(help_text)s</div>',
error_row = u'%s',
row_ender = '</div>',
help_text_html = u' %s',
errors_on_separate_row = True)
def as_table(instance=None, cls=None):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
if not instance and not cls:
raise TypeError('as_table takes at least 1 argument (0 given)')
elif instance and cls:
raise TypeError('as_table takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = u'<tr><td colspan="2">%s</td></tr>',
row_ender = u'</td></tr>',
help_text_html = u'<br />%s',
errors_on_separate_row = False)
def as_ul(instance=None, cls=None):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
if not instance and not cls:
raise TypeError('as_ul takes at least 1 argument (0 given)')
elif instance and cls:
raise TypeError('as_ul takes at most 1 argument (2 given)')
elif cls:
# This might not always work, if your form requires params,
# pass an instance instead of a class!
instance = cls()
return html_output(
instance,
normal_row = u'<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row = u'<li>%s</li>',
row_ender = '</li>',
help_text_html = u' %s',
errors_on_separate_row = False)
def html_output(form, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = form.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in form.fields.items():
html_class_attr = ''
bf = BoundField(form, field, name)
bf_errors = form.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
if bf.is_hidden:
if bf_errors:
top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
hidden_fields.append(unicode(bf))
else:
# Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row:
output.append(error_row % \
'{%% if form.%s.errors %%}{%% for error in form.%s.errors %%}{{ error }}{%% endfor %%}{%% endif %%}' \
% (name, name,))
output.append(normal_row % {
'errors': \
'{%% if form.%s.errors %%}{%% for error in form.%s.errors %%}{{ error }}{%% endfor %%}{%% endif %%}' \
% (name, name,),
'label': '{{ form.%s.label_tag }}' % (name,),
'field': '{{ form.%s }}' % (name,),
'help_text': '',
'html_class_attr': html_class_attr
})
if top_errors:
output.insert(0,
r'{% if form.errors %}{% for field, error in form.errors %}(Hidden field {{ field }}) {{ error }}{% endfor %}{% endif %}'
)
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = u'{% for field in form.hidden_fields %}{{ field }}{% endfor %}'
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {'errors': '', 'label': '',
'field': '', 'help_text':'',
'html_class_attr': html_class_attr})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return u'\n'.join(output)
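# Usage sketch (illustrative only; `ContactForm` is a hypothetical form class). The
# helpers return Django *template* markup that still contains {{ form.field }} tags,
# so the result is meant to be inserted into a template and rendered with the form in
# the template context:
#
#     from django import forms
#
#     class ContactForm(forms.Form):
#         name = forms.CharField()
#         message = forms.CharField(widget=forms.Textarea)
#
#     template_fragment = as_p(cls=ContactForm)
#     # render template_fragment with {'form': ContactForm()} in the template context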
|
the-stack_0_12683 | import numpy as np
import statistics as stat
from config import *
def process_info(info):
"""
Process a line of info from data source and extract distance
:param info: a line of info. See below for format sample
:return: directory of {node_id (str): distance}
"""
dist = {}
rough_split = info.split('[')
if len(rough_split) <= 2:
return None
dis_list = rough_split[1].split(']')[0].split(',')
if len(dis_list) < 4:
return None
id_list = rough_split[2].split(']')[0].split(',')
if len(id_list) < 4:
return None
if len(dis_list) != len(id_list) or len(dis_list) < 4:
return None
for i in range(0, len(dis_list)):
id_list[i] = id_list[i].strip('"')
if id_list[i] in ref_nodes:
dist[id_list[i]] = float(dis_list[i].strip('"'))
if len(dist) < 4:
return None
return dist
pre_process_threshold = 0.5
def pre_process_data(ranges):
"""
Pre-process a list of ranges from one reference node and eliminate points that are away from the medium by
pre_process_threshold
:param ranges: a list of ranges
:return average range after filtering
"""
median = stat.median(ranges)
s = 0 # sum
c = 0 # count
for r in ranges:
if median - pre_process_threshold < r < median + pre_process_threshold:
s += r
c += 1
if c == 0:
return None
else:
return s / c
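# Derivation sketch for calc_position below (standard linearised trilateration).
# For the unknown position x, reference nodes p_i and measured distances d_i:
#     |x - p_i|^2 = d_i^2
# Subtracting the base node's equation |x - p_0|^2 = d_0^2 cancels the quadratic term:
#     (p_i - p_0) . x = -0.5 * (d_i^2 - d_0^2 - (|p_i|^2 - |p_0|^2))
# Stacking one such row per non-base node yields A x = B, which is solved through the
# normal equations x = (A^T A)^-1 A^T B, matching the matrix algebra in the function.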
def calc_position(dist):
"""
Calculate position based on distances to reference point
:param dist: directory of {node_id (str): distance}
:return: 1D np.array of position [x, y, z]
"""
A = np.array([0, 0, 0])
B = np.array([0])
for i in dist.keys():
if i == base_node:
continue
A = np.vstack((A, ref_nodes[i] - ref_nodes[base_node]))
B = np.vstack(
(B, dist[i] ** 2 - dist[base_node] ** 2 - np.dot(ref_nodes[i] ** 2 - ref_nodes[base_node] ** 2, np.array([1, 1, 1]))))
A = A[1:len(dist)]
B = B[1:len(dist)] * (-0.5)
AT = np.transpose(A)
B = np.dot(AT, B)
rev = np.linalg.inv(np.dot(AT, A))
pos = np.dot(rev, B)
posT = np.transpose(pos)
return posT[0]
if __name__ == '__main__':
dist = process_info(
'{"utime": 2157172559,"survey": {"seq": 26,"mask": 15,"nrngs": [{"mask": 14,"nrng": ["2.495","3.583","2.443"]},{"mask": 13,"nrng": ["1.613","5.014","3.034"]},{"mask": 11,"nrng": ["3.550","4.971","5.377"]},{"mask": 7,"nrng": ["4.971","3.018","5.377"]}]}}')
if dist is not None:
print(dist)
pos = calc_position(dist)
if pos is not None:
print(pos)
else:
print("Fail to calculate position")
else:
print("Fail to process info")
|
the-stack_0_12685 | # Copyright (c) 2013, Yanky and contributors
# For license information, please see license.txt
import frappe
def execute(filters=None):
columns, data = [], []
columns = get_columns()
article_data = get_article_data(filters)
for article in article_data:
temp_dict = {
"title":article.get("title"),
"isbn":article.get("isbn"),
"stock":article.get("stock"),
"total_quantity":article.get("total_quantity"),
"issued_count":article.get("total_quantity") - article.get("stock")
}
data.append(temp_dict)
chart = get_chart()
return columns, data, None, chart
def get_columns():
columns = ["" for column in range(5)]
columns[0] = {
"label": ("Title"),
"fieldname": "title",
"fieldtype": "Link",
"options": "Article",
"width": 200
}
columns[1] = {
"label": ("Isbn"),
"fieldname": "isbn",
"width": 200
}
columns[2] = {
"label": ("Stock"),
"fieldname": "stock",
"width": 150
}
columns[3] = {
"label": ("Total Quantity"),
"fieldname": "total_quantity",
"width": 150
}
columns[4] = {
"label": ("Issued Count"),
"fieldname": "issued_count",
"width": 150
}
return columns
def get_article_data(filters) :
if filters:
query = "select title, isbn, stock, total_quantity from tabArticle where title = '" + str(filters.get("title_filter")) + "'"
article_data = frappe.db.sql(query, as_dict=1)
else:
article_data = frappe.db.sql("""select title, isbn, stock, total_quantity from tabArticle """, as_dict=1)
return article_data
def get_chart():
chart_data = {
"labels": frappe.db.get_list('Article', fields=['title'],as_list=True),
"datasets": [
{
'name': "Stock",
'values': frappe.db.get_list('Article',fields=['stock'],as_list=True)
},
{
'name': "Total Quantity",
'values': frappe.db.get_list('Article',fields=['total_quantity'],as_list=True)
}
]
}
chart = {
"title": "Book Avialability",
"data": chart_data,
"type": 'bar',
"height": 250,
"color": ['#4463F0', '#7cd6fd']
}
return chart |
the-stack_0_12686 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
self.metadata.thing_classes.append("object")
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
Because rendering the visualization takes a considerable amount of time,
this helps improve throughput a little bit when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
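# Minimal usage sketch (assumptions: a detectron2 model-zoo config name and a local
# "input.jpg"; both are placeholders to adapt to your setup):
if __name__ == "__main__":
    from detectron2 import model_zoo
    from detectron2.config import get_cfg

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    demo = VisualizationDemo(cfg)
    predictions, vis_output = demo.run_on_image(cv2.imread("input.jpg"))
    cv2.imwrite("output.jpg", vis_output.get_image()[:, :, ::-1])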
|
the-stack_0_12687 | from torch.utils.data import TensorDataset
import numpy as np
import logging
import os
import random
import torch
import time
from tqdm import tqdm
from _utils import *
logger = logging.getLogger(__name__)
def load_and_cache_gen_data(args, filename, pool, tokenizer, split_tag, only_src=False, is_sample=False):
# cache the data into args.cache_path except it is sampled
# only_src: control whether to return only source ids for bleu evaluating (dev/test)
# return: examples (Example object), data (TensorDataset)
data_tag = '_all' if args.data_num == -1 else '_%d' % args.data_num
cache_fn = '{}/{}.pt'.format(args.cache_path, split_tag + ('_src' if only_src else '') + data_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, min(5000, len(examples)))
if split_tag == 'train':
calc_stats(examples, tokenizer, is_tokenize=True)
else:
calc_stats(examples)
if os.path.exists(cache_fn) and not is_sample:
logger.info("Load cache data from %s", cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info("Sample 5k data for computing bleu from %s", filename)
else:
logger.info("Create cache data into %s", cache_fn)
tuple_examples = [(example, idx, tokenizer, args, split_tag) for idx, example in enumerate(examples)]
features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
if split_tag == 'test' or only_src:
data = TensorDataset(all_source_ids)
else:
all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_target_ids)
if args.local_rank in [-1, 0] and not is_sample:
torch.save(data, cache_fn)
return examples, data
def load_and_cache_clone_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
cache_fn = '{}/{}.pt'.format(args.cache_path, split_tag + ('_all' if args.data_num == -1 else '_%d' % args.data_num))
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, int(len(examples) * 0.1))
calc_stats(examples, tokenizer, is_tokenize=True)
if os.path.exists(cache_fn):
logger.info("Load cache data from %s", cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info("Sample 10 percent of data from %s", filename)
elif args.data_num == -1:
logger.info("Create cache data into %s", cache_fn)
tuple_examples = [(example, idx, tokenizer, args) for idx, example in enumerate(examples)]
features = pool.map(convert_clone_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_labels)
if args.local_rank in [-1, 0] and args.data_num == -1:
torch.save(data, cache_fn)
return examples, data
def load_and_cache_defect_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
cache_fn = os.path.join(args.cache_path, split_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, int(len(examples) * 0.1))
calc_stats(examples, tokenizer, is_tokenize=True)
if os.path.exists(cache_fn):
logger.info("Load cache data from %s", cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info("Sample 10 percent of data from %s", filename)
elif args.data_num == -1:
logger.info("Create cache data into %s", cache_fn)
tuple_examples = [(example, idx, tokenizer, args) for idx, example in enumerate(examples)]
features = pool.map(convert_defect_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
# features = [convert_clone_examples_to_features(x) for x in tuple_examples]
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_labels)
if args.local_rank in [-1, 0] and args.data_num == -1:
torch.save(data, cache_fn)
return examples, data
def load_and_cache_multi_gen_data(args, pool, tokenizer, split_tag, only_src=False, is_sample=False):
cache_fn = os.path.join(args.cache_path, split_tag)
if os.path.exists(cache_fn) and not is_sample:
logger.info("Load cache data from %s", cache_fn)
examples_data_dict = torch.load(cache_fn)
else:
examples_data_dict = {}
task_list = ['summarize', 'translate', 'refine', 'concode', 'defect']
for task in task_list:
if task == 'summarize':
sub_tasks = ['ruby', 'javascript', 'go', 'python', 'java', 'php']
elif task == 'translate':
sub_tasks = ['java-cs', 'cs-java']
elif task == 'refine':
sub_tasks = ['small', 'medium']
else:
sub_tasks = ['none']
args.task = task
for sub_task in sub_tasks:
args.sub_task = sub_task
if task == 'summarize':
args.max_source_length = 256
args.max_target_length = 128
elif task == 'translate':
args.max_source_length = 320
args.max_target_length = 256
elif task == 'refine':
if sub_task == 'small':
args.max_source_length = 130
args.max_target_length = 120
else:
args.max_source_length = 240
args.max_target_length = 240
elif task == 'concode':
args.max_source_length = 320
args.max_target_length = 150
elif task == 'defect':
args.max_source_length = 512
args.max_target_length = 3  # as we do not need to add lang ids
filename = get_filenames(args.data_dir, args.task, args.sub_task, split_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, min(5000, len(examples)))
if split_tag == 'train':
calc_stats(examples, tokenizer, is_tokenize=True)
else:
calc_stats(examples)
tuple_examples = [(example, idx, tokenizer, args, split_tag) for idx, example in enumerate(examples)]
if args.data_num == -1:
features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
else:
features = [convert_examples_to_features(x) for x in tuple_examples]
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
if only_src:
data = TensorDataset(all_source_ids)
else:
all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_target_ids)
examples_data_dict['{}_{}'.format(task, sub_task) if sub_task != 'none' else task] = (examples, data)
if args.local_rank in [-1, 0] and not is_sample:
torch.save(examples_data_dict, cache_fn)
logger.info("Save data into %s", cache_fn)
return examples_data_dict
def get_filenames(data_root, task, sub_task, split=''):
if task == 'generation':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.json'.format(data_dir)
dev_fn = '{}/dev.json'.format(data_dir)
test_fn = '{}/test.json'.format(data_dir)
elif task == 'concode':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.json'.format(data_dir)
dev_fn = '{}/dev.json'.format(data_dir)
test_fn = '{}/test.json'.format(data_dir)
elif task == 'summarize':
data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
train_fn = '{}/train.jsonl'.format(data_dir)
dev_fn = '{}/valid.jsonl'.format(data_dir)
test_fn = '{}/test.jsonl'.format(data_dir)
elif task == 'refine':
data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
train_fn = '{}/train.buggy-fixed.buggy,{}/train.buggy-fixed.fixed'.format(data_dir, data_dir)
dev_fn = '{}/valid.buggy-fixed.buggy,{}/valid.buggy-fixed.fixed'.format(data_dir, data_dir)
test_fn = '{}/test.buggy-fixed.buggy,{}/test.buggy-fixed.fixed'.format(data_dir, data_dir)
elif task == 'translate':
data_dir = '{}/{}'.format(data_root, task)
if sub_task == 'cs-java':
train_fn = '{}/train.java-cs.txt.cs,{}/train.java-cs.txt.java'.format(data_dir, data_dir)
dev_fn = '{}/valid.java-cs.txt.cs,{}/valid.java-cs.txt.java'.format(data_dir, data_dir)
test_fn = '{}/test.java-cs.txt.cs,{}/test.java-cs.txt.java'.format(data_dir, data_dir)
else:
train_fn = '{}/train.java-cs.txt.java,{}/train.java-cs.txt.cs'.format(data_dir, data_dir)
dev_fn = '{}/valid.java-cs.txt.java,{}/valid.java-cs.txt.cs'.format(data_dir, data_dir)
test_fn = '{}/test.java-cs.txt.java,{}/test.java-cs.txt.cs'.format(data_dir, data_dir)
elif task == 'clone':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.txt'.format(data_dir)
dev_fn = '{}/valid.txt'.format(data_dir)
test_fn = '{}/test.txt'.format(data_dir)
elif task == 'defect':
data_dir = '{}/{}'.format(data_root, task)
train_fn = '{}/train.jsonl'.format(data_dir)
dev_fn = '{}/valid.jsonl'.format(data_dir)
test_fn = '{}/test.jsonl'.format(data_dir)
if split == 'train':
return train_fn
elif split == 'dev':
return dev_fn
elif split == 'test':
return test_fn
else:
return train_fn, dev_fn, test_fn
def read_examples(filename, data_num, task):
read_example_dict = {
'summarize': read_summarize_examples,
'refine': read_refine_examples,
'translate': read_translate_examples,
'generation': read_generation_examples,
'concode': read_concode_examples,
'clone': read_clone_examples,
'defect': read_defect_examples,
}
return read_example_dict[task](filename, data_num)
def calc_stats(examples, tokenizer=None, is_tokenize=False):
avg_src_len = []
avg_trg_len = []
avg_src_len_tokenize = []
avg_trg_len_tokenize = []
for ex in examples:
if is_tokenize:
avg_src_len.append(len(ex.source.split()))
avg_trg_len.append(len(str(ex.target).split()))
avg_src_len_tokenize.append(len(tokenizer.tokenize(ex.source)))
avg_trg_len_tokenize.append(len(tokenizer.tokenize(str(ex.target))))
else:
avg_src_len.append(len(ex.source.split()))
avg_trg_len.append(len(str(ex.target).split()))
if is_tokenize:
logger.info("Read %d examples, avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d",
len(examples), np.mean(avg_src_len), np.mean(avg_trg_len), max(avg_src_len), max(avg_trg_len))
logger.info("[TOKENIZE] avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d",
np.mean(avg_src_len_tokenize), np.mean(avg_trg_len_tokenize), max(avg_src_len_tokenize),
max(avg_trg_len_tokenize))
else:
logger.info("Read %d examples, avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d",
len(examples), np.mean(avg_src_len), np.mean(avg_trg_len), max(avg_src_len), max(avg_trg_len))
def get_elapse_time(t0):
elapse_time = time.time() - t0
if elapse_time > 3600:
hour = int(elapse_time // 3600)
minute = int((elapse_time % 3600) // 60)
return "{}h{}m".format(hour, minute)
else:
minute = int((elapse_time % 3600) // 60)
return "{}m".format(minute)
|
the-stack_0_12689 | import os
from conans import CMake, ConanFile, tools
class QtXlsxWriterConan(ConanFile):
name = "qtxlsxwriter"
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/dbzhang800/QtXlsxWriter"
description = ".xlsx file reader and writer for Qt5"
topics = ("qtxlsxwriter", "excel", "xlsx", "conan-recipe")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False]
}
default_options = {
"shared": False,
"fPIC": True
}
generators = "cmake"
exports_sources = "CMakeLists.txt", "patches/**"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["QT_ROOT"] = self.deps_cpp_info["qt"].rootpath.replace("\\", "/")
self._cmake.configure()
return self._cmake
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("qt/5.15.2")
def source(self):
for source in self.conan_data["sources"][self.version]:
url = source["url"]
filename = url.rsplit("/", 1)[-1]
tools.download(url, filename, sha256=source["sha256"])
tools.unzip(os.path.join(self.source_folder, "v0.3.0.zip"), self._source_subfolder, strip_root=True)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("LICENSE", dst="licenses")
def package_info(self):
if not self.options.shared:
self.cpp_info.defines = ["QTXLSX_STATIC"]
self.cpp_info.libs = tools.collect_libs(self)
|
the-stack_0_12690 | """Viessmann ViCare climate device."""
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
PRESET_ECO,
PRESET_COMFORT,
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_AUTO,
)
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE, PRECISION_WHOLE
from . import DOMAIN as VICARE_DOMAIN
from . import VICARE_API
from . import VICARE_NAME
_LOGGER = logging.getLogger(__name__)
VICARE_MODE_DHW = "dhw"
VICARE_MODE_DHWANDHEATING = "dhwAndHeating"
VICARE_MODE_FORCEDREDUCED = "forcedReduced"
VICARE_MODE_FORCEDNORMAL = "forcedNormal"
VICARE_MODE_OFF = "standby"
VICARE_PROGRAM_ACTIVE = "active"
VICARE_PROGRAM_COMFORT = "comfort"
VICARE_PROGRAM_ECO = "eco"
VICARE_PROGRAM_EXTERNAL = "external"
VICARE_PROGRAM_HOLIDAY = "holiday"
VICARE_PROGRAM_NORMAL = "normal"
VICARE_PROGRAM_REDUCED = "reduced"
VICARE_PROGRAM_STANDBY = "standby"
VICARE_HOLD_MODE_AWAY = "away"
VICARE_HOLD_MODE_HOME = "home"
VICARE_HOLD_MODE_OFF = "off"
VICARE_TEMP_HEATING_MIN = 3
VICARE_TEMP_HEATING_MAX = 37
SUPPORT_FLAGS_HEATING = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
VICARE_TO_HA_HVAC_HEATING = {
VICARE_MODE_DHW: HVAC_MODE_OFF,
VICARE_MODE_DHWANDHEATING: HVAC_MODE_AUTO,
VICARE_MODE_FORCEDREDUCED: HVAC_MODE_OFF,
VICARE_MODE_FORCEDNORMAL: HVAC_MODE_HEAT,
VICARE_MODE_OFF: HVAC_MODE_OFF,
}
HA_TO_VICARE_HVAC_HEATING = {
HVAC_MODE_HEAT: VICARE_MODE_FORCEDNORMAL,
HVAC_MODE_OFF: VICARE_MODE_FORCEDREDUCED,
HVAC_MODE_AUTO: VICARE_MODE_DHWANDHEATING,
}
VICARE_TO_HA_PRESET_HEATING = {
VICARE_PROGRAM_COMFORT: PRESET_COMFORT,
VICARE_PROGRAM_ECO: PRESET_ECO,
}
HA_TO_VICARE_PRESET_HEATING = {
PRESET_COMFORT: VICARE_PROGRAM_COMFORT,
PRESET_ECO: VICARE_PROGRAM_ECO,
}
PYVICARE_ERROR = "error"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare climate devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
add_entities(
[ViCareClimate(f"{hass.data[VICARE_DOMAIN][VICARE_NAME]} Heating", vicare_api)]
)
class ViCareClimate(ClimateDevice):
"""Representation of the ViCare heating climate device."""
def __init__(self, name, api):
"""Initialize the climate device."""
self._name = name
self._state = None
self._api = api
self._target_temperature = None
self._current_mode = None
self._current_temperature = None
self._current_program = None
def update(self):
"""Let HA know there has been an update from the ViCare API."""
_room_temperature = self._api.getRoomTemperature()
_supply_temperature = self._api.getSupplyTemperature()
if _room_temperature is not None and _room_temperature != PYVICARE_ERROR:
self._current_temperature = _room_temperature
elif _supply_temperature != PYVICARE_ERROR:
self._current_temperature = _supply_temperature
else:
self._current_temperature = None
self._current_program = self._api.getActiveProgram()
# The getCurrentDesiredTemperature call can yield 'error' (str) when the system is in standby
desired_temperature = self._api.getCurrentDesiredTemperature()
if desired_temperature == PYVICARE_ERROR:
desired_temperature = None
self._target_temperature = desired_temperature
self._current_mode = self._api.getActiveMode()
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATING
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def hvac_mode(self):
"""Return current hvac mode."""
return VICARE_TO_HA_HVAC_HEATING.get(self._current_mode)
def set_hvac_mode(self, hvac_mode):
"""Set a new hvac mode on the ViCare API."""
vicare_mode = HA_TO_VICARE_HVAC_HEATING.get(hvac_mode)
if vicare_mode is None:
_LOGGER.error(
"Cannot set invalid vicare mode: %s / %s", hvac_mode, vicare_mode
)
return
_LOGGER.debug("Setting hvac mode to %s / %s", hvac_mode, vicare_mode)
self._api.setMode(vicare_mode)
@property
def hvac_modes(self):
"""Return the list of available hvac modes."""
return list(HA_TO_VICARE_HVAC_HEATING)
@property
def min_temp(self):
"""Return the minimum temperature."""
return VICARE_TEMP_HEATING_MIN
@property
def max_temp(self):
"""Return the maximum temperature."""
return VICARE_TEMP_HEATING_MAX
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_WHOLE
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is not None:
self._api.setProgramTemperature(self._current_program, temp)
self._target_temperature = temp
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
return VICARE_TO_HA_PRESET_HEATING.get(self._current_program)
@property
def preset_modes(self):
"""Return the available preset mode."""
return list(VICARE_TO_HA_PRESET_HEATING)
def set_preset_mode(self, preset_mode):
"""Set new preset mode and deactivate any existing programs."""
vicare_program = HA_TO_VICARE_PRESET_HEATING.get(preset_mode)
if vicare_program is None:
_LOGGER.error(
"Cannot set invalid vicare program: %s / %s",
preset_mode,
vicare_program,
)
return
_LOGGER.debug("Setting preset to %s / %s", preset_mode, vicare_program)
self._api.deactivateProgram(self._current_program)
self._api.activateProgram(vicare_program)
|
the-stack_0_12691 | #!/usr/bin/env python3
import argparse
import curses
import sys
import threading
import traceback
from .source_handler import CandumpHandler, InvalidFrame, SerialHandler
should_redraw = threading.Event()
stop_reading = threading.Event()
can_messages = {}
can_messages_lock = threading.Lock()
thread_exception = None
def reading_loop(source_handler, blacklist):
"""Background thread for reading."""
try:
while not stop_reading.is_set():
try:
frame_id, data = source_handler.get_message()
except InvalidFrame:
continue
except EOFError:
break
if frame_id in blacklist:
continue
# Add the frame to the can_messages dict and tell the main thread to refresh its content
with can_messages_lock:
can_messages[frame_id] = data
should_redraw.set()
stop_reading.wait()
except:
if not stop_reading.is_set():
# Only log exception if we were not going to stop the thread
# When quitting, the main thread calls close() on the serial device
# and read() may throw an exception. We don't want to display it as
# we're stopping the script anyway
global thread_exception
thread_exception = sys.exc_info()
def init_window(stdscr):
"""Init a window filling the entire screen with a border around it."""
stdscr.clear()
stdscr.refresh()
max_y, max_x = stdscr.getmaxyx()
root_window = stdscr.derwin(max_y, max_x, 0, 0)
root_window.box()
return root_window
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data)
def format_data_ascii(data):
"""Try to make an ASCII representation of the bytes.
Non printable characters are replaced by '?' except null character which
is replaced by '.'.
"""
msg_str = ''
for byte in data:
char = chr(byte)
if char == '\0':
msg_str = msg_str + '.'
elif ord(char) < 32 or ord(char) > 126:
msg_str = msg_str + '?'
else:
msg_str = msg_str + char
return msg_str
def main(stdscr, reading_thread):
"""Main function displaying the UI."""
# Don't print typed character
curses.noecho()
curses.cbreak()
curses.curs_set(0) # set cursor state to invisible
# Set getch() to non-blocking
stdscr.nodelay(True)
win = init_window(stdscr)
while True:
# should_redraw is set by the serial thread when new data is available
if should_redraw.wait(timeout=0.05): # Timeout needed in order to react to user input
max_y, max_x = win.getmaxyx()
column_width = 100
id_column_start = 2
bytes_column_start = 13
text_column_start = 38
# Compute row/column counts according to the window size and borders
row_start = 3
lines_per_column = max_y - (1 + row_start)
num_columns = (max_x - 2) // column_width
# Setting up column headers
for i in range(0, num_columns):
win.addstr(1, id_column_start + i * column_width, 'ID')
win.addstr(1, 25 + bytes_column_start + i * column_width, 'Bytes')
win.addstr(1, 30 + text_column_start + i * column_width, 'Text')
win.addstr(3, id_column_start, "Press 'q' to quit")
row = row_start + 2 # The first column starts a bit lower to make space for the 'press q to quit message'
current_column = 0
# Make sure we don't read the can_messages dict while it's being written to in the reading thread
with can_messages_lock:
for frame_id in sorted(can_messages.keys()):
msg = can_messages[frame_id]
msg_bytes = format_data_hex(msg)
msg_str = format_data_ascii(msg)
# print frame ID in decimal and hex
win.addstr(row, id_column_start + current_column * column_width, '%s' % str(frame_id).ljust(5))
win.addstr(row, id_column_start + 18 + current_column * column_width, ('%X' % frame_id).ljust(5))
# print frame bytes
win.addstr(row, 25 + bytes_column_start + current_column * column_width, msg_bytes.ljust(23))
# print frame text
win.addstr(row, 30 + text_column_start + current_column * column_width, msg_str.ljust(8))
row = row + 1
if row >= lines_per_column + row_start:
# column full, switch to the next one
row = row_start
current_column = current_column + 1
if current_column >= num_columns:
break
win.refresh()
should_redraw.clear()
c = stdscr.getch()
if c == ord('q') or not reading_thread.is_alive():
break
elif c == curses.KEY_RESIZE:
win = init_window(stdscr)
should_redraw.set()
def parse_ints(string_list):
int_set = set()
for line in string_list:
try:
int_set.add(int(line, 0))
except ValueError:
continue
return int_set
def run():
parser = argparse.ArgumentParser(description='Process CAN data from a serial device or from a file.')
parser.add_argument('serial_device', type=str, nargs='?')
parser.add_argument('baud_rate', type=int, default=115200, nargs='?',
help='Serial baud rate in bps (default: 115200)')
parser.add_argument('-f', '--candump-file', metavar='CANDUMP_FILE', help="File (of 'candump' format) to read from")
parser.add_argument('-s', '--candump-speed', type=float, metavar='CANDUMP_SPEED', help="Speed scale of file read")
parser.add_argument('--blacklist', '-b', nargs='+', metavar='BLACKLIST', help="Ids that must be ignored")
parser.add_argument(
'--blacklist-file',
'-bf',
metavar='BLACKLIST_FILE',
help="File containing ids that must be ignored",
)
args = parser.parse_args()
# checks arguments
if not args.serial_device and not args.candump_file:
print("Please specify serial device or file name")
print()
parser.print_help()
return
if args.serial_device and args.candump_file:
print("You cannot specify a serial device AND a file name")
print()
parser.print_help()
return
# --blacklist-file prevails over --blacklist
if args.blacklist_file:
with open(args.blacklist_file) as f_obj:
blacklist = parse_ints(f_obj)
elif args.blacklist:
blacklist = parse_ints(args.blacklist)
else:
blacklist = set()
if args.serial_device:
source_handler = SerialHandler(args.serial_device, baudrate=args.baud_rate)
elif args.candump_file:
source_handler = CandumpHandler(args.candump_file, args.candump_speed)
reading_thread = None
try:
# If reading from a serial device, it will be opened with timeout=0 (non-blocking read())
source_handler.open()
# Start the reading background thread
reading_thread = threading.Thread(target=reading_loop, args=(source_handler, blacklist,))
reading_thread.start()
# Make sure to draw the UI the first time even if no data has been read
should_redraw.set()
# Start the main loop
curses.wrapper(main, reading_thread)
finally:
# Cleanly stop reading thread before exiting
if reading_thread:
stop_reading.set()
if source_handler:
source_handler.close()
reading_thread.join()
# If the thread returned an exception, print it
if thread_exception:
traceback.print_exception(*thread_exception)
sys.stderr.flush()
if __name__ == '__main__':
run()
|
the-stack_0_12692 | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import os
import time
import logging
import colorsys
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.compat.v1.keras.backend import get_session
from tensorflow.compat.v1 import disable_eager_execution
from .model import yolo_eval, yolo_body_full, yolo_body_tiny
from .utils import letterbox_image, update_path, get_anchors, get_class_names
from .visual import draw_bounding_box
# swap X-Y axis
PREDICT_FIELDS = ('class', 'label', 'confidence', 'ymin', 'xmin', 'ymax', 'xmax')
class YOLO(object):
"""YOLO detector with tiny alternative
Example
-------
>>> # prepare EMPTY model since download and convert existing is a bit complicated
>>> anchors = get_anchors(YOLO.get_defaults('anchors_path'))
>>> classes = get_class_names(YOLO.get_defaults('classes_path'))
>>> yolo_empty = yolo_body_tiny(Input(shape=(None, None, 3)), len(anchors) // 2, len(classes))
>>> path_model = os.path.join(update_path('model_data'), 'yolo_empty.h5')
>>> yolo_empty.save(path_model)
>>> # use the empty one, so no reasonable detections are expected
>>> from keras_yolo3.utils import image_open
>>> yolo = YOLO(weights_path=path_model,
... anchors_path=YOLO.get_defaults('anchors_path'),
... classes_path=YOLO.get_defaults('classes_path'),
... model_image_size=YOLO.get_defaults('model_image_size'))
>>> img = image_open(os.path.join(update_path('model_data'), 'bike-car-dog.jpg'))
>>> yolo.detect_image(img) # doctest: +ELLIPSIS
(<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=520x518 at ...>, [...])
"""
_DEFAULT_PARAMS = {
"weights_path": os.path.join(update_path('model_data'), 'tiny-yolo.h5'),
"anchors_path": os.path.join(update_path('model_data'), 'tiny-yolo_anchors.csv'),
"classes_path": os.path.join(update_path('model_data'), 'coco_classes.txt'),
"score": 0.3,
"iou": 0.45,
# "model_image_size": (416, 416),
"nb_gpu": 1,
}
@classmethod
def get_defaults(cls, name):
if name not in cls._DEFAULT_PARAMS:
logging.warning('Unrecognized attribute name "%s"', name)
return cls._DEFAULT_PARAMS.get(name)
def __init__(self, weights_path, anchors_path, classes_path, model_image_size=(None, None),
score=0.3, iou=0.45, nb_gpu=1, **kwargs):
"""
:param str weights_path: path to loaded model weights, e.g. 'model_data/tiny-yolo.h5'
:param str anchors_path: path to loaded model anchors, e.g. 'model_data/tiny-yolo_anchors.csv'
:param str classes_path: path to loaded trained classes, e.g. 'model_data/coco_classes.txt'
:param float score: confidence score
:param float iou:
:param tuple(int,int) model_image_size: e.g. for tiny (416, 416)
:param int nb_gpu:
:param kwargs:
"""
self.__dict__.update(kwargs) # and update with user overrides
self.weights_path = update_path(weights_path)
self.anchors_path = update_path(anchors_path)
self.classes_path = update_path(classes_path)
self.score = score
self.iou = iou
self.nb_gpu = nb_gpu
if not self.nb_gpu:
# disable all GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
self.class_names = get_class_names(self.classes_path)
self.anchors = get_anchors(self.anchors_path)
self._open_session()
disable_eager_execution()
self.boxes, self.scores, self.classes = self._create_model(model_image_size)
self._generate_class_colors()
def _open_session(self):
logging.warning('Using %s backend.', K.backend())
self.sess = get_session()
def _create_model(self, model_image_size=(None, None)):
# weights_path = update_path(self.weights_path)
logging.debug('loading model from "%s"', self.weights_path)
assert self.weights_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
try:
self.yolo_model = load_model(self.weights_path, compile=False)
except Exception:
logging.warning('Loading weights from "%s"', self.weights_path)
is_tiny_version = (num_anchors == 6) # default setting
cnn_h, cnn_w = model_image_size
input = Input(shape=(cnn_h, cnn_w, 3))
if is_tiny_version:
self.yolo_model = yolo_body_tiny(input, num_anchors // 2, num_classes)
else:
self.yolo_model = yolo_body_full(input, num_anchors // 3, num_classes)
# make sure model, anchors and classes match
self.yolo_model.load_weights(self.weights_path, by_name=True, skip_mismatch=True)
else:
out_shape = self.yolo_model.layers[-1].output_shape[-1]
ration_anchors = num_anchors / len(self.yolo_model.output) * (num_classes + 5)
assert out_shape == ration_anchors, \
'Mismatch between model and given anchor %r and class %r sizes' \
% (ration_anchors, out_shape)
logging.info('loaded model, anchors (%i), and classes (%i) from %s',
num_anchors, num_classes, self.weights_path)
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2,))
if self.nb_gpu >= 2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.nb_gpu)
boxes, scores, classes = yolo_eval(self.yolo_model.output,
self.anchors,
len(self.class_names),
self.input_image_shape,
score_threshold=self.score,
iou_threshold=self.iou)
return boxes, scores, classes
def _generate_class_colors(self):
"""Generate colors for drawing bounding boxes."""
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
_fn_colorr = lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255))
self.colors = list(map(_fn_colorr, self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
# Shuffle colors to decorrelate adjacent classes.
np.random.shuffle(self.colors)
np.random.seed(None) # Reset seed to default.
def detect_image(self, image):
start = time.time()
# this should be taken from the model
model_image_size = self.yolo_model._input_layers[0].input_shape[0][1:3]
if all(model_image_size):
for size in model_image_size:
assert size % 32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
logging.debug('image shape: %s', repr(image_data.shape))
if image_data.max() > 1.5:
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
end = time.time()
logging.debug('Found %i boxes in %f sec.', len(out_boxes), (end - start))
thickness = (image.size[0] + image.size[1]) // 500
predicts = []
for i, c in reversed(list(enumerate(out_classes))):
draw_bounding_box(image, self.class_names[c], out_boxes[i],
out_scores[i], self.colors[c], thickness)
pred = dict(zip(
PREDICT_FIELDS,
(int(c), self.class_names[c], float(out_scores[i]),
*[int(x) for x in out_boxes[i]])
))
predicts.append(pred)
return image, predicts
def _close_session(self):
self.sess.close()
def __del__(self):
self._close_session()
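# Note on detect_image(): the second element it returns, `predicts`, is a list of
# dicts keyed by PREDICT_FIELDS. A single (hypothetical) entry might look like
#     {'class': 16, 'label': 'dog', 'confidence': 0.87,
#      'ymin': 120, 'xmin': 45, 'ymax': 380, 'xmax': 290}
# i.e. box coordinates come back in (ymin, xmin, ymax, xmax) order, which is the
# "swap X-Y axis" convention noted next to PREDICT_FIELDS above.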
|
the-stack_0_12694 | import pygame
pygame.init()
def drawGrid(window, cell_width):
for i in range(1,9):
if i%3 == 0:
stroke = 3
else:
stroke = 1
pygame.draw.line(window, (60,113,210), (0, i*cell_width), (WIDTH, i*cell_width), stroke)
pygame.draw.line(window, (69,113,210), (i*cell_width, 0), (i*cell_width, HEIGHT), stroke)
def displayBoard(board):
number_font = pygame.font.SysFont("Century Gothic", 30)
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == 0:
number = number_font.render(" ", 1, (203,217,243))
else:
number = number_font.render(str(board[i][j]), 1, (203,217,243))
WIN.blit(number, ((j*CELL_WIDTH)+int(CELL_WIDTH/2.5), (i*CELL_WIDTH)+int(CELL_WIDTH/3)))
def findEmpty(board):
for i in range(9):
for j in range(9):
if board[i][j] == 0:
return (i, j)
return False
def valid(board, pos , n):
# Check row
for j in range(9):
if board[pos[0]][j] == n:
return False
# Check column
for i in range(9):
if board[i][pos[1]] == n:
return False
# Check square
row_index = pos[0]//3
col_index = pos[1]//3
for i in range(row_index*3, row_index*3 + 3):
for j in range(col_index*3, col_index*3 + 3):
if board[i][j] == n:
return False
return True
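# Example (a sketch, not executed by the script): with the starting grid defined
# under __main__ below, valid(grid, (0, 2), 5) is True because 5 appears nowhere
# in row 0, column 2, or the top-left 3x3 box, while valid(grid, (0, 2), 7) is
# False because row 0 already contains a 7.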
def solve():
global grid
pos = findEmpty(grid)
if pos == False:
return True
for i in range(1, 10):
if valid(grid, pos, i):
grid[pos[0]][pos[1]] = i
displayBoard(grid)
pygame.display.update()
if solve():
return True
grid[pos[0]][pos[1]] = 0
return False
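# solve() is a plain backtracking search: it finds the first empty cell, tries the
# digits 1-9 that pass valid(), recurses, and resets the cell to 0 when a branch
# fails. Calling displayBoard() and pygame.display.update() inside the recursion
# redraws the board after every placement, which is what animates the solving
# process when the mouse is clicked in the main loop.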
if __name__ == "__main__":
WIDTH, HEIGHT = 540, 540
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
CELL_WIDTH = WIDTH/9
grid = [
[7,8,0,4,0,0,1,2,0],
[6,0,0,0,7,5,0,0,9],
[0,0,0,6,0,1,0,7,8],
[0,0,7,0,4,0,2,6,0],
[0,0,1,0,5,0,9,3,0],
[9,0,4,0,6,0,0,0,5],
[0,7,0,3,0,0,0,1,2],
[1,2,0,0,0,7,4,0,0],
[0,4,9,2,0,6,0,0,7]
]
running = True
while running:
drawGrid(WIN, CELL_WIDTH)
displayBoard(grid)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
solve()
pygame.display.update()
WIN.fill((50,50,50))
|
the-stack_0_12695 | # QR Code Reader
# Author: Johnjimy Som
# Created: June 3, 2021
import math
# Initialising hex string
ini_string = "11109D6B2700A000200000E000000000" #sample
#ini_string = input('Please insert a hexcode: ')#import the hex here
# Printing initial string
# Step 1: read the QR code payload (hexadecimal)
print ("Initial string:", ini_string)
# Code to convert hex to binary
# Step 2: convert the hex string that was read into binary
n = int(ini_string, 16)
binaryStr = ''
while n > 0:
    binaryStr = str(n % 2) + binaryStr
    n = n >> 1
# Pad to the full width (4 bits per hex digit) so leading zero bits are not lost
result = binaryStr.zfill(len(ini_string) * 4)
# Print the resultant string
print ("\nResultant string [Binary]:", str(result))
#00010001000100001001110101101011001001110000000010100000000000000010000000000000000000001110000000000000000000000000000000000000
# Print binary characters 9-34 (1-indexed); for the sample this should be: 00010000100111010110101100
print ("\nResultant string [9-34] [Binary]:", str(result[8:34]))
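# A more compact equivalent of the conversion above (a sketch; it assumes the same
# ini_string and yields the same zero-padded bit string):
#     result_alt = format(int(ini_string, 16), '0{}b'.format(len(ini_string) * 4))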
# Step 3: split the binary conversion result into its separate data fields
# import module
from tabulate import tabulate
# assigned binaryStr data
mydata = [["Encode Version", "x", "x", "y"],
          ["Print Area", "wololo", "xxx", "yyyu"],
          ["Item Code", "xx", "yy", "zz"]]
# create header
head = [" ", "City", "Binary", "Value(Decimal)"]
# display table
print(tabulate(mydata, headers=head, tablefmt="pretty")) |
the-stack_0_12696 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Different gradient implementations for benchmark purposes
def SpecializedGrad(l, grad):
return gen_linalg_ops.cholesky_grad(l, grad)
def _GradWithInverseL(l, l_inverse, grad):
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
grad_a = math_ops.matmul(
math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
return grad_a * 0.5
def TriAngSolveCompositeGrad(l, grad):
# Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
# Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
middle = math_ops.matmul(l, grad, adjoint_a=True)
middle = array_ops.matrix_set_diag(middle,
0.5 * array_ops.matrix_diag_part(middle))
middle = array_ops.matrix_band_part(middle, -1, 0)
# Compute l^{-H} @ middle = z
l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)
# We need to compute z @ l^{-1}. With matrix_triangular_solve we
# actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
grad_a += linalg.adjoint(grad_a)
return grad_a * 0.5
def MatrixInverseCompositeGrad(l, grad):
l_inverse = linalg_ops.matrix_inverse(l)
return _GradWithInverseL(l, l_inverse, grad)
def TriAngInvCompositeGrad(l, grad):
num_rows = array_ops.shape(l)[-1]
batch_shape = array_ops.shape(l)[:-2]
l_inverse = linalg_ops.matrix_triangular_solve(l,
linalg_ops.eye(
num_rows,
batch_shape=batch_shape,
dtype=l.dtype))
return _GradWithInverseL(l, l_inverse, grad)
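# Summary of the math shared by the gradient variants above: given L = cholesky(A)
# and an incoming gradient dL, each computes
#     grad_A = 0.5 * (X + X^H),  with  X = L^{-H} @ P(L^H @ dL) @ L^{-1},
# where P(.) keeps the lower triangle and halves the diagonal (the
# matrix_set_diag / matrix_band_part steps). The variants differ only in how the
# multiplication by L^{-1} is realized: an explicit matrix inverse, a triangular
# solve against the identity, or triangular solves applied to the intermediate
# term directly.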
class CholeskyOpTest(test.TestCase):
def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = self.evaluate([chol, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, chol)
# Check that the cholesky is lower triangular, and has positive diagonal
# elements.
if chol_np.shape[-1] > 0:
chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
chol_np.shape[-1]))
for chol_matrix in chol_reshaped:
self.assertAllClose(chol_matrix, np.tril(chol_matrix))
self.assertTrue((np.diag(chol_matrix) > 0.0).all())
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.cached_session(use_gpu=True) as sess:
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])
for dtype in (np.float32, np.float64):
self._verifyCholesky(data.astype(dtype))
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyCholesky(complex_data)
def testBatch(self):
simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2)
self._verifyCholesky(simple_array)
self._verifyCholesky(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
# Generate random positive-definite matrices.
matrices = np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T, matrices[i])
self._verifyCholesky(matrices)
# Generate random complex valued positive-definite matrices.
matrices = np.random.rand(10, 5, 5) + 1j * np.random.rand(10, 5, 5)
for i in xrange(10):
matrices[i] = np.dot(matrices[i].T.conj(), matrices[i])
self._verifyCholesky(matrices)
def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))
def testWrongDimensions(self):
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
def testNotInvertibleCPU(self):
# The input should be invertible.
with self.session(use_gpu=True):
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful. The"
" input might not be valid."):
# All rows of the matrix below add to zero
self._verifyCholesky(
np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]]))
def testEmpty(self):
self._verifyCholesky(np.empty([0, 2, 2]))
self._verifyCholesky(np.empty([2, 0, 0]))
def testConcurrentExecutesWithoutError(self):
with self.session(use_gpu=True) as sess:
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
matrix1 = math_ops.matmul(matrix1, matrix1, adjoint_a=True)
matrix2 = math_ops.matmul(matrix2, matrix2, adjoint_a=True)
c1 = linalg_ops.cholesky(matrix1)
c2 = linalg_ops.cholesky(matrix2)
c1_val, c2_val = self.evaluate([c1, c2])
self.assertAllClose(c1_val, c2_val)
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32
def getShapes(self, shapeList):
return ((elem, int(np.floor(1.2 * elem))) for elem in shapeList)
def testSmallMatrices(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64))
def testSmallMatricesComplex(self):
np.random.seed(0)
shapes = self.getShapes([1, 2, 10])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64, dtypes_lib.complex128))
def testOneBlockMatrices(self):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)
def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)
def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)
def testTwoBlockMatrixComplexFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex64,), scalarTest=True)
def testTwoBlockMatrixComplexDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.complex128,), scalarTest=True)
def testAgainstSpecialized(self):
np.random.seed(0)
data = np.random.randn(33, 33).astype(np.float32)
data = np.matmul(data, data.T)
grad_data = np.random.randn(*data.shape).astype(np.float32)
with ops.Graph().as_default(), self.session(use_gpu=False) as s:
x = constant_op.constant(data, dtypes_lib.float32)
chol = linalg_ops.cholesky(x)
composite_grad = gradients_impl.gradients(chol, x, grad_data)[0]
specialized_grad = SpecializedGrad(chol, grad_data)
reference, actual = s.run([specialized_grad, composite_grad])
self.assertAllClose(reference, actual)
def runFiniteDifferences(self,
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128),
scalarTest=False):
with self.session(use_gpu=True):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
data = np.random.randn(shape[0], shape[1])
if dtype.is_complex:
data = data.astype(np.complex64)
data += 1j * np.random.randn(shape[0], shape[1])
x = constant_op.constant(data, dtype)
tensor = math_ops.matmul(
x, math_ops.conj(array_ops.transpose(x))) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
data = np.random.randn()
if dtype.is_complex:
data = np.complex64(data)
data += 1j * np.random.randn()
x = constant_op.constant(data, dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.multiply(R, x)
tensor = math_ops.matmul(
e, math_ops.conj(array_ops.transpose(e))) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(
x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
elif dtype == dtypes_lib.complex128:
self.assertLess(error, 5e-5)
else:
self.assertLess(error, 5e-3)
class CholeskyBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_gpu_{shape}".format(shape=shape))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for shape in self.shapes:
matrix = self._GenerateMatrix(shape)
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device):
l = variables.Variable(np.linalg.cholesky(matrix))
grad_matrix = variables.Variable(
np.random.randn(*matrix.shape).astype(np.float32))
grad = grad_fn(l, grad_matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(
grad,),
min_iters=25,
name="{name}_{dev}_{shape}".format(
name=name, dev=grad.device, shape=shape))
if test.is_gpu_available(True):
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/device:GPU:0")
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/cpu:0")
_BenchmarkGrad(SpecializedGrad, "specialized", "/cpu:0")
if __name__ == "__main__":
test.main()
|
the-stack_0_12698 | import os
from bootstrapbase import BootstrapBase
from common.const import Constants
from common.mapr_logger.log import Log
from operations.operationsbase import OperationsBase
from operations.shared import SharedSystem
from operations.csi import CSI
from operations.csinfs import CSINFS
from operations.dataplatform import DataPlatform
from operations.compute import Compute
from operations.drill import Drill
from operations.ldap import LDAP
from operations.kubeflow import Kubeflow
from operations.nodesvc import Nodesvc
from operations.spark import Spark
from operations.autoticket_generator import AutoTicketGenerator
from operations.dataplatform_validator import DataPlatformValidator
from operations.tenant_validator import TenantValidator
from cluster_info import ClusterInfo
class BootstrapUninstall(BootstrapBase):
def __init__(self):
super(BootstrapUninstall, self).__init__(BootstrapBase.UNINSTALL)
self.cloud_instance = None
self.cloud_created = False
self._parse_args()
def run(self):
super(BootstrapUninstall, self).run()
k8s = OperationsBase()
k8s.load_replace_dict()
shared = SharedSystem()
nodesvc = Nodesvc()
csi = CSI()
csinfs = CSINFS()
ldap = LDAP(self._prompts)
dataplatform = DataPlatform()
compute = Compute()
spark = Spark()
autoticket_generator = AutoTicketGenerator()
dataplatform_validator = DataPlatformValidator()
tenant_validator = TenantValidator()
kubeflow = Kubeflow()
# openshift = OpenShift()
drill = Drill()
cluster_info = ClusterInfo()
self.prologue()
self.python_check()
if dataplatform.dataplatform_operation(self.parsed_args, False):
return
self.check_laptop_tools()
self.confirm_delete_installation()
if self.core_install_enabled:
do_storage = True
else:
do_storage = self.parsed_args.core_uninstall
do_compute = True
do_drill = self.parsed_args.drill_uninstall
do_csi = True
uninstall_csi = False
uninstall_compute = False
# uninstall_autoticket_generator = False
uninstall_compute_templates = False
uninstall_storage = False
uninstall_storage_templates = False
do_kubeflow = True
uninstall_kubeflow = True
do_spark = False
uninstall_spark = True
do_external = True
uninstall_external = False
do_secure = True
uninstall_secure = False
do_exampleldap = True
uninstall_exampleldap = False
uninstall_drill = False
str_tolerations = ""
if cluster_info.schedule_pods_on_master:
str_tolerations = "\n - key: node-role.kubernetes.io/master\n operator: Exists\n effect: NoSchedule"
OperationsBase.replace_dict["{tolerate-master-node}"] = str_tolerations
OperationsBase.replace_dict["{operator-repo}"] = Constants.OPERATOR_REPO
OperationsBase.replace_dict["{csi-repo}"] = Constants.CSI_REPO
OperationsBase.replace_dict["{kdf-repo}"] = Constants.KDF_REPO
OperationsBase.replace_dict["{kubeflow-repo}"] = Constants.KUBEFLOW_REPO
OperationsBase.replace_dict["{local-path-provisioner-repo}"] = Constants.LOCAL_PATH_PROVISIONER_REPO
OperationsBase.replace_dict["{kfctl-hcp-istio-repo}"] = Constants.KFCTL_HSP_ISTIO_REPO
OperationsBase.replace_dict["{busybox-repo}"] = Constants.BUSYBOX_REPO
OperationsBase.replace_dict["{fake-labels}"] = "true"
if do_csi:
uninstall_csi = self.check_remove_csi()
if do_storage:
uninstall_storage = self.check_remove_storage()
uninstall_storage_templates = self.check_remove_storage_templates()
if do_external:
uninstall_external = self.check_remove_external()
if do_secure:
uninstall_secure = self.check_remove_secure()
if do_exampleldap:
uninstall_exampleldap = self.check_remove_exampleldap(k8s)
if do_compute:
uninstall_compute = self.check_remove_compute()
# uninstall_autoticket_generator = uninstall_compute
if uninstall_compute:
uninstall_compute_templates = self.check_remove_compute_templates()
if cluster_info.is_spark_installed():
uninstall_spark = self.check_remove_spark()
uninstall_drill = do_drill
if do_kubeflow:
uninstall_kubeflow = self.check_remove_kubeflow()
# Check if the connected k8s environment is Openshift
# if k8s.is_openshift_connected():
# k8s.is_openshift = True
# k8s.switch_to_oc()
if uninstall_external:
shared.uninstall_external_components()
if uninstall_storage:
dataplatform.uninstall_dataplatform(uninstall_templates=uninstall_storage_templates)
dataplatform_validator.run_uninstall()
if uninstall_compute:
compute.uninstall_compute_components(uninstall_templates=uninstall_compute_templates)
autoticket_generator.run_uninstall()
tenant_validator.run_uninstall()
# uninstall_autoticket_generator = uninstall_compute
if uninstall_spark:
spark.uninstall_spark_components()
if uninstall_drill:
drill.uninstall_drill_components()
if uninstall_compute or uninstall_storage:
shared.uninstall_common_components()
nodesvc.uninstall_nodesvc()
elif uninstall_kubeflow:
shared.uninstall_common_components()
if uninstall_secure:
shared.uninstall_secure_components()
if uninstall_exampleldap:
ldap.uninstall_exampleldap()
if uninstall_kubeflow:
kubeflow.uninstall_kubeflow_components()
if uninstall_csi:
csi.uninstall_csi_components()
csinfs.uninstall_csi_components()
self.complete_uninstallation()
def confirm_delete_installation(self):
print(os.linesep)
Log.info("This will uninstall ALL Ezmeral Data Fabric for Kubernetes operators from your Kubernetes environment. This will cause all "
"Tenants to be destroyed. They cannot be recovered!", True)
agree = self._prompts.prompt_boolean("Do you agree?", False, key_name="AGREEMENT")
if not agree:
Log.info("Very wise decision. Exiting uninstall...", True)
BootstrapBase.exit_application(2)
def check_remove_csi(self):
choice = self._prompts.prompt_boolean("Remove the Ezmeral Data Fabric CSI driver?", False, key_name="REMOVE_CSI")
return choice
def check_remove_spark(self):
choice = self._prompts.prompt_boolean("Remove the Spark Operator?", False, key_name="REMOVE_SPARK")
return choice
def check_remove_drill(self):
choice = self._prompts.prompt_boolean("Remove the Drill Operator?", False, key_name="REMOVE_DRILL")
return choice
def check_remove_kubeflow(self):
choice = self._prompts.prompt_boolean("Remove the Kubeflow Operator?", False, key_name="REMOVE_KUBEFLOW")
return choice
def check_remove_compute(self):
choice = self._prompts.prompt_boolean("Remove Compute components?", False, key_name="REMOVE_COMPUTE")
return choice
def check_remove_compute_templates(self):
choice = self._prompts.prompt_boolean("Remove the Compute templates? Note: You will lose your template changes!", False, key_name="REMOVE_COMPUTE_TEMPLATES")
return choice
def check_remove_storage(self):
choice = self._prompts.prompt_boolean("Remove Data Platform?", False, key_name="REMOVE_STORAGE")
return choice
def check_remove_storage_templates(self):
choice = self._prompts.prompt_boolean("Remove the Data Platform Templates? Note: You will lose your template changes!", False, key_name="REMOVE_STORAGE_TEMPLATES")
return choice
def check_remove_external(self):
choice = self._prompts.prompt_boolean("Remove the External Cluster Info? Note: You will lose your imported cluster info!", False, key_name="REMOVE_EXTERNAL_INFO")
return choice
def check_remove_secure(self):
choice = self._prompts.prompt_boolean("Remove the Secure Namespace? Note: You will lose your template changes!", False, key_name="REMOVE_SECURE")
return choice
@staticmethod
def check_remove_exampleldap(k8s):
get_str = "namespace {0}".format(Constants.EXAMPLE_LDAP_NAMESPACE)
response, status = k8s.run_get(get_str, False)
result = (status == 0)
return result
def is_cloud_env(self):
print(os.linesep)
is_cloud = self._prompts.prompt_boolean("Is this a cloud env?", True, key_name="CLOUD_ENV")
        return is_cloud
@staticmethod
def complete_uninstallation():
print(os.linesep)
msg = "This Kubernetes environment"
warnings = Log.get_warning_count()
errors = Log.get_error_count()
if errors > 0 and warnings > 0:
msg = "{0} had {1} error(s) and {2} warning(s) during the uninstall process for selected components".format(msg, errors, warnings)
Log.error(msg)
elif errors > 0 and warnings == 0:
msg = "{0} had {1} error(s) during the uninstall process for selected components".format(msg, errors)
Log.error(msg)
elif errors == 0 and warnings > 0:
msg = "{0} had {1} warnings(s) during the uninstall process for selected components".format(msg, warnings)
Log.warning(msg)
else:
msg = "{0} has had selected components successfully uninstalled".format(msg)
Log.info(msg, True)
if errors > 0 or warnings > 0:
msg = "Please check the bootstrap log file for this session here: {0}".format(Log.get_log_filename())
Log.warning(msg)
Log.info("")
if __name__ == '__main__':
bootstrap_uninstall = BootstrapUninstall()
try:
bootstrap_uninstall.run()
except Exception as e:
Log.exception(e)
raise e
BootstrapBase.exit_application(0)
|
the-stack_0_12700 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from prereq_map.models.course_title import CourseTitle
from prereq_map.utils.typeahead import get_course_typeahead
import pandas as pd
class TestCourseTitle(TestCase):
def test_titles(self):
dataframe = pd.DataFrame(
{'department_abbrev': ['CSE', 'LAW'],
'course_number': [142, 354],
'course_college': ['School of Arts and Sci', 'Law School'],
'long_course_title': ['Intro To Java', 'Bird Law']
}
)
CourseTitle.update_titles(dataframe)
self.assertEqual(len(CourseTitle.objects.all()), 2)
title = CourseTitle().get_course_title("CSE 142")
self.assertEqual(title, "Intro To Java")
with self.assertRaises(CourseTitle.DoesNotExist):
CourseTitle.get_course_title("CSE 500")
def test_typeahead(self):
CourseTitle(department_abbrev="CSE",
course_number="142",
long_course_title="Intro to Comp Sci").save()
CourseTitle(department_abbrev="MATH",
course_number="123",
long_course_title="Counting by Numbers").save()
ct_typeahead = get_course_typeahead()
self.assertEqual(len(ct_typeahead), 2)
self.assertEqual(ct_typeahead[0], "CSE 142: Intro to Comp Sci")
|
the-stack_0_12702 | import random
from perf_load.perf_req_gen import RequestGenerator
class RGSeqReqs(RequestGenerator):
def __init__(self, *args, reqs=list(), next_random: bool=False, **kwargs):
super().__init__(*args, **kwargs)
self._req_idx = -1
self._next_idx = self._rand_idx if next_random else self._seq_idx
if not isinstance(reqs, list):
raise RuntimeError("Bad Requests sequence provided")
self._reqs_collection = []
for reqc, prms in reqs:
if not issubclass(reqc, RequestGenerator):
raise RuntimeError("Bad Request class provided")
cnt = 1
param = {}
if isinstance(prms, int) and prms > 0:
cnt = prms
elif isinstance(prms, dict):
cnt = prms.get('count', 1)
param = prms
else:
raise RuntimeError("Bad Request params provided")
new_req = reqc(*args, **param, **kwargs)
for i in range(0, cnt):
self._reqs_collection.append(new_req)
if len(self._reqs_collection) == 0:
raise RuntimeError("At least one class should be provided")
async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, sign_req_f, send_req_f, *args, **kwargs):
for req_builder in set(self._reqs_collection):
await req_builder.on_pool_create(pool_handle, wallet_handle, submitter_did, sign_req_f, send_req_f, *args, **kwargs)
def _seq_idx(self):
return (self._req_idx + 1) % len(self._reqs_collection)
def _rand_idx(self):
return random.randint(0, len(self._reqs_collection) - 1)
def _gen_req_data(self):
self._req_idx = self._next_idx()
return self._reqs_collection[self._req_idx]._gen_req_data()
def get_label(self):
return self._reqs_collection[self._req_idx].get_label()
async def _gen_req(self, submit_did, req_data):
req_gen = self._reqs_collection[self._req_idx]
return await req_gen._gen_req(submit_did, req_data)
async def on_request_generated(self, req_data, gen_req):
for r in self._reqs_collection:
await r.on_request_generated(req_data, gen_req)
async def on_request_replied(self, req_data, req, resp_or_exp):
for r in self._reqs_collection:
await r.on_request_replied(req_data, req, resp_or_exp)
def req_did(self):
return self._reqs_collection[self._req_idx].req_did()
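# Usage sketch (the generator class names below are hypothetical, for illustration
# only): `reqs` is a list of (RequestGenerator subclass, params) pairs, where
# params is either an int repeat count or a dict of constructor kwargs that may
# also carry a 'count' key. For example,
#     RGSeqReqs(..., reqs=[(RGWriteNym, 3), (RGReadNym, {'count': 2})],
#               next_random=False)
# fills three slots with a shared RGWriteNym instance and two with a shared
# RGReadNym instance, then cycles through the collection sequentially (or picks
# a random slot per request when next_random=True).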
|
the-stack_0_12703 | """Tests that the config singleton is working properly
"""
from os.path import expanduser
from os.path import join
from unittest import TestCase
from mock import patch
from testfixtures import TempDirectory
from nose.tools import eq_
from nose.tools import raises
from ..config import get_config_files
from ..config import load_yaml
class TestConfig(TestCase):
"""Tests for config singleton
"""
def setUp(self):
self.test_yaml_file = '\n'.join([
'test:',
' test_sub:',
' - test_sub1: foo',
' test_sub1_other: bar',
' - test_sub2: foobar',
])
self.test_config_dict = {
'test': {
'test_sub': [
{
'test_sub1': 'foo',
'test_sub1_other': 'bar',
},
{
'test_sub2': 'foobar',
}
]
}
}
@staticmethod
@patch.dict('os.environ', {}, clear=True)
def test_get_config_files_no_enviroment_variable():
"""Tests that correct config file paths are returned when there's no
        environment variable
"""
expected = [
'/etc/dataduct.cfg',
expanduser('~/.dataduct/dataduct.cfg'),
]
result = get_config_files()
eq_(result, expected)
@staticmethod
@patch.dict('os.environ', {'DATADUCT_CONFIG_PATH': '/test/test.cfg'})
def test_get_config_files_with_enviroment_variable():
"""Tests that correct config file paths are returned when there is
        an environment variable
"""
expected = [
'/etc/dataduct.cfg',
expanduser('~/.dataduct/dataduct.cfg'),
'/test/test.cfg',
]
result = get_config_files()
eq_(result, expected)
def test_load_yaml_works_correctly(self):
"""Tests that the yaml file can be loaded correctly
"""
with TempDirectory() as d:
d.write('test.yaml', self.test_yaml_file.encode('utf8'))
result = load_yaml([join(d.path, 'test.yaml')])
eq_(result, self.test_config_dict)
@staticmethod
@raises(IOError)
def test_no_config_file_raises():
"""Tests that an exception is raised if no yaml file path is passed in
"""
load_yaml([])
@staticmethod
@raises(IOError)
def test_cannot_find_config_file_raises():
"""Tests that an exception is raised if it cannot find any yaml files
"""
with TempDirectory() as d:
with TempDirectory() as d2:
load_yaml([join(d.path, 'test.cfg'),
join(d2.path, 'test.cfg')])
|
the-stack_0_12705 | from azureml.core.webservice import AciWebservice
from azureml.core.webservice import Webservice
from azureml.core.image import Image
from azureml.core import Workspace
import sys
import json
# Get workspace
ws = Workspace.from_config()
# Get the Image to deploy details
try:
with open("aml_config/image.json") as f:
config = json.load(f)
except (OSError, ValueError):
print('No new model, thus no deployment on ACI')
sys.exit(0)
image_name = config['image_name']
image_version = config['image_version']
images = Image.list(workspace=ws)
image, = (m for m in images if m.version==image_version and m.name == image_name)
print('From image.json, Image used to deploy webservice on ACI: {}\nImage Version: {}\nImage Location = {}'.format(image.name, image.version, image.image_location))
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
auth_enabled=True, # this flag generates API keys to secure access
memory_gb=1,
tags={'name':'prednet', 'framework': 'Keras'},
description='Prednet')
aci_service_name = image_name
print(aci_service_name)
service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
service.wait_for_deployment(True)
print('Deployed ACI Webservice: {} \nWebservice Uri: {}'.format(service.name, service.scoring_uri))
#service=Webservice(name ='aciws0622', workspace =ws)
# Writing the ACI details to /aml_config/aci_webservice.json
aci_webservice = {}
aci_webservice['aci_name'] = service.name
aci_webservice['aci_url'] = service.scoring_uri
with open('aml_config/aci_webservice.json', 'w') as outfile:
json.dump(aci_webservice,outfile)
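# Sketch (not executed here) of how a client might call the deployed service.
# It assumes the azureml-core Webservice.get_keys() API for retrieving the auth
# key enabled above; verify the exact call against your SDK version, and replace
# the payload with whatever the scoring script in the image expects.
#
# import requests
# primary_key, _ = service.get_keys()
# headers = {'Content-Type': 'application/json',
#            'Authorization': 'Bearer ' + primary_key}
# resp = requests.post(service.scoring_uri, data=json.dumps({'data': []}),
#                      headers=headers)
# print(resp.status_code, resp.text)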
|
the-stack_0_12706 | #! /usr/bin/env python
"""
Module with contrast curve generation function.
"""
__author__ = 'C. Gomez, O. Absil @ ULg'
__all__ = ['contrast_curve',
'noise_per_annulus',
'throughput',
'aperture_flux']
import numpy as np
import pandas as pd
import photutils
import inspect
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import stats
from scipy.signal import savgol_filter
from skimage.draw import disk
from matplotlib import pyplot as plt
from .fakecomp import (cube_inject_companions, frame_inject_companion,
normalize_psf)
from ..conf import time_ini, timing
from ..conf.utils_conf import sep
from ..var import frame_center, dist
def contrast_curve(cube, angle_list, psf_template, fwhm, pxscale, starphot,
algo, sigma=5, nbranch=1, theta=0, inner_rad=1,
wedge=(0,360), fc_snr=100, student=True, transmission=None,
smooth=True, interp_order=2, plot=True, dpi=100, debug=False,
verbose=True, full_output=False, save_plot=None,
object_name=None, frame_size=None, fix_y_lim=(),
figsize=(8, 4), **algo_dict):
""" Computes the contrast curve at a given SIGMA (``sigma``) level for an
ADI cube or ADI+IFS cube. The contrast is calculated as
sigma*noise/throughput. This implementation takes into account the small
sample statistics correction proposed in Mawet et al. 2014.
Parameters
----------
cube : numpy ndarray
The input cube, 3d (ADI data) or 4d array (IFS data), without fake
companions.
angle_list : numpy ndarray
Vector with the parallactic angles.
psf_template : numpy ndarray
Frame with the psf template for the fake companion(s).
PSF must be centered in array. Normalization is done internally.
fwhm: int or float or 1d array, optional
The the Full Width Half Maximum in pixels. It can handle a different
FWHM value for different wavelengths (IFS data).
pxscale : float
        Plate scale or pixel scale of the instrument, in arcsec/px.
starphot : int or float or 1d array
If int or float it corresponds to the aperture photometry of the
non-coronagraphic PSF which we use to scale the contrast. If a vector
is given it must contain the photometry correction for each frame.
algo : callable or function
The post-processing algorithm, e.g. vip_hci.pca.pca.
sigma : int
Sigma level for contrast calculation. Note this is a "Gaussian sigma"
regardless of whether Student t correction is performed (set by the
'student' parameter). E.g. setting sigma to 5 will yield the contrast
curve corresponding to a false alarm probability of 3e-7.
nbranch : int, optional
Number of branches on which to inject fakes companions. Each branch
is tested individually.
theta : float, optional
Angle in degrees for rotating the position of the first branch that by
default is located at zero degrees. Theta counts counterclockwise from
the positive x axis. When working on a wedge, make sure that theta is
located inside of it.
inner_rad : int, optional
Innermost radial distance to be considered in terms of FWHM.
wedge : tuple of floats, optional
Initial and Final angles for using a wedge. For example (-90,90) only
considers the right side of an image.
    fc_snr : float, optional
Signal to noise ratio of injected fake companions (w.r.t a Gaussian
distribution).
student : bool, optional
If True uses Student t correction to inject fake companion.
transmission : tuple of 2 1d arrays, optional
If not None, then the tuple contains a vector with the factors to be
applied to the sensitivity and a vector of the radial distances [px]
where it is sampled (in this order).
smooth : bool, optional
If True the radial noise curve is smoothed with a Savitzky-Golay filter
of order 2.
interp_order : int or None, optional
If True the throughput vector is interpolated with a spline of order
``interp_order``. Takes values from 1 to 5. If None, then the
throughput is not interpolated.
plot : bool, optional
Whether to plot the final contrast curve or not. True by default.
dpi : int optional
Dots per inch for the plots. 100 by default. 300 for printing quality.
imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
Library or method used for image operations (rotations). Opencv is the
default for being the fastest. See description of
`vip_hci.preproc.frame_rotate`.
interpolation: str, opt
See description of ``vip_hci.preproc.frame_rotate`` function
debug : bool, optional
Whether to print and plot additional info such as the noise, throughput,
the contrast curve with different X axis and the delta magnitude instead
of contrast.
verbose : {True, False, 0, 1, 2}, optional
If True or 1 the function prints to stdout intermediate info and timing,
if set to 2 more output will be shown.
full_output : bool, optional
If True returns intermediate arrays.
save_plot: string
If provided, the contrast curve will be saved to this path.
object_name: string
Target name, used in the plot title.
frame_size: int
Frame size used for generating the contrast curve, used in the plot
title.
fix_y_lim: tuple
If provided, the y axis limits will be fixed, for easier comparison
between plots.
**algo_dict
Any other valid parameter of the post-processing algorithms can be
passed here, including e.g. imlib and interpolation.
Returns
-------
datafr : pandas dataframe
Dataframe containing the sensitivity (Gaussian and Student corrected if
Student parameter is True), the interpolated throughput, the distance in
pixels, the noise and the sigma corrected (if Student is True).
If full_output is True then the function returns:
    datafr, frame_fc_all, frame_nofc and fc_map_all.
frame_fc_all : numpy ndarray
3d array with the 3 frames of the 3 (patterns) processed cubes with
companions.
frame_nofc : numpy ndarray
2d array, PCA processed frame without companions.
fc_map_all : numpy ndarray
3d array with 3 frames containing the position of the companions in the
3 patterns.
"""
if cube.ndim != 3 and cube.ndim != 4:
raise TypeError('The input array is not a 3d or 4d cube')
if cube.ndim == 3 and (cube.shape[0] != angle_list.shape[0]):
raise TypeError('Input parallactic angles vector has wrong length')
if cube.ndim == 4 and (cube.shape[1] != angle_list.shape[0]):
raise TypeError('Input parallactic angles vector has wrong length')
if cube.ndim == 3 and psf_template.ndim != 2:
raise TypeError('Template PSF is not a frame (for ADI case)')
if cube.ndim == 4 and psf_template.ndim != 3:
raise TypeError('Template PSF is not a cube (for ADI+IFS case)')
if transmission is not None:
if not isinstance(transmission, tuple) or not len(transmission) == 2:
raise TypeError('transmission must be a tuple with 2 1d vectors')
if isinstance(fwhm, (np.ndarray,list)):
fwhm_med = np.median(fwhm)
else:
fwhm_med = fwhm
if verbose:
start_time = time_ini()
if isinstance(starphot, float) or isinstance(starphot, int):
msg0 = 'ALGO : {}, FWHM = {}, # BRANCHES = {}, SIGMA = {},'
msg0 += ' STARPHOT = {}'
print(msg0.format(algo.__name__, fwhm_med, nbranch, sigma, starphot))
else:
msg0 = 'ALGO : {}, FWHM = {}, # BRANCHES = {}, SIGMA = {}'
print(msg0.format(algo.__name__, fwhm_med, nbranch, sigma))
print(sep)
# throughput
verbose_thru = False
if verbose == 2:
verbose_thru = True
res_throug = throughput(cube, angle_list, psf_template, fwhm, pxscale,
nbranch=nbranch, theta=theta, inner_rad=inner_rad,
wedge=wedge, fc_snr=fc_snr, full_output=True,
algo=algo, verbose=verbose_thru, **algo_dict)
vector_radd = res_throug[3]
if res_throug[0].shape[0] > 1:
thruput_mean = np.nanmean(res_throug[0], axis=0)
else:
thruput_mean = res_throug[0][0]
frame_fc_all = res_throug[4]
frame_nofc = res_throug[5]
fc_map_all = res_throug[6]
if verbose:
print('Finished the throughput calculation')
timing(start_time)
if interp_order is not None:
# noise measured in the empty frame with better sampling, every px
# starting from 1*FWHM
noise_samp, res_lev_samp, rad_samp = noise_per_annulus(frame_nofc,
separation=1,
fwhm=fwhm_med,
init_rad=fwhm_med,
wedge=wedge)
radmin = vector_radd.astype(int).min()
cutin1 = np.where(rad_samp.astype(int) == radmin)[0][0]
noise_samp = noise_samp[cutin1:]
res_lev_samp = res_lev_samp[cutin1:]
rad_samp = rad_samp[cutin1:]
radmax = vector_radd.astype(int).max()
cutin2 = np.where(rad_samp.astype(int) == radmax)[0][0]
noise_samp = noise_samp[:cutin2 + 1]
res_lev_samp = res_lev_samp[:cutin2 + 1]
rad_samp = rad_samp[:cutin2 + 1]
# interpolating the throughput vector, spline order 2
f = InterpolatedUnivariateSpline(vector_radd, thruput_mean,
k=interp_order)
thruput_interp = f(rad_samp)
# interpolating the transmission vector, spline order 1
if transmission is not None:
trans = transmission[0]
radvec_trans = transmission[1]
f2 = InterpolatedUnivariateSpline(radvec_trans, trans, k=1)
trans_interp = f2(rad_samp)
thruput_interp *= trans_interp
else:
rad_samp = vector_radd
noise_samp = res_throug[1]
res_lev_samp = res_throug[2]
thruput_interp = thruput_mean
if transmission is not None:
if not transmission[0].shape == thruput_interp.shape[0]:
msg = 'Transmiss. and throughput vectors have different length'
raise ValueError(msg)
thruput_interp *= transmission[0]
rad_samp_arcsec = rad_samp * pxscale
# take abs value of the mean residual fluxes otherwise the more
# oversubtraction (negative res_lev_samp), the better the contrast!!
res_lev_samp = np.abs(res_lev_samp)
if smooth:
# smoothing the noise vector using a Savitzky-Golay filter
win = min(noise_samp.shape[0]-2, int(2*fwhm_med))
if win % 2 == 0:
win += 1
noise_samp_sm = savgol_filter(noise_samp, polyorder=2, mode='nearest',
window_length=win)
res_lev_samp_sm = savgol_filter(res_lev_samp, polyorder=2,
mode='nearest', window_length=win)
else:
noise_samp_sm = noise_samp
res_lev_samp_sm = res_lev_samp
# calculating the contrast
if isinstance(starphot, float) or isinstance(starphot, int):
cont_curve_samp = ((sigma * noise_samp_sm + res_lev_samp_sm
)/ thruput_interp) / starphot
else:
cont_curve_samp = ((sigma * noise_samp_sm + res_lev_samp_sm
) / thruput_interp) / np.median(starphot)
cont_curve_samp[np.where(cont_curve_samp < 0)] = 1
cont_curve_samp[np.where(cont_curve_samp > 1)] = 1
# calculating the Student corrected contrast
if student:
n_res_els = np.floor(rad_samp/fwhm_med*2*np.pi)
ss_corr = np.sqrt(1 + 1/n_res_els)
sigma_corr = stats.t.ppf(stats.norm.cdf(sigma), n_res_els-1)*ss_corr
if isinstance(starphot, float) or isinstance(starphot, int):
cont_curve_samp_corr = ((sigma_corr*noise_samp_sm + res_lev_samp_sm
)/thruput_interp)/starphot
else:
cont_curve_samp_corr = ((sigma_corr*noise_samp_sm + res_lev_samp_sm
)/thruput_interp) / np.median(starphot)
cont_curve_samp_corr[np.where(cont_curve_samp_corr < 0)] = 1
cont_curve_samp_corr[np.where(cont_curve_samp_corr > 1)] = 1
if debug:
plt.rc("savefig", dpi=dpi)
plt.figure(figsize=figsize, dpi=dpi)
# throughput
plt.plot(vector_radd * pxscale, thruput_mean, '.', label='computed',
alpha=0.6)
plt.plot(rad_samp_arcsec, thruput_interp, ',-', label='interpolated',
lw=2, alpha=0.5)
plt.grid('on', which='both', alpha=0.2, linestyle='solid')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel('Throughput')
plt.legend(loc='best')
plt.xlim(0, np.max(rad_samp*pxscale))
# noise
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(rad_samp_arcsec, noise_samp, '.', label='computed', alpha=0.6)
if smooth:
plt.plot(rad_samp_arcsec, noise_samp_sm, ',-',
label='noise smoothed', lw=2, alpha=0.5)
plt.grid('on', alpha=0.2, linestyle='solid')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel('Noise')
plt.legend(loc='best')
plt.xlim(0, np.max(rad_samp_arcsec))
# mean residual level
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(rad_samp_arcsec, res_lev_samp, '.',
label='computed residual level', alpha=0.6)
if smooth:
plt.plot(rad_samp_arcsec, res_lev_samp_sm, ',-',
label='smoothed residual level', lw=2, alpha=0.5)
plt.grid('on', alpha=0.2, linestyle='solid')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel('Mean residual level')
plt.legend(loc='best')
plt.xlim(0, np.max(rad_samp_arcsec))
# plotting
if plot or debug:
if student:
label = ['Sensitivity (Gaussian)',
'Sensitivity (Student-t correction)']
else:
label = ['Sensitivity (Gaussian)']
plt.rc("savefig", dpi=dpi)
fig = plt.figure(figsize=figsize, dpi=dpi)
ax1 = fig.add_subplot(111)
con1, = ax1.plot(rad_samp_arcsec, cont_curve_samp, '-',
alpha=0.2, lw=2, color='green')
con2, = ax1.plot(rad_samp_arcsec, cont_curve_samp, '.',
alpha=0.2, color='green')
if student:
con3, = ax1.plot(rad_samp_arcsec, cont_curve_samp_corr, '-',
alpha=0.4, lw=2, color='blue')
con4, = ax1.plot(rad_samp_arcsec, cont_curve_samp_corr, '.',
alpha=0.4, color='blue')
lege = [(con1, con2), (con3, con4)]
else:
lege = [(con1, con2)]
plt.legend(lege, label, fancybox=True, fontsize='medium')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel(str(sigma)+' sigma contrast')
plt.grid('on', which='both', alpha=0.2, linestyle='solid')
ax1.set_yscale('log')
ax1.set_xlim(0, np.max(rad_samp_arcsec))
# Give a title to the contrast curve plot
if object_name is not None and frame_size is not None:
# Retrieve ncomp and pca_type info to use in title
ncomp = algo_dict['ncomp']
if algo_dict['cube_ref'] is None:
pca_type = 'ADI'
else:
pca_type = 'RDI'
title = "{} {} {}pc {} + {}".format(pca_type, object_name, ncomp,
frame_size, inner_rad)
plt.title(title, fontsize=14)
# Option to fix the y-limit
if len(fix_y_lim) == 2:
min_y_lim = min(fix_y_lim[0], fix_y_lim[1])
max_y_lim = max(fix_y_lim[0], fix_y_lim[1])
ax1.set_ylim(min_y_lim, max_y_lim)
# Optionally, save the figure to a path
if save_plot is not None:
fig.savefig(save_plot, dpi=dpi)
if debug:
fig2 = plt.figure(figsize=figsize, dpi=dpi)
ax3 = fig2.add_subplot(111)
cc_mags = -2.5*np.log10(cont_curve_samp)
con4, = ax3.plot(rad_samp_arcsec, cc_mags, '-',
alpha=0.2, lw=2, color='green')
con5, = ax3.plot(rad_samp_arcsec, cc_mags, '.', alpha=0.2,
color='green')
if student:
cc_mags_corr = -2.5*np.log10(cont_curve_samp_corr)
con6, = ax3.plot(rad_samp_arcsec, cc_mags_corr, '-',
alpha=0.4, lw=2, color='blue')
con7, = ax3.plot(rad_samp_arcsec, cc_mags_corr, '.',
alpha=0.4, color='blue')
lege = [(con4, con5), (con6, con7)]
else:
lege = [(con4, con5)]
plt.legend(lege, label, fancybox=True, fontsize='medium')
plt.xlabel('Angular separation [arcsec]')
plt.ylabel('Delta magnitude')
plt.gca().invert_yaxis()
plt.grid('on', which='both', alpha=0.2, linestyle='solid')
ax3.set_xlim(0, np.max(rad_samp*pxscale))
ax4 = ax3.twiny()
ax4.set_xlabel('Distance [pixels]')
ax4.plot(rad_samp, cc_mags, '', alpha=0.)
ax4.set_xlim(0, np.max(rad_samp))
if student:
datafr = pd.DataFrame({'sensitivity_gaussian': cont_curve_samp,
'sensitivity_student': cont_curve_samp_corr,
'throughput': thruput_interp,
'distance': rad_samp,
'distance_arcsec': rad_samp_arcsec,
'noise': noise_samp_sm,
'residual_level': res_lev_samp_sm,
'sigma corr': sigma_corr})
else:
datafr = pd.DataFrame({'sensitivity_gaussian': cont_curve_samp,
'throughput': thruput_interp,
'distance': rad_samp,
'distance_arcsec': rad_samp_arcsec,
'noise': noise_samp_sm,
'residual_level': res_lev_samp_sm})
if full_output:
return datafr, frame_fc_all, frame_nofc, fc_map_all
else:
return datafr
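# Illustration of the small-sample correction used above (Mawet et al. 2014),
# shown in isolation. At separation r (px) with aperture diameter `fwhm`, the
# number of resolution elements along the annulus is n = 2*pi*r/fwhm, and the
# Gaussian level `sigma` is mapped onto a Student-t quantile with n-1 degrees of
# freedom, inflated by sqrt(1 + 1/n):
#
#     sigma_corr = stats.t.ppf(stats.norm.cdf(sigma), n - 1) * np.sqrt(1 + 1/n)
#
# For sigma = 5 this leaves the curve essentially Gaussian far from the star
# (large n) but raises the effective threshold substantially at small
# separations, which is why the Student-corrected sensitivity is worse close in.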
def throughput(cube, angle_list, psf_template, fwhm, pxscale, algo, nbranch=1,
theta=0, inner_rad=1, fc_rad_sep=3, wedge=(0,360), fc_snr=100,
full_output=False, verbose=True, **algo_dict):
""" Measures the throughput for chosen algorithm and input dataset (ADI or
ADI+mSDI). The final throughput is the average of the same procedure
measured in ``nbranch`` azimutally equidistant branches.
Parameters
---------_
cube : numpy ndarray
The input cube, 3d (ADI data) or 4d array (IFS data), without fake
companions.
angle_list : numpy ndarray
Vector with the parallactic angles.
psf_template : numpy ndarray
Frame with the psf template for the fake companion(s).
PSF must be centered in array. Normalization is done internally.
fwhm: int or float or 1d array, optional
The the Full Width Half Maximum in pixels. It can handle a different
FWHM value for different wavelengths (IFS data).
pxscale : float
Plate scale in arcsec/px.
algo : callable or function
The post-processing algorithm, e.g. vip_hci.pca.pca. Third party Python
algorithms can be plugged here. They must have the parameters: 'cube',
'angle_list' and 'verbose'. Optionally a wrapper function can be used.
nbranch : int optional
Number of branches on which to inject fakes companions. Each branch
is tested individually.
theta : float, optional
Angle in degrees for rotating the position of the first branch that by
default is located at zero degrees. Theta counts counterclockwise from
the positive x axis.
inner_rad : int, optional
Innermost radial distance to be considered in terms of FWHM.
fc_rad_sep : int optional
Radial separation between the injected companions (in each of the
patterns) in FWHM. Must be large enough to avoid overlapping. With the
maximum possible value, a single fake companion will be injected per
cube and algorithm post-processing (which greatly affects computation
time).
wedge : tuple of floats, optional
Initial and Final angles for using a wedge. For example (-90,90) only
considers the right side of an image.
    fc_snr : float, optional
Signal to noise ratio of injected fake companions (w.r.t a Gaussian
distribution).
full_output : bool, optional
If True returns intermediate arrays.
verbose : bool, optional
If True prints out timing and information.
**algo_dict
Parameters of the post-processing algorithms must be passed here,
including imlib and interpolation.
Returns
-------
thruput_arr : numpy ndarray
2d array whose rows are the annulus-wise throughput values for each
branch.
vector_radd : numpy ndarray
1d array with the distances in FWHM (the positions of the annuli).
    If full_output is True then the function returns: thruput_arr, noise,
    res_level, vector_radd, frame_fc_all, frame_nofc and fc_map_all.
noise : numpy ndarray
1d array with the noise per annulus.
frame_fc_all : numpy ndarray
3d array with the 3 frames of the 3 (patterns) processed cubes with
companions.
frame_nofc : numpy ndarray
2d array, PCA processed frame without companions.
fc_map_all : numpy ndarray
3d array with 3 frames containing the position of the companions in the
3 patterns.
"""
array = cube
parangles = angle_list
imlib = algo_dict.get('imlib', 'vip-fft')
interpolation = algo_dict.get('interpolation', 'lanczos4')
if array.ndim != 3 and array.ndim != 4:
raise TypeError('The input array is not a 3d or 4d cube')
else:
if array.ndim == 3:
if array.shape[0] != parangles.shape[0]:
msg = 'Input parallactic angles vector has wrong length'
raise TypeError(msg)
if psf_template.ndim != 2:
raise TypeError('Template PSF is not a frame or 2d array')
maxfcsep = int((array.shape[1]/2.)/fwhm)-1
if fc_rad_sep < 3 or fc_rad_sep > maxfcsep:
msg = 'Too large separation between companions in the radial '
msg += 'patterns. Should lie between 3 and {}'
raise ValueError(msg.format(maxfcsep))
elif array.ndim == 4:
if array.shape[1] != parangles.shape[0]:
msg = 'Input vector or parallactic angles has wrong length'
raise TypeError(msg)
if psf_template.ndim != 3:
raise TypeError('Template PSF is not a frame, 3d array')
if 'scale_list' not in algo_dict:
raise ValueError('Vector of wavelength not found')
else:
if algo_dict['scale_list'].shape[0] != array.shape[0]:
raise TypeError('Input wavelength vector has wrong length')
if isinstance(fwhm, float) or isinstance(fwhm, int):
maxfcsep = int((array.shape[2] / 2.) / fwhm) - 1
else:
maxfcsep = int((array.shape[2] / 2.) / np.amin(fwhm)) - 1
if fc_rad_sep < 3 or fc_rad_sep > maxfcsep:
msg = 'Too large separation between companions in the '
msg += 'radial patterns. Should lie between 3 and {}'
raise ValueError(msg.format(maxfcsep))
if psf_template.shape[1] % 2 == 0:
raise ValueError("Only odd-sized PSF is accepted")
if not hasattr(algo, '__call__'):
raise TypeError('Parameter `algo` must be a callable function')
if not isinstance(inner_rad, int):
raise TypeError('inner_rad must be an integer')
angular_range = wedge[1] - wedge[0]
if nbranch > 1 and angular_range < 360:
msg = 'Only a single branch is allowed when working on a wedge'
raise RuntimeError(msg)
if isinstance(fwhm, (np.ndarray,list)):
fwhm_med = np.median(fwhm)
else:
fwhm_med = fwhm
if verbose:
start_time = time_ini()
#***************************************************************************
# Compute noise in concentric annuli on the "empty frame"
argl = inspect.getargspec(algo).args
if 'cube' in argl and 'angle_list' in argl and 'verbose' in argl:
if 'fwhm' in argl:
frame_nofc = algo(cube=array, angle_list=parangles, fwhm=fwhm_med,
verbose=False, **algo_dict)
if algo_dict.pop('scaling',None):
new_algo_dict = algo_dict.copy()
new_algo_dict['scaling'] = None
frame_nofc_noscal = algo(cube=array, angle_list=parangles,
fwhm=fwhm_med, verbose=False,
**new_algo_dict)
else:
frame_nofc_noscal = frame_nofc
else:
frame_nofc = algo(array, angle_list=parangles, verbose=False,
**algo_dict)
if algo_dict.pop('scaling',None):
new_algo_dict = algo_dict.copy()
new_algo_dict['scaling'] = None
frame_nofc_noscal = algo(cube=array, angle_list=parangles,
verbose=False, **new_algo_dict)
else:
frame_nofc_noscal = frame_nofc
if verbose:
msg1 = 'Cube without fake companions processed with {}'
print(msg1.format(algo.__name__))
timing(start_time)
noise, res_level, vector_radd = noise_per_annulus(frame_nofc,
separation=fwhm_med,
fwhm=fwhm_med,
wedge=wedge)
noise_noscal, _, _ = noise_per_annulus(frame_nofc_noscal,
separation=fwhm_med, fwhm=fwhm_med,
wedge=wedge)
vector_radd = vector_radd[inner_rad-1:]
noise = noise[inner_rad-1:]
res_level = res_level[inner_rad-1:]
noise_noscal = noise_noscal[inner_rad-1:]
if verbose:
print('Measured annulus-wise noise in resulting frame')
timing(start_time)
# We crop the PSF and check if PSF has been normalized (so that flux in
# 1*FWHM aperture = 1) and fix if needed
new_psf_size = int(round(3 * fwhm_med))
if new_psf_size % 2 == 0:
new_psf_size += 1
if cube.ndim == 3:
n, y, x = array.shape
psf_template = normalize_psf(psf_template, fwhm=fwhm, verbose=verbose,
size=min(new_psf_size,
psf_template.shape[1]))
# Initialize the fake companions
angle_branch = angular_range / nbranch
thruput_arr = np.zeros((nbranch, noise.shape[0]))
fc_map_all = np.zeros((nbranch * fc_rad_sep, y, x))
frame_fc_all = np.zeros((nbranch * fc_rad_sep, y, x))
cy, cx = frame_center(array[0])
# each branch is computed separately
for br in range(nbranch):
# each pattern is computed separately. For each one the companions
# are separated by "fc_rad_sep * fwhm", interleaving the injections
for irad in range(fc_rad_sep):
radvec = vector_radd[irad::fc_rad_sep]
cube_fc = array.copy()
# filling map with small numbers
fc_map = np.ones_like(array[0]) * 1e-6
fcy = []
fcx = []
for i in range(radvec.shape[0]):
flux = fc_snr * noise_noscal[irad + i * fc_rad_sep]
cube_fc = cube_inject_companions(cube_fc, psf_template,
parangles, flux, pxscale,
rad_dists=[radvec[i]],
theta=br*angle_branch +
theta,
imlib=imlib, verbose=False,
interpolation=
interpolation)
y = cy + radvec[i] * np.sin(np.deg2rad(br * angle_branch +
theta))
x = cx + radvec[i] * np.cos(np.deg2rad(br * angle_branch +
theta))
fc_map = frame_inject_companion(fc_map, psf_template, y, x,
flux, imlib, interpolation)
fcy.append(y)
fcx.append(x)
if verbose:
msg2 = 'Fake companions injected in branch {} '
msg2 += '(pattern {}/{})'
print(msg2.format(br+1, irad+1, fc_rad_sep))
timing(start_time)
#***************************************************************
arg = inspect.getargspec(algo).args
if 'cube' in arg and 'angle_list' in arg and 'verbose' in arg:
if 'fwhm' in arg:
frame_fc = algo(cube=cube_fc, angle_list=parangles,
fwhm=fwhm_med, verbose=False,
**algo_dict)
else:
frame_fc = algo(cube=cube_fc, angle_list=parangles,
verbose=False, **algo_dict)
else:
msg = 'Input algorithm must have at least 3 parameters: '
msg += 'cube, angle_list and verbose'
raise ValueError(msg)
if verbose:
msg3 = 'Cube with fake companions processed with {}'
msg3 += '\nMeasuring its annulus-wise throughput'
print(msg3.format(algo.__name__))
timing(start_time)
#**************************************************************
injected_flux = aperture_flux(fc_map, fcy, fcx, fwhm_med)
recovered_flux = aperture_flux((frame_fc - frame_nofc), fcy,
fcx, fwhm_med)
thruput = recovered_flux / injected_flux
thruput[np.where(thruput < 0)] = 0
thruput_arr[br, irad::fc_rad_sep] = thruput
fc_map_all[br*fc_rad_sep+irad, :, :] = fc_map
frame_fc_all[br*fc_rad_sep+irad, :, :] = frame_fc
elif cube.ndim == 4:
w, n, y, x = array.shape
if isinstance(fwhm, (int, float)):
fwhm = [fwhm] * w
psf_template = normalize_psf(psf_template, fwhm=fwhm, verbose=verbose,
size=min(new_psf_size,
psf_template.shape[1]))
# Initialize the fake companions
angle_branch = angular_range / nbranch
thruput_arr = np.zeros((nbranch, noise.shape[0]))
fc_map_all = np.zeros((nbranch * fc_rad_sep, w, y, x))
frame_fc_all = np.zeros((nbranch * fc_rad_sep, y, x))
cy, cx = frame_center(array[0, 0])
# each branch is computed separately
for br in range(nbranch):
# each pattern is computed separately. For each pattern the
# companions are separated by "fc_rad_sep * fwhm"
# radius = vector_radd[irad::fc_rad_sep]
for irad in range(fc_rad_sep):
radvec = vector_radd[irad::fc_rad_sep]
thetavec = range(int(theta), int(theta) + 360,
360 // len(radvec))
cube_fc = array.copy()
# filling map with small numbers
fc_map = np.ones_like(array[:, 0]) * 1e-6
fcy = []
fcx = []
for i in range(radvec.shape[0]):
flux = fc_snr * noise_noscal[irad + i * fc_rad_sep]
cube_fc = cube_inject_companions(cube_fc, psf_template,
parangles, flux, pxscale,
rad_dists=[radvec[i]],
theta=thetavec[i],
verbose=False,
imlib=imlib,
interpolation=
interpolation)
y = cy + radvec[i] * np.sin(np.deg2rad(br * angle_branch +
thetavec[i]))
x = cx + radvec[i] * np.cos(np.deg2rad(br * angle_branch +
thetavec[i]))
fc_map = frame_inject_companion(fc_map, psf_template, y, x,
flux)
fcy.append(y)
fcx.append(x)
if verbose:
msg2 = 'Fake companions injected in branch {} '
msg2 += '(pattern {}/{})'
print(msg2.format(br + 1, irad + 1, fc_rad_sep))
timing(start_time)
# **************************************************************
arg = inspect.getargspec(algo).args
if 'cube' in arg and 'angle_list' in arg and 'verbose' in arg:
if 'fwhm' in arg:
frame_fc = algo(cube=cube_fc, angle_list=parangles,
fwhm=fwhm_med, verbose=False,
**algo_dict)
else:
frame_fc = algo(cube=cube_fc, angle_list=parangles,
verbose=False, **algo_dict)
if verbose:
msg3 = 'Cube with fake companions processed with {}'
msg3 += '\nMeasuring its annulus-wise throughput'
print(msg3.format(algo.__name__))
timing(start_time)
# *************************************************************
injected_flux = [aperture_flux(fc_map[i], fcy, fcx, fwhm[i])
for i in range(array.shape[0])]
injected_flux = np.mean(injected_flux, axis=0)
recovered_flux = aperture_flux((frame_fc - frame_nofc), fcy,
fcx, fwhm_med)
thruput = recovered_flux / injected_flux
thruput[np.where(thruput < 0)] = 0
thruput_arr[br, irad::fc_rad_sep] = thruput
fc_map_all[br * fc_rad_sep + irad, :, :] = fc_map
frame_fc_all[br * fc_rad_sep + irad, :, :] = frame_fc
if verbose:
msg = 'Finished measuring the throughput in {} branches'
print(msg.format(nbranch))
timing(start_time)
if full_output:
return (thruput_arr, noise, res_level, vector_radd, frame_fc_all,
frame_nofc, fc_map_all)
else:
return thruput_arr, vector_radd
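# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The `algo` argument of the function above must be a callable exposing at least
# `cube`, `angle_list` and `verbose` parameters (and optionally `fwhm`), as the
# error raised inside the injection loop indicates. A minimal stand-in that
# satisfies this contract, assumed here purely for illustration, could be:
def _example_algo(cube, angle_list, verbose=False, **kwargs):
    # placeholder residual frame: median-collapse the cube without derotation
    # (a real post-processing algorithm such as PCA would derotate and subtract)
    return np.median(cube, axis=0)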
def noise_per_annulus(array, separation, fwhm, init_rad=None, wedge=(0, 360),
verbose=False, debug=False):
""" Measures the noise and mean residual level as the standard deviation
and mean, respectively, of apertures defined in each annulus with a given
separation.
The annuli start at init_rad (== fwhm by default) and stop 2*separation
before the edge of the frame.
Parameters
----------
array : numpy ndarray
Input frame.
separation : float
Separation in pixels of the centers of the annuli measured from the
center of the frame.
fwhm : float
FWHM in pixels.
init_rad : float
Initial radial distance to be used. If None then the init_rad = FWHM.
wedge : tuple of floats, optional
Initial and Final angles for using a wedge. For example (-90,90) only
considers the right side of an image. Be careful when using small
wedges, this leads to computing a standard deviation of very small
samples (<10 values).
verbose : bool, optional
If True prints information.
debug : bool, optional
If True plots the positioning of the apertures.
Returns
-------
noise : numpy ndarray
Vector with the noise value per annulus.
res_level : numpy ndarray
Vector with the mean residual level per annulus.
vector_radd : numpy ndarray
Vector with the radial distances values.
"""
def find_coords(rad, sep, init_angle, fin_angle):
angular_range = fin_angle-init_angle
npoints = (np.deg2rad(angular_range)*rad)/sep #(2*np.pi*rad)/sep
ang_step = angular_range/npoints #360/npoints
x = []
y = []
for i in range(int(npoints)):
newx = rad * np.cos(np.deg2rad(ang_step * i + init_angle))
newy = rad * np.sin(np.deg2rad(ang_step * i + init_angle))
x.append(newx)
y.append(newy)
return np.array(y), np.array(x)
###
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if not isinstance(wedge, tuple):
raise TypeError('Wedge must be a tuple with the initial and final '
'angles')
if init_rad is None:
init_rad = fwhm
init_angle, fin_angle = wedge
centery, centerx = frame_center(array)
n_annuli = int(np.floor((centery - init_rad)/separation)) - 1
noise = []
res_level = []
vector_radd = []
if verbose:
print('{} annuli'.format(n_annuli))
if debug:
_, ax = plt.subplots(figsize=(6, 6))
ax.imshow(array, origin='lower', interpolation='nearest',
alpha=0.5, cmap='gray')
for i in range(n_annuli):
y = centery + init_rad + separation * i
rad = dist(centery, centerx, y, centerx)
yy, xx = find_coords(rad, fwhm, init_angle, fin_angle)
yy += centery
xx += centerx
apertures = photutils.CircularAperture(np.array((xx, yy)).T, fwhm/2)
fluxes = photutils.aperture_photometry(array, apertures)
fluxes = np.array(fluxes['aperture_sum'])
noise_ann = np.std(fluxes)
mean_ann = np.mean(fluxes)
noise.append(noise_ann)
res_level.append(mean_ann)
vector_radd.append(rad)
if debug:
for j in range(xx.shape[0]):
# Circle takes coordinates as (X,Y)
aper = plt.Circle((xx[j], yy[j]), radius=fwhm/2, color='r',
fill=False, alpha=0.8)
ax.add_patch(aper)
cent = plt.Circle((xx[j], yy[j]), radius=0.8, color='r',
fill=True, alpha=0.5)
ax.add_patch(cent)
if verbose:
print('Radius(px) = {}, Noise = {:.3f} '.format(rad, noise_ann))
return np.array(noise), np.array(res_level), np.array(vector_radd)
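# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Minimal, hedged example of calling `noise_per_annulus` on a synthetic residual
# frame; the frame contents and parameter values are assumptions chosen only for
# illustration.
def _example_noise_per_annulus():
    rng = np.random.default_rng(0)
    frame = rng.normal(0.0, 1.0, (101, 101))  # synthetic post-processed frame
    noise, res_level, radii = noise_per_annulus(frame, separation=4, fwhm=4)
    # one noise / mean-residual value per annulus, starting at init_rad (= fwhm)
    return noise, res_level, radii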
def aperture_flux(array, yc, xc, fwhm, ap_factor=1, mean=False, verbose=False):
""" Returns the sum of pixel values in a circular aperture centered on the
input coordinates. The radius of the aperture is set as (ap_factor*fwhm)/2.
Parameters
----------
array : numpy ndarray
Input frame.
yc, xc : list or 1d arrays
List of y and x coordinates of sources.
fwhm : float
FWHM in pixels.
ap_factor : int, optional
Diameter of aperture in terms of the FWHM.
Returns
-------
flux : list of floats
List of fluxes.
Note
----
From Photutils documentation, the aperture photometry defines the aperture
using one of 3 methods:
'center': A pixel is considered to be entirely in or out of the aperture
depending on whether its center is in or out of the aperture.
'subpixel': A pixel is divided into subpixels and the center of each
subpixel is tested (as above).
'exact': (default) The exact overlap between the aperture and each pixel is
calculated.
"""
n_obj = len(yc)
flux = np.zeros((n_obj))
for i, (y, x) in enumerate(zip(yc, xc)):
if mean:
ind = disk((y, x), (ap_factor*fwhm)/2)
values = array[ind]
obj_flux = np.mean(values)
else:
aper = photutils.CircularAperture((x, y), (ap_factor*fwhm)/2)
obj_flux = photutils.aperture_photometry(array, aper,
method='exact')
obj_flux = np.array(obj_flux['aperture_sum'])
flux[i] = obj_flux
if verbose:
print('Coordinates of object {} : ({},{})'.format(i, y, x))
print('Object Flux = {:.2f}'.format(flux[i]))
return flux
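# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Example of `aperture_flux` on a frame with a single bright pixel, showing the
# 'exact' aperture-overlap behaviour described in the Note above. All values are
# assumptions made purely for illustration.
def _example_aperture_flux():
    frame = np.zeros((21, 21))
    frame[10, 10] = 1.0
    # a single aperture of radius (ap_factor * fwhm) / 2 = 2 px on the source
    return aperture_flux(frame, yc=[10], xc=[10], fwhm=4)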
|
the-stack_0_12707 | import tensorflow as tf
from keras.utils.np_utils import to_categorical
from models import Models
from utils import plot
from parameters import batch_size, epochs, validation_split, verbose
def main():
# load data
    # the first time this runs, the dataset will be downloaded
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
    # print dimensions
print("shape of train-set is: ", train_images.shape)
print("shape of test-set is: ", test_images.shape)
print("The number of classes is ", len(set(test_labels)))
    # convert labels to categorical (one-hot)
y_train = []
for i in train_labels:
y_train.append(int(i))
y_train = to_categorical(y_train)
y_test = []
for i in test_labels:
y_test.append(int(i))
y_test = to_categorical(y_test)
# reshape images to 28*28*1
# convert to 3-D
X_train = train_images.reshape(
train_images.shape[0],
train_images.shape[1],
train_images.shape[2],
1
)
X_test = test_images.reshape(
test_images.shape[0],
test_images.shape[1],
test_images.shape[2],
1
)
# parameters
input_shape = X_train.shape[1:]
num_class = len(set(test_labels))
    # instantiate the models
Model = Models()
model = Model.MLP(input_shape, num_class)
# fit model
    history = model.fit(
        X_train, y_train,
        epochs=epochs,
        verbose=verbose,
        batch_size=batch_size,
        validation_split=validation_split,
    )
    # plot training phase
    plot(history, "MLP")
# print accuracy and loss
out = model.evaluate(X_test, y_test)
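# --- Illustrative sketch (added for clarity; not part of the original script) ---
# `Models`, `plot` and the parameters module are project-local imports whose
# definitions are not shown here. A plausible minimal `Models.MLP`, consistent
# only with how it is used above (input_shape, num_class -> compiled Keras
# model), is sketched below as an assumption:
class _SketchModels:
    def MLP(self, input_shape, num_class):
        model = tf.keras.Sequential([
            tf.keras.layers.Flatten(input_shape=input_shape),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(num_class, activation='softmax'),
        ])
        model.compile(optimizer='adam', loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model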
if __name__ == '__main__':
main() |
the-stack_0_12710 | """Methods based on Newton's method."""
import numpy as np
from optimus.types import DirectionMethod
from optimus.types import Function
class Newton(DirectionMethod):
"""Classic Netwon's method. Direction is the inverse hessian times gradient."""
def __call__(
self, parameters: np.ndarray, objective_function: Function
) -> np.ndarray:
return np.linalg.inv(objective_function.hessian(parameters)).dot(
objective_function.gradient(parameters)
)
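# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A toy quadratic objective implementing the `gradient`/`hessian` interface used
# above (the exact `Function` protocol is assumed here). For a quadratic
# f(x) = 0.5 x^T A x - b^T x, subtracting the Newton direction from any starting
# point reaches the minimiser A^{-1} b in a single step.
class _Quadratic:
    def __init__(self, A: np.ndarray, b: np.ndarray):
        self.A = A
        self.b = b
    def gradient(self, x: np.ndarray) -> np.ndarray:
        return self.A.dot(x) - self.b
    def hessian(self, x: np.ndarray) -> np.ndarray:
        return self.A
def _example_newton_step():
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([1.0, 1.0])
    x0 = np.zeros(2)
    direction = Newton()(x0, _Quadratic(A, b))  # H^{-1} * grad at x0
    return x0 - direction  # equals np.linalg.solve(A, b) for this objective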
|
the-stack_0_12711 | import torch
from tvl_backends.nvdec import nv12_to_rgb
def test_nv12_to_rgb():
w = 3840
h = 2160
nv12 = torch.empty(int(w * h * 1.5), device='cuda:0', dtype=torch.uint8)
for i in range(100):
nv12.random_(0, 256)
rgb = nv12_to_rgb(nv12, h, w)
assert rgb.shape == (3, h, w)
|
the-stack_0_12713 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import mock
import unittest
from cloudbaseinit import exception as cbinit_exception
class WindowsNetworkUtilsTests(unittest.TestCase):
def setUp(self):
self._ctypes_mock = mock.MagicMock()
self._moves_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'ctypes': self._ctypes_mock,
'six.moves': self._moves_mock})
self._module_patcher.start()
self.network = importlib.import_module(
'cloudbaseinit.utils.windows.network')
self.network.iphlpapi = mock.MagicMock()
self.network.kernel32 = mock.MagicMock()
self.network.ws2_32 = mock.MagicMock()
def tearDown(self):
self._module_patcher.stop()
def test_format_mac_address(self):
phys_address = [00, 00, 00, 00]
response = self.network._format_mac_address(phys_address=phys_address,
phys_address_len=4)
self.assertEqual("00:00:00:00", response)
def _test_socket_addr_to_str(self, ret_val):
mock_socket_addr = mock.MagicMock()
mock_create_unicode_buffer = self._ctypes_mock.create_unicode_buffer
mock_byref = self._ctypes_mock.byref
self.network.ws2_32.WSAAddressToStringW.return_value = ret_val
if ret_val:
self.assertRaises(cbinit_exception.CloudbaseInitException,
self.network._socket_addr_to_str,
mock_socket_addr)
self.network.ws2_32.WSAGetLastError.assert_called_once_with()
else:
response = self.network._socket_addr_to_str(mock_socket_addr)
self.assertEqual(mock_create_unicode_buffer.return_value.value,
response)
self._ctypes_mock.wintypes.DWORD.assert_called_once_with(256)
mock_create_unicode_buffer.assert_called_once_with(256)
self.network.ws2_32.WSAAddressToStringW.assert_called_once_with(
mock_socket_addr.lpSockaddr, mock_socket_addr.iSockaddrLength,
None, mock_create_unicode_buffer.return_value,
mock_byref.return_value)
mock_byref.assert_called_once_with(
self._ctypes_mock.wintypes.DWORD.return_value)
def test_socket_addr_to_str(self):
self._test_socket_addr_to_str(ret_val=None)
def test_socket_addr_to_str_fail(self):
self._test_socket_addr_to_str(ret_val=1)
def _test_get_registry_dhcp_server(self, dhcp_server, exception=None):
fake_adapter = mock.sentinel.fake_adapter_name
self._moves_mock.winreg.QueryValueEx.return_value = [dhcp_server]
if exception:
self._moves_mock.winreg.QueryValueEx.side_effect = [exception]
if exception.errno != 2:
self.assertRaises(cbinit_exception.CloudbaseInitException,
self.network._get_registry_dhcp_server,
fake_adapter)
else:
response = self.network._get_registry_dhcp_server(fake_adapter)
if dhcp_server == "255.255.255.255":
self.assertEqual(None, response)
else:
self.assertEqual(dhcp_server, response)
self._moves_mock.winreg.OpenKey.assert_called_once_with(
self._moves_mock.winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters\\"
"Interfaces\\%s" % fake_adapter, 0,
self._moves_mock.winreg.KEY_READ)
self._moves_mock.winreg.QueryValueEx.assert_called_once_with(
self._moves_mock.winreg.OpenKey.return_value.__enter__(),
"DhcpServer")
def test_get_registry_dhcp_server(self):
self._test_get_registry_dhcp_server(
dhcp_server=mock.sentinel.dhcp_server)
def test_get_registry_dhcp_server_expected(self):
self._test_get_registry_dhcp_server(dhcp_server="255.255.255.255")
    def test_get_registry_dhcp_server_exception_not_found(self):
ex = cbinit_exception.CloudbaseInitException()
ex.errno = 2
self._test_get_registry_dhcp_server(dhcp_server="", exception=ex)
    def test_get_registry_dhcp_server_exception_other(self):
ex = cbinit_exception.CloudbaseInitException()
ex.errno = 3
self._test_get_registry_dhcp_server(dhcp_server="", exception=ex)
@mock.patch('cloudbaseinit.utils.windows.network._format_mac_address')
@mock.patch('cloudbaseinit.utils.windows.network._socket_addr_to_str')
@mock.patch('cloudbaseinit.utils.windows.network'
'._get_registry_dhcp_server')
def _test_get_adapter_addresses(self, mock_get_registry_dhcp_server,
mock_socket_addr_to_str,
mock_format_mac_address,
ret_val, p, ret_val2, xp_data_length):
self.maxDiff = None
mock_byref = self._ctypes_mock.byref
mock_cast = self._ctypes_mock.cast
mock_POINTER = self._ctypes_mock.POINTER
self.network.iphlpapi.GetAdaptersAddresses.side_effect = [ret_val,
ret_val2]
self.network.kernel32.HeapAlloc.return_value = p
self.network.iphlpapi.IP_ADAPTER_DHCP_ENABLED = True
self.network.iphlpapi.IP_ADAPTER_IPV4_ENABLED = True
self.network.iphlpapi.IP_ADAPTER_ADDRESSES_SIZE_2003 = xp_data_length
p_curr_addr = mock.MagicMock()
compare_cast = []
net_adapters = []
compare_socket_addr_to_str = []
mock_cast.side_effect = [p_curr_addr, None, None]
curr_addr = p_curr_addr.contents
curr_addr.Flags = True
curr_addr.Union1.Struct1.Length = 2
curr_addr.Dhcpv4Server.iSockaddrLength = True
p_unicast_addr = curr_addr.FirstUnicastAddress
unicast_addr = p_unicast_addr.contents
unicast_addresses = [
(mock_socket_addr_to_str.return_value,
unicast_addr.Address.lpSockaddr.contents.sa_family)]
compare_GetAdaptersAddresses = [mock.call(
self.network.ws2_32.AF_UNSPEC,
self.network.iphlpapi.GAA_FLAG_SKIP_ANYCAST,
None, None, mock_byref.return_value)]
if not p:
self.assertRaises(cbinit_exception.CloudbaseInitException,
self.network.get_adapter_addresses)
if ret_val2 and ret_val2 != self.network.kernel32.ERROR_NO_DATA:
self.assertRaises(cbinit_exception.CloudbaseInitException,
self.network.get_adapter_addresses)
compare_cast.append(mock.call(p, mock_POINTER.return_value))
compare_GetAdaptersAddresses.append(mock.call(
self.network.ws2_32.AF_UNSPEC,
self.network.iphlpapi.GAA_FLAG_SKIP_ANYCAST, None,
p_curr_addr, mock_byref.return_value))
else:
response = self.network.get_adapter_addresses()
if ret_val == self.network.kernel32.ERROR_NO_DATA:
self.assertEqual([], response)
elif ret_val == self.network.kernel32.ERROR_BUFFER_OVERFLOW:
self.network.kernel32.GetProcessHeap.assert_called_once_with()
self.network.kernel32.HeapAlloc.assert_called_once_with(
self.network.kernel32.GetProcessHeap.return_value, 0,
self._ctypes_mock.wintypes.ULONG.return_value.value)
self.network.ws2_32.init_wsa.assert_called_once_with()
compare_cast.append(mock.call(p, mock_POINTER.return_value))
compare_GetAdaptersAddresses.append(mock.call(
self.network.ws2_32.AF_UNSPEC,
self.network.iphlpapi.GAA_FLAG_SKIP_ANYCAST, None,
p_curr_addr, mock_byref.return_value))
if ret_val2 == self.network.kernel32.ERROR_NO_DATA:
self.assertEqual([], response)
else:
compare_cast.append(mock.call(p_unicast_addr.contents.Next,
mock_POINTER.return_value))
mock_format_mac_address.assert_called_once_with(
p_curr_addr.contents.PhysicalAddress,
p_curr_addr.contents.PhysicalAddressLength)
if not curr_addr.Union1.Struct1.Length <= xp_data_length:
dhcp_server = mock_socket_addr_to_str.return_value
compare_socket_addr_to_str.append(
mock.call(curr_addr.Dhcpv4Server |
curr_addr.Dhcpv6Server))
else:
dhcp_server = \
mock_get_registry_dhcp_server.return_value
mock_get_registry_dhcp_server.assert_called_once_with(
curr_addr.AdapterName)
compare_cast.append(mock.call(curr_addr.Next,
mock_POINTER.return_value))
self.network.kernel32.HeapFree.assert_called_once_with(
self.network.kernel32.GetProcessHeap.return_value, 0,
p)
self.network.ws2_32.WSACleanup.assert_called_once_with()
compare_socket_addr_to_str.append(mock.call(
unicast_addr.Address))
net_adapters.append(
{"interface_index": curr_addr.Union1.Struct1.IfIndex,
"adapter_name": curr_addr.AdapterName,
"friendly_name": curr_addr.FriendlyName,
"description": curr_addr.Description,
"mtu": curr_addr.Mtu,
"mac_address": mock_format_mac_address.return_value,
"dhcp_enabled": True,
"dhcp_server": dhcp_server,
"interface_type": curr_addr.IfType,
"unicast_addresses": unicast_addresses})
self.assertEqual(net_adapters, response)
self.assertEqual(compare_cast, mock_cast.call_args_list)
self.assertEqual(
compare_GetAdaptersAddresses,
self.network.iphlpapi.GetAdaptersAddresses.call_args_list)
def test_get_adapter_addresses_no_data(self):
self._test_get_adapter_addresses(
ret_val=self.network.kernel32.ERROR_NO_DATA,
p=True, ret_val2=self.network.kernel32.ERROR_NO_DATA,
xp_data_length=3)
def test_get_adapter_addresses_overflow_and_no_data(self):
self._test_get_adapter_addresses(
ret_val=self.network.kernel32.ERROR_BUFFER_OVERFLOW,
p=True, ret_val2=self.network.kernel32.ERROR_NO_DATA,
xp_data_length=3)
def test_get_adapter_addresses_overflow_other_ret_val(self):
self._test_get_adapter_addresses(
ret_val=self.network.kernel32.ERROR_BUFFER_OVERFLOW,
p=True, ret_val2=mock.sentinel.other_return_value,
xp_data_length=3)
def test_get_adapter_addresses_overflow(self):
self._test_get_adapter_addresses(
ret_val=self.network.kernel32.ERROR_BUFFER_OVERFLOW,
p=True, ret_val2=None,
xp_data_length=3)
def test_get_adapter_addresses_overflow_xp_data(self):
self._test_get_adapter_addresses(
ret_val=self.network.kernel32.ERROR_BUFFER_OVERFLOW,
p=True, ret_val2=None,
xp_data_length=0)
|
the-stack_0_12714 | import repoInfo
from filechange import ischanged
from colors import logcolors
import pyfiglet
import logger
from utils import initCommands
def init():
info = repoInfo.checkinfoInDir()
url, branch = info
logger.checkdata(url , branch)
if('n' in info):
initCommands(info)
else:
print(f'{logcolors.BOLD}Retrieving info from git directory{logcolors.ENDC}')
print(f'{logcolors.CYAN}URL:{logcolors.ENDC} {url} , {logcolors.CYAN}Branch:{logcolors.ENDC} {branch}')
ischanged(url,branch)
if __name__ == '__main__':
f = pyfiglet.figlet_format('G - AUTO', font='5lineoblique')
print(f"{logcolors.BOLD}{f}{logcolors.ENDC}")
init()
|
the-stack_0_12718 | # -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import boto3
import botocore.session
from botocore.stub import Stubber
from common import AV_SIGNATURE_METADATA
from common import AV_SIGNATURE_OK
from common import AV_STATUS_METADATA
from common import AV_TIMESTAMP_METADATA
from common import get_timestamp
import scan
from moto import mock_sqs
from moto import mock_s3
from publish import send_to_queue
class TestScan(unittest.TestCase):
def setUp(self):
# Common data
self.s3_bucket_name = "test_bucket"
self.s3_key_name = "test_key"
# Clients and Resources
self.s3 = boto3.resource("s3")
self.s3_client = botocore.session.get_session().create_client("s3")
self.s3_obj = self.s3.Object(self.s3_bucket_name, self.s3_key_name)
@mock_sqs
def test_get_objects_from_sqs(self):
sqs = boto3.client("sqs")
queue = sqs.create_queue(QueueName="test-queue")
queue_url = queue["QueueUrl"]
# Stage SQS queue with a message
message = self.s3_key_name
send_to_queue(message, queue_url)
all_objects = scan.get_objects_from_sqs(queue_url, self.s3_bucket_name)
self.assertEquals(len(all_objects), 1)
self.assertEquals(all_objects[0], self.s3_obj)
def test_set_av_tags(self):
scan_result = "not_malicious"
scan_signature = AV_SIGNATURE_OK
timestamp = get_timestamp()
tag_set = {
"TagSet": [
{"Key": "Arbitrary", "Value": "arbitrary"},
{"Key": AV_SIGNATURE_METADATA, "Value": scan_signature},
{"Key": AV_STATUS_METADATA, "Value": scan_result},
{"Key": AV_TIMESTAMP_METADATA, "Value": timestamp},
]
}
s3_stubber = Stubber(self.s3_client)
get_object_tagging_response = tag_set
get_object_tagging_expected_params = {
"Bucket": self.s3_bucket_name,
"Key": self.s3_key_name,
}
s3_stubber.add_response(
"get_object_tagging",
get_object_tagging_response,
get_object_tagging_expected_params,
)
put_object_tagging_response = {}
put_object_tagging_expected_params = {
"Bucket": self.s3_bucket_name,
"Key": self.s3_key_name,
"Tagging": tag_set,
}
s3_stubber.add_response(
"put_object_tagging",
put_object_tagging_response,
put_object_tagging_expected_params,
)
with s3_stubber:
response = scan.set_av_tags(
self.s3_client, self.s3_obj, scan_result, scan_signature, timestamp
)
assert response == tag_set["TagSet"]
def test_str_to_bool(self):
string = "True"
result = scan.str_to_bool(string)
assert result is True
@mock_s3
def test_download_file(self):
s3 = boto3.resource("s3")
s3_client = botocore.session.get_session().create_client("s3")
s3_client.create_bucket(Bucket=self.s3_bucket_name)
s3_client.put_object(Bucket=self.s3_bucket_name, Key=self.s3_key_name, Body="")
s3_obj = s3.Object(self.s3_bucket_name, self.s3_key_name)
scan.download_file(s3_obj)
assert os.path.isfile(f"/tmp/scandir/{s3_obj.key}")
|
the-stack_0_12719 | """Config flow for OpenWeatherMap."""
import logging
from pyowm import OWM
from pyowm.exceptions.api_call_error import APICallError
from pyowm.exceptions.api_response_error import UnauthorizedError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MODE,
CONF_NAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_LANGUAGE,
DEFAULT_FORECAST_MODE,
DEFAULT_LANGUAGE,
DEFAULT_NAME,
FORECAST_MODES,
LANGUAGES,
)
from .const import DOMAIN # pylint:disable=unused-import
SCHEMA = vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): str,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_MODE, default=DEFAULT_FORECAST_MODE): vol.In(FORECAST_MODES),
vol.Optional(CONF_LANGUAGE, default=DEFAULT_LANGUAGE): vol.In(LANGUAGES),
}
)
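# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Example of validating user input against the schema above; the concrete values
# are assumptions used only for illustration. Passing this mapping through
# SCHEMA(...) would return it with the CONF_MODE and CONF_LANGUAGE defaults
# filled in.
_EXAMPLE_USER_INPUT = {
    CONF_API_KEY: "0123456789abcdef",
    CONF_NAME: "Home",
    CONF_LATITUDE: 52.37,
    CONF_LONGITUDE: 4.89,
}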
_LOGGER = logging.getLogger(__name__)
class OpenWeatherMapConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for OpenWeatherMap."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OpenWeatherMapOptionsFlow(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
latitude = user_input[CONF_LATITUDE]
longitude = user_input[CONF_LONGITUDE]
await self.async_set_unique_id(f"{latitude}-{longitude}")
self._abort_if_unique_id_configured()
try:
api_online = await _is_owm_api_online(
self.hass, user_input[CONF_API_KEY]
)
if not api_online:
errors["base"] = "invalid_api_key"
except UnauthorizedError:
errors["base"] = "invalid_api_key"
except APICallError:
errors["base"] = "cannot_connect"
if not errors:
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
return self.async_show_form(step_id="user", data_schema=SCHEMA, errors=errors)
async def async_step_import(self, import_input=None):
"""Set the config entry up from yaml."""
config = import_input.copy()
if CONF_NAME not in config:
config[CONF_NAME] = DEFAULT_NAME
if CONF_LATITUDE not in config:
config[CONF_LATITUDE] = self.hass.config.latitude
if CONF_LONGITUDE not in config:
config[CONF_LONGITUDE] = self.hass.config.longitude
if CONF_MODE not in config:
config[CONF_MODE] = DEFAULT_FORECAST_MODE
if CONF_LANGUAGE not in config:
config[CONF_LANGUAGE] = DEFAULT_LANGUAGE
return await self.async_step_user(config)
class OpenWeatherMapOptionsFlow(config_entries.OptionsFlow):
"""Handle options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=self._get_options_schema(),
)
def _get_options_schema(self):
return vol.Schema(
{
vol.Optional(
CONF_MODE,
default=self.config_entry.options.get(
CONF_MODE, DEFAULT_FORECAST_MODE
),
): vol.In(FORECAST_MODES),
vol.Optional(
CONF_LANGUAGE,
default=self.config_entry.options.get(
CONF_LANGUAGE, DEFAULT_LANGUAGE
),
): vol.In(LANGUAGES),
}
)
async def _is_owm_api_online(hass, api_key):
owm = OWM(api_key)
return await hass.async_add_executor_job(owm.is_API_online)
|
the-stack_0_12724 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : trading_calendars.trading_calendar.TradingCalendar
A TradingCalendar on which session labels to resample from minute
to session.
Return
------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
labels = calendar.minute_index_to_session_labels(minute_frame.index)
return minute_frame.groupby(labels).agg(how)
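# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The OHLCV aggregation rules above applied with a plain pandas groupby on
# made-up minute data; the precomputed session labels stand in for what a real
# TradingCalendar would provide, and both are assumptions used only for
# illustration.
def _example_minute_to_session_groupby():
    index = pd.date_range('2016-03-17 13:31', periods=4, freq='min', tz='UTC')
    minute_frame = pd.DataFrame(
        {'open': [10., 11., 12., 13.],
         'high': [10., 12., 12., 14.],
         'low': [9., 10., 11., 12.],
         'close': [10., 11., 12., 13.],
         'volume': [100, 200, 300, 400]},
        index=index,
    )
    labels = pd.Series([index[0].normalize()] * len(index), index=index)
    how = {c: _MINUTE_TO_SESSION_OHCLV_HOW[c] for c in minute_frame.columns}
    return minute_frame.groupby(labels).agg(how)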
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
The first value should align with the market open of the first session,
containing values for all minutes for all sessions. With the last value
being the market close of the last session.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out
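# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Toy call of `minute_to_session` with two "sessions" of three minutes each and
# close minutes at locations 2 and 5; the float64 data and the expected result
# are assumptions made only for illustration.
def _example_minute_to_session():
    data = np.array([1., 2., 3., 4., 5., 6.])
    close_locs = np.array([2, 5], dtype=np.intp)
    out = np.zeros(2)
    minute_to_session('high', close_locs, data, out)
    return out  # per-session maxima, expected to be [3., 6.]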
class DailyHistoryAggregator:
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during the course of the simulation day.
    Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their respective
    methods.
"""
def __init__(self, market_opens, minute_reader, trading_calendar):
self._market_opens = market_opens
self._minute_reader = minute_reader
self._trading_calendar = trading_calendar
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
# When the requested dt's date is different from date the cache is
# flushed, so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
session = self._trading_calendar.minute_to_session_label(dt)
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != session:
market_open = self._market_opens.loc[session]
cache = self._caches[field] = (session, market_open, {})
_, market_open, entries = cache
market_open = market_open.tz_localize('UTC')
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
for the day, if there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
Returns the most recent non-nan close for the asset in this
session. If there has been no data in this session on or before the
`dt`, returns `nan`
"""
window = self._minute_reader.load_raw_arrays(
['close'],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`
If there has been no data on or before the `dt` the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
def __init__(self, calendar, minute_bar_reader):
self._calendar = calendar
self._minute_bar_reader = minute_bar_reader
def _get_resampled(self, columns, start_session, end_session, assets):
range_open = self._calendar.session_open(start_session)
range_close = self._calendar.session_close(end_session)
minute_data = self._minute_bar_reader.load_raw_arrays(
columns,
range_open,
range_close,
assets,
)
# Get the index of the close minute for each session in the range.
# If the range contains only one session, the only close in the range
# is the last minute in the data. Otherwise, we need to get all the
# session closes and find their indices in the range of minutes.
if start_session == end_session:
close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
else:
minutes = self._calendar.minutes_in_range(
range_open,
range_close,
)
session_closes = self._calendar.session_closes_in_range(
start_session,
end_session,
)
close_ilocs = minutes.searchsorted(session_closes.values)
results = []
shape = (len(close_ilocs), len(assets))
for col in columns:
if col != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
results.append(out)
for i in range(len(assets)):
for j, column in enumerate(columns):
data = minute_data[j][:, i]
minute_to_session(column, close_ilocs, data, results[j][:, i])
return results
@property
def trading_calendar(self):
return self._calendar
def load_raw_arrays(self, columns, start_dt, end_dt, sids):
return self._get_resampled(columns, start_dt, end_dt, sids)
def get_value(self, sid, session, colname):
# WARNING: This will need caching or other optimization if used in a
# tight loop.
# This was developed to complete interface, but has not been tuned
# for real world use.
return self._get_resampled([colname], session, session, [sid])[0][0][0]
@lazyval
def sessions(self):
cal = self._calendar
first = self._minute_bar_reader.first_trading_day
last = cal.minute_to_session_label(
self._minute_bar_reader.last_available_dt)
return cal.sessions_in_range(first, last)
@lazyval
def last_available_dt(self):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.last_available_dt
)
@property
def first_trading_day(self):
return self._minute_bar_reader.first_trading_day
def get_last_traded_dt(self, asset, dt):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.get_last_traded_dt(asset, dt))
class ReindexBarReader(ABC):
"""
    A base class for readers which reindex results, filling in the additional
indices with empty data.
Used to align the reading assets which trade on different calendars.
Currently only supports a ``trading_calendar`` which is a superset of the
``reader``'s calendar.
Parameters
----------
- trading_calendar : zipline.utils.trading_calendar.TradingCalendar
The calendar to use when indexing results from the reader.
- reader : MinuteBarReader|SessionBarReader
The reader which has a calendar that is a subset of the desired
``trading_calendar``.
- first_trading_session : pd.Timestamp
The first trading session the reader should provide. Must be specified,
since the ``reader``'s first session may not exactly align with the
desired calendar. Specifically, in the case where the first session
on the target calendar is a holiday on the ``reader``'s calendar.
- last_trading_session : pd.Timestamp
The last trading session the reader should provide. Must be specified,
since the ``reader``'s last session may not exactly align with the
desired calendar. Specifically, in the case where the last session
on the target calendar is a holiday on the ``reader``'s calendar.
"""
def __init__(self,
trading_calendar,
reader,
first_trading_session,
last_trading_session):
self._trading_calendar = trading_calendar
self._reader = reader
self._first_trading_session = first_trading_session
self._last_trading_session = last_trading_session
@property
def last_available_dt(self):
return self._reader.last_available_dt
def get_last_traded_dt(self, sid, dt):
return self._reader.get_last_traded_dt(sid, dt)
@property
def first_trading_day(self):
return self._reader.first_trading_day
def get_value(self, sid, dt, field):
# Give an empty result if no data is present.
try:
return self._reader.get_value(sid, dt, field)
except NoDataOnDate:
if field == 'volume':
return 0
else:
return np.nan
@abstractmethod
def _outer_dts(self, start_dt, end_dt):
raise NotImplementedError
@abstractmethod
def _inner_dts(self, start_dt, end_dt):
raise NotImplementedError
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self._first_trading_session,
self._last_trading_session
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
outer_dts = self._outer_dts(start_dt, end_dt)
inner_dts = self._inner_dts(start_dt, end_dt)
indices = outer_dts.searchsorted(inner_dts)
shape = len(outer_dts), len(sids)
outer_results = []
if len(inner_dts) > 0:
inner_results = self._reader.load_raw_arrays(
fields, inner_dts[0], inner_dts[-1], sids)
else:
inner_results = None
for i, field in enumerate(fields):
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
if inner_results is not None:
out[indices] = inner_results[i]
outer_results.append(out)
return outer_results
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self._trading_calendar.minutes_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.calendar.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self.trading_calendar.sessions_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.trading_calendar.sessions_in_range(
start_dt, end_dt)
|
the-stack_0_12725 | # coding: utf-8
import copy
import random
from models.judge import Judge
from logger.log import logger
from protocol.serialize import send
from common.roomConfig import roomCfg
from common.constDefine import *
class Room:
room_id = -1 # 房间ID
master_id = -1 # 房主ID
room_type = -1 # 房间类型
users = None # 房间的玩家
judge = None # 法官
max_num = 0 # 房间最大玩家数量
user_role = None # 玩家角色
user_role_num = None # 玩家角色数量
interrupt_flag = False # 是否允许其他玩家在某个玩家发言过程中插话
speak_time = 0 # 玩家发言时长
status = None # 房间状态
def __init__(self, room_id, room_type, master_id):
self.room_id = room_id
self.room_type = room_type
self.master_id = master_id
self.users = {}
self.status = ROOM_STATUS_READY
if roomCfg[self.room_type] is not None:
self.max_num = roomCfg[self.room_type].max_num
self.user_role = roomCfg[self.room_type].user_role
self.interrupt_flag = roomCfg[self.room_type].interrupt_flag
self.speak_time = roomCfg[self.room_type].speak_time
from collections import Counter
self.user_role_num = Counter(self.user_role)
else:
logger.error("room config is not exists ! {0}".format(self.room_type))
def dump(self):
        return {k: v for k, v in self.__dict__.items()}
def get_number_by_identity(self, identity):
"""获取指定类型玩家的数量"""
if identity in self.user_role_num.keys():
return self.user_role_num[identity]
else:
return None
def is_full(self):
"""房间是否满员"""
if len(self.users) >= self.max_num:
return True
else:
return False
    # Send a message to all players in the room
    # cmd: command code
    # proto: message content
    # flag: True (default) sends to all players; False sends to all players except the sender
def send_msg_to_all_users(self, cmd, proto, session, flag=True):
for user in self.users.values():
if not flag and user.uuid == session.uuid:
continue
else:
send(cmd, proto, user)
def send_msg_to_identity_users(self, identity, cmd, proto):
"""发送消息给指定身份的玩家"""
for user in self.users.values():
if user.role is not None and user.role.identity == identity:
send(cmd, proto, user)
break
def add_user(self, user):
"""添加玩家"""
ret = False
if user.uuid in self.users.keys():
logger.error("addUser error ! uuid {0} is exists!".format(user.uuid))
else:
self.users[user.uuid] = user
ret = True
return ret
def del_user(self, uuid):
"""删除玩家"""
ret = False
if uuid in self.users.keys():
del self.users[uuid]
ret = True
else:
logger.error("delUser error ! uuid {0} is not exists!".format(uuid))
return ret
def alloc_role_by_index(self, index):
if index in USER_ROLE_CLASS_DICT.keys():
cls = USER_ROLE_CLASS_DICT[index]
return cls()
else:
logger.error("alloc role error ! {0}".format(index))
return None
def allot_role(self):
"""分配身份"""
tmp_role = copy.deepcopy(self.user_role)
for user in self.users.values():
if user.role is None:
                index = random.randint(0, len(tmp_role) - 1)
user.role = self.alloc_role_by_index(tmp_role[index])
if user.role is not None:
del tmp_role[index]
def dismiss(self):
"""解散房间"""
for user in self.users.values():
user.room_id = 0
self.users.clear()
def speak(self, user_id, type, msg):
"""玩家发言"""
logger.info("user {0} speak {1} type:{2}".format(user_id, msg, type))
pass
def vote(self, user_id, other_id):
"""玩家投票"""
logger.info("user {0} vote id {1}".format(user_id, other_id))
pass
def do_skill(self, user_id, sid, target_id):
"""玩家使用技能"""
logger.info("user {0} use skill {1} target is {2}".format(user_id, sid, target_id))
pass
def ready(self):
"""玩家准备"""
flag = True
for user in self.users.values():
if user.status == 0:
flag = False
break
if flag:
            # all players are ready, so start the game
self.start_game()
def start_game(self):
"""开始游戏"""
self.judge = Judge(self)
self.judge.start()
def end_game(self):
"""结束游戏"""
pass
|
the-stack_0_12726 | from abc import abstractmethod
from typing import AsyncContextManager, Collection, Container, ContextManager
from eth_typing import BLSPubkey, BLSSignature
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.blocks import BeaconBlock
from eth2.beacon.typing import CommitteeIndex, Epoch, Operation, SignedOperation, Slot
from eth2.clock import Tick
from eth2.validator_client.duty import Duty
from eth2.validator_client.typing import BLSPrivateKey
class BeaconNodeAPI(AsyncContextManager["BeaconNodeAPI"]):
"""
``BeaconNodeAPI`` represents a remote beacon node the validator client
can query for information about the beacon state and supply
signed messages to.
"""
@abstractmethod
async def fetch_duties(
self,
current_tick: Tick,
public_keys: Collection[BLSPubkey],
target_epoch: Epoch,
) -> Collection[Duty]:
...
@abstractmethod
async def fetch_attestation(
self, public_key: BLSPubkey, slot: Slot, committee_index: CommitteeIndex
) -> Attestation:
...
@abstractmethod
async def fetch_block_proposal(
self, slot: Slot, randao_reveal: BLSSignature
) -> BeaconBlock:
...
@abstractmethod
async def publish(self, duty: Duty, signed_operation: SignedOperation) -> None:
...
class SignatoryDatabaseAPI(Container[bytes]):
"""
Provides persistence for actions of the client to prevent
the publishing of slashable signatures.
"""
@abstractmethod
async def record_signature_for(self, duty: Duty, operation: Operation) -> None:
...
@abstractmethod
async def is_slashable(self, duty: Duty, operation: Operation) -> bool:
...
@abstractmethod
def insert(self, key: bytes, value: bytes) -> None:
...
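# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A minimal in-memory SignatoryDatabaseAPI showing how the abstract interface
# above could be satisfied. The "never slashable" policy and the key encoding
# are placeholder assumptions, not real slashing-protection logic.
class InMemorySignatoryDB(SignatoryDatabaseAPI):
    def __init__(self) -> None:
        self._store: dict = {}
    def __contains__(self, key: object) -> bool:
        return key in self._store
    def insert(self, key: bytes, value: bytes) -> None:
        self._store[key] = value
    async def record_signature_for(self, duty: Duty, operation: Operation) -> None:
        # placeholder: persist a marker keyed by the duty's repr
        self.insert(repr(duty).encode(), repr(operation).encode())
    async def is_slashable(self, duty: Duty, operation: Operation) -> bool:
        # placeholder assumption: nothing is ever considered slashable
        return False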
class KeyStoreAPI(ContextManager["KeyStoreAPI"]):
@property
@abstractmethod
def public_keys(self) -> Collection[BLSPubkey]:
...
@abstractmethod
def import_private_key(self, encoded_private_key: str) -> None:
...
@abstractmethod
def private_key_for(self, public_key: BLSPubkey) -> BLSPrivateKey:
...
|
the-stack_0_12728 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
def test_cpu_slurm_save_load(tmpdir):
"""Verify model save/load/checkpoint on CPU."""
model = BoringModel()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
version = logger.version
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
limit_train_batches=0.2,
limit_val_batches=0.2,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
trainer.fit(model)
real_global_step = trainer.global_step
# traning complete
assert trainer.state.finished, "cpu model failed to complete"
# predict with trained model before saving
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
model.eval()
pred_before_saving = model(batch)
# test HPC saving
# simulate snapshot on slurm
# save logger to make sure we get all the metrics
if logger:
logger.finalize("finished")
hpc_save_path = trainer._checkpoint_connector.hpc_save_path(trainer.weights_save_path)
trainer.save_checkpoint(hpc_save_path)
assert os.path.exists(hpc_save_path)
# new logger file to get meta
logger = tutils.get_default_logger(tmpdir, version=version)
model = BoringModel()
class _StartCallback(Callback):
# set the epoch start hook so we can predict before the model does the full training
def on_train_epoch_start(self, trainer, model):
assert trainer.global_step == real_global_step and trainer.global_step > 0
# predict with loaded model to make sure answers are the same
mode = model.training
model.eval()
new_pred = model(batch)
assert torch.eq(pred_before_saving, new_pred).all()
model.train(mode)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
callbacks=[_StartCallback(), ModelCheckpoint(dirpath=tmpdir)],
)
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
def test_early_stopping_cpu_model(tmpdir):
class ModelTrainVal(BoringModel):
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args, **kwargs)
self.log("val_loss", output["x"])
return output
tutils.reset_seed()
stopping = EarlyStopping(monitor="val_loss", min_delta=0.1)
trainer_options = dict(
callbacks=[stopping],
default_root_dir=tmpdir,
gradient_clip_val=1.0,
track_grad_norm=2,
enable_progress_bar=False,
accumulate_grad_batches=2,
limit_train_batches=0.1,
limit_val_batches=0.1,
)
model = ModelTrainVal()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
# test freeze on cpu
model.freeze()
model.unfreeze()
@RunIf(skip_windows=True, skip_49370=True)
def test_multi_cpu_model_ddp(tmpdir):
"""Make sure DDP works."""
tutils.set_random_main_port()
trainer_options = dict(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=None,
num_processes=2,
strategy="ddp_spawn",
)
dm = ClassifDataModule()
model = ClassificationModel()
tpipes.run_model_test(trainer_options, model, data=dm, on_gpu=False)
def test_lbfgs_cpu_model(tmpdir):
"""Test each of the trainer options.
Testing LBFGS optimizer
"""
class ModelSpecifiedOptimizer(BoringModel):
def __init__(self, optimizer_name, learning_rate):
super().__init__()
self.optimizer_name = optimizer_name
self.learning_rate = learning_rate
self.save_hyperparameters()
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
enable_progress_bar=False,
limit_train_batches=0.2,
limit_val_batches=0.2,
)
model = ModelSpecifiedOptimizer(optimizer_name="LBFGS", learning_rate=0.004)
tpipes.run_model_test_without_loggers(trainer_options, model, min_acc=0.01)
def test_default_logger_callbacks_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
gradient_clip_val=1.0,
overfit_batches=0.20,
enable_progress_bar=False,
limit_train_batches=0.01,
limit_val_batches=0.01,
)
model = BoringModel()
tpipes.run_model_test_without_loggers(trainer_options, model, min_acc=0.01)
# test freeze on cpu
model.freeze()
model.unfreeze()
def test_running_test_after_fitting(tmpdir):
"""Verify test() on fitted model."""
class ModelTrainValTest(BoringModel):
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args, **kwargs)
self.log("val_loss", output["x"])
return output
def test_step(self, *args, **kwargs):
output = super().test_step(*args, **kwargs)
self.log("test_loss", output["y"])
return output
model = ModelTrainValTest()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.2,
limit_test_batches=0.2,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
trainer.test()
# test we have good test accuracy
tutils.assert_ok_model_acc(trainer, key="test_loss", thr=0.5)
def test_running_test_no_val(tmpdir):
"""Verify `test()` works on a model with no `val_dataloader`.
It performs train and test only
"""
class ModelTrainTest(BoringModel):
def val_dataloader(self):
pass
def test_step(self, *args, **kwargs):
output = super().test_step(*args, **kwargs)
self.log("test_loss", output["y"])
return output
model = ModelTrainTest()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
limit_test_batches=0.2,
callbacks=[checkpoint],
logger=logger,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
trainer.test()
# test we have good test accuracy
tutils.assert_ok_model_acc(trainer, key="test_loss")
def test_simple_cpu(tmpdir):
"""Verify continue training session on CPU."""
model = BoringModel()
# fit model
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.1, limit_train_batches=20)
trainer.fit(model)
    # training complete
    assert trainer.state.finished, "cpu model failed to complete"
def test_cpu_model(tmpdir):
"""Make sure model trains on CPU."""
trainer_options = dict(
default_root_dir=tmpdir, enable_progress_bar=False, max_epochs=1, limit_train_batches=4, limit_val_batches=4
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
def test_all_features_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
gradient_clip_val=1.0,
overfit_batches=0.20,
track_grad_norm=2,
enable_progress_bar=False,
accumulate_grad_batches=2,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.4,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, on_gpu=False, min_acc=0.01)
|
the-stack_0_12729 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/nwojke/deep_sort/tree/master/deep_sort
"""
import numpy as np
from scipy.optimize import linear_sum_assignment
from ..motion import kalman_filter
INFTY_COST = 1e+5
__all__ = [
'iou_1toN',
'iou_cost',
'_nn_euclidean_distance',
'_nn_cosine_distance',
'NearestNeighborDistanceMetric',
'min_cost_matching',
'matching_cascade',
'gate_cost_matrix',
]
def iou_1toN(bbox, candidates):
"""
Computer intersection over union (IoU) by one box to N candidates.
Args:
bbox (ndarray): A bounding box in format `(top left x, top left y, width, height)`.
candidates (ndarray): A matrix of candidate bounding boxes (one per row) in the
same format as `bbox`.
Returns:
ious (ndarray): The intersection over union in [0, 1] between the `bbox`
and each candidate. A higher score means a larger fraction of the
`bbox` is occluded by the candidate.
"""
bbox_tl = bbox[:2]
bbox_br = bbox[:2] + bbox[2:]
candidates_tl = candidates[:, :2]
candidates_br = candidates[:, :2] + candidates[:, 2:]
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
wh = np.maximum(0., br - tl)
area_intersection = wh.prod(axis=1)
area_bbox = bbox[2:].prod()
area_candidates = candidates[:, 2:].prod(axis=1)
ious = area_intersection / (area_bbox + area_candidates - area_intersection)
return ious
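# Worked example (hypothetical boxes in tlwh format): a 2x2 box at the origin
# against a 4x4 box at the origin and a disjoint 2x2 box at (10, 10):
#   iou_1toN(np.array([0., 0., 2., 2.]),
#            np.array([[0., 0., 4., 4.], [10., 10., 2., 2.]]))
#   -> array([0.25, 0.])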
def iou_cost(tracks, detections, track_indices=None, detection_indices=None):
"""
IoU distance metric.
Args:
tracks (list[Track]): A list of tracks.
detections (list[Detection]): A list of detections.
track_indices (Optional[list[int]]): A list of indices to tracks that
should be matched. Defaults to all `tracks`.
detection_indices (Optional[list[int]]): A list of indices to detections
that should be matched. Defaults to all `detections`.
Returns:
cost_matrix (ndarray): A cost matrix of shape len(track_indices),
len(detection_indices) where entry (i, j) is
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
for row, track_idx in enumerate(track_indices):
if tracks[track_idx].time_since_update > 1:
cost_matrix[row, :] = 1e+5
continue
bbox = tracks[track_idx].to_tlwh()
candidates = np.asarray([detections[i].tlwh for i in detection_indices])
cost_matrix[row, :] = 1. - iou_1toN(bbox, candidates)
return cost_matrix
def _nn_euclidean_distance(s, q):
"""
Compute pair-wise squared (Euclidean) distance between points in `s` and `q`.
Args:
s (ndarray): Sample points: an NxM matrix of N samples of dimensionality M.
q (ndarray): Query points: an LxM matrix of L samples of dimensionality M.
Returns:
        distances (ndarray): A vector of length L that contains for each entry in `q` the
            smallest squared Euclidean distance to a sample in `s`.
"""
s, q = np.asarray(s), np.asarray(q)
if len(s) == 0 or len(q) == 0:
return np.zeros((len(s), len(q)))
s2, q2 = np.square(s).sum(axis=1), np.square(q).sum(axis=1)
distances = -2. * np.dot(s, q.T) + s2[:, None] + q2[None, :]
distances = np.clip(distances, 0., float(np.inf))
return np.maximum(0.0, distances.min(axis=0))
def _nn_cosine_distance(s, q):
"""
Compute pair-wise cosine distance between points in `s` and `q`.
Args:
s (ndarray): Sample points: an NxM matrix of N samples of dimensionality M.
q (ndarray): Query points: an LxM matrix of L samples of dimensionality M.
Returns:
        distances (ndarray): A vector of length L that contains for each entry in `q` the
            smallest cosine distance to a sample in `s`.
"""
s = np.asarray(s) / np.linalg.norm(s, axis=1, keepdims=True)
q = np.asarray(q) / np.linalg.norm(q, axis=1, keepdims=True)
distances = 1. - np.dot(s, q.T)
return distances.min(axis=0)
class NearestNeighborDistanceMetric(object):
"""
A nearest neighbor distance metric that, for each target, returns
the closest distance to any sample that has been observed so far.
Args:
metric (str): Either "euclidean" or "cosine".
matching_threshold (float): The matching threshold. Samples with larger
distance are considered an invalid match.
budget (Optional[int]): If not None, fix samples per class to at most
this number. Removes the oldest samples when the budget is reached.
Attributes:
samples (Dict[int -> List[ndarray]]): A dictionary that maps from target
identities to the list of samples that have been observed so far.
"""
def __init__(self, metric, matching_threshold, budget=None):
if metric == "euclidean":
self._metric = _nn_euclidean_distance
elif metric == "cosine":
self._metric = _nn_cosine_distance
else:
raise ValueError(
"Invalid metric; must be either 'euclidean' or 'cosine'")
self.matching_threshold = matching_threshold
self.budget = budget
self.samples = {}
def partial_fit(self, features, targets, active_targets):
"""
Update the distance metric with new data.
Args:
features (ndarray): An NxM matrix of N features of dimensionality M.
targets (ndarray): An integer array of associated target identities.
active_targets (List[int]): A list of targets that are currently
present in the scene.
"""
for feature, target in zip(features, targets):
self.samples.setdefault(target, []).append(feature)
if self.budget is not None:
self.samples[target] = self.samples[target][-self.budget:]
self.samples = {k: self.samples[k] for k in active_targets}
def distance(self, features, targets):
"""
Compute distance between features and targets.
Args:
features (ndarray): An NxM matrix of N features of dimensionality M.
targets (list[int]): A list of targets to match the given `features` against.
Returns:
cost_matrix (ndarray): a cost matrix of shape len(targets), len(features),
where element (i, j) contains the closest squared distance between
`targets[i]` and `features[j]`.
"""
cost_matrix = np.zeros((len(targets), len(features)))
for i, target in enumerate(targets):
cost_matrix[i, :] = self._metric(self.samples[target], features)
return cost_matrix
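# Illustrative usage (the feature arrays are hypothetical): keep at most 10
# samples per track identity and gate matches whose cosine distance exceeds 0.2.
#   metric = NearestNeighborDistanceMetric("cosine", matching_threshold=0.2, budget=10)
#   metric.partial_fit(features, targets, active_targets)
#   cost_matrix = metric.distance(detection_features, confirmed_track_ids)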
def min_cost_matching(distance_metric,
max_distance,
tracks,
detections,
track_indices=None,
detection_indices=None):
"""
Solve linear assignment problem.
Args:
distance_metric :
Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as
well as a list of N track indices and M detection indices. The
metric should return the NxM dimensional cost matrix, where element
(i, j) is the association cost between the i-th track in the given
track indices and the j-th detection in the given detection_indices.
max_distance (float): Gating threshold. Associations with cost larger
than this value are disregarded.
tracks (list[Track]): A list of predicted tracks at the current time
step.
detections (list[Detection]): A list of detections at the current time
step.
track_indices (list[int]): List of track indices that maps rows in
`cost_matrix` to tracks in `tracks`.
detection_indices (List[int]): List of detection indices that maps
columns in `cost_matrix` to detections in `detections`.
Returns:
A tuple (List[(int, int)], List[int], List[int]) with the following
three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return [], track_indices, detection_indices # Nothing to match.
cost_matrix = distance_metric(tracks, detections, track_indices,
detection_indices)
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
indices = linear_sum_assignment(cost_matrix)
matches, unmatched_tracks, unmatched_detections = [], [], []
for col, detection_idx in enumerate(detection_indices):
if col not in indices[1]:
unmatched_detections.append(detection_idx)
for row, track_idx in enumerate(track_indices):
if row not in indices[0]:
unmatched_tracks.append(track_idx)
for row, col in zip(indices[0], indices[1]):
track_idx = track_indices[row]
detection_idx = detection_indices[col]
if cost_matrix[row, col] > max_distance:
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
def matching_cascade(distance_metric,
max_distance,
cascade_depth,
tracks,
detections,
track_indices=None,
detection_indices=None):
"""
Run matching cascade.
Args:
distance_metric :
Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as
well as a list of N track indices and M detection indices. The
metric should return the NxM dimensional cost matrix, where element
(i, j) is the association cost between the i-th track in the given
track indices and the j-th detection in the given detection_indices.
max_distance (float): Gating threshold. Associations with cost larger
than this value are disregarded.
        cascade_depth (int): The cascade depth, should be set to the maximum
            track age.
tracks (list[Track]): A list of predicted tracks at the current time
step.
detections (list[Detection]): A list of detections at the current time
step.
track_indices (list[int]): List of track indices that maps rows in
`cost_matrix` to tracks in `tracks`.
detection_indices (List[int]): List of detection indices that maps
columns in `cost_matrix` to detections in `detections`.
Returns:
A tuple (List[(int, int)], List[int], List[int]) with the following
three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = list(range(len(tracks)))
if detection_indices is None:
detection_indices = list(range(len(detections)))
unmatched_detections = detection_indices
matches = []
for level in range(cascade_depth):
if len(unmatched_detections) == 0: # No detections left
break
track_indices_l = [
k for k in track_indices if tracks[k].time_since_update == 1 + level
]
if len(track_indices_l) == 0: # Nothing to match at this level
continue
matches_l, _, unmatched_detections = \
min_cost_matching(
distance_metric, max_distance, tracks, detections,
track_indices_l, unmatched_detections)
matches += matches_l
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
return matches, unmatched_tracks, unmatched_detections
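# In a DeepSORT-style tracker this cascade is typically driven by an appearance
# metric whose cost matrix is gated with gate_cost_matrix below; a sketch with
# assumed names:
#   matches, unmatched_tracks, unmatched_detections = matching_cascade(
#       gated_appearance_metric, metric.matching_threshold, max_age,
#       tracks, detections)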
def gate_cost_matrix(kf,
cost_matrix,
tracks,
detections,
track_indices,
detection_indices,
gated_cost=INFTY_COST,
only_position=False):
"""
Invalidate infeasible entries in cost matrix based on the state
distributions obtained by Kalman filtering.
Args:
kf (object): The Kalman filter.
cost_matrix (ndarray): The NxM dimensional cost matrix, where N is the
number of track indices and M is the number of detection indices,
such that entry (i, j) is the association cost between
`tracks[track_indices[i]]` and `detections[detection_indices[j]]`.
tracks (list[Track]): A list of predicted tracks at the current time
step.
detections (list[Detection]): A list of detections at the current time
step.
track_indices (List[int]): List of track indices that maps rows in
`cost_matrix` to tracks in `tracks`.
detection_indices (List[int]): List of detection indices that maps
columns in `cost_matrix` to detections in `detections`.
gated_cost (Optional[float]): Entries in the cost matrix corresponding
to infeasible associations are set this value. Defaults to a very
large value.
only_position (Optional[bool]): If True, only the x, y position of the
state distribution is considered during gating. Default False.
"""
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray(
[detections[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
track = tracks[track_idx]
gating_distance = kf.gating_distance(track.mean, track.covariance,
measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = gated_cost
return cost_matrix
|
the-stack_0_12730 | """Trains a ResNet on the CIFAR10 dataset.
ResNet v1
[a] Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf
ResNet v2
[b] Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras.models import Model
from keras.datasets import cifar10
from keras.utils import plot_model
import numpy as np
import os
# Training parameters
batch_size = 32 # orig paper trained all networks with batch_size=128
epochs = 200
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
# Model parameter
# ----------------------------------------------------------------------------
# | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
# |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# ----------------------------------------------------------------------------
# ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)
# ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)
# ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)
# ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)
# ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)
# ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)
# ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)
# ---------------------------------------------------------------------------
n = 3
# Model version
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = 1
# Computed depth from supplied model parameter n
if version == 1:
depth = n * 6 + 2
elif version == 2:
depth = n * 9 + 2
# Model name, depth and version
model_type = 'ResNet%dv%d' % (depth, version)
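# With the defaults above (n = 3, version = 1): depth = 3 * 6 + 2 = 20,
# so model_type == 'ResNet20v1'.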
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Input image dimensions.
input_shape = x_train.shape[1:]
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def lr_schedule(epoch):
"""Learning Rate Schedule
Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
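# Resulting schedule: epochs 0-80 -> 1e-3, 81-120 -> 1e-4, 121-160 -> 1e-5,
# 161-180 -> 1e-6, beyond 180 -> 5e-7.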
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
activation-bn-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and
    the same feature map sizes.
Features maps sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
The Number of parameters is approx the same as Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x,
num_filters=num_filters,
strides=strides)
y = resnet_layer(inputs=y,
num_filters=num_filters,
activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and
    the same filter map sizes.
Features maps sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
if version == 2:
model = resnet_v2(input_shape=input_shape, depth=depth)
else:
model = resnet_v1(input_shape=input_shape, depth=depth)
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
model.summary()
plot_model(model, to_file="%s.png" % model_type, show_shapes=True)
print(model_type)
# Prepare model model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer, lr_scheduler]
# Run training, with or without data augmentation.
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# randomly rotate images in the range (deg 0 to 180)
rotation_range=0,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
validation_data=(x_test, y_test),
epochs=epochs, verbose=1, workers=4,
callbacks=callbacks)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
|
the-stack_0_12735 | import sqlite3
import csv
import os
os.chdir('C:\OLGA\Python CS50\import_csv_db')
#currentDir = os.getcwd()
#currentFileCSV = currentDir +"\\" + csvFilename
#print(currentFileCSV)
conn = sqlite3.connect('db.sqlite3')
c = conn.cursor()
c.execute("delete from auth_user_customuser")
c.execute("delete from api_title")
c.execute("delete from api_review")
c.execute("delete from api_title_genre")
c.execute("delete from api_genre")
c.execute("delete from api_comment")
c.execute("delete from api_category")
csvFilename = 'users.csv'
with open(csvFilename, "r", encoding='utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
username = row['username']
email = row['email']
role = row['role']
desc = row['description']
first_name = row['first_name']
last_name = row['last_name']
c.execute("INSERT INTO auth_user_customuser(username, email, role, bio, first_name, last_name, password, is_superuser, is_staff, is_active, date_joined) \
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (username, email, role, desc, first_name, last_name, '111', 0, 0, 1, 0))
csvFilename = 'titles.csv'
with open(csvFilename, "r", encoding='utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
name = row['name']
year = row['year']
cat = row['category']
c.execute("INSERT INTO api_title(name, year, category_id) \
VALUES(?, ?, ?)", (name, year, cat))
csvFilename = 'review.csv'
with open(csvFilename, "r", encoding='utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
title = row['title_id']
text = row['text']
author = row['author']
score = row['score']
pub_date = row['pub_date']
c.execute("INSERT INTO api_review(title_id, text, author_id, score, pub_date) \
VALUES(?, ?, ?, ?, ?)", (title, text, author, score, pub_date))
csvFilename = 'genre_title.csv'
with open(csvFilename, "r") as file:
reader = csv.DictReader(file)
for row in reader:
title = row['title_id']
text = row['genre_id']
c.execute("INSERT INTO api_title_genre(title_id, genre_id) \
VALUES(?, ?)", (title, text))
csvFilename = 'genre.csv'
with open(csvFilename, "r", encoding='utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
name = row['name']
slug = row['slug']
c.execute("INSERT INTO api_genre(name, slug) \
VALUES(?, ?)", (name, slug))
csvFilename = 'comments.csv'
with open(csvFilename, "r", encoding='utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
review_id = row['review_id']
text = row['text']
author = row['author']
pub_date = row['pub_date']
c.execute("INSERT INTO api_comment(review_id, text, author_id, pub_date) \
VALUES(?, ?, ?, ?)", (review_id, text, author, pub_date))
csvFilename = 'category.csv'
with open(csvFilename, "r", encoding='utf-8') as file:
reader = csv.DictReader(file)
for row in reader:
name = row['name']
slug = row['slug']
c.execute("INSERT INTO api_category(name, slug) \
VALUES(?, ?)", (name, slug))
conn.commit()
conn.close() |
the-stack_0_12738 | # For example if user wants to input two equations like
# x1 + 2x2 = 3
# 2x1 + x2 = 3
# it will return a list like [[1,2,3],[2,1,3]]
def get_coefficients_as_list(no_of_unknowns):
all_coefficients = []
for i in range(1,no_of_unknowns+1):
coefficient = []
print("Enter the coefficients for equation ",i)
for j in range(1,no_of_unknowns+1):
num = int(input("Enter the coefficient of x"+str(j)+":- "))
coefficient.append(num)
coefficient.append(int(input("Enter the RHS constant :- ")))
all_coefficients.append(coefficient)
return all_coefficients |
the-stack_0_12739 | from dcmrtstruct2nii.adapters.convert.rtstructcontour2mask import DcmPatientCoords2Mask
from dcmrtstruct2nii.adapters.convert.filenameconverter import FilenameConverter
from dcmrtstruct2nii.adapters.input.contours.rtstructinputadapter import RtStructInputAdapter
from dcmrtstruct2nii.adapters.input.image.dcminputadapter import DcmInputAdapter
import os.path
from dcmrtstruct2nii.adapters.output.niioutputadapter import NiiOutputAdapter
from dcmrtstruct2nii.exceptions import PathDoesNotExistException, ContourOutOfBoundsException
import logging
def list_rt_structs(rtstruct_file):
"""
    Lists the structures in a DICOM RT Struct file by name.
:param rtstruct_file: Path to the rtstruct file
:return: A list of names, if any structures are found
"""
if not os.path.exists(rtstruct_file):
raise PathDoesNotExistException(f'rtstruct path does not exist: {rtstruct_file}')
rtreader = RtStructInputAdapter()
rtstructs = rtreader.ingest(rtstruct_file, True)
return [struct['name'] for struct in rtstructs]
def dcmrtstruct2nii(rtstruct_file, dicom_file, output_path, structures=None, gzip=True, mask_background_value=0, mask_foreground_value=255, convert_original_dicom=True, series_id=None): # noqa: C901 E501
"""
Converts A DICOM and DICOM RT Struct file to nii
:param rtstruct_file: Path to the rtstruct file
:param dicom_file: Path to the dicom file
:param output_path: Output path where the masks are written to
:param structures: Optional, list of structures to convert
:param gzip: Optional, output .nii.gz if set to True, default: True
:param series_id: Optional, the Series Instance UID. Use to specify the ID corresponding to the image if there are
dicoms from more than one series in `dicom_file` folder
:raise InvalidFileFormatException: Raised when an invalid file format is given.
:raise PathDoesNotExistException: Raised when the given path does not exist.
:raise UnsupportedTypeException: Raised when conversion is not supported.
:raise ValueError: Raised when mask_background_value or mask_foreground_value is invalid.
"""
output_path = os.path.join(output_path, '') # make sure trailing slash is there
if not os.path.exists(rtstruct_file):
raise PathDoesNotExistException(f'rtstruct path does not exist: {rtstruct_file}')
if not os.path.exists(dicom_file):
        raise PathDoesNotExistException(f'DICOM path does not exist: {dicom_file}')
if mask_background_value < 0 or mask_background_value > 255:
raise ValueError(f'Invalid value for mask_background_value: {mask_background_value}, must be between 0 and 255')
if mask_foreground_value < 0 or mask_foreground_value > 255:
raise ValueError(f'Invalid value for mask_foreground_value: {mask_foreground_value}, must be between 0 and 255')
if structures is None:
structures = []
os.makedirs(output_path, exist_ok=True)
filename_converter = FilenameConverter()
rtreader = RtStructInputAdapter()
rtstructs = rtreader.ingest(rtstruct_file)
dicom_image = DcmInputAdapter().ingest(dicom_file, series_id=series_id)
dcm_patient_coords_to_mask = DcmPatientCoords2Mask()
nii_output_adapter = NiiOutputAdapter()
for rtstruct in rtstructs:
if len(structures) == 0 or rtstruct['name'] in structures:
if 'sequence' not in rtstruct:
logging.info('Skipping mask {} no shape/polygon found'.format(rtstruct['name']))
continue
logging.info('Working on mask {}'.format(rtstruct['name']))
try:
mask = dcm_patient_coords_to_mask.convert(rtstruct['sequence'], dicom_image, mask_background_value, mask_foreground_value)
except ContourOutOfBoundsException:
logging.warning(f'Structure {rtstruct["name"]} is out of bounds, ignoring contour!')
continue
mask.CopyInformation(dicom_image)
mask_filename = filename_converter.convert(f'mask_{rtstruct["name"]}')
nii_output_adapter.write(mask, f'{output_path}{mask_filename}', gzip)
if convert_original_dicom:
logging.info('Converting original DICOM to nii')
nii_output_adapter.write(dicom_image, f'{output_path}image', gzip)
logging.info('Success!')
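# Illustrative usage (paths are hypothetical):
#   structures = list_rt_structs('/data/patient1/rtstruct.dcm')
#   dcmrtstruct2nii('/data/patient1/rtstruct.dcm', '/data/patient1/ct/',
#                   '/tmp/patient1_nii/', structures=structures[:1])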
|
the-stack_0_12742 | # Copyright 2022 Aprendizaje Profundo, All rights reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Developed by Alvaro Mauricio Montenegro Reyes and Daniel Mauricio Montenegro Reyes
# ==================================================================================
from collections import Counter
import re
def coincidence_parser(datos,column_name='Coincidencias_iniciales', verbose = True,kind = 'ruleId'):
coincidencias = datos[column_name].values
size = coincidencias.shape[0]
#list for the dictionaries
clist = []
# counter for rules
rules = Counter()
for i, index in enumerate(datos.index):
if i % 100 == 0 and verbose: print(i,end=' ')
# extract coincidences from row i
# split the rules, according to the data structure
c = coincidencias[i]
c = re.sub('Match\({','MatchXXXX({',c)
l = c.split('MatchXXXX')
# create a list with each dictionary obtained
clist.clear()
for k in range(1, len(l)):
dictionary = l[k]
try:
val = dict(eval(dictionary[1:-3]))
except:
val = dict(eval(dictionary[1:-2]))
clist.append(val)
# count the rules found by type of rule
rules.clear()
for j in range(len(clist)):
            # Possible values: ruleId, ruleIssueType, category
rules[clist[j][kind]] += 1
# to the dataframe
for key, value in rules.items():
datos.at[index, key] = value
def spacy_column_parser(column_name, verbose=True):
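    # NOTE: relies on a module-level pandas DataFrame named `datos`, i.e. the same
    # frame that coincidence_parser above receives as its `datos` argument.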
# start
size = datos.shape[0]
    print('Starting!! Processing', size, 'records from column', column_name)
# counter for rules
rules = Counter()
# working bucle
for i, index in enumerate(datos.index):
if i % 100 == 0 and verbose: print(i, end=' ')
# read column value in this register (index)
values = eval(datos.at[index, column_name])
# extract the rules
rules.clear()
for value in values:
rules[value] += 1
# to the dataframe
for key, value in rules.items():
datos.at[index, key] = value
    print('\nDone! Processed', i+1, 'records from column', column_name)
def spacy_parser( column_names=['Upos', 'Dep', 'Ner_type'], verbose = True):
for column_name in column_names:
spacy_column_parser(column_name, verbose=True) |
the-stack_0_12743 | #!/usr/bin/env python2
# test case courtesy of William Schaub ([email protected])
import os, sys
from socket import *
UNIXSOCKET = sys.argv[1]
server = socket(AF_UNIX,SOCK_STREAM)
server.connect(UNIXSOCKET)
while 1:
data = sys.stdin.readline()
if not data: break
server.sendall(data)
server.close()
|
the-stack_0_12746 | """
dxagent.py
This file contains the core of dxagent
@author: K.Edeline
"""
import sched
import time
import signal
import importlib
from .constants import AGENT_INPUT_PERIOD
from .core.ios import IOManager
from .core.daemon import Daemon
from .input.sysinfo import SysInfo
from .input.bm_input import BMWatcher
from .input.vm_input import VMWatcher
from .input.vpp_input import VPPWatcher
from .assurance.health import HealthEngine
from .gnmi.exporter import DXAgentExporter
class DXAgent(Daemon, IOManager):
"""
DXAgent
"""
def __init__(self, parse_args=True):
Daemon.__init__(self, pidfile='/var/run/dxagent.pid',
stdout='/var/log/dxagent.log',
stderr='/var/log/dxagent.log',
name='dxagent',
input_rate=AGENT_INPUT_PERIOD)
IOManager.__init__(self, child=self, parse_args=parse_args)
self.load_ios()
if not parse_args:
return
def _init(self):
self.sysinfo = SysInfo()
self.scheduler = sched.scheduler()
# ringbuffers are stored here
self._data = {}
# SharedMemory with dxtop.
# Drop privileges to avoid dxtop root requirements
if not self.args.disable_shm:
mod = importlib.import_module("agent.core.shareablebuffer")
with self.drop():
self.sbuffer = getattr(mod, "ShareableBuffer")(create=True)
# watchers.
self.bm_watcher = BMWatcher(self._data, self.info, self)
self.vm_watcher = VMWatcher(self._data, self.info, self)
self.vpp_watcher = VPPWatcher(self._data, self.info, self)
# health engine
self.engine = HealthEngine(self._data, self.info, self)
# exporter
if self.gnmi_target:
self.exporter = DXAgentExporter(self._data, self.info, self,
target_url=self.gnmi_target)
self.exporter.run()
# catch signal for cleanup
signal.signal(signal.SIGTERM, self.exit)
def _input(self):
self.bm_watcher.input()
self.vm_watcher.input()
self.vpp_watcher.input()
def process(self):
"""
read input data, process and write it to shmem.
re-schedule itself.
"""
# fetch input
self._input()
# compute metrics&symptoms from input
self.engine.update_health()
# write to shmem
if not self.args.disable_shm:
skip=["stats"] if not self.args.verbose else []
self.sbuffer.write(self._data, skip=skip, info=self.info)
#self.info(list(self.exporter._iterate_data()))
self.scheduler.enter(AGENT_INPUT_PERIOD,0,self.process)
def exit(self, signum=None, stackframe=None):
"""
cleanup before exiting
"""
self.running = False
time.sleep(AGENT_INPUT_PERIOD)
self.bm_watcher.exit()
self.vm_watcher.exit()
self.vpp_watcher.exit()
if not self.args.disable_shm:
self.sbuffer.unlink()
del self.sbuffer
def run(self):
"""
main function
"""
self._init()
self.running = True
self.info(self.sysinfo)
self.process()
while self.running:
self.scheduler.run(blocking=False)
time.sleep(AGENT_INPUT_PERIOD)
|
the-stack_0_12748 | from googlesearch import search
from pyppeteer import launch
from wplay.utils.helpers import chatbot_image_folder_path
async def Bot(last_Message):
"""
Function to perform instruction as instructed to bot.
"""
print('\n Bot activated')
first_last_Message = "".join(last_Message.split())
simple_menu = {
"hi": say_hi,
"help": _help_commands,
"goodmorning": say_goodmorning,
"goodnight": say_goodnight,
"howareyou?": say_fine,
}
simple_menu_keys = simple_menu.keys()
result = []
try:
command_args = first_last_Message[1:].split(" ", 1)
command_arg = last_Message[1:].split(" ", 1)
if len(command_args) == 1 and command_args[0] in simple_menu_keys:
return simple_menu[command_args[0]]()
elif command_arg[0] == 'google':
query = "".join(command_arg[1])
for j in search(query, tld="co.in", num=10, stop=10, pause=2):
result.append(j)
print("Sending links for query")
return result
elif command_arg[0] == "image":
query = "".join(command_arg[1])
await takeScreenshot(query)
print("Taking screenshot of google image for query")
return "Sending you screenshot"
elif command_arg[0] == "maps":
query = "".join(command_arg[1])
map_parameters_list = query.replace(" ", "")
map_parameters = map_parameters_list.split(',')
base_url = "https://www.google.com/maps/dir/?api=1&"
custom_url = base_url + "origin={ori}&destination={dest}&travelmode={t_mode}".format(ori=map_parameters[0], dest=map_parameters[1], t_mode=map_parameters[2])
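            # e.g. "/maps Delhi, Agra, driving" yields (illustrative values):
            # https://www.google.com/maps/dir/?api=1&origin=Delhi&destination=Agra&travelmode=driving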
print("Sending link for google maps")
return custom_url
else:
return "Wrong command. Send me /help to see a list of valid commands"
except KeyError as e:
print("Key Error Exception: {err}".format(err=str(e)))
def say_hi():
print("Saying hi")
return "Wplay chatbot says hi! Hope you are having a nice day..."
def say_goodmorning():
print("Saying good morning")
return "Bot says Good Morning! Have a Good Day..."
def say_goodnight():
print("Saying good night")
return "Bot says Good Night! Sweet Dreams..."
def say_fine():
print("Saying I am Fine!")
return "Bot says I am Fine Thank You! How are you?"
def _help_commands():
print("Asking for help")
return "How may I assist you with help\n"\
"List of commands:\n" \
"/hi (bot says hi), " \
"/all_commands (ist of all commands), " \
"/good morning, " \
"/good night, " \
"/how are you? " \
"/google {query} " \
"/image {query} " \
"/maps {origin}, {destination}, {mode:driving/bicycling/transit/two-wheeler/walking}"
async def takeScreenshot(qry):
browser = await launch()
page = await browser.newPage()
await page.goto('https://www.google.com/search?q={}&source=lnms&tbm=isch'.format(qry))
image_path = str(chatbot_image_folder_path / '{}.png'.format(qry))
await page.screenshot({'path': image_path})
await browser.close()
|
the-stack_0_12749 | import copy
import pytest
from ckan_api_client.exceptions import HTTPError
from ckan_api_client.objects import CkanDataset
from ckan_api_client.tests.utils.diff import diff_mappings
from ckan_api_client.tests.utils.generate import generate_dataset
from ckan_api_client.tests.utils.validation import MutableCheckpoint
def test_dataset_create(ckan_client_hl):
client = ckan_client_hl
dataset_dict = generate_dataset()
dataset = CkanDataset(dataset_dict)
created = client.create_dataset(dataset)
assert created.is_equivalent(dataset)
def test_dataset_get_by_name(ckan_client_hl):
client = ckan_client_hl
dataset_dict = generate_dataset()
dataset_dict['name'] = 'example-dataset-name'
dataset = CkanDataset(dataset_dict)
created = client.create_dataset(dataset)
assert created.is_equivalent(dataset)
dataset_id = created.id
# Try getting by id
dataset_1 = client.get_dataset(dataset_id)
assert created == dataset_1
# Try getting by name
dataset_2 = client.get_dataset_by_name('example-dataset-name')
assert created == dataset_2
# Try getting by id, but passing name instead
with pytest.raises(HTTPError) as excinfo:
client.get_dataset('example-dataset-name')
assert excinfo.value.status_code == 404
# Try getting by name, but passing id instead
with pytest.raises(HTTPError) as excinfo:
client.get_dataset_by_name(dataset_id)
assert excinfo.value.status_code == 404
def test_dataset_update_base_fields(ckan_client_hl):
client = ckan_client_hl # shortcut
ckp = MutableCheckpoint() # to check objects mutation
# Create our dataset
dataset_dict = generate_dataset()
ckp.add(dataset_dict)
dataset = CkanDataset(generate_dataset())
dataset.author = 'original author'
dataset.author_email = '[email protected]'
dataset.license_id = 'cc-zero'
created = client.create_dataset(dataset)
# Store a copy of the original dataset
original_dataset = client.get_dataset(created.id)
assert created.is_equivalent(original_dataset)
ckp.add(original_dataset)
# Update some base fields, send back & check
to_be_updated = copy.deepcopy(original_dataset)
to_be_updated.author = 'NEW_AUTHOR'
to_be_updated.author_email = 'NEW_AUTHOR_EMAIL'
to_be_updated.license_id = 'cc-by-sa'
assert to_be_updated.is_modified()
# Update, get back, check
updated = client.update_dataset(to_be_updated)
updated_2 = client.get_dataset(created.id)
assert updated.is_equivalent(to_be_updated)
assert updated.is_equivalent(updated_2)
diffs = diff_mappings(
original_dataset.serialize(),
updated.serialize())
assert diffs['differing'] == set([
'author', 'author_email', 'license_id',
])
assert diffs['left'] == set()
assert diffs['right'] == set()
# Make sure dicts did not mutate
ckp.check()
def test_dataset_update_extras(ckan_client_hl):
client = ckan_client_hl # shortcut
ds_dict = generate_dataset()
ds_dict['extras'] = {
'key-0': 'value-0',
'key-1': 'value-1',
'key-2': 'value-2',
'key-3': 'value-3',
'key-4': 'value-4',
'key-5': 'value-5',
'key-6': 'value-6',
'key-7': 'value-7',
'key-8': 'value-8',
'key-9': 'value-9',
}
stage_1pre = CkanDataset(ds_dict)
stage_1 = client.create_dataset(stage_1pre)
# --------------------------------------------------
# Try adding a new record
stage_1b = client.get_dataset(stage_1.id)
stage_2pre = copy.deepcopy(stage_1b)
stage_2pre.extras['NEW_FIELD_NAME'] = 'NEW_FIELD_VALUE'
stage_2 = client.update_dataset(stage_2pre)
assert stage_2.is_equivalent(client.get_dataset(stage_1.id))
diffs = diff_mappings(stage_1b.serialize(), stage_2.serialize())
assert diffs['left'] == diffs['right'] == set()
assert diffs['differing'] == set(['extras'])
del stage_1b, stage_2pre, stage_2, diffs
# --------------------------------------------------
# Try removing the custom field
stage_2pre = client.get_dataset(stage_1.id)
del stage_2pre.extras['NEW_FIELD_NAME']
stage_2 = client.update_dataset(stage_2pre)
assert stage_2.is_equivalent(client.get_dataset(stage_1.id))
assert 'NEW_FIELD_NAME' not in stage_2.extras
stage_2b = client.get_dataset(stage_1.id)
assert stage_2 == stage_2b
# Make sure we brought it back to its original state
assert stage_1.is_equivalent(stage_2)
del stage_2pre, stage_2
def test_dataset_update_resources(ckan_client_hl):
client = ckan_client_hl # shortcut
ds_dict = generate_dataset()
ds_dict['resources'] = [
{'name': 'example-csv-1',
'url': 'http://example.com/dataset-1.csv',
'format': 'CSV'},
{'name': 'example-json-1',
'url': 'http://example.com/dataset-1.json',
'format': 'JSON'},
]
stage_1pre = CkanDataset(ds_dict)
stage_1 = client.create_dataset(stage_1pre)
# --------------------------------------------------
# Try adding a new resource
stage_2pre = client.get_dataset(stage_1.id)
stage_2pre.resources.append({
'name': 'example-csv-2',
'url': 'http://example.com/dataset-2.csv',
'format': 'CSV'})
assert len(stage_2pre.resources) == 3
assert len(stage_2pre.serialize()['resources']) == 3
stage_2 = client.update_dataset(stage_2pre)
assert len(stage_2.resources) == 3
assert len(stage_2.serialize()['resources']) == 3
# --------------------------------------------------
# Try prepending adding a new resource
stage_3pre = client.get_dataset(stage_1.id)
stage_3pre.resources.insert(0, {
'url': 'http://example.com/dataset-2.json',
'format': 'JSON'})
assert len(stage_3pre.resources) == 4
assert len(stage_3pre.serialize()['resources']) == 4
stage_3 = client.update_dataset(stage_3pre)
assert len(stage_3.resources) == 4
assert len(stage_3.serialize()['resources']) == 4
def test_dataset_delete(ckan_client_hl):
client = ckan_client_hl
dataset_dict = generate_dataset()
dataset = CkanDataset(dataset_dict)
created = client.create_dataset(dataset)
assert created.is_equivalent(dataset)
# Make sure it is in lists
assert created.id in client.list_datasets()
# Delete it
client.delete_dataset(created.id)
assert created.id not in client.list_datasets()
# Test that our workarounds work as expected..
with pytest.raises(HTTPError) as excinfo:
client.get_dataset(created.id)
assert excinfo.value.status_code == 404
retrieved = client.get_dataset(created.id, allow_deleted=True)
assert retrieved.state == 'deleted'
def test_dataset_wipe(ckan_client_hl):
client = ckan_client_hl
# ------------------------------------------------------------
# Now delete normally and try inserting another
# one with the same name. Should fail with 409
dataset = CkanDataset(generate_dataset())
dataset.name = 'dataset-to-delete'
created = client.create_dataset(dataset)
assert created.is_equivalent(dataset)
client.delete_dataset(created.id)
new_dataset = CkanDataset(generate_dataset())
new_dataset.name = 'dataset-to-delete'
with pytest.raises(HTTPError) as excinfo:
client.create_dataset(new_dataset)
assert excinfo.value.status_code == 409
del dataset, created, new_dataset, excinfo
# ------------------------------------------------------------
# Now let's try updating + deleting
dataset = CkanDataset(generate_dataset())
dataset.name = 'dataset-to-delete-2'
created = client.create_dataset(dataset)
assert created.is_equivalent(dataset)
client.wipe_dataset(created.id)
new_dataset = CkanDataset(generate_dataset())
new_dataset.name = 'dataset-to-delete-2'
# Should not fail anymore
created = client.create_dataset(new_dataset)
assert created.name == 'dataset-to-delete-2'
|
the-stack_0_12753 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <[email protected]>
# Copyright 2018 Adam Miller <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
  - Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
options:
name:
description:
- "A list of package names, or package specifier with version, like C(name-1.0)
When using state=latest, this can be '*' which means run: dnf -y update.
You can also pass a url or a local path to a rpm file."
required: true
aliases:
- pkg
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
choices: ['absent', 'present', 'installed', 'removed', 'latest']
default: "present"
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
type: bool
default: 'no'
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
version_added: "2.3"
default: "/"
releasever:
description:
- Specifies an alternative release from which all packages will be
installed.
required: false
version_added: "2.6"
default: null
autoremove:
description:
- If C(yes), removes all "leaf" packages from the system that were originally
installed as dependencies of user-installed packages but which are no longer
required by any such package. Should be used alone or when state is I(absent)
type: bool
default: false
version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
skip_broken:
description:
      - Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
required: false
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
required: false
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
required: false
version_added: "2.7"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
required: false
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
required: false
choices: [ all, main, repoid ]
version_added: "2.7"
validate_certs:
description:
      - This only applies if using an https URL as the source of the rpm, e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
      - This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
allow_downgrade:
description:
      - Specify whether the named package and version is allowed to downgrade
        an already installed higher version of that package.
Note that setting allow_downgrade=True can make this module
behave in a non-idempotent way. The task could end up with a set
of packages that does not match the complete list of specified
packages to install (because dependencies between the downgraded
package and others can cause changes to the packages which were
in the earlier transaction).
type: bool
default: False
version_added: "2.7"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: True
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
required: false
default: "no"
type: bool
version_added: "2.7"
notes:
  - When used with a `loop:` each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
- Group removal doesn't work if the group was installed with Ansible because
upstream dnf's API doesn't properly mark groups as installed, therefore upon
removal the module is unable to detect that the group is installed
(https://bugzilla.redhat.com/show_bug.cgi?id=1620324)
requirements:
- "python >= 2.6"
- python-dnf
  - "for the autoremove option you need dnf >= 2.0.1"
author:
- Igor Gnatenko (@ignatenkobrain) <[email protected]>
- Cristian van Ee (@DJMuggs) <cristian at cvee.org>
- Berend De Schouwer (@berenddeschouwer)
- Adam Miller (@maxamillion) <[email protected]>
'''
EXAMPLES = '''
- name: install the latest version of Apache
dnf:
name: httpd
state: latest
- name: remove the Apache package
dnf:
name: httpd
state: absent
- name: install the latest version of Apache from the testing repo
dnf:
name: httpd
enablerepo: testing
state: present
- name: upgrade all packages
dnf:
name: "*"
state: latest
- name: install the nginx rpm from a remote repo
dnf:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: install nginx rpm from a local file
dnf:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install the 'Development tools' package group
dnf:
name: '@Development tools'
state: present
- name: Autoremove unneeded packages installed as dependencies
dnf:
autoremove: yes
- name: Uninstall httpd but keep its dependencies
dnf:
name: httpd
state: absent
autoremove: no
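# The following additional examples are illustrative sketches based on the
# options documented above (security, exclude, download_only); the package
# names are placeholders.
- name: Install only updates that have been marked security related
  dnf:
    name: "*"
    state: latest
    security: yes
- name: Upgrade all packages but exclude anything matching kernel*
  dnf:
    name: "*"
    state: latest
    exclude: kernel*
- name: Download httpd without installing it
  dnf:
    name: httpd
    state: latest
    download_only: yes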
'''
import os
import re
import tempfile
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import PY2, text_type
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
def _sanitize_dnf_error_msg(self, spec, error):
"""
For unhandled dnf.exceptions.Error scenarios, there are certain error
messages we want to filter. Do that here.
"""
if to_text("no package matched") in to_text(error):
return "No package {0} available.".format(spec)
return error
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
**result)
# Added for YUM3/YUM4 compat
if package.repoid == 'installed':
result['yumstate'] = 'installed'
else:
result['yumstate'] = 'available'
return result
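    # Illustrative sketch (hypothetical package values): for an installed
    # package with name=httpd, epoch=0, version=2.4.34, release=5.fc28,
    # arch=x86_64 and repoid='installed', the returned dict would be
    #   {'name': 'httpd', 'arch': 'x86_64', 'epoch': '0', 'release': '5.fc28',
    #    'version': '2.4.34', 'repo': 'installed',
    #    'nevra': '0:httpd-2.4.34-5.fc28.x86_64', 'yumstate': 'installed'}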
def _packagename_dict(self, packagename):
"""
Return a dictionary of information for a package name string or None
if the package name doesn't contain at least all NVR elements
"""
if packagename[-4:] == '.rpm':
packagename = packagename[:-4]
# This list was auto generated on a Fedora 28 system with the following one-liner
# printf '[ '; for arch in $(ls /usr/lib/rpm/platform); do printf '"%s", ' ${arch%-linux}; done; printf ']\n'
redhat_rpm_arches = [
"aarch64", "alphaev56", "alphaev5", "alphaev67", "alphaev6", "alpha",
"alphapca56", "amd64", "armv3l", "armv4b", "armv4l", "armv5tejl", "armv5tel",
"armv5tl", "armv6hl", "armv6l", "armv7hl", "armv7hnl", "armv7l", "athlon",
"geode", "i386", "i486", "i586", "i686", "ia32e", "ia64", "m68k", "mips64el",
"mips64", "mips64r6el", "mips64r6", "mipsel", "mips", "mipsr6el", "mipsr6",
"noarch", "pentium3", "pentium4", "ppc32dy4", "ppc64iseries", "ppc64le", "ppc64",
"ppc64p7", "ppc64pseries", "ppc8260", "ppc8560", "ppciseries", "ppc", "ppcpseries",
"riscv64", "s390", "s390x", "sh3", "sh4a", "sh4", "sh", "sparc64", "sparc64v",
"sparc", "sparcv8", "sparcv9", "sparcv9v", "x86_64"
]
rpm_arch_re = re.compile(r'(.*)\.(.*)')
rpm_nevr_re = re.compile(r'(\S+)-(?:(\d*):)?(.*)-(~?\w+[\w.]*)')
try:
arch = None
rpm_arch_match = rpm_arch_re.match(packagename)
if rpm_arch_match:
nevr, arch = rpm_arch_match.groups()
if arch in redhat_rpm_arches:
packagename = nevr
rpm_nevr_match = rpm_nevr_re.match(packagename)
if rpm_nevr_match:
name, epoch, version, release = rpm_nevr_re.match(packagename).groups()
if not version or not version.split('.')[0].isdigit():
return None
else:
return None
except AttributeError as e:
self.module.fail_json(
msg='Error attempting to parse package: %s, %s' % (packagename, to_native(e)),
rc=1,
results=[]
)
if not epoch:
epoch = "0"
if ':' in name:
epoch_name = name.split(":")
epoch = epoch_name[0]
name = ''.join(epoch_name[1:])
result = {
'name': name,
'epoch': epoch,
'release': release,
'version': version,
}
return result
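    # Illustrative sketch (hypothetical input): for 'httpd-2.4.34-5.fc28.x86_64.rpm'
    # the '.rpm' suffix and the recognized 'x86_64' arch are stripped first, and
    # the NEVR regex then yields
    #   {'name': 'httpd', 'epoch': '0', 'version': '2.4.34', 'release': '5.fc28'}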
# Original implementation from yum.rpmUtils.miscutils (GPLv2+)
# http://yum.baseurl.org/gitweb?p=yum.git;a=blob;f=rpmUtils/miscutils.py
def _compare_evr(self, e1, v1, r1, e2, v2, r2):
# return 1: a is newer than b
# 0: a and b are the same version
# -1: b is newer than a
if e1 is None:
e1 = '0'
else:
e1 = str(e1)
v1 = str(v1)
r1 = str(r1)
if e2 is None:
e2 = '0'
else:
e2 = str(e2)
v2 = str(v2)
r2 = str(r2)
# print '%s, %s, %s vs %s, %s, %s' % (e1, v1, r1, e2, v2, r2)
rc = dnf.rpm.rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
# print '%s, %s, %s vs %s, %s, %s = %s' % (e1, v1, r1, e2, v2, r2, rc)
return rc
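    # Illustrative sketch of the comparison semantics documented above (assuming
    # rpm label comparison behaves as described): a higher epoch wins regardless
    # of version and release, e.g.
    #   self._compare_evr('1', '1.0', '1', '0', '9.9', '9')   ->  1
    #   self._compare_evr(None, '1.0', '1', None, '1.0', '2') -> -1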
def fetch_rpm_from_url(self, spec):
# FIXME: Remove this once this PR is merged:
# https://github.com/ansible/ansible/pull/19172
# download package so that we can query it
package_name, dummy = os.path.splitext(str(spec.rsplit('/', 1)[1]))
package_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, prefix=package_name, suffix='.rpm', delete=False)
self.module.add_cleanup_file(package_file.name)
try:
rsp, info = fetch_url(self.module, spec)
if not rsp:
self.module.fail_json(
msg="Failure downloading %s, %s" % (spec, info['msg']),
results=[],
)
data = rsp.read(BUFSIZE)
while data:
package_file.write(data)
data = rsp.read(BUFSIZE)
package_file.close()
except Exception as e:
self.module.fail_json(
msg="Failure downloading %s, %s" % (spec, to_native(e)),
results=[],
)
return package_file.name
def _ensure_dnf(self):
if not HAS_DNF:
if PY2:
package = 'python2-dnf'
else:
package = 'python3-dnf'
if self.module.check_mode:
self.module.fail_json(
                    msg="`{0}` is not installed, but it is required "
                    "for the Ansible dnf module.".format(package),
results=[],
)
self.module.run_command(['dnf', 'install', '-y', package], check_rc=True)
global dnf
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
except ImportError:
self.module.fail_json(
msg="Could not import the dnf python module. "
"Please install `{0}` package.".format(package),
results=[],
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
"""Configure the dnf Base object."""
if self.enable_plugin and self.disable_plugin:
base.init_plugins(self.disable_plugin, self.enable_plugin)
elif self.enable_plugin:
base.init_plugins(enable_plugins=self.enable_plugin)
elif self.disable_plugin:
base.init_plugins(self.disable_plugin)
conf = base.conf
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set installroot
conf.installroot = installroot
# Set excludes
if self.exclude:
conf.exclude(self.exclude)
# Set disable_excludes
if self.disable_excludes:
conf.disable_excludes.append(self.disable_excludes)
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
if self.download_only:
conf.downloadonly = True
# Change the configuration file path if provided
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file,
results=[],
)
else:
conf.config_file_path = conf_file
# Default in dnf upstream is true
conf.clean_requirements_on_remove = self.autoremove
# Read the configuration file
conf.read()
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
for repo in repos.get_matching(repo_pattern):
repo.enable()
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot)
self._specify_repositories(base, disablerepo, enablerepo)
base.fill_sack(load_system_repo='auto')
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
base._update_security_filters = [base.sack.query().filter(**key)]
if self.security:
key = {'advisory_type__eq': 'security'}
base._update_security_filters = [base.sack.query().filter(**key)]
if self.update_cache:
base.update_cache()
return base
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
self.module.exit_json(msg="", results=results)
def _is_installed(self, pkg):
installed = self.base.sack.query().installed()
if installed.filter(name=pkg):
return True
else:
return False
def _is_newer_version_installed(self, pkg_name):
candidate_pkg = self._packagename_dict(pkg_name)
if not candidate_pkg:
# The user didn't provide a versioned rpm, so version checking is
# not required
return False
installed = self.base.sack.query().installed()
installed_pkg = installed.filter(name=candidate_pkg['name']).run()
if installed_pkg:
installed_pkg = installed_pkg[0]
# this looks weird but one is a dict and the other is a dnf.Package
evr_cmp = self._compare_evr(
installed_pkg.epoch, installed_pkg.version, installed_pkg.release,
candidate_pkg['epoch'], candidate_pkg['version'], candidate_pkg['release'],
)
if evr_cmp == 1:
return True
else:
return False
else:
return False
def _mark_package_install(self, pkg_spec, upgrade=False):
"""Mark the package for install."""
is_newer_version_installed = self._is_newer_version_installed(pkg_spec)
is_installed = self._is_installed(pkg_spec)
try:
if self.allow_downgrade:
# dnf only does allow_downgrade, we have to handle this ourselves
# because it allows a possibility for non-idempotent transactions
# on a system's package set (pending the yum repo has many old
# NVRs indexed)
if upgrade:
if is_installed:
self.base.upgrade(pkg_spec)
else:
self.base.install(pkg_spec)
else:
self.base.install(pkg_spec)
elif not self.allow_downgrade and is_newer_version_installed:
return {'failed': False, 'msg': '', 'failure': '', 'rc': 0}
elif not is_newer_version_installed:
if upgrade:
if is_installed:
self.base.upgrade(pkg_spec)
else:
self.base.install(pkg_spec)
else:
self.base.install(pkg_spec)
else:
if upgrade:
if is_installed:
self.base.upgrade(pkg_spec)
else:
self.base.install(pkg_spec)
else:
self.base.install(pkg_spec)
return {'failed': False, 'msg': 'Installed: {0}'.format(pkg_spec), 'failure': '', 'rc': 0}
except dnf.exceptions.MarkingError as e:
return {
'failed': True,
'msg': "No package {0} available.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.DepsolveError as e:
return {
'failed': True,
                'msg': "Depsolve Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
return {'failed': False, 'msg': '', 'failure': ''}
else:
return {
'failed': True,
                    'msg': "Unknown Error occurred for package {0}.".format(pkg_spec),
'failure': " ".join((pkg_spec, to_native(e))),
'rc': 1,
"results": []
}
def _parse_spec_group_file(self):
pkg_specs, grp_specs, filenames = [], [], []
for name in self.names:
if name.endswith(".rpm"):
if '://' in name:
name = self.fetch_rpm_from_url(name)
filenames.append(name)
elif name.startswith("@"):
grp_specs.append(name[1:])
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, filenames
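    # Illustrative sketch (hypothetical name list): with
    #   self.names = ['httpd', '@Development tools', '/tmp/nginx.rpm']
    # this helper returns
    #   (['httpd'], ['Development tools'], ['/tmp/nginx.rpm']);
    # a remote '*.rpm' URL would additionally be downloaded via fetch_rpm_from_url.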
def _update_only(self, pkgs):
not_installed = []
for pkg in pkgs:
if self._is_installed(pkg):
try:
if isinstance(to_text(pkg), text_type):
self.base.upgrade(pkg)
else:
self.base.package_upgrade(pkg)
except Exception as e:
self.module.fail_json(
                        msg="Error occurred attempting update_only operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
else:
not_installed.append(pkg)
return not_installed
def _install_remote_rpms(self, filenames):
if int(dnf.__version__.split(".")[0]) >= 2:
pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
else:
pkgs = []
try:
for filename in filenames:
pkgs.append(self.base.add_remote_rpm(filename))
except IOError as e:
if to_text("Can not load RPM file") in to_text(e):
self.module.fail_json(
                        msg="Error occurred attempting remote rpm install of package: {0}. {1}".format(filename, to_native(e)),
results=[],
rc=1,
)
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs:
try:
if self._is_newer_version_installed(self._package_dict(pkg)['nevra']):
if self.allow_downgrade:
self.base.package_install(pkg)
else:
self.base.package_install(pkg)
except Exception as e:
self.module.fail_json(
                        msg="Error occurred attempting remote rpm operation: {0}".format(to_native(e)),
results=[],
rc=1,
)
def ensure(self):
allow_erasing = False
response = {
'msg': "",
'changed': False,
'results': [],
'rc': 0
}
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failure_response = {
'msg': "",
'failures': [],
'results': [],
'rc': 1
}
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run
if not self.names and self.autoremove:
self.names = []
self.state = 'absent'
if self.names == ['*'] and self.state == 'latest':
try:
self.base.upgrade_all()
except dnf.exceptions.DepsolveError as e:
                failure_response['msg'] = "Depsolve Error occurred attempting to upgrade all packages"
self.module.fail_json(**failure_response)
else:
pkg_specs, group_specs, filenames = self._parse_spec_group_file()
if group_specs:
self.base.read_comps()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = self.base.comps.group_by_pattern(group_spec)
if group:
groups.append(group.id)
else:
environment = self.base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
self.module.fail_json(
msg="No group {0} available.".format(group_spec),
results=[],
)
if self.state in ['installed', 'present']:
# Install files.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
# Install groups.
for group in groups:
try:
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.DepsolveError as e:
                        failure_response['msg'] = "Depsolve Error occurred attempting to install group: {0}".format(group)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
                        failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
# Install packages.
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
install_result = self._mark_package_install(pkg_spec)
if install_result['failed']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg(pkg_spec, install_result['failure']))
else:
response['results'].append(install_result['msg'])
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for filename in filenames:
response['results'].append("Installed {0}".format(filename))
for group in groups:
try:
try:
self.base.group_upgrade(group)
response['results'].append("Group {0} upgraded.".format(group))
except dnf.exceptions.CompsError:
if not self.update_only:
# If not already installed, try to install.
group_pkg_count_installed = self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
if group_pkg_count_installed == 0:
response['results'].append("Group {0} already installed.".format(group))
else:
response['results'].append("Group {0} installed.".format(group))
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((group, to_native(e))))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.DepsolveError as e:
                            failure_response['msg'] = "Depsolve Error occurred attempting to install environment: {0}".format(environment)
except dnf.exceptions.Error as e:
failure_response['failures'].append(" ".join((environment, to_native(e))))
if self.update_only:
not_installed = self._update_only(pkg_specs)
for spec in not_installed:
response['results'].append("Packages providing %s not installed due to update_only specified" % spec)
else:
for pkg_spec in pkg_specs:
                        # best effort causes the latest available package to be
                        # installed even if it was not previously installed
self.base.conf.best = True
install_result = self._mark_package_install(pkg_spec, upgrade=True)
if install_result['failed']:
failure_response['msg'] += install_result['msg']
failure_response['failures'].append(self._sanitize_dnf_error_msg(pkg_spec, install_result['failure']))
else:
response['results'].append(install_result['msg'])
else:
# state == absent
if filenames:
self.module.fail_json(
msg="Cannot remove paths -- please specify package name.",
results=[],
)
for group in groups:
try:
self.base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
except AttributeError:
# Group either isn't installed or wasn't marked installed at install time
# because of DNF bug
#
# This is necessary until the upstream dnf API bug is fixed where installing
# a group via the dnf API doesn't actually mark the group as installed
# https://bugzilla.redhat.com/show_bug.cgi?id=1620324
pass
for environment in environments:
try:
self.base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
if installed.filter(name=pkg_spec):
self.base.remove(pkg_spec)
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
allow_erasing = True
if self.autoremove:
self.base.autoremove()
try:
if not self.base.resolve(allow_erasing=allow_erasing):
if failure_response['failures']:
failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Nothing to do"
self.module.exit_json(**response)
else:
response['changed'] = True
if self.module.check_mode:
if failure_response['failures']:
                    failure_response['msg'] = 'Failed to install some of the specified packages'
self.module.fail_json(**failure_response)
response['msg'] = "Check mode: No changes made, but would have if not in check mode"
self.module.exit_json(**response)
try:
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
self.module.fail_json(
msg="Failed to download packages: {0}".format(to_text(e)),
results=[],
)
if self.download_only:
for package in self.base.transaction.install_set:
response['results'].append("Downloaded: {0}".format(package))
self.module.exit_json(**response)
else:
self.base.do_transaction()
for package in self.base.transaction.install_set:
response['results'].append("Installed: {0}".format(package))
for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failure_response['failures']:
                    failure_response['msg'] = 'Failed to install some of the specified packages'
                    self.module.fail_json(**failure_response)
self.module.exit_json(**response)
except dnf.exceptions.DepsolveError as e:
            failure_response['msg'] = "Depsolve Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
except dnf.exceptions.Error as e:
if to_text("already installed") in to_text(e):
response['changed'] = False
response['results'].append("Package already installed: {0}".format(to_native(e)))
self.module.exit_json(**response)
else:
                failure_response['msg'] = "Unknown Error occurred: {0}".format(to_native(e))
self.module.fail_json(**failure_response)
@staticmethod
def has_dnf():
return HAS_DNF
def run(self):
"""The main function."""
# Check if autoremove is called correctly
if self.autoremove:
if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
self.module.fail_json(
msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__,
results=[],
)
if self.state not in ["absent", None]:
self.module.fail_json(
msg="Autoremove should be used alone or with state=absent",
results=[],
)
# Set state as installed by default
        # This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
self.list_items(self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not dnf.util.am_i_root():
self.module.fail_json(
msg="This command has to be run under the root user.",
results=[],
)
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
self.ensure()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
module = AnsibleModule(
**yumdnf_argument_spec
)
module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.fail_json(
msg="Failed to synchronize repodata: {0}".format(to_native(de)),
rc=1,
results=[],
changed=False
)
if __name__ == '__main__':
main()
|
the-stack_0_12754 | # -*- coding: utf-8 -*-
"""Defines several tools for monitoring net activity."""
# pylint: disable=F0401, E1101, too-many-lines, wrong-import-order
import logging as _logging
import os as _os
import subprocess as _subprocess
import collections as _collections
import numpy as _np
# pylint: disable=no-name-in-module
from scipy.stats import bernoulli as _bernoulli
from scipy.ndimage.interpolation import rotate as _rotate
from sklearn.decomposition import PCA as _PCA
from .tools import pad as _pad
# CAREFUL! This must be imported before any caffe-related import!
from .initialization import init as _init
import caffe as _caffe
try: # pragma: no cover
import cv2 as _cv2
_cv2INTER_CUBIC = _cv2.INTER_CUBIC # pylint: disable=invalid-name
_cv2INTER_LINEAR = _cv2.INTER_LINEAR # pylint: disable=invalid-name
_cv2INTER_NEAREST = _cv2.INTER_NEAREST # pylint: disable=invalid-name
_cv2resize = _cv2.resize # pylint: disable=invalid-name
except ImportError: # pragma: no cover
_cv2 = None
_cv2INTER_CUBIC = None # pylint: disable=invalid-name
_cv2INTER_LINEAR = None # pylint: disable=invalid-name
_cv2INTER_NEAREST = None # pylint: disable=invalid-name
_cv2resize = None # pylint: disable=invalid-name
try: # pragma: no cover
import matplotlib.pyplot as _plt
import matplotlib.ticker as _tkr
import matplotlib.colorbar as _colorbar
from mpl_toolkits.axes_grid1 import make_axes_locatable as _make_axes_locatable
_PLT_AVAILABLE = True
except ImportError: # pragma: no cover
_PLT_AVAILABLE = False
_init()
_LOGGER = _logging.getLogger(__name__)
class Monitor(object): # pylint: disable=R0903
"""
The monitor interface.
Should be implemented by any monitor class. The method
:py:func:`barrista.monitoring.Monitor.__call__` must be specified,
the function :py:func:`barrista.monitoring.Monitor.finalize` may
optionally be specified.
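    A minimal custom monitor might look like the following sketch (the counter
    attribute is only an example; the callback dispatch is implemented in
    :py:func:`barrista.monitoring.Monitor.__call__`)::

        class BatchCounter(Monitor):

            def __init__(self):
                self.count = 0

            def _post_train_batch(self, kwargs):
                self.count += 1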
"""
def __call__(self, kwargs):
"""
The call implementation.
For available keyword arguments, see the documentation of
:py:class:`barrista.solver.SolverInterface.Fit`.
The callback signals are used as follows:
* initialize_train: called once before training starts,
* initialize_test: called once before training starts (if training with
a validation set is used) or once before testing,
* pre_fit: called before fitting mode is used (e.g., before going
back to fitting during training after a validation run),
* pre_test: called before testing mode is used (e.g., during training
before validation starts),
* post_test: called when testing finished,
* pre_train_batch: before a training batch is fed to the network,
* post_train_batch: after forwarding a training batch,
* pre_test_batch: before a test batch is fed to the network,
* post_test_batch: after a test batch was forwarded through the
network.
"""
if kwargs['callback_signal'] == 'initialize_train':
self._initialize_train(kwargs)
elif kwargs['callback_signal'] == 'initialize_test':
self._initialize_test(kwargs)
elif kwargs['callback_signal'] == 'pre_fit':
self._pre_fit(kwargs)
elif kwargs['callback_signal'] == 'pre_test':
self._pre_test(kwargs)
elif kwargs['callback_signal'] == 'post_test':
self._post_test(kwargs)
elif kwargs['callback_signal'] == 'pre_test_batch':
self._pre_test_batch(kwargs)
elif kwargs['callback_signal'] == 'post_test_batch':
self._post_test_batch(kwargs)
elif kwargs['callback_signal'] == 'pre_train_batch':
self._pre_train_batch(kwargs)
elif kwargs['callback_signal'] == 'post_train_batch':
self._post_train_batch(kwargs)
def _initialize_train(self, kwargs): # pylint: disable=C0111
pass
def _initialize_test(self, kwargs): # pylint: disable=C0111
pass
def _pre_fit(self, kwargs): # pylint: disable=C0111
pass
def _pre_test(self, kwargs): # pylint: disable=C0111
pass
def _post_test(self, kwargs): # pylint: disable=C0111
pass
def _pre_test_batch(self, kwargs): # pylint: disable=C0111
pass
def _post_test_batch(self, kwargs): # pylint: disable=C0111
pass
def _pre_train_batch(self, kwargs): # pylint: disable=C0111
pass
def _post_train_batch(self, kwargs): # pylint: disable=C0111
pass
def finalize(self, kwargs):
"""Will be called at the end of a training/fitting process."""
pass
class DataMonitor(Monitor): # pylint: disable=R0903
r"""
Monitor interface for filling the blobs of a network.
This is a specific monitor which will fill the blobs of the network
for the forward pass or solver step.
Ideally, there should only be one such monitor per callback,
but multiple ones are possible.
"""
pass
class ParallelMonitor(Monitor):
r"""
Monitor interface for monitors executed parallel to processing a batch.
The order of all monitors implementing this interface is respected. They
will work on a dummy network object with dummy blobs and prepare their
data. The dummy blob content is then copied to the real network prior
to the next batch execution.
"""
def get_parallel_blob_names(self): # pragma: no cover
"""Get the names of all blobs that must be provided for the dummy."""
raise NotImplementedError()
# pylint: disable=too-few-public-methods
class StaticDataMonitor(DataMonitor, ParallelMonitor):
r"""
Always provides the same data for a specific net input blob.
Parameters
==========
:param X: dict(string, np.ndarray)
The static input blobs to use.
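    Example (illustrative sketch; the blob name 'data' and the array shape are
    assumptions and must match the network definition; numpy is assumed to be
    imported as ``np``)::

        monitor = StaticDataMonitor(
            X={'data': np.zeros((10, 3, 24, 24), dtype='float32')})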
"""
def __init__(self, X):
self._X = X # pylint: disable=C0103
def _initialize_train(self, kwargs):
self._initialize(kwargs)
def _initialize_test(self, kwargs):
self._initialize(kwargs)
def _initialize(self, kwargs):
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
for key, value in list(self._X.items()):
assert key in list(net.blobs.keys()), (
'data key has no corresponding network blob {} {}'.format(
key, str(list(net.blobs.keys()))))
assert isinstance(value, _np.ndarray), (
'data must be a numpy nd array ({})'.format(type(value))
)
def _pre_train_batch(self, kwargs):
self._pre_batch(kwargs['net'], kwargs)
def _pre_test_batch(self, kwargs):
self._pre_batch(kwargs['testnet'], kwargs)
def _pre_batch(self, net, kwargs): # pylint: disable=unused-argument
for key in list(self._X.keys()):
net.blobs[key].data[...] = self._X[key]
def get_parallel_blob_names(self):
"""Get the names of all blobs that must be provided for the dummy."""
return list(self._X.keys())
# pylint: disable=too-few-public-methods
class OversamplingDataMonitor(DataMonitor, ParallelMonitor):
r"""
Provides oversampled data.
Parameters
==========
:param blobinfos: dict(string, string|None).
      Associates each blob name to oversample with, optionally, the
      interpolation method to use for the resize. This may be 'n' (nearest
      neighbour), 'c' (cubic), 'l' (linear) or None (no interpolation). If an
      interpolation method is selected, `before_oversample_resize_to` must
      not be None and must provide a size.
:param before_oversample_resize_to: dict(string, 2-tuple).
Specifies a size to which the image inputs will be resized before the
oversampling is invoked.
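    Example (illustrative sketch; the blob name and the resize target are
    assumptions)::

        monitor = OversamplingDataMonitor(
            blobinfos={'data': 'l'},
            before_oversample_resize_to={'data': (256, 256)})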
"""
def __init__(self,
blobinfos,
before_oversample_resize_to=None):
for val in blobinfos.values():
assert val in ['n', 'c', 'l', None]
self._blobinfos = blobinfos
for key, val in blobinfos.items():
if val is not None:
assert key in list(before_oversample_resize_to.keys())
self._before_oversample_resize_to = before_oversample_resize_to
self._batch_size = None
def get_parallel_blob_names(self):
"""Get the names of all blobs that must be provided for the dummy."""
return list(self._blobinfos.keys())
def _initialize_train(self, kwargs):
raise Exception("The OversamplingDataMonitor can only be used during "
"testing!")
def _initialize_test(self, kwargs):
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
for key in list(self._blobinfos.keys()):
assert key in list(net.blobs.keys()), (
'data key has no corresponding network blob {} {}'.format(
key, str(list(net.blobs.keys()))))
def _pre_test(self, kwargs): # pragma: no cover
net = kwargs['testnet']
self._batch_size = net.blobs[
list(self._blobinfos.keys())[0]].data.shape[0]
def _pre_test_batch(self, kwargs): # pragma: no cover
for blob_name in list(self._blobinfos):
assert blob_name in kwargs['data_orig'], (
"The unchanged data must be provided by another DataProvider, "
"e.g., CyclingDataMonitor with `only_preload`!")
assert (len(kwargs['data_orig'][blob_name]) * 10 ==
self._batch_size), (
"The number of provided images * 10 must be the batch "
"size!")
# pylint: disable=invalid-name
for im_idx, im in enumerate(kwargs['data_orig'][blob_name]):
if self._blobinfos[blob_name] is not None:
if self._blobinfos[blob_name] == 'n':
interpolation = _cv2INTER_NEAREST
elif self._blobinfos[blob_name] == 'c':
interpolation = _cv2INTER_CUBIC
elif self._blobinfos[blob_name] == 'l':
interpolation = _cv2INTER_LINEAR
oversampling_prep = _cv2resize(
_np.transpose(im, (1, 2, 0)),
(self._before_oversample_resize_to[blob_name][1],
self._before_oversample_resize_to[blob_name][0]),
interpolation=interpolation)
else:
oversampling_prep = _np.transpose(im, (1, 2, 0))
imshape = kwargs['testnet'].blobs[blob_name].data.shape[2:4]
kwargs['testnet'].blobs[blob_name].data[
im_idx * 10:(im_idx+1) * 10] =\
_np.transpose(
_caffe.io.oversample(
[oversampling_prep],
imshape),
(0, 3, 1, 2))
# pylint: disable=too-many-instance-attributes, R0903
class CyclingDataMonitor(DataMonitor, ParallelMonitor):
r"""
Uses the data sequentially.
    This monitor maps data to the network and cycles through the data
sequentially. It is the default monitor used if a user provides X
or X_val to the barrista.solver.fit method.
    If further processing of the original data is intended, use the flag
    ``only_preload``: subsequent monitors then find a dictionary of lists of
    the original datapoints under the name 'data_orig' in their ``kwargs``.
The data is in this case NOT written to the network input layers! This
can make sense, e.g., for the ``ResizingMonitor``.
:param X: dict of numpy.ndarray or list, or None.
      If specified, is used as input data. It is used sequentially, so
      shuffle it beforehand, if required. The keys of the dict must have
a corresponding layer name in the net. The values must be provided
already in network dimension order, i.e., usually channels, height,
width.
:param only_preload: list(string).
List of blobs for which the data will be loaded and stored in a dict
of (name: list) for further processing with other monitors.
:param input_processing_flags: dict(string, string).
Dictionary associating input blob names with intended preprocessing
methods. Valid values are:
* n: none,
* rn: resize, nearest neighbour,
* rc: resize, cubic,
* rl: resize, linear,
* pX: padding, with value X.
:param virtual_batch_size: int or None.
Override the network batch size. May only be used if ``only_preload`` is
set to True. Only makes sense with another DataMonitor in succession.
:param color_data_augmentation_sigmas: dict(string, float) or None.
Enhance the color of the samples as described in (Krizhevsky et al.,
2012). The parameter gives the sigma for the normal distribution that is
sampled to obtain the weights for scaled pixel principal components per
blob.
:param shuffle: Bool.
If set to True, shuffle the data every epoch. Default: False.
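    Example (illustrative sketch; blob names, shapes and processing flags are
    assumptions; numpy is assumed to be imported as ``np``)::

        monitor = CyclingDataMonitor(
            X={'data': np.zeros((100, 3, 32, 32), dtype='float32'),
               'labels': np.zeros((100, 1), dtype='float32')},
            input_processing_flags={'data': 'rl', 'labels': 'n'},
            shuffle=True)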
"""
# pylint: disable=too-many-arguments
def __init__(self,
X,
only_preload=None,
input_processing_flags=None,
virtual_batch_size=None,
color_data_augmentation_sigmas=None,
shuffle=False):
"""See class documentation."""
if only_preload is None:
only_preload = []
self.only_preload = only_preload
self._X = X # pylint: disable=C0103
assert X is not None
if input_processing_flags is None:
input_processing_flags = dict()
self._input_processing_flags = input_processing_flags
for key in input_processing_flags.keys():
assert key in self._X.keys()
self._padvals = dict()
for key, val in input_processing_flags.items():
assert (val in ['n', 'rn', 'rc', 'rl'] or
val.startswith('p')), (
"The input processing flags for the CyclingDataMonitor "
"must be in ['n', 'rn', 'rc', 'rl', 'p']: {}!".format(
val))
if val.startswith('p'):
self._padvals[key] = int(val[1:])
for key in self.only_preload:
assert key in self._X.keys()
self._sample_pointer = 0
self._len_data = None
self._initialized = False
self._batch_size = None
assert virtual_batch_size is None or self.only_preload, (
"If the virtual_batch_size is set, `only_preload` must be used!")
if virtual_batch_size is not None:
assert virtual_batch_size > 0
self._virtual_batch_size = virtual_batch_size
if color_data_augmentation_sigmas is None:
color_data_augmentation_sigmas = dict()
self._color_data_augmentation_sigmas = color_data_augmentation_sigmas
for key in list(self._color_data_augmentation_sigmas.keys()):
assert key in list(self._X.keys())
for key in list(self._X.keys()):
if key not in list(self._color_data_augmentation_sigmas.keys()):
self._color_data_augmentation_sigmas[key] = 0.
# pylint: disable=invalid-name
self._color_data_augmentation_weights = dict()
# pylint: disable=invalid-name
self._color_data_augmentation_components = dict()
self._shuffle = shuffle
self._sample_order = None
def get_parallel_blob_names(self):
return list(self._X.keys())
def _initialize_train(self, kwargs):
self._initialize(kwargs)
# Calculate the color channel PCA per blob if required.
for bname, sigma in self._color_data_augmentation_sigmas.items():
if sigma > 0.:
_LOGGER.info("Performing PCA for color data augmentation for "
"blob '%s'...", bname)
for im in self._X[bname]: # pylint: disable=invalid-name
assert im.ndim == 3 and im.shape[0] == 3, (
"To perform the color data augmentation, images must "
"be provided in shape (3, height, width).")
flldta = _np.vstack(
[im.reshape((3, im.shape[1] * im.shape[2])).T
for im in self._X[bname]])
# No need to copy the data another time, since `vstack` already
# copied it.
pca = _PCA(copy=False, whiten=False)
pca.fit(flldta)
self._color_data_augmentation_weights[bname] = _np.sqrt(
pca.explained_variance_.astype('float32'))
self._color_data_augmentation_components[bname] = \
pca.components_.T.astype('float32')
def _initialize_test(self, kwargs):
self._initialize(kwargs)
def _initialize(self, kwargs):
# we make sure, now that the network is available, that
# all names in the provided data dict has a corresponding match
# in the network
if self._initialized:
            raise Exception("This DataProvider has already been initialized! "
"Did you maybe try to use it for train and test? "
"This is not possible!")
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
self._len_data = len(list(self._X.values())[0])
for key, value in list(self._X.items()):
if key not in self._input_processing_flags:
self._input_processing_flags[key] = 'n'
assert key in list(net.blobs.keys()), (
'data key has no corresponding network blob {} {}'.format(
key, str(list(net.blobs.keys()))))
assert len(value) == self._len_data, (
'all items need to have the same length {} vs {}'.format(
len(value), self._len_data))
assert isinstance(value, _np.ndarray) or isinstance(value, list), (
'data must be a numpy nd array or list ({})'.format(type(value))
)
self._sample_order = list(range(self._len_data))
if self._shuffle:
_np.random.seed(1)
self._sample_order = _np.random.permutation(self._sample_order)
self._initialized = True
def _pre_fit(self, kwargs):
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
if self._virtual_batch_size is not None:
self._batch_size = self._virtual_batch_size
else:
self._batch_size = net.blobs[list(self._X.keys())[0]].data.shape[0]
assert self._batch_size > 0
def _pre_test(self, kwargs):
self._pre_fit(kwargs)
self._sample_pointer = 0
def _pre_train_batch(self, kwargs):
self._pre_batch(kwargs['net'], kwargs)
def _pre_test_batch(self, kwargs):
self._pre_batch(kwargs['testnet'], kwargs)
def _color_augment(self, bname, sample):
sigma = self._color_data_augmentation_sigmas[bname]
if sigma == 0.:
if isinstance(sample, (int, float)):
return float(sample)
else:
return sample.astype('float32')
else:
comp_weights = _np.random.normal(0., sigma, 3).astype('float32') *\
self._color_data_augmentation_weights[bname]
noise = _np.dot(self._color_data_augmentation_components[bname],
comp_weights.T)
return (sample.astype('float32').transpose((1, 2, 0)) + noise)\
.transpose((2, 0, 1))
def _pre_batch(self, net, kwargs): # pylint: disable=C0111, W0613, R0912
# this will simply cycle through the data.
samples_ids = [self._sample_order[idx % self._len_data]
for idx in
range(self._sample_pointer,
self._sample_pointer + self._batch_size)]
# updating the sample pointer for the next time
old_sample_pointer = self._sample_pointer
self._sample_pointer = (
(self._sample_pointer + len(samples_ids)) % self._len_data)
if self._shuffle and old_sample_pointer > self._sample_pointer:
# Epoch ended. Reshuffle.
self._sample_order = _np.random.permutation(self._sample_order)
if len(self.only_preload) > 0:
sample_dict = dict()
for key in list(self._X.keys()): # pylint: disable=too-many-nested-blocks
if key in self.only_preload:
sample_dict[key] = []
# this will actually fill the data for the network
for sample_idx in range(self._batch_size):
augmented_sample = self._color_augment(
key,
self._X[key][samples_ids[sample_idx]])
if key in self.only_preload:
sample_dict[key].append(augmented_sample)
else:
if (net.blobs[key].data[sample_idx].size == 1 and (
isinstance(self._X[key][samples_ids[sample_idx]],
(int, float)) or
self._X[key][samples_ids[sample_idx]].size == 1) or
self._X[key][samples_ids[sample_idx]].size ==
net.blobs[key].data[sample_idx].size):
if net.blobs[key].data[sample_idx].size == 1:
net.blobs[key].data[sample_idx] =\
augmented_sample
else:
net.blobs[key].data[sample_idx] = (
augmented_sample.reshape(
net.blobs[key].data.shape[1:]))
else:
if self._input_processing_flags[key] == 'n': # pragma: no cover
raise Exception(("Sample size {} does not match " +
"network input size {} and no " +
"preprocessing is allowed!")
.format(
augmented_sample.size,
net.blobs[key].data[sample_idx].size))
elif self._input_processing_flags[key] in ['rn',
'rc',
'rl']:
assert (
augmented_sample.shape[0]
== net.blobs[key].data.shape[1])
                            if self._input_processing_flags[key] == 'rn':
                                interp_method = _cv2INTER_NEAREST
                            elif self._input_processing_flags[key] == 'rc':
                                interp_method = _cv2INTER_CUBIC
                            else:
                                interp_method = _cv2INTER_LINEAR
for channel_idx in range(
net.blobs[key].data.shape[1]):
net.blobs[key].data[sample_idx, channel_idx] =\
_cv2resize(
augmented_sample[channel_idx],
(net.blobs[key].data.shape[3],
net.blobs[key].data.shape[2]),
interpolation=interp_method)
else:
# Padding.
net.blobs[key].data[sample_idx] = _pad(
augmented_sample,
net.blobs[key].data.shape[2:4],
val=self._padvals[key])
if len(self.only_preload) > 0:
kwargs['data_orig'] = sample_dict
class ResizingMonitor(ParallelMonitor, Monitor): # pylint: disable=R0903
r"""
Optionally resizes input data and adjusts the network input shape.
This monitor optionally resizes the input data randomly and adjusts
the network input size accordingly (this works only for batch size 1
and fully convolutional networks).
For this to work, it must be used with the ``CyclingDataMonitor`` with
``only_preload`` set.
:param blobinfos: dict(string, int).
Describes which blobs to apply the resizing operation to, and which
padding value to use for the remaining space.
:param base_scale: float.
If set to a value different than 1., apply the given base scale first
to images. If set to a value different than 1., the parameter
``interp_methods`` must be set.
:param random_change_up_to: float.
If set to a value different than 0., the scale change is altered
randomly with a uniformly drawn value from -``random_change_up_to`` to
``random_change_up_to``, that is being added to the base value.
:param net_input_size_adjustment_multiple_of: int.
If set to a value greater than 0, the blobs shape is adjusted from its
initial value (which is used as minimal one) in multiples of the given
one.
:param interp_methods: dict(string, string).
Dictionary which stores for every blob the interpolation method. The
string must be for each blob in ['n', 'c', 'l'] (nearest neighbour,
cubic, linear).
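    Example (illustrative sketch; the blob name, the ``images`` list and the
    scale parameters are assumptions). The monitor is meant to be combined
    with a ``CyclingDataMonitor`` that preloads the same blob::

        preload = CyclingDataMonitor(X={'data': images},
                                     only_preload=['data'])
        resizer = ResizingMonitor(
            blobinfos={'data': 0},
            base_scale=1.0,
            random_change_up_to=0.25,
            net_input_size_adjustment_multiple_of=32,
            interp_methods={'data': 'l'})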
"""
def __init__(self, # pylint: disable=R0913
blobinfos,
base_scale=1.,
random_change_up_to=0.,
net_input_size_adjustment_multiple_of=0,
interp_methods=None):
"""See class documentation."""
self._blobinfos = blobinfos
self._base_scale = base_scale
self._random_change_up_to = random_change_up_to
if self._base_scale != 1. or self._random_change_up_to != 0.:
assert interp_methods is not None
for key in self._blobinfos.keys():
assert key in interp_methods.keys()
assert interp_methods[key] in ['n', 'c', 'l']
self._interp_methods = interp_methods
self._adjustment_multiple_of = net_input_size_adjustment_multiple_of
self._min_input_size = None
self._batch_size = None
def _initialize_train(self, kwargs):
self._initialize(kwargs)
def _initialize_test(self, kwargs):
self._initialize(kwargs)
def _initialize(self, kwargs):
# we make sure, now that the network is available, that
# all names in the provided data dict have a corresponding match
# in the network
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
for key in list(self._blobinfos.keys()):
assert key in list(net.blobs.keys()), (
'data key has no corresponding network blob {} {}'.format(
key, str(list(net.blobs.keys()))))
assert net.blobs[key].data.ndim == 4
if self._adjustment_multiple_of > 0:
if self._min_input_size is None:
self._min_input_size = net.blobs[key].data.shape[2:4]
else:
assert (net.blobs[key].data.shape[2:4] ==
self._min_input_size), (
'if automatic input size adjustment is '
'activated, all inputs must be of same size '
'(first: {}, {}: {})'.format(
self._min_input_size, key,
net.blobs[key].data.shape[2:4]))
def _pre_fit(self, kwargs):
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
self._batch_size = net.blobs[
list(self._blobinfos.keys())[0]].data.shape[0]
if self._adjustment_multiple_of > 0:
assert self._batch_size == 1, (
"If size adjustment is activated, the batch size must be one!")
def _pre_test(self, kwargs):
self._pre_fit(kwargs)
def _pre_train_batch(self, kwargs):
self._pre_batch(kwargs['net'], kwargs)
def _pre_test_batch(self, kwargs):
self._pre_batch(kwargs['testnet'], kwargs)
# pylint: disable=C0111, W0613, R0912, too-many-locals
def _pre_batch(self, net, kwargs):
scales = None
sizes = None
        if 'data_orig' not in kwargs.keys():
raise Exception(
"This data monitor needs a data providing monitor "
"to run in advance (e.g., a CyclingDataMonitor with "
"`only_preload`)!")
for key, value in kwargs['data_orig'].items():
assert len(value) == self._batch_size
if sizes is None:
sizes = []
for img in value:
sizes.append(img.shape[1:3])
else:
for img_idx, img in enumerate(value):
# pylint: disable=unsubscriptable-object
assert img.shape[1:3] == sizes[img_idx]
for key, padval in self._blobinfos.items():
if scales is None:
scales = []
for sample_idx in range(self._batch_size):
if self._random_change_up_to > 0:
scales.append(
self._base_scale +
_np.random.uniform(low=-self._random_change_up_to,
high=self._random_change_up_to))
else:
scales.append(self._base_scale)
for sample_idx in range(self._batch_size):
# Get the scaled data.
scaled_sample = kwargs['data_orig'][key][sample_idx]
if scales[sample_idx] != 1.:
scaled_sample = _np.empty((scaled_sample.shape[0],
int(scaled_sample.shape[1] *
scales[sample_idx]),
int(scaled_sample.shape[2] *
scales[sample_idx])),
dtype='float32')
if self._interp_methods[key] == 'n':
interpolation_method = _cv2INTER_NEAREST
elif self._interp_methods[key] == 'l':
interpolation_method = _cv2INTER_LINEAR
else:
interpolation_method = _cv2INTER_CUBIC
for layer_idx in range(scaled_sample.shape[0]):
scaled_sample[layer_idx] = _cv2resize(
kwargs['data_orig'][key][sample_idx][layer_idx],
(scaled_sample.shape[2],
scaled_sample.shape[1]),
interpolation=interpolation_method)
# If necessary, adjust the network input size.
if self._adjustment_multiple_of > 0:
image_height, image_width = scaled_sample.shape[1:3]
netinput_height = int(max(
self._min_input_size[0] +
_np.ceil(
float(image_height - self._min_input_size[0]) /
self._adjustment_multiple_of) *
self._adjustment_multiple_of,
self._min_input_size[0]))
netinput_width = int(max(
self._min_input_size[1] +
_np.ceil(
float(image_width - self._min_input_size[1]) /
self._adjustment_multiple_of) *
self._adjustment_multiple_of,
self._min_input_size[1]))
net.blobs[key].reshape(1,
scaled_sample.shape[0],
netinput_height,
netinput_width)
# Put the data in place.
net.blobs[key].data[sample_idx] = _pad(
scaled_sample,
net.blobs[key].data.shape[2:4],
val=padval)
def get_parallel_blob_names(self):
"""Get the names of all blobs that must be provided for the dummy."""
return list(self._blobinfos.keys())
# pylint: disable=too-few-public-methods
class RotatingMirroringMonitor(ParallelMonitor, Monitor):
r"""
Rotate and/or horizontally mirror samples within blobs.
For every sample, the rotation and mirroring will be consistent
across the blobs.
:param blobinfos: dict(string, int).
A dictionary containing the blob names and the padding values that
will be applied.
:param max_rotation_degrees: float.
The rotation will be sampled uniformly from the interval
[-rotation_degrees, rotation_degrees[ for each sample.
:param mirror_prob: float.
The probability that horizontal mirroring occurs. Is as well sampled
individually for every sample.
:param mirror_value_swaps: dict(string, dict(int, list(2-tuples))).
Specifies for every blob for every layer whether any values must be
swapped if mirroring is applied. This is important when, e.g.,
mirroring annotation maps with left-right information. Every 2-tuple
contains (original value, new value). The locations of the swaps are
determined before any change is applied, so the order of tuples does not
play a role.
:param mirror_layer_swaps: dict(string, list(2-tuples)).
Specifies for every blob whether any layers must be swapped if
mirroring is applied. Can be used together with mirror_value_swaps: in
this case, the `mirror_value_swaps` are applied first, then the layers
are swapped.
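    Example (illustrative sketch; blob names, padding values and the label
    swap semantics are assumptions)::

        monitor = RotatingMirroringMonitor(
            blobinfos={'data': 0, 'annotations': 255},
            max_rotation_degrees=10.,
            mirror_prob=0.5,
            mirror_value_swaps={'annotations': {0: [(1, 2), (2, 1)]}})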
"""
# pylint: disable=too-many-arguments
def __init__(self,
blobinfos,
max_rotation_degrees,
mirror_prob=0.,
mirror_value_swaps=None,
mirror_layer_swaps=None):
"""See class documentation."""
self._blobinfos = blobinfos
self._rotation_degrees = max_rotation_degrees
self._mirror_prob = mirror_prob
self._batch_size = None
if mirror_value_swaps is None:
mirror_value_swaps = dict()
for key in list(mirror_value_swaps.keys()):
            assert key in self._blobinfos, ("Blob not handled: {}!"\
.format(key))
for layer_idx in list(mirror_value_swaps[key].keys()):
m_tochange = []
for swappair in mirror_value_swaps[key][layer_idx]:
assert len(swappair) == 2, (
"Swaps must be specified as (from_value, to_value): {}"\
.format(mirror_value_swaps[key][layer_idx]))
assert swappair[0] not in m_tochange, (
"Every value may change only to one new: {}."\
.format(mirror_value_swaps[key][layer_idx]))
m_tochange.append(swappair[0])
assert blobinfos[key] not in swappair, (
"A specified swap value is the fill value for this "
"blob: {}, {}, {}.".format(key,
blobinfos[key][layer_idx],
swappair))
if mirror_layer_swaps is None:
mirror_layer_swaps = dict()
for key in list(mirror_layer_swaps.keys()):
assert key in self._blobinfos, ("Blob not handled: {}!"\
.format(key))
idx_tochange = []
for swappair in mirror_layer_swaps[key]:
assert len(swappair) == 2, (
"Swaps must be specified as (from_value, to_value): {}"\
.format(swappair))
assert (swappair[0] not in idx_tochange and
swappair[1] not in idx_tochange), (
"Every value may only be swapped to or from one "
"position!")
idx_tochange.extend(swappair)
for key in list(self._blobinfos):
if key not in list(mirror_value_swaps.keys()):
mirror_value_swaps[key] = dict()
if key not in list(mirror_layer_swaps.keys()):
mirror_layer_swaps[key] = []
self._mirror_value_swaps = mirror_value_swaps
self._mirror_layer_swaps = mirror_layer_swaps
def get_parallel_blob_names(self):
"""Get the names of all blobs that must be provided for the dummy."""
return list(self._blobinfos.keys())
def _initialize_train(self, kwargs):
self._initialize(kwargs)
def _initialize_test(self, kwargs):
self._initialize(kwargs)
def _initialize(self, kwargs):
# we make sure, now that the network is available, that
# all names in the provided data dict have a corresponding match
# in the network
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
for key in list(self._blobinfos.keys()):
assert key in list(net.blobs.keys()), (
'data key has no corresponding network blob {} {}'.format(
key, str(list(net.blobs.keys()))))
assert net.blobs[key].data.ndim == 4
for layer_idx in self._mirror_value_swaps[key].keys():
                assert layer_idx < net.blobs[key].data.shape[1], ((
                    "The data for blob {} does not have enough layers for swapping "
                    "{}!").format(key, layer_idx))
for swappair in self._mirror_layer_swaps[key]:
assert (swappair[0] < net.blobs[key].data.shape[1] and
swappair[1] < net.blobs[key].data.shape[1]), (
"Not enough layers in blob {} to swap {}!".format(
key, swappair))
def _pre_fit(self, kwargs):
if 'test' in kwargs['callback_signal']:
net = kwargs['testnet']
else:
net = kwargs['net']
self._batch_size = net.blobs[
list(self._blobinfos.keys())[0]].data.shape[0]
def _pre_test(self, kwargs):
self._pre_fit(kwargs)
def _pre_train_batch(self, kwargs):
self._pre_batch(kwargs['net'], kwargs)
def _pre_test_batch(self, kwargs):
self._pre_batch(kwargs['testnet'], kwargs)
# pylint: disable=C0111, W0613, R0912, too-many-locals
def _pre_batch(self, net, kwargs):
rotations = None
mirrorings = None
spline_interpolation_order = 0
prefilter = False
for key, padval in self._blobinfos.items():
if rotations is None:
rotations = []
if self._rotation_degrees > 0.:
rotations = _np.random.uniform(low=-self._rotation_degrees,
high=self._rotation_degrees,
size=self._batch_size)
else:
rotations = [0.] * self._batch_size
if mirrorings is None:
mirrorings = []
if self._mirror_prob > 0.:
mirrorings = _bernoulli.rvs(self._mirror_prob,
size=self._batch_size)
else:
mirrorings = [0] * self._batch_size
for sample_idx in range(self._batch_size):
if rotations[sample_idx] != 0.:
net.blobs[key].data[sample_idx] = _rotate(
net.blobs[key].data[sample_idx],
rotations[sample_idx],
(1, 2),
reshape=False,
order=spline_interpolation_order,
mode='constant',
cval=padval,
prefilter=prefilter)
if mirrorings[sample_idx] == 1.:
net.blobs[key].data[sample_idx] = \
net.blobs[key].data[sample_idx, :, :, ::-1]
for layer_idx in range(net.blobs[key].data.shape[1]):
if (layer_idx not in
self._mirror_value_swaps[key].keys()):
continue
swap_indices = dict()
swap_tuples = self._mirror_value_swaps[key][layer_idx]
# Swaps.
for swappair in swap_tuples:
swap_indices[swappair[0]] = (
net.blobs[key].data[sample_idx, layer_idx] ==\
swappair[0])
for swappair in swap_tuples:
net.blobs[key].data[sample_idx, layer_idx][
swap_indices[swappair[0]]] = swappair[1]
if len(self._mirror_layer_swaps[key]) > 0:
new_layer_order = list(
range(net.blobs[key].data.shape[1]))
for swappair in self._mirror_layer_swaps[key]:
new_layer_order[swappair[0]],\
new_layer_order[swappair[1]] = \
new_layer_order[swappair[1]],\
new_layer_order[swappair[0]]
net.blobs[key].data[...] = net.blobs[key].data[
:, tuple(new_layer_order)]
class ResultExtractor(Monitor): # pylint: disable=R0903
r"""
This monitor is designed for monitoring scalar layer results.
The main use case is scalar outputs such as loss and accuracy.
IMPORTANT: this monitor will change cbparams and add new values to it.
Other monitors will most likely depend on these values, so ResultExtractors
should be among the first monitors in the callback list, e.g., by always
inserting them at the beginning.
It will extract the value of a layer and add the value to the cbparam.
:param cbparam_key: string.
The key we will overwrite/set in the cbparams dict.
:param layer_name: string.
The layer to extract the value from.
"""
def __init__(self, cbparam_key, layer_name):
"""See class documentation."""
self._layer_name = layer_name
self._cbparam_key = cbparam_key
self._init = False
self._not_layer_available = True
self._test_data = None
def __call__(self, kwargs):
"""Callback implementation."""
if self._not_layer_available and self._init:
return
Monitor.__call__(self, kwargs)
def _initialize_train(self, kwargs):
self._initialize(kwargs)
def _initialize_test(self, kwargs):
self._initialize(kwargs)
def _initialize(self, kwargs):
if self._init:
raise Exception("This ResultExtractor is already initialized! "
"Did you try to use it for train and test?")
if 'test' in kwargs['callback_signal']:
tmp_net = kwargs['testnet']
else:
tmp_net = kwargs['net']
if self._layer_name in list(tmp_net.blobs.keys()):
self._not_layer_available = False
self._init = True
assert self._cbparam_key not in kwargs, (
'it is only allowed to add keys to the cbparam, '
'not overwrite them {} {}'.format(self._cbparam_key,
list(kwargs.keys())))
def _pre_train_batch(self, kwargs):
kwargs[self._cbparam_key] = 0.0
def _post_train_batch(self, kwargs):
kwargs[self._cbparam_key] = float(
kwargs['net'].blobs[self._layer_name].data[...].ravel()[0])
def _pre_test(self, kwargs):
self._test_data = []
def _post_test(self, kwargs):
kwargs[self._cbparam_key] = _np.mean(self._test_data)
def _post_test_batch(self, kwargs):
# need to multiply by batch_size since it is normalized
# internally
self._test_data.append(float(
kwargs['testnet'].blobs[self._layer_name].data[...].ravel()[0]))
kwargs[self._cbparam_key] = self._test_data[-1]
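# Hedged usage sketch (added for illustration; the layer name 'loss' and the
# cbparam key 'train_loss' are assumptions, not part of the original file):
#     loss_extractor = ResultExtractor(cbparam_key='train_loss',
#                                      layer_name='loss')
# Place it at the front of the monitor/callback list so that later monitors
# (e.g. ProgressIndicator or JSONLogger below) can read 'train_loss'.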
# Again, tested in a subprocess and not discovered.
# pylint: disable=R0903
class ProgressIndicator(Monitor): # pragma: no cover
r"""
Generates a progress bar with current information about the process.
The progress bar always displays completion percentage and ETA. If
available, it also displays loss, accuracy, test loss and test accuracy.
It makes use of the following keyword arguments (\* indicates required):
* ``iter``\*,
* ``max_iter``\*,
* ``train_loss``,
* ``test_loss``,
* ``train_accuracy``,
* ``test_accuracy``.
"""
def __init__(self):
"""See class documentation."""
self.loss = None
self.test_loss = None
self.accuracy = None
self.test_accuracy = None
import tqdm
self.pbarclass = tqdm.tqdm
self.pbar = None
self.last_iter = 0
def _perf_string(self):
pstr = ''
if self.loss is not None:
pstr += 'ls: {0:.4f}|'.format(self.loss)
if self.accuracy is not None:
pstr += 'ac: {0:.4f}|'.format(self.accuracy)
if self.test_loss is not None:
pstr += 'tls: {0:.4f}|'.format(self.test_loss)
if self.test_accuracy is not None:
pstr += 'tac: {0:.4f}|'.format(self.test_accuracy)
return pstr
def _post_train_batch(self, kwargs):
if self.pbar is None:
self.pbar = self.pbarclass(total=kwargs['max_iter'])
if 'train_loss' in list(kwargs.keys()):
self.loss = kwargs['train_loss']
if 'train_accuracy' in list(kwargs.keys()):
self.accuracy = kwargs['train_accuracy']
self.pbar.set_description(self._perf_string())
self.pbar.update(kwargs['iter'] + kwargs['batch_size'] - self.last_iter)
self.last_iter = kwargs['iter'] + kwargs['batch_size']
def _post_test_batch(self, kwargs):
if self.pbar is None:
self.pbar = self.pbarclass(total=kwargs['max_iter'])
if 'test_loss' in list(kwargs.keys()):
self.test_loss = kwargs['test_loss']
if 'test_accuracy' in list(kwargs.keys()):
self.test_accuracy = kwargs['test_accuracy']
self.pbar.set_description(self._perf_string())
self.pbar.update(kwargs['iter'] - self.last_iter)
self.last_iter = kwargs['iter']
def _post_test(self, kwargs):
# Write the mean if possible.
if self.pbar is not None:
if 'test_loss' in list(kwargs.keys()):
self.test_loss = kwargs['test_loss']
if 'test_accuracy' in list(kwargs.keys()):
self.test_accuracy = kwargs['test_accuracy']
self.pbar.set_description(self._perf_string())
self.pbar.update(kwargs['iter'] - self.last_iter)
self.last_iter = kwargs['iter']
def finalize(self, kwargs): # pylint: disable=W0613
"""Close the ``tqdm`` progress bar."""
if self.pbar is not None:
self.pbar.close()
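# Illustration only (not part of the original file): ProgressIndicator takes
# no arguments and reads 'train_loss'/'test_loss' etc. from the callback
# kwargs, so it is typically paired with ResultExtractor instances that fill
# those keys in, e.g.
#     monitors = [ResultExtractor('train_loss', 'loss'), ProgressIndicator()]
# The ordering above is an assumption based on the ResultExtractor notes.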
def _sorted_ar_from_dict(inf, key): # pragma: no cover
iters = []
vals = []
for values in inf:
if key in values:
iters.append(int(values['NumIters']))
vals.append(float(values[key]))
sortperm = _np.argsort(iters)
arr = _np.array([iters, vals]).T
return arr[sortperm, :]
def _draw_perfplot(phases, categories, ars, outfile): # pragma: no cover
"""Draw the performance plots."""
fig, axes = _plt.subplots(nrows=len(categories), sharex=True)
for category_idx, category in enumerate(categories):
ax = axes[category_idx] # pylint: disable=invalid-name
ax.set_title(category.title())
for phase in phases:
if phase + '_' + category not in ars.keys():
continue
ar = ars[phase + '_' + category] # pylint: disable=invalid-name
alpha = 0.7
color = 'b'
if phase == 'test':
alpha = 1.0
color = 'g'
ax.plot(ar[:, 0], ar[:, 1],
label=phase.title(), c=color, alpha=alpha)
if phase == 'test':
ax.scatter(ar[:, 0], ar[:, 1],
c=color, s=50)
ax.set_ylabel(category.title())
ax.grid()
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
_plt.savefig(outfile, bbox_inches='tight')
_plt.close(fig)
class JSONLogger(Monitor): # pylint: disable=R0903
r"""
Logs available information to a JSON file.
The information is stored in a dictionary of lists. The lists contain
score information and the iteration at which it was obtained. The
currently logged scores are loss, accuracy, test loss and test accuracy.
The logger makes use of the following keyword arguments
(\* indicates required):
* ``iter``\*,
:param path: string.
The path to store the file in.
:param name: string.
The filename. Will be prefixed with 'barrista_' and '.json' will be
appended.
:param logging: dict of lists.
The two keys in the dict which are used are test, train.
For each of those a list of keys can be provided, those keys
have to be available in the kwargs/cbparams structure.
Usually the required data is provided by the ResultExtractor.
:param base_iter: int or None.
If provided, add this value to the number of iterations. This overrides
the number of iterations retrieved from a loaded JSON log to append to.
:param write_every: int or None.
Write the JSON log every `write_every` iterations. The log is always
written upon completion of the training. If it is None, the log is only
written on completion.
:param create_plot: bool.
If set to True, create a plot at `path` when the JSON log is written with
the name of the JSON file + `_plot.png`. Default: False.
"""
# pylint: disable=too-many-arguments
def __init__(self,
path,
name,
logging,
base_iter=None,
write_every=None,
create_plot=False):
"""See class documentation."""
import json
self.json_package = json
self.json_filename = str(_os.path.join(
path,
'barrista_' + name + '.json'))
if base_iter is None:
self.base_iter = 0
else:
self.base_iter = base_iter
if _os.path.exists(self.json_filename):
with open(self.json_filename, 'r') as infile:
self.dict = self.json_package.load(infile)
if base_iter is None:
for key in ['train', 'test']:
for infdict in self.dict[key]:
if 'NumIters' in infdict:
self.base_iter = max(self.base_iter,
infdict['NumIters'])
_LOGGER.info("Appending to JSON log at %s from iteration %d.",
self.json_filename,
self.base_iter)
else:
self.dict = {'train': [], 'test': [], 'barrista_produced': True}
assert write_every is None or write_every > 0
self._write_every = write_every
self._logging = logging
self._create_plot = create_plot
if self._create_plot:
assert _PLT_AVAILABLE, (
"Matplotlib must be available to use plotting!")
def _initialize_train(self, kwargs):
self._initialize(kwargs)
def _initialize_test(self, kwargs):
self._initialize(kwargs)
def _initialize(self, kwargs): # pylint: disable=unused-argument
for key in list(self._logging.keys()):
assert key in ['train', 'test'], (
'only train and test is supported by this logger')
def _post_test(self, kwargs):
self._post('test', kwargs)
def _post_train_batch(self, kwargs):
self._post('train', kwargs)
def _post(self, phase_name, kwargs): # pylint: disable=C0111
if phase_name not in self._logging: # pragma: no cover
return
if phase_name == 'train':
kwargs['iter'] += kwargs['batch_size']
if (self._write_every is not None and
kwargs['iter'] % self._write_every == 0):
with open(self.json_filename, 'w') as outf:
self.json_package.dump(self.dict, outf)
if self._create_plot: # pragma: no cover
categories = set()
arrs = dict()
for plot_phase_name in ['train', 'test']:
for key in self._logging[plot_phase_name]:
categories.add(key[len(plot_phase_name) + 1:])
arrs[key] = _sorted_ar_from_dict(self.dict[plot_phase_name],
key)
_draw_perfplot(['train', 'test'],
categories,
arrs,
self.json_filename + '_plot.png')
for key in self._logging[phase_name]:
if key in kwargs:
self.dict[phase_name].append({'NumIters':
kwargs['iter'] + self.base_iter,
key: kwargs[key]})
if phase_name == 'train':
kwargs['iter'] -= kwargs['batch_size']
def finalize(self, kwargs): # pylint: disable=W0613
"""Write the json file."""
with open(self.json_filename, 'w') as outf:
self.json_package.dump(self.dict, outf)
if self._create_plot: # pragma: no cover
categories = set()
arrs = dict()
for phase_name in ['train', 'test']:
for key in self._logging[phase_name]:
categories.add(key[len(phase_name) + 1:])
arrs[key] = _sorted_ar_from_dict(self.dict[phase_name], key)
_draw_perfplot(['train', 'test'],
categories,
arrs,
self.json_filename + '_plot.png')
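# Hedged usage sketch (illustration only; path, name and logged keys are
# assumptions): log the train loss every 100 iterations and the test loss on
# each test run, producing barrista_run1.json and optionally a plot beside it.
#     logger = JSONLogger('./logs', 'run1',
#                         logging={'train': ['train_loss'],
#                                  'test': ['test_loss']},
#                         write_every=100, create_plot=False)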
class Checkpointer(Monitor): # pylint: disable=R0903
r"""
Writes the network blobs to disk at certain iteration intervals.
The logger makes use of the following keyword arguments
(\* indicates required):
* ``iter``\*,
* ``net``\*,
* ``batch_size``\*.
:param name_prefix: string or None.
The first part of the output filenames to generate. The prefix '_iter_',
the current iteration, as well as '.caffemodel' are added.
If you are using a caffe version from later than Dec. 2015, caffe's
internal snapshot method is exposed to Python and also snapshots the
solver. If it's available, then this method will be used. However,
in that case, it's not possible to influence the storage location
from Python. Please use the solver parameter ``snapshot_prefix``
when constructing the solver instead (this parameter may be None
and is unused then).
:param iterations: int > 0.
Whenever the current number of iterations is divisible by this value,
the network blobs are written to disk. Hence, it must be a
multiple of the batch size!
"""
def __init__(self,
name_prefix,
iterations,
base_iterations=0):
"""See class documentation."""
assert iterations > 0
_LOGGER.info('Setting up checkpointing with name prefix %s every ' +
'%d iterations.', name_prefix, iterations)
self.name_prefix = name_prefix
self.iterations = iterations
self.created_checkpoints = []
self._base_iterations = base_iterations
# pylint: disable=arguments-differ
def _post_train_batch(self, kwargs, finalize=False):
assert self.iterations % kwargs['batch_size'] == 0, (
'iterations not multiple of batch_size, {} vs {}'.format(
self.iterations, kwargs['batch_size']))
# Prevent double-saving.
if kwargs['iter'] in self.created_checkpoints:
return
if ((kwargs['iter'] + self._base_iterations +
kwargs['batch_size']) % self.iterations == 0 or
finalize):
self.created_checkpoints.append(kwargs['iter'])
# pylint: disable=protected-access
if not hasattr(kwargs['solver']._solver, 'snapshot'): # pragma: no cover
checkpoint_filename = (
self.name_prefix + '_iter_' +
str(int((kwargs['iter'] + self._base_iterations) /
kwargs['batch_size']) + 1) +
'.caffemodel')
_LOGGER.debug("Writing checkpoint to file '%s'.",
checkpoint_filename)
kwargs['net'].save(checkpoint_filename)
else:
# pylint: disable=protected-access
kwargs['solver']._solver.snapshot()
caffe_checkpoint_filename = (self.name_prefix +
'_iter_' +
str(int((kwargs['iter'] + self._base_iterations) /
kwargs['batch_size']) + 1) +
'.caffemodel')
caffe_sstate_filename = (self.name_prefix +
'_iter_' +
str(int((kwargs['iter'] + self._base_iterations) /
kwargs['batch_size']) + 1) +
'.solverstate')
_LOGGER.debug('Writing checkpoint to file "[solverprefix]%s" ' +
'and "[solverprefix]%s".',
caffe_checkpoint_filename,
caffe_sstate_filename)
assert _os.path.exists(caffe_checkpoint_filename), (
"An error occurred checkpointing to {}. File not found. "
"Make sure the `base_iterations` and the `name_prefix` "
"are correct.").format(caffe_checkpoint_filename)
assert _os.path.exists(caffe_sstate_filename), (
"An error occurred checkpointing to {}. File not found. "
"Make sure the `base_iterations` and the `name_prefix` "
"are correct.").format(caffe_sstate_filename)
def finalize(self, kwargs):
"""Write a final checkpoint."""
# Account for the counting on iteration increase for the last batch.
kwargs['iter'] -= kwargs['batch_size']
self._post_train_batch(kwargs, finalize=True)
kwargs['iter'] += kwargs['batch_size']
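# Hedged usage sketch (illustration only; prefix and interval are
# assumptions): write a snapshot every 1000 iterations -- the interval must
# be a multiple of the batch size, as asserted in _post_train_batch above.
#     checkpointer = Checkpointer(name_prefix='./models/net', iterations=1000)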
class GradientMonitor(Monitor):
"""
Tools to keep an eye on the gradient.
Create plots of the gradient. Creates histograms of the gradient for all
``selected_parameters`` and creates an overview plot with the maximum
absolute gradient per layer. If ``create_videos`` is set and ffmpeg is
available, automatically creates videos.
:param write_every: int.
Write every x iterations. Since matplotlib takes some time to run, choose
with care.
:param output_folder: string.
Where to store the outputs.
:param selected_parameters: dict(string, list(int)) or None.
Which parameters to include in the plots. The string is the name of the
layer, the list of integers contains the parts to include, e.g., for a
convolution layer, specify the name of the layer as key and 0 for
the parameters of the convolution weights, 1 for the biases per channel.
The order and meaning of parameter blobs is determined by caffe. If
None, then all parameters are plotted. Default: None.
:param relative: Bool.
If set to True, will give the weights relative to the max absolute weight
in the target parameter blob. Default: False.
:param iteroffset: int.
An iteration offset if training is resumed to not overwrite existing
output. Default: 0.
:param create_videos: Bool.
If set to True, try to create a video using ffmpeg. Default: True.
:param video_frame_rate: int.
The video frame rate.
"""
def __init__(self, # pylint: disable=too-many-arguments
write_every,
output_folder,
selected_parameters=None,
relative=False,
iteroffset=0,
create_videos=True,
video_frame_rate=1):
assert write_every > 0
self._write_every = write_every
self._output_folder = output_folder
self._selected_parameters = selected_parameters
self._relative = relative
self._n_parameters = None
self._iteroffset = iteroffset
self._create_videos = create_videos
self._video_frame_rate = video_frame_rate
def _initialize_train(self, kwargs): # pragma: no cover
assert _PLT_AVAILABLE, (
"Matplotlib must be available to use the GradientMonitor!")
assert self._write_every % kwargs['batch_size'] == 0, (
"`write_every` must be a multiple of the batch size!")
self._n_parameters = 0
if self._selected_parameters is not None:
for name in self._selected_parameters.keys():
assert name in kwargs['net'].params.keys()
for p_idx in self._selected_parameters[name]:
assert p_idx >= 0
assert len(kwargs['net'].params[name]) > p_idx
self._n_parameters += 1
else:
self._selected_parameters = _collections.OrderedDict()
for name in kwargs['net'].params.keys():
self._selected_parameters[name] = range(len(
kwargs['net'].params[name]))
self._n_parameters += len(kwargs['net'].params[name])
# pylint: disable=too-many-locals
def _post_train_batch(self, kwargs): # pragma: no cover
if kwargs['iter'] % self._write_every == 0:
net = kwargs['net']
maxabsupdates = {}
maxabsupdates_flat = []
# Create histograms.
fig, axes = _plt.subplots(nrows=1,
ncols=self._n_parameters,
figsize=(self._n_parameters * 3, 3))
ax_idx = 0
xfmt = _tkr.FormatStrFormatter('%.1e')
for lname in self._selected_parameters.keys():
maxabsupdates[lname] = []
for p_idx in self._selected_parameters[lname]:
if self._relative:
lgradient = (net.params[lname][p_idx].diff /
net.params[lname][p_idx].data.max())
else:
lgradient = net.params[lname][p_idx].diff
maxabsupdates[lname].append(_np.max(_np.abs(lgradient)))
maxabsupdates_flat.append(_np.max(_np.abs(lgradient)))
axes[ax_idx].set_title(lname + ', p%d' % (p_idx))
axes[ax_idx].hist(list(lgradient.flat),
25,
normed=1,
alpha=0.5)
axes[ax_idx].set_xticks(_np.linspace(-maxabsupdates_flat[-1],
maxabsupdates_flat[-1],
num=3))
axes[ax_idx].yaxis.set_visible(False)
axes[ax_idx].xaxis.set_major_formatter(xfmt)
ax_idx += 1
_plt.tight_layout(rect=[0, 0.03, 1, 0.95])
_plt.suptitle("Gradient histograms for iteration %d" % (
kwargs['iter'] + self._iteroffset))
if self._relative:
ghname = self._output_folder + 'gradient_hists_rel_%d.png' % (
(self._iteroffset + kwargs['iter']) /
self._write_every)
else:
ghname = self._output_folder + 'gradient_hists_%d.png' % (
(self._iteroffset + kwargs['iter']) /
self._write_every)
_plt.savefig(ghname)
_plt.close(fig)
# Create the magnitude overview plot.
fig = _plt.figure(figsize=(self._n_parameters * 1, 1.5))
_plt.title("Maximum absolute gradient per layer (iteration %d)" % (
kwargs['iter'] + self._iteroffset))
ax = _plt.gca() # pylint: disable=invalid-name
# pylint: disable=invalid-name
im = ax.imshow(_np.atleast_2d(_np.array(maxabsupdates_flat)),
interpolation='none')
ax.yaxis.set_visible(False)
divider = _make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.05)
_plt.colorbar(im, cax=cax, ticks=_np.linspace(_np.min(maxabsupdates_flat),
_np.max(maxabsupdates_flat),
5))
_plt.tight_layout(rect=[0, 0.03, 1, 0.95])
if self._relative:
gmname = self._output_folder + 'gradient_magnitude_rel_%d.png' % (
(self._iteroffset + kwargs['iter']) /
self._write_every)
else:
gmname = self._output_folder + 'gradient_magnitude_%d.png' % (
(self._iteroffset + kwargs['iter']) /
self._write_every)
_plt.savefig(gmname)
_plt.close(fig)
def finalize(self, kwargs): # pragma: no cover
if self._create_videos:
_LOGGER.debug("Creating gradient videos...")
try:
if not _os.path.exists(_os.path.join(self._output_folder,
'videos')):
_os.mkdir(_os.path.join(self._output_folder, 'videos'))
if self._relative:
rel_add = '_rel'
else:
rel_add = ''
with open(_os.devnull, 'w') as quiet:
_subprocess.check_call([
'ffmpeg',
'-y',
'-start_number', str(0),
'-r', str(self._video_frame_rate),
'-i', _os.path.join(self._output_folder,
'gradient_hists' + rel_add + '_%d.png'),
_os.path.join(self._output_folder,
'videos',
'gradient_hists' + rel_add + '.mp4')
], stdout=quiet, stderr=quiet)
_subprocess.check_call([
'ffmpeg',
'-y',
'-start_number', str(0),
'-r', str(self._video_frame_rate),
'-i', _os.path.join(self._output_folder,
'gradient_magnitude' + rel_add + '_%d.png'),
_os.path.join(self._output_folder,
'videos',
'gradient_magnitude' + rel_add + '.mp4')
], stdout=quiet, stderr=quiet)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error(
"Could not create videos! Error: %s. Is " +
"ffmpeg available on the command line?",
str(ex))
_LOGGER.debug("Done.")
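# Hedged usage sketch (illustration only; the layer name 'conv1' is an
# assumption): plot histograms for the conv1 weights (param 0) and biases
# (param 1) every 200 iterations, relative to the largest absolute weight.
#     grad_mon = GradientMonitor(write_every=200, output_folder='./grad/',
#                                selected_parameters={'conv1': [0, 1]},
#                                relative=True)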
class ActivationMonitor(Monitor):
"""
Tools to keep an eye on the net activations.
Create plots of the net activations. If ``create_videos`` is set and
ffmpeg is available, automatically creates videos.
:param write_every: int.
Write every x iterations. Since matplotlib takes some time to run, choose
with care.
:param output_folder: string.
Where to store the outputs.
:param selected_blobs: list(string) or None.
Which blobs to include in the plots. If
None, then all (4D) blobs are plotted. Default: None.
:param iteroffset: int.
An iteration offset if training is resumed to not overwrite existing
output. Default: 0.
:param sample: dict(string, NDarray(3D)).
A sample to use that will be forward propagated to obtain the activations.
Must contain one for every input layer of the network. Each sample is not
preprocessed and must fit the input. If None, use the existing values
from the blobs.
:param create_videos: Bool.
If set to True, try to create a video using ffmpeg. Default: True.
:param video_frame_rate: int.
The video frame rate.
"""
# pylint: disable=too-many-arguments
def __init__(self, # pragma: no cover
write_every,
output_folder,
selected_blobs=None,
iteroffset=0,
sample=None,
create_videos=True,
video_frame_rate=1):
assert write_every > 0
self._write_every = write_every
self._output_folder = output_folder
self._selected_blobs = selected_blobs
self._n_parameters = None
self._iteroffset = iteroffset
self._create_videos = create_videos
self._video_frame_rate = video_frame_rate
self._sample = sample
def _initialize_train(self, kwargs): # pragma: no cover
assert _PLT_AVAILABLE, (
"Matplotlib must be available to use the ActivationMonitor!")
assert self._write_every % kwargs['batch_size'] == 0, (
"`write_every` must be a multiple of the batch size!")
self._n_parameters = 0
if self._selected_blobs is not None:
for name in self._selected_blobs:
assert name in kwargs['net'].blobs.keys(), (
"The activation monitor should monitor {}, which is not "
"part of the net!").format(name)
self._n_parameters += 1
else:
self._selected_blobs = []
for name in kwargs['net'].blobs.keys():
bshape = kwargs['net'].blobs[name].data.shape
if len(bshape) == 4:
self._selected_blobs.append(name)
self._n_parameters += 1
if self._sample is not None:
for inp_name in self._sample.keys():
assert (kwargs['net'].blobs[inp_name].data.shape[1:] ==
self._sample[inp_name].shape), (
"All provided inputs as `sample` must have the shape "
"of an input blob, starting from its sample "
"dimension. Does not match for %s: %s vs. %s." % (
inp_name,
str(kwargs['net'].blobs[inp_name].data.shape[1:]),
str(self._sample[inp_name].shape)))
# pylint: disable=too-many-locals
def _post_train_batch(self, kwargs): # pragma: no cover
if kwargs['iter'] % self._write_every == 0:
net = kwargs['net']
if self._sample is not None:
for bname in self._sample.keys():
net.blobs[bname].data[-1, ...] = self._sample[bname]
net.forward()
for bname in self._selected_blobs:
blob = net.blobs[bname].data
nchannels = blob.shape[1]
gridlen = int(_np.ceil(_np.sqrt(nchannels)))
fig, axes = _plt.subplots(nrows=gridlen,
ncols=gridlen,
squeeze=False)
bmin = blob[-1].min()
bmax = blob[-1].max()
for c_idx in range(nchannels):
ax = axes.flat[c_idx] # pylint: disable=invalid-name
im = ax.imshow(blob[-1, c_idx], # pylint: disable=invalid-name
vmin=bmin,
vmax=bmax,
cmap='Greys_r',
interpolation='none')
ax.set_title('C%d' % (c_idx))
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(False)
# pylint: disable=undefined-loop-variable
for blank_idx in range(c_idx + 1, gridlen * gridlen):
ax = axes.flat[blank_idx] # pylint: disable=invalid-name
ax.axis('off')
_plt.tight_layout(rect=[0, 0.03, 1, 0.95])
_plt.suptitle("Activations in blob %s (iteration %d)" % (
bname, self._iteroffset + kwargs['iter']))
cbax, cbkw = _colorbar.make_axes([ax for ax in axes.flat])
fig.colorbar(im, cax=cbax, **cbkw)
_plt.savefig(self._output_folder +
'activations_%s_%d.png' % (
bname,
(self._iteroffset + kwargs['iter']) /
self._write_every))
_plt.close(fig)
def finalize(self, kwargs): # pragma: no cover
if self._create_videos:
_LOGGER.debug("Creating activation videos...")
try:
if not _os.path.exists(_os.path.join(self._output_folder,
'videos')):
_os.mkdir(_os.path.join(self._output_folder, 'videos'))
for bname in self._selected_blobs:
with open(_os.devnull, 'w') as quiet:
_subprocess.check_call([
'ffmpeg',
'-y',
'-start_number', str(0),
'-r', str(self._video_frame_rate),
'-i', _os.path.join(self._output_folder,
'activations_' + bname + '_%d.png'),
_os.path.join(self._output_folder,
'videos',
'activations_' + bname + '.mp4')
], stdout=quiet, stderr=quiet)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error(
"Could not create videos! Error: %s. Is " +
"ffmpeg available on the command line?",
str(ex))
_LOGGER.debug("Done.")
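# Hedged usage sketch (illustration only; the blob name 'conv1', the input
# blob 'data' and `fixed_input_image` are assumptions): visualize the conv1
# activations of one fixed input every 500 iterations instead of whatever
# happens to be in the current batch.
#     act_mon = ActivationMonitor(write_every=500, output_folder='./act/',
#                                 selected_blobs=['conv1'],
#                                 sample={'data': fixed_input_image})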
class FilterMonitor(Monitor):
"""
Tools to keep an eye on the filters.
Create plots of the network filters. Creates filter plots for all
``selected_parameters``. If ``create_videos`` is set and ffmpeg is
available, automatically creates videos.
:param write_every: int.
Write every x iterations. Since matplotlib takes some time to run, choose
with care.
:param output_folder: string.
Where to store the outputs.
:param selected_parameters: dict(string, list(int)) or None.
Which parameters to include in the plots. The string is the name of the
layer, the list of integers contains the parts to include, e.g., for a
convolution layer, specify the name of the layer as key and 0 for
the parameters of the convolution weights, 1 for the biases per channel.
The order and meaning of parameter blobs is determined by caffe. If
None, then all parameters are plotted. **Only 4D blobs can be plotted!**
Default: None.
:param iteroffset: int.
An iteration offset if training is resumed to not overwrite existing
output. Default: 0.
:param create_videos: Bool.
If set to True, try to create a video using ffmpeg. Default: True.
:param video_frame_rate: int.
The video frame rate.
"""
# pylint: disable=too-many-arguments
def __init__(self, # pragma: no cover
write_every,
output_folder,
selected_parameters=None,
iteroffset=0,
create_videos=True,
video_frame_rate=1):
assert write_every > 0
self._write_every = write_every
self._output_folder = output_folder
self._selected_parameters = selected_parameters
self._n_parameters = None
self._iteroffset = iteroffset
self._create_videos = create_videos
self._video_frame_rate = video_frame_rate
def _initialize_train(self, kwargs): # pragma: no cover
assert _PLT_AVAILABLE, (
"Matplotlib must be available to use the FilterMonitor!")
assert self._write_every % kwargs['batch_size'] == 0, (
"`write_every` must be a multiple of the batch size!")
self._n_parameters = 0
if self._selected_parameters is not None:
for name in self._selected_parameters.keys():
assert name in kwargs['net'].params.keys()
for p_idx in self._selected_parameters[name]:
assert p_idx >= 0
assert len(kwargs['net'].params[name][p_idx].data.shape) == 4
self._n_parameters += 1
else:
self._selected_parameters = _collections.OrderedDict()
for name in kwargs['net'].params.keys():
self._selected_parameters[name] = []
for pindex in range(len(kwargs['net'].params[name])):
if len(kwargs['net'].params[name][pindex].data.shape) == 4:
self._selected_parameters[name].append(pindex)
self._n_parameters += 1
def _post_train_batch(self, kwargs): # pragma: no cover
if kwargs['iter'] % self._write_every == 0:
net = kwargs['net']
for pname in self._selected_parameters.keys():
for pindex in self._selected_parameters[pname]:
fig = _plt.figure()
param = net.params[pname][pindex].data
border = 2
collected_weights = _np.zeros((param.shape[0] *
(param.shape[2] + border) +
border,
param.shape[1] *
(param.shape[3] + border) +
border), dtype='float32')
pmin = param.min()
pmax = param.max()
# Build up the plot manually because matplotlib is too slow.
for filter_idx in range(param.shape[0]):
for layer_idx in range(param.shape[1]):
collected_weights[border + filter_idx * (param.shape[2] + border):
border + filter_idx * (param.shape[2] + border) +
param.shape[2],
border + layer_idx * (param.shape[3] + border):
border + layer_idx * (param.shape[3] + border) +
param.shape[3]] = (
(param[filter_idx, layer_idx] - pmin)
/ (pmax - pmin))
_plt.imshow(collected_weights,
cmap='Greys_r',
interpolation='none')
ax = _plt.gca() # pylint: disable=invalid-name
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(False)
ax.set_title((
"Values of layer %s, param %d\n" +
"(iteration %d, min %.1e, max %.1e)") % (
pname, pindex, self._iteroffset + kwargs['iter'], pmin, pmax))
_plt.savefig(self._output_folder +
'parameters_%s_%d_%d.png' % (
pname,
pindex,
(self._iteroffset + kwargs['iter']) /
self._write_every))
_plt.close(fig)
def finalize(self, kwargs): # pragma: no cover
if self._create_videos:
_LOGGER.debug("Creating filter videos...")
try:
if not _os.path.exists(_os.path.join(self._output_folder,
'videos')):
_os.mkdir(_os.path.join(self._output_folder, 'videos'))
for pname in self._selected_parameters.keys():
for pindex in self._selected_parameters[pname]:
with open(_os.devnull, 'w') as quiet:
_subprocess.check_call([
'ffmpeg',
'-y',
'-start_number', str(0),
'-r', str(self._video_frame_rate),
'-i', _os.path.join(self._output_folder,
'parameters_' +
pname + '_' +
str(pindex) + '_' +
'%d.png'),
_os.path.join(self._output_folder,
'videos',
'parameters_' +
pname + '_' +
str(pindex) + '.mp4')
], stdout=quiet, stderr=quiet)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error(
"Could not create videos! Error: %s. Is " +
"ffmpeg available on the command line?",
str(ex))
_LOGGER.debug("Done.")
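# Hedged usage sketch (illustration only; the layer name 'conv1' is an
# assumption): plot only the 4D weight blob (index 0) of conv1; biases are
# 1D and would fail the 4D shape assertion in _initialize_train.
#     filt_mon = FilterMonitor(write_every=500, output_folder='./filters/',
#                              selected_parameters={'conv1': [0]})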
|
the-stack_0_12755 | from nets.segnet import convnet_segnet
from PIL import Image
import numpy as np
import random
import copy
import os
class_colors = [[0,0,0],[0,255,0]]
NCLASSES = 2
HEIGHT = 416
WIDTH = 416
model = convnet_segnet(n_classes=NCLASSES,input_height=HEIGHT, input_width=WIDTH)
model.load_weights("logs/ep021-loss0.083-val_loss0.143.h5")
imgs = os.listdir("./img/")
for jpg in imgs:
img = Image.open("./img/"+jpg)
old_img = copy.deepcopy(img)
orininal_h = np.array(img).shape[0]
orininal_w = np.array(img).shape[1]
img = img.resize((WIDTH,HEIGHT))
img = np.array(img)
img = img/255
img = img.reshape(-1,HEIGHT,WIDTH,3)
pr = model.predict(img)[0]
pr = pr.reshape((int(HEIGHT/2), int(WIDTH/2),NCLASSES)).argmax(axis=-1)
seg_img = np.zeros((int(HEIGHT/2), int(WIDTH/2),3))
colors = class_colors
for c in range(NCLASSES):
seg_img[:,:,0] += ( (pr[:,: ] == c )*( colors[c][0] )).astype('uint8')
seg_img[:,:,1] += ((pr[:,: ] == c )*( colors[c][1] )).astype('uint8')
seg_img[:,:,2] += ((pr[:,: ] == c )*( colors[c][2] )).astype('uint8')
seg_img = Image.fromarray(np.uint8(seg_img)).resize((orininal_w,orininal_h))
image = Image.blend(old_img,seg_img,0.3)
image.save("./img_out/"+jpg)
|
the-stack_0_12756 | rhacm_versions = [
('1.0', '7'),
('2.0', '7'),
('2.1', '7'),
('2.2', '7'),
('2.3', '7'),
('1.0', '8'),
('2.0', '8'),
('2.1', '8'),
('2.2', '8'),
('2.3', '8'),
]
def test_rhacm_product_version_count(rhacm_product):
assert len(rhacm_product.product_versions()) == 10
def test_rhacm_product_version_names(rhacm_product):
for index, product_version in enumerate(rhacm_product.product_versions()):
version, release = rhacm_versions[index]
assert product_version.name == 'RHEL-%s-RHACM-%s' % (release, version)
def test_rhacm_product_version_descriptions(rhacm_product):
for index, product_version in enumerate(rhacm_product.product_versions()):
version, release = rhacm_versions[index]
assert product_version.description == 'Red Hat Advanced Cluster ' + \
'Management for Kubernetes %s for RHEL %s' % (version, release)
def test_rhacm_product_version_default_brew_tags(rhacm_product):
for index, product_version in enumerate(rhacm_product.product_versions()):
version, release = rhacm_versions[index]
assert product_version.default_brew_tag == \
'rhacm-%s-rhel-%s-container-candidate' % (version, release)
|
the-stack_0_12757 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
def flatten(array, axis=1, highlevel=True, behavior=None):
"""
Args:
array: Data containing nested lists to flatten.
axis (None or int): If None, the operation flattens all levels of
nesting, returning a 1-dimensional array. Otherwise, it flattens
at a specified depth. The outermost dimension is `0`, followed
by `1`, etc., and negative values count backward from the
innermost: `-1` is the innermost dimension, `-2` is the next
level up, etc.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.layout.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
Returns an array with one level of nesting removed by erasing the
boundaries between consecutive lists. Since this operates on a level of
nesting, `axis=0` is a special case that only removes values at the
top level that are equal to None.
Consider the following doubly nested `array`.
ak.Array([[
[1.1, 2.2, 3.3],
[],
[4.4, 5.5],
[6.6]],
[],
[
[7.7],
[8.8, 9.9]
]])
At `axis=1`, the outer lists (length 4, length 0, length 2) become a single
list (of length 6).
>>> print(ak.flatten(array, axis=1))
[[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6], [7.7], [8.8, 9.9]]
At `axis=2`, the inner lists (lengths 3, 0, 2, 1, 1, and 2) become three
lists (of lengths 6, 0, and 3).
>>> print(ak.flatten(array, axis=2))
[[1.1, 2.2, 3.3, 4.4, 5.5, 6.6], [], [7.7, 8.8, 9.9]]
There's also an option to completely flatten the array with `axis=None`.
This is useful for passing the data to a function that doesn't care about
nested structure, such as a plotting routine.
>>> print(ak.flatten(array, axis=None))
[1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
Missing values are eliminated by flattening: there is no distinction
between an empty list and a value of None at the level of flattening.
>>> array = ak.Array([[1.1, 2.2, 3.3], None, [4.4], [], [5.5]])
>>> ak.flatten(array, axis=1)
<Array [1.1, 2.2, 3.3, 4.4, 5.5] type='5 * float64'>
As a consequence, flattening at `axis=0` does only one thing: it removes
None values from the top level.
>>> ak.flatten(array, axis=0)
<Array [[1.1, 2.2, 3.3], [4.4], [], [5.5]] type='4 * var * float64'>
As a technical detail, the flattening operation can be trivial in a common
case, #ak.layout.ListOffsetArray in which the first `offset` is `0`.
In that case, the flattened data is simply the array node's `content`.
>>> array.layout
<ListOffsetArray64>
<offsets><Index64 i="[0 4 4 6]" offset="0" length="4"/></offsets>
<content><ListOffsetArray64>
<offsets><Index64 i="[0 3 3 5 6 7 9]" offset="0" length="7"/></offsets>
<content>
<NumpyArray format="d" shape="9" data="1.1 2.2 3.3 4.4 5.5 6.6 7.7 8.8 9.9"/>
</content>
</ListOffsetArray64></content>
</ListOffsetArray64>
>>> np.asarray(array.layout.content.content)
array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
However, it is important to keep in mind that this is a special case:
#ak.flatten and `content` are not interchangeable!
"""
with ak._v2._util.OperationErrorContext(
"ak._v2.flatten",
dict(array=array, axis=axis, highlevel=highlevel, behavior=behavior),
):
return _impl(array, axis, highlevel, behavior)
def _impl(array, axis, highlevel, behavior):
layout = ak._v2.operations.convert.to_layout(
array, allow_record=False, allow_other=False
)
nplike = ak.nplike.of(layout)
if axis is None:
out = layout.completely_flatten(function_name="ak.flatten")
assert isinstance(out, tuple) and all(
isinstance(x, nplike.ndarray) for x in out
)
out = ak._v2.contents.NumpyArray(nplike.concatenate(out))
elif axis == 0 or layout.axis_wrap_if_negative(axis) == 0:
def apply(layout):
if layout.is_UnknownType:
return apply(ak._v2.contents.NumpyArray(nplike.array([])))
elif layout.is_IndexedType:
return apply(layout.project())
elif layout.is_UnionType:
if not any(
x.is_OptionType and not isinstance(x, ak._v2.contents.UnmaskedArray)
for x in layout.contents
):
return layout
tags = nplike.asarray(layout.tags)
index = nplike.array(nplike.asarray(layout.index), copy=True)
bigmask = nplike.empty(len(index), dtype=np.bool_)
for tag, content in enumerate(layout.contents):
if content.is_OptionType and not isinstance(
content, ak._v2.contents.UnmaskedArray
):
bigmask[:] = False
bigmask[tags == tag] = nplike.asarray(
content.mask_as_bool(valid_when=False)
).view(np.bool_)
index[bigmask] = -1
good = index >= 0
return ak._v2.contents.UnionArray(
ak._v2.index.Index8(tags[good]),
ak._v2.index.Index64(index[good]),
layout.contents,
)
elif layout.is_OptionType:
return layout.project()
else:
return layout
out = apply(layout)
return ak._v2._util.wrap(out, behavior, highlevel)
else:
out = layout.flatten(axis)
return ak._v2._util.wrap(out, behavior, highlevel)
|
the-stack_0_12758 | '''
@Author: Kai Song, ks838 _at_ cam.ac.uk
@Notes : This part gives the constants and parameters.
'''
import numpy as np
# parameters for the system
#the inverse temperature
beta = 0.05 # a.u.
mass = 1.0
# ------ params for propagation ------
dt = 2 * pow(10,-3) # time step
# F is displacement of harmonic oscillator : Fx
F = 6
# frequency of nuclear motion
omega = 1
# coupling between electronic state.
Delta = 1
# displacement (1/2 M omega^2 (R + R0 * sigma_z )^2 ) in Hamiltonian H0 . initial state is sampled from e^{-\beta H0}
# R0 = 0 # displacement for initial Hamiltonian
R0 = F / ( mass * np.power(omega,2) )
equil_dt = pow(10,-2) # time step for thermalization of initial state
equil_time = 10
# steps for the equilibrating part
nsteps_equil = int(equil_time / equil_dt )
# steps for the dynamics
dynamics_time = 10
nsteps_dynamics = int(dynamics_time / dt )
print_time = 0.1
nsteps_print = int(print_time / dt )
# --------- for electronic state --------
n_electronic_state = 2
# -------- for the n beads ----------
# for simple potential forms (e.g., a double-well form), n_beads < 10 is
# enough; for a harmonic form, n_beads = 1 is enough
n_beads = 16 # should be an even number in our settings
omega_N = n_beads/beta # we have used hbar = 1 . omega_N = 1/(beta_N * hbar)
beta_N = beta/n_beads
# Normal mode frequency for free ring polymer. See eq.(36) in Ceriotti et al. J. Chem. Phys. 133, 124104 2010.
omegak = np.zeros(n_beads)
for i_bead in range(n_beads):
omegak[i_bead] = 2* omega_N * np.sin(i_bead*np.pi/n_beads)
#------ parameter for Ceriotti thermostatting-----
tau0 = 0.7 # an input parameter for tuning the efficiency
# The number of samplings (from the thermostatting).
# Typically, we need ~10^4 to get converged results.
# We start with a small number here for testing
n_samplings = 100
# sampling initial momentum and coordinate.
mu_p = 0.0
sigma_p = np.sqrt(mass*n_beads/beta)
mu_q = 0.0
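# Hedged illustration (not part of the original parameter file): with the
# values above, the initial bead momenta would typically be drawn from the
# free ring-polymer Maxwell-Boltzmann distribution, e.g.
#     p_init = np.random.normal(mu_p, sigma_p, size=(n_samplings, n_beads))
# since sigma_p = sqrt(mass * n_beads / beta) is the per-bead width for
# beta_N = beta / n_beads.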
|
the-stack_0_12759 | import os,time,math,sys,json,re,string,json
import importlib
import get_dataflow
import pandas as pd
import joblib
import json
import requests
import bs4
import lxml
from sklearn.ensemble import RandomForestClassifier
from nltk.tokenize import word_tokenize
stdlib=['string','re','difflib','textwrap','unicodedata','stringprep','readline','rlcompleter',
'struct','codecs','datatime','calendar','collections','collections.abc','heapq','bisect',
'array','weakref','types','copy','pprint','reprlib','enum','numbers','math','cmath',
'decimal','fractions','random','statistics','itertools','functools','operator','pathlib',
'os.path','fileinput','stat','filecmp','tempfile','glob','fnmatch','linecache','shutil',
'pickle','copyreg','shelve','marshal','dbm','sqlite3','zlib','gzip','bz2','lzma','zipfile',
'tarfile','csv','configparser','netrc','xdrlib','plistlib','hashlib','hmac','secrets',
'os','io','time','argparse','getopt','logging','logging.config','logging.handlers',
'getpass','curses','curses.textpad','curses.ascii','curses.panel','platform','errno',
'ctypes','threading','multiprocessing','multiprocessing.shared_memory','concurrent',
'concurrent.futures','subprocess','sched','queue','_thread','_dummy_thread','dummy_threading',
'contextvars','asyncio','socket','ssl','select','selectors','asyncore','asynchat','signal',
'mmap','email','json','mailcap','mailbox','mimetypes','base64','binhex','binascii',
'quopri','uu','html','html.parser','html.entities','xml','webbrowser','xml.etree.ElementTree',
'xml.dom','xml.dom.minidom','xml.dom.pulldom','xml.sax','xml.sax.handler','xml.sax.saxutils',
'xml.sax.xmlreader','xml.parsers.expat','cgi','cgitb','wsgiref','urllib','urllib.request',
'urllib.response','urllib.parse','urllib.error','urllib.robotparser','http','http.client',
'ftplib','poplib','imaplib','nntplib','smtplib','smtpd','telnetlib','uuid','socketserver',
'http.server','http.cookies','http.cookiejar','xmlrpc','xmlrpc.client','xmlrpc.server',
'ipaddress','audioop','aifc','sunau','wave','chunk','colorsys','imghdr','sndhdr','ossaudiodev',
'gettext','locale','turtle','cmd','shlex','tkinter','tkinter.ttk','tkinter.tix','tkinter.scrolledtext',
'typing','pydoc','doctest','unittest','unittest.mock','unittest.mock','test','test.support',
'test.support.script_helper','bdb','faulthandler','pdb','timeit','trace','tracemalloc','distutils',
'ensurepip','venv','zipapp','sys','sysconfig','builtins','__main__','warnings','dataclasses',
'contextlib','abc','atexit','traceback','__future__','gc','inspect','site','code','codeop','zipimport',
'pkgutil','modulefinder','runpy','importlib','ast','symtable','symbol','token','keyword',
'tokenize','tabnanny','pyclbr','py_compile','compileall','dis','pickletools','formatter','msilib',
'msvcrt','winreg','winsound','posix','pwd','spwd','grp','crypt','termios','tty','pty','fcntl','pipes',
'resource','nis','optparse','imp']
topk_array = [0,0,0,0,0,0,0]
num_of_apis = 0
class ShowProcess():
i = 0
max_steps = 0
max_arrow = 50
infoDone = 'done'
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
num_arrow = int(self.i * self.max_arrow / self.max_steps)
num_line = self.max_arrow - num_arrow
percent = self.i * 100.0 / self.max_steps
process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
+ '%.2f' % percent + '%' + '\r'
sys.stdout.write(process_bar)
sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
def get_file_path(root_path,file_list,dir_list):
global ret_list
dir_or_files = os.listdir(root_path)
for dir_file in dir_or_files:
dir_file_path = os.path.join(root_path,dir_file)
if os.path.isdir(dir_file_path):
dir_list.append(dir_file_path)
get_file_path(dir_file_path,file_list,dir_list)
elif dir_file_path.endswith('.py') and not dir_file_path.endswith('tmp.py'):
#print(dir_file_path)
ret_list.append(dir_file_path)
file_list.append(dir_file_path)
def GetMiddleStr(content,startStr,endStr):
startIndex = content.index(startStr)
if startIndex>=0:
startIndex += len(startStr)
endIndex = content.index(endStr)
return content[startIndex:endIndex]
def get_module_funcs(modulename):
modulename=modulename.strip()
flag=0
ms=[]
for curapi in cur_apis:
items=curapi.split('.')
if modulename+'.' in curapi or curapi.startswith(modulename+'.'):
#print('yes!',curapi)
api=items[-1]
ms.append(api)
flag=1
if flag==1:
ms=list(set(ms))
return {modulename:ms}
#print(modulename)
rootmodule=''
try:
module=importlib.import_module(modulename)
except Exception:
if '.' in modulename:
index=modulename.find('.')
rootmodule=modulename[:index]
os.system('pip3 install '+rootmodule)
else:
os.system('pip3 install '+modulename)
try:
module=importlib.import_module(modulename)
except Exception as err:
print(err)
return {}
ms=dir(module)
return {modulename:ms}
def get_alias_funcs(modulename,alias):
modulename=modulename.strip()
flag=0
ms=[]
for curapi in cur_apis:
items=curapi.split('.')
if modulename+'.' in curapi or curapi.startswith(modulename+'.'):
#print('yes!',curapi)
api=items[-1]
ms.append(api)
flag=1
if flag==1:
ms=list(set(ms))
return {alias:ms}
#print(modulename)
rootmodule=''
try:
module=importlib.import_module(modulename)
except Exception:
if '.' in modulename:
index=modulename.find('.')
rootmodule=modulename[:index]
os.system('pip3 install '+rootmodule)
else:
os.system('pip3 install '+modulename)
try:
module=importlib.import_module(modulename)
except Exception as err:
print(err)
return {}
ms=dir(module)
return {alias:ms}
def GetMiddleStr(content,startStr,endStr):
startIndex = content.index(startStr)
if startIndex>=0:
startIndex += len(startStr)
endIndex = content.index(endStr)
return content[startIndex:endIndex]
def get_alias_item(modulename,itname,aliasname):
modulename=modulename.strip()
flag=0
ms=[]
for curapi in cur_apis:
items=curapi.split('.')
if modulename+'.'+itname in curapi or curapi.startswith(modulename+'.'+itname):
#print('yes!',curapi)
api=items[-1]
ms.append(api)
flag=1
if flag==1:
ms=list(set(ms))
return {aliasname:ms}
#print(modulename,itname)
rootmodule=''
submodule=''
try:
module=importlib.import_module(modulename)
except Exception:
try:
if '.' in modulename:
index=modulename.find('.')
rootmodule=modulename[:index]
os.system('pip3 install '+rootmodule)
else:
os.system('pip3 install '+modulename)
module=importlib.import_module(modulename)
except Exception:
try:
submodule=importlib.import_module(modulename+'.'+itname)
return {aliasname:dir(submodule)}
except Exception as err:
print(err)
return {}
try:
item=getattr(module,itname)
return {aliasname:dir(item)}
except Exception:
try:
submodule=importlib.import_module(modulename+'.'+itname)
return {aliasname:dir(submodule)}
except Exception as err:
print(err)
return {}
def get_item_methods(modulename,itname):
modulename=modulename.strip()
flag=0
ms=[]
for curapi in cur_apis:
items=curapi.split('.')
if modulename+'.'+itname in curapi or curapi.startswith(modulename+'.'+itname):
#print('yes!',curapi)
api=items[-1]
ms.append(api)
flag=1
if flag==1:
ms=list(set(ms))
return {modulename:ms}
#print(modulename,itname)
rootmodule=''
submodule=''
try:
module=importlib.import_module(modulename)
except Exception:
try:
if '.' in modulename:
index=modulename.find('.')
rootmodule=modulename[:index]
os.system('pip3 install '+rootmodule)
else:
os.system('pip3 install '+modulename)
module=importlib.import_module(modulename)
except Exception:
try:
submodule=importlib.import_module(modulename+'.'+itname)
return {itname:dir(submodule)}
except Exception as err:
print(err)
return {}
try:
item=getattr(module,itname)
return {itname:dir(item)}
except Exception:
try:
submodule=importlib.import_module(modulename+'.'+itname)
return {itname:dir(submodule)}
except Exception as err:
print(err)
return {}
def deal_with_current_module(modulename,file,names):
modulename=modulename.strip()
#current_file='/home/user/PRIAN/targetProj/abu/abupy/TLineBu/ABuTLExecute.py'
current_file=file
layer=0
for c in modulename:
if c=='.':
layer+=1
else:
break
#print(layer)
ls7=current_file.split('/')
newdirs=ls7[:(0-layer)]
newdir=''
for d in newdirs:
newdir+=d+'/'
realdir=newdir
#print(realdir)
newdir=newdir+'end'
rootdir=GetMiddleStr(newdir,root_path,'/end')
if modulename=='.':
rootmodule=re.sub('/','.',rootdir)
else:
rootmodule=re.sub('/','.',rootdir)+'.'+modulename[layer:]
#print("Note!",rootmodule)
ret={}
for n in names:
x=get_item_methods(rootmodule,n)
ret.update(x)
return ret
def get_item_funcs(rootmodule,module,item):
try:
module1=importlib.import_module(module)
except Exception:
try:
os.system('pip3 install '+rootmodule)
module1=importlib.import_module(module)
except Exception:
try:
submodule=importlib.import_module(module+'.'+item)
return {item:dir(submodule)}
except Exception as err:
print(err)
return {}
try:
it=getattr(module1,item)
return {item:dir(it)}
except Exception:
try:
submodule=importlib.import_module(module+'.'+item)
return {item:dir(submodule)}
except Exception as err:
print(err)
return {}
def get_real_module(modulename,file):
current_file=file
layer=0
for c in modulename:
if c=='.':
layer+=1
else:
break
#print(layer)
ls7=current_file.split('/')
newdirs=ls7[:(0-layer)]
newdir=''
for d in newdirs:
newdir+=d+'/'
realdir=newdir
#print(realdir)
newdir=newdir+'end'
rootdir=GetMiddleStr(newdir,root_path,'/end')
if modulename=='.':
rootmodule=re.sub('/','.',rootdir)
else:
rootmodule=re.sub('/','.',rootdir)+'.'+modulename[layer:]
#print("Note!",rootmodule)
return rootmodule
def get_module_methods(file):
modulemethods=[]
all_candidates={}
with open(file) as f:
lines=f.readlines()
for line in lines:
line=line.strip()
#in most cases, we choose to get all fuctions of the module imported directly using inspect
#maybe need all classes and all methods of the classes in the module
if re.match('import [a-zA-Z0-9\.\_\,\s]+$',line) and ' as ' not in line:
#print(1,line)
modulename=line.split('import')[-1].strip()
if ',' not in modulename:
x1=get_module_funcs(modulename)
all_candidates.update(x1)
else:
ls3=modulename.split(',')
#global all_candidates
for j in ls3:
itemname=j.strip()
x2=get_module_funcs(itemname)
all_candidates.update(x2)
#should choose another example
elif re.match('import [a-zA-Z0-9\.\_\,]+ as [a-zA-Z0-9\.\_\,\s]+$',line):
#print(2,line)
if ',' not in line:
modulename=GetMiddleStr(line,'import',' as ').strip()
alias=line.split(' as ')[-1].strip()
#print(modulename,alias)
x3=get_alias_funcs(modulename,alias)
#global all_candidates
all_candidates.update(x3)
#many combing methods, checked by ','
else:
body=line.split('import')[-1].strip()
#print("multias:",body)
mas=body.split(',')
#print(mas)
for ma in mas:
if ' as ' in ma:
ls4=ma.split(' as ')
maname=ls4[0].strip()
aliasname=ls4[1].strip()
#print(maname,aliasname)
x4=get_alias_funcs(maname,aliasname)
#global all_candidates
all_candidates.update(x4)
else:
maname=ma.strip()
#print(maname)
x5=get_module_funcs(maname)
#global all_candidates
all_candidates.update(x5)
elif re.match('from [a-zA-Z0-9\.\_]+ import [a-zA-Z0-9\_\.\*\,\s]+$',line) and 'as' not in line:
#print(3,line)
modulename=GetMiddleStr(line,'from','import').strip()
itemname=line.split('import')[-1].strip()
names=[]
if ',' in itemname:
ns=itemname.split(',')
for n in ns:
names.append(n.strip())
else:
names.append(itemname)
#print(modulename,names)
if modulename.startswith('.'):
#print(modulename)
#print(file)
x6=deal_with_current_module(modulename,file,names)
#global all_candidates
all_candidates.update(x6)
continue
'''
firmname=modulename.split('.')[0]
if firmname==curmodule:
print("current module:",modulename)
deal_with_current_module(modulename,names)
continue
#need other ops get all methods defined in modules
#try1:copy the current proj to root path
'''
for n in names:
x7=get_item_methods(modulename,n)
#global all_candidates
all_candidates.update(x7)
elif re.match('from [a-zA-Z0-9\.\_]+ import [a-zA-Z0-9\_\.\*\,]+ as [a-zA-Z0-9\_\.\*\,\s]+$',line):
#print(4,line)
modulename=GetMiddleStr(line,'from','import').strip()
if modulename.startswith('.'):
#print(modulename)
#print(4,file)
modulename=get_real_module(modulename,file)
#continue
#print(modulename)
#need other ops to change the modulename as absmodule
itemname=line.split('import')[-1]
#print(modulename,itemname)
if ',' not in itemname:
lsx=itemname.split(' as ')
if len(lsx)<2:
continue
itname=lsx[0].strip()
aliasname=lsx[1].strip()
x8=get_alias_item(modulename,itname,aliasname)
#global all_candidates
all_candidates.update(x8)
else:
ls5=itemname.split(',')
for it in ls5:
if ' as ' not in it:
itname=it.strip()
x9=get_item_methods(modulename,itname)
#global all_candidates
all_candidates.update(x9)
else:
itname=it.split(' as ')[0].strip()
aliasname=it.split(' as ')[1].strip()
x10=get_alias_item(modulename,itname,aliasname)
#global all_candidates
all_candidates.update(x10)
#pass
#else:
#print('SyntaxError: invalid syntax')
#print(all_candidates)
return all_candidates
def get_caller(rec):
nrec=re.sub('\(.*\)','',rec)
pindex=nrec.rfind('.')
return nrec[:pindex]
def check(newcontext):
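# Descriptive note (added): bracket-balancing heuristic. Only the code from
# the innermost enclosing 'def' onward is inspected; string literals are
# stripped, unmatched '(' '[' '{' are collected, and the missing closing
# brackets are appended to the full context so that it parses.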
ls=newcontext.split('\n')
i=0
for i in range(len(ls)-1,-1,-1):
if ls[i].strip().startswith('def'):
break
nc=''
for j in range(i,len(ls)):
nc+=ls[j]+'\n'
#nc=newcontext
#print(nc)
nc=re.sub('\'[\\\[\]\(\)\{\}A-Za-z0-9_\,\:]+\'','',nc)
nc=re.sub('\"[\\\[\]\(\)\{\}A-Za-z0-9_\,\:]+\"','',nc)
lk=nc.count('(')
rk=nc.count(')')
ll=nc.count('[')
rl=nc.count(']')
ld=nc.count('{')
rd=nc.count('}')
kc=lk-rk
lc=ll-rl
dc=ld-rd
addc=''
#print(kc,lc,dc)
if kc==lc==dc==0:
return newcontext
else:
ks=''
#print(nc)
for i in range(0,len(nc)):
c=nc[i]
if re.match('[\(\)\[\]\{\}]',c):
ks+=c
#print(ks)
while('{}' in ks or '[]' in ks or '()' in ks):
while '()' in ks:
ks=re.sub('\[\]','',ks)
ks=re.sub('\{\}','',ks)
ks=re.sub('\(\)','',ks)
while '[]' in ks:
ks=re.sub('\{\}','',ks)
ks=re.sub('\(\)','',ks)
ks=re.sub('\[\]','',ks)
while '{}' in ks:
ks=re.sub('\[\]','',ks)
ks=re.sub('\(\)','',ks)
ks=re.sub('\{\}','',ks)
#print(ks)
for i in range(len(ks)-1,-1,-1):
if ks[i]=='(':
addc+=')'
elif ks[i]=='[':
addc+=']'
else:
addc+='}'
#print(newcontext)
#sys.exit(0)
#x=re.sub('return ','',newcontext+addc)
return newcontext+addc
def get_type(finalc,file):
lindex=file.rfind('/')
tmp=file[:lindex]+'/tmp.py'
with open(tmp,'w+') as f:
f.write(finalc)
#with open(tmp2,'w+') as f2:
#f2.write(finalc)
try:
#os.system('pytype '+tmp)
os.system('pytype '+tmp+' > log.txt')
#os.system('rm '+tmp)
except Exception:
sys.exit()
with open('log.txt') as f:
lines=f.readlines()
vtype='None'
for line in lines:
if '[reveal-type]' in line:
tp=line.split(':')[1]
vtype=re.sub('\[reveal\-type\]','',tp)
#print(vtype)
break
#if '[python-compiler-error]' in line:
#sys.exit()
global Nonenum,Anynum,OKnum
if vtype=='None':
#print(tmp)
#sys.exit()
Nonenum+=1
elif vtype=='Any' or vtype=='nothing':
Anynum+=1
else:
OKnum+=1
return vtype
def get_bank(line):
ip=0
for ip in range(0,len(line)):
if line[ip]!=' ':
break
return (line[:ip],ip)
def check_try(code,trycache):
#print(trycache)
ret=code
#l=sorted(trycache)
#print(l)
for i in range(len(trycache)-1,-1,-1):
ret+='\n'+trycache[i][0]+'except Exception:\n'+trycache[i][0]+' '+'pass'
return ret
def get_curr_apis(ft,file):
#print('Note! ',ft,file)
tmp_file=re.sub(root_path,'',file)
rmodule=re.sub('\/','.',tmp_file)
rmodule=rmodule[:-3]
#print("Note!",rmodule)
ret=get_item_methods(rmodule,ft)
#print('Note! ',ret)
return ret
def get_typeshed_apis(ft):
ret=[]
ft=ft.strip()
ft=re.sub('\[.*\]','',ft)
with open('typeshed.txt') as f:
lines=f.readlines()
s1='.'+ft+'.'
s2=ft+'.'
for line in lines:
if s1 in line or line.startswith(s2):
#print('Find typeshed: '+line.strip())
s3=line.strip()
index=s3.rfind('.')
s4=s3[index+1:]
if not s4 in ret:
ret.append(s4)
return ret
#inferred type, caller
def get_candidates(ft,caller,file):
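# Descriptive note (added): map the type string inferred by pytype (e.g.
# 'module', 'str', 'List[...]', 'Union[...]', a dotted module path, or a
# project-local class name) to the list of candidate APIs that may follow
# `caller`; falls back to all known APIs for Any/None/nothing.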
if ft.startswith('Type['):
ft=ft[5:-1]
print('type:',ft)
candidates={}
global if_from_current_proj
if_from_current_proj=1
if ft=='module':
for k,v in module_apis.items():
if k==caller:
candidates={caller:v}
#print(candidates)
return candidates
candidates=get_module_funcs(caller)
elif ft=='str':
candidates={caller:dir(str)}
elif re.match('List\[.*\]',ft):
candidates={caller:dir(list)}
elif re.match('Dict\[.*\]',ft):
apsx=dir(dict)
apsx.append('iteritems')
candidates={caller:apsx}
elif ft=='set' or re.match('Set\[.*\]',ft):
candidates={caller:dir(set)}
elif ft.endswith('[str]'):
candidates=get_candidates(ft[:-5],caller,file)
elif ft=='bool':
candidates={caller:dir(bool)}
elif re.match('Union\[.*\]',ft):
ft=ft+'end'
contents=GetMiddleStr(ft,'Union[',']end')
contents=re.sub('\[.*\]','',contents)
lss=contents.split(',')
tmp=[]
for k in lss:
#print('Note!!')
k=k.strip()
#print(k)
if k=='Any' or k=='nothing':
continue
tpdic=get_candidates(k,caller,file)
for k,v in tpdic.items():
tmp.extend(v)
if_from_current_proj=0
candidates={caller:tmp}
elif re.match('Optional\[.*\]',ft):
#ft=ft+'end'
#contents=GetMiddleStr(ft,'Optional[',']end')
#contents=re.sub('\[.*\]','',contents)
#candidates=get_candidates(ft,caller,file)
candidates={}
if_from_current_proj=0
#elif tuple int float since we haven't found these kinds of caller templely ignore.
#elif re.match('Pattern\[.*\]',ft):
#candidates={caller:dir(re.Pattern)}
#elif re.match('Match\[.*\]',ft):
#candidates={caller:dir(re.Match)}
elif '.' in ft:
index=ft.rfind('.')
module=ft[:index]
item=ft[index+1:]
rindex=ft.find('.')
rootmodule=ft[:rindex]
candidates=get_item_funcs(rootmodule,module,item)
elif ft=='Any' or ft=='None' or ft=='nothing':
candidates=get_all_apis()
if_from_current_proj=0
#print('Note!All types:')
#print(candidates)
return candidates
elif re.match('[a-zA-Z0-9_]+',ft):
        #since in many cases the caller calls funcs defined after the caller, we copy the original file into the python lib to get candidates.
candidates=get_curr_apis(ft,file)
#print('Other types: '+ft)
if len(candidates)==0:
typeshed_apis=get_typeshed_apis(ft)
candidates.update({caller:typeshed_apis})
#else:
#if_from_current_proj=1
for k,v in candidates.items():
dag=[]
#print('yes')
#print(v,len(v))
for j in range(0,len(v)):
#print(j)
if not v[j].startswith('__'):
dag.append(v[j])
#print("yes")
#print(dag)
candidates[k]=dag
#print(candidates)
return candidates
def get_callee(rec):
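    """Split a call expression and return (callee name, callee name including its argument list)."""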
nrec=re.sub('\(.*\)','',rec)
pindex=nrec.rfind('.')
return nrec[pindex+1:],rec[pindex+1:]
def get_total(w,naming_context,files):
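    """Total occurrence count of token `w` across the dependent files plus the current naming context."""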
ret=0.0
#print(w)
for fi in files:
key=w+'##'+fi
if key in proj_token_count:
ret+=proj_token_count[key]
ret+=naming_context.count(w)
#print(ret)
#sys.exit(0)
return ret
def get_conum(w,n,naming_context,files):
ret=0.0
for fi in files:
k1=w+'##'+fi
k2=n+'##'+fi
if k1 in proj_token_no and k2 in proj_token_no:
x1=proj_token_no[k1]
y1=proj_token_no[k2]
ctis=[x for x in x1 if x in y1]
ret+=float(len(ctis))
return ret
def get_conum_of_line(api,naming_line,naming_context,files):
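    """Score `api` against the current line: co-occurrence of the API's sub-tokens with the
    line's tokens across dependent files, normalised by the API tokens' total frequency."""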
del_estr = string.punctuation + string.digits
replace = " "*len(del_estr)
tran_tab = str.maketrans(del_estr, replace)
tmp=naming_line.translate(tran_tab)
nl=word_tokenize(tmp)
cs=api.translate(tran_tab)
wcs=word_tokenize(cs)
#print(api,wcs,naming_line,nl)
#sys.exit(0)
total=0.0
conum=0.0
score=0.0
#print(wcs,nl)
    #TODO: this has become too repetitive and should be simplified!
for w in wcs:
total=total+get_total(w,naming_context,files)
#print(1)
for n in nl:
conum+=get_conum(w,n,naming_context,files)
if total!=0:
total=float(total)
conum=float(conum)
score=float( conum / total )
return score
#proj_tokens
#proj_depends
def get_line_scores(aps,naming_line,naming_context,file):
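    """Compute the line co-occurrence score for every candidate API, skipping dunder and
    constant-like names; only files that do not import the current module are considered."""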
line_scores={}
tokens=[]
fi=re.sub('\.py','',file)
index=fi.rfind('/')
curname=fi[index+1:]
#print(curname)
files=[]
for k,v in proj_depends.items():
if k==file:
continue
#print(k)
flag=0
for imports in v:
#print
if curname in imports:
#print(imports)
flag=1
break
if flag==0:
#print(proj_tokens[k])
#sys.exit(0)
files.append(k)
#print(tokens)
for api in aps:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
#process_bar.show_process()
continue
line_ret=get_conum_of_line(api,naming_line,naming_context,files)
line_scores[api]=line_ret
return line_scores
def get_total_infile(w,files):
ret=0.0
for fi in files:
key=w+'##'+fi
if key in proj_token_count:
ret+=1.0
return ret
def get_conum_infile(w,item,files):
ret=0.0
for fi in files:
k1=w+'##'+fi
k2=item+'##'+fi
if k1 in proj_token_no and k2 in proj_token_no:
ret+=1.0
return ret
def get_conum_of_con(api,naming_context,files):
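    """Score `api` against the whole preceding context, weighting lines closer to the
    recommendation point more heavily."""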
code=naming_context.strip()
lines=code.split('\n')
del_estr = string.punctuation + string.digits
replace = " "*len(del_estr)
tran_tab = str.maketrans(del_estr, replace)
rets=0.0
for i in range(0,len(lines)):
tmp=lines[i].translate(tran_tab)
nl=word_tokenize(tmp)
cs=api.translate(tran_tab)
wcs=word_tokenize(cs)
total=0.0
#print(wcs,nl)
for w in wcs:
total=total+get_total_infile(w,files)
conum=0.0
for w in wcs:
for item in nl:
conum=conum+get_conum_infile(w,item,files)
if total!=0:
total=float(total)
conum=float(conum)
score=float( conum / total )
rets+=float(i+1)*score
context_ret=float(float(rets) / float(len(lines)+1.0))
return context_ret
def get_conum_scores(aps,naming_context,file):
conum_scores={}
fi=re.sub('\.py','',file)
index=fi.rfind('/')
curname=fi[index+1:]
#print(curname)
files=[]
for k,v in proj_depends.items():
if k==file:
continue
#print(k)
flag=0
for imports in v:
#print
if curname in imports:
#print(imports)
flag=1
break
if flag==0:
files.append(k)
for api in aps:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
con_ret=get_conum_of_con(api,naming_context,files)
conum_scores[api]=con_ret
return conum_scores
def get_results(arr):
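    """Print and return top-k accuracy (k = 1, 2, 3, 4, 5, 10) and MRR for a list of ranks."""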
print('Ranks :'+str(arr))
    # Count how many ranks fall inside each cut-off: top-k is |{r in arr : r <= k}|.
    mrr=0.0
    for r in arr:
        mrr+=float(1.0/float(r))
    top1=sum(1 for r in arr if r<=1)
    top2=sum(1 for r in arr if r<=2)
    top3=sum(1 for r in arr if r<=3)
    top4=sum(1 for r in arr if r<=4)
    top5=sum(1 for r in arr if r<=5)
    top10=sum(1 for r in arr if r<=10)
    top20=sum(1 for r in arr if r<=20)
tp1=float(top1/len(arr))
tp2=float(top2/len(arr))
tp3=float(top3/len(arr))
tp4=float(top4/len(arr))
tp5=float(top5/len(arr))
tp10=float(top10/len(arr))
tp20=float(top20/len(arr))
mrr=float(mrr/float(len(arr)))
print("Top-k:",top1,top2,top3,top4,top5,top10,top20,len(arr))
print("Top-k+mrr:",tp1,tp2,tp3,tp4,tp5,tp10,tp20,mrr)
    # Persist the per-project summary before returning.
    s=str(tp1)+','+str(tp2)+','+str(tp3)+','+str(tp4)+','+str(tp5)+','+str(tp10)+','+str(tp20)+','+str(mrr)+'\n'
    with open('testdata/'+CURRENT_PROJ+'_result.txt','w+') as ft:
        ft.write(s)
    return [tp1,tp2,tp3,tp4,tp5,tp10,mrr]
def get_time(ts):
totalt=0.0
for t in ts:
totalt+=t
ret=float(totalt/float(len(ts)))
print('Average time: ',ret)
with open('testdata/'+CURRENT_PROJ+'_result.txt','a+') as ft:
ft.write(str(ret)+'\n')
def get_rec_point(file):
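    """Main per-file driver: walk the file line by line, find method-call recommendation points,
    infer the caller's type, rank candidate APIs with the trained SVM model, and record the
    rank of the actual callee in the global pranks/pinranks/ptimes lists."""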
print('DEAL-WITH:'+file)
#with open('types/types.txt','a+') as ff:
#ff.write('FILE:'+file)
with open(file) as f:
lines=f.readlines()
#print(lines)
precode=''
trynum=0
trycache=[]
kflag=0
lno=0
#s=''
comment_flag=0
calls=[]
for line in lines:
#print(line)
lno+=1
if line.strip().startswith('#'):
continue
if re.match('[bru]*\'\'\'$',line.strip()) or re.match('[bru]*\"\"\"$',line.strip()):
if comment_flag==0:
comment_flag=1
else:
comment_flag=0
continue
elif (re.match('[bru]*\'\'\'',line.strip()) or re.match('[bru]*\"\"\"',line.strip())) and (re.match('.*[bru]*\'\'\'$',line.strip()) or re.match('.*[bru]*\"\"\"$',line.strip())):
continue
elif re.match('[bru]*\'\'\'',line.strip()) or re.match('[bru]*\"\"\"',line.strip()) or re.match('.*[bru]*\'\'\'$',line.strip()) or re.match('.*[bru]*\"\"\"$',line.strip()):
if comment_flag==0:
comment_flag=1
else:
comment_flag=0
continue
if comment_flag==1:
continue
if 'try:' in line:
trynum+=1
trycache.append(get_bank(line))
elif trynum>0 and ('except' in line or 'finally:' in line):
(bank,lenth)=get_bank(line)
for i in range(len(trycache)-1,-1,-1):
if trycache[i][1]==lenth:
trynum-=1
del trycache[i]
recobj=re.findall('[a-zA-Z0-9_\.\[\]]+\.[a-zA-Z0-9\_]+\(.*\)',line)
#print(recobj)
if len(recobj)==0:
precode+=line
continue
#print(file)
#print(recobj)
rec=recobj[0]
caller=get_caller(rec)
if caller.startswith('['):
caller=caller[1:]
callee,rcallee=get_callee(rec)
if callee.startswith('_') or re.match('[A-Z0-9_]+$',callee) or callee.strip()=='_':
precode+=line
continue
cp=caller+'.'+callee
if cp in calls:
precode+=line
continue
else:
calls.append(cp)
i=0
latest_line=line.replace(rcallee,'unknown_api()')
#print('NOTE!',latest_line)
tpp=precode.strip()
if tpp.endswith(','):
newcontext=tpp[:-1]
finalc=check(newcontext)
#print(finalc)
current_context=finalc+'\n'+latest_line
prelast=precode.strip().split('\n')[-1]
for i in range(0,len(prelast)):
if prelast[i]!=' ':
break
finalc+='\n'+line[:i-4]+'reveal_type('+caller+')'
elif tpp.endswith('(') or tpp.endswith('{') or tpp.endswith('['):
newcontext=tpp
finalc=check(newcontext)
current_context=finalc+'\n'+latest_line
#print(finalc)
prelast=precode.strip().split('\n')[-1]
for i in range(0,len(prelast)):
if prelast[i]!=' ':
break
finalc+='\n'+line[:i]+'reveal_type('+caller+')'
else:
for i in range(0,len(line)):
if line[i]!=' ':
break
#print(i)
#print(line)
newcontext=tpp
finalc=check(newcontext)
finalc+='\n'+line[:i]+'reveal_type('+caller+')'
current_context=precode+latest_line
if len(trycache)>0:
finalc=check_try(finalc,trycache)
#print(finalc)
#print('[Process[1] : Preprocessing # Getting reommendation point, simple type inference, possible API candidates and current incomplete code context.]')
#print(file+'#'+str(lno)+'#'+caller+'#'+callee)
#if '.' in caller:
#ft='Any'
#else:
ft=get_type(finalc,file)
ft=ft.strip()
print(line.strip())
print(file+'#'+str(lno)+'#'+caller+':'+ft+'#'+callee)
#print(Nonenum,Anynum,OKnum)
aps=[]
if ft=='None' or ft=='Any':
if caller=='self':
for d in all_defs:
dname=d.strip().split(' ')[1]
aps.append(dname)
elif caller=='str' or caller=='s' or caller=='string':
ft='str'
elif caller=='sys.stderr' or caller=='sys.stdout' or caller=='sys.stdin':
ft='module'
elif caller=='log':
ft='logging.Logger'
caller=ft
elif re.match('for .* in .*\..*\(.*\).*\:',line.strip()):
aps=dir(dict)
aps.append('iteritems')
else:
#tp=caller.split('.')
#fc=tp[0]
if '.' in caller:
xindex=caller.find('.')
fc=caller[:xindex]
xattr=caller[xindex+1:]
else:
xattr=caller
fc=caller
#print('check module:',fc)
#print('check attr:',xattr)
if fc in stdlib:
ft='module'
print('stdlib!',fc)
#print('module!',caller)
try:
module1=importlib.import_module(caller)
aps=dir(module1)
except Exception:
try:
module2=importlib.import_module(fc)
attr=getattr(module2,xattr)
aps=dir(attr)
except Exception:
aps=[]
else:
for curapi in cur_apis:
if '.'+caller+'.' in curapi:
idx=curapi.find('.'+caller+'.')
canapi=curapi[idx+1:]
if not '.' in canapi:
aps.append(canapi)
                            print('get api from json!')
print(canapi)
if len(aps)==0:
apis = get_candidates(ft,caller,file)
for k,v in apis.items():
aps.extend(v)
if len(aps)==0:
precode+=line
continue
global pranks,ptimes,pinranks
if re.match('[A-Z]+[A-Za-z]+',callee) or callee.startswith('_'):
print('CONSTRUCTOR,IGNORE')
precode+=line
continue
if callee in aps:
print('API IV')
else:
print('API OOV')
pranks.append(100)
global all_apis_add,all_apis
all_apis_add.append(callee)
tmpx=all_apis['all_apis']
tmpx.extend(all_apis_add)
tmpx=list(set(tmpx))
all_apis['all_apis']=tmpx
ptimes.append(0.0)
precode+=line
continue
#ss=''
#for ap in aps:
#ss=ss+ap+','
#ss=ss[:-1]+'\n'
#s=caller+':'+ft+'#'+callee+'\n'
s1=time.time()
#print('[Process[2] : Constructing dataflow hints.]')
current_dataflow=get_dataflow.get_current_dataflow2(current_context,caller)
#print(maxflow)
if len(current_dataflow)==0:
precode+=line
continue
maxflow=max(current_dataflow,key=len)
#print(maxflow)
dataflow_scores=get_dataflow.get_dataflow_scores(aps,maxflow,current_dataflow,ft,callee)
tosim_scores=get_dataflow.get_tosim_scores(aps,maxflow,current_dataflow,ft,callee)
try:
naming_line=re.sub(callee,'',line)
except Exception as err:
print(err)
print(line)
sys.exit()
precode+=line
continue
naming_context=precode
line_scores=get_line_scores(aps,naming_line,naming_context,file)
e1=time.time()
print(e1-s1)
label=0
apis=[]
with open('test.csv','w+') as f:
f.write('f1,f2,f3,f4\n')
start=time.time()
if ft=='None' or ft=='Any' or ft=='nothing':
for api in aps:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
if api==callee:
label=1
else:
label=0
apis.append(api)
try:
s=str(dataflow_scores[api])+','+str(tosim_scores[api])+','+str(line_scores[api])+',0.0\n'
with open('test.csv','a+') as f:
f.write(s)
except Exception as err:
print(err)
sys.exit(0)
else:
flag=0
conum_scores=get_conum_scores(aps,naming_context,file)
for api in aps:
if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
continue
if api==callee:
label=1
else:
label=0
apis.append(api)
try:
s=str(dataflow_scores[api])+','+str(tosim_scores[api])+','+str(line_scores[api])+','+str(conum_scores[api])+'\n'
with open('test.csv','a+') as f:
f.write(s)
except Exception as err:
print(err)
sys.exit(0)
test_data=pd.read_csv('test.csv')
#print(apis)
#print(len(apis))
#print(test_data)
clf=joblib.load('traincsv/'+CURRENT_PROJ+'_svm.pkl')
result=clf.predict_proba(test_data)
candidates={}
for i in range(0,len(apis)):
candidates[apis[i]]=result[i][1]
cans=sorted(candidates.items(), key=lambda x: x[1], reverse=True)
#print(cans)
end = time.time()
ts=end - start
print(ts)
print('--------------------------------------------------------------------------------------------------')
print('Recommended Functions for Caller: ' + caller)
print('--------------------------------------------------------------------------------------------------')
lenthk=len(cans)
exists_rec = []
if lenthk > 10:
lenthk = 10
for i in range(0,lenthk):
print(str(i+1)+' : ' + caller + '.' + cans[i][0] + '()')
exists_rec.append(cans[i][0])
rev_cans = sorted(candidates.items(), key=lambda x: x[1])
print('--------------------------------------------------------------------------------------------------')
        print('Functions not Recommended for Caller: ' + caller)
print('--------------------------------------------------------------------------------------------------')
        lenthk=len(rev_cans)
if lenthk > 5:
lenthk = 5
for i in range(0,lenthk):
if rev_cans[i][0] not in exists_rec:
print(str(i+1)+' : ' + caller + '.' + rev_cans[i][0] + '()')
        # Temporarily commented out Google search feature for faster accuracy testing. Uncomment to re-enable.
        """
        print('--------------------------------------------------------------------------------------------------')
        print('Press c to continue or type a number from the recommended function list to search google for more information about the function: ')
        google_input = input()
        while google_input != 'c':
            # Every numeric choice 1-10 runs the same lookup; only the index into cans differs.
            if google_input.isdigit() and 1 <= int(google_input) <= 10:
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[int(google_input) - 1][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            print('--------------------------------------------------------------------------------------------------')
            print('Type another number to search google or press c to continue')
            google_input = input()
"""
rank=21
for k in range(0,len(cans)):
if cans[k][0]==callee:
rank=k+1
#print('Ranked '+str(rank))
if rank > 20:
pranks.append(rank)
#if atag==1:
#aranks.append(rank)
            # Record: PRIAN cannot recommend, jump to the next recommendation.
else:
# PRIAN successfully recommends.
pranks.append(rank)
#if atag==1:
#aranks.append(rank)
ptimes.append(ts)
#alltimes+=ts+'\n'
pinranks.append(rank)
precode+=line
temp_arr = get_results(pinranks)
topk_array[0] += temp_arr[0]
topk_array[1] += temp_arr[1]
topk_array[2] += temp_arr[2]
topk_array[3] += temp_arr[3]
topk_array[4] += temp_arr[4]
topk_array[5] += temp_arr[5]
topk_array[6] += temp_arr[6]
global num_of_apis
if topk_array[5] != 0:
num_of_apis += 1
get_results(pranks)
#get_time(ptimes)
def count_all_apis():
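    """Build the global API vocabulary from module_apis, the project's testJson dump and builtin.txt."""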
#TODO:count all apis,including module_apis,builtin_apis,proj_apis
ret=[]
for k,v in module_apis.items():
for f in v:
if (not f.startswith('__')) and (not re.match('[A-Z0-9]+',f)) and (not f in ret):
ret.append(f)
#print(ret)
with open('testJson/'+CURRENT_PROJ+'.json') as f:
lines=f.readlines()
for line in lines:
line=line.strip()
index=line.rfind('.')
item=line[index+1:]
if (not item.startswith('__')) and (not item in ret):
ret.append(item)
with open('builtin.txt') as f2:
l2=f2.readlines()
for line2 in l2:
it=line2.strip()
if not it in ret:
ret.append(it)
return {'all_apis':ret}
def dealwith(curfile):
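    """Refresh the per-file API tables for `curfile` and run the recommendation loop on it."""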
global module_apis,all_apis
module_apis={}
all_apis={}
module_apis=get_module_methods(curfile)
all_apis=count_all_apis()
tmpx=all_apis['all_apis']
tmpx.extend(all_apis_add)
tmpx=list(set(tmpx))
all_apis['all_apis']=tmpx
get_rec_point(curfile)
def get_all_apis():
return all_apis
def get_proj_tokens(iret_list):
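    """Index every project file: per-file token counts, the line numbers each token occurs on,
    and the import lines per file (used later for dependency-aware scoring)."""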
global proj_token_count,proj_token_no,proj_depends
del_estr = string.punctuation + string.digits
replace = " "*len(del_estr)
tran_tab = str.maketrans(del_estr, replace)
#tmp=lines[i].strip().translate(tran_tab)
#file_label=0
for file in iret_list:
#file_label+=1
with open(file,encoding='ISO-8859-1') as f:
lines=f.readlines()
line_label=0
for i in range(0,len(lines)):
line_label+=1
if lines[i].strip()=='':
continue
elif re.sub(' ','',lines[i].strip())=='':
continue
elif 'import ' in lines[i]:
if file in proj_depends:
imports=proj_depends[file]
else:
imports=[]
imports.append(lines[i])
proj_depends[file]=imports
tmp=lines[i].strip().translate(tran_tab)
tokens=word_tokenize(tmp)
for tk in tokens:
token=tk+'##'+file
if token in proj_token_count:
tcount=proj_token_count[token]
else:
tcount=0
tcount+=lines[i].count(tk)
proj_token_count[token]=tcount
if token in proj_token_no:
no=proj_token_no[token]
else:
no=[]
no.append(line_label)
proj_token_no[token]=no
###main entry###
# if __name__=="main":
# __main__(CURRENT_PROJ,filePath)
ret_list=[]
proj_token_count={}
proj_token_no={}
proj_depends={}
cur_apis=[]
module_apis={}
all_apis={}
pranks=[]
ptimes=[]
pinranks=[]
all_apis_add=[]
root_path=''
Nonenum=Anynum=OKnum=0
all_defs=[]
all_recs=''
#alltimes=''
CURRENT_PROJ='pyspider'
filePath='testdata/'
with open('test.csv','w+') as f:
f.write('')
Nonenum=Anynum=OKnum=0
pranks=[]
ptimes=[]
pinranks=[]
all_apis_add=[]
root_path = filePath+CURRENT_PROJ
print('LOAD-PROJ:',root_path)
file_list, dir_list = [], []
ret_list=[]
get_file_path(root_path,file_list,dir_list)
#ret_list=list(set(ret_list))
print(len(ret_list))
trainlen=int(len(ret_list)/10*9)
#print(trainlen)
train_list=ret_list[:trainlen]
test_list=ret_list[trainlen:]
print(train_list)
print(test_list)
#sys.exit()
#proj_tokens={}
proj_token_count={}
proj_token_no={}
proj_depends={}
get_proj_tokens(ret_list)
module_apis={}
id=0
special_flag=0
if_from_current_proj=0
callps=[]
all_apis={}
#======MAIN FUNC ENTRY======
for ifile in test_list:
dealwith(ifile)
#with open('/home/user/PyART/testdatak/'+CURRENT_PROJ+'_time.txt','w+') as f:
#f.write(str(ptimes))
for x, y in enumerate(topk_array):
topk_array[x] = y/num_of_apis
print("Top K Averages for SVM: Top 1: " + str(topk_array[0]) + " Top 2: " + str(topk_array[1]) + " Top 3: " + str(topk_array[2]) + " Top 4: " + str(topk_array[3]) + " Top 5: " + str(topk_array[4]) + " Top 10: " + str(topk_array[5]) + " MRR: " + str(topk_array[6])) |
the-stack_0_12760 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : System internal tools
Case Name   : Use the pssh tool to connect to the standby node remotely and print the standby node's hostname
Description :
    1. Run pssh -H with all parameters to connect to the standby node remotely and print its hostname
    2. Check the output result file
    3. Check the error result file
    4. Run pssh -H with all parameters to connect to the standby node remotely and print its hostname, using a wrong command
    5. Check the output result file
    6. Check the error result file
    7. Clean up the environment
Expect      :
    1. Execution succeeds; the screen shows the hostname of standby node 1
    2. The result file shows the standby node's hostname
    3. The error result file is empty
    4. A reasonable error is reported
    5. The result file is empty
    6. The error result file shows the error message
    7. Environment cleanup completes
History     :
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
COMMONSH = CommonSH("PrimaryDbUser")
@unittest.skipIf(1 == COMMONSH.get_node_num(), "Not executed on a single-node deployment")
class SystemInternalTools(unittest.TestCase):
def setUp(self):
self.log = Logger()
        self.log.info('-Opengauss_Function_Tools_Pssh_Case0002 starts executing-')
self.PrimaryNode = Node('PrimaryDbUser')
self.Standby_User_Node = Node('Standby1DbUser')
self.IP1 = self.Standby_User_Node.db_host
self.parent_path = os.path.dirname(macro.DB_INSTANCE_PATH)
self.pssh_path = os.path.join(self.parent_path, 'tool', 'script',
'gspylib', 'pssh', 'bin')
self.output_file = os.path.join(self.parent_path, 'output.log')
self.generate_file = os.path.join(self.output_file, self.IP1)
self.err_output_file = os.path.join(self.parent_path, 'err_output.log')
self.generate_err_file = os.path.join(self.err_output_file, self.IP1)
self.expect_result = "bash: hostname123: command not found"
def test_pssh(self):
        text = '--step1: run pssh -H with all parameters to connect to the standby node ' \
               'remotely and print its hostname; expect: execution succeeds; ' \
               'the screen shows the hostname of standby node 1--'
self.log.info(text)
cmd = 'hostname'
check_hostname = self.Standby_User_Node.sh(cmd).result()
self.log.info(check_hostname)
pssh_cmd = f" cd {self.pssh_path};" \
f"source {macro.DB_ENV_PATH};" \
f"python3 pssh " \
f"-H {self.Standby_User_Node.db_host} " \
f"-t 5 " \
f"-p 2 " \
f"-o {self.output_file} " \
f"-e {self.err_output_file} " \
f"-P " \
f"-s " \
f"-i 'echo $HOSTNAME';"
self.log.info(pssh_cmd)
msg = self.PrimaryNode.sh(pssh_cmd).result()
self.log.info(msg)
        self.assertEqual(check_hostname, msg.splitlines()[-1].strip(),
                         'Execution failed: ' + text)
        text = '--step2: check the output result file; expect: the result file shows the standby node hostname--'
self.log.info(text)
cat_cmd = f"cat {self.generate_file}"
self.log.info(cat_cmd)
msg = self.PrimaryNode.sh(cat_cmd).result()
self.log.info(msg)
        self.assertEqual(check_hostname, msg, 'Execution failed: ' + text)
        text = '--step3: check the error result file; expect: the error result file is empty--'
self.log.info(text)
cat_cmd = f"cat {self.generate_err_file}"
self.log.info(cat_cmd)
msg = self.PrimaryNode.sh(cat_cmd).result()
self.log.info(msg)
        self.assertEqual('', msg, 'Execution failed: ' + text)
        text = '--step4: run pssh -H with all parameters to connect to the standby node ' \
               'remotely and print its hostname with a wrong command; expect: a reasonable error is reported--'
self.log.info(text)
pssh_cmd = f" cd {self.pssh_path};" \
f"source {macro.DB_ENV_PATH};" \
f"python3 pssh " \
f"-H {self.Standby_User_Node.db_host} " \
f"-t 5 " \
f"-p 2 " \
f"-o {self.output_file} " \
f"-e {self.err_output_file} " \
f"-P " \
f"-s " \
f"-i hostname123;"
self.log.info(pssh_cmd)
msg = self.PrimaryNode.sh(pssh_cmd).result()
self.log.info(msg)
        self.assertTrue(self.expect_result in msg, 'Execution failed: ' + text)
        text = '--step5: check the output result file; expect: the result file is empty--'
self.log.info(text)
cat_cmd = f"cat {self.generate_file}"
self.log.info(cat_cmd)
msg = self.PrimaryNode.sh(cat_cmd).result()
self.log.info(msg)
        self.assertEqual('', msg, 'Execution failed: ' + text)
        text = '--step6: check the error result file; expect: the error result file shows the error message--'
self.log.info(text)
cat_cmd = f"cat {self.generate_err_file}"
self.log.info(cat_cmd)
msg = self.PrimaryNode.sh(cat_cmd).result()
self.log.info(msg)
        self.assertTrue(self.expect_result in msg, 'Execution failed: ' + text)
def tearDown(self):
        text = '--step7: clean up the environment; expect: environment cleanup completes--'
self.log.info(text)
rm_cmd = f"rm -rf {self.output_file};" \
f"rm -rf {self.err_output_file}"
self.log.info(rm_cmd)
msg = self.PrimaryNode.sh(rm_cmd).result()
self.log.info(msg)
        self.log.info('teardown assertion succeeded')
        self.assertEqual('', msg, 'Execution failed: ' + text)
        self.log.info('-Opengauss_Function_Tools_Pssh_Case0002 finished executing-')
|
the-stack_0_12761 | # coding=utf8
"""
Test that the expression parser returns proper Unicode strings.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
# this test case fails because of rdar://12991846
# the expression parser does not deal correctly with Unicode expressions
# e.g.
#(lldb) expr L"Hello"
#(const wchar_t [6]) $0 = {
# [0] = \0\0\0\0
# [1] = \0\0\0\0
# [2] = \0\0\0\0
# [3] = \0\0\0\0
# [4] = H\0\0\0
# [5] = e\0\0\0
#}
class UnicodeLiteralsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_expr1(self):
"""Test that the expression parser returns proper Unicode strings."""
self.build()
self.rdar12991846(expr=1)
def test_expr2(self):
"""Test that the expression parser returns proper Unicode strings."""
self.build()
self.rdar12991846(expr=2)
def test_expr3(self):
"""Test that the expression parser returns proper Unicode strings."""
self.build()
self.rdar12991846(expr=3)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break for main.cpp.
self.source = 'main.cpp'
self.line = line_number(
self.source, '// Set break point at this line.')
def rdar12991846(self, expr=None):
"""Test that the expression parser returns proper Unicode strings."""
if self.getArchitecture() in ['i386']:
self.skipTest(
"Skipping because this test is known to crash on i386")
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
        # Break on the struct declaration statement in main.cpp.
lldbutil.run_break_set_by_file_and_line(self, "main.cpp", self.line)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if not process:
self.fail("SBTarget.Launch() failed")
if expr == 1:
self.expect('expression L"hello"', substrs=['hello'])
if expr == 2:
self.expect('expression u"hello"', substrs=['hello'])
if expr == 3:
self.expect('expression U"hello"', substrs=['hello'])
|
the-stack_0_12762 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTest(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, 401)
class PrivateIngredientsApiTest(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'password'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredients_list(self):
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'[email protected]',
'password'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Turmeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, 400) |
the-stack_0_12763 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core import AsyncPipelineClient
from msrest import Deserializer, Serializer
from ._configuration import AzureCommunicationSMSServiceConfiguration
from .operations import SmsOperations
from .. import models
class AzureCommunicationSMSService(object):
"""Azure Communication SMS Service.
:ivar sms: SmsOperations operations
:vartype sms: azure.communication.sms.aio.operations.SmsOperations
:param endpoint: The communication resource, for example https://my-resource.communication.azure.com.
:type endpoint: str
"""
def __init__(
self,
endpoint: str,
**kwargs: Any
) -> None:
base_url = '{endpoint}'
self._config = AzureCommunicationSMSServiceConfiguration(endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.sms = SmsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AzureCommunicationSMSService":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
the-stack_0_12764 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import random
from PyQt5.QtCore import (pyqtSignal, QByteArray, QDataStream, QIODevice,
QMimeData, QPoint, QRect, QSize, Qt)
from PyQt5.QtGui import QDrag, QColor, QCursor, QIcon, QPainter, QPixmap
from PyQt5.QtWidgets import (QApplication, QFileDialog, QFrame, QHBoxLayout,
QListView, QListWidget, QListWidgetItem, QMainWindow, QMessageBox,
QSizePolicy, QWidget)
import puzzle_rc
class PuzzleWidget(QWidget):
puzzleCompleted = pyqtSignal()
def __init__(self, parent=None):
super(PuzzleWidget, self).__init__(parent)
self.piecePixmaps = []
self.pieceRects = []
self.pieceLocations = []
self.highlightedRect = QRect()
self.inPlace = 0
self.setAcceptDrops(True)
self.setMinimumSize(400, 400)
self.setMaximumSize(400, 400)
def clear(self):
self.pieceLocations = []
self.piecePixmaps = []
self.pieceRects = []
self.highlightedRect = QRect()
self.inPlace = 0
self.update()
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece'):
event.accept()
else:
event.ignore()
def dragLeaveEvent(self, event):
updateRect = self.highlightedRect
self.highlightedRect = QRect()
self.update(updateRect)
event.accept()
def dragMoveEvent(self, event):
updateRect = self.highlightedRect.united(self.targetSquare(event.pos()))
if event.mimeData().hasFormat('image/x-puzzle-piece') and self.findPiece(self.targetSquare(event.pos())) == -1:
self.highlightedRect = self.targetSquare(event.pos())
event.setDropAction(Qt.MoveAction)
event.accept()
else:
self.highlightedRect = QRect()
event.ignore()
self.update(updateRect)
def dropEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece') and self.findPiece(self.targetSquare(event.pos())) == -1:
pieceData = event.mimeData().data('image/x-puzzle-piece')
dataStream = QDataStream(pieceData, QIODevice.ReadOnly)
square = self.targetSquare(event.pos())
pixmap = QPixmap()
location = QPoint()
dataStream >> pixmap >> location
self.pieceLocations.append(location)
self.piecePixmaps.append(pixmap)
self.pieceRects.append(square)
            self.highlightedRect = QRect()
self.update(square)
event.setDropAction(Qt.MoveAction)
event.accept()
if location == QPoint(square.x() / 80, square.y() / 80):
self.inPlace += 1
if self.inPlace == 25:
self.puzzleCompleted.emit()
else:
self.highlightedRect = QRect()
event.ignore()
def findPiece(self, pieceRect):
try:
return self.pieceRects.index(pieceRect)
except ValueError:
return -1
def mousePressEvent(self, event):
square = self.targetSquare(event.pos())
found = self.findPiece(square)
if found == -1:
return
location = self.pieceLocations[found]
pixmap = self.piecePixmaps[found]
del self.pieceLocations[found]
del self.piecePixmaps[found]
del self.pieceRects[found]
if location == QPoint(square.x() / 80, square.y() / 80):
self.inPlace -= 1
self.update(square)
itemData = QByteArray()
dataStream = QDataStream(itemData, QIODevice.WriteOnly)
dataStream << pixmap << location
mimeData = QMimeData()
mimeData.setData('image/x-puzzle-piece', itemData)
drag = QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(event.pos() - square.topLeft())
drag.setPixmap(pixmap)
if drag.exec_(Qt.MoveAction) != Qt.MoveAction:
self.pieceLocations.insert(found, location)
self.piecePixmaps.insert(found, pixmap)
self.pieceRects.insert(found, square)
self.update(self.targetSquare(event.pos()))
if location == QPoint(square.x() / 80, square.y() / 80):
self.inPlace += 1
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
painter.fillRect(event.rect(), Qt.white)
if self.highlightedRect.isValid():
painter.setBrush(QColor("#ffcccc"))
painter.setPen(Qt.NoPen)
painter.drawRect(self.highlightedRect.adjusted(0, 0, -1, -1))
for rect, pixmap in zip(self.pieceRects, self.piecePixmaps):
painter.drawPixmap(rect, pixmap)
painter.end()
def targetSquare(self, position):
return QRect(position.x() // 80 * 80, position.y() // 80 * 80, 80, 80)
class PiecesList(QListWidget):
def __init__(self, parent=None):
super(PiecesList, self).__init__(parent)
self.setDragEnabled(True)
self.setViewMode(QListView.IconMode)
self.setIconSize(QSize(60, 60))
self.setSpacing(10)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece'):
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece'):
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasFormat('image/x-puzzle-piece'):
pieceData = event.mimeData().data('image/x-puzzle-piece')
dataStream = QDataStream(pieceData, QIODevice.ReadOnly)
pixmap = QPixmap()
location = QPoint()
dataStream >> pixmap >> location
self.addPiece(pixmap, location)
event.setDropAction(Qt.MoveAction)
event.accept()
else:
event.ignore()
def addPiece(self, pixmap, location):
pieceItem = QListWidgetItem(self)
pieceItem.setIcon(QIcon(pixmap))
pieceItem.setData(Qt.UserRole, pixmap)
pieceItem.setData(Qt.UserRole+1, location)
pieceItem.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled)
def startDrag(self, supportedActions):
item = self.currentItem()
itemData = QByteArray()
dataStream = QDataStream(itemData, QIODevice.WriteOnly)
pixmap = QPixmap(item.data(Qt.UserRole))
location = item.data(Qt.UserRole+1)
dataStream << pixmap << location
mimeData = QMimeData()
mimeData.setData('image/x-puzzle-piece', itemData)
drag = QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(QPoint(pixmap.width()/2, pixmap.height()/2))
drag.setPixmap(pixmap)
if drag.exec_(Qt.MoveAction) == Qt.MoveAction:
if self.currentItem() is not None:
self.takeItem(self.row(item))
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.puzzleImage = QPixmap()
self.setupMenus()
self.setupWidgets()
self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))
self.setWindowTitle("Puzzle")
def openImage(self, path=None):
if not path:
path, _ = QFileDialog.getOpenFileName(self, "Open Image", '',
"Image Files (*.png *.jpg *.bmp)")
if path:
newImage = QPixmap()
if not newImage.load(path):
QMessageBox.warning(self, "Open Image",
"The image file could not be loaded.",
QMessageBox.Cancel)
return
self.puzzleImage = newImage
self.setupPuzzle()
def setCompleted(self):
QMessageBox.information(self, "Puzzle Completed",
"Congratulations! You have completed the puzzle!\nClick OK "
"to start again.",
QMessageBox.Ok)
self.setupPuzzle()
def setupPuzzle(self):
size = min(self.puzzleImage.width(), self.puzzleImage.height())
self.puzzleImage = self.puzzleImage.copy(
(self.puzzleImage.width() - size)/2,
(self.puzzleImage.height() - size)/2, size, size).scaled(400, 400, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
self.piecesList.clear()
for y in range(5):
for x in range(5):
pieceImage = self.puzzleImage.copy(x*80, y*80, 80, 80)
self.piecesList.addPiece(pieceImage, QPoint(x,y))
random.seed(QCursor.pos().x() ^ QCursor.pos().y())
for i in range(self.piecesList.count()):
if random.random() < 0.5:
item = self.piecesList.takeItem(i)
self.piecesList.insertItem(0, item)
self.puzzleWidget.clear()
def setupMenus(self):
fileMenu = self.menuBar().addMenu("&File")
openAction = fileMenu.addAction("&Open...")
openAction.setShortcut("Ctrl+O")
exitAction = fileMenu.addAction("E&xit")
exitAction.setShortcut("Ctrl+Q")
gameMenu = self.menuBar().addMenu("&Game")
restartAction = gameMenu.addAction("&Restart")
openAction.triggered.connect(self.openImage)
exitAction.triggered.connect(QApplication.instance().quit)
restartAction.triggered.connect(self.setupPuzzle)
def setupWidgets(self):
frame = QFrame()
frameLayout = QHBoxLayout(frame)
self.piecesList = PiecesList()
self.puzzleWidget = PuzzleWidget()
self.puzzleWidget.puzzleCompleted.connect(self.setCompleted,
Qt.QueuedConnection)
frameLayout.addWidget(self.piecesList)
frameLayout.addWidget(self.puzzleWidget)
self.setCentralWidget(frame)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.openImage(':/images/example.jpg')
window.show()
sys.exit(app.exec_())
|
the-stack_0_12768 | #!/usr/bin/env python3
"""Combine logs from multiple compchain nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|