id (string, 2–8 chars) | text (string, 16–264k chars) | dataset_id (1 class)
---|---|---|
3247120
|
"""Module for generating reports on the results of input files.
.. important::
While this should work generally, autojob is currently tested on the
following types of code: FEFF 9.9.1, VASP 6.2.1, and the default CONFIG
assumes these code versions.
"""
from collections import Counter
from pathlib import Path
from autojob import logger
from autojob.file_utils import (
exhaustive_directory_search,
run_command,
check_if_substring_match,
)
CONFIG = {
"in": {
"FEFF": ["feff.inp"],
"VASP": ["INCAR", "POSCAR", "KPOINTS", "POTCAR"],
},
"out": {
"FEFF": [["xmu.dat", None], ["feff.out", "feff ends at"]],
"VASP": [
[
"OUTCAR",
" General timing and accounting informations for this job:",
]
],
},
}
def check_computation_type(root, input_files=CONFIG["in"]):
"""Determines which type of computation has been completed in the directory
of interest. In the cases when no input file types can be matched, or when
multiple input file types are found, warnings/errors will be logged and
None will be returned.
Parameters
----------
root : os.PathLike
The directory containing the input files to check.
input_files : dict, optional
A dictionary that contains keys corresponding to computation types
(e.g. VASP) and values of lists of file names. These file names must
_all_ be present in a given directory to confirm that the calculation
is of a certain type.
Returns
-------
str
The type of calculation that the directory contains. The available
options are the keys of input_files (CONFIG["in"] by default).
"""
contained = {xx.parts[-1] for xx in list(Path(root).iterdir())}
overlap = {
key: set(value).issubset(contained)
for key, value in input_files.items()
}
# Check to see if for some reason there are multiple computations' input
# files in one directory. This is obviously a problem.
N = sum(list(overlap.values()))
if N != 1:
if N < 1:
logger.warning(f"No matching input files found in {root}")
elif N > 1:
logger.error(f"More than one type of calculation found in {root}")
return None
# Otherwise, we simply find which one is true
calc_type = [key for key, value in overlap.items() if value][0]
logger.debug(f"{root} identified as {calc_type}")
return calc_type
def check_job_status(root, checks):
"""Checks the status of a job by looking in the directory of interest for
the appropriate completion status. This function does not check that the
provided root directory actually corresponds to the type of calculation
provided, and it will error ungracefully if the directory does not contain
the appropriate files. Output files have their last 100 lines checked.
Parameters
----------
root : os.PathLike
The directory containing input and output files.
checks : list of list of str
A doubly nested list. Each inner list is a [filename, substring] pair.
If the substring is None, the check only verifies that the file exists
and is not empty.
Returns
-------
bool
True if the job has completed successfully, False otherwise.
"""
for filename, substring in checks:
path = Path(root) / Path(filename)
logger.debug(f"Running checks {checks} on {path}")
# Check for existence and that the file size is > 0
if substring is None:
if not path.exists():
logger.debug(f"{path} does not exist - status FALSE")
return False
if path.stat().st_size == 0:
logger.debug(f"{path} is empty - status FALSE")
return False
else:
command = f"tail -n 100 {str(path)}"
res = run_command(command)
lines = res["stdout"].split("\n")
cond = check_if_substring_match(lines, str(substring))
if not cond:
logger.debug(f"{path} missing {substring} - status FALSE")
return False
logger.debug(f"{path} - status TRUE")
return True
def generate_report(root, filename, output_files=CONFIG["out"]):
"""Generates a report of which jobs have finished, which are still ongoing
and which have failed. Currently, returns True if the job completed with
seemingly no issues, and False otherwise.
.. warning::
If the directories of interest cannot be identified, None will be
returned. This happens if check_computation_type returns None. Warnings
are issued in this case, and these directories will be ignored.
Notes
-----
What is checked given some calculation type is detailed below:
* VASP: If the job completed, the OUTCAR file will contain timing
information.
* FEFF: If the job completed, there will be a non-empty xmu.dat file.
Parameters
----------
root : os.PathLike
Root location for the exhaustive directory search.
filename : str
Looks exhaustively in root for directories containing a file matching
this name.
output_files : dict, optional
A dictionary containing strings as keys, which identify the computation
type, and lists of [filename, substring] checks as values, used to decide
whether a calculation of that type completed successfully. Default is
CONFIG["out"].
Returns
-------
dict
A dictionary keyed by calculation type; each value contains "success" and
"fail" lists of the checked directory paths.
"""
logger.info(f"Generating report at {root} (searching for {filename})")
# Get the directories matching the filename of the directory search
directories = exhaustive_directory_search(root, filename)
# For each directory in the tree, determine the type of calculation that
# was run.
calculation_types = {dd: check_computation_type(dd) for dd in directories}
calculation_types = {
key: value
for key, value in calculation_types.items()
if value is not None
}
cc = Counter(list(calculation_types.values()))
# Get the statuses
status = dict()
complete = {ctype: 0 for ctype in cc.keys()}
report = {ctype: {"success": [], "fail": []} for ctype in cc.keys()}
for dd, ctype in calculation_types.items():
checks = output_files[ctype] if ctype is not None else None
status[dd] = check_job_status(dd, checks=checks)
complete[ctype] += int(status[dd])
if status[dd]:
report[ctype]["success"].append(str(dd))
else:
report[ctype]["fail"].append(str(dd))
for ctype, ncomplete in complete.items():
if ncomplete == cc[ctype]:
logger.success(f"{ctype}: all {ncomplete} complete")
else:
logger.warning(f"{ctype} incomplete: {ncomplete}/{cc[ctype]}")
return report
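# Illustrative usage (added sketch, not part of the original module). The
# import path and root directory below are hypothetical; "INCAR" is one of
# the VASP input files listed in CONFIG above:
#
#     from autojob.report import generate_report
#     report = generate_report("all_calculations", "INCAR")
#     print(report["VASP"]["fail"])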
|
StarcoderdataPython
|
11350062
|
'''
Module to evaluate the fitness value of a scenario
'''
from pymoo.model.problem import Problem
class MyProblem(Problem):
def __init__(self, **kwargs):
super().__init__(
n_var=1, n_obj=1, n_constr=0, elementwise_evaluation=True, **kwargs
)
# self.pool = ThreadPool(8)
def _evaluate(self, x, out, *args, **kwargs):
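# Each design variable is a scenario object; evaluating its fitness in place
# yields the single objective value pymoo expects from elementwise evaluation.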
s = x[0]
s.eval_fitness()
out["F"] = s.fitness
|
StarcoderdataPython
|
1723849
|
from simulation.visualization import Visualizer
import game_events.game_events as events
class TextualVisualizer(Visualizer):
def __init__(self):
pass
def visualize(self, event, params):
if isinstance(event, events.AbilityCastStarted):
self.visualize_ability_cast_started(params)
elif isinstance(event, events.AbilityCastEnded):
self.visualize_ability_cast_ended(params)
elif isinstance(event, events.EveryoneRestoreHealthAndResources):
self.visualize_everyone_restore_health_and_resources(params)
def visualize_ability_cast_started(self, params):
time = params["current_time"]
source = params["sender"]
target = params["target"]
ability = params["ability"]
print("[Time: {}] {} is casting {} on {}".format(time, source.name, ability.name, target.name))
def visualize_ability_cast_ended(self, params):
time = params["current_time"]
source = params["sender"]
target = params["target"]
ability = params["ability"]
health_before = params["target_health_before"]
health_after = params["target_health_after"]
success = params["success"]
if success:
if health_after > 0:
print("[Time: {}] {} has hit {} for {} damage, {} left.".format(
time,
source.name,
target.name,
health_before - health_after,
health_after
))
else:
print("[Time: {}] {} has hit {} for {} damage, {} has died.".format(
time,
source.name,
target.name,
health_before - health_after,
target.name
))
else:
print("[Time: {}] {} was not able to cast {} on {}.".format(time, source.name, ability.name, target.name))
def visualize_everyone_restore_health_and_resources(self, params):
time = params["current_time"]
players = params["sender"].players
# pass
|
StarcoderdataPython
|
3592540
|
import enchant
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
import logging
dirty_words = ["their", "so", "also"]
forbidden = ["used", "called", "xbox", "youtube", "xo", "quote",
"quotes", "minecraft", "important", "considered", "why",
"using", "as", "for", "as a", "like", "doing", "the", "would",
"of", "in", "now", "tonight", "today"]
totally_forbidden = ["xbox", "youtube", "xo", "quote",
"quotes", "minecraft", "why", "quizlet", "nz", "wz",
"quora", "reddit", "skyrim", "shippuden", "yahoo",
"wikipedia", "how", "why", "brainly", "joke", "jokes", "quiz"]
D_ENCHANT = enchant.Dict("en_US")
def _is_totally_forbidden(sentence, forbidden):
s = sentence.split(" ")
if "quiz let" in sentence:
return True
for w in s:
if w in forbidden:
return True
return False
class FilterObjectSubmodule(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "Filter Object"
def process(self, input_interface):
logging.info("Start the filtering object")
new_generated_facts = []
all_subjects = input_interface.get_subjects()
for generated_fact in input_interface.get_generated_facts():
obj = generated_fact.get_object().get()
if obj in forbidden or _is_totally_forbidden(obj, totally_forbidden) or len(obj) == 1:
continue
if " " not in obj and not D_ENCHANT.check(obj) and not obj in all_subjects:
continue
obj = generated_fact.get_object().get().split(" ")
predicate = generated_fact.get_predicate().get()
if predicate == obj[0]:
obj = obj[1:]
# Remove last punctuation
changed_last = False
if obj and obj[-1] and not obj[-1][-1].isalnum():
obj[-1] = obj[-1][:-1]
changed_last = True
new_obj = []
for p in obj:
if p not in dirty_words:
new_obj.append(p)
if (obj != new_obj or changed_last) and len(new_obj) != 0:
generated_fact = generated_fact.change_object(" ".join(new_obj).strip())
if len(new_obj) != 0:
new_generated_facts.append(generated_fact)
logging.info("%d facts were removed by the object cleaner",
len(input_interface.get_generated_facts()) - len(new_generated_facts))
return input_interface.replace_generated_facts(new_generated_facts)
|
StarcoderdataPython
|
9635204
|
print('Ola mundo!!')
a = 10
|
StarcoderdataPython
|
11397623
|
<gh_stars>0
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover AdWordsClient."""
__author__ = ('<EMAIL> (<NAME>)',
'<EMAIL> (<NAME>)')
import os
import StringIO
import sys
import unittest
sys.path.insert(0, os.path.join('..', '..', '..'))
import mock
from adspygoogle.adwords.AdWordsClient import AdWordsClient
from adspygoogle.common.Errors import ValidationError
DEFAULT_HEADERS = {
'userAgent': 'Foo Bar',
'developerToken': 'devtoken'
}
class AdWordsClientValidationTest(unittest.TestCase):
"""Tests the validation logic when instantiating AdWordsClient."""
def testEmailPassOnly(self):
"""Tests that specifying solely email & password works."""
with mock.patch('adspygoogle.common.Utils.GetAuthToken') as mock_get_token:
mock_get_token.return_value = 'FooBar'
headers = DEFAULT_HEADERS.copy()
headers['email'] = '<EMAIL>'
headers['password'] = 'password'
client = AdWordsClient(headers=headers)
self.assertEquals(client._headers['authToken'], 'FooBar')
mock_get_token.assert_called_once_with(
'<EMAIL>', 'password', mock.ANY, mock.ANY, mock.ANY,
mock.ANY, mock.ANY)
def testEmailPassOthersBlank(self):
"""Tests that email and password with other auth blank works."""
with mock.patch('adspygoogle.common.Utils.GetAuthToken') as mock_get_token:
mock_get_token.return_value = 'FooBar'
headers = DEFAULT_HEADERS.copy()
headers['email'] = '<EMAIL>'
headers['password'] = 'password'
headers['authToken'] = ''
headers['oauth_credentials'] = None
client = AdWordsClient(headers=headers)
self.assertEquals(client._headers['authToken'], 'FooBar')
mock_get_token.assert_called_once_with(
'<EMAIL>', 'password', mock.ANY, mock.ANY, mock.ANY,
mock.ANY, mock.ANY)
def testAuthTokenOnly(self):
"""Tests that specifying solely authtoken works."""
headers = DEFAULT_HEADERS.copy()
headers['authToken'] = 'MyToken'
client = AdWordsClient(headers=headers)
self.assertEquals(client._headers['authToken'], 'MyToken')
def testAuthTokenOthersBlank(self):
"""Tests that authToken with other auth blank works."""
headers = DEFAULT_HEADERS.copy()
headers['authToken'] = 'MyToken'
headers['email'] = ''
headers['password'] = ''
headers['oauth_credentials'] = None
client = AdWordsClient(headers=headers)
self.assertEquals(client._headers['authToken'], 'MyToken')
def testOAuth2CredentialsOnly(self):
"""Tests that specifying solely oauth_credentials works."""
headers = DEFAULT_HEADERS.copy()
headers['oauth2credentials'] = 'credential!'
client = AdWordsClient(headers=headers)
self.assertTrue(client.oauth2credentials)
def testOAuthCredentialsOthersBlank(self):
"""Tests that oauth_credentials with other auth blank works."""
headers = DEFAULT_HEADERS.copy()
headers['oauth2credentials'] = 'credential!'
headers['email'] = ''
headers['password'] = ''
headers['authToken'] = ''
client = AdWordsClient(headers=headers)
self.assertTrue(client.oauth2credentials)
def testNonStrictThrowsValidationError(self):
"""Tests that even when using non-strict mode, we still raise a
ValidationError when no auth credentials are provided."""
headers = DEFAULT_HEADERS.copy()
config = {'strict': 'n'}
def Run():
_ = AdWordsClient(headers=headers, config=config)
self.assertRaises(ValidationError, Run)
class AdWordsClientCaptchaHandlingTest(unittest.TestCase):
"""Tests the captcha handling logic."""
CAPTCHA_CHALLENGE = '''Url=http://www.google.com/login/captcha
Error=CaptchaRequired
CaptchaToken=DQAA<PASSWORD>
CaptchaUrl=Captcha?ctoken=<KEY>'''
SUCCESS = '''SID=DQAAAGgA...7Zg8CTN
LSID=DQAAAGsA...lk8BBbG
Auth=DQAAAGgA...dk3fA5N'''
def testCaptchaHandling(self):
headers = DEFAULT_HEADERS.copy()
headers['email'] = '<EMAIL>'
headers['password'] = 'password'
client = None
try:
with mock.patch('urllib2.urlopen') as mock_urlopen:
mock_urlopen.return_value = StringIO.StringIO(self.CAPTCHA_CHALLENGE)
client = AdWordsClient(headers=headers)
self.fail('Expected a CaptchaError to be thrown')
except ValidationError, e:
with mock.patch('urllib2.urlopen') as mock_urlopen:
mock_urlopen.return_value = StringIO.StringIO(self.SUCCESS)
client = AdWordsClient(headers=headers,
login_token=e.root_cause.captcha_token,
login_captcha='foo bar')
self.assertEquals(client._headers['authToken'], 'DQAAAGgA...dk3fA5N')
class AdWordsClientServiceTest(unittest.TestCase):
"""Tests for retrieving SOAP services via AdWordsClient."""
def setUp(self):
"""Prepare unittest."""
self.client = AdWordsClient(headers={'authToken': 'AUTH TOKEN',
'userAgent': 'USER AGENT',
'developerToken': 'DEV TOKEN'})
def testGetBudgetService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetBudgetService()
self.assertEquals('BudgetService', service._service_name)
def testGetAdGroupFeedService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetAdGroupFeedService()
self.assertEquals('AdGroupFeedService', service._service_name)
def testGetCampaignFeedService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetCampaignFeedService()
self.assertEquals('CampaignFeedService', service._service_name)
def testGetFeedItemService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetFeedItemService()
self.assertEquals('FeedItemService', service._service_name)
def testGetFeedMappingService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetFeedMappingService()
self.assertEquals('FeedMappingService', service._service_name)
def testGetFeedService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetFeedService()
self.assertEquals('FeedService', service._service_name)
def testGetCampaignSharedSetService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetCampaignSharedSetService()
self.assertEquals('CampaignSharedSetService', service._service_name)
def testGetSharedSetService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetSharedSetService()
self.assertEquals('SharedSetService', service._service_name)
def testGetSharedCriterionService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetSharedCriterionService()
self.assertEquals('SharedCriterionService', service._service_name)
def testGetAdGroupBidModifierService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetAdGroupBidModifierService()
self.assertEquals('AdGroupBidModifierService', service._service_name)
def testGetOfflineConversionFeedService(self):
with mock.patch('adspygoogle.SOAPpy.WSDL.Proxy'):
service = self.client.GetOfflineConversionFeedService()
self.assertEquals('OfflineConversionFeedService', service._service_name)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
12844560
|
<gh_stars>100-1000
from typing import Union, Callable, Iterable, Optional
from typing_extensions import Literal
from anndata import AnnData
from cellrank import logging as logg
from cellrank.ul._docs import d, inject_docs
from cellrank.tl._utils import _deprecate
from cellrank.tl.kernels import VelocityKernel, ConnectivityKernel
from cellrank.tl.kernels._base_kernel import KernelExpression
from cellrank.tl.kernels._velocity_kernel import BackwardMode, VelocityMode
from cellrank.tl.kernels._velocity_schemes import Scheme
@_deprecate(version="2.0")
@inject_docs(m=VelocityMode, b=BackwardMode, s=Scheme) # don't swap the order
@d.dedent
def transition_matrix(
adata: AnnData,
backward: bool = False,
vkey: str = "velocity",
xkey: str = "Ms",
conn_key: str = "connectivities",
gene_subset: Optional[Iterable] = None,
mode: Literal[
"deterministic", "stochastic", "sampling", "monte_carlo"
] = VelocityMode.DETERMINISTIC,
backward_mode: Literal["transpose", "negate"] = BackwardMode.TRANSPOSE,
scheme: Union[
Literal["dot_product", "cosine", "correlation"], Callable
] = Scheme.CORRELATION,
softmax_scale: Optional[float] = None,
weight_connectivities: float = 0.2,
density_normalize: bool = True,
key: Optional[str] = None,
**kwargs,
) -> KernelExpression:
"""
Compute a transition matrix based on a combination of RNA Velocity and transcriptomic or spatial similarity.
To learn more about the way in which the transition matrices are computed, see
:class:`cellrank.tl.kernels.VelocityKernel` for the velocity-based transition matrix and
:class:`cellrank.tl.kernels.ConnectivityKernel` for the similarity-based transition matrix.
Parameters
----------
%(adata)s
%(backward)s
vkey
Key from ``adata.layers`` to access the velocities.
xkey
Key in ``adata.layers`` where expected gene expression counts are stored.
conn_key
Key in :attr:`anndata.AnnData.obsp` to obtain the connectivity matrix, describing cell-cell similarity.
gene_subset
List of genes to be used to compute transition probabilities.
By default, genes from ``adata.var['velocity_genes']`` are used.
%(velocity_mode)s
%(velocity_backward_mode_high_lvl)s
%(velocity_scheme)s
%(softmax_scale)s
weight_connectivities
Weight given to similarities as opposed to velocities. Must be in `[0, 1]`.
density_normalize
Whether to use density correction when computing the transition probabilities based on similarities.
Density correction is done as by :cite:`haghverdi:16`.
%(write_to_adata.parameters)s
kwargs
Keyword arguments for :meth:`cellrank.tl.kernels.VelocityKernel.compute_transition_matrix`.
Returns
-------
A kernel expression object containing the computed transition matrix.
%(write_to_adata)s
"""
def compute_velocity_kernel() -> VelocityKernel:
return VelocityKernel(
adata,
backward=backward,
vkey=vkey,
xkey=xkey,
gene_subset=gene_subset,
conn_key=conn_key,
).compute_transition_matrix(
softmax_scale=softmax_scale,
mode=mode,
backward_mode=backward_mode,
scheme=scheme,
**kwargs,
)
if 0 < weight_connectivities < 1:
vk = compute_velocity_kernel()
logg.info(f"Using a connectivity kernel with weight `{weight_connectivities}`")
ck = ConnectivityKernel(
adata, backward=backward, conn_key=conn_key
).compute_transition_matrix(density_normalize=density_normalize)
final = (
(1 - weight_connectivities) * vk + weight_connectivities * ck
).compute_transition_matrix()
elif weight_connectivities == 0:
final = compute_velocity_kernel()
elif weight_connectivities == 1:
final = ConnectivityKernel(
adata,
backward=backward,
conn_key=conn_key,
).compute_transition_matrix(density_normalize=density_normalize)
else:
raise ValueError(
f"Parameter `weight_connectivities` must be in range `[0, 1]`, found `{weight_connectivities}`."
)
final.write_to_adata(key=key)
return final
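# Illustrative usage (added sketch, not part of cellrank). Assumes `adata` is
# an AnnData object with velocities and connectivities already computed:
#
#     kernel = transition_matrix(adata, weight_connectivities=0.2, key="T_fwd")
#
# The combined kernel writes its transition matrix back to `adata` via
# write_to_adata(key="T_fwd").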
|
StarcoderdataPython
|
6456620
|
<reponame>sireliah/polish-python
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that turns <> into !=."""
# Local imports
z .. zaimportuj pytree
z ..pgen2 zaimportuj token
z .. zaimportuj fixer_base
klasa FixNe(fixer_base.BaseFix):
# This jest so simple that we don't need the pattern compiler.
_accept_type = token.NOTEQUAL
def match(self, node):
# Override
zwróć node.value == "<>"
def transform(self, node, results):
new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix)
zwróć new
|
StarcoderdataPython
|
6703335
|
<reponame>sanghyun-son/srwarp<filename>src/data/sr/benchmark/urban100.py
from os import path
from data.sr import base
class Urban100(base.SRBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_path(self) -> str:
return path.join(self.dpath, 'benchmark', 'urban100')
|
StarcoderdataPython
|
5100665
|
<reponame>dy1zan/softwarecapstone
from .company import Company
|
StarcoderdataPython
|
1707995
|
# coding: utf-8
#
# This code is part of qclib.
#
# Copyright (c) 2021, <NAME>
import os.path
import pickle
from collections.abc import MutableMapping
from typing import Iterable, Any
def cachekey(*args, **kwargs):
argstr = "; ".join(str(x) for x in args)
kwargstr = "; ".join(f"{k}={v}" for k, v in kwargs.items())
return "; ".join(s for s in [argstr, kwargstr] if s)
class Cache(MutableMapping):
def __init__(self, file: str = "", autosave=True):
super().__init__()
self._data = dict()
self.file = file
self.autosave = autosave
self.load()
@staticmethod
def key(*args, **kwargs):
return cachekey(*args, **kwargs)
def on_change(self):
if self.autosave:
self.save()
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterable[Any]:
return iter(self._data)
def __getitem__(self, key: str) -> Any:
return self._data[key]
def __setitem__(self, key: str, value: Any) -> None:
self._data[key] = value
self.on_change()
def __delitem__(self, key: str) -> None:
del self._data[key]
self.on_change()
def clear(self) -> None:
self._data.clear()
self.on_change()
def save(self) -> None:
if self.file:
with open(self.file, "wb") as fh:
pickle.dump(self._data, fh)
def load(self) -> None:
if os.path.isfile(self.file):
with open(self.file, "rb") as fh:
self._data = pickle.load(fh)
cache = Cache("cache.pkl", autosave=True)
|
StarcoderdataPython
|
12836341
|
<filename>pitfall/utils.py
# Copyright 2019 Ali (@bincyber)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import exceptions
from Cryptodome.Cipher import AES
from Cryptodome.Hash import SHA1, SHA256
from Cryptodome.Protocol.KDF import PBKDF2
from Cryptodome.Random import get_random_bytes
from datetime import datetime
from pathlib import Path
from typing import Dict, Tuple
import base64
import distutils.spawn
import uuid
def get_random_string(length: int = 32) -> str:
"""
This function returns an alphanumeric string of the requested length.
:param int length: the length of the random string. Max of 32 characters
:returns: a random string
:rtype: str
"""
if length > 32:
length = 32
elif length <= 0:
length = 1
random_string = uuid.uuid4().hex
return random_string[:length]
def generate_project_name() -> str:
"""
This function generates and returns a unique name for the Pulumi Project.
:returns: a unique project name
:rtype: str
"""
random_string = get_random_string(16)
project_name = f"pitf-project-{random_string}"
return project_name
def generate_stack_name() -> str:
"""
This function generates and returns a unique name for the Pulumi Stack.
"""
random_string = get_random_string(16)
stack_name = f"pitf-stack-{random_string}"
return stack_name
def get_project_backend_url(path: Path = None) -> Dict[str, str]:
"""
This function returns the location of the Pulumi state directory. By default,
the current working directory.
:param Path path: a path object
:returns: dictionary containing a file URL pointing to the Pulumi state directory
:rtype: dict
"""
if path is None:
path = Path.cwd()
return {"url": path.as_uri()}
def generate_aes_encryption_key(password: str, salt: bytes = None) -> Tuple[bytes, bytes]:
""" uses PBKDF2 with SHA256 HMAC to derive a 32-byte encryption key from the provided password """
if salt is None:
salt = get_random_bytes(8)
return PBKDF2(password, salt, 32, count=1000000, hmac_hash_module=SHA256), salt
def encrypt_with_aes_gcm(key: bytes, plaintext: bytes) -> Tuple[bytes, bytes, bytes]:
""" encrypts plaintext using 256-bit AES in GCM mode """
nonce = get_random_bytes(12)
cipher = AES.new(key=key, nonce=nonce, mode=AES.MODE_GCM, mac_len=16)
ciphertext, mac = cipher.encrypt_and_digest(plaintext)
return nonce, ciphertext, mac
def decrypt_with_aes_gcm(key: bytes, nonce: bytes, ciphertext: bytes, mac: bytes) -> bytes:
""" decrypts 256-bit AES encrypted ciphertext """
cipher = AES.new(key=key, nonce=nonce, mode=AES.MODE_GCM, mac_len=16)
plaintext = cipher.decrypt_and_verify(ciphertext, mac)
return plaintext
def generate_encryptionsalt(password: str) -> Tuple[bytes, str]:
""" generates the base64 encoded string for the encryptionsalt field in Pulumi stack files """
plaintext = b'pulumi'
key, salt = generate_aes_encryption_key(password)
nonce, ciphertext, mac = encrypt_with_aes_gcm(key, plaintext)
# 16-byte MAC tag is appended to the ciphertext
message = ciphertext + mac
salt_b64 = base64.b64encode(salt).decode('utf-8')
nonce_b64 = base64.b64encode(nonce).decode('utf-8')
message_b64 = base64.b64encode(message).decode('utf-8')
encryptionsalt = f"v1:{salt_b64}:v1:{nonce_b64}:{message_b64}"
return key, encryptionsalt
def get_encrypted_secret(plaintext: bytes, key: bytes) -> str:
""" returns a base64 formatted encrypted Pulumi secret """
nonce, ciphertext, mac = encrypt_with_aes_gcm(key, plaintext)
# 16-byte MAC tag is appended to the ciphertext
message = ciphertext + mac
nonce_b64 = base64.b64encode(nonce).decode('utf-8')
message_b64 = base64.b64encode(message).decode('utf-8')
encrypted_secret = f"v1:{nonce_b64}:{message_b64}"
return encrypted_secret
def get_current_timestamp() -> str:
""" returns the current date and time in ISO 8601 format """
return datetime.now().astimezone().isoformat()
def sha1sum(data: bytes) -> str:
""" returns the SHA1 hash of the provided data """
h = SHA1.new()
h.update(data)
return h.hexdigest()
def sha256sum(data: bytes) -> str:
""" returns the SHA256 hash of the provided data """
h = SHA256.new()
h.update(data)
return h.hexdigest()
def decode_utf8(data: bytes) -> str:
return data.decode('utf-8')
def get_directory_abspath(path: Path) -> Path:
if not path.is_dir():
path = path.parent
return path.absolute()
def find_pulumi_binary() -> str:
location = distutils.spawn.find_executable('pulumi')
if location is None:
raise exceptions.PulumiBinaryNotFoundError("Could not find the pulumi binary on the system")
return location
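# Illustrative usage (added sketch, not part of pitfall): AES-GCM round trip
# using only the helpers defined above.
if __name__ == "__main__":
    key, salt = generate_aes_encryption_key("hunter2-example-password")
    nonce, ciphertext, mac = encrypt_with_aes_gcm(key, b"pulumi secret")
    assert decrypt_with_aes_gcm(key, nonce, ciphertext, mac) == b"pulumi secret"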
|
StarcoderdataPython
|
6463718
|
# Stem module for Tensorflow. A stem is a structure that contains one or more
# substructures called subobjects. These subobjects may be classes that inherit from stem
# or from leaf. Note it is up to inheriting classes whether the subobjects are arranged
# in series, in parallel, or a combination.
#
# <NAME>
#-------------------------------------------------------------------------------
from deepnodal.python.concepts.leaf import *
#-------------------------------------------------------------------------------
class stem (structure): # we inherit structure because a stem is never a leaf
"""
A stem is a structure that supports and broadcasts specifications to many subobjects.
Note a subobject may be another stem or a leaf (the default).
The stem class is abstract and inheriting classes must define self.__call__(inp)
to be instantiated.
"""
# public
def_name = 'stem' # default name
def_subobject = leaf # default subobject class
def_subobject_name = 'subobject' # default subobject name
# protected
_subobjects = None # subobject instances which may be leaves or stems
_n_subobjects = None # number of subobjects
_subobject = leaf # subobject class
_unit_subobject = None # flag to state unit subobject
_subobject_name = 'subobject' # default subobject name if not unit_subobject
_spec_type = None # container type to give specifications to subobjects (dict not allowed)
_inp = None # input
_out = None # output
_params = None # parameters - collated from subobjects
_n_params = None # len(parameters)
_outputs = None # outputs - collated from subobjects
_n_outputs = None # len(outputs)
_ex_outputs = False # flag to include exhaustive list of all outputs
#-------------------------------------------------------------------------------
def __init__(self, name = None, dev = None):
self.set_name(name)
self.set_dev(dev)
self.set_ex_outputs() # Note this flag is only pertinent the function classes
self.set_subobject()
self.set_subobjects()
#-------------------------------------------------------------------------------
def set_name(self, name = None):
self.name = name if name is not None else self.def_name
if self._subobjects is None: return
for i, subobject in enumerate(self._subobjects):
# no point re-naming subobjects if unit_subobject is true
subobject_name = self.name if self._unit_subobject else self.name + "/" + self._subobject_name + "_" + str(i)
subobject.set_name(subobject_name)
#-------------------------------------------------------------------------------
def set_dev(self, dev = None):
self.dev = dev
if self._subobjects is None: return
for subobject in self._subobjects:
subobject.set_dev(dev)
#-------------------------------------------------------------------------------
def set_ex_outputs(self, _ex_outputs = False):
self.ex_outputs = _ex_outputs
#-------------------------------------------------------------------------------
def ret_ex_outputs(self, _ex_outputs = False):
return self.ex_outputs
#-------------------------------------------------------------------------------
def set_subobject(self, subobject = None, subobject_name = None):
"""
This sets the class-type of each subobject and associated name prior to indexing.
"""
self._subobject = subobject if subobject is not None else self.def_subobject
self._subobject_name = subobject_name if subobject_name is not None else self.def_subobject_name
#-------------------------------------------------------------------------------
def set_subobjects(self, subobjects = None):
"""
This allows either manually setting the list of subobjects, or if subobjects is an
integer it instantiates that number of subobjects.
"""
self._subobjects = subobjects
self._n_subobjects = 0
self._unit_subobject = None
if self._subobjects is None:
return self._subobjects
elif type(self._subobjects) is list:
self._n_subobjects = len(self._subobjects)
self._unit_subobject = self._n_subobjects == 1
# it would be quite rude to rename or redevice these subobjects so we won't
elif type(subobjects) is int:
self._n_subobjects = subobjects
self._unit_subobject = self._n_subobjects == 1
self._subobjects = [self._subobject() for i in range(self._n_subobjects)]
self.set_name(self.name) # this renames all subobjects
self.set_dev(self.dev) # this redevices all subobjects
else:
raise TypeError("Unrecognised subobjects specification.")
return self._subobjects
#-------------------------------------------------------------------------------
def add_subobjects(self, count = 1):
if self._subobjects is None:
self._subobjects = []
for i in range(count):
self._subobjects.append(self._subobject())
self._n_subobjects = len(self._subobjects)
self._unit_subobject = self._n_subobjects == 1
self.set_name(self.name) # this renames all subobjects
self.set_dev(self.dev) # this redevices all subobjects
if count == 1:
return self._subobjects[-1]
return self._subobjects[-count:]
#-------------------------------------------------------------------------------
def set_inp(self, inp = None):
self._inp = inp
self._out = None
return self.inp
#-------------------------------------------------------------------------------
def __getitem__(self, index):
return self._subobjects[index]
#-------------------------------------------------------------------------------
def __len__(self):
return self._n_subobjects
#-------------------------------------------------------------------------------
@abstractmethod
def __call__(self, inp = None): # this function is for calling graph objects
pass
#-------------------------------------------------------------------------------
def ret_inp(self):
return self._inp
#-------------------------------------------------------------------------------
def ret_out(self):
return self._out
#-------------------------------------------------------------------------------
def _set_spec(self, func, spec = None, *args, **kwds):
"""
Broadcasts a specification to all subobjects in one of the following forms:
return [func(subobject, spec, *args, **kwds) for subobject in self._subobjects]
...or...
return [func(subobject, spec[i], *args[i], **kwds[i]) for i, subobject in enumerate(self._subobjects)]
...or...
any such combination.
"""
if type(spec) is tuple:
if len(spec) > 0:
if isinstance(spec[0], structure):
return [func(subobject, spec, *args, **kwds) for subobject in self._subobjects]
if isinstance(spec, (dict, set)):
return [func(subobject, spec, *args, **kwds) for subobject in self._subobjects]
if self._spec_type is None:
return [func(subobject, spec, *args, **kwds) for i, subobject in enumerate(self._subobjects)]
if type(spec) is not self._spec_type:
spec = self._spec_type([spec] * self._n_subobjects)
elif len(spec) != self._n_subobjects:
raise ValueError("Specification incommensurate with number of subobjects")
if len(kwds):
if len(args) == 1:
args = args[0]
if type(args) is not self._spec_type:
args = self._spec_type([args] * self._n_subobjects)
elif len(args) != self._n_subobjects:
raise ValueError("Specification arguments incommensurate with number of subobjects")
return [func(subobject, spec[i], args[i], **kwds) for i, subobject in enumerate(self._subobjects)]
elif len(args):
return [func(subobject, spec[i], *args, **kwds) for i, subobject in enumerate(self._subobjects)]
else:
return [func(subobject, spec[i], **kwds) for i, subobject in enumerate(self._subobjects)]
elif len(args) == 1:
args = args[0]
if type(args) is dict:
kwds = dict(args)
return [func(subobject, spec[i], **kwds) for i, subobject in enumerate(self._subobjects)]
elif type(args) is self._spec_type:
if len(args) != self._n_subobjects:
raise ValueError("Specification arguments incommensurate with number of subobjects")
return [func(subobject, spec[i], args[i]) for i, subobject in enumerate(self._subobjects)]
else:
return [func(subobject, spec[i], args) for i, subobject in enumerate(self._subobjects)]
elif len(args):
return [func(subobject, spec[i], args) for i, subobject in enumerate(self._subobjects)]
else:
return [func(subobject, spec[i]) for i, subobject in enumerate(self._subobjects)]
#-------------------------------------------------------------------------------
def _setup_params(self):
"""
Collates lists of parameter ordered dictionaries into a single list self._params.
Classes inheriting from stem do not possess autonomous parameter lists
but must collate their lists from subobjects, until eventually reaching leaf-derived
classes, each of which may possess an autonomous parameter list associated with
a single TensorFlow call.
"""
assert self._called, "Cannot setup params without object being called"
self._params = []
if self._subobjects:
for obj in self._subobjects:
self._params.extend(obj._setup_params())
self._n_params = len(self._params)
return self._params
#-------------------------------------------------------------------------------
def ret_params(self, param_spec = None, ret_indices = False):
"""
Returns parameter mappings (and associated indices if ret_indices is True)
depending on the value of param_spec:
param_spec = None (or True): returns all parameters
param_spec = a string: returns parameters which includes that string in the name
param_spec = list of booleans/sublists of booleans: allow subobject specification
"""
if not(self._called): return self, stem.ret_params, param_spec, ret_indices
if self._params is None:
self._setup_params()
elif not(len(self._params)):
self._setup_params()
if param_spec is None:
if not ret_indices:
return self._params
else:
return list(range(len(self._params)))
params = []
indices = []
if type(param_spec) is bool:
if param_spec:
params = self._params
indices = list(range(len(self._params)))
elif type(param_spec) is str:
for i, param in enumerate(self._params):
if param_spec in list(param)[0]:
params.append(param)
indices.append(i)
elif len(param_spec) != self._n_subobjects:
raise ValueError("Parameter specification incommensurate with hierarchical structure")
else:
for i, spec in enumerate(param_spec):
params += self._subobjects[i].ret_params(spec)
for param in params:
param_name = list(param)[0]
param_object = param[param_name]
for i, _param in enumerate(self._params):
_param_name = list(_param)[0]
if _param[_param_name] == param_object:
indices.append(i)
if not ret_indices:
return params
else:
return indices
#-------------------------------------------------------------------------------
def _ret_param(self, param_spec = None):
"""
Identical to self.ret_params but with checking of a unique result and returns
the mapping itself.
"""
if not(self._called): return self, stem._ret_param, param_spec
param = self.ret_params(param_spec)
if len(param) != 1:
raise ValueError("Specification " + str(param_spec) +
" returns " + str(len(param)) + " results.")
return param[0]
#-------------------------------------------------------------------------------
def ret_param(self, param_spec = None):
"""
Identical to self._ret_param but returns the graph object rather than mapping.
"""
if not(self._called): return self, stem.ret_param, param_spec
param = self._ret_param(param_spec)
return list(param.values())[0]
#-------------------------------------------------------------------------------
def _setup_moments(self):
"""
Collates lists of moment ordered dictionaries into a single list self._moments.
Classes inheriting from stem do not possess autonomous moment lists
but must collate their lists from subobjects, until eventually reaching leaf-derived
classes, each of which may possess an autonomous moment list associated with
a single TensorFlow call.
"""
self._moments = []
if self._subobjects:
for subobject in self._subobjects:
subobject._setup_moments()
self._moments += subobject._moments
self._n_moments = len(self._moments)
return self._moments
#-------------------------------------------------------------------------------
def ret_moments(self):
"""
Returns moment mappings
"""
return self._moments
#-------------------------------------------------------------------------------
def _setup_updates(self):
"""
Collates lists of update ordered dictionaries into a single list self._updates.
Classes inheriting from stem do not possess autonomous update lists
but must collate their lists from subobjects, until eventually reaching leaf-derived
classes, each of which may possess an autonomous update list associated with
a single TensorFlow call.
"""
self._updates = []
if self._subobjects:
for subobject in self._subobjects:
subobject._setup_updates()
self._updates += subobject._updates
self._n_updates = len(self._updates)
return self._updates
#-------------------------------------------------------------------------------
def ret_updates(self):
"""
Returns update mappings
"""
return self._updates
#-------------------------------------------------------------------------------
def _setup_outputs(self):
"""
Collates lists of output ordered dictionaries into a single list self._outputs.
Classes inheriting from stem do not possess autonomous output lists
but must collate their lists from subobjects, until eventually reaching leaf-derived
classes, each of which may possess an autonomous output list associated with
a single TensorFlow call.
"""
self._outputs = []
for subobject in self._subobjects:
subobject._setup_outputs()
self._outputs += subobject._outputs
self._n_outputs = len(self._outputs)
return self._outputs
#-------------------------------------------------------------------------------
def ret_outputs(self, output_spec = None):
"""
Similar to self.ret_params but for outputs rather than parameters.
"""
if not(self._called): return self, stem.ret_outputs, output_spec
if output_spec is None:
return self._outputs
outputs = []
if type(output_spec) is bool:
if output_spec:
outputs = self._outputs
elif len(output_spec) != self._n_subobjects:
raise ValueError("Outputs specification incommensurate with hierarchical structure")
else:
for i, spec in enumerate(output_spec):
outputs += self._subobjects[i].ret_outputs(spec)
return outputs
#-------------------------------------------------------------------------------
def _ret_output(self, output_spec = None):
"""
Identical to self.ret_outputs but with checking of a unique result and returns
the mapping itself.
"""
if not(self._called): return self, stem._ret_output, output_spec
output = self.ret_outputs(output_spec)
if len(output) != 1:
raise ValueError("Specification " + str(output_spec) +
" returns " + str(len(output)) + " results.")
return output[0]
#-------------------------------------------------------------------------------
def ret_output(self, output_spec = None):
"""
Identical to self._ret_output but returns the graph object rather than mapping.
"""
if not(self._called): return self, stem._ret_output, output_spec
output = self._ret_output(output_spec)
return list(output.values())[0]
#-------------------------------------------------------------------------------
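#-------------------------------------------------------------------------------
# Illustrative sketch (added, not part of deepnodal): a minimal concrete stem
# that chains its subobjects in series. It assumes each subobject is callable
# on an input and returns its output, as leaf-derived classes are.
#
# class series (stem):
#   def __call__(self, inp = None):
#     self._inp = inp
#     out = inp
#     for subobject in self._subobjects:
#       out = subobject(out)
#     self._out = out
#     return self._out
#-------------------------------------------------------------------------------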
|
StarcoderdataPython
|
3360701
|
<gh_stars>0
def get_form():
from .forms import CustomCommentForm
return CustomCommentForm
|
StarcoderdataPython
|
6444473
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import MkldnnAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
from functools import partial
import unittest
from hypothesis import given
import hypothesis.strategies as st
class TestMkldnnShapeOp(MkldnnAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self, *args, **kwargs):
def generate_input(*args, **kwargs):
return np.random.random(kwargs['in_shape']).astype(kwargs[
'in_dtype'])
shape_op = OpConfig(
type="shape",
inputs={"Input": ["input_data"]},
outputs={"Out": ["output_data"]})
program_config = ProgramConfig(
ops=[shape_op],
weights={},
inputs={
"input_data": TensorConfig(data_gen=partial(generate_input,
*args, **kwargs)),
},
outputs=["output_data"])
yield program_config
def sample_predictor_configs(self, program_config):
config = self.create_inference_config(use_mkldnn=True)
yield config, (1e-5, 1e-5)
@given(
in_shape=st.lists(
st.integers(
min_value=1, max_value=3), min_size=1, max_size=9),
in_dtype=st.sampled_from([np.float32, np.uint16, np.int8, np.uint8]))
def test(self, *args, **kwargs):
self.run_test(quant=False, *args, **kwargs)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
8104911
|
<gh_stars>1-10
import os
files = ['maya_modeling_v001.mb','maya_modeling_v002.mb','maya_modeling_v003.mb',
'head_model_v001.mb','head_model_v002.mb','head_model_v003.mb','head_model_v004.mb']
def create_file_list(files):
files_list = []
for file in files:
name , ext = os.path.splitext(file)
name = name.split('_')
del name[-1]
name = "_".join(name)
if name not in files_list:
files_list.append(name)
return files_list
def get_version(files, file_names):
for file in files:
for parent in file_names:
if file.startswith(parent):
print(file)
# if file in file_names:
# print(file)
file_names = (create_file_list(files))
get_version(files, file_names)
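# Added note: for the file list above, create_file_list returns
# ['maya_modeling', 'head_model'], and get_version then prints every file
# whose name starts with one of those base names.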
|
StarcoderdataPython
|
3205175
|
<reponame>jorisvandenbossche/DS-python-geospatial<gh_stars>10-100
# Make a Grey scale plot
gent_f.sum(dim="band").plot.imshow(cmap="Greys", figsize=(9, 5))
|
StarcoderdataPython
|
1806071
|
import edgeiq
import pandas as pd
import os
import cv2
"""
Use pose estimation to determine human poses in realtime. Human Pose returns
a list of key points indicating joints that can be used for applications such
as activity recognition and augmented reality.
To change the engine and accelerator, follow this guide:
https://alwaysai.co/docs/application_development/changing_the_engine_and_accelerator.html
To install app dependencies in the runtime container, list them in the
requirements.txt file.
"""
POSES = [
"Tree_Pose_or_Vrksasana_",
"Extended_Revolved_Triangle_Pose_or_Utthita_Trikonasana_",
"Warrior_I_Pose_or_Virabhadrasana_I_",
"Warrior_II_Pose_or_Virabhadrasana_II_",
"Warrior_III_Pose_or_Virabhadrasana_III_"
]
def main():
pose_estimator = edgeiq.PoseEstimation("alwaysai/human-pose")
pose_estimator.load(engine=edgeiq.Engine.DNN)
print("Loaded model:\n{}\n".format(pose_estimator.model_id))
print("Engine: {}".format(pose_estimator.engine))
print("Accelerator: {}\n".format(pose_estimator.accelerator))
key_points = [
'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',
'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',
'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle']
header = {}
for key_point in key_points:
header['{} x'.format(key_point)] = []
header['{} y'.format(key_point)] = []
for pose in POSES:
df = pd.DataFrame(header)
print('Generating results for {}'.format(pose))
image_paths = edgeiq.list_images(os.path.join('images', 'downloads', pose))
for image_path in image_paths:
try:
image = cv2.imread(image_path)
results = pose_estimator.estimate(image)
if len(results.poses) > 0:
results = results.poses[0].key_points
# Filter only desired key points
results = {key: value for key, value in results.items() if key in key_points}
new_row = pd.DataFrame(header)
for key, value in results.items():
if key not in key_points:
continue
new_row['{} x'.format(key)] = [value[0]]
new_row['{} y'.format(key)] = value[1]
df = df.append(new_row, ignore_index=True)
else:
print('Skipping {}, no pose detected!'.format(image_path))
except Exception as e:
print('Exception on {}! {}'.format(image_path, e))
df.to_csv('{}.csv'.format(pose))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
5093399
|
#!/usr/bin/env python
#
# Author: <NAME> (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2008-2016 California Institute of Technology.
# Copyright (c) 2016-2021 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/dill/blob/master/LICENSE
"""
global settings for Pickler
"""
try:
from pickle import DEFAULT_PROTOCOL
except ImportError:
from pickle import HIGHEST_PROTOCOL as DEFAULT_PROTOCOL
settings = {
#'main' : None,
'protocol' : DEFAULT_PROTOCOL,
'byref' : False,
#'strictio' : False,
'fmode' : 0, #HANDLE_FMODE
'recurse' : False,
'ignore' : False,
}
del DEFAULT_PROTOCOL
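# Illustrative usage (added note, not part of dill): these defaults can be
# overridden at runtime before pickling, e.g.
#
#     import dill
#     dill.settings['recurse'] = True
#     payload = dill.dumps(some_object)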
|
StarcoderdataPython
|
1960328
|
<reponame>JCSDA/mpas-jedi<gh_stars>1-10
import datetime as dt
import os
import sys
import numpy
import numpy as np
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from copy import deepcopy
from datetime import datetime, timedelta
import modelsp_utils as mu
def write_diag_stats():
if os.path.exists(mu.expStats+'.nc'):
os.remove(mu.expStats+'.nc')
path = os.getcwd()
date = path.split('/')[-2]
initDate = datetime.strptime(date,"%Y%m%d%H")
lats, lons = mu.readGrid()
for varName in mu.varNames:
for latBand in range(0, len(mu.latBands)):
tmp = []
meanncs = []
rmsncs = []
msncs = []
for fcTDelta in np.arange(0, mu.fcRange+mu.interval,mu.interval):
fcDate = initDate + timedelta(days=fcTDelta)
fileDate= fcDate.strftime("%Y-%m-%d_%H.%M.%S")
ncFile1 = mu.GFSANA_DIR+'/x1.40962.init.'+fileDate+'.nc'
ncFile2 = '../restart.'+fileDate+'.nc'
tmp = mu.varDiff(varName,ncFile1,ncFile2)
#bin for regions
tmpbin = []
tmpbin = deepcopy(tmp)
tmpbin[np.logical_or(lats < mu.latBandsBounds [latBand+1], lats > mu.latBandsBounds [latBand])] = np.NaN
#save every level stat
newfile = write_stats(tmpbin,varName,mu.latBands[latBand],str(fcTDelta))
meannc = np.nanmean(tmpbin.flatten(),axis=0)
rmsnc = np.sqrt(np.nanmean(tmpbin.flatten()**2,axis=0))
msnc = np.nanmean(tmpbin.flatten()**2,axis=0)
meanncs = np.append(meanncs,meannc)
rmsncs = np.append(rmsncs,rmsnc)
msncs = np.append(msncs,msnc)
#save all levels stat
newfile = write_stats_levall(meanncs,varName,mu.latBands[latBand],'Mean')
newfile = write_stats_levall(rmsncs,varName,mu.latBands[latBand],'RMS')
newfile = write_stats_levall(msncs,varName,mu.latBands[latBand],'MS')
#TODO: move the following functions to plot_utils.py or basic_plot_functions.py
def write_stats(array_f,varNames,band,fcTDelta):
STATS = {}
STATS['Mean'] = np.nanmean(array_f,axis=0)
STATS['MS'] = np.nanmean(array_f**2,axis=0)
STATS['RMS'] = np.sqrt(np.nanmean(array_f**2,axis=0))
STATS['STD'] = np.nanstd(array_f,axis=0)
STATS['Min'] = np.nanmin(array_f,axis=0)
STATS['Max'] = np.nanmax(array_f,axis=0)
if os.path.exists(mu.expStats+'.nc'):
w_nc_fid = Dataset(mu.expStats+'.nc', 'a', format='NETCDF4')
else:
w_nc_fid = Dataset(mu.expStats+'.nc', 'w', format='NETCDF4')
w_nc_fid.description = "MPAS diagnostics/statistics"
w_nc_fid.createDimension('level', 55)
w_nc_fid.createDimension('fcnums', int(mu.fcNums))
w_nc_fid.createDimension('levelP1', 56)
w_nc_fid.createDimension('levelSurface', 1)
for statName in mu.allFileStats:
if varNames in mu.varNames3d:
if (varNames == 'w'):
bias2exp = w_nc_fid.createVariable(mu.expStats+"_day"+fcTDelta+"_"+band+"_"+varNames+"_"+statName,'f4', "levelP1")
else:
bias2exp = w_nc_fid.createVariable(mu.expStats+"_day"+fcTDelta+"_"+band+"_"+varNames+"_"+statName,'f4', "level")
else:
bias2exp = w_nc_fid.createVariable(mu.expStats+"_day"+fcTDelta+"_"+band+"_"+varNames+"_"+statName,'f4', "levelSurface")
w_nc_fid.variables[mu.expStats+"_day"+fcTDelta+'_'+band+'_'+varNames+'_'+statName][:] = STATS[statName]
w_nc_fid.close()
def write_stats_levall(array_f,varNames,band,statName):
if os.path.exists(mu.expStats+'.nc'):
w_nc_fid = Dataset(mu.expStats+'.nc', 'a', format='NETCDF4')
else:
w_nc_fid = Dataset(mu.expStats+'.nc', 'w', format='NETCDF4')
w_nc_fid.description = "MPAS diagnostics/statistics"
bias2exp = w_nc_fid.createVariable(mu.expStats+"_"+band+"_"+varNames+"_"+statName,'f4', "fcnums")
w_nc_fid.variables[mu.expStats+'_'+band+'_'+varNames+'_'+statName][:] = array_f
w_nc_fid.close()
def main():
write_diag_stats()
if __name__ == '__main__': main()
|
StarcoderdataPython
|
6558338
|
from node import Node
# Comment it before submitting
# class Node:
# def __init__(self, left=None, right=None, value=0):
# self.right = right
# self.left = left
# self.value = value
def insert(root, key):
if root is None:
root = Node(value=key)
return root
if key < root.value:
if root.left is not None:
insert(root.left, key)
else:
root.left = Node(value=key)
elif key >= root.value:
if root.right is not None:
insert(root.right, key)
else:
root.right = Node(value=key)
return root
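# Illustrative usage (added sketch, not part of the original module). It
# assumes the Node class imported above (or the commented-out stub); the
# inorder helper is only for this demo.
if __name__ == "__main__":
    def inorder(node):
        if node is None:
            return []
        return inorder(node.left) + [node.value] + inorder(node.right)

    root = None
    for key in [5, 3, 8, 1, 4]:
        root = insert(root, key)
    print(inorder(root))  # [1, 3, 4, 5, 8]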
|
StarcoderdataPython
|
162376
|
<filename>morpfw/authn/pas/user/typeinfo.py<gh_stars>1-10
from .model import UserCollection, UserModel
from .schema import UserSchema
from .path import get_user, get_user_collection
from ..app import App
@App.typeinfo(name='morpfw.pas.user',schema=UserSchema)
def get_typeinfo(request):
return {
'title': 'User',
'description': 'User type',
'schema': UserSchema,
'collection': UserCollection,
'collection_factory': get_user_collection,
'model': UserModel,
'model_factory': get_user,
'internal': True
}
|
StarcoderdataPython
|
1612447
|
from flask import json
from flaskapp.models import Paper
from test.main.base_classes import BaseMCQQuestion
from test.main.base_classes import BaseSubQuestion
from test.main.utils import test_post_request
class PaperGenerateRequest(BaseSubQuestion, BaseMCQQuestion):
def test_paper_generate_request(self):
data = dict(questions=[1, 2, 3], total_marks=30)
response = self.client.post(
"/course/1/papers/generate/request",
data=json.dumps(data),
headers={"Content-Type": "application/json"},
)
self.assertIn(
(b"You should be redirected automatically to target URL: "
b"<a href=/course/1/papers/generate/form/ >"),
response.data,
)
def test_handle_conflicting_questions(self):
data = dict(mcq={
"ask": [1, 3],
"nask": [2, 4]
},
sub={
"ask": [1, 3],
"nask": [2, 4]
})
response = self.client.post(
"/papers/handle/conflicts",
data=json.dumps(data),
headers={"Content-Type": "application/json"},
)
data1 = json.loads(response.get_data(as_text=True))
self.assertEqual(data1["status"], "OK")
def test_mark_distribution_form(self):
self.test_paper_generate_request()
data = {
"Unit:01": "30",
"Knowledge": "10",
"Comprehension": "10",
"Application": "10",
"Easy": "10",
"Medium": "10",
"Hard": "10",
"Que.1.A": "5",
"Que.2.A": "5",
"Que.2.B": "5",
"Que.3.A": "5",
"Que.3.B": "5",
"Que.3.C": "5",
"sub": 15,
"mcq": 15,
}
response, _ = test_post_request(self,
"/course/1/papers/generate/form/",
data)
self.assertIn(b"<title>Mark Distribution</title>", response.data)
response = self.client.post(
"/course/1/papers/confirm/template/",
data=json.dumps(dict(status="OK")),
headers={"Content-Type": "application/json"},
)
self.assertIn(
(b"You should be redirected automatically to target URL: "
b"<a href=/course/1/papers/generate/ >"),
response.data,
)
def test_generate_and_confirm_paper(self):
self.test_paper_generate_request()
self.test_mark_distribution_form()
data = {
"name": "paper1",
"term": "winter",
"exam_date": "2020-10-15",
"time_limit": "2",
}
test_post_request(self, "/course/1/papers/generate/", data, Paper, 1)
# testing generated paper
with self.mail.record_messages() as outbox:
data = {"generate": "YES", "examiner_email": "<EMAIL>"}
test_post_request(self, "papers/confirm/1", data=data)
self.assertEqual(1, len(outbox))
self.assertEqual("Paper for paper1", outbox[0].subject)
def test_pdf_paper(self):
self.test_paper_generate_request()
self.test_mark_distribution_form()
self.test_generate_and_confirm_paper()
response = self.client.get('/papers/1')
self.assertIn(b"Answer the following Multiple choice questions", response.data)
|
StarcoderdataPython
|
8090856
|
"""The module contains package wide custom exceptions and warnings.
"""
class NotProfiledError(ValueError, AttributeError):
"""Exception class to raise if profiling results are acquired before
calling `profile`.
This class inherits from both ValueError and AttributeError to help with
exception handling
"""
|
StarcoderdataPython
|
6453670
|
# Repository: keithfma/py_ice_cascade
from setuptools import setup
import py_ice_cascade as ic
setup(
name=ic.__name__,
version=ic.__version__,
description=ic._description,
url=ic._url,
author=ic._author,
author_email=ic._author_email,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3.5',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License'],
packages=['py_ice_cascade'],
package_data={'py_ice_cascade' : ['data/*']},
install_requires=[],
)
|
StarcoderdataPython
|
11200516
|
# File: cli.py
"""
Inputs:
- predefined config
- GPU
# check with nvidia-smi first
# Run from within tmux
python cli.py run ./output/cli/ 1 wiki.bert_base__joint__seq512
python cli.py run ./output/cli/ 2 wiki.bert_base__joint__seq256
...
"""
import json
import logging
import os
import pickle
import sys
from importlib import import_module
import fire
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def run(output_dir, gpu_id: int, config_name, **override_config):
"""
:param output_dir:
:param gpu_id: GPU (-1 == CPU)
:param config_name: Predefined experiment config
:param override_config: Use kwargs to override config variables, e.g., --foo__bar=1 (nested dict with __)
:return:
"""
output_dir = os.path.join(output_dir, config_name)
logger.info(f'Starting... {config_name}')
if os.path.exists(output_dir):
logger.error(f'Output dir exist already: {output_dir}')
sys.exit(1)
# GPU
if gpu_id < 0:
logger.info('GPU is disabled')
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) # check with nvidia-smi
import torch
if not torch.cuda.is_available():
logger.error('CUDA is not available!')
sys.exit(1)
# Predefined configs
config_name = 'experiments.predefined.' + config_name
try:
package, module_name = config_name.rsplit('.', 1)
module = import_module(package)
config = getattr(module, module_name)
        assert isinstance(config, dict)
except ValueError:
logger.error(f'Cannot load experiment config from: {config_name}')
sys.exit(1)
# Override config
from experiments.predefined import update
from experiments.utils import unflatten
if override_config:
override_config = unflatten(override_config)
logger.info(f'Override config with: {override_config}')
config = update(config, override_config)
from experiments import Experiment
exp = Experiment(**config)
exp.run(mode=2)
# save
os.makedirs(output_dir)
exp.output_dir = output_dir
with open(os.path.join(output_dir, 'experiment.pickle'), 'wb') as f:
# json.dump(exp.to_dict(), f)
pickle.dump(exp.to_dict(), f)
with open(os.path.join(output_dir, 'reports.json'), 'w') as f:
json.dump(exp.reports, f)
exp.save()
logger.info('Done')
def build_script(input_dir, output_dir, gpu_ids, missing_only=False, **override_config):
"""
python cli.py build_script ./output/4fold ./output/4fold_results/ 0,1,2,3 --missing_only=1
:param input_dir:
:param output_dir:
:param gpu_ids:
:param missing_only:
:param override_config:
:return:
"""
from experiments.utils import chunk
configs = [
'bert_base__joint__seq128',
'bert_base__joint__seq256',
'bert_base__joint__seq512',
'bert_base__siamese__seq128__2d',
'bert_base__siamese__seq128__3d',
'bert_base__siamese__seq128__4d',
'bert_base__siamese__seq256__2d',
'bert_base__siamese__seq256__3d',
'bert_base__siamese__seq256__4d',
'bert_base__siamese__seq512__2d',
'bert_base__siamese__seq512__3d',
'bert_base__siamese__seq512__4d',
'xlnet_base__joint__seq128',
'xlnet_base__joint__seq256',
'xlnet_base__joint__seq512',
'xlnet_base__siamese__seq128__2d',
'xlnet_base__siamese__seq128__3d',
'xlnet_base__siamese__seq128__4d',
'xlnet_base__siamese__seq256__2d',
'xlnet_base__siamese__seq256__3d',
'xlnet_base__siamese__seq256__4d',
'xlnet_base__siamese__seq512__2d',
'xlnet_base__siamese__seq512__3d',
'xlnet_base__siamese__seq512__4d',
]
# python cli.py run ./output/cli/ 2 wiki.bert_base__siamese__seq128__2d
runs = []
for k in sorted(os.listdir(input_dir)):
for k_config in configs:
runs.append((os.path.join(output_dir, k), os.path.join(input_dir, k), k_config))
gpu_ids = list(gpu_ids) if isinstance(gpu_ids, list) else [gpu_ids]
missing = 0
for i, gpu_runs in enumerate(chunk(runs, len(gpu_ids))):
gpu_id = gpu_ids[i]
for out_dir, in_dir, cfg in gpu_runs:
if not missing_only or not os.path.exists(out_dir):
missing += 1
print(f'python cli.py run {out_dir} {gpu_id} wiki.{cfg} --data_helper_params__train_dataframe_path={in_dir}/train.csv --data_helper_params__test_dataframe_path={in_dir}/test.csv')
#print(gpu_id)
# pass
print()
print(f'# GPU cores: {len(gpu_ids)}')
print(f'# Configs: {len(configs)}')
    print(f'# Runs: {len(runs)} (missing: {missing})')
print(f'# Folds: {len(os.listdir(input_dir))}')
if __name__ == '__main__':
fire.Fire()
|
StarcoderdataPython
|
3269332
|
import mobula
@mobula.op.register
class AttSamplerGrid:
def __init__(self, scale=1.0, dense=4, iters=5):
self.scale = scale
self.dense = dense
self.iters = iters
def forward(self, data, attx, atty):#data[1, 1, 224, 224] attx[1, 224, 1]
F = self.F._mobula_hack
# attx: (N, W, 1)
# atty: (N, H, 1)
        N, _, in_size, in_sizey = data.shape  # N is the batch size; in_size is the input image width (e.g. 224)
        att_size = attx.shape[1]  # width of the attention map
        att_sizey = atty.shape[1]
        out_size = int(in_size * self.scale)  # output width equals the input width when scale == 1.0
        out_sizey = int(in_sizey * self.scale)  # output height equals the input height when scale == 1.0
        #print('out_sizey',out_sizey)
        # threshold marks the boundary between enlarging and shrinking a region
        threshold = float(self.scale * self.dense * in_size) / att_size
        #print('threshold',threshold)
        # rescale the attention maps to the output size
        attx = attx * out_size
        atty = atty * out_sizey
        #print('attx',attx)
for j in range(self.iters):
max_attx = F.max(attx, 1, keepdims=True) # (N, 1, 1)
#print('max_attx',max_attx)
max_atty = F.max(atty, 1, keepdims=True) # (N, 1, 1)
#print('max_atty',max_atty)
if j == 0:
threshold = F.minimum(F.minimum(
max_attx, max_atty), threshold) # (N, 1, 1)
else:
threshold = F.minimum(max_attx, max_atty)
#print(j)
#print(threshold)
#print('attx',attx)
F.broadcast_minimum(threshold, attx, out=attx)
#print('attx',attx)
F.broadcast_minimum(threshold, atty, out=atty)
sum_x = F.sum(attx, 1, keepdims=True) # (N, 1, 1)
sum_y = F.sum(atty, 1, keepdims=True) # (N, 1, 1)
deltax = (out_size - sum_x) / att_size
deltay = (out_sizey - sum_y) / att_sizey
# compensate the drop value
attx += deltax
atty += deltay
        '''
        This is the same as the original implementation:
        the first element is 1.
        '''
        attx[:, 0] = 1
        #print(attx)
        atty[:, 0] = 1
        # build the inverse transform of the attention distribution
        attxi = F.cumsum(attx, 1)  # cumulative attention coordinates along x
        #print('attxi',attxi)
        attyi = F.cumsum(atty, 1)
        stepx = attxi[:, -1] / out_size
        stepy = attyi[:, -1] / out_sizey  # e.g. stepy tensor([[1.0034]])
        ctx = F.get_ctx(stepx)
        # allocate buffers for the sampling indices
        index_x = F.empty((N, out_sizey, 1), ctx=ctx)  # values should end up in [-1, 1]
        index_y = F.empty((N, out_size, 1), ctx=ctx)
        # inverse mapping step (a discrete inverse transform with interpolation)
mobula.func.map_step(N, attxi, index_y, stepx, att_size, out_size)
mobula.func.map_step(N, attyi, index_x, stepy, att_sizey, out_sizey)
#GG = F.tile(F.reshape(index_x, (N, 1, out_sizey)), (1, out_size, 1))
#MM = F.tile(index_y, (1, 1, out_sizey))
#print('GG',GG)
#print('GG',GG.shape)
#print('MM',MM)
#print('GG',MM.shape)
        return F.tile(F.reshape(index_x, (N, 1, out_sizey)), (1, out_size, 1)),\
               F.tile(index_y, (1, 1, out_sizey))
def backward(self, dy_x, dy_y):
return [0, 0, 0]
def infer_shape(self, in_shape):
#in_shape [torch.Size([1, 1, 342, 400]), torch.Size([1, 342, 1]), torch.Size([1, 400, 1])]
dshape = in_shape[0]
out_size = int(dshape[2] * self.scale)
#dshape1 = in_shape[1]
out_size1 = int(dshape[3] * self.scale)
#print('out_size1',out_size1.shape)
oshape = (dshape[0], out_size, out_size1)
return in_shape, [oshape, oshape]
|
StarcoderdataPython
|
172821
|
from tkinter import *
from tkinter import END
class SimpleInfoBox(Frame):
def __init__(self, master, row, column, background='#ADD8E6'):
Frame.__init__(self, master, background=background)
self.grid(row=row, column=column)
self.info = 'Loading Information'
self.info_text = None
self.main_theme = '#ADD8E6'
self.highlight_theme = '#91B6CE'
self.on_create()
def on_create(self):
info_box = Frame(self)
info_box.grid(row=0, column=1)
inner_info_box = Frame(info_box, width=640, height=240, background=self.highlight_theme)
inner_info_box.grid(row=0, column=0)
upper_border_info_box = Frame(inner_info_box, width=660, height=2, background=self.highlight_theme)
upper_border_info_box.grid(row=0, column=0)
info_text = Text(inner_info_box, bg='#DCDCDC', width=99, height=17)
info_text.grid(row=1, column=0)
lower_border_info_box = Frame(inner_info_box, width=660, height=2, background=self.highlight_theme)
lower_border_info_box.grid(row=2, column=0)
scroll_bar = Scrollbar(inner_info_box)
scroll_bar.grid(row=1, column=1, sticky='nw')
scroll_bar.configure(relief='flat')
info_text.configure(font=('calibri', (10)), relief='flat')
scroll_bar.config(command=info_text.yview)
info_text.config(yscrollcommand=scroll_bar.set)
info_text.insert(END, self.info)
info_text.config(state=DISABLED)
self.info_text = info_text
lower_border_info_box = Frame(info_box, width=714, height=5, background= self.main_theme)
lower_border_info_box.grid(row=3, column=0)
def set_info_text(self, info):
self.info_text.config(state=NORMAL)
self.info_text.delete('1.0', END)
self.info_text.insert(END, info)
self.info_text.config(state=DISABLED)
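# Usage sketch (added, illustrative; not executed here because it opens a window):
# root = Tk()
# box = SimpleInfoBox(root, row=0, column=0)
# box.set_info_text("Status: ready")
# root.mainloop()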
|
StarcoderdataPython
|
9723347
|
# Repository: hboshnak/python_toolbox
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
import sys
from python_toolbox.math_tools import binomial
def test():
assert binomial(7, 3) == 35
assert binomial(0, 0) == 1
assert binomial(1, 0) == 1
assert binomial(0, 1) == 0
assert binomial(1543, 634) == 127103521979248139661884595050302692072114625333816461647571438364482801578062268185939019831927710543644891679108093639691467979466411177318250931548804667267192030646116875003881007119966764992383016174407823444352165249728639847571592229832113238415348489512831060701471487834812550521403788703534654431344329462541634971732197170414906629071055802381322184009159362499960475076746698583466181504060523973736761406471112019069930703149760846502951040
|
StarcoderdataPython
|
12806992
|
# Write a program that gets a text file name from the user and creates a process to run the Windows Notepad system program to open the file.
import subprocess,sys
def cria_arquivo():
nome = str(input("Digite o nome do arquivo:"))
file = open(f"{nome}.txt", "w")
file.write("This is my notepad!")
file.close()
def abre_arquivo():
arquivo = str(input("Digite o nome do arquivo que deseja abrir:"))
print(subprocess.run(["notepad", f"{arquivo}.txt"]))
def main():
print("1-Criar arquivo notepad\n2-Abrir arquivo notepad\n3-Fechar programa")
while True:
opção = int(input("Escolha uma opção:"))
if opção == 1:
cria_arquivo()
elif opção == 2:
abre_arquivo()
elif opção == 3:
sys.exit()
else:
print("Opção inválida")
main()
|
StarcoderdataPython
|
6518207
|
# Repository: Srkline3/25-TkinterAndMQTT
"""
Using a Brickman (robot) as the receiver of messages.
"""
# Same as m2_fake_robot_as_mqtt_sender,
# but have the robot really do the action.
# Implement just FORWARD at speeds X and Y is enough.
import ev3dev.ev3 as ev3
import time
import math
class SimpleRoseBot(object):
def __init__(self):
self.left_motor = Motor('B')
self.right_motor = Motor('C')
def go(self, left_speed, right_speed):
self.left_motor.turn_on(left_speed)
self.right_motor.turn_on(right_speed)
def stop(self):
self.left_motor.turn_off()
self.right_motor.turn_off()
def go_straight_for_seconds(self, seconds, speed):
start_time = time.time()
self.go(speed, speed)
while True:
current_time = time.time()
if current_time - start_time >= seconds:
break
self.stop()
def go_straight_for_inches(self, inches, speed):
delta_s = (inches/self.left_motor.WheelCircumference)*360
start_distance = self.left_motor.get_position()
self.go(speed, speed)
while True:
current_distance = self.left_motor.get_position()
if current_distance - start_distance >= delta_s:
break
self.stop()
class DelegateThatReceives(object):
# self.bobob = SimpleRoseBot()
def forward(self, left_speed, right_speed):
bobob = SimpleRoseBot()
bobob.go(left_speed, right_speed)
def main():
name1 = input("Enter one name (subscriber): ")
name2 = input("Enter another name (publisher): ")
my_delegate = DelegateThatReceives()
    # NOTE: this snippet assumes the course's MQTT helper module is imported
    # elsewhere, e.g. `import mqtt_remote_method_calls as com` (module name assumed).
    mqtt_client = com.MqttClient(my_delegate)
mqtt_client.connect(name1, name2)
time.sleep(1) # Time to allow the MQTT setup.
print()
while True:
time.sleep(0.01) # Time to allow message processing
###############################################################################
# The Motor and ColorSensor classes. USE them, but do NOT modify them.
###############################################################################
class Motor(object):
WheelCircumference = 1.3 * math.pi
def __init__(self, port): # port must be 'B' or 'C' for left/right wheels
self._motor = ev3.LargeMotor('out' + port)
def turn_on(self, speed): # speed must be -100 to 100
self._motor.run_direct(duty_cycle_sp=speed)
def turn_off(self):
self._motor.stop(stop_action="brake")
def get_position(self): # Units are degrees (that the motor has rotated).
return self._motor.position
def reset_position(self):
self._motor.position = 0
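# Intended flow (added note, illustrative): a sender publishes forward(left_speed,
# right_speed) over MQTT; DelegateThatReceives.forward() then drives the robot via
# SimpleRoseBot.go(). Running main() on the robot starts the listening loop.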
|
StarcoderdataPython
|
9638089
|
# Repository: FreeDiscovery/jwzthreading, file: setup.py
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from jwzthreading.jwzthreading import __version__
kw = {
'name': 'jwzthreading',
'version': __version__,
'description': 'Algorithm for threading mail messages.',
'long_description' : '''Contains an implementation of an algorithm for threading mail
messages, as described at http://www.jwz.org/doc/threading.html.''',
'author': "<NAME> et al",
'packages': ['jwzthreading'],
'classifiers': [
        'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
'Topic :: Communications :: Email',
]
}
setup(**kw)
|
StarcoderdataPython
|
281914
|
# Repository: wenhaopeter/read_pytorch_code
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
class RpcAgentTestFixture(object):
@property
def world_size(self):
return 4
@property
def init_method(self):
return torch.testing._internal.dist_utils.INIT_METHOD_TEMPLATE.format(
file_name=self.file_name
)
@property
def rpc_backend(self):
return rpc.backend_registry.BackendType[
torch.testing._internal.dist_utils.TEST_CONFIG.rpc_backend_name
]
@property
def rpc_backend_options(self):
return torch.testing._internal.dist_utils.TEST_CONFIG.build_rpc_backend_options(
self
)
|
StarcoderdataPython
|
89513
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geoinfo', '0006_auto_20160524_1954'),
]
operations = [
migrations.AlterField(
model_name='spatialreport',
name='report_on',
field=models.ForeignKey(related_name='layers_reported_on', to='geoinfo.GISLayerMaster', null=True, blank=True),
),
]
|
StarcoderdataPython
|
9727300
|
# File: AtCoder/ABC066/B/abc066_b.py
s = list(input())[0:-1]
t = 1
while s[0:len(s)//2] != s[len(s)//2:]:
s.pop()
t += 1
print(len("".join(s)))
|
StarcoderdataPython
|
3527273
|
# Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
import tensorflow as tf
import numpy as np
import tritonclient.http as httpclient
from tritonclient.utils import triton_to_np_dtype
def process_image(image_path="img1.jpg"):
img = image.load_img(image_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
return preprocess_input(x)
transformed_img = process_image()
# Setting up client
triton_client = httpclient.InferenceServerClient(url="localhost:8000")
test_input = httpclient.InferInput("input_1", transformed_img.shape, datatype="FP32")
test_input.set_data_from_numpy(transformed_img, binary_data=True)
test_output = httpclient.InferRequestedOutput("predictions", binary_data=True, class_count=1000)
# Querying the server
results = triton_client.infer(model_name="resnet50", inputs=[test_input], outputs=[test_output])
test_output_fin = results.as_numpy('predictions')
print(test_output_fin)
|
StarcoderdataPython
|
4879018
|
# Repository: kaka-lin/ML-Courses
import numpy as np
import matplotlib.pyplot as plt
import math
def sign(x):
if x >= 0:
return 1
else:
return -1
def logistic(x):
return 1 / (1 + math.exp(-x))
def err01(x, y):
if sign(x) == y:
return 0
else:
return 1
def err1(x, y):
return max(0, 1 - y * x)
def err2(x, y):
return pow(max(0, 1 - y * x), 2)
def err3(x, y):
return max(0, -y * x)
def err4(x, y):
return logistic(-y * x)
def err5(x, y):
return math.exp(-y * x)
x_range = np.arange(-2, 2, 0.0001)
y_log = []
err_01 = []
err_1 = []
err_2 = []
err_3 = []
err_4 = []
err_5 = []
y = 1
for i in range(len(x_range)):
y_log.append(logistic(x_range[i]))
err_01.append(err01(x_range[i], y))
err_1.append(err1(x_range[i], y))
err_2.append(err2(x_range[i], y))
err_3.append(err3(x_range[i], y))
err_4.append(err4(x_range[i], y))
err_5.append(err5(x_range[i], y))
plt.figure(figsize = (20, 7))
plt.subplot(321)
plt.plot(x_range, y_log, label = r'$\frac{1}{1 + \mathrm{exp(-w^Tx)}}$', color = 'red' )
plt.plot(x_range, err_01, label = r'$err0/1$' )
plt.legend()
plt.subplot(322)
plt.plot(x_range, err_1, label = r'$max(0, 1-y\mathrm{\mathbf{w^Tx}})$', color = 'blue' ) # max(0, 1 − ywT x)
plt.plot(x_range, err_01, label = r'$[[sign(\mathrm{\mathbf{w^Tx}} )\neq y]]$' )
plt.legend()
plt.subplot(323)
plt.plot(x_range, err_2, label = r'$(max(0, 1-y\mathrm{\mathbf{w^Tx}}))^2$', color = 'black' ) # pow(max(0, 1 − ywT x), 2)
plt.plot(x_range, err_01, label = r'$[[sign(\mathrm{\mathbf{w^Tx}} )\neq y]]$' )
plt.legend()
plt.subplot(324)
plt.plot(x_range, err_3, label = r'$max(0, -y\mathrm{\mathbf{w^Tx}})$', color = 'yellow' ) # max(0, −ywT x)
plt.plot(x_range, err_01, label = r'$[[sign(\mathrm{\mathbf{w^Tx}} )\neq y]]$' )
plt.legend()
plt.subplot(325)
plt.plot(x_range, err_4, label = r'$\theta(-y\mathrm{\mathbf{w^Tx}})$', color = 'green' ) # θ(−ywT x)
plt.plot(x_range, err_01, label = r'$[[sign(\mathrm{\mathbf{w^Tx}} )\neq y]]$' )
plt.legend()
plt.subplot(326)
plt.plot(x_range, err_5, label = r'$exp(-y\mathrm{\mathbf{w^Tx}})$', color = '#AE0000' ) # exp(−ywT x)
plt.plot(x_range, err_01, label = r'$[[sign(\mathrm{\mathbf{w^Tx}} )\neq y]]$' )
plt.legend()
plt.savefig('hw3_2.png', dpi = 100)
plt.show()
|
StarcoderdataPython
|
3300692
|
from urllib import request
url = "http://quotes.toscrape.com/"
# 设置需要打开的链接
resp = request.urlopen(url)
# 使用请求函数打开链接
print(resp.read())
# 打印获取到的信息
|
StarcoderdataPython
|
4964393
|
# Repository: jdavidagudelo/tensorflow-models
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from research.steve import nn
from research.steve.learner import CoreModel
class DeterministicWorldModel(CoreModel):
"""
A simple feed-forward neural network world model, with an option for an ensemble.
"""
@property
def saveid(self):
return "worldmodel"
def create_params(self, env_config, learner_config):
self.obs_dim = np.prod(env_config["obs_dims"])
self.action_dim = env_config["action_dim"]
self.reward_scale = env_config["reward_scale"]
self.discount = env_config["discount"]
self.aux_hidden_dim = self.learner_config["aux_hidden_dim"]
self.transition_hidden_dim = self.learner_config["transition_hidden_dim"]
self.bayesian_config = self.learner_config["bayesian"]
with tf.variable_scope(self.name):
if self.bayesian_config:
self.transition_predictor = nn.EnsembleFeedForwardNet('transition_predictor',
self.obs_dim + self.action_dim, [self.obs_dim],
layers=8, hidden_dim=self.transition_hidden_dim,
get_uncertainty=True,
ensemble_size=self.bayesian_config["transition"][
"ensemble_size"], train_sample_count=
self.bayesian_config["transition"][
"train_sample_count"], eval_sample_count=
self.bayesian_config["transition"][
"eval_sample_count"])
self.done_predictor = nn.EnsembleFeedForwardNet('done_predictor',
self.obs_dim + self.obs_dim + self.action_dim, [],
layers=4, hidden_dim=self.aux_hidden_dim,
get_uncertainty=True,
ensemble_size=self.bayesian_config["transition"][
"ensemble_size"],
train_sample_count=self.bayesian_config["transition"][
"train_sample_count"],
eval_sample_count=self.bayesian_config["transition"][
"eval_sample_count"])
self.reward_predictor = nn.EnsembleFeedForwardNet('reward_predictor',
self.obs_dim + self.obs_dim + self.action_dim, [],
layers=4, hidden_dim=self.aux_hidden_dim,
get_uncertainty=True,
ensemble_size=self.bayesian_config["reward"][
"ensemble_size"],
train_sample_count=self.bayesian_config["reward"][
"train_sample_count"],
eval_sample_count=self.bayesian_config["reward"][
"eval_sample_count"])
else:
self.transition_predictor = nn.FeedForwardNet('transition_predictor', self.obs_dim + self.action_dim,
[self.obs_dim], layers=8,
hidden_dim=self.transition_hidden_dim,
get_uncertainty=True)
self.done_predictor = nn.FeedForwardNet('done_predictor', self.obs_dim + self.obs_dim + self.action_dim,
[], layers=4, hidden_dim=self.aux_hidden_dim,
get_uncertainty=True)
self.reward_predictor = nn.FeedForwardNet('reward_predictor',
self.obs_dim + self.obs_dim + self.action_dim, [], layers=4,
hidden_dim=self.aux_hidden_dim, get_uncertainty=True)
def get_ensemble_idx_info(self):
if self.bayesian_config is not False:
ensemble_idxs = tf.random_shuffle(tf.range(self.transition_predictor.ensemble_size))
transition_ensemble_sample_n = self.transition_predictor.eval_sample_count
reward_ensemble_sample_n = self.reward_predictor.eval_sample_count
ensemble_idxs = ensemble_idxs[:transition_ensemble_sample_n]
return ensemble_idxs, transition_ensemble_sample_n, reward_ensemble_sample_n
else:
return None, 1, 1
def build_training_graph(self, obs, next_obs, actions, rewards, dones, data_size):
info = tf.concat([obs, actions], -1)
predicted_next_obs = self.transition_predictor(info, is_eval=False, reduce_mode="random") + obs
next_info = tf.concat([next_obs, info], -1)
predicted_dones = self.done_predictor(next_info, is_eval=False, reduce_mode="random")
predicted_rewards = self.reward_predictor(next_info, is_eval=False, reduce_mode="random")
done_losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=dones, logits=predicted_dones)
reward_losses = .5 * tf.square(rewards - predicted_rewards)
next_obs_losses = .5 * tf.reduce_sum(tf.square(next_obs - predicted_next_obs), -1)
done_loss = tf.reduce_mean(done_losses)
reward_loss = tf.reduce_mean(reward_losses)
next_obs_loss = tf.reduce_mean(next_obs_losses)
reg_loss = .0001 * (self.done_predictor.l2_loss() +
self.reward_predictor.l2_loss() +
self.transition_predictor.l2_loss())
total_loss = done_loss + reward_loss + next_obs_loss + reg_loss
inspect = (total_loss, done_loss, reward_loss, next_obs_loss, reg_loss)
return total_loss, inspect
def init_extra_info(self, obs):
return tf.zeros_like(obs)
def transition(self, obs, action, extra_info, ensemble_idxs=None, pre_expanded=None):
info = tf.concat([obs, action], -1)
next_obs_delta = self.transition_predictor(info, reduce_mode="none", ensemble_idxs=ensemble_idxs,
pre_expanded=pre_expanded)
if ensemble_idxs is None:
next_obs = tf.expand_dims(obs, -2) + next_obs_delta
next_info = tf.concat([next_obs, tf.expand_dims(info, -2)], -1)
else:
next_obs = obs + next_obs_delta
next_info = tf.concat([next_obs, info], -1)
done = tf.nn.sigmoid(
self.done_predictor(next_info, reduce_mode="none", ensemble_idxs=ensemble_idxs, pre_expanded=True))
extra_info = tf.zeros_like(obs)
return next_obs, done, extra_info
def get_rewards(self, obs, action, next_obs):
next_info = tf.concat([next_obs, obs, action], -1)
reward = self.reward_predictor(next_info, reduce_mode="none")
return reward
|
StarcoderdataPython
|
3228254
|
import cv2
from glob import glob
import numpy as np
import random
from sklearn.utils import shuffle
import pickle
import os
def pickle_images_labels():
images_labels = []
images = glob("gestures/*/*.jpg")
images.sort()
for image in images:
print(image)
label = image[image.find(os.sep)+1: image.rfind(os.sep)]
img = cv2.imread(image, 0)
images_labels.append((np.array(img, dtype=np.uint8), int(label)))
return images_labels
images_labels = pickle_images_labels()
images_labels = shuffle(shuffle(shuffle(shuffle(images_labels))))
images, labels = zip(*images_labels)
print("Length of images_labels", len(images_labels))
train_images = images[:int(5/6*len(images))]
print("Length of train_images", len(train_images))
with open("train_images", "wb") as f:
pickle.dump(train_images, f)
del train_images
train_labels = labels[:int(5/6*len(labels))]
print("Length of train_labels", len(train_labels))
with open("train_labels", "wb") as f:
pickle.dump(train_labels, f)
del train_labels
test_images = images[int(5/6*len(images)):int(11/12*len(images))]
print("Length of test_images", len(test_images))
with open("test_images", "wb") as f:
pickle.dump(test_images, f)
del test_images
test_labels = labels[int(5/6*len(labels)):int(11/12*len(labels))]
print("Length of test_labels", len(test_labels))
with open("test_labels", "wb") as f:
pickle.dump(test_labels, f)
del test_labels
val_images = images[int(11/12*len(images)):]
print("Length of test_images", len(val_images))
with open("val_images", "wb") as f:
pickle.dump(val_images, f)
del val_images
val_labels = labels[int(11/12*len(labels)):]
print("Length of val_labels", len(val_labels))
with open("val_labels", "wb") as f:
pickle.dump(val_labels, f)
del val_labels
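# Reading a split back (added, illustrative sketch; run after the dumps above):
with open("train_images", "rb") as f:
    reloaded_train_images = pickle.load(f)
with open("train_labels", "rb") as f:
    reloaded_train_labels = pickle.load(f)
print("Reloaded", len(reloaded_train_images), "training images")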
|
StarcoderdataPython
|
5172781
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class GlacierRetrievalTypeEnum(object):
"""Implementation of the 'GlacierRetrievalType' enum.
Specifies the way data needs to be retrieved from the external target.
This information will be filled in by Iris and Magneto will pass it along
to the Icebox as it is to support bulk retrieval from Glacier.
Specifies the type of Restore Task.
'kStandard' specifies retrievals that allow to access any of your
archives
within several hours. Standard retrievals typically complete within 3–5
hours.This is the default option for retrieval requests that do not
specify
the retrieval option.
'kBulk' specifies retrievals that are Glacier’s lowest-cost retrieval
option, which can be use to retrieve large amounts, even petabytes, of
data
inexpensively in a day. Bulk retrieval typically complete within 5–12
hours.
'kExpedited' specifies retrievals that allows to quickly access your data
when occasional urgent requests for a subset of archives are required.
For
all but the largest archives (250 MB+), data accessed using Expedited
retrievals are typically made available within 1–5 minutes.
Attributes:
KSTANDARD: TODO: type description here.
KBULK: TODO: type description here.
KEXPEDITED: TODO: type description here.
"""
KSTANDARD = 'kStandard'
KBULK = 'kBulk'
KEXPEDITED = 'kExpedited'
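# Usage sketch (added, illustrative): the enum members are plain strings, e.g.
# retrieval_type = GlacierRetrievalTypeEnum.KBULK   # == 'kBulk'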
|
StarcoderdataPython
|
4863530
|
import os
import sys
import logging
import inspect
import fossor
from fossor.engine import Fossor
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger(__name__)
def test_no_popen_usage():
f = Fossor()
for plugin in f.list_plugins():
for line in inspect.getsourcelines(plugin)[0]:
print(line)
if 'popen' in line:
log.error("os.popen is deprecated as of python 2.6, please use the plugin.py shell_call method instead.")
log.error('os.popen usage located in {plugin} for line {line}'.format(plugin=plugin, line=line))
assert 'popen' not in line
def test_has_license():
fossor_path = fossor.__path__[0]
test_path = fossor_path + '/../test'
paths = [fossor_path, test_path]
for path in paths:
for root, dirs, files in os.walk(path):
for file in files:
if file == '__init__.py':
continue
if os.path.splitext(file)[-1] != '.py':
continue
filepath = os.path.join(root, file)
has_license = False
license = 'BSD-2 Clause license'
with open(filepath, 'rt') as f:
for line in f:
if license in line:
has_license = True
break
if not has_license:
raise Exception(f"File: {filepath} does not have the license: {license}")
|
StarcoderdataPython
|
8048712
|
import torch.nn as nn
from .dropout import LockedDropout
class Embedding(nn.Embedding):
def __init__(self, num_embeddings, embedding_dim, dropoute=.0, dropout=.0, **kwargs):
super(Embedding, self).__init__(num_embeddings, embedding_dim, **kwargs)
self.dropoute = dropoute
self.drop = LockedDropout(dropout)
self.weight.data.uniform_(-0.1, 0.1)
if self.padding_idx is not None:
self.weight.data[self.padding_idx] = 0
def forward(self, input):
emb = super(Embedding, self).forward(input)
if self.training and self.dropoute > 0:
input_flatten = input.flatten()
mask = emb.new(self.num_embeddings).bernoulli_(1 - self.dropoute) / (1 - self.dropoute)
mask = mask[input_flatten].view_as(input).unsqueeze(-1).expand_as(emb)
emb = emb * mask
return self.drop(emb)
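# Usage sketch (added, illustrative; assumes torch and this module's LockedDropout):
# import torch
# emb = Embedding(num_embeddings=1000, embedding_dim=32, dropoute=0.1, dropout=0.2)
# tokens = torch.randint(0, 1000, (8, 16))   # (batch, seq_len)
# out = emb(tokens)                          # (8, 16, 32); embedding dropout is only applied in training mode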
|
StarcoderdataPython
|
3320544
|
from pynwb import TimeSeries
import numpy as np
from bisect import bisect, bisect_left
def get_timeseries_tt(node: TimeSeries, istart=0, istop=None) -> np.ndarray:
"""
For any TimeSeries, return timestamps. If the TimeSeries uses starting_time and rate, the timestamps will be
generated.
Parameters
----------
node: pynwb.TimeSeries
istart: int, optional
Optionally sub-select the returned times - lower bound
istop: int, optional
Optionally sub-select the returned times - upper bound
Returns
-------
numpy.ndarray
"""
if node.timestamps is not None:
return node.timestamps[istart:istop]
else:
if not np.isfinite(node.starting_time):
starting_time = 0
else:
starting_time = node.starting_time
if istop is None:
return np.arange(istart, len(node.data)) / node.rate + starting_time
elif istop > 0:
return np.arange(istart, istop) / node.rate + starting_time
else:
return (
np.arange(istart, len(node.data) + istop - 1) / node.rate
+ starting_time
)
def get_timeseries_maxt(node: TimeSeries) -> float:
"""
Returns the maximum time of any TimeSeries
Parameters
----------
node: pynwb.TimeSeries
Returns
-------
float
"""
if node.timestamps is not None:
return node.timestamps[-1]
elif np.isnan(node.starting_time):
return (len(node.data) - 1) / node.rate
else:
return (len(node.data) - 1) / node.rate + node.starting_time
def get_timeseries_mint(node: TimeSeries) -> float:
"""
Returns the minimum time of any TimeSeries
Parameters
----------
node: pynwb.TimeSeries
Returns
-------
float
"""
if node.timestamps is not None:
return node.timestamps[0]
elif np.isnan(node.starting_time):
return 0
else:
return node.starting_time
def get_timeseries_in_units(node: TimeSeries, istart=None, istop=None):
"""
Convert data into the designated units
Parameters
----------
node: pynwb.TimeSeries
istart: int
istop: int
Returns
-------
numpy.ndarray, str
"""
data = node.data[istart:istop]
if node.conversion and np.isfinite(node.conversion):
data = data * node.conversion
unit = node.unit
else:
unit = None
return data, unit
def timeseries_time_to_ind(node: TimeSeries, time, ind_min=None, ind_max=None) -> int:
"""
Get the index of a certain time for any TimeSeries. For TimeSeries that use timestamps, bisect is used. You can
optionally provide ind_min and ind_max to constrain the search.
Parameters
----------
node: pynwb.TimeSeries
time: float
ind_min: int, optional
ind_max: int, optional
    Returns
    -------
    int
    """
if node.timestamps is not None:
kwargs = dict()
if ind_min is not None:
kwargs.update(lo=ind_min)
if ind_max is not None:
kwargs.update(hi=ind_max)
id_found = bisect_left(node.timestamps, time, **kwargs)
return id_found if id_found < len(node.data) else len(node.data) - 1
else:
if np.isnan(node.starting_time):
starting_time = 0
else:
starting_time = node.starting_time
id_found = int(np.ceil((time - starting_time) * node.rate))
return id_found if id_found < len(node.data) else len(node.data) - 1
def bisect_timeseries_by_times(
timeseries: TimeSeries, starts, duration: float, traces=None
):
"""
Parameters
----------
timeseries: TimeSeries
starts: iterable
time at which to bisect
duration: float
duration of window after start
traces: int
index into the second dim of data
Returns
-------
out: list
list with bisected arrays from data
"""
out = []
for start in starts:
if timeseries.rate is not None:
idx_start = int((start - timeseries.starting_time) * timeseries.rate)
idx_stop = int(idx_start + duration * timeseries.rate)
else:
idx_start = bisect(timeseries.timestamps, start)
idx_stop = bisect(timeseries.timestamps, start + duration, lo=idx_start)
if len(timeseries.data.shape) > 1 and traces is not None:
out.append(timeseries.data[idx_start:idx_stop, traces])
else:
out.append(timeseries.data[idx_start:idx_stop])
return out
def align_by_times_with_timestamps(
timeseries: TimeSeries, starts, duration: float, traces=None
):
"""
Parameters
----------
timeseries: TimeSeries
timeseries with variable timestamps
starts: array-like
starts in seconds
duration: float
duration in seconds
Returns
-------
out: list
list: length=(n_trials); list[0]: array, shape=(n_time, ...)
"""
assert timeseries.timestamps is not None, "supply timeseries with timestamps"
return bisect_timeseries_by_times(timeseries, starts, duration, traces)
def align_by_times_with_rate(
timeseries: TimeSeries, starts, duration: float, traces=None
):
"""
Parameters
----------
timeseries: TimeSeries
timeseries with variable timestamps
starts: array-like
starts in seconds
duration: float
duration in seconds
Returns
-------
out: list
list: length=(n_trials); list[0]: array, shape=(n_time, ...)
"""
assert timeseries.rate is not None, "supply timeseries with start_time and rate"
return np.array(bisect_timeseries_by_times(timeseries, starts, duration, traces))
def align_timestamps_by_trials(
timeseries: TimeSeries, starts, before: float, after: float
):
"""
Parameters
----------
timeseries: TimeSeries
timeseries with variable timestamps
starts: array-like
starts in seconds
duration: float
duration in seconds
Returns
-------
out: list
list: length=(n_trials); list[0]: array, shape=(n_time, ...)
"""
assert timeseries.timestamps is not None, "supply timeseries with timestamps"
out = []
for start in starts:
idx_start = bisect(timeseries.timestamps, start)
idx_stop = bisect(timeseries.timestamps, start + before + after, lo=idx_start)
out.append(timeseries.timestamps[idx_start:idx_stop])
return [list(np.array(i) - i[0] - before) for i in out]
def align_by_trials(
timeseries: TimeSeries,
start_label="start_time",
before=0.0,
after=1.0,
):
"""
Args:
timeseries: TimeSeries
start_label: str
default: 'start_time'
before: float
time after start_label in secs (positive goes back in time)
after: float
time after stop_label in secs (positive goes forward in time)
Returns:
np.array(shape=(n_trials, n_time, ...))
"""
trials = timeseries.get_ancestor("NWBFile").trials
return align_by_time_intervals(timeseries, trials, start_label, before, after)
def align_by_time_intervals(
timeseries: TimeSeries,
intervals,
start_label="start_time",
before=0.0,
after=0.0,
traces=None,
):
"""
Args:
timeseries: pynwb.TimeSeries
intervals: pynwb.epoch.TimeIntervals
start_label: str
default: 'start_time'
before: float
time after start_label in secs (positive goes back in time)
after: float
time after stop_label in secs (positive goes forward in time)
timestamps: bool
if alignment uses timestamps or constant rate and starting time in TimeSeries
Returns:
np.array(shape=(n_trials, n_time, ...))
"""
starts = np.array(intervals[start_label][:]) - before
if timeseries.rate is not None:
return align_by_times_with_rate(
timeseries, starts, duration=after + before, traces=traces
)
else:
return align_by_times_with_timestamps(
timeseries, starts, duration=after + before, traces=traces
)
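if __name__ == "__main__":
    # Minimal usage sketch (added, illustrative): a rate-based TimeSeries, so the
    # timestamps are generated from starting_time and rate by the helpers above.
    _ts = TimeSeries(name="speed", data=np.arange(100.0), unit="m/s",
                     rate=10.0, starting_time=0.0)
    print(get_timeseries_tt(_ts)[:5])        # [0.  0.1 0.2 0.3 0.4]
    print(timeseries_time_to_ind(_ts, 2.5))  # 25
    print(align_by_times_with_rate(_ts, starts=[1.0, 4.0], duration=0.5).shape)  # (2, 5)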
|
StarcoderdataPython
|
11332624
|
# Repository: drewtray/spotify_net
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_retrieve_last.ipynb (unless otherwise specified).
__all__ = ['last_cred', 'last_get', 'last_format']
# Cell
import pandas as pd
import requests
import boto3
import json
# Cell
def last_cred():
secret_name = "last_keys"
region_name = "us-east-1"
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
last_key = json.loads(get_secret_value_response['SecretString'])['last_apiKEY']
last_agent = json.loads(get_secret_value_response['SecretString'])['last_userAGENT']
last_user = json.loads(get_secret_value_response['SecretString'])['last_username']
return last_key, last_agent, last_user
# Cell
def last_get(method, agent, user, key, period='1month'):
headers = {
'user-agent': agent
}
payload = {'method': method,
'period': period,
'user': user,
'api_key': key,
'format': 'json'
}
r = requests.get('https://ws.audioscrobbler.com/2.0/', headers=headers, params=payload)
# print(r.status_code)
return r.json()
# Cell
def last_format(f_tracks, cutoff):
f_tracks = pd.DataFrame(f_tracks['toptracks']['track'])
f_tracks = f_tracks[['name', 'artist', 'playcount']]
f_tracks['artist'] = f_tracks['artist'].apply(lambda x: x['name'])
f_tracks['playcount'] = f_tracks['playcount'].astype(int)
f_tracks = f_tracks.sort_values('playcount', ascending=False)
f_tracks = f_tracks[f_tracks['playcount'] >= cutoff]
return f_tracks
# Cell
if __name__ == '__main__':
API_KEY, USER_AGENT, USERNAME = last_cred()
tracks = last_get('user.gettoptracks', USER_AGENT, USERNAME, API_KEY)
formatted = last_format(tracks, 7)
formatted.to_csv('s3://spotify-net/df_tracks.csv')
print('Retrieved')
|
StarcoderdataPython
|
3288053
|
# File: mmgen/core/scheduler/lr_updater.py
from mmcv.runner import HOOKS, LrUpdaterHook
@HOOKS.register_module()
class LinearLrUpdaterHook(LrUpdaterHook):
"""Linear learning rate scheduler for image generation.
In the beginning, the learning rate is 'base_lr' defined in mmcv.
We give a target learning rate 'target_lr' and a start point 'start'
(iteration / epoch). Before 'start', we fix learning rate as 'base_lr';
After 'start', we linearly update learning rate to 'target_lr'.
Args:
target_lr (float): The target learning rate. Default: 0.
start (int): The start point (iteration / epoch, specified by args
'by_epoch' in its parent class in mmcv) to update learning rate.
Default: 0.
interval (int): The interval to update the learning rate. Default: 1.
"""
def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
super().__init__(**kwargs)
self.target_lr = target_lr
self.start = start
self.interval = interval
def get_lr(self, runner, base_lr):
"""Calculates the learning rate.
Args:
runner (object): The passed runner.
base_lr (float): Base learning rate.
Returns:
float: Current learning rate.
"""
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
assert max_progress >= self.start
if max_progress == self.start:
return base_lr
# Before 'start', fix lr; After 'start', linearly update lr.
factor = (max(0, progress - self.start) // self.interval) / (
(max_progress - self.start) // self.interval)
return base_lr + (self.target_lr - base_lr) * factor
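# Example (added, illustrative): with mmcv's hook registry this scheduler is usually
# selected from a config by its policy name; exact keys depend on the mmcv/mmgen version.
# lr_config = dict(policy='Linear', target_lr=0.0, start=50000, interval=1, by_epoch=False)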
|
StarcoderdataPython
|
17354
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_package import *
from .batch_account import *
from .certificate import *
from .get_application import *
from .get_application_package import *
from .get_batch_account import *
from .get_certificate import *
from .get_pool import *
from .list_batch_account_keys import *
from .pool import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.batch.v20151201 as __v20151201
v20151201 = __v20151201
import pulumi_azure_native.batch.v20170101 as __v20170101
v20170101 = __v20170101
import pulumi_azure_native.batch.v20170501 as __v20170501
v20170501 = __v20170501
import pulumi_azure_native.batch.v20170901 as __v20170901
v20170901 = __v20170901
import pulumi_azure_native.batch.v20181201 as __v20181201
v20181201 = __v20181201
import pulumi_azure_native.batch.v20190401 as __v20190401
v20190401 = __v20190401
import pulumi_azure_native.batch.v20190801 as __v20190801
v20190801 = __v20190801
import pulumi_azure_native.batch.v20200301 as __v20200301
v20200301 = __v20200301
import pulumi_azure_native.batch.v20200501 as __v20200501
v20200501 = __v20200501
import pulumi_azure_native.batch.v20200901 as __v20200901
v20200901 = __v20200901
import pulumi_azure_native.batch.v20210101 as __v20210101
v20210101 = __v20210101
import pulumi_azure_native.batch.v20210601 as __v20210601
v20210601 = __v20210601
else:
v20151201 = _utilities.lazy_import('pulumi_azure_native.batch.v20151201')
v20170101 = _utilities.lazy_import('pulumi_azure_native.batch.v20170101')
v20170501 = _utilities.lazy_import('pulumi_azure_native.batch.v20170501')
v20170901 = _utilities.lazy_import('pulumi_azure_native.batch.v20170901')
v20181201 = _utilities.lazy_import('pulumi_azure_native.batch.v20181201')
v20190401 = _utilities.lazy_import('pulumi_azure_native.batch.v20190401')
v20190801 = _utilities.lazy_import('pulumi_azure_native.batch.v20190801')
v20200301 = _utilities.lazy_import('pulumi_azure_native.batch.v20200301')
v20200501 = _utilities.lazy_import('pulumi_azure_native.batch.v20200501')
v20200901 = _utilities.lazy_import('pulumi_azure_native.batch.v20200901')
v20210101 = _utilities.lazy_import('pulumi_azure_native.batch.v20210101')
v20210601 = _utilities.lazy_import('pulumi_azure_native.batch.v20210601')
|
StarcoderdataPython
|
1731116
|
# Repository: pervcomp/Procem
# -*- coding: utf-8 -*-
"""Module for handling electricity SPOT price data from Nord Pool."""
# Copyright (c) TUT Tampere University of Technology 2015-2018.
# This software has been developed in Procem-project funded by Business Finland.
# This code is licensed under the MIT license.
# See the LICENSE.txt in the project root for the license terms.
#
# Main author(s): <NAME>, <NAME>, <NAME>,
# <NAME> ja <NAME>
import datetime
import json
import time
try:
import adapters.common_utils as common_utils
import adapters.rest_utils as rest_utils
except:
# used when running the module directly
import common_utils
import rest_utils
def timeToMicrosecondFormat(time_str, nzeros=6):
"""Helper function to change the given time string to a microsecond precision."""
dot = time_str.rfind(".")
if dot < 0:
return time_str + "." + "0" * nzeros
else:
return time_str + "0" * (nzeros - (len(time_str) - dot - 1))
def isDSTTime(timestamp):
"""Helper function for determining whether the local time currently uses daylight saving time."""
local_time = time.localtime(timestamp)
return time.daylight > 0 and local_time.tm_isdst > 0
class Nordpool:
"""Class for receiving and handling data from Nord Pool."""
def __init__(self, params, data_queue):
self.__config = params.get("config", {})
self.__currency = params.get("currency", "")
self.__areas = params.get("areas", [])
self.__last_query_date = None
self.__last_update = None
self.__time_interval = params.get("time_interval_s", 3600)
self.__time_interval_min = params.get("time_interval_min_s", 60)
self.__rtl_id_base = int(params.get("rtl_id_base", 0))
self.__path = params.get("iot_ticket_path_base", "/{area_long:}")
self.__name = params.get("iot_ticket_name_base", "{area:}_price")
self.__desc = params.get("description", "{area_long:} price")
# TODO: handle the clock changes better
self.__from_dst_to_normal_days = [datetime.date(2018, 10, 28)]
self.__clock_change_hour_utc = 0
self.__data_info = self.getNordpoolInfo()
self.__data_queue = data_queue
if self.__config.get("write_csv", False):
self.writeNordPoolCsv()
def getNordpoolInfo(self):
"""Loads the data information using the given configurations."""
try:
# determine the unit by making a request to the Nord Pool API
kwargs = {
"config": self.__config,
"currency": self.__currency,
"date": time.time()
}
req = rest_utils.runAPIQuery(**kwargs)
if req.status_code == rest_utils.STATUS_OK:
js = json.loads(req.text)
unit = js["data"]["Units"][0]
else:
unit = ""
# collect the data information for each considered area.
info = {}
count = 0
for area in self.__areas:
info[area] = {}
count += 1
area_long = self.__config.get("long_names", {}).get(area, area)
info[area]["rtl_id"] = self.__rtl_id_base + count
info[area]["name"] = self.__name.format(area=area)
info[area]["path"] = self.__path.format(area_long=area_long)
info[area]["unit"] = unit
info[area]["datatype"] = "float"
info[area]["confidential"] = False
info[area]["description"] = self.__desc.format(area_long=area_long)
return info
except:
return {}
def writeNordPoolCsv(self):
"""Writes the data information to a CSV file."""
try:
delimiter = ";"
filename = self.__config["csv_filename"]
columns = [
"rtl_id",
"area",
"datatype",
"unit",
"name",
"path",
"confidential",
"description"
]
header = delimiter.join(columns)
with open(filename, "w") as file:
file.write(header + "\n")
for area, area_info in self.__data_info.items():
rtl_id = str(area_info["rtl_id"])
datatype = area_info["datatype"]
unit = area_info["unit"]
name = area_info["name"]
path = area_info["path"]
if area_info["confidential"]:
confidential = "x"
else:
confidential = ""
desc = area_info["description"]
file.write(delimiter.join([rtl_id, area, datatype, unit, name, path, confidential, desc]) + "\n")
except:
print(common_utils.getTimeString(), "Error while writing Nord Pool csv file.")
def getData(self):
"""Tries to get new data from the Nord Pool API. If new data is found, it is send to the Procem RTL handler and
the function returns True. Otherwise, the function returns False."""
try:
if self.__last_query_date is None:
timestamp = time.time()
else:
timestamp = (self.__last_query_date + datetime.timedelta(days=1)).timestamp()
kwargs = {
"config": self.__config,
"currency": self.__currency,
"date": timestamp
}
req = rest_utils.runAPIQuery(**kwargs)
if req.status_code != rest_utils.STATUS_OK:
print(common_utils.getTimeString(), "Nord Pool, received status code:", req.status_code)
return False
js = json.loads(req.text)
if self.__currency != js["currency"]:
print(common_utils.getTimeString(), "Nord Pool, received currency:", js["currency"])
return False
data = js["data"]
# use the time zone field in the response to get the proper timestamps
# NOTE: changes to hardcoded timezone since the website started giving wrong timezone on 2018-10-28
time_zone_info = 1
# time_zone_info = int(data["TimeZoneInformation"])
if isDSTTime(timestamp) or datetime.date.fromtimestamp(timestamp) in self.__from_dst_to_normal_days:
time_zone_info += 1
timezone = datetime.timezone(datetime.timedelta(hours=time_zone_info))
result_datetime_format = self.__config["result_datetime_format"]
update_time_str = timeToMicrosecondFormat(data["DateUpdated"])
update_date = datetime.datetime.strptime(update_time_str, result_datetime_format).replace(tzinfo=timezone)
update_timestamp = update_date.timestamp()
price_data = self.getPriceData(data["Rows"], timezone)
received_prices = [len(prices) for area, prices in price_data.items()]
if datetime.date.fromtimestamp(timestamp) in self.__from_dst_to_normal_days:
hour_count = 25
else:
hour_count = 24
if max(received_prices) != hour_count or min(received_prices) != hour_count:
print(common_utils.getTimeString(), " Nord Pool: ", max(received_prices), "/", hour_count,
" prices received.", sep="")
return False
self.sendDataToProcem(price_data)
self.__last_update = update_timestamp
self.__last_query_date = datetime.datetime.fromtimestamp(timestamp).replace(
hour=12, minute=0, second=0, microsecond=0)
return True
except Exception as error:
print(common_utils.getTimeString(), "Nord Pool:", error)
return False
def getWaitingTime(self):
"""Returns the time in seconds that should be waited before making the next data query."""
if self.__last_update is None or self.__last_query_date is None:
# no data received yet at all
return self.__time_interval_min / 2
elif self.__last_query_date.day == datetime.datetime.fromtimestamp(self.__last_update).day:
# last data query was for today, try to get tomorrows data as soon as possible
return self.__time_interval_min
else:
return max(
self.__time_interval_min,
self.__time_interval - (time.time() - self.__last_update) + self.__time_interval_min / 2)
def getPriceData(self, rows, timezone):
"""Parses the price data from the given response data."""
clock_changed = False
price_data = {}
for row in rows:
if row["IsExtraRow"]:
continue
result_datetime_format = self.__config["result_datetime_format"]
start_time_str = timeToMicrosecondFormat(row["StartTime"])
dt = datetime.datetime.strptime(start_time_str, result_datetime_format).replace(
tzinfo=timezone).astimezone(datetime.timezone.utc)
ts = int(dt.timestamp() * 1000)
if (dt.date() in self.__from_dst_to_normal_days and
not clock_changed and
dt.hour == self.__clock_change_hour_utc):
timezone = datetime.timezone(timezone.utcoffset(dt) - datetime.timedelta(hours=1))
clock_changed = True
for area in self.__areas:
if area not in price_data:
price_data[area] = []
column = [column for column in row["Columns"] if column["Name"] == area][0]
if not column["IsValid"] or not column["IsOfficial"] or column["Value"] == "-":
continue
value = float(column["Value"].replace(",", ".").replace(" ", ""))
price_data[area].append({
"v": value,
"ts": ts
})
return price_data
def sendDataToProcem(self, price_data):
"""Sends the price data to Procem RTL handler."""
for area, values in price_data.items():
rtl_id = self.__data_info[area]["rtl_id"]
unit = self.__data_info[area]["unit"]
datatype = self.__data_info[area]["datatype"]
name = self.__data_info[area]["name"]
path = self.__data_info[area]["path"]
confidential = self.__data_info[area]["confidential"]
for value in values:
v = value["v"]
ts = value["ts"]
pkt_str = common_utils.getProcemRTLpkt(name, path, v, ts, unit, datatype, rtl_id, confidential)
packet = bytes(pkt_str, "utf-8")
self.__data_queue.put(packet)
# put empty item to the queue as a mark that the buffer should be emptied
self.__data_queue.put(bytes())
|
StarcoderdataPython
|
8023548
|
# Repository: c-yan/atcoder, file: abc/abc186/abc186d.py
N, *A = map(int, open(0).read().split())
A.sort()
s = sum(A)
result = 0
for i in range(N):
a = A[i]
s -= a
result += s - a * (N - i - 1)
print(result)
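# Added note: after sorting, the loop accumulates sum_{i<j} (A[j] - A[i]), which equals
# the sum of |A[i] - A[j]| over all pairs of the original input, in O(N log N) overall.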
|
StarcoderdataPython
|
3338183
|
import itertools
from datetime import timedelta
from unittest.mock import Mock
from pfun import schedule, success
two_seconds = timedelta(seconds=2)
def test_spaced():
deltas = schedule.spaced(two_seconds).run(None)
assert list(itertools.islice(deltas, 3)) == [two_seconds] * 3
def test_exponential():
deltas = schedule.exponential(two_seconds).run(None)
assert list(itertools.islice(deltas, 3)) == [two_seconds,
two_seconds * 2,
two_seconds * 4]
def test_recurs():
deltas = schedule.recurs(3, schedule.spaced(two_seconds)).run(None)
assert list(deltas) == [two_seconds] * 3
def test_take_while():
deltas = schedule.take_while(
lambda delta: delta < timedelta(seconds=8),
schedule.exponential(two_seconds)
).run(None)
assert list(deltas) == [two_seconds, two_seconds * 2]
def test_until():
deltas = schedule.until(
timedelta(seconds=8),
schedule.exponential(two_seconds)
).run(None)
assert list(deltas) == [two_seconds, two_seconds * 2]
def test_jitter():
mock_random = Mock()
mock_random.random.return_value = success(.5)
modules = Mock()
modules.random = mock_random
deltas = schedule.jitter(schedule.spaced(two_seconds)).run(modules)
assert list(itertools.islice(deltas, 3)) == [timedelta(seconds=2.5)] * 3
|
StarcoderdataPython
|
3434299
|
def test_logger():
import tempfile
from tblogging import TBLogger
with tempfile.TemporaryDirectory() as logdir:
logger = TBLogger(logdir, "test")
logger.register_scalar("mean", "scalar")
logger.freeze()
logger.log(1, {"mean": 0.0})
logger.close()
|
StarcoderdataPython
|
3393366
|
import itertools
__author__ = 'danny'
#Human helper - cheats
NO_JUMP = -1
class Cpu(object):
def __init__(self, memory, pc):
"""Memory should be an array. Which is writable/readable is up to implementation.
pc should be an offset into that memory. Memory word size, a, b and pc should be the same."""
self.memory = memory
self.pc = pc
    def run(self):
        while True:
            a, b, j = self.fetch()
            try:
                self.execute(a, b, j)
            except StopIteration:
                # A halt instruction was executed; end the generator cleanly
                # (raising StopIteration inside a generator is an error in Python 3.7+).
                return
            yield
def fetch(self):
"""Fetch the instruction"""
return self.memory[self.pc:self.pc + 3]
def execute(self, a, b, jump_to):
"""Execute the instruction"""
if a < 0 or b < 0 or (jump_to != NO_JUMP and jump_to < 0):
raise StopIteration
self.memory[b] -= self.memory[a]
if jump_to != NO_JUMP and self.memory[b] <= 0:
self.pc = jump_to
else:
self.pc += 3
class Memory(object):
highest_mark =0
def __init__(self):
self.memory_sections = []
def add_section(self, address, data):
"""Add a section of addressable memory.
address - the location in the memory map,
data - an addressable object. Addresses used with that object will
be relative to the start of the object, not the absolute address.
It must implement len, and getitem. To be writeable, it must implement setitem too.
"""
if address < self.highest_mark:
raise RuntimeError("Sections must be added in order")
self.memory_sections.append((address, data))
self.highest_mark = address + len(data)
def _getsection(self, address):
"""Get a section for a an address.
Returns the section, and the new address relative to the section start"""
for section_start, section in self.memory_sections:
if address >= section_start:
if address < section_start + len(section):
real_addr = address - section_start
return section, real_addr
return None, None
def __getitem__(self, item):
"""Get item at item"""
if type(item) is slice:
return [self[i] for i in range(item.start, item.stop, item.step or 1)]
else:
section, real_addr = self._getsection(item)
if section:
return section[real_addr]
return 0
def __setitem__(self, key, value):
"""Set items if possible"""
section, real_addr = self._getsection(key)
if section:
try:
section[real_addr] = value
except TypeError:
pass
def __len__(self):
return self.highest_mark
def __iter__(self):
for n in range(self.highest_mark):
n = yield(self[n])
def CLW():
"""Clear the work register"""
return [
0, 0, NO_JUMP,
]
def MOV(in_addr, out_addr):
    return CLW() + [
out_addr, out_addr, NO_JUMP,
in_addr, 0, NO_JUMP,
0, out_addr, NO_JUMP,
]
def HALT():
return [
-1, -1, -1
]
def ADD(a, b):
"""Synthesize b += a for subleq"""
    return CLW() + [
a, 0, NO_JUMP,
0, b, NO_JUMP,
]
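# Illustrative trace of ADD(a, b) on the subleq machine above, using mem[0] as
# the scratch "work" cell (the concrete values are made up for the example):
#   CLW()             -> mem[0] -= mem[0]   # work := 0
#   (a, 0, NO_JUMP)   -> mem[0] -= mem[a]   # work := -mem[a]
#   (0, b, NO_JUMP)   -> mem[b] -= mem[0]   # mem[b] := mem[b] + mem[a]
# e.g. with mem[a] = 12 and mem[b] = 13 the final mem[b] is 25.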
def JMP(j):
return [0, 0, j]
#def JIND(aj):
# """Jump indirect - load the value at aj,
# use as address to jump to"""
# output = CLW +
# #Zero the work register
# #Sub the address from it (negative)
# #Sub the address from the j of the jump instruction
# #Sub the jump instruction address from work area, and jump.
def hello():
#hello
registers_start = 0
registers = [0]
rom_start = len(registers)
    rom = tuple([ord(c) for c in "Hello"])
    # 6 words of output RAM
    ram_start = rom_start + len(rom)
    ram = [0] * 6
prog_start = ram_start + len(ram)
#Start code at 0xff
program = (
MOV(rom_start, ram_start) +
MOV(rom_start + 1, ram_start + 1) +
MOV(rom_start + 2, ram_start + 2) +
MOV(rom_start + 3, ram_start + 3) +
MOV(rom_start + 4, ram_start + 4) +
HALT()
)
mem = Memory()
mem.add_section(registers_start, registers)
mem.add_section(rom_start, rom)
mem.add_section(ram_start, ram)
mem.add_section(prog_start, program)
cpu = Cpu(mem, prog_start)
r = cpu.run()
_ = [n for n in r]
print [chr(n) for n in ram[0:5]]
def add12and13():
registers_start = 0
registers = [0]
rom_start = len(registers)
    rom = (12, 13)
    # one word of output RAM
    ram_start = rom_start + len(rom)
    ram = [0]
prog_start = ram_start + len(ram)
program = (
MOV(rom_start, ram_start) +
ADD(rom_start + 1, ram_start) +
HALT()
)
mem = Memory()
mem.add_section(registers_start, registers)
mem.add_section(rom_start, rom)
mem.add_section(ram_start, ram)
mem.add_section(prog_start, program)
cpu = Cpu(mem, prog_start)
r = cpu.run()
try:
while True:
r.next()
except StopIteration:
pass
print "Result is ", mem[ram_start]
if __name__ == "__main__":
hello()
add12and13()
|
StarcoderdataPython
|
3553302
|
<reponame>JoshPattman/Spot-Puppy-Lib<filename>spotpuppy/rotation/mpu6050_rotation_sensor.py
import time

from math import atan, sqrt, pow, radians, degrees
from . import rotation_sensor_base
IS_IMPORTED=False
class sensor(rotation_sensor_base.sensor):
def __init__(self, inverse_x=False, inverse_z=False, accelerometer_bias=0.05):
global IS_IMPORTED
if not IS_IMPORTED:
global mpu6050
from mpu6050 import mpu6050
IS_IMPORTED = True
rotation_sensor_base.sensor.__init__(self, inverse_x=inverse_x, inverse_z=inverse_z)
self.accelerometer_bias=accelerometer_bias
self.mpu = mpu6050(0x68)
self.rotation[0] = 0
self.rotation[1] = 0
self.dx = 0
self.dy = 0
self.ax = 0
self.ay = 0
self.last_update = time.time()
def update(self):
# Get gyro data
data = self.mpu.get_gyro_data()
# Find elapsed time
t = time.time()
        elapsed = t - self.last_update
        self.last_update = t
        # Add the rotation velocity * time
        self.rotation[0] += (data['x'] - self.dx) * elapsed
        self.rotation[1] += (data['y'] - self.dy) * elapsed
# Get accel angle
aang = self._get_acc_ang()
# Add accel angle into the actual angle (slowly introducing it to reduce noise, as it is only really used to stop gyro drift)
self.rotation[0] = (self.rotation[0] * (1 - self.accelerometer_bias)) + (aang[0] * self.accelerometer_bias)
self.rotation[1] = (self.rotation[1] * (1 - self.accelerometer_bias)) + (aang[1] * -self.accelerometer_bias)
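        # The blend above is a complementary filter: with the default
        # accelerometer_bias of 0.05, each update keeps ~95% of the
        # gyro-integrated angle and mixes in ~5% of the accelerometer-derived
        # angle, which slowly cancels gyro drift without letting accelerometer
        # noise dominate. (The 95%/5% split is just the default weighting.)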
def calibrate(self):
data1 = self.mpu.get_gyro_data()
time.sleep(0.5)
data2 = self.mpu.get_gyro_data()
time.sleep(0.5)
data3 = self.mpu.get_gyro_data()
self.dx = (data1['x'] + data2['x'] + data3['x']) / 3
self.dy = (data1['y'] + data2['y'] + data3['y']) / 3
self.ax = 0
self.ay = 0
adata = self._get_acc_ang()
self.ax = adata[0]
self.ay = adata[1]
self.rotation[0] = 0
self.rotation[1] = 0
def _get_acc_ang(self):
data = self.mpu.get_accel_data()
ax = data['y']
ay = data['x']
az = data['z']
xAngle = degrees(atan(ax / (sqrt(pow(ay, 2) + pow(az, 2)))))
yAngle = degrees(atan(ay / (sqrt(pow(ax, 2) + pow(az, 2)))))
zAngle = degrees(atan(sqrt(pow(ax, 2) + pow(ay, 2)) / az))
return [xAngle - self.ax, yAngle - self.ay]
|
StarcoderdataPython
|
313710
|
from __future__ import annotations
import abc
import string
from typing import TypeVar
class _EndStringFormatter(string.Formatter):
""" Custom string formatter to not throw errors when args or kwargs are missing."""
def get_value(self, key, args, kwargs):
if isinstance(key, int):
if len(args) > key:
return args[key]
else:
                return '{{{}}}'.format(key)
return kwargs[key] if key in kwargs else '{{{}}}'.format(key)
_SelfType = TypeVar('_SelfType', bound='IPrefixSuffix')
class IPrefixSuffix(abc.ABC):
""" Interface used for everything in this library.
This interface allows for prefixes and suffixes to be added to a class, and
to allow the formatting of those prefixes/suffixes without throwing errors
when an argument is missing from formatting.
The default prefix and suffix is an empty string.
The default kwargs for prefix and suffix are empty dicts.
Usage:
Assume x is an instance of a class that inherits from IPrefixSuffix.
# Set a prefix while optionally specifying replacement fields as kwargs
x.set_prefix('print before everything')
x.set_prefix('print before with formatting {arg}', arg='my kwarg')
x.set_prefix('print before with formatting {arg}')
# Set a suffix while optionally specifying replacement fields as kwargs
x.set_suffix('print after everything')
x.set_suffix('print after with formatting {arg}', arg='my kwarg')
x.set_suffix('print after with formatting {arg}')
# Set the replacement field values for prefix/suffix
x.set_prefix_replacement_fields({'arg': 'my kwarg'})
x.set_suffix_replacement_fields({'arg': 'my kwarg'})
# Get the prefix/suffix after formatting
x.formatted_prefix()
x.formatted_suffix()
# Get the prefix/suffix before formatting
x.get_prefix()
x.get_suffix()
# Get the current prefix/suffix replacement field mappings
x.get_prefix_replacement_fields()
x.get_suffix_replacement_fields()
"""
_FORMATTER = _EndStringFormatter()
def __init__(self):
self._prefix: str = ''
self._prefix_replacement_fields: dict = {}
self._suffix: str = ''
self._suffix_replacement_fields: dict = {}
def get_prefix(self) -> str:
""" Get the prefix without formatting."""
return self._prefix
def set_prefix(self: _SelfType, val: str, **kwargs) -> _SelfType:
""" Set the prefix while optionally specifying any replacement fields.
Keyword arguments:
val -- The new prefix
Optional kwargs:
Any replacement fields in the prefix
"""
self._prefix = val
if len(kwargs) > 0:
self._prefix_replacement_fields = kwargs
return self
def get_prefix_replacement_fields(self) -> dict:
""" Get the replacement field values for the prefix."""
return self._prefix_replacement_fields
def set_prefix_replacement_fields(self: _SelfType, val: dict) -> _SelfType:
""" Set the replacement field values for the prefix.
Keyword arguments:
        val -- Dict mapping of replacement field names to their values
"""
self._prefix_replacement_fields = val
return self
def get_suffix(self) -> str:
""" Get the suffix without any formatting."""
return self._suffix
def set_suffix(self: _SelfType, val: str, **kwargs) -> _SelfType:
""" Set the suffix while optionally specifying any replacement fields.
        Keyword arguments:
val -- The new suffix
Optional kwargs:
Any replacement fields in the suffix
"""
self._suffix = val
if len(kwargs) > 0:
self._suffix_replacement_fields = kwargs
return self
def get_suffix_replacement_fields(self) -> dict:
""" Get the replacement field values for the suffix."""
return self._suffix_replacement_fields
def set_suffix_replacement_fields(self: _SelfType, val: dict) -> _SelfType:
""" Set the replacement field values for the prefix.
Keyword arguments:
val -- Dict mapping of replacement field names to their values
"""
self._suffix_replacement_fields = val
return self
def formatted_prefix(self) -> str:
""" Get the prefix after formatting using the prefix replacement field mapping."""
return self._custom_format(self._prefix, self._prefix_replacement_fields)
def formatted_suffix(self) -> str:
""" Get the suffix after formatting using the suffix replacement field mapping."""
return self._custom_format(self._suffix, self._suffix_replacement_fields)
def _custom_format(self, text: str, relevant_kwargs: dict = {}) -> str:
""" Format a string with the values in a dict.
The formatter does not throw errors, even if a replacement field exists
in the string that does not have a corresponding positional arg or kwarg
to replace it.
When one of these replacement fields is found, it is
ignored and left in the string as-is.
"""
return IPrefixSuffix._FORMATTER.format(text, **relevant_kwargs)
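

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the fluent API described in the class docstring; the
# subclass name `_Demo` is hypothetical and exists only for this example.
if __name__ == '__main__':
    class _Demo(IPrefixSuffix):
        pass

    demo = _Demo().set_prefix('[{level}] ').set_suffix(' ({code})', code=200)
    # `level` has no replacement value, so it is left in the string as-is:
    print(demo.formatted_prefix() + 'message' + demo.formatted_suffix())
    # -> "[{level}] message (200)"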
|
StarcoderdataPython
|
11251505
|
<reponame>dntoll/loraMesh
from simulator.FakePycomInterface import FakePycomInterface
from simulator.Radio import Radio
from simulator.SimTestView import SimTestView
from view.CompositeView import CompositeView
from simulator.SimulatorSocket import SimulatorSocket
from meshlibrary.PymeshAdapter import PymeshAdapter
from meshlibrary.Message import Message
from time import sleep
class SimTest:
def __init__(self, showOutput = False):
self.radio = Radio()
self.fpi = FakePycomInterface()
self.views = {}
self.clients = {}
self.showOutput = showOutput
def callBack(nodeID, MessageBytes):
print(MessageBytes)
def add(self, nodeId, x, y):
socket = SimulatorSocket(nodeId, x, y, 1.1)
self.radio.add(nodeId, socket)
self.views[nodeId] = SimTestView(nodeId)
self.clients[nodeId] = PymeshAdapter(self.views[nodeId], socket, self.fpi, SimTest.callBack)
def disableRadio(self, nodeId):
self.radio.disableRadio(nodeId)
def clearMessages(self, nodeId):
self.views[nodeId].clearMessages()
def send(self, fromNodeID, to, message):
self.clients[fromNodeID].sendMessage(to, message)
def endSim(self):
self.fpi.die()
def processUntilSilent(self, secondsOfSilence):
self.radio.processUntilSilent(secondsOfSilence)
print("process until silent")
def assertHasMessage(self, nodeID, messageType):
if messageType == Message.TYPE_ACC:
t = "acc"
elif messageType == Message.TYPE_FIND:
t = "find"
else:
t = "message"
hasMess = self.views[nodeID].hasMessage(messageType)
assert hasMess, "No message on node " + str(nodeID) + " of type " + t
def assertHasNoMessage(self, nodeID, messageType):
if messageType == Message.TYPE_ACC:
t = "acc"
elif messageType == Message.TYPE_FIND:
t = "find"
else:
t = "message"
hasMess = self.views[nodeID].hasMessage(messageType)
        assert not hasMess, "Unexpected message on node " + str(nodeID) + " of type " + t
|
StarcoderdataPython
|
9765893
|
<reponame>tomichec/covid-cz-regions
import json
def main():
dates = ["2020-03-25@21-45",
"2020-03-26@12-45",
"2020-03-26@18-04",
"2020-03-27@09-49",
"2020-03-27@18-09",
"2020-03-28@09-44",
"2020-03-29@18-28",
"2020-03-30@09-27",
"2020-03-31@10-06",
"2020-03-31@19-18",
"2020-04-01@17-51",
"2020-04-02@19-49",
"2020-04-03@10-38",
"2020-04-04@09-15",
"2020-04-06@21-40",
"2020-04-08@21-04",
"2020-04-09@12-21",]
# get all time data into array
all_time = {}
for date in dates:
filename = 'covid_' + date + '.json'
with open(filename) as f:
daily = json.load(f)
key = ("%s" % date)
all_time[key] = daily
# transpose the data into desired format
transposed = []
for region_name in all_time["2020-04-09@12-21"].keys():
data = []
for date in dates:
for key in all_time[date].keys():
if key == region_name:
data.append({
'date': date,
'value': all_time[date][key]
})
transposed.append({
"name": region_name,
"data": data
})
print(json.dumps(transposed,indent=4, sort_keys=True, ensure_ascii=False))
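    # Shape of the transformation above (region names and values are illustrative):
    #   input, one file per date:   {"Praha": 123, "Brno": 45, ...}
    #   output, one entry per region:
    #     [{"name": "Praha", "data": [{"date": "2020-03-25@21-45", "value": 123}, ...]}, ...]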
if (__name__ == "__main__"):
main()
|
StarcoderdataPython
|
4878743
|
import ast
import logging
from .environment import Robot
import numpy as np
import itertools
import matplotlib
import matplotlib.style
import pandas as pd
import sys
from collections import defaultdict
from . import plotting_r as plotting
import json
matplotlib.style.use('ggplot')
SPEED = 0.7
logging.basicConfig(filename='reinforcement-learning.log', filemode='w', level=logging.DEBUG)
def createEpsilonGreedyPolicy(Q, epsilon, num_actions):
"""
Creates an epsilon-greedy policy based
on a given Q-function and epsilon.
Returns a function that takes the state
as an input and returns the probabilities
for each action in the form of a numpy array
of length of the action space(set of possible actions).
"""
def policyFunction(state):
action_probabilities = np.ones(num_actions,dtype=float) * epsilon / num_actions
best_action = np.argmax(Q[state])
action_probabilities[best_action] += (1.0 - epsilon)
return action_probabilities
return policyFunction
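# Illustrative numbers: with epsilon=0.1 and num_actions=3 (the values used
# below), every action gets a baseline probability of 0.1/3 ~= 0.033 and the
# greedy action gets an extra 0.9, i.e. roughly [0.033, 0.933, 0.033] when
# action 1 has the highest Q-value for that state.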
def qLearning(env, num_episodes, discount_factor=1, alpha=0.01, epsilon=0.1):
"""
Q-Learning algorithm: Off-policy TD control.
Finds the optimal greedy policy while improving
following an epsilon-greedy policy
"""
# Action value function
# A nested dictionary that maps
# state -> (action -> action-value).
# import pdb; pdb.set_trace()
Q = defaultdict(lambda: np.zeros(3))
with open('model.txt', 'r') as f:
json_file = json.load(f)
old_dict = {ast.literal_eval(k): np.array(ast.literal_eval(v)) for k, v in json_file.items()}
for v, k in old_dict.items():
Q[v][0] = k[0]
Q[v][1] = k[1]
Q[v][2] = k[2]
# Keeps track of useful statistics
stats = plotting.EpisodeStats(
episode_lengths = np.zeros(num_episodes),
episode_rewards = np.zeros(num_episodes)
)
# Create an epsilon greedy policy function
# appropriately for environment action space
policy = createEpsilonGreedyPolicy(Q, epsilon, 3)
# For every episode
for ith_episode in range(num_episodes):
# Reset the environment and pick the first action
state = env.reset_sim()
state = tuple(state['proxy_sensor'][0])
logging.debug('Ith_Episode: {}'.format(ith_episode))
for t in itertools.count():
logging.debug('\tt_episode: {}'.format(t))
# get probabilities of all actions from current state
action_probabilities = policy(state)
logging.debug('\t\taction_probabilities: {}'.format(action_probabilities))
# choose action according to
# the probability distribution
action = np.random.choice(np.arange(len(action_probabilities)), p=action_probabilities)
logging.debug("\t\tActions: {}".format(action))
if action == 0:
action_env = [1.5, 1.5]
elif action == 1:
action_env = [0.5, 1.5]
elif action == 2:
action_env = [1.5, 0.5]
else:
raise Exception("Invalid action!")
# take action and get reward, transit to next state
next_state, reward, done = env.step(action_env)
next_state = tuple(next_state['proxy_sensor'][0])
reward = reward['proxy_sensor']
logging.debug("\t\tReward: {}".format(reward))
# Update statistics
stats.episode_rewards[ith_episode] += reward
stats.episode_lengths[ith_episode] = t
# TD Update
best_next_action = np.argmax(Q[next_state])
logging.debug("\t\tBest Next Action: {}".format(best_next_action))
td_target = reward + discount_factor * Q[next_state][best_next_action]
logging.debug("\t\tTD Target: {}".format(td_target))
td_delta = td_target - Q[state][action]
logging.debug("\t\tTD Delta: {}".format(td_delta))
Q[state][action] += alpha * td_delta
# done is True if episode terminated
if done:
break
state = next_state
epsilon -= 0.001
env.destroy_sim()
return Q, stats
if __name__ == '__main__':
env = Robot()
Q, stats = qLearning(env, 500)
try:
# save the learned model
with open('model.txt', 'w') as f:
json.dump({str(k): str(tuple(v)) for k, v in Q.items()}, f)
except:
import pdb; pdb.set_trace()
plotting.plot_episode_stats(stats)
|
StarcoderdataPython
|
6524767
|
<gh_stars>1-10
from unittest import TestCase
from .helpers.parser import ParserTesterMixin
from jaqalpaq.parser.extract_let import extract_let
from jaqalpaq.parser.tree import make_lark_parser
from jaqalpaq.parser.identifier import Identifier
class ExtractLetTester(ParserTesterMixin, TestCase):
def test_extract_integer(self):
text = "let a 5"
exp_value = {Identifier("a"): 5}
self.run_test(text, exp_value)
def test_extract_negative_integer(self):
text = "let a -5"
exp_value = {Identifier("a"): -5}
self.run_test(text, exp_value)
def test_extract_float(self):
text = "let a 5.5"
exp_value = {Identifier("a"): 5.5}
self.run_test(text, exp_value)
def test_extract_negative_float(self):
text = "let a -5.5"
exp_value = {Identifier("a"): -5.5}
self.run_test(text, exp_value)
def test_extract_duplicate(self):
with self.assertRaises(Exception):
text = "let a 5; let a 6"
parser = make_lark_parser()
extract_let(parser.parse(text))
def run_test(self, text, exp_value):
parser = make_lark_parser()
tree = parser.parse(text)
act_value = extract_let(tree, use_float=True)
self.assertEqual(exp_value, act_value)
|
StarcoderdataPython
|
4915943
|
import re
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("jsview/__init__.py") as fh:
version = re.search(r'^__version__\s*=\s*"(.*)"', fh.read(), re.M).group(1)
setuptools.setup(
name="jsview",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="A smarter JSON indenter",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/fab13n/jsview",
packages=setuptools.find_packages(),
entry_points={
"console_scripts": ['jsview = jsview:main']
},
classifiers=(
"Programming Language :: Python :: 2",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
StarcoderdataPython
|
95496
|
<reponame>zhongtianxie/fm-orchestrator
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from mock import patch, PropertyMock
import pytest
from module_build_service.common import models
from module_build_service.common.modulemd import Modulemd
from module_build_service.common.utils import load_mmd
from module_build_service.common.resolve import get_base_module_mmds
from module_build_service.scheduler.db_session import db_session
from tests import clean_database, make_module_in_db, init_data, read_staged_data
class TestResolve:
def setup_method(self, test_method):
clean_database(False)
def teardown_method(self, test_method):
clean_database()
def test__get_base_module_mmds(self):
"""Ensure the correct results are returned without duplicates."""
init_data(data_size=1, multiple_stream_versions=True)
mmd = load_mmd(read_staged_data("testmodule_v2.yaml"))
deps = mmd.get_dependencies()[0]
new_deps = Modulemd.Dependencies()
for stream in deps.get_runtime_streams("platform"):
new_deps.add_runtime_stream("platform", stream)
new_deps.add_buildtime_stream("platform", "f29.1.0")
new_deps.add_buildtime_stream("platform", "f29.2.0")
mmd.remove_dependencies(deps)
mmd.add_dependencies(new_deps)
mmds = get_base_module_mmds(db_session, mmd)
expected = {"platform:f29.0.0", "platform:f29.1.0", "platform:f29.2.0"}
# Verify no duplicates were returned before doing set operations
assert len(mmds["ready"]) == len(expected)
# Verify the expected ones were returned
actual = set()
for mmd_ in mmds["ready"]:
actual.add("{}:{}".format(mmd_.get_module_name(), mmd_.get_stream_name()))
assert actual == expected
@pytest.mark.parametrize("virtual_streams", (None, ["f29"], ["lp29"]))
def test__get_base_module_mmds_virtual_streams(self, virtual_streams):
"""Ensure the correct results are returned without duplicates."""
init_data(data_size=1, multiple_stream_versions=True)
mmd = load_mmd(read_staged_data("testmodule_v2"))
deps = mmd.get_dependencies()[0]
new_deps = Modulemd.Dependencies()
for stream in deps.get_runtime_streams("platform"):
new_deps.add_runtime_stream("platform", stream)
new_deps.add_buildtime_stream("platform", "f29.2.0")
mmd.remove_dependencies(deps)
mmd.add_dependencies(new_deps)
make_module_in_db("platform:lp29.1.1:12:c11", virtual_streams=virtual_streams)
mmds = get_base_module_mmds(db_session, mmd)
if virtual_streams == ["f29"]:
expected = {
"platform:f29.0.0",
"platform:f29.1.0",
"platform:f29.2.0",
"platform:lp29.1.1"
}
else:
expected = {"platform:f29.0.0", "platform:f29.1.0", "platform:f29.2.0"}
# Verify no duplicates were returned before doing set operations
assert len(mmds["ready"]) == len(expected)
# Verify the expected ones were returned
actual = set()
for mmd_ in mmds["ready"]:
actual.add("{}:{}".format(mmd_.get_module_name(), mmd_.get_stream_name()))
assert actual == expected
@patch(
"module_build_service.common.config.Config.allow_only_compatible_base_modules",
new_callable=PropertyMock, return_value=False
)
def test__get_base_module_mmds_virtual_streams_only_major_versions(self, cfg):
"""Ensure the correct results are returned without duplicates."""
init_data(data_size=1, multiple_stream_versions=["foo28", "foo29", "foo30"])
# Mark platform:foo28 as garbage to test that it is still considered as compatible.
platform = db_session.query(models.ModuleBuild).filter_by(
name="platform", stream="foo28").first()
platform.state = "garbage"
db_session.add(platform)
db_session.commit()
mmd = load_mmd(read_staged_data("testmodule_v2"))
deps = mmd.get_dependencies()[0]
new_deps = Modulemd.Dependencies()
for stream in deps.get_runtime_streams("platform"):
new_deps.add_runtime_stream("platform", stream)
new_deps.add_buildtime_stream("platform", "foo29")
mmd.remove_dependencies(deps)
mmd.add_dependencies(new_deps)
mmds = get_base_module_mmds(db_session, mmd)
expected = {}
expected["ready"] = {"platform:foo29", "platform:foo30"}
expected["garbage"] = {"platform:foo28"}
# Verify no duplicates were returned before doing set operations
assert len(mmds) == len(expected)
for k in expected.keys():
assert len(mmds[k]) == len(expected[k])
# Verify the expected ones were returned
actual = set()
for mmd_ in mmds[k]:
actual.add("{}:{}".format(mmd_.get_module_name(), mmd_.get_stream_name()))
assert actual == expected[k]
|
StarcoderdataPython
|
9706738
|
"""Simple periodic timer"""
from threading import Timer
from typing import Callable, Optional
class PeriodicTimer:
"""Simple periodic timer"""
# Note: callback is not optional but mypy has a bug:
# https://github.com/python/mypy/issues/708
_callback: Optional[Callable[[], None]]
_period: float
_timer: Optional[Timer]
def __init__(self, period: float, callback: Callable[[], None]) -> None:
self._callback = callback
self._period = period
self._timer = None
def start(self) -> None:
"""Starts the periodic execution of the callback function"""
self._tick()
def _tick(self) -> None:
# Execute the callback
assert self._callback is not None
self._callback()
# Enqueue to run again
self._timer = Timer(self._period, self._tick)
self._timer.start()
def stop(self) -> None:
"""Cancel the timers"""
if self._timer is not None:
self._timer.cancel()
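

# Illustrative usage (the callback below is hypothetical):
#
#   beat = PeriodicTimer(5.0, lambda: print("tick"))
#   beat.start()   # runs the callback immediately, then roughly every 5 seconds
#   ...
#   beat.stop()    # cancels the pending Timer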
|
StarcoderdataPython
|
3227449
|
<filename>src/atcoder/abc226/a/sol_0.py
import typing
def main() -> typing.NoReturn:
a, b = input().split('.')
print(int(a) + (int(b[0]) >= 5))
main()
|
StarcoderdataPython
|
8103473
|
<reponame>joeyzhou85/python<gh_stars>1000+
"""
LCS Problem Statement: Given two sequences, find the length of longest subsequence present in both of them.
A subsequence is a sequence that appears in the same relative order, but is not necessarily contiguous.
Example:"abc", "abg" are subsequences of "abcdefgh".
"""
from __future__ import print_function
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def lcs_dp(x, y):
    # find the length of strings
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    L = [[None] * (n + 1) for i in xrange(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0 or j == 0:
                L[i][j] = 0
            elif x[i - 1] == y[j - 1]:
                L[i][j] = L[i - 1][j - 1] + 1
            else:
                L[i][j] = max(L[i - 1][j], L[i][j - 1])
    # backtrack through the table to recover one longest common subsequence
    seq = []
    i, j = m, n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            seq.append(x[i - 1])
            i -= 1
            j -= 1
        elif L[i - 1][j] >= L[i][j - 1]:
            i -= 1
        else:
            j -= 1
    seq.reverse()
    # L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]
    return L[m][n], seq
if __name__=='__main__':
x = 'AGGTAB'
y = 'GXTXAYB'
print(lcs_dp(x, y))
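    # With the backtracking above this prints (4, ['G', 'T', 'A', 'B']):
    # "GTAB" is a longest common subsequence of 'AGGTAB' and 'GXTXAYB'.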
|
StarcoderdataPython
|
8041458
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo_serialization import jsonutils
from oslo_utils import versionutils
import six

from nova import objects
from nova.objects import base
from nova.objects import fields


@base.NovaObjectRegistry.register
class PciDevicePool(base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added numa_node field
    VERSION = '1.1'

    fields = {
        'product_id': fields.StringField(),
        'vendor_id': fields.StringField(),
        'numa_node': fields.IntegerField(nullable=True),
        'tags': fields.DictOfNullableStringsField(),
        'count': fields.IntegerField(),
        }

    def obj_make_compatible(self, primitive, target_version):
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1) and 'numa_node' in primitive:
            del primitive['numa_node']

    # NOTE(pmurray): before this object existed the pci device pool data was
    # stored as a dict. For backward compatibility we need to be able to read
    # it in from a dict
    @classmethod
    def from_dict(cls, value):
        pool_dict = copy.copy(value)
        pool = cls()
        pool.vendor_id = pool_dict.pop("vendor_id")
        pool.product_id = pool_dict.pop("product_id")
        pool.numa_node = pool_dict.pop("numa_node", None)
        pool.count = pool_dict.pop("count")
        pool.tags = pool_dict
        return pool

    # NOTE(sbauza): Before using objects, pci stats was a list of
    # dictionaries not having tags. For compatibility with other modules, let's
    # create a reversible method
    def to_dict(self):
        pci_pool = base.obj_to_primitive(self)
        tags = pci_pool.pop('tags', {})
        for k, v in six.iteritems(tags):
            pci_pool[k] = v
        return pci_pool


@base.NovaObjectRegistry.register
class PciDevicePoolList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    #              PciDevicePool <= 1.0
    # Version 1.1: PciDevicePool version 1.1
    VERSION = '1.1'
    fields = {
        'objects': fields.ListOfObjectsField('PciDevicePool'),
        }


def from_pci_stats(pci_stats):
    """Create and return a PciDevicePoolList from the data stored in the db,
    which can be either the serialized object, or, prior to the creation of the
    device pool objects, a simple dict or a list of such dicts.
    """
    pools = []
    if isinstance(pci_stats, six.string_types):
        try:
            pci_stats = jsonutils.loads(pci_stats)
        except (ValueError, TypeError):
            pci_stats = None
    if pci_stats:
        # Check for object-ness, or old-style storage format.
        if 'nova_object.namespace' in pci_stats:
            return objects.PciDevicePoolList.obj_from_primitive(pci_stats)
        else:
            # This can be either a dict or a list of dicts
            if isinstance(pci_stats, list):
                pools = [objects.PciDevicePool.from_dict(stat)
                         for stat in pci_stats]
            else:
                pools = [objects.PciDevicePool.from_dict(pci_stats)]
    return objects.PciDevicePoolList(objects=pools)
|
StarcoderdataPython
|
9685754
|
import unittest
import os
import time
from flow.core.experiment import Experiment
from flow.core.params import VehicleParams
from flow.controllers import IDMController, RLController, ContinuousRouter
from flow.core.params import SumoCarFollowingParams
from flow.core.params import SumoParams
from flow.core.params import EnvParams, InitialConfig, NetParams
from flow.core.params import TrafficLightParams
from flow.envs import AccelEnv
from flow.networks import RingNetwork
from tests.setup_scripts import ring_road_exp_setup
import numpy as np
os.environ["TEST_FLAG"] = "True"
class TestNumSteps(unittest.TestCase):
"""
Tests that experiment class runs for the number of steps requested.
"""
def setUp(self):
# create the environment and network classes for a ring road
env, _, flow_params = ring_road_exp_setup()
flow_params['sim'].render = False
flow_params['env'].horizon = 10
# instantiate an experiment class
self.exp = Experiment(flow_params)
self.exp.env = env
def tearDown(self):
# free up used memory
self.exp = None
def test_steps(self):
self.exp.run(num_runs=1)
self.assertEqual(self.exp.env.time_counter, 10)
class TestNumRuns(unittest.TestCase):
"""
Tests that the experiment class properly resets as many times as requested,
after the correct number of iterations.
"""
def test_num_runs(self):
# run the experiment for 1 run and collect the last position of all
# vehicles
env, _, flow_params = ring_road_exp_setup()
flow_params['sim'].render = False
flow_params['env'].horizon = 10
exp = Experiment(flow_params)
exp.env = env
exp.run(num_runs=1)
vel1 = [exp.env.k.vehicle.get_speed(exp.env.k.vehicle.get_ids())]
# run the experiment for 2 runs and collect the last position of all
# vehicles
env, _, flow_params = ring_road_exp_setup()
flow_params['sim'].render = False
flow_params['env'].horizon = 10
exp = Experiment(flow_params)
exp.env = env
exp.run(num_runs=2)
vel2 = [exp.env.k.vehicle.get_speed(exp.env.k.vehicle.get_ids())]
# check that the final position is the same in both instances
np.testing.assert_array_almost_equal(vel1, vel2)
class TestRLActions(unittest.TestCase):
"""
Test that the rl_actions parameter acts as it should when it is specified,
and does not break the simulation when it is left blank.
"""
def test_rl_actions(self):
def rl_actions(*_):
return [1] # actions are always an acceleration of 1 for one veh
# create an environment using AccelEnv with 1 RL vehicle
vehicles = VehicleParams()
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="aggressive",
),
num_vehicles=1)
env, _, flow_params = ring_road_exp_setup(vehicles=vehicles)
flow_params['sim'].render = False
flow_params['env'].horizon = 10
exp = Experiment(flow_params)
exp.env = env
exp.run(1, rl_actions=rl_actions)
# check that the acceleration of the RL vehicle was that specified by
# the rl_actions method
self.assertAlmostEqual(exp.env.k.vehicle.get_speed("rl_0"), 1,
places=1)
class TestConvertToCSV(unittest.TestCase):
"""
Tests that the emission files are converted to csv's if the parameter
is requested.
"""
def test_convert_to_csv(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
sim_params = SumoParams(emission_path="{}/".format(dir_path))
vehicles = VehicleParams()
vehicles.add(
veh_id="idm",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="aggressive",
),
num_vehicles=1)
additional_env_params = {
"target_velocity": 8,
"max_accel": 1,
"max_decel": 1,
"sort_vehicles": False,
}
env_params = EnvParams(
horizon=10,
additional_params=additional_env_params)
additional_net_params = {
"length": 230,
"lanes": 1,
"speed_limit": 30,
"resolution": 40
}
net_params = NetParams(additional_params=additional_net_params)
flow_params = dict(
exp_tag="RingRoadTest",
env_name=AccelEnv,
network=RingNetwork,
simulator='traci',
sim=sim_params,
env=env_params,
net=net_params,
veh=vehicles,
initial=InitialConfig(lanes_distribution=1),
tls=TrafficLightParams(),
)
exp = Experiment(flow_params)
exp.run(num_runs=1, convert_to_csv=True)
time.sleep(1.0)
# check that both the csv file exists and the xml file doesn't.
self.assertFalse(os.path.isfile(dir_path + "/{}-emission.xml".format(
exp.env.network.name)))
self.assertTrue(os.path.isfile(dir_path + "/{}-emission.csv".format(
exp.env.network.name)))
time.sleep(0.1)
# delete the files
os.remove(os.path.expanduser(dir_path + "/{}-emission.csv".format(
exp.env.network.name)))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11242155
|
<filename>tests/test_list.py
from django.test import TestCase
from rest_assured.testcases import ListAPITestCaseMixin
from tests import mocks
class TestListTestCase(TestCase):
def get_case(self, **kwargs):
class MockListTestCase(ListAPITestCaseMixin, mocks.MockTestCase):
base_name = 'stuff'
factory_class = mocks.StuffFactory
self.case_class = MockListTestCase
return MockListTestCase(**kwargs)
def test_get_list_url(self):
instance = self.get_case(methodName='dummy')
instance.setUp()
assert instance.get_list_url() == '/stuff/'
def test_get_list_response(self):
instance = self.get_case(methodName='dummy')
instance.setUp()
response = instance.get_list_response()
assert response
assert response.status_code == 200
assert response.data
def test_test_list(self):
instance = self.get_case(methodName='dummy')
instance.setUp()
response = instance.test_list()
assert response
|
StarcoderdataPython
|
6657368
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import numpy as np
from InnerEyeDataQuality.selection.selectors.label_based import SampleSelector
class RandomSelector(SampleSelector):
"""
Selects samples at random
"""
def __init__(self, num_samples: int, num_classes: int, name: str = "Random Selector") -> None:
super().__init__(num_samples=num_samples, num_classes=num_classes, name=name)
def get_relabelling_scores(self, current_labels: np.ndarray) -> np.ndarray:
return np.random.choice(self.num_samples, self.num_samples, replace=False)
def get_ambiguity_scores(self, current_labels: np.ndarray) -> np.ndarray:
raise NotImplementedError
def get_mislabelled_scores(self, current_labels: np.ndarray) -> np.ndarray:
raise NotImplementedError
|
StarcoderdataPython
|
8072497
|
<gh_stars>1-10
# adata scripts/mqtt_subscription
'''
Simple example MQTT
'''
import sys

from adata import echo, Module
from adata.mqtt import Broker
class Define(Module):
name = "mqtt_channels"
menu = "Service"
def task(self):
broker = ScanTopics("test.mosquitto.org", 1883)
broker.app = self.app
broker.subscribe("adata/#")
def menuitem(self):
return {
'name': "MQTT connection example",
}
class ScanTopics(Broker):
def on_message(self, client, userdata, msg):
try:
echo("%s: " % msg.topic, "FFFF00", lf=False)
echo("%s" % msg.payload)
#print("%s %s" % (msg.topic, repr()))
except Exception as e:
echo("Error: %s" % e)
def publish(self, topic, payload):
def on_connect(client, userdata, flags, rc):
if rc!=0:
print("Error %s" % rc)
sys.exit(rc)
client.publish(topic, payload=payload)
self.sent = True
self.sent = False
self.connect(on_connect)
while not self.sent: self.client.loop()
self.client.disconnect()
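
# Illustrative usage of the publish helper (the topic below is only an example):
#
#   broker = ScanTopics("test.mosquitto.org", 1883)
#   broker.publish("adata/test", "hello")   # connects, sends one message, disconnects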
|
StarcoderdataPython
|
5158267
|
<gh_stars>1-10
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import unittest
import subprocess
import shlex
import time
import logging
from sawtooth_intkey.intkey_message_factory import IntkeyMessageFactory
from sawtooth_integration.tests.integration_tools import wait_for_rest_apis
from sawtooth_cli.rest_client import RestClient
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
# This needs to be coordinated with the test's yaml file.
VALIDATOR_COUNT = 3
# Wait times and batch count can be increased for more rigor.
INITIAL_WAIT_TIME = 30
TIME_BETWEEN_BATCHES = 1
CATCH_UP_TIME = 30
TRY_AGAIN_TIME = 60
BATCH_COUNT = 20
class TestPoetSmoke(unittest.TestCase):
def setUp(self):
endpoints = ['rest-api-{}:8080'.format(i)
for i in range(VALIDATOR_COUNT)]
wait_for_rest_apis(endpoints, tries=10)
self.clients = [IntkeyClient('http://' + endpoint)
for endpoint in endpoints]
def test_poet_smoke(self):
'''
$VALIDATOR_COUNT validators are started, each with config,
intkey, and validator registry transaction processors. After
waiting for the validators to register, do the following:
1) Send a batch of intkey 'set' transactions to one validator.
2) Send one batch of intkey 'inc' transactions to each validator
one after the other $BATCH_COUNT times.
3) Loop through the validators, sending each one $BATCH_COUNT
batches of intkey 'inc' transactions.
4) Assert that the validators are in consensus with each.
'''
populate, increment = _make_txns()
# wait for validators to get genesis block
time.sleep(INITIAL_WAIT_TIME)
self.assert_consensus()
LOGGER.info('Sending populate txns')
self.clients[0].send_txns(populate)
time.sleep(TIME_BETWEEN_BATCHES)
# send txns to each validator in succession
for i in range(BATCH_COUNT):
for client in self.clients:
LOGGER.info('Sending batch {} @ {}'.format(i, client.url))
client.send_txns(increment)
time.sleep(TIME_BETWEEN_BATCHES)
# send txns to one validator at a time
for client in self.clients:
for i in range(BATCH_COUNT):
LOGGER.info('Sending batch {} @ {}'.format(i, client.url))
client.send_txns(increment)
time.sleep(TIME_BETWEEN_BATCHES)
# wait for validators to catch up
time.sleep(CATCH_UP_TIME)
self.assert_consensus()
# if the validators aren't in consensus, wait and try again
def assert_consensus(self):
try:
self._assert_consensus()
except AssertionError:
time.sleep(TRY_AGAIN_TIME)
self._assert_consensus()
def _assert_consensus(self):
tolerance = self.clients[0].calculate_tolerance()
LOGGER.info('Verifying consensus @ tolerance {}'.format(tolerance))
# for convenience, list the blocks
for client in self.clients:
url = client.url
LOGGER.info('Blocks @ {}'.format(url))
subprocess.run(shlex.split(
'sawtooth block list --url {}'.format(url)))
list_of_sig_lists = [
client.recent_block_signatures(tolerance)
for client in self.clients
]
sig_list_0 = list_of_sig_lists[0]
for sig_list in list_of_sig_lists[1:]:
self.assertTrue(
any(sig in sig_list for sig in sig_list_0),
'Validators are not in consensus')
class IntkeyClient(RestClient):
def __init__(self, url):
super().__init__(url)
self.url = url
def send_txns(self, txns):
batch = IntkeyMessageFactory().create_batch(txns)
self.send_batches(batch)
def recent_block_signatures(self, tolerance):
signatures = self.list_block_signatures()
        return signatures[:tolerance]
def list_block_signatures(self):
return [block['header_signature'] for block in self.list_blocks()]
def calculate_tolerance(self):
length = len(self.list_blocks())
        # the most recent fifth of the chain, at least 2 blocks
return max(
2,
length // 5)
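        # e.g. a 37-block chain gives max(2, 37 // 5) == 7 recent blocks to compare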
def _make_txns():
fruits = 'fig', 'quince', 'medlar', 'cornel', 'pomegranate'
populate = [('set', fruit, 10000) for fruit in fruits]
increment = [('inc', fruit, 1) for fruit in fruits]
return populate, increment
|
StarcoderdataPython
|
8114430
|
from json import JSONDecodeError
from pathlib import Path
from bx_py_utils.test_utils.assertion import assert_equal
from bx_py_utils.test_utils.snapshot import assert_snapshot
def build_requests_mock_history(mock, only_json=True):
history = []
for request in mock.request_history:
request_info = {
'request': f'{request.method} {request.url}'
}
try:
json_data = request.json()
except JSONDecodeError as err:
if only_json:
raise AssertionError(
f'{request.method} {request.url} without valid JSON: {err} in:\n{err.doc!r}'
)
request_info['text'] = request.text
else:
request_info['json'] = json_data
history.append(request_info)
return history
def assert_json_requests_mock(mock, data):
"""
Check the requests mock history. In this case all requests must be JSON.
e.g.:
with requests_mock.mock() as m:
m.post('http://test.tld', text='resp')
requests.post('http://test.tld', json={'foo': 'bar'})
assert_json_requests_mock(mock=m, data=[{
'request': 'POST http://test.tld/',
'json': {'foo': 'bar'},
}])
"""
history = build_requests_mock_history(mock, only_json=True)
assert_equal(history, data, msg='Request history are not equal:')
def assert_json_requests_mock_snapshot(mock):
"""
Check requests mock history via snapshot. Accepts only JSON requests.
:param mock:
:return:
"""
history = build_requests_mock_history(mock, only_json=True)
assert_snapshot(got=history, self_file_path=Path(__file__))
def assert_requests_mock(mock, data):
"""
Check the requests mock history. Accept mixed "text" and "JSON".
e.g.:
with requests_mock.mock() as m:
m.get('http://test.tld', text='foo')
m.post('http://test.tld', text='bar')
requests.post('http://test.tld', data={'foo': 'one'})
requests.post('http://test.tld', json={'foo': 'two'})
assert_requests_mock(mock=m, data=[{
'request': 'POST http://test.tld/',
'text': 'foo=one',
}, {
'request': 'POST http://test.tld/',
'json': {'foo': 'two'},
}])
"""
history = build_requests_mock_history(mock, only_json=False)
assert_equal(history, data, msg='Request history are not equal:')
def assert_requests_mock_snapshot(mock):
"""
Check requests mock history via snapshot. Accept mixed "text" and "JSON".
"""
history = build_requests_mock_history(mock, only_json=False)
assert_snapshot(got=history, self_file_path=Path(__file__))
|
StarcoderdataPython
|
6443645
|
from pyspark.sql import SparkSession
from pyspark.sql import functions as func
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, FloatType
# creates a SparkSession
spark = SparkSession.builder.appName("MinTemperatures").getOrCreate()
# we are determining the Schema of the Table
schema = StructType([
StructField("stationID", StringType(), True),
StructField("date", IntegerType(), True),
StructField("measure_type", StringType(), True),
StructField("temperature", FloatType(), True)])
# // Read the file as dataframe
df = spark.read.schema(schema).csv("file:///home/sambiase/courses/SparkCourse/1800.csv")
df.printSchema()
# Filter out all but TMIN entries
minTemps = df.filter(df.measure_type == "TMIN")
# Select only stationID and temperature
stationTemps = minTemps.select("stationID", "temperature")
# Aggregate to find minimum temperature for every station
minTempsByStation = stationTemps.groupBy("stationID").min("temperature")
minTempsByStation.show()
# Convert temperature to fahrenheit and sort the dataset
# withColumn creates a new column called temperature
minTempsByStationF = minTempsByStation.withColumn("temperature",
func.round(func.col("min(temperature)") * 0.1 * (9.0 / 5.0) + 32.0, 2))\
.select("stationID", "temperature").sort("temperature")
# Collect, format, and print the results
results = minTempsByStationF.collect()
for result in results:
print(result[0] + "\t{:.2f}F".format(result[1]))
spark.stop()
|
StarcoderdataPython
|
291218
|
<gh_stars>10-100
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import copy
import decimal
import json
import os
import uuid
import boto3
import math
import urllib3
from decimal import Decimal
from datetime import datetime
from boto3.dynamodb.conditions import Key, Attr
from botocore.config import Config
MAX_INPUTS_PER_JOB = int(os.environ['MediaConvertMaxInputJobs'])
ACCELERATION_MEDIA_CONVERT_QUEUE = os.environ['MediaConvertAcceleratorQueueArn']
OUTPUT_BUCKET = os.environ['OutputBucket']
ssm = boto3.client('ssm')
class Mp4Generator:
def __init__(self, event):
from MediaReplayEngineWorkflowHelper import ControlPlane
self._controlPlane = ControlPlane()
tmpEvent = self.__get_dataplane_payload(event)
self.__eventName = tmpEvent['Event']['Name']
self.__program = tmpEvent['Event']['Program']
self.__profile = tmpEvent['Profile']['Name']
self.__framerate = tmpEvent['Profile']['ProcessingFrameRate']
from MediaReplayEnginePluginHelper import DataPlane
self._dataplane = DataPlane(tmpEvent)
self.__event = event
def __get_dataplane_payload(self, event):
program = event['ReplayRequest']['Program']
event = event['ReplayRequest']['Event']
event_details = self._controlPlane.get_event(event, program)
profile_name = event_details['Profile']
profile_detail = self._controlPlane.get_profile(profile_name)
final_event = {
"Event": {
"Program": program,
"Name": event
},
"Profile": {
"Name": profile_name,
"ChunkSize": profile_detail['ChunkSize'],
"ProcessingFrameRate": profile_detail['ProcessingFrameRate'],
"Classifier": profile_detail['Classifier']['Name'],
"MaxSegmentLengthSeconds": profile_detail['MaxSegmentLengthSeconds']
}
}
return final_event
def generate_mp4(self):
replay_id = self.__event['ReplayRequest']['ReplayId']
audio_track = self.__event['ReplayRequest']['AudioTrack']
event_details = self._controlPlane.get_event(self.__eventName, self.__program)
profile_name = event_details['Profile']
output_resolutions = self.__event['ReplayRequest']['Resolutions']
# Segments that have been created for the current Replay
replay_segments = self._dataplane.get_all_segments_for_replay(self.__program, self.__eventName, replay_id)
print('---------------- get_all_segments_for_replay -----------------------')
print(replay_segments)
#batch_id = f"{str(self.__eventName).lower()}-{str(self.__program).lower()}-{replay_id}"
batch_id = f"{str(uuid.uuid4())}"
job_metadata = []
resolution_thumbnail_mapping = []
# For each Resolution in the Replay Request, create Media Convert Jobs
# by configuring the Output Resolution and Input Clip settings using Replay Segment Information
for resolution in output_resolutions:
# Contains Job IDs for all the MP4 Jobs. We will need to
# check if all Jobs have completed before updating the Replay request with the S3 location
all_mp4_clip_job_metadata = []
# Create Input settings per segment
input_job_settings = []
for segment in replay_segments:
startTime = segment['OptoStart'] if 'OptoStart' in segment else segment['Start']
endTime = segment['OptoEnd'] if 'OptoEnd' in segment else segment['End']
chunks = self._dataplane.get_chunks_for_segment(startTime, endTime, self.__program, self.__eventName, profile_name )
input_settings = self.__build_mp4_input(chunks, audio_track, startTime, endTime)
print('---------------- __build_mp4_input -----------------------')
print(input_settings)
input_job_settings.extend(input_settings)
groups_of_input_settings = [input_job_settings[x:x+MAX_INPUTS_PER_JOB] for x in range(0, len(input_job_settings), MAX_INPUTS_PER_JOB)]
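            # Chunk the inputs so that no single MediaConvert job exceeds the
            # configured input limit, e.g. 70 inputs with MAX_INPUTS_PER_JOB=30
            # become groups of 30, 30 and 10 (numbers illustrative).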
index = 1
res = resolution.split(' ')[0]
print('---------------- groups_of_input_settings -----------------------')
print(groups_of_input_settings)
for inputsettings in groups_of_input_settings:
# Each Input setting will have the relevant AudioTrack embedded.
print('---------------- inputsettings -----------------------')
print(inputsettings)
job, job_output_destination = self.__create_mp4_clips(inputsettings, index, batch_id, res.strip(), resolution_thumbnail_mapping)
print('---------------- after __create_mp4_clips -----------------------')
print(job)
if job != None:
all_mp4_clip_job_metadata.append({
"JobsId": job['Job']['Id'],
"OutputDestination": job_output_destination,
"BatchId": batch_id
})
index += 1
job_metadata.append({
"Resolution": res,
"JobMetadata": all_mp4_clip_job_metadata,
"ThumbnailLocations": resolution_thumbnail_mapping
})
return job_metadata
def __build_mp4_input(self, chunks ,audioTrack, start_time, end_time):
inputs = []
# Only chunk, so will have Start and End Clipping time
if len(chunks) == 1:
inputClippings = []
inputClip = {}
ic = {}
endtime = self._dataplane.get_mediaconvert_clip_format(end_time, program=self.__program, event=self.__eventName, profile=self.__profile, frame_rate=self.__framerate)
starttime = self._dataplane.get_mediaconvert_clip_format(start_time, program=self.__program, event=self.__eventName, profile=self.__profile, frame_rate=self.__framerate)
#endtime, starttime = self._dataplane.get_mediaconvert_clip_format(end_time), self._dataplane.get_mediaconvert_clip_format(start_time)
ic['EndTimecode'] = str(endtime)
ic['StartTimecode'] = str(starttime)
#If we have a single Chunk we don't need the Endtime Configured if it is less than Start time. Remove it.
if datetime.strptime(endtime, "%H:%M:%S:%f") < datetime.strptime(starttime, "%H:%M:%S:%f"):
ic.pop('EndTimecode', None)
#ic['EndTimecode'], ic['StartTimecode'] = get_clip_timings(segment, event)
inputClippings.append(ic)
inputClip['InputClippings'] = inputClippings
#------------- Update MediaConvert AudioSelectors Input -------------
# Leave the default Input AudioSelectors as is if we are dealing with default Track or only one.
# If we have multiple AudioTracks, this lambda will be provided with one.
inputClip['AudioSelectors'] = {
"Audio Selector 1": {
"Tracks": [
int(audioTrack)
],
"DefaultSelection": "NOT_DEFAULT",
"SelectorType": "TRACK"
}
}
inputClip['AudioSelectorGroups'] = {
"Audio Selector Group 1": {
"AudioSelectorNames": [
"Audio Selector 1"
]
}
}
#------------- Update MediaConvert AudioSelectors Input Ends -------------
inputClip['VideoSelector'] = {}
inputClip['TimecodeSource'] = "ZEROBASED"
inputClip['FileInput'] = f"s3://{chunks[0]['S3Bucket']}/{chunks[0]['S3Key']}"
inputs.append(inputClip)
elif len(chunks) > 1:
for chunk_index in range(len(chunks)):
ic = {}
inputClippings = []
inputClip = {}
if chunk_index == 0: # First Chunk
ic['StartTimecode'] = self._dataplane.get_mediaconvert_clip_format(start_time, program=self.__program, event=self.__eventName, profile=self.__profile, frame_rate=self.__framerate)
inputClippings.append(ic)
inputClip['InputClippings'] = inputClippings
elif chunk_index == len(chunks)-1: # Last Chunk
ic['EndTimecode'] = self._dataplane.get_mediaconvert_clip_format(end_time, program=self.__program, event=self.__eventName, profile=self.__profile, frame_rate=self.__framerate)
inputClippings.append(ic)
inputClip['InputClippings'] = inputClippings
                else: # Sandwiched chunks in the middle have no clippings
inputClip['InputClippings'] = []
#------------- Update MediaConvert AudioSelectors Input -------------
# Leave the default Input AudioSelectors as is if we are dealing with default Track or only one.
# If we have multiple AudioTracks, this lambda will be provided with one.
inputClip['AudioSelectors'] = {
"Audio Selector 1": {
"Tracks": [
int(audioTrack)
],
"DefaultSelection": "NOT_DEFAULT",
"SelectorType": "TRACK"
}
}
inputClip['AudioSelectorGroups'] = {
"Audio Selector Group 1": {
"AudioSelectorNames": [
"Audio Selector 1"
]
}
}
#------------- Update MediaConvert AudioSelectors Input Ends -------------
inputClip['VideoSelector'] = {}
inputClip['TimecodeSource'] = "ZEROBASED"
inputClip['FileInput'] = f"s3://{chunks[chunk_index]['S3Bucket']}/{chunks[chunk_index]['S3Key']}"
inputs.append(inputClip)
return inputs
def __create_mp4_clips(self, inputSettings, index, batch_id, resolution, resolution_thumbnail_mapping):
if len(inputSettings) == 0:
            return None, None
try:
job_settings_filename = os.path.join(os.path.dirname(__file__), 'job_settings_mp4.json')
with open(job_settings_filename) as json_data:
jobSettings = json.load(json_data)
job_output_destination = f"s3://{OUTPUT_BUCKET}/mp4replay/{batch_id}/{resolution}/"
jobSettings["OutputGroups"][0]["OutputGroupSettings"]["FileGroupSettings"]["Destination"] = job_output_destination
#jobSettings["OutputGroups"][0]['Outputs'][0]["NameModifier"] = f"Part-{index}"
# Set Resolution to the Output Groups for Video
res = resolution.split(' ')[0]
video_res_width, video_res_height = self.__get_output_jobsetting_by_resolution(res)
jobSettings["OutputGroups"][0]['Outputs'][0]["VideoDescription"]["Width"] = video_res_width
jobSettings["OutputGroups"][0]['Outputs'][0]["VideoDescription"]["Height"] = video_res_height
# Set Thumbnail location as another Output Group
thumbnail_destination = f"s3://{OUTPUT_BUCKET}/mp4replay/{batch_id}/thumbnails/{resolution}/"
jobSettings["OutputGroups"][1]['OutputGroupSettings']['FileGroupSettings']['Destination'] = thumbnail_destination
jobSettings["OutputGroups"][1]['Outputs'][0]["VideoDescription"]["Width"] = video_res_width
jobSettings["OutputGroups"][1]['Outputs'][0]["VideoDescription"]["Height"] = video_res_height
resolution_thumbnail_mapping.append({
resolution: thumbnail_destination
})
jobSettings['Inputs'] = inputSettings
# Convert the video using AWS Elemental MediaConvert
jobMetadata = { 'BatchId': batch_id }
return self.__create_job(jobMetadata, jobSettings), job_output_destination
except Exception as e:
print ('Exception: %s' % e)
raise
def __create_job(self, jobMetadata, jobSettings):
# get the account-specific mediaconvert endpoint for this region
endpoint = ssm.get_parameter(Name='/MRE/ClipGen/MediaConvertEndpoint', WithDecryption=False)['Parameter']['Value']
# Customizing Exponential backoff
# Retries with additional client side throttling.
boto_config = Config(
retries = {
'max_attempts': 10,
'mode': 'adaptive'
}
)
# add the account-specific endpoint to the client session
client = boto3.client('mediaconvert', config=boto_config, endpoint_url=endpoint, verify=False)
mediaConvertRole = os.environ['MediaConvertRole']
return client.create_job(Role=mediaConvertRole, UserMetadata=jobMetadata, Settings=jobSettings)
def __get_output_jobsetting_by_resolution(self, resolution):
if "360p" in resolution:
return 640, 360
elif "480p" in resolution:
return 854, 480
elif "720p" in resolution:
return 1280, 720
elif "16:9" in resolution:
return 1920, 1080
elif "1:1" in resolution:
return 1080, 1080
elif "4:5" in resolution:
return 864, 1080
elif "9:16" in resolution:
return 608, 1080
elif "2K" in resolution:
return 2560, 1440
        elif "4K" in resolution:
            return 3840, 2160
        # Explicit fallback so callers never try to unpack None for an unknown resolution label
        raise ValueError(f"Unsupported resolution: {resolution}")
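# Illustrative sketch (not part of the original Lambda): the chunk loop above emits one
# MediaConvert input per HLS chunk, and a middle ("sandwich") chunk ends up shaped roughly
# like the dict below. The bucket/key and the audio track number are hypothetical placeholders.
_EXAMPLE_MIDDLE_CHUNK_INPUT = {
    "InputClippings": [],  # middle chunks carry no start/end clipping
    "AudioSelectors": {
        "Audio Selector 1": {
            "Tracks": [1],
            "DefaultSelection": "NOT_DEFAULT",
            "SelectorType": "TRACK",
        }
    },
    "AudioSelectorGroups": {
        "Audio Selector Group 1": {"AudioSelectorNames": ["Audio Selector 1"]}
    },
    "VideoSelector": {},
    "TimecodeSource": "ZEROBASED",
    "FileInput": "s3://example-bucket/path/to/chunk.ts",
}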
|
StarcoderdataPython
|
1665125
|
import numpy as np
from astropy import wcs
def makeGaussian(size, fwhm=3, center=None):
x = np.arange(0, size, 1, float)
y = x[:,np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
return np.exp(-4*np.log(2)*((x-x0)**2 + (y-y0)**2)/fwhm**2)
def makeWCS(coords=[0.,0.],coord_types=["RA---TAN","DEC--TAN"],xsize=1,ysize=1,pa=0.,scale=[1.,1.],sip=None):
w = wcs.WCS(naxis=2)
w.wcs.ctype = coord_types
w.wcs.crpix = [int(np.floor(xsize/2.)),int(np.floor(ysize/2.))]
w.wcs.crval = coords
w.wcs.cdelt = np.array(scale) / 3600.
cpa = np.cos(np.radians(pa%360.))
spa = np.sin(np.radians(pa%360.))
w.wcs.pc = np.array([[cpa,-spa],[spa,cpa]])
if sip is not None:
w.sip = sip
return w
def verifyData(dat1,dat2):
assert dat1.shape[0] == dat2.shape[0]
assert dat1.shape[1] == dat2.shape[1]
# np.testing.assert_allclose(dat1,dat2,atol=1e-3)
for y in range(dat1.shape[0]):
for x in range(dat1.shape[1]):
verifyPoint(dat1,dat2,x,y)
def verifyPoint(dat1, dat2, x, y, threshold=1.e-3):
if abs(dat1[y,x] - dat2[y,x]) > threshold:
print(x, y, dat1[y,x], dat2[y,x], abs(dat1[y,x] - dat2[y,x]))
assert abs(dat1[y,x] - dat2[y,x]) <= threshold
def verifyImage(im1,im2):
assert im1.out_path == im2.out_path
assert im1.name == im2.name
assert im1.xsize == im2.xsize
assert im1.ysize == im2.ysize
np.testing.assert_allclose((im1.xscale,im1.yscale),(im2.xscale,im2.yscale),atol=1e-3)
assert im1.distorted == im2.distorted
np.testing.assert_allclose((im1.ra,im1.dec,im1.pa),(im2.ra,im2.dec,im2.pa),atol=1e-3)
assert im1.history == im2.history
assert im1.zeropoint == im2.zeropoint
assert im1.header['EQUINOX'] == im2.header['EQUINOX']
np.testing.assert_allclose((im1.header['PA_APER']),(im2.header['PA_APER']),atol=1e-3)
assert im1.header['VAFACTOR'] == im2.header['VAFACTOR']
np.testing.assert_allclose((im1.header['ORIENTAT']),(im2.header['ORIENTAT']),atol=1e-3)
np.testing.assert_allclose((im1.header['RA_APER']),(im2.header['RA_APER']),atol=1e-3)
np.testing.assert_allclose((im1.header['DEC_APER']),(im2.header['DEC_APER']),atol=1e-3)
assert im1.header['NAXIS1'] == im2.header['NAXIS1']
assert im1.header['NAXIS2'] == im2.header['NAXIS2']
def verifyParameters(image,results):
assert image.out_path == results['out_path']
assert image.name == results['name']
assert image.xsize == results['xsize']
assert image.ysize == results['ysize']
np.testing.assert_allclose((image.xscale,image.yscale),(results['xscale'],results['yscale']),atol=1e-3)
assert image.distorted == results['distorted']
np.testing.assert_allclose((image.ra,image.dec,image.pa),(results['ra'],results['dec'],results['pa']),atol=1e-3)
assert image.history == results['history']
assert image.zeropoint == results['zeropoint']
assert image.header['EQUINOX'] == results['equinox']
np.testing.assert_allclose((image.header['PA_APER']),(results['pa_aper']),atol=1e-3)
assert image.header['VAFACTOR'] == results['vafactor']
np.testing.assert_allclose((image.header['ORIENTAT']),(results['orientat']),atol=1e-3)
np.testing.assert_allclose((image.header['RA_APER']),(results['ra_aper']),atol=1e-3)
np.testing.assert_allclose((image.header['DEC_APER']),(results['dec_aper']),atol=1e-3)
assert image.header['NAXIS1'] == results['naxis1']
assert image.header['NAXIS2'] == results['naxis2']
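# Minimal usage sketch (not part of the original helpers; sizes and coordinates are arbitrary):
if __name__ == "__main__":
    a = makeGaussian(size=32, fwhm=4)
    b = makeGaussian(size=32, fwhm=4)
    verifyData(a, b)  # identical arrays, so the point-by-point comparison passes
    w = makeWCS(coords=[150.0, 2.0], xsize=32, ysize=32, scale=[0.05, 0.05])
    print(w.wcs.crval)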
|
StarcoderdataPython
|
154963
|
import unittest
import models.EndNode as n
class TestEndNode(unittest.TestCase):
def setUp(self):
self.a = n.EndNode('192.168.0.1', id = 1)
self.b = n.EndNode('192.168.0.1')
self.c = n.EndNode('192.168.0.3')
def testEquality(self):
self.assertTrue(self.a == self.a)
self.assertTrue(self.b == self.b)
self.assertTrue(self.c == self.c)
self.assertTrue(self.a == self.b)
self.assertFalse(self.a == self.c)
self.assertFalse(self.b == self.c)
def testIp(self):
self.assertEqual(self.a.ip,"192.168.0.1")
self.assertNotEqual(self.a.ip,"192.168.0.3")
self.assertEqual(self.a.ip, self.b.ip)
self.assertEqual(self.b.ip,"192.168.0.1")
self.assertEqual(self.c.ip, "192.168.0.3")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1613765
|
<filename>src/reader.py
import collections
import numpy as np
import re
class TextProcessor(object):
@staticmethod
def from_file(input_file):
with open(input_file, 'r', encoding = 'utf8') as fh:
text = fh.read()
return TextProcessor(text)
def __init__(self, text):
# self.words = self._text2words(text)
self.words = [w for w in text.split()]
self.id2word = None
self.word2id = None
self.vector = None
def set_vocab(self, word2id):
self.word2id = word2id
return self
def create_vocab(self, size):
counter = collections.Counter(self.words)
print( 'Vocabulary size reduced from %s to %s' % (len(counter), size) )
count_pairs = counter.most_common(size-1)
self.id2word = list(dict(count_pairs).keys())
self.id2word[-1] = '<unk>'
self.word2id = dict(zip(self.id2word, range(len(self.id2word))))
def get_vector(self):
unk = self.word2id['<unk>']
self.vector = [self.word2id[word] if word in self.word2id else unk for word in self.words]
return self.vector
def save_converted(self, filename):
with open(filename, 'w') as fh:
for wid in self.vector:
fh.write(self.id2word[wid]+' ')
@staticmethod
def _text2words(text):
# prepare for word based processing
re4 = re.compile(r'\.\.+')
re5 = re.compile(r' +')
text = text.lower()
text = re4.sub(' <3dot> ', text)
text = text.replace(',', ' , ')
text = text.replace('.', ' . ')
text = text.replace('/', ' . ')
text = text.replace('(', ' ( ')
text = text.replace(')', ' ) ')
text = text.replace('[', ' ( ')
text = text.replace(']', ' ) ')
text = text.replace(':', ' : ')
text = text.replace("'", " '")
text = text.replace('?', ' ? ')
text = text.replace(';', ' . ')
text = text.replace('-', ' -')
text = text.replace('<3dot>', ' ... ')
text = text.replace('"', '')
text = re5.sub(' ', text)
text = text.replace('\n', ' <nl> ')
return ['\n' if w == '<nl>' else w for w in text.split()]
def train_iterator(raw_data, batch_size, num_steps):
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i * num_steps:(i + 1) * num_steps]
y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
yield (x, y)
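# Minimal usage sketch (hypothetical toy corpus, not part of the original module):
if __name__ == "__main__":
    tp = TextProcessor("the quick brown fox jumps over the lazy dog " * 10)
    tp.create_vocab(size=8)
    ids = tp.get_vector()
    for x, y in train_iterator(ids, batch_size=2, num_steps=5):
        print(x.shape, y.shape)  # (2, 5) (2, 5)
        break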
|
StarcoderdataPython
|
3376239
|
from multiprocessing import Pipe
# Duplex: both connection ends can send and receive
conn1, conn2 = Pipe()
conn1.send('first message sent by conn1')
conn1.send('second message sent by conn1')
conn2.send('first message sent by conn2')
conn2.send('second message sent by conn2')
print(conn1.recv())
print(conn1.recv())
print(conn2.recv())
print(conn2.recv())
# Simplex (one-way): with duplex=False, c1 is the read end and c2 is the write end
c1, c2 = Pipe(False)
c2.send('message sent by c2')
print(c1.recv())
# c1.send('message sent by c1')  # not allowed: c1 is the read-only end
|
StarcoderdataPython
|
3387708
|
<filename>csc work/misc/w5/r/collatz.py
def count_collatz_steps(n):
''' (int) -> int
Return the number of steps it takes to reach 1, by repeating the two steps
of the Collatz conjecture beginning from n.
>>> count_collatz_steps(6)
8
'''
count = 0
while n > 1:
if n % 2 == 0:
            n //= 2  # floor division keeps n an int (float division loses precision for large n)
else:
n = (3 * n) + 1
count = count + 1
return count
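# Added sketch (not in the original file): exercise the docstring example as a doctest.
if __name__ == '__main__':
    import doctest
    doctest.testmod()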
|
StarcoderdataPython
|
134732
|
import toolz
import toolz.curried
from toolz.curried import (take, first, second, sorted, merge_with, reduce,
merge, operator as cop)
from collections import defaultdict
from importlib import import_module
from operator import add
def test_take():
assert list(take(2)([1, 2, 3])) == [1, 2]
def test_first():
assert first is toolz.itertoolz.first
def test_merge():
assert merge(factory=lambda: defaultdict(int))({1: 1}) == {1: 1}
assert merge({1: 1}) == {1: 1}
assert merge({1: 1}, factory=lambda: defaultdict(int)) == {1: 1}
def test_merge_with():
assert merge_with(sum)({1: 1}, {1: 2}) == {1: 3}
def test_merge_with_list():
assert merge_with(sum, [{'a': 1}, {'a': 2}]) == {'a': 3}
def test_sorted():
assert sorted(key=second)([(1, 2), (2, 1)]) == [(2, 1), (1, 2)]
def test_reduce():
assert reduce(add)((1, 2, 3)) == 6
def test_module_name():
assert toolz.curried.__name__ == 'toolz.curried'
def test_curried_operator():
for k, v in vars(cop).items():
if not callable(v):
continue
if not isinstance(v, toolz.curry):
try:
# Make sure it is unary
v(1)
except TypeError:
try:
v('x')
except TypeError:
pass
else:
continue
raise AssertionError(
'toolz.curried.operator.%s is not curried!' % k,
)
# Make sure this isn't totally empty.
assert len(set(vars(cop)) & {'add', 'sub', 'mul'}) == 3
def test_curried_namespace():
exceptions = import_module('toolz.curried.exceptions')
namespace = {}
def should_curry(func):
if not callable(func) or isinstance(func, toolz.curry):
return False
nargs = toolz.functoolz.num_required_args(func)
if nargs is None or nargs > 1:
return True
return nargs == 1 and toolz.functoolz.has_keywords(func)
def curry_namespace(ns):
return {
name: toolz.curry(f) if should_curry(f) else f
for name, f in ns.items() if '__' not in name
}
from_toolz = curry_namespace(vars(toolz))
from_exceptions = curry_namespace(vars(exceptions))
namespace.update(toolz.merge(from_toolz, from_exceptions))
namespace = toolz.valfilter(callable, namespace)
curried_namespace = toolz.valfilter(callable, toolz.curried.__dict__)
if namespace != curried_namespace:
missing = set(namespace) - set(curried_namespace)
if missing:
raise AssertionError('There are missing functions in toolz.curried:\n %s'
% ' \n'.join(sorted(missing)))
extra = set(curried_namespace) - set(namespace)
if extra:
raise AssertionError('There are extra functions in toolz.curried:\n %s'
% ' \n'.join(sorted(extra)))
unequal = toolz.merge_with(list, namespace, curried_namespace)
unequal = toolz.valfilter(lambda x: x[0] != x[1], unequal)
messages = []
for name, (orig_func, auto_func) in sorted(unequal.items()):
if name in from_exceptions:
messages.append('%s should come from toolz.curried.exceptions' % name)
elif should_curry(getattr(toolz, name)):
messages.append('%s should be curried from toolz' % name)
else:
messages.append('%s should come from toolz and NOT be curried' % name)
raise AssertionError('\n'.join(messages))
|
StarcoderdataPython
|
395664
|
from codecs import open
import toml
from setuptools import find_packages, setup
with open("README.rst") as f:
readme = f.read()
project = toml.load("pyproject.toml")["mewo_project"]
setup(
name="sty",
version=project["version"],
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com/feluxe/sty",
description="String styling for your terminal",
long_description=readme,
long_description_content_type="text/x-rst",
download_url="https://github.com/feluxe/sty" + "/tarball/" + project["version"],
license="Apache 2.0",
keywords=["styling", "color", "colour", "terminal", "ansi"],
include_package_data=True,
platforms="",
classifiers=[
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Typing :: Typed",
"Topic :: Printing",
"Topic :: Terminals",
"Topic :: System :: Shells",
],
install_requires=[],
packages=find_packages(where=".", exclude=("tests", "tests.*")),
package_dir={"sty": "sty"},
package_data={},
data_files=[],
entry_points={"console_scripts": [], "gui_scripts": []},
tests_require=[],
)
|
StarcoderdataPython
|
12848471
|
"""
A PyTorch implementation of the KL-Divergence Loss as described in (https://arxiv.org/abs/1511.06321)
Lua Implementation (not inspected yet TODO) (https://github.com/yenchanghsu/NNclustering/blob/master/BatchKLDivCriterion.lua)
"""
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
from IPython import embed
class triplet_loss(nn.Module):
def __init__(self, alpha = 7.18):
super(triplet_loss, self).__init__()
self.alpha = alpha
def forward(self, outputs, clusters):
"""
        :param outputs The set of embeddings for the current batch
        :param clusters Cluster assignments for each embedding
        :return Loss Triplet loss calculated for the current batch
"""
assert not clusters.requires_grad, \
"nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as volatile or not requiring gradients"
_min_float = 1e-6
num_instances = 0.0
outputs = outputs.float()
clusters = clusters.cpu().data.numpy()
batch_size = outputs.size(0)
        # Accumulator kept from the original implementation (it is not used below);
        # only move it to the GPU when one is actually available.
        loss = torch.zeros(1)
        if torch.cuda.is_available():
            loss = loss.cuda()
######################### Cluster Assignments ##########################
# Generate a set of clusters in the batch
# and the local indices corresponding to each of those clusters
# batch_clusters = { cluster_number : [ local_indices] }
        # TODO fix later!!! -- for now assuming indices are irrelevant!
batch_clusters = {}
for i in range(0, len(clusters)):
if clusters[i] in batch_clusters.keys():
batch_clusters[clusters[i]].append(i)
else:
batch_clusters[clusters[i]] = [i]
######################### Cluster Assignments ##########################
old_clusters = list(batch_clusters.keys())
clusters = []
        # remove clusters with fewer than 2 instances (TODO)
for c in old_clusters:
if len(batch_clusters[c]) >= 2:
clusters.append(c)
########################## CALCULATE THE LOSS #########################
instances_1 = []
instances_2 = []
instances_3 = []
for m in range(0, len(clusters)):
c = clusters[m]
for d1 in range(0, len(batch_clusters[c]) - 1):
for d2 in range(d1+1, len(batch_clusters[c])):
ins_i1 = batch_clusters[c][d1]
ins_i2 = batch_clusters[c][d2]
for mN in range(0, len(clusters)):
if mN != m:
cN = clusters[mN]
for dN in range(0, len(batch_clusters[cN])):
ins_iN = batch_clusters[cN][dN]
instances_1.append(ins_i1)
instances_2.append(ins_i2)
instances_3.append(ins_iN)
return ((outputs[instances_1] - outputs[instances_2]).norm(p=2, dim = 1) + self.alpha - (outputs[instances_1] - outputs[instances_3]).norm(p=2, dim = 1)).clamp(min = 0.0).mean()
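# Minimal usage sketch (hypothetical shapes and cluster ids; with the guarded .cuda() above it
# also runs on CPU-only machines):
if __name__ == "__main__":
    criterion = triplet_loss(alpha=1.0)
    embeddings = torch.randn(8, 16, requires_grad=True)
    cluster_ids = torch.tensor([0, 0, 0, 1, 1, 1, 2, 2])
    loss = criterion(embeddings, cluster_ids)
    print(loss.item())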
|
StarcoderdataPython
|
3538375
|
<reponame>helderthh/leetcode<filename>medium/merge-intervals.py
# 56. Merge Intervals
# https://leetcode.com/problems/merge-intervals/
from typing import List
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
res = [] # list of final intervals
# for each interval
for interval in intervals:
final_interval = interval
# for each res
i = 0
while i < len(res):
                # check if interval.left is lower than or equal to res.right
if self.are_overlapping(final_interval, res[i]):
# if it is, merge those intervals
final_interval = self._merge(final_interval, res[i])
# swap and pop interval
                    res[i], res[-1] = res[-1], res[i]
res.pop()
i -= 1
i += 1
            res.append(final_interval)  # insert the merged (or untouched) interval
        return res
def are_overlapping(self, a, b):
return (a[0] <= b[1] \
and a[1] >= b[0]) \
or (a[0] >= b[1] \
and a[1] <= b[0])
def _merge(self, a, b):
return [min(a[0], b[0]), max(a[1], b[1])]
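# Quick sanity check (not part of the original solution file):
if __name__ == "__main__":
    print(Solution().merge([[1, 3], [2, 6], [8, 10], [15, 18]]))  # [[1, 6], [8, 10], [15, 18]]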
|
StarcoderdataPython
|
11219614
|
<filename>practice/filecompross/rarunrar/unrarfiles.py
# -*- coding:utf-8 -*-
"""
To extract .rar files, install the unrar plugin: pip install unrar
Note: this plugin needs the UnRAR library (rarlib) to work.
Installing the UnRAR library:
    Download UnRARDLL.exe from https://www.rarlab.com/rar_add.htm, install it, and add its folder to the PATH environment variable.
"""
# Notes:
# Without UnRARDLL.exe installed, running this script raises: LookupError: Couldn't find path to unrar library.
# That means the path to the unrar library cannot be found. The library is UnRAR.dll, which can be downloaded from http://www.rarlab.com/rar/UnRARDLL.exe or found via http://www.rarlab.com/rar_add.htm.
# Second step:
# After installation the library lives in C:\Program Files (x86)\UnrarDLL. On 32-bit Windows add that folder to the PATH; on 64-bit Windows add its X64 subfolder instead. The unrar module looks for unrar.dll and unrar.lib, so rename UnRAR.dll and UnRAR.lib to lowercase.
from unrar import rarfile
import os
import shutil
def Un_rar(rardir,rarname,Archiverar):
"""
    Extract a rar archive.
    rardir: directory where the archive is stored
    rarname: file name of the archive to extract
    Archiverar: destination directory for the extracted files
"""
try:
Unrar_path = os.path.join(rardir,rarname)
r = rarfile.RarFile(Unrar_path)
if r:
if os.path.isdir(Archiverar):
shutil.rmtree(Archiverar)
                print("If the destination directory already exists, delete it first.")
                os.mkdir(Archiverar)
                print("Destination directory for the extracted files created successfully.")
                r.extractall(Archiverar)
                print("Extraction finished; check the destination directory.")
            else:
                os.mkdir(Archiverar)
                print("Destination directory for the extracted files created successfully.")
                r.extractall(Archiverar)
                print("Extraction finished; check the destination directory.")
except Exception as e:
return e
if __name__=="__main__":
decompress = None
rarname = ""
rardir = ""
if rarname:
        print("Found archive file name: {}".format(rarname))
        if rardir:
            print("Found archive directory: {}".format(rardir))
    else:
        rarname = input("Enter the name of the archive you want to extract (file.rar): ")
rarsuf = ".rar"
zipsuf = ".zip"
if rarname.endswith(rarsuf):
            rardir = input("Enter the directory where the archive is stored (path): ")
if os.path.isdir(rardir):
unrarpath = r'F:\PythonProject\python-scripts\practice'
Archiverar = os.path.join(unrarpath + "\\") + "files_" + rarname
decompress = Un_rar(rardir,rarname,Archiverar)
else:
                print("No valid archive directory was given; rerun the program and enter a directory.")
elif rarname.endswith(zipsuf):
            print("You entered a zip archive, which is not supported yet.")
else:
            print("Other archive types are not supported yet.")
|
StarcoderdataPython
|
5142252
|
<gh_stars>0
def sub_gen1(x):
r = yield x
print('subgen1', r)
def gen1(x):
r = yield sub_gen1(x)
print('gen1', r)
g = gen1(3)
g.send(None)  # plain 'yield' hands back the sub_gen1 generator object itself
try:
    g.send(21)  # gen1 then finishes, so this send raises StopIteration
except StopIteration:
    pass
# g = gen1(3)
# g
# <generator object gen1 at 0x10f9ce450>
# g.send(None)
# <generator object sub_gen1 at 0x10f9ce3d0>
# g.send(21)
# gen1 21
# Traceback (most recent call last):
# File "<input>", line 1, in <module>
# StopIteration
def sub_gen2(x):
r = yield x
print('subgen2', r)
return x + 10
def gen2(x):
r = yield from sub_gen2(x)
print('gen2', r)
g2 = gen2(2)
g2.send(None)  # 'yield from' delegates, so this yields 2 from inside sub_gen2
try:
    g2.send(9)  # sub_gen2 returns 12 to gen2, which then finishes with StopIteration
except StopIteration:
    pass
# g2=gen2(2)
# g2.send(None)
# 2
# g2.send(100)
# subgen2 100
# gen2 12
|
StarcoderdataPython
|
6501311
|
<reponame>nutanixdev/calm-dsl
from calm.dsl.decompile.render import render_template
from calm.dsl.decompile.task import render_task_template
from calm.dsl.decompile.parallel_task import render_parallel_task_template
from calm.dsl.decompile.variable import render_variable_template
from calm.dsl.builtins import action, ActionType, RefType
from calm.dsl.log import get_logging_handle
LOG = get_logging_handle(__name__)
RUNBOOK_ACTION_MAP = {}
def render_action_template(cls, entity_context=""):
global RUNBOOK_ACTION_MAP
LOG.debug("Rendering {} action template".format(cls.__name__))
if not isinstance(cls, ActionType):
raise TypeError("{} is not of type {}".format(cls, action))
# Update entity context
    # TODO for now, not adding runbook to context as current mapping is 1:1
entity_context = entity_context + "_Action_" + cls.__name__
runbook = cls.runbook
# Note cls.__name__ should be used for call_runbook tasks
RUNBOOK_ACTION_MAP[runbook.__name__] = cls.__name__
    # NOTE Not using main_task_local_reference for now,
    # because the type of the main task is "DAG"
levelled_tasks = get_task_order(runbook.tasks)
tasks = []
for task_list in levelled_tasks:
if len(task_list) != 1:
tasks.append(
render_parallel_task_template(
task_list, entity_context, RUNBOOK_ACTION_MAP
)
)
else:
tasks.append(
render_task_template(task_list[0], entity_context, RUNBOOK_ACTION_MAP)
)
variables = []
for variable in runbook.variables:
variables.append(render_variable_template(variable, entity_context))
if not (variables or tasks):
return ""
user_attrs = {
"name": cls.__name__,
"description": cls.__doc__ or "",
"tasks": tasks,
"variables": variables,
}
gui_display_name = getattr(cls, "name", "") or cls.__name__
if gui_display_name != cls.__name__:
user_attrs["gui_display_name"] = gui_display_name
text = render_template(schema_file="action.py.jinja2", obj=user_attrs)
return text.strip()
def get_task_order(task_list):
    """Returns a list where each index holds the list of tasks that execute in parallel at that level"""
dag_task = None
for ind, task in enumerate(task_list):
if task.type == "DAG":
dag_task = task
task_list.pop(ind)
break
if not dag_task:
raise ValueError("Dag task not found")
# Edges between tasks
edges = dag_task.attrs["edges"]
# Final resultant task list with level as index
res_task_list = []
# map to store the edges from given task
task_edges_map = {}
    # map to store the indegree of every task
task_indegree_count_map = {}
# create task map with name
task_name_data_map = {}
for task in task_list:
task_name = task.name
task_name_data_map[task_name] = task
task_indegree_count_map[task_name] = 0
task_edges_map[task_name] = []
# store in degree of every task
for edge in edges:
from_task = RefType.decompile(edge["from_task_reference"])
to_task = RefType.decompile(edge["to_task_reference"])
task_indegree_count_map[to_task.name] += 1
task_edges_map[from_task.name].append(to_task.name)
# Queue to store elements having indegree 0
queue = []
# Push elements having indegree = 0
for task_name, indegree in task_indegree_count_map.items():
if indegree == 0:
queue.append(task_name)
# Topological sort
while queue:
# length of queue
ql = len(queue)
# Inserting task with current indegree = 0
task_data_list = []
for task in queue:
task_data_list.append(task_name_data_map[task])
if task_data_list:
res_task_list.append(task_data_list)
while ql:
# Popping the element at start
cur_task = queue.pop(0)
# Iterating its edges, and decrease the indegree of to_edge task by 1
for to_task in task_edges_map[cur_task]:
task_indegree_count_map[to_task] -= 1
# If indegree is 0, push to queue
if task_indegree_count_map[to_task] == 0:
queue.append(to_task)
# decrement the counter for queue length
ql -= 1
return res_task_list
|
StarcoderdataPython
|
8060264
|
import sys
import pygame
from pygame.locals import *
# init pygame
pygame.init()
DISPLAYSURFACE = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption('Hello World')
# color constants
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# create text
fontObj = pygame.font.Font('freesansbold.ttf', 32)
textSurfaceObj = fontObj.render('Hello World!', True, GREEN, BLUE)
textRectObj = textSurfaceObj.get_rect()
textRectObj.center = (200, 150)
# game loop
while True:
# process events
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
# draw
DISPLAYSURFACE.fill(WHITE)
DISPLAYSURFACE.blit(textSurfaceObj, textRectObj)
pygame.display.update()
|
StarcoderdataPython
|
9653711
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the users API end-point.
Test-Suite to ensure that the /users endpoint is working as expected.
"""
import copy
from auth_api import status as http_status
from auth_api.models import ContactLink as ContactLinkModel
from auth_api.schemas import utils as schema_utils
from auth_api.services import Org as OrgService
from tests.utilities.factory_scenarios import TestJwtClaims, TestOrgInfo, TestUserInfo
from tests.utilities.factory_utils import (
factory_auth_header, factory_contact_model, factory_user_model, patch_token_info)
def test_get_user_settings(client, jwt, session, keycloak_mock, monkeypatch): # pylint:disable=unused-argument
"""Assert that get works and adhere to schema."""
user_model = factory_user_model(user_info=TestUserInfo.user_test)
contact = factory_contact_model()
contact_link = ContactLinkModel()
contact_link.contact = contact
contact_link.user = user_model
contact_link.commit()
kc_id = user_model.keycloak_guid
claims = copy.deepcopy(TestJwtClaims.updated_test.value)
claims['sub'] = str(kc_id)
patch_token_info(claims, monkeypatch)
OrgService.create_org(TestOrgInfo.org1, user_id=user_model.id)
# post token with updated claims
headers = factory_auth_header(jwt=jwt, claims=claims)
rv = client.get(f'/api/v1/users/{kc_id}/settings', headers=headers, content_type='application/json')
item_list = rv.json
account = next(obj for obj in item_list if obj['type'] == 'ACCOUNT')
assert account['accountType'] == 'BASIC'
assert rv.status_code == http_status.HTTP_200_OK
assert schema_utils.validate(item_list, 'user_settings_response')[0]
assert account['productSettings'] == f'/account/{account["id"]}/settings/restricted-product'
|
StarcoderdataPython
|
9614165
|
<filename>countries/management/commands/_base.py
from pathlib import Path
from django.core import serializers
from django.core.management.base import BaseCommand
__all__ = ['DumperBaseCommand']
class TextIOWrapper(object):
def __init__(self, path, mode, format, is_fake=False):
self.format = format
self.is_fake = is_fake
if not is_fake:
self._file = open(path.as_posix(), mode)
else:
self._file = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.is_fake:
self._file.close()
def read(self):
if self.is_fake:
return []
return serializers.deserialize(self.format, self._file.read())
def write(self, queryset, **kwargs):
if not self.is_fake:
data = serializers.serialize(self.format, queryset, **kwargs)
self._file.write(data)
class DumperBaseCommand(BaseCommand):
exclude_fixtures = ()
def __init__(self, *args, **kwargs):
self._rootdir = Path(__file__).parents[2] / 'fixtures'
super().__init__(*args, **kwargs)
def get_fixture_path(self, path):
path = Path(path)
if not path.is_absolute():
return self._rootdir / path.with_name(path.name + '.json')
return path
def get_country_path(self, country, name):
return Path('countries', country.cca2.lower()).with_suffix('.' + name)
def open_fixture(self, path, mode):
path = self.get_fixture_path(path)
return TextIOWrapper(
path=path,
mode=mode,
format='json',
is_fake=self.is_excluded(path))
def is_excluded(self, path):
return next((
True for pattern in self.exclude_fixtures
if path.match(pattern)), False)
|
StarcoderdataPython
|
41965
|
<gh_stars>0
'''
AUTHOR: <NAME>
Date: 08/10/2020
WilliamHillURLs class to manage info about William Hill web pages.
'''
import requests
from bs4 import BeautifulSoup
from urlvalidator import validate_url, ValidationError
class WilliamHillURLs:
    """Auxiliary class with data and helpers for scraping the William Hill web pages.
Returns:
WilliamHillURLs: An object with data and auxiliar functions.
Attributes
----------
BaseURL : str
        Base URL of the William Hill web page.
    URL_FootballOnDirect : str
        URL for live football matches on the William Hill web page.
    URL_TenisOnDirect : str
        URL for live tennis matches on the William Hill web page.
    URL_BasketOnDirect : str
        URL for live basketball matches on the William Hill web page.
"""
BaseURL = 'https://sports.williamhill.es/'
URL_FootballOnDirect = 'https://sports.williamhill.es/betting/es-es/en-directo/f%C3%BAtbol'
URL_TenisOnDirect = 'https://sports.williamhill.es/betting/es-es/en-directo/tenis'
URL_BasketOnDirect = 'https://sports.williamhill.es/betting/es-es/en-directo/baloncesto'
    def GetAllUrlMatches(self, urlSport=URL_FootballOnDirect):
        """Get all match URLs for a sport (football URLs by default). Validate that each URL is a valid URL.
Args:
            urlSport (str, optional): William Hill URL for live football matches. Defaults to "https://sports.williamhill.es/betting/es-es/en-directo/f%C3%BAtbol".
Returns:
list: List with all URL matches.
"""
req = requests.get(urlSport)
soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "html.parser")
aux = soup.findAll("a", {"class": ['btmarket__name btmarket__name--featured']})
auxList = []
for item in aux:
try:
theUrl = (self.BaseURL + item['href']).replace("//","/").replace("https:/","https://")
validate_url(theUrl)
auxList.append(theUrl)
except ValidationError:
raise ValidationError(theUrl)
return auxList
    def GetAllMatchsPlayedActually(self, urlSport=URL_FootballOnDirect):
        """Get all matches of a sport being played at the current moment.
Args:
            urlSport (str, optional): A William Hill sport URL. Defaults to URL_FootballOnDirect.
Returns:
list: List with all matches and its bets.
"""
req = requests.get(urlSport)
soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "lxml")
matches = soup.findAll("div", {"class": "btmarket__link-name btmarket__link-name--ellipsis show-for-desktop-medium"})
listaApuestas = soup.findAll("div", {"class": "btmarket__selection"})
matchList = []
for item in matches:
var = item.text + ': ' + listaApuestas[0].text + ' | ' + listaApuestas[1].text + ' | ' + listaApuestas[2].text
matchList.append(var)
return matchList
    def ConvertFractionalBetToDecimalBet(self, theBet):
        """Convert a fractional bet string to a decimal bet.
Args:
theBet (str): A fractional bet.
Returns:
[str]: A decimal bet.
"""
bet = 0.0
aux = str(theBet).split('/')
bet = (int(aux[0], 10) / int(aux[1], 10) ) + 1
return str(round(bet, 2))
def GetAllBetsFromURLMatch(self, url):
"""Get all bets actually from a match.
Args:
url (str): Match URL
Returns:
            list: A list with the different available bets.
"""
allBetsList = []
req = requests.get(url)
soup = BeautifulSoup(req.content.decode('utf-8','ignore'), "html.parser")
aux = soup.findAll("h2", {"class" : ['fl']})
# print('Number of diferent bets: ', len(aux), ', Match URL: ', url)
for item in aux:
allBetsList.append(item.text)
# print(item.text,'|',type(item.text), item['class'])
return allBetsList
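# Minimal local sketch (no network calls; only the fraction-to-decimal helper is exercised,
# although the requests/bs4/urlvalidator imports above still need to be installed):
if __name__ == "__main__":
    wh = WilliamHillURLs()
    print(wh.ConvertFractionalBetToDecimalBet("5/2"))  # 3.5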
|
StarcoderdataPython
|
9751507
|
from pathlib import Path
import os, sys
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
# this size is required for embedding
FACE_PIC_SIZE = 160
EMBEDDING_SIZE = 512
#PRETREINED_MODEL_DIR = os.path.join(str(Path.home()), 'pretrained_models')
PRETREINED_MODEL_DIR = "/workspace/pretrained_models"
UNKNOWN_CLASS = "unknown"
|
StarcoderdataPython
|
9705298
|
<reponame>czbiohub/special_ops_crispr_tools
#!/usr/bin/env python3
import sys
if sys.version_info < (3,5):
print("This script requires Python >= 3.5.")
print("Current Python version is {}.".format(sys.version.split()[0]))
sys.exit(-1)
import subprocess, traceback, time, requests
from requests.exceptions import ConnectionError
import logging
log = logging.getLogger(__name__)
# For given radius c5_c10_c20 around a target we can determine if there are
# any offtargets in that radius. There are two radii of interest.
#
offtarget_proximity = {
"far": "5_9_18",
"near": "5_9_19"
}
assert offtarget_proximity["far"] < offtarget_proximity["near"]
#
# Def: A radius c5_c10_c20 specifies maximum Hamming distances on nested
# suffixes of 5, 10, 20 bases. A 20-mer Y is within 5_9_18 radius of 20-mer X
# if and only if the 5-char suffixes of X and Y match exactly, the 10-char
# suffixes have at most 1 positional difference, and the 20-char suffixes
# (i.e. the entire X and Y) have at most 2 positional differences.
gvc_top = "generated_files/under_version_control"
# Contains .txt files with various indexes of target => target properties
target_index_dir = "{}/target_index".format(gvc_top)
# Produced by make_genes_and_identify_all_targets.py, formerly split_fasta
genes_dir = "{}/genes".format(gvc_top)
genes_temp_dir = genes_dir + "_temp"
all_targets_path = "{}/all_targets.txt".format(target_index_dir)
ambiguous_targets_path = "{}/ambiguous_targets.txt".format(target_index_dir)
antibiotics_by_gene_path = "{}/antibiotics_by_gene.txt".format(target_index_dir)
genes_by_antibiotic_path = "{}/genes_by_antibiotic.txt".format(target_index_dir)
antibiotics_path = "{}/antibiotics.txt".format(target_index_dir)
# Produced by filter_offtargets.py with input from all_targets_path
# and using the GO server for offtarget filtering.
off_targets_path = "{}/off_targets.txt".format(target_index_dir)
# Produced by filter_targets.py with input from all_targets_path
# and off_targets_path.
filtered_targets_path = "{}/filtered_targets.txt".format(target_index_dir)
# Produced by make_gene_index, formerly known as crispr_sites.py.
gene_index_dir = "{}/gene_index".format(gvc_top)
gene_index_temp_dir = "{}/gene_index_temp".format(gvc_top)
padding_input_path = "inputs/additional/padding.json"
special_token = "<PASSWORD>"
# It is convenient to track changes with git for the smaller and more human
# readable generated files.
#
# When files are (re)generated, the build process presents a git status
# report showing which, if any, of the generated files have changed.
#
# The changes are automatically added to the git index, i.e., to the
# user's pending commit.
def git_reset_and_remove_generated_folder(output_dir, return_values={}):
try:
assert ' ' not in output_dir
# t = time.time()
print("Deleting every file in {}.".format(output_dir))
subprocess.check_call("rm -rf {}".format(output_dir).split())
subprocess.check_call("git rm -r --cached --force --ignore-unmatch --quiet {}".format(output_dir).split())
        # print("GIT housekeeping took {:3.1f} seconds.".format(time.time() - t))
return_values['status'] = 'Success'
except:
traceback.print_exc()
return_values['status'] = 'Failure'
def git_add_back_generated_folder(output_dir, return_values={}):
try:
assert output_dir
assert ' ' not in output_dir
print("Adding every file in {output_dir} to git.".format(output_dir=output_dir))
subprocess.check_call("git add -A {}".format(output_dir).split())
subprocess.check_call("git status --short {}".format(output_dir).split())
return_values['status'] = 'Success'
except:
traceback.print_exc()
return_values['status'] = 'Failure'
def git_remove_generated_file(output_file):
assert ' ' not in output_file
subprocess.check_call("rm -f {}".format(output_file).split())
subprocess.check_call("git rm --cached --force --ignore-unmatch --quiet {}".format(output_file).split())
def git_add_back_generated_file(output_file, skip_status=False):
assert output_file
assert ' ' not in output_file
print("Adding {} back to git.".format(output_file))
subprocess.check_call("git add -A {}".format(output_file).split())
if not skip_status:
subprocess.check_call("git status --short {}".format(output_file).split())
def fetch_with_retries(targets, c5, c10, c20, max_attempts=5, timeout=600):
failures = 0
while True:
try:
url = "http://localhost:8080/search?targets=%s&limits=%s"
url = url % (",".join(map(str, targets)), ",".join(map(str, [c5, c10, c20])))
return requests.get(url, timeout=timeout)
except (ConnectionResetError, ConnectionError):
log.warning('Offtarget server not ready, trying again in {} seconds, attempt {}/{}'.format(timeout, failures + 1, max_attempts))
failures += 1
if failures > max_attempts:
raise
time.sleep(timeout)
def main():
t = time.time()
print("-------------------------------------------------------------------------------------")
subprocess.check_call("python make_genes_and_identify_all_targets.py {}".format(special_token).split())
print("-------------------------------------------------------------------------------------")
subprocess.check_call("python filter_offtarget.py".split())
print("-------------------------------------------------------------------------------------")
subprocess.check_call("python filter_targets.py".split())
print("-------------------------------------------------------------------------------------")
subprocess.check_call("python make_gene_index.py {}".format(special_token).split())
print("-------------------------------------------------------------------------------------")
print("Complete rebuild took {:3.1f} seconds.".format(time.time() - t))
return 0
if __name__ == "__main__":
print("Builder of FLASH. For usage, see README.TXT.")
try:
print("Poking offtarget server. Timeout 30 seconds.")
fetch_with_retries(["ACGT" * 5], 5, 9, 18, max_attempts=5, timeout=30)
print("Offtarget server is alive.")
except:
traceback.print_exc()
print("*********************************************************************************")
print("*** Did you forget to start the offtarget server? Did it finish loading? ***")
print("*** Please follow the instructions in README.TXT and try again. ***")
print("*********************************************************************************")
sys.exit(-1)
retcode = main()
sys.exit(retcode)
|
StarcoderdataPython
|
1778825
|
# coding: utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
class Model(nn.Module):
def __init__(self, args):
super(Model, self).__init__()
if args['embedding_pretrained'] is not None:
self.embedding = nn.Embedding.from_pretrained(args['embedding_pretrained'], freeze=False)
else:
self.embedding = nn.Embedding(args['n_vocab'], args['embed'], padding_idx=args['n_vocab'] - 1)
self.lstm = nn.LSTM(args['embed'], args['hidden_size'], args['num_layers'],
bidirectional=True, batch_first=True, dropout=args['dropout'])
self.tanh = nn.Tanh()
self.w = nn.Parameter(torch.zeros(args['hidden_size'] * 2))
#self.fc1 = nn.Linear(args['hidden_size'] * 2, args['hidden_size2'])
#self.fc2 = nn.Linear(args['hidden_size2'], args['num_classes'])
self.fc = nn.Linear(args['hidden_size'] * 2, args['num_classes'])
def forward(self, x):
# x[0]: [batch_size, seq_len]
# x[1]: [batch_size] actual len of each sentence
emb = self.embedding(x[0])
# emb: [batch_size, seq_len, embed]
packed_input = pack_padded_sequence(emb, x[1], batch_first=True, enforce_sorted=False) # use this function to speed up
packed_output, (hidden, cell) = self.lstm(packed_input)
# hidden: [num_directions, batch_size, hidden_size]
out, seq_len = pad_packed_sequence(packed_output, batch_first=True)
# out: [batch_size, seq_len, hidden_size*num_directions]
M = self.tanh(out)
# M: [batch_size, seq_len, hidden_size*num_directions]
alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)
# alpha: [batch_size, seq_len, 1]
out = out*alpha
# out: [batch_size, seq_len, hidden_size*num_directions]
out = torch.sum(out,1)
# out: [batch_size, hidden_size*num_directions]
out = F.relu(out)
out = self.fc(out)
#out = self.fc1(out)
# out: [batch_size, hidden_size2]
#out = self.fc2(out)
# out: [batch_size, num_classes]
return out
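# Minimal forward-pass sketch (hypothetical hyperparameters and random token ids):
if __name__ == "__main__":
    args = {
        'embedding_pretrained': None,
        'n_vocab': 100,
        'embed': 32,
        'hidden_size': 64,
        'num_layers': 2,
        'dropout': 0.1,
        'num_classes': 4,
    }
    model = Model(args)
    tokens = torch.randint(0, 99, (3, 10))  # batch of 3 padded sentences, max length 10
    lengths = torch.tensor([10, 7, 5])      # true sentence lengths
    print(model((tokens, lengths)).shape)   # torch.Size([3, 4])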
|
StarcoderdataPython
|
43060
|
import os
import csv
import subprocess
import matplotlib.pyplot as plt
from math import ceil
from tqdm import tqdm
from pandas import read_csv
from netCDF4 import Dataset, num2date
from multiprocessing import cpu_count, Process
from .plot import plot_filtered_profiles_data
def download_data(files, storage_path):
    # Download data from Argo's rsync server
with tqdm(total=files.shape[0]) as pbar:
for file in files:
subprocess.call(["rsync", "-azh", f"vdmzrs.ifremer.fr::argo/{file}", storage_path])
pbar.update(1)
def filter_point_in_polygon(data, start, end, polygon, thread, storage_path, file_name, source_path):
N = len(polygon)
with open(f'{storage_path}/{file_name}-{thread}.csv', 'a', newline='') as file:
writer = csv.writer(file)
for i in range(start, end):
# Point-in-polygon filter
if(is_inside_the_polygon(polygon, N, [data.latitude.values[i],data.longitude.values[i]])):
writer.writerow(data.values[i])
def get_data_from_nc(data, start, end, polygon, thread, storage_path, file_name, source_path):
with open(f'{storage_path}/{file_name}-{thread}.csv', 'a', newline='') as file:
writer = csv.writer(file)
measurements = []
for k in range(start, end):
try:
# Extract data from NetCDF files
nc = Dataset(f"{source_path}/{data.values[k]}")
PLATFORM_NUMBER = nc.variables['PLATFORM_NUMBER'][:]
CYCLE_NUMBER = nc.variables['CYCLE_NUMBER'][:]
DATA_MODE = nc.variables['DATA_MODE'][:]
JULD = nc.variables['JULD']
JULD = num2date(JULD[:],JULD.units)
LATITUDE = nc.variables['LATITUDE'][:]
LONGITUDE = nc.variables['LONGITUDE'][:]
PRES = nc.variables['PRES'][:]
PRES_ADJUSTED = nc.variables['PRES_ADJUSTED'][:]
TEMP = nc.variables['TEMP'][:]
TEMP_ADJUSTED = nc.variables['TEMP_ADJUSTED'][:]
PSAL = nc.variables['PSAL'][:]
PSAL_ADJUSTED = nc.variables['PSAL_ADJUSTED'][:]
for j in range(PRES_ADJUSTED.shape[1]):
if(str(DATA_MODE[0], 'utf-8').strip() == 'R'):
if(PRES[0][j] > 0 and TEMP[0][j] > 0 and PSAL[0][j] > 0):
measurements.append([str(PLATFORM_NUMBER[0], 'utf-8').strip(),CYCLE_NUMBER[0],str(DATA_MODE[0], 'utf-8').strip(),JULD[0],LATITUDE[0],LONGITUDE[0],PRES[0][j],TEMP[0][j],PSAL[0][j]])
else:
if(PRES_ADJUSTED[0][j] > 0 and TEMP_ADJUSTED[0][j] > 0 and PSAL_ADJUSTED[0][j] > 0):
measurements.append([str(PLATFORM_NUMBER[0], 'utf-8').strip(),CYCLE_NUMBER[0],str(DATA_MODE[0], 'utf-8').strip(),JULD[0],LATITUDE[0],LONGITUDE[0],PRES_ADJUSTED[0][j],TEMP_ADJUSTED[0][j],PSAL_ADJUSTED[0][j]])
except:
print(f"File [error]: {data.values[k]}")
writer.writerows(measurements)
def get_data_from_source(files, source_path, storage_path):
columns = ['PLATFORM_NUMBER','CYCLE_NUMBER','DATA_MODE','DATE','LATITUDE','LONGITUDE','PRES','TEMP','PSAL']
# Execute parallel computation with function "get_data_from_nc"
exec_parallel_computation(files, columns, get_data_from_nc, storage_path, "measurements", source_path=source_path)
def get_index(storage_path):
subprocess.call(["rsync", "-azh", "vdmzrs.ifremer.fr::argo-index/ar_index_global_prof.txt", storage_path])
def get_profiles_within_polygon(data, polygon, storage_path):
# Maximum and minimum filter
filtered_data = data[(data.latitude > polygon.latitude.min()) & (data.latitude < polygon.latitude.max()) & (data.longitude > polygon.longitude.min()) & (data.longitude < polygon.longitude.max())].reset_index()
# Execute parallel computation
exec_parallel_computation(filtered_data, filtered_data.columns, filter_point_in_polygon, storage_path, "filtered_profiles", polygon)
filtered_profiles = read_csv(f"{storage_path}/filtered_profiles.csv")
#Plot study area
plot_filtered_profiles_data(polygon, filtered_profiles, data, storage_path)
return filtered_profiles
def is_inside_the_polygon(polygon, N, p):
xinters = 0
counter = 0
p1 = polygon.iloc[0]
# Even-odd algorithm
for i in range(1, N+1):
p2 = polygon.iloc[i % N]
if (p[0] > min(p1[0],p2[0])):
if (p[0] <= max(p1[0],p2[0])):
if (p[1] <= max(p1[1],p2[1])):
if (p1[0] != p2[0]):
xinters = (p[0]-p1[0])*(p2[1]-p1[1])/(p2[0]-p1[0])+p1[1]
if (p1[1] == p2[1] or p[1] <= xinters):
counter += 1
p1 = p2
return counter % 2 != 0
def exec_parallel_computation(data, columns, function, storage_path, file_name, polygon=[], source_path=""):
# Get number of CPUs in the system
processes = []
cpucount = cpu_count()
r_range = ceil(data.shape[0]/cpucount)
# Parallel computation
for i in range(cpucount):
with open(f"{storage_path}/{file_name}-{i}.csv", 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(columns)
start = i * r_range
end = start + r_range
if(end > data.shape[0]):
end = data.shape[0]
p = Process(target=function, args=(data, start, end, polygon, i, storage_path, file_name, source_path))
processes.append(p)
p.start()
    # Wait here until every worker process has finished (join blocks the parent)
for p in processes:
p.join()
# Collect parallel compute data
filtered_profiles_path = f"{storage_path}/{file_name}.csv"
with open(filtered_profiles_path, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(columns)
for i in range(cpucount):
writer.writerows(read_csv(f"{storage_path}/{file_name}-{i}.csv").values)
os.remove(f"{storage_path}/{file_name}-{i}.csv")
|
StarcoderdataPython
|
9620605
|
from django.core.context_processors import csrf
from django.db.utils import IntegrityError
from django.shortcuts import render_to_response
from django.template import RequestContext
from TwRestApiPlaces.models import *
def test(request):
test_place = None
try:
test_place = Place.objects.create(name = 'Spain', code = 'es')
except IntegrityError as e:
#Write to log file e exception
pass
if not test_place: test_place = Place.objects.get(name = 'Spain')
try:
test_place.get_woeid_from_yahoo()
except YahooSearchException as e:
#Write to log file e exception
pass
#Get Actual Trends Of Spain Place
test_place.get_trends()
context = {
'Places': Place.objects.all(),
'Trends_Spain': test_place.trend_set.all(),
}
context.update(csrf(request))
return render_to_response('test.html', context, RequestContext(request))
|
StarcoderdataPython
|
12840407
|
<reponame>yyang08/swagger-spec-compatibility
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from copy import deepcopy
import mock
import pytest
from swagger_spec_compatibility.spec_utils import load_spec_from_spec_dict
from swagger_spec_compatibility.util import EntityMapping
from swagger_spec_compatibility.walkers.enum_values import _different_enum_values_mapping
from swagger_spec_compatibility.walkers.enum_values import EnumValuesDiff
from swagger_spec_compatibility.walkers.enum_values import EnumValuesDifferWalker
@pytest.mark.parametrize(
'left_dict, right_dict, expected_value',
[
(None, None, None),
({}, {}, None),
({'type': 'object'}, {}, None),
({'enum': ['v1']}, {}, None),
({'type': 'string', 'enum': ['v1']}, {}, EntityMapping({'v1'}, set())),
({}, {'type': 'string', 'enum': ['v1']}, EntityMapping(set(), {'v1'})),
({'type': 'string', 'enum': ['v1']}, {'type': 'string', 'enum': ['v1']}, None),
({'type': 'string', 'enum': ['v1', 'v2']}, {'type': 'string', 'enum': ['v2', 'v1']}, None),
({'type': 'string', 'enum': ['old', 'common']}, {'type': 'string', 'enum': ['common', 'new']}, EntityMapping({'old'}, {'new'})),
],
)
def test__different_enum_values_mapping(left_dict, right_dict, expected_value):
assert _different_enum_values_mapping(
left_spec=mock.sentinel.LEFT_SPEC,
right_spec=mock.sentinel.RIGHT_SPEC,
left_schema=left_dict,
right_schema=right_dict,
) == expected_value
def test_EnumValuesDifferWalker_returns_no_paths_if_no_endpoints_defined(minimal_spec):
assert EnumValuesDifferWalker(minimal_spec, minimal_spec).walk() == []
def test_EnumValuesDifferWalker_returns_paths_of_endpoints_responses(minimal_spec_dict):
old_spec_dict = dict(
minimal_spec_dict,
definitions={
'enum_1': {
'type': 'string',
'enum': ['value_to_remove', 'E2', 'E3'],
'x-model': 'enum_1',
},
'enum_2': {
'type': 'string',
'enum': ['E1', 'E2', 'E3'],
'x-model': 'enum_2',
},
'object': {
'properties': {
'enum_1': {'$ref': '#/definitions/enum_1'},
'enum_2': {'$ref': '#/definitions/enum_2'},
},
'type': 'object',
'x-model': 'object',
},
},
paths={
'/endpoint': {
'get': {
'parameters': [{
'in': 'body',
'name': 'body',
'required': True,
'schema': {
'$ref': '#/definitions/object',
},
}],
'responses': {
'200': {
'description': '',
'schema': {
'$ref': '#/definitions/object',
},
},
},
},
},
},
)
new_spec_dict = deepcopy(old_spec_dict)
del new_spec_dict['definitions']['enum_1']['enum'][0]
new_spec_dict['definitions']['enum_2']['enum'].append('new_value')
old_spec = load_spec_from_spec_dict(old_spec_dict)
new_spec = load_spec_from_spec_dict(new_spec_dict)
assert sorted(EnumValuesDifferWalker(old_spec, new_spec).walk()) == sorted([
EnumValuesDiff(
path=('definitions', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('definitions', 'object', 'properties', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('definitions', 'object', 'properties', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
EnumValuesDiff(
path=('definitions', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'responses', '200', 'schema', 'properties', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'responses', '200', 'schema', 'properties', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'parameters', 0, 'schema', 'properties', 'enum_2'),
mapping=EntityMapping(old=set(), new={'new_value'}),
),
EnumValuesDiff(
path=('paths', '/endpoint', 'get', 'parameters', 0, 'schema', 'properties', 'enum_1'),
mapping=EntityMapping(old={'value_to_remove'}, new=set()),
),
])
|
StarcoderdataPython
|
133818
|
<reponame>CircleLiu/DataWarehouse
import redis
import re
import csv
from itertools import islice
redis_pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0)
redis_conn = redis.StrictRedis(connection_pool=redis_pool)
def cache():
total_lines = 7911684 * 9
with open('./movies.txt', 'r', errors='ignore') as f:
for n, line in enumerate(f):
if line.startswith('review/userId:'):
user_id = line.split(':')[1].strip()
if line.startswith('review/profileName:'):
user_name = line.split(':')[1].strip()
if n % 9 == 8:
redis_conn.hset('user', user_id, user_name)
print('\r {}%'.format(100*n/total_lines), end='')
def save():
with open('./csv/user.csv', 'w', newline='') as csvfile:
cw = csv.writer(csvfile)
cw.writerow(['userID', 'profileName'])
for i in range(redis_conn.scard('user:id')):
user_id = str(redis_conn.spop('user:id'), encoding='utf-8')
name = str(redis_conn.hget('user', user_id), encoding='utf-8')
cw.writerow([user_id, name])
print('\r{}'.format(i))
def load_id():
ids = redis_conn.hkeys('user')
for i in ids:
redis_conn.sadd('user:id', i)
save()
|
StarcoderdataPython
|
3485809
|
<filename>FastEMRIWaveforms/few/amplitude/interp2dcubicspline.py
# Schwarzschild Eccentric amplitude module for Fast EMRI Waveforms
# performed with bicubic splines
# Copyright (C) 2020 <NAME>, <NAME>, <NAME>, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
import h5py
import numpy as np
# Cython/C++ imports
from pyInterp2DAmplitude import pyAmplitudeGenerator
# Python imports
from few.utils.baseclasses import SchwarzschildEccentric, AmplitudeBase
from few.utils.utility import check_for_file_download
from few.utils.citations import *
# get path to file
dir_path = os.path.dirname(os.path.realpath(__file__))
class Interp2DAmplitude(AmplitudeBase, SchwarzschildEccentric):
"""Calculate Teukolsky amplitudes by 2D Cubic Spline interpolation.
Please see the documentations for
:class:`few.utils.baseclasses.SchwarzschildEccentric`
for overall aspects of these models.
Each mode is setup with a 2D cubic spline interpolant. When the user
    inputs :math:`(p,e)`, the interpolant determines the corresponding
amplitudes for each mode in the model.
args:
**kwargs (dict, optional): Keyword arguments for the base class:
:class:`few.utils.baseclasses.SchwarzschildEccentric`. Default is
{}.
"""
def __init__(self, **kwargs):
SchwarzschildEccentric.__init__(self, **kwargs)
AmplitudeBase.__init__(self, **kwargs)
# check if you have the necessary file
# it will download from Zenodo if the user does not have it.
few_dir = dir_path + "/../../"
fp = "Teuk_amps_a0.0_lmax_10_nmax_30_new.h5"
check_for_file_download(fp, few_dir)
self.amplitude_generator = pyAmplitudeGenerator(self.lmax, self.nmax, few_dir)
def attributes_Interp2DAmplitude(self):
"""
attributes:
amplitude_generator (obj): C++ class that performs the bicubic
interpolation. It stores all of the splines during initialization
steps.
"""
pass
@property
def citation(self):
"""Return citations for this class"""
return few_citation + few_software_citation
def get_amplitudes(self, p, e, *args, specific_modes=None, **kwargs):
"""Calculate Teukolsky amplitudes for Schwarzschild eccentric.
This function takes the inputs the trajectory in :math:`(p,e)` as arrays
and returns the complex amplitude of all modes to adiabatic order at
each step of the trajectory.
args:
p (1D double numpy.ndarray): Array containing the trajectory for values of
the semi-latus rectum.
e (1D double numpy.ndarray): Array containing the trajectory for values of
the eccentricity.
l_arr (1D int numpy.ndarray): :math:`l` values to evaluate.
m_arr (1D int numpy.ndarray): :math:`m` values to evaluate.
n_arr (1D int numpy.ndarray): :math:`ns` values to evaluate.
*args (tuple, placeholder): Added to create flexibility when calling different
amplitude modules. It is not used.
specific_modes (list, optional): List of tuples for (l, m, n) values
desired modes. Default is None.
**kwargs (dict, placeholder): Added to create flexibility when calling different
amplitude modules. It is not used.
returns:
2D array (double): If specific_modes is None, Teukolsky modes in shape (number of trajectory points, number of modes)
dict: Dictionary with requested modes.
"""
input_len = len(p)
# set the l,m,n arrays
# if all modes, return modes from the model
if specific_modes is None:
l_arr, m_arr, n_arr = (
self.l_arr[self.m_zero_up_mask],
self.m_arr[self.m_zero_up_mask],
self.n_arr[self.m_zero_up_mask],
)
# prepare arrays if specific modes are requested
else:
l_arr = np.zeros(len(specific_modes), dtype=int)
m_arr = np.zeros(len(specific_modes), dtype=int)
n_arr = np.zeros(len(specific_modes), dtype=int)
# to deal with weird m structure
inds_revert = []
for i, (l, m, n) in enumerate(specific_modes):
l_arr[i] = l
m_arr[i] = np.abs(m)
n_arr[i] = n
if m < 0:
inds_revert.append(i)
inds_revert = np.asarray(inds_revert)
# interface to C++
teuk_modes = self.amplitude_generator(
p,
e,
l_arr.astype(np.int32),
m_arr.astype(np.int32),
n_arr.astype(np.int32),
input_len,
len(l_arr),
)
# determine return quantities
# return array of all modes
if specific_modes is None:
return teuk_modes
# dict containing requested modes
else:
temp = {}
for i, lmn in enumerate(specific_modes):
temp[lmn] = teuk_modes[:, i]
l, m, n = lmn
# apply +/- m symmetry
if m < 0:
temp[lmn] = np.conj(temp[lmn])
return temp
|
StarcoderdataPython
|
225866
|
import os
import sys
from io import StringIO
from autouri import AutoURI
from caper.cromwell import Cromwell
from caper.cromwell_metadata import CromwellMetadata
from .example_wdl import make_directory_with_failing_wdls, make_directory_with_wdls
def test_on_successful_workflow(tmp_path, cromwell, womtool):
fileobj_stdout = sys.stdout
make_directory_with_wdls(str(tmp_path / 'successful'))
# Run Cromwell to get metadata JSON
c = Cromwell(cromwell=cromwell, womtool=womtool)
th = c.run(
wdl=str(tmp_path / 'successful' / 'main.wdl'),
inputs=str(tmp_path / 'successful' / 'inputs.json'),
fileobj_stdout=fileobj_stdout,
cwd=str(tmp_path / 'successful'),
)
th.join()
metadata = th.returnvalue
assert metadata
cm = CromwellMetadata(metadata)
# test all properties
assert cm.data == metadata
assert cm.metadata == metadata
assert CromwellMetadata(metadata).data == metadata
assert cm.workflow_id == metadata['id']
assert cm.workflow_status == metadata['status']
# no failures for successful workflow's metadata
assert cm.failures is None
assert cm.calls == metadata['calls']
# test recurse_calls(): test with a simple function
def fnc(call_name, call, parent_call_names):
assert call_name in ('main.t1', 'sub.t2', 'sub_sub.t3')
assert call['executionStatus'] == 'Done'
if call_name == 'main.t1':
assert not parent_call_names
elif call_name == 'sub.t2':
assert parent_call_names == ('main.sub',)
elif call_name == 'sub_sub.t3':
assert parent_call_names == ('main.sub', 'sub.sub_sub')
else:
raise ValueError('Wrong call_name: {name}'.format(name=call_name))
cm.recurse_calls(fnc)
# test write_on_workflow_root()
m_file_on_root = os.path.join(cm.metadata['workflowRoot'], 'metadata.json')
u = AutoURI(m_file_on_root)
u.rm()
assert not u.exists
cm.write_on_workflow_root()
assert os.path.exists(m_file_on_root)
assert CromwellMetadata(m_file_on_root).metadata == cm.metadata
def test_on_failed_workflow(tmp_path, cromwell, womtool):
fileobj_stdout = sys.stdout
make_directory_with_failing_wdls(str(tmp_path / 'failed'))
# Run Cromwell to get metadata JSON
# designed to fail in a subworkflow
c = Cromwell(cromwell=cromwell, womtool=womtool)
th = c.run(
wdl=str(tmp_path / 'failed' / 'main.wdl'),
inputs=str(tmp_path / 'failed' / 'inputs.json'),
fileobj_stdout=fileobj_stdout,
cwd=str(tmp_path / 'failed'),
)
th.join()
# check failed
assert th.returncode
metadata = th.returnvalue
assert metadata
cm = CromwellMetadata(metadata)
assert cm.failures == metadata['failures']
assert cm.calls == metadata['calls']
# test troubleshoot()
fileobj = StringIO()
cm.troubleshoot(fileobj=fileobj)
fileobj.seek(0)
s = fileobj.read()
assert '* Found failures JSON object' in s
assert 'NAME=sub.t2_failing' in s
    assert 'INTENTED_ERROR: command not found' in s
import argparse
def get_args():
"""Get arguments from CLI"""
    parser = argparse.ArgumentParser(
        description="""Filter BED records by a minimum interval length""")
parser.add_argument(
"--input",
required=True,
help="""The input BED file"""
)
parser.add_argument(
"--output",
required=True,
help="""The output BED file"""
)
parser.add_argument(
"--min-length",
type=int,
default=100,
help="""The minimum length to filter""",
)
return parser.parse_args()
def main():
    args = get_args()
    # keep only BED records whose interval length (end - start) meets --min-length
    with open(args.input, 'r') as infile, open(args.output, 'w') as outf:
        for line in infile:
            ls = line.strip().split('\t')
            if int(ls[2]) - int(ls[1]) >= args.min_length:
                outf.write(line)
if __name__ == '__main__':
    main()
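# A minimal, self-contained sketch of the same length filter applied to
# in-memory lines (the records below are made up for illustration); the script
# above does the equivalent line by line on the --input/--output files, using
# BED columns 1 and 2 as the 0-based start and end coordinates.
records = [
    "chr1\t100\t150\tfeature_a",   # length 50  -> dropped at --min-length 100
    "chr1\t200\t450\tfeature_b",   # length 250 -> kept
]
min_length = 100
kept = [r for r in records
        if int(r.split('\t')[2]) - int(r.split('\t')[1]) >= min_length]
assert kept == ["chr1\t200\t450\tfeature_b"]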
"""
SAT Functions for Boolean Polynomials
These high-level functions support solving and learning from Boolean polynomial systems. In this
context, "learning" means the construction of new polynomials in the ideal spanned by the original
polynomials.
AUTHOR:
- <NAME> (2012): initial version
Functions
^^^^^^^^^
"""
##############################################################################
# Copyright (C) 2012 <NAME> <<EMAIL>>
# Distributed under the terms of the GNU General Public License (GPL)
# The full text of the GPL is available at:
# http://www.gnu.org/licenses/
##############################################################################
from sage.sat.solvers import SatSolver
from sage.sat.converters import ANF2CNFConverter
from sage.rings.polynomial.multi_polynomial_sequence import PolynomialSequence
def solve(F, converter=None, solver=None, n=1, target_variables=None, **kwds):
"""
Solve system of Boolean polynomials ``F`` by solving the
SAT-problem -- produced by ``converter`` -- using ``solver``.
INPUT:
- ``F`` - a sequence of Boolean polynomials
- ``n`` - number of solutions to return. If ``n`` is +infinity
      then all solutions are returned. If ``n < infinity`` then ``n``
solutions are returned if ``F`` has at least ``n``
solutions. Otherwise, all solutions of ``F`` are
returned. (default: ``1``)
- ``converter`` - an ANF to CNF converter class or object. If
``converter`` is ``None`` then
:class:`sage.sat.converters.polybori.CNFEncoder` is used to
construct a new converter. (default: ``None``)
- ``solver`` - a SAT-solver class or object. If ``solver`` is
``None`` then :class:`sage.sat.solvers.cryptominisat.CryptoMiniSat`
      is used to construct a new solver. (default: ``None``)
- ``target_variables`` - a list of variables. The elements of the list are
used to exclude a particular combination of variable assignments of a
solution from any further solution. Furthermore ``target_variables``
denotes which variable-value pairs appear in the solutions. If
``target_variables`` is ``None`` all variables appearing in the
polynomials of ``F`` are used to construct exclusion clauses.
(default: ``None``)
- ``**kwds`` - parameters can be passed to the converter and the
solver by prefixing them with ``c_`` and ``s_`` respectively. For
example, to increase CryptoMiniSat's verbosity level, pass
``s_verbosity=1``.
OUTPUT:
A list of dictionaries, each of which contains a variable
assignment solving ``F``.
EXAMPLES:
We construct a very small-scale AES system of equations::
sage: sr = mq.SR(1,1,1,4,gf2=True,polybori=True)
sage: F,s = sr.polynomial_system()
and pass it to a SAT solver::
sage: from sage.sat.boolean_polynomials import solve as solve_sat # optional - cryptominisat
sage: s = solve_sat(F) # optional - cryptominisat
sage: F.subs(s[0]) # optional - cryptominisat
Polynomial Sequence with 36 Polynomials in 0 Variables
This time we pass a few options through to the converter and the solver::
sage: s = solve_sat(F, s_verbosity=1, c_max_vars_sparse=4, c_cutting_number=8) # optional - cryptominisat
c ...
...
sage: F.subs(s[0]) # optional - cryptominisat
Polynomial Sequence with 36 Polynomials in 0 Variables
We construct a very simple system with three solutions and ask for a specific number of solutions::
sage: B.<a,b> = BooleanPolynomialRing() # optional - cryptominisat
sage: f = a*b # optional - cryptominisat
sage: l = solve_sat([f],n=1) # optional - cryptominisat
sage: len(l) == 1, f.subs(l[0]) # optional - cryptominisat
(True, 0)
sage: l = solve_sat([a*b],n=2) # optional - cryptominisat
sage: len(l) == 2, f.subs(l[0]), f.subs(l[1]) # optional - cryptominisat
(True, 0, 0)
sage: sorted((d[a], d[b]) for d in solve_sat([a*b],n=3)) # optional - cryptominisat
[(0, 0), (0, 1), (1, 0)]
sage: sorted((d[a], d[b]) for d in solve_sat([a*b],n=4)) # optional - cryptominisat
[(0, 0), (0, 1), (1, 0)]
sage: sorted((d[a], d[b]) for d in solve_sat([a*b],n=infinity)) # optional - cryptominisat
[(0, 0), (0, 1), (1, 0)]
In the next example we see how the ``target_variables`` parameter works::
sage: from sage.sat.boolean_polynomials import solve as solve_sat # optional - cryptominisat
sage: R.<a,b,c,d> = BooleanPolynomialRing() # optional - cryptominisat
sage: F = [a+b,a+c+d] # optional - cryptominisat
First the normal use case::
sage: sorted((D[a], D[b], D[c], D[d]) for D in solve_sat(F,n=infinity)) # optional - cryptominisat
[(0, 0, 0, 0), (0, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 0)]
Now we are only interested in the solutions of the variables a and b::
sage: solve_sat(F,n=infinity,target_variables=[a,b]) # optional - cryptominisat
[{b: 0, a: 0}, {b: 1, a: 1}]
Here, we generate and solve the cubic equations of the AES SBox (see :trac:`26676`)::
sage: from sage.rings.polynomial.multi_polynomial_sequence import PolynomialSequence # optional - cryptominisat, long time
sage: from sage.sat.boolean_polynomials import solve as solve_sat # optional - cryptominisat, long time
sage: sr = sage.crypto.mq.SR(1, 4, 4, 8, allow_zero_inversions = True) # optional - cryptominisat, long time
sage: sb = sr.sbox() # optional - cryptominisat, long time
sage: eqs = sb.polynomials(degree = 3) # optional - cryptominisat, long time
sage: eqs = PolynomialSequence(eqs) # optional - cryptominisat, long time
sage: variables = map(str, eqs.variables()) # optional - cryptominisat, long time
sage: variables = ",".join(variables) # optional - cryptominisat, long time
sage: R = BooleanPolynomialRing(16, variables) # optional - cryptominisat, long time
sage: eqs = [R(eq) for eq in eqs] # optional - cryptominisat, long time
sage: sls_aes = solve_sat(eqs, n = infinity) # optional - cryptominisat, long time
sage: len(sls_aes) # optional - cryptominisat, long time
256
TESTS:
Test that :trac:`26676` is fixed::
sage: varl = ['k{0}'.format(p) for p in range(29)]
sage: B = BooleanPolynomialRing(names = varl)
sage: B.inject_variables(verbose=False)
sage: keqs = [
....: k0 + k6 + 1,
....: k3 + k9 + 1,
....: k5*k18 + k6*k18 + k7*k16 + k7*k10,
....: k9*k17 + k8*k24 + k11*k17,
....: k1*k13 + k1*k15 + k2*k12 + k3*k15 + k4*k14,
....: k5*k18 + k6*k16 + k7*k18,
....: k3 + k26,
....: k0 + k19,
....: k9 + k28,
....: k11 + k20]
sage: from sage.sat.boolean_polynomials import solve as solve_sat
sage: solve_sat(keqs, n=1, solver=SAT('cryptominisat')) # optional - cryptominisat
[{k28: 0,
k26: 1,
k24: 0,
k20: 0,
k19: 0,
k18: 0,
k17: 0,
k16: 0,
k15: 0,
k14: 0,
k13: 0,
k12: 0,
k11: 0,
k10: 0,
k9: 0,
k8: 0,
k7: 0,
k6: 1,
k5: 0,
k4: 0,
k3: 1,
k2: 0,
k1: 0,
k0: 0}]
sage: solve_sat(keqs, n=1, solver=SAT('picosat')) # optional - pycosat
[{k28: 0,
k26: 1,
k24: 0,
k20: 0,
k19: 0,
k18: 0,
k17: 0,
k16: 0,
k15: 0,
k14: 0,
k13: 1,
k12: 1,
k11: 0,
k10: 0,
k9: 0,
k8: 0,
k7: 0,
k6: 1,
k5: 0,
k4: 1,
k3: 1,
k2: 1,
k1: 1,
k0: 0}]
.. NOTE::
Although supported, passing converter and solver objects
instead of classes is discouraged because these objects are
stateful.
"""
    assert n > 0
try:
len(F)
except AttributeError:
F = F.gens()
len(F)
P = next(iter(F)).parent()
K = P.base_ring()
if target_variables is None:
target_variables = PolynomialSequence(F).variables()
else:
target_variables = PolynomialSequence(target_variables).variables()
    assert set(target_variables).issubset(set(P.gens()))
# instantiate the SAT solver
if solver is None:
from sage.sat.solvers import CryptoMiniSat as solver
if not isinstance(solver, SatSolver):
solver_kwds = {}
for k, v in kwds.items():
if k.startswith("s_"):
solver_kwds[k[2:]] = v
solver = solver(**solver_kwds)
# instantiate the ANF to CNF converter
if converter is None:
from sage.sat.converters.polybori import CNFEncoder as converter
if not isinstance(converter, ANF2CNFConverter):
converter_kwds = {}
for k, v in kwds.items():
if k.startswith("c_"):
converter_kwds[k[2:]] = v
converter = converter(solver, P, **converter_kwds)
phi = converter(F)
rho = dict((phi[i], i) for i in range(len(phi)))
S = []
while True:
s = solver()
if s:
S.append(dict((x, K(s[rho[x]])) for x in target_variables))
if n is not None and len(S) == n:
break
exclude_solution = tuple(-rho[x] if s[rho[x]] else rho[x] for x in target_variables)
solver.add_clause(exclude_solution)
else:
try:
learnt = solver.learnt_clauses(unitary_only=True)
if learnt:
S.append(dict((phi[abs(i)-1], K(i<0)) for i in learnt))
else:
S.append(s)
break
except (AttributeError, NotImplementedError):
# solver does not support recovering learnt clauses
S.append(s)
break
if len(S) == 1:
if S[0] is False:
return False
if S[0] is None:
return None
elif S[-1] is False:
return S[0:-1]
return S
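# A standalone illustration (plain Python, not Sage code) of the ``c_``/``s_``
# keyword convention documented above and reused in learn() below: keyword
# arguments are split by prefix and forwarded to the converter and the solver
# respectively.
def split_prefixed_kwds(kwds):
    """Return (converter_kwds, solver_kwds) from c_*/s_* prefixed keywords."""
    converter_kwds = {k[2:]: v for k, v in kwds.items() if k.startswith("c_")}
    solver_kwds = {k[2:]: v for k, v in kwds.items() if k.startswith("s_")}
    return converter_kwds, solver_kwds
assert split_prefixed_kwds({"s_verbosity": 1, "c_max_vars_sparse": 4}) == (
    {"max_vars_sparse": 4}, {"verbosity": 1})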
def learn(F, converter=None, solver=None, max_learnt_length=3, interreduction=False, **kwds):
"""
Learn new polynomials by running SAT-solver ``solver`` on
SAT-instance produced by ``converter`` from ``F``.
INPUT:
- ``F`` - a sequence of Boolean polynomials
- ``converter`` - an ANF to CNF converter class or object. If ``converter`` is ``None`` then
:class:`sage.sat.converters.polybori.CNFEncoder` is used to construct a new
converter. (default: ``None``)
- ``solver`` - a SAT-solver class or object. If ``solver`` is ``None`` then
      :class:`sage.sat.solvers.cryptominisat.CryptoMiniSat` is used to construct a new solver.
(default: ``None``)
    - ``max_learnt_length`` - only clauses of length <= ``max_learnt_length`` are considered and
converted to polynomials. (default: ``3``)
- ``interreduction`` - inter-reduce the resulting polynomials (default: ``False``)
.. NOTE::
More parameters can be passed to the converter and the solver by prefixing them with ``c_`` and
``s_`` respectively. For example, to increase CryptoMiniSat's verbosity level, pass
``s_verbosity=1``.
OUTPUT:
A sequence of Boolean polynomials.
EXAMPLES::
sage: from sage.sat.boolean_polynomials import learn as learn_sat # optional - cryptominisat
We construct a simple system and solve it::
sage: set_random_seed(2300) # optional - cryptominisat
sage: sr = mq.SR(1,2,2,4,gf2=True,polybori=True) # optional - cryptominisat
sage: F,s = sr.polynomial_system() # optional - cryptominisat
sage: H = learn_sat(F) # optional - cryptominisat
sage: H[-1] # optional - cryptominisat
k033 + 1
"""
try:
len(F)
except AttributeError:
F = F.gens()
len(F)
P = next(iter(F)).parent()
K = P.base_ring()
# instantiate the SAT solver
if solver is None:
from sage.sat.solvers.cryptominisat import CryptoMiniSat as solver
solver_kwds = {}
for k, v in kwds.items():
if k.startswith("s_"):
solver_kwds[k[2:]] = v
solver = solver(**solver_kwds)
# instantiate the ANF to CNF converter
if converter is None:
from sage.sat.converters.polybori import CNFEncoder as converter
converter_kwds = {}
for k, v in kwds.items():
if k.startswith("c_"):
converter_kwds[k[2:]] = v
converter = converter(solver, P, **converter_kwds)
phi = converter(F)
rho = dict((phi[i], i) for i in range(len(phi)))
s = solver()
if s:
learnt = [x + K(s[rho[x]]) for x in P.gens()]
else:
learnt = []
try:
lc = solver.learnt_clauses()
except (AttributeError, NotImplementedError):
# solver does not support recovering learnt clauses
lc = []
for c in lc:
if len(c) <= max_learnt_length:
try:
learnt.append(converter.to_polynomial(c))
except (ValueError, NotImplementedError, AttributeError):
# the solver might have learnt clauses that contain CNF
# variables which have no correspondence to variables in our
# polynomial ring (XOR chaining variables for example)
pass
learnt = PolynomialSequence(P, learnt)
if interreduction:
learnt = learnt.ideal().interreduced_basis()
return learnt
"""Set version helper
"""
#!/usr/bin/env python
import argparse
import semantic_version
from utils_file import FileContext
from utils_iotedge import get_modules
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('type', choices=['patch', 'minor', 'major', 'set'], help="Update type")
parser.add_argument('--version', type=semantic_version.Version, help="set version")
args = parser.parse_args()
fc = FileContext(__file__)
modules = get_modules(fc.git_root + "/factory-ai-vision/EdgeSolution/modules")
for module in modules:
if args.type == 'patch':
module.next_patch()
elif args.type == 'minor':
module.next_minor()
elif args.type == 'major':
module.next_major()
elif args.type == 'set':
if not args.version:
parser.error('please provide version number (--version)')
module.version = args.version
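# A minimal sketch of the semantic_version bumping the module objects are
# assumed to wrap (next_patch/next_minor/next_major are methods of
# semantic_version.Version); the Module class from utils_iotedge is not
# reproduced here.
import semantic_version
v = semantic_version.Version('1.2.3')
assert str(v.next_patch()) == '1.2.4'
assert str(v.next_minor()) == '1.3.0'
assert str(v.next_major()) == '2.0.0'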