blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..616) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5..115) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, ⌀ nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, ⌀ nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, ⌀ nullable) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3..10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3..10.2M) | authors (sequencelengths 1..1) | author_id (stringlengths 1..132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7d153215cdaf6e880f5ca74101116bc24be6340e | b2e9e3db0202a6bd06b5d1f4c4fd3369b5260261 | /python/p032.py | 2e8202df7f17ac73122932747adf0ec4d9e10ad0 | [] | no_license | jackmoody11/project-euler-solutions | 66e7128cae130499ce518c2008e5df91a6883a68 | 8b6e00bfac7855f5c892f5b3094415935358cb98 | refs/heads/master | 2020-04-12T23:52:57.347142 | 2020-01-10T00:23:16 | 2020-01-10T00:23:16 | 162,831,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | from utils import is_pandigital
def is_nine_pandigital_product(a, b):
my_str = str(a) + str(b) + str(a*b)
if len(my_str) == 9 and is_pandigital(my_str):
return True
else:
return False
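# Illustrative check (added note, not part of the original solution): 39 * 186
# equals 7254, and "39" + "186" + "7254" = "391867254" uses each digit 1-9
# exactly once, so is_nine_pandigital_product(39, 186) should return True,
# assuming utils.is_pandigital tests for a 1-9 pandigital string.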
def compute():
# 1 will never return a pandigital product (will repeat digits)
pandigitals = set()
# sqrt(987654321) = 31426.96, so we know this is the
# upper limit for our larger number (we define b as the larger number here)
for a in range(2, 31426):
for b in range(a, 31426):
mult = a * b
if len(str(a) + str(b) + str(mult)) > 9:
                # Once the concatenation of a, b, and a * b gives
                # a string of length > 9, we can skip to the next
                # value for a
break
if is_nine_pandigital_product(a, b) and mult not in pandigitals:
pandigitals.add(mult)
return sum(pandigitals)
if __name__ == "__main__":
print(compute()) | [
"[email protected]"
] | |
4f504c0716dacadf713fabd2507a80603e3b8c13 | d2b53b3568890dd805575035d09635c422c6bc4d | /python/ray/autoscaler/util.py | d436be426411827e6fe4f2b19e777c1de368ccc8 | [
"Apache-2.0",
"MIT"
] | permissive | mehrdadn/ray | 939deda7099eb30371cbb920a9725b314c58c0b5 | 3506910c5da257215d38d02f424acc4f419ddbaf | refs/heads/master | 2020-09-03T15:33:35.578248 | 2020-07-31T21:33:27 | 2020-07-31T21:33:27 | 219,498,150 | 2 | 1 | Apache-2.0 | 2019-11-04T12:37:23 | 2019-11-04T12:37:22 | null | UTF-8 | Python | false | false | 6,188 | py | import collections
import hashlib
import json
import jsonschema
import os
import threading
from typing import Any, Dict
import ray
import ray.services as services
from ray.autoscaler.node_provider import get_default_config
from ray.autoscaler.docker import dockerize_if_needed
REQUIRED, OPTIONAL = True, False
RAY_SCHEMA_PATH = os.path.join(
os.path.dirname(ray.autoscaler.__file__), "ray-schema.json")
# Internal kv keys for storing debug status.
DEBUG_AUTOSCALING_ERROR = "__autoscaling_error"
DEBUG_AUTOSCALING_STATUS = "__autoscaling_status"
class ConcurrentCounter:
def __init__(self):
self._lock = threading.RLock()
self._counter = collections.defaultdict(int)
def inc(self, key, count):
with self._lock:
self._counter[key] += count
return self.value
def dec(self, key, count):
with self._lock:
self._counter[key] -= count
assert self._counter[key] >= 0, "counter cannot go negative"
return self.value
def breakdown(self):
with self._lock:
return dict(self._counter)
@property
def value(self):
with self._lock:
return sum(self._counter.values())
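# Minimal usage sketch of ConcurrentCounter (added for illustration; the key
# name below is hypothetical):
#   c = ConcurrentCounter()
#   c.inc("launching", 2)   # returns 2, the sum over all keys
#   c.dec("launching", 1)   # returns 1
#   c.breakdown()           # returns {"launching": 1}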
def validate_config(config: Dict[str, Any]) -> None:
"""Required Dicts indicate that no extra fields can be introduced."""
if not isinstance(config, dict):
raise ValueError("Config {} is not a dictionary".format(config))
with open(RAY_SCHEMA_PATH) as f:
schema = json.load(f)
try:
jsonschema.validate(config, schema)
except jsonschema.ValidationError as e:
raise jsonschema.ValidationError(message=e.message) from None
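# Added note: the only failure mode introduced by this wrapper itself is the
# type check above, e.g. validate_config([]) raises ValueError; everything
# else is delegated to jsonschema validation against ray-schema.json.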
def prepare_config(config):
with_defaults = fillout_defaults(config)
merge_setup_commands(with_defaults)
dockerize_if_needed(with_defaults)
return with_defaults
def fillout_defaults(config: Dict[str, Any]) -> Dict[str, Any]:
defaults = get_default_config(config["provider"])
defaults.update(config)
defaults["auth"] = defaults.get("auth", {})
return defaults
def merge_setup_commands(config):
config["head_setup_commands"] = (
config["setup_commands"] + config["head_setup_commands"])
config["worker_setup_commands"] = (
config["setup_commands"] + config["worker_setup_commands"])
return config
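# For illustration (hypothetical values): with setup_commands=["a"],
# head_setup_commands=["b"] and worker_setup_commands=["c"], this yields
# head_setup_commands=["a", "b"] and worker_setup_commands=["a", "c"].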
def with_head_node_ip(cmds):
head_ip = services.get_node_ip_address()
out = []
for cmd in cmds:
out.append("export RAY_HEAD_IP={}; {}".format(head_ip, cmd))
return out
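# Example (hypothetical address): if the node IP resolves to 10.0.0.1, then
# with_head_node_ip(["ray stop"]) returns
# ["export RAY_HEAD_IP=10.0.0.1; ray stop"].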
def hash_launch_conf(node_conf, auth):
hasher = hashlib.sha1()
hasher.update(
json.dumps([node_conf, auth], sort_keys=True).encode("utf-8"))
return hasher.hexdigest()
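# Added note: json.dumps(..., sort_keys=True) makes the launch hash insensitive
# to dict key order, e.g. hash_launch_conf({"a": 1, "b": 2}, auth) equals
# hash_launch_conf({"b": 2, "a": 1}, auth) for the same auth value.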
# Cache the file hashes to avoid rescanning them each time. Also, this avoids
# inadvertently restarting workers if the file mount content is mutated on the
# head node.
_hash_cache = {}
def hash_runtime_conf(file_mounts,
cluster_synced_files,
extra_objs,
generate_file_mounts_contents_hash=False):
"""Returns two hashes, a runtime hash and file_mounts_content hash.
The runtime hash is used to determine if the configuration or file_mounts
contents have changed. It is used at launch time (ray up) to determine if
a restart is needed.
The file_mounts_content hash is used to determine if the file_mounts or
cluster_synced_files contents have changed. It is used at monitor time to
determine if additional file syncing is needed.
"""
runtime_hasher = hashlib.sha1()
contents_hasher = hashlib.sha1()
def add_content_hashes(path, allow_non_existing_paths: bool = False):
def add_hash_of_file(fpath):
with open(fpath, "rb") as f:
for chunk in iter(lambda: f.read(2**20), b""):
contents_hasher.update(chunk)
path = os.path.expanduser(path)
if allow_non_existing_paths and not os.path.exists(path):
return
if os.path.isdir(path):
dirs = []
for dirpath, _, filenames in os.walk(path):
dirs.append((dirpath, sorted(filenames)))
for dirpath, filenames in sorted(dirs):
contents_hasher.update(dirpath.encode("utf-8"))
for name in filenames:
contents_hasher.update(name.encode("utf-8"))
fpath = os.path.join(dirpath, name)
add_hash_of_file(fpath)
else:
add_hash_of_file(path)
conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
json.dumps(extra_objs, sort_keys=True).encode("utf-8"))
    # Only generate a contents hash if generate_file_mounts_contents_hash is
    # true or if we need to generate the runtime_hash
if conf_str not in _hash_cache or generate_file_mounts_contents_hash:
for local_path in sorted(file_mounts.values()):
add_content_hashes(local_path)
head_node_contents_hash = contents_hasher.hexdigest()
    # Generate a new runtime_hash if it's not cached
# The runtime hash does not depend on the cluster_synced_files hash
# because we do not want to restart nodes only if cluster_synced_files
# contents have changed.
if conf_str not in _hash_cache:
runtime_hasher.update(conf_str)
runtime_hasher.update(head_node_contents_hash.encode("utf-8"))
_hash_cache[conf_str] = runtime_hasher.hexdigest()
# Add cluster_synced_files to the file_mounts_content hash
if cluster_synced_files is not None:
for local_path in sorted(cluster_synced_files):
            # For cluster_synced_files, we let the path be non-existent
            # because it's possible that the source directory gets set up
            # at any time over the life of the head node.
add_content_hashes(local_path, allow_non_existing_paths=True)
file_mounts_contents_hash = contents_hasher.hexdigest()
else:
file_mounts_contents_hash = None
return (_hash_cache[conf_str], file_mounts_contents_hash)
| [
"[email protected]"
] | |
eeeb8737f788a99b63bc14fa4ee601c37472ba24 | 28dbe47aba287ed94ef7bba734203736bcc06249 | /.history/run_dmac_20200702162638.py | 87a34ccf19b64f8739c194e0fbccc0dad71a4485 | [] | no_license | ntung88/Trading_Algorithms | 242fd816b19df95e02e9fcd8c5c91c862d2ede40 | d96488b1754e3751f739d9c3f094a8f8dc54a0a9 | refs/heads/master | 2022-11-19T16:04:07.800344 | 2020-07-17T21:14:10 | 2020-07-17T21:14:10 | 276,239,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | '''
Script for running dmac on current data. Outputs decision for paper trading since I don't have the resources
to trade electronically :(((
'''
import dmac
import yfinance as yf
import numpy as np
import pandas as pd
import sys
def main():
args = sys.argv[1:]
tickers = ' '.join(args)
data = yf.download(tickers, period='max', group_by='ticker')
dirty = pd.DataFrame(data['TSLA'])
#Currently using only closing prices
    # NOTE: clean_data and optimize are assumed to be provided by the imported dmac module
    frame = dmac.clean_data(dirty)['Close']
    periods = dmac.optimize(frame)
print(periods)
if __name__ == "__main__":
    main()
"[email protected]"
] | |
97d790031b2efb4f1acf57eee08a3880d3106887 | 0bbd11c91de6ed2315a463809cb1094d6523ca02 | /proj03/lattice5/step-afm.py | 690ed8ae14b16b5ae6e3f4b02c7c33b88e22ddfe | [] | no_license | impurity80/emto | b232048829002f2ba721019c45df420696f48973 | 0a7a0d2fcdf41e7763bb4de4244d6598a74ab270 | refs/heads/master | 2021-01-18T19:46:39.102514 | 2017-02-20T04:04:42 | 2017-02-20T04:04:42 | 69,660,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,175 | py |
import csv
import os
from ase import Atom, Atoms
from ase.lattice.cubic import *
from ase.visualize import view
from numpy import *
from emto import *
from ase.utils.eos import EquationOfState
import matplotlib.pyplot as plt
from ase.lattice import bulk
name = 'afm'
curr_dir = os.getcwd()
os.system('mkdir eos')
os.system('mkdir result')
result = '{0}/result/result-{1}.txt'.format(curr_dir,name)
os.system('rm {0}'.format(result))
result_all = '{0}/result/result_summary-{1}.csv'.format(curr_dir,name)
os.system('rm {0}'.format(result_all))
save(result, 'delta calculation {0}'.format(name))
save(result_all, 'delta calculation {0}'.format(name))
csvfile = open('mole.csv', 'rb')
buffer = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
for row in buffer:
id = int(row[0])
c = row[1]
mn = round(row[2]/2.0, 3 )*2.0
ni = round(row[3]/2.0, 3 )*2.0
cr = round(row[4]/2.0, 3 )*2.0
al = round(row[5]/2.0, 3 )*2.0
si = round(row[6]/2.0, 3 )*2.0
mo = round(row[7]/2.0, 3 )*2.0
co = round(row[8]/2.0, 3 )*2.0
cu = round(row[9]/2.0, 3 )*2.0
nb = round(row[10]/2.0, 3 )*2.0
ti = round(row[11]/2.0, 3 )*2.0
v = round(row[12]/2.0, 3 )*2.0
w = round(row[13]/2.0, 3 )*2.0
print row
print mn, ni, cr
fe = 1-mn-ni-cr-al-si-mo-co-cu-nb-ti-v-w
save(result, 'alloy id {0}'.format(id))
OPTIONS = np.linspace(0.98, 1.02, 9)
volumes = []
energies = []
save(result, 'nonmagnetic calculate {0}'.format(id))
for opt in OPTIONS:
l = 3.59 * opt
a = l / sqrt(2)
c = l
atoms = Atoms('Fe2',
scaled_positions=[
(0.0, 0.0, 0),
(0.5, 0.5, 0.5)],
cell=[a, a, c],
pbc=(1, 1, 1))
atoms.set_tags([1, 2])
alloys = []
alloys.append(Alloy(1, 'Fe', fe, 1.0))
alloys.append(Alloy(2, 'Fe', fe, -1.0))
if mn > 1e-7:
alloys.append(Alloy(1, 'Mn', mn, 1.0))
alloys.append(Alloy(2, 'Mn', mn, -1.0))
if ni > 1e-7:
alloys.append(Alloy(1, 'Ni', ni, 1.0))
alloys.append(Alloy(2, 'Ni', ni, -1.0))
if cr > 1e-7:
alloys.append(Alloy(1, 'Cr', cr, 1.0))
alloys.append(Alloy(2, 'Cr', cr, -1.0))
if al > 1e-7:
alloys.append(Alloy(1, 'Al', al, 1.0))
alloys.append(Alloy(2, 'Al', al, -1.0))
if si > 1e-7:
alloys.append(Alloy(1, 'Si', si, 1.0))
alloys.append(Alloy(2, 'Si', si, -1.0))
if mo > 1e-7:
alloys.append(Alloy(1, 'Mo', mo, 1.0))
alloys.append(Alloy(2, 'Mo', mo, -1.0))
if co > 1e-7:
alloys.append(Alloy(1, 'Co', co, 1.0))
alloys.append(Alloy(2, 'Co', co, -1.0))
if cu > 1e-7:
alloys.append(Alloy(1, 'Cu', cu, 1.0))
alloys.append(Alloy(2, 'Cu', cu, -1.0))
if nb > 1e-7:
alloys.append(Alloy(1, 'Nb', nb, 1.0))
alloys.append(Alloy(2, 'Nb', nb, -1.0))
if ti > 1e-7:
alloys.append(Alloy(1, 'Ti', ti, 1.0))
alloys.append(Alloy(2, 'Ti', ti, -1.0))
if v > 1e-7:
alloys.append(Alloy(1, 'V', v, 1.0))
alloys.append(Alloy(2, 'V', v, -1.0))
if w > 1e-7:
alloys.append(Alloy(1, 'W', w, 1.0))
alloys.append(Alloy(2, 'W', w, -1.0))
calc = EMTO()
calc.set(dir='work/{1}/alloy-{2}/opt-{0:0.4f}'.format(opt, name, id),
lat=6,
ncpa=20,
amix=0.05,
afm='F',
kpts=[13, 13, 13]
)
calc.set_alloys(alloys)
atoms.set_calculator(calc)
nm_e = atoms.get_potential_energy()/atoms.get_number_of_atoms()
nm_v = atoms.get_volume()/atoms.get_number_of_atoms()
if nm_e < -0.001:
volumes.append(nm_v)
energies.append(nm_e)
save(result, '{3} result : {0} {1} {2}'.format(opt, nm_v, nm_e, name))
print volumes, energies
temp_volumes = []
temp_energies = []
pivot = energies[0]
for v, e in zip(volumes, energies):
if e-pivot > -0.04 and e-pivot < 0.01:
temp_volumes.append(v)
temp_energies.append(e)
eos = EquationOfState(temp_volumes, temp_energies)
v0, e0, B = eos.fit()
eos.plot('eos/{1}-{0}.png'.format(id,name))
save(result, '{0} {1} {2} {3}'.format(v0, e0, B, (4.0 * v0) ** (1.0 / 3.0)))
save(result, OPTIONS)
save(result, volumes)
save(result, energies)
save(result, '------------------------')
save(result_all, '{0}, {1}, {2}, {3}, {4}, {5}'.format(id, e0, v0, B, volumes, energies ))
# save(result_all, '{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12}, {13}, {14}, {15}, {16}, {17}, {18}, {19}, {20}, {21}, {22} '.format(id, hcp_e0-bct_e0, hcp_e0-fcc_e0, hcp_e0-fccf_e0, fcc_e0-bct_e0, fccf_e0-bct_e0, row, fcc_v0, fcc_e0, fcc_B, fccf_v0, fccf_e0, fccf_B, bct_v0, bct_e0, bct_B, hcp_v0, hcp_e0, hcp_B, fcc_energies, fccf_energies, bct_energies, hcp_energies))
| [
"[email protected]"
] | |
8daa6b9759fb0bebb3876f6b8de6e79afbb4fbc0 | 5537eec7f43098d216d2b550678c8d10b2a26f09 | /venv/tower/lib/python2.7/site-packages/M2Crypto/httpslib.py | 32f536fe85e061063e6ea82c5dbb835c0e73650b | [] | no_license | wipro-sdx/Automation | f0ae1512b8d9d491d7bacec94c8906d06d696407 | a8c46217d0fbe51a71597b5db87cbe98ed19297a | refs/heads/master | 2021-07-08T11:09:05.314435 | 2018-05-02T07:18:54 | 2018-05-02T07:18:54 | 131,812,982 | 0 | 1 | null | 2020-07-23T23:22:33 | 2018-05-02T07:15:28 | Python | UTF-8 | Python | false | false | 10,229 | py | from __future__ import absolute_import
"""M2Crypto support for Python's httplib.
Copyright (c) 1999-2004 Ng Pheng Siong. All rights reserved."""
import base64
import socket
from M2Crypto import SSL, six, util
from urlparse import urlsplit, urlunsplit
from httplib import * # noqa
from httplib import HTTPS_PORT # This is not imported with just '*'
if util.py27plus:
from typing import Any, AnyStr, Callable, Dict, List, Optional # noqa
class HTTPSConnection(HTTPConnection):
"""
This class allows communication via SSL using M2Crypto.
"""
default_port = HTTPS_PORT
def __init__(self, host, port=None, strict=None, **ssl):
# type: (str, Optional[int], Optional[bool], **Dict[Any, Any]) -> None
"""
Represents one transaction with an HTTP server over the SSL
connection.
@param host: host name
@param port: port number
@param strict: if switched on, it raises BadStatusLine to be
raised if the status line can't be parsed as
a valid HTTP/1.0 or 1.1 status line.
@param ssl: dict with all remaining named real parameters of the
function. Specifically, ``ssl_context`` is expected
to be included with SSL.Context; if it is not
default ``'sslv23'`` is substituted).
"""
self.session = None # type: bytes
self.host = host
self.port = port
keys = ssl.keys()
try:
keys.remove('key_file')
except ValueError:
pass
try:
keys.remove('cert_file')
except ValueError:
pass
try:
keys.remove('ssl_context')
except ValueError:
pass
if keys:
raise ValueError('unknown keyword argument')
try:
self.ssl_ctx = ssl['ssl_context']
assert isinstance(self.ssl_ctx, SSL.Context), self.ssl_ctx
except KeyError:
self.ssl_ctx = SSL.Context()
HTTPConnection.__init__(self, host, port, strict)
def connect(self):
# type: () -> None
error = None
# We ignore the returned sockaddr because SSL.Connection.connect needs
# a host name.
for (family, _, _, _, _) in \
socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
sock = None
try:
sock = SSL.Connection(self.ssl_ctx, family=family)
if self.session is not None:
sock.set_session(self.session)
sock.connect((self.host, self.port))
self.sock = sock
sock = None
return
except socket.error as e:
                # Other exceptions are probably SSL-related; in that case we
                # abort and the exception is forwarded to the caller.
error = e
finally:
if sock is not None:
sock.close()
if error is None:
raise AssertionError("Empty list returned by getaddrinfo")
raise error
def close(self):
# type: () -> None
# This kludges around line 545 of httplib.py,
# which closes the connection in this object;
# the connection remains open in the response
# object.
#
# M2Crypto doesn't close-here-keep-open-there,
# so, in effect, we don't close until the whole
# business is over and gc kicks in.
#
# XXX Long-running callers beware leakage.
#
# XXX 05-Jan-2002: This module works with Python 2.2,
# XXX but I've not investigated if the above conditions
# XXX remain.
pass
def get_session(self):
# type: () -> SSL.Session.Session
return self.sock.get_session()
def set_session(self, session):
# type: (SSL.Session.Session) -> None
self.session = session
class ProxyHTTPSConnection(HTTPSConnection):
"""
An HTTPS Connection that uses a proxy and the CONNECT request.
When the connection is initiated, CONNECT is first sent to the proxy (along
with authorization headers, if supplied). If successful, an SSL connection
will be established over the socket through the proxy and to the target
host.
Finally, the actual request is sent over the SSL connection tunneling
through the proxy.
"""
_ports = {'http': 80, 'https': 443}
_AUTH_HEADER = "Proxy-Authorization"
_UA_HEADER = "User-Agent"
def __init__(self, host, port=None, strict=None, username=None,
password=None, **ssl):
# type: (str, Optional[int], Optional[bool], Optional[AnyStr], Optional[AnyStr], **Dict[Any, Any]) -> None
"""
Create the ProxyHTTPSConnection object.
@param host: host name of the proxy server
@param port: port number of the proxy server
@param strict: if switched on, it raises BadStatusLine to be
raised if the status line can't be parsed as
a valid HTTP/1.0 or 1.1 status line.
@param username: username on the proxy server, when required
Username can be ``str``, but preferred type
is ``bytes``. M2Crypto does some conversion to
``bytes`` when necessary, but it's better when
the user of the library does it on its own.
@param password: password on the proxy server, when required
The same as with ``username``, ``str`` is accepted,
but ``bytes`` are preferred.
@param ssl: dict with all remaining named real parameters of the
function. Specifically, ``ssl_context`` is expected
to be included with SSL.Context; if it is not
default ``'sslv23'`` is substituted).
"""
HTTPSConnection.__init__(self, host, port, strict, **ssl)
self._username = username.encode('utf8') \
if isinstance(username, six.string_types) else username
self._password = password.encode('utf8') \
if isinstance(password, six.string_types) else password
self._proxy_auth = None # type: str
self._proxy_UA = None # type: str
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
# type: (AnyStr, AnyStr, int, int) -> None
"""
putrequest is called before connect, so can interpret url and get
real host/port to be used to make CONNECT request to proxy
"""
proto, netloc, path, query, fragment = urlsplit(url)
if not proto:
raise ValueError("unknown URL type: %s" % url)
# get host & port
try:
username_password, host_port = netloc.split('@')
except ValueError:
host_port = netloc
try:
host, port_s = host_port.split(':')
port = int(port_s)
except ValueError:
host = host_port
# try to get port from proto
try:
port = self._ports[proto]
except KeyError:
raise ValueError("unknown protocol for: %s" % url)
self._real_host = host # type: str
self._real_port = port # type: int
rest = urlunsplit((None, None, path, query, fragment))
HTTPSConnection.putrequest(self, method, rest, skip_host,
skip_accept_encoding)
def putheader(self, header, value):
# type: (AnyStr, AnyStr) -> None
# Store the auth header if passed in.
if header.lower() == self._UA_HEADER.lower():
self._proxy_UA = value
if header.lower() == self._AUTH_HEADER.lower():
self._proxy_auth = value
else:
HTTPSConnection.putheader(self, header, value)
def endheaders(self, *args, **kwargs):
# type: (*List[Any], **Dict[Any, Any]) -> None
        # We've received all of the headers. Use the supplied username
# and password for authorization, possibly overriding the authstring
# supplied in the headers.
if not self._proxy_auth:
self._proxy_auth = self._encode_auth()
HTTPSConnection.endheaders(self, *args, **kwargs)
def connect(self):
# type: () -> None
HTTPConnection.connect(self)
# send proxy CONNECT request
self.sock.sendall(self._get_connect_msg())
response = HTTPResponse(self.sock)
response.begin()
code = response.status
if code != 200:
# proxy returned and error, abort connection, and raise exception
self.close()
raise socket.error("Proxy connection failed: %d" % code)
self._start_ssl()
def _get_connect_msg(self):
# type: () -> bytes
""" Return an HTTP CONNECT request to send to the proxy. """
msg = "CONNECT %s:%d HTTP/1.1\r\n" % (self._real_host, self._real_port)
msg = msg + "Host: %s:%d\r\n" % (self._real_host, self._real_port)
if self._proxy_UA:
msg = msg + "%s: %s\r\n" % (self._UA_HEADER, self._proxy_UA)
if self._proxy_auth:
msg = msg + "%s: %s\r\n" % (self._AUTH_HEADER, self._proxy_auth)
msg = msg + "\r\n"
return util.py3bytes(msg)
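    # Example of the generated request (illustrative target, no auth/UA set):
    # for example.com:443 this returns
    # b"CONNECT example.com:443 HTTP/1.1\r\nHost: example.com:443\r\n\r\n".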
def _start_ssl(self):
# type: () -> None
""" Make this connection's socket SSL-aware. """
self.sock = SSL.Connection(self.ssl_ctx, self.sock)
self.sock.setup_ssl()
self.sock.set_connect_state()
self.sock.connect_ssl()
def _encode_auth(self):
# type: () -> Optional[bytes]
""" Encode the username and password for use in the auth header. """
if not (self._username and self._password):
return None
# Authenticated proxy
userpass = "%s:%s" % (self._username, self._password)
enc_userpass = base64.encodestring(userpass).replace("\n", "")
return util.py3bytes("Basic %s" % enc_userpass)
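    # Example (illustrative credentials): with username b"user" and password
    # b"pass", the encoded value is "Basic dXNlcjpwYXNz", i.e. the base64
    # encoding of "user:pass".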
| [
"[email protected]"
] | |
e165ad0b94f1b8de683540cc8e774ccd5204c0cb | 59ac1d0f09ebfb527701031f3ab2cfbfb8055f51 | /soapsales/event/apis/__init__.py | dfde242069a4b49a35ed6d838fa738e62af56bcf | [] | no_license | DUMBALINYOLO/erpmanu | d4eb61b66cfa3704bd514b58580bdfec5639e3b0 | db979bafcc7481f60af467d1f48d0a81bbbfc1aa | refs/heads/master | 2023-04-28T13:07:45.593051 | 2021-05-12T09:30:23 | 2021-05-12T09:30:23 | 288,446,097 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22 | py | from .event import *
| [
"[email protected]"
] | |
f9a8acabb60a466dfe29c4ce1b191ff815101635 | 574c640c4adf212db0bcc1f93e9ca48d2296ad72 | /backend/delivery_order/migrations/0001_initial.py | 479852031897524fe8d7963958bdc69b06220b1b | [] | no_license | crowdbotics-apps/test2-27795 | 15331bc54c504607a5cb97369f557e08b1a28343 | 593c571eb9fff06e48e392b4fbcd2c3cb9f82151 | refs/heads/master | 2023-05-12T02:36:09.348868 | 2021-06-07T08:28:50 | 2021-06-07T08:28:50 | 374,590,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | # Generated by Django 2.2.20 on 2021-06-07 08:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('delivery_user_profile', '0001_initial'),
('menu', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_amount', models.FloatField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('contact_info', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_contact_info', to='delivery_user_profile.ContactInfo')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='bill_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('detail', models.TextField()),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField()),
('total_price', models.FloatField()),
('status', models.CharField(max_length=20)),
('notes', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_bill', to='delivery_order.Bill')),
('item_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_item_variant', to='menu.ItemVariant')),
('payment_method', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_payment_method', to='delivery_order.PaymentMethod')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='order_profile', to='delivery_user_profile.Profile')),
],
),
]
| [
"[email protected]"
] | |
b92a4b791f5e98e824c6bf9f13238386d080e65d | 996003d0ee6444480f98eeb496793be6420c9b63 | /tests/mongodb/events/test_database_events.py | bc5eb31811161a6ce52e572e2f77568a27c43dfe | [
"BSD-3-Clause",
"BSD-3-Clause-Clear"
] | permissive | Wytamma/fasteve | fb8b54cd6d8c8bb7c058a04543dfa3781a3a869c | 0d28a50dc6b6e017bbae5ff82150d081e3ad818e | refs/heads/master | 2023-06-08T04:52:02.510353 | 2023-06-01T07:32:07 | 2023-06-01T07:32:07 | 199,584,609 | 41 | 1 | BSD-3-Clause-Clear | 2022-03-17T12:04:28 | 2019-07-30T05:58:32 | Python | UTF-8 | Python | false | false | 1,136 | py | from typing import Optional
from fasteve import Fasteve, MongoModel, Resource, MongoObjectId
from starlette.testclient import TestClient
from pydantic import Field
class People(MongoModel):
id: Optional[MongoObjectId] = Field(alias="_id")
name: Optional[str]
people = Resource(
name="people",
model=People,
resource_methods=["GET", "POST", "DELETE"],
item_methods=["GET", "DELETE", "PUT", "PATCH"],
)
resources = [people]
app = Fasteve(resources=resources)
@app.on_event("after_read_resource")
async def after_read_resource_callback(name, response):
events.append("after_read_resource")
@app.on_event("after_read_item")
async def after_read_item_callback(name, response):
events.append("after_read_item")
events = []
def test_database_events():
with TestClient(app) as test_client:
response = test_client.get("/people")
data = {"name": "Curie"}
response = test_client.post("/people", json=data)
response = test_client.get(f"/people/{response.json()['_data'][0]['_id']}")
assert "after_read_resource" in events
assert "after_read_item" in events
| [
"[email protected]"
] | |
5c7c7fdbcb35fd6878837d4f230c5bc598b4168c | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/vitchyr/disentanglement/mix_vectorized_and_single_reward/pnp_first_try.py | 740841243f26fa33669cbc0f7de986c37b36b3b4 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,205 | py | import torch.nn.functional as F
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.experiments.disentanglement.contextual_encoder_distance_launcher import (
encoder_goal_conditioned_sac_experiment
)
from rlkit.launchers.launcher_util import run_experiment
if __name__ == "__main__":
variant = dict(
env_id='OneObjectPickAndPlace2DEnv-v0',
disentangled_qf_kwargs=dict(
encode_state=True,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_using_encoder_settings=dict(
encode_state=False,
encode_goal=False,
detach_encoder_via_goal=False,
detach_encoder_via_state=False,
),
sac_trainer_kwargs=dict(
reward_scale=1,
discount=0.99,
soft_target_tau=1e-3,
target_update_period=1,
single_loss_weight=0.5,
use_automatic_entropy_tuning=True,
),
num_presampled_goals=5000,
max_path_length=100,
algo_kwargs=dict(
batch_size=256,
num_epochs=300,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
# num_epochs=10,
# num_eval_steps_per_epoch=100,
# num_expl_steps_per_train_loop=100,
# num_trains_per_train_loop=100,
# min_num_steps_before_training=100,
),
replay_buffer_kwargs=dict(
fraction_future_context=0.5,
fraction_distribution_context=0.5,
max_size=int(1e6),
),
save_debug_video=True,
debug_visualization_kwargs=dict(
save_period=20,
initial_save_period=2,
),
save_video=True,
save_video_kwargs=dict(
save_video_period=20,
rows=2,
columns=3,
subpad_length=1,
subpad_color=127,
pad_length=1,
pad_color=0,
num_columns_per_rollout=5,
),
evaluation_goal_sampling_mode='random',
exploration_goal_sampling_mode='random',
exploration_policy_kwargs=dict(
exploration_version='occasionally_repeat',
repeat_prob=0.5,
),
encoder_cnn_kwargs=dict(
kernel_sizes=[3, 3, 3],
n_channels=[8, 16, 32],
strides=[1, 1, 1],
paddings=[0, 0, 0],
pool_type='none',
hidden_activation='relu',
),
use_image_observations=True,
env_renderer_kwargs=dict(
width=12,
height=12,
output_image_format='CHW',
),
video_renderer_kwargs=dict(
width=48,
height=48,
output_image_format='CHW',
),
debug_renderer_kwargs=dict(
width=48,
height=48,
output_image_format='CHW',
),
use_separate_encoder_for_policy=True,
skip_encoder_mlp=False,
encoder_kwargs=dict(
hidden_sizes=[],
),
distance_scatterplot_save_period=20,
distance_scatterplot_initial_save_period=2,
)
search_space = {
'reward_type': [
'encoder_distance',
],
'encoder_kwargs.output_size': [
8,
],
'max_path_length': [
20,
],
'encoder_kwargs.hidden_sizes': [
[],
# [64],
# [64, 64],
],
'replay_buffer_kwargs.fraction_future_context': [
0.5,
],
'disentangled_qf_kwargs.architecture': [
# 'single_head_match_many_heads',
'many_heads',
],
'sac_trainer_kwargs.single_loss_weight': [
1.0,
0.9,
0.5,
0.1,
0.0,
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_name = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 2
mode = 'sss'
exp_name = 'pnp-img-obs-enc-d-rew-many-heads--sweep-single-loss-weight'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for seed in range(n_seeds):
variant['exp_id'] = exp_id
# variant['seed'] = seed
run_experiment(
encoder_goal_conditioned_sac_experiment,
exp_name=exp_name,
mode=mode,
variant=variant,
use_gpu=True,
num_exps_per_instance=3,
# slurm_config_name='cpu_co',
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
),
time_in_mins=int(2.5*24*60),
)
| [
"[email protected]"
] | |
a78915eef8809887752870022b28b634ec01beb0 | aea74a8c1d4ad17eb65b7c70da5342c01fd1a930 | /websites_postgres/scraper_topwatch.py | 9889b1522ed08d8759a96633ee4c81cc9235c4e1 | [] | no_license | savusebastian/angular_project | 4e6d8b398e17ca91842d7579d8f4da8650e7a13a | 9c28c25e4b9875abf346f7e9a7e8baa34bc3f9ee | refs/heads/main | 2023-04-17T07:03:32.016850 | 2021-05-09T09:07:55 | 2021-05-09T09:07:55 | 365,710,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | from bs4 import BeautifulSoup
import psycopg2
import requests
def topwatch_DB():
con = psycopg2.connect(
host='localhost',
database='postgres',
user='postgres',
password='winding1127!'
)
cur = con.cursor()
URL = 'https://www.topwatch.ro/fossil-fs4735'
shop = URL.split('/')[2].split('.')[1]
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
available_data = soup.find_all('loc')
links = [item.get_text() for item in available_data]
for link in links:
try:
web_page = requests.get(link)
web_soup = BeautifulSoup(web_page.content, 'html.parser')
schemaorg_data = web_soup.find_all(itemprop=True)
data = {}
exists_name = False
exists_image = False
for item in schemaorg_data:
if item.get('itemprop') == 'name' and exists_name == False:
data[item.get('itemprop')] = item.get_text().strip()
exists_name = True
if item.get('itemprop') == 'priceCurrency' or item.get('itemprop') == 'price':
data[item.get('itemprop')] = item.get('content')
if item.get('itemprop') == 'model':
data[item.get('itemprop')] = item.get_text()
if item.get('itemprop') == 'image' and exists_image == False:
data[item.get('itemprop')] = item.get('src')
exists_image = True
cur.execute("SELECT model FROM product WHERE model = '%s'" % data['model'])
result = cur.fetchall()
if result != []:
# print('Update', link)
cur.execute("UPDATE product SET price = '%s' WHERE model = '%s'" % (data['price'], data['model']))
con.commit()
else:
# print('Insert', link)
cur.execute("INSERT INTO product(%s, %s, %s, %s, %s, %s, %s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s')" % ('url', 'shop', 'product_name', 'image', 'model', 'price', 'price_currency', link, shop, data['name'], data['image'], data['model'], data['price'], data['priceCurrency']))
con.commit()
except:
print(link)
# for item in data:
# print(item, ':', data[item])
cur.close()
con.close()
if __name__ == '__main__':
topwatch_DB()
| [
"[email protected]"
] | |
63724cd5abaef45153bdee596d8bdd703ee2dcdc | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /crossref/models/gov/nih/nlm/ncbi/jats1/table_wrap_group_orientation.py | ec33b15cb988fe0f5c36ce8ce7d99b32f3e84539 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 170 | py | from enum import Enum
__NAMESPACE__ = "http://www.ncbi.nlm.nih.gov/JATS1"
class TableWrapGroupOrientation(Enum):
LANDSCAPE = "landscape"
PORTRAIT = "portrait"
| [
"[email protected]"
] | |
e3feccc9956618a66353c2c0564ac4e266a9b46c | a9c359681631e8344f55163a2d69018ed02c0a90 | /openr/py/openr/cli/commands/tech_support.py | 2193ec40af9b74d4d2d772c4d554f30c0e0d3e8f | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | facebook/openr | 66c82707ae47fa5ed711c20f0355ad7100a3cf1c | 8e4c6e553f0314763c1595dd6097dd578d771f1c | refs/heads/main | 2023-09-03T02:55:03.399114 | 2023-07-26T16:46:46 | 2023-07-26T16:46:46 | 108,306,129 | 936 | 295 | MIT | 2023-08-31T23:03:31 | 2017-10-25T17:59:53 | C++ | UTF-8 | Python | false | false | 4,893 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from builtins import object
from openr.cli.commands import config, decision, fib, kvstore, lm, monitor, openr, perf
from openr.cli.utils.utils import parse_nodes
from openr.utils.consts import Consts
class TechSupportCmd(object):
def __init__(self, cli_opts):
"""initialize the tech support command"""
self.cli_opts = cli_opts
# Keep short timeout
self.cli_opts.timeout = 1000
# Print routes or not
self.print_routes = False
def run(self, routes):
self.print_routes = routes
funcs = [
("openr config file", self.print_config_file),
("openr runtime params", self.print_runtime_params),
("openr version", self.print_openr_version),
("openr config", self.print_config),
("breeze lm links", self.print_lm_links),
("breeze kvstore peers", self.print_kvstore_peers),
("breeze kvstore nodes", self.print_kvstore_nodes),
("breeze kvstore prefixes", self.print_kvstore_prefixes),
("breeze kvstore keys --ttl", self.print_kvstore_keys),
("breeze decision adj", self.print_decision_adjs),
("breeze decision validate", self.print_decision_validate),
("breeze decision routes", self.print_decision_routes),
("breeze fib validate", self.print_fib_validate),
("breeze fib unicast-routes", self.print_fib_unicast_routes),
("breeze fib mpls-routes", self.print_fib_mpls_routes),
("breeze fib routes-installed", self.print_fib_routes_installed),
("breeze perf fib", self.print_perf_fib),
("breeze monitor counters", self.print_monitor_counters),
("breeze monitor logs", self.print_monitor_logs),
]
failures = []
for title, func in funcs:
self.print_title(title)
try:
func()
except Exception as e:
failures.append(title)
print(e, file=sys.stderr)
if failures:
self.print_title("openr-tech-support failures")
print("\n".join(failures))
ret = 1 if failures else 0
sys.exit(ret)
def print_title(self, title):
print("\n-------- {} --------\n".format(title))
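    # Added note: e.g. self.print_title("breeze lm links") prints the banner
    # "-------- breeze lm links --------" padded with blank lines.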
def print_config_file(self):
if not os.path.isfile(Consts.OPENR_CONFIG_FILE):
print("Missing Config File")
return
with open(Consts.OPENR_CONFIG_FILE) as f:
print(f.read())
def print_runtime_params(self):
output = subprocess.check_output(
["pgrep", "-a", "openr"], stderr=subprocess.STDOUT
)
print(output)
def print_openr_version(self):
openr.VersionCmd(self.cli_opts).run(False)
def print_config(self):
config.ConfigLinkMonitorCmd(self.cli_opts).run()
config.ConfigPrefixManagerCmd(self.cli_opts).run()
def print_lm_links(self):
lm.LMLinksCmd(self.cli_opts).run(False, False)
def print_kvstore_peers(self):
kvstore.PeersCmd(self.cli_opts).run()
def print_kvstore_nodes(self):
kvstore.NodesCmd(self.cli_opts).run()
def print_kvstore_prefixes(self):
kvstore.PrefixesCmd(self.cli_opts).run(["all"], False)
def print_kvstore_keys(self):
kvstore.KeysCmd(self.cli_opts).run(False, "", originator=None, ttl=True)
def print_decision_adjs(self):
decision.DecisionAdjCmd(self.cli_opts).run({"all"}, {"all"}, True, False)
def print_decision_validate(self):
decision.DecisionValidateCmd(self.cli_opts).run()
def print_decision_routes(self):
if not self.print_routes:
return
nodes = parse_nodes(self.cli_opts, "")
decision.DecisionRoutesComputedCmd(self.cli_opts).run(nodes, [], [], False)
def print_fib_validate(self):
fib.FibValidateRoutesCmd(self.cli_opts).run()
def print_fib_unicast_routes(self):
if not self.print_routes:
return
fib.FibUnicastRoutesCmd(self.cli_opts).run([], False, False)
def print_fib_mpls_routes(self):
if not self.print_routes:
return
fib.FibMplsRoutesCmd(self.cli_opts).run([], False)
def print_fib_routes_installed(self):
if not self.print_routes:
return
fib.FibRoutesInstalledCmd(self.cli_opts).run([])
def print_perf_fib(self):
perf.ViewFibCmd(self.cli_opts).run()
def print_monitor_counters(self):
monitor.CountersCmd(self.cli_opts).run()
def print_monitor_logs(self):
monitor.LogCmd(self.cli_opts).run()
| [
"[email protected]"
] | |
c8c60f4d6c2defd5798062f40691b3b44f82ac3c | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Basic Programming/Implementation/Basics of Implementation/Bear and Medals/test.py | eef855d601ec9351deb2295e35c080702103ba53 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 619 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'2',
'4',
'0 0 2',
'1 2 1',
'2 0 0',
'0 2 0',
'1',
'0 1000 0',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'4\n' +
'1000\n')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2403716147c912ea268a503b86ef1623df2f12a0 | bdbc9cd8c64cfa92efffb9e138cb282d36f69b0a | /addons/anonymization/anonymization.py | e89bd173883e90e39936edaa10347503992baf19 | [] | no_license | clebaresu/impra-adns | d330cece1b710643625627bfd7ed66bac7d233ef | 8b9889d86c6ea194cfb7b0db8bdc3284635cc081 | refs/heads/master | 2020-05-02T16:51:41.798969 | 2019-03-27T22:03:32 | 2019-03-27T22:03:32 | 178,080,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,273 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import os
import base64
try:
import cPickle as pickle
except ImportError:
import pickle
import random
import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from itertools import groupby
from operator import itemgetter
FIELD_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('not_existing', 'Not Existing'), ('new', 'New')]
ANONYMIZATION_STATES = FIELD_STATES + [('unstable', 'Unstable')]
WIZARD_ANONYMIZATION_STATES = [('clear', 'Clear'), ('anonymized', 'Anonymized'), ('unstable', 'Unstable')]
ANONYMIZATION_HISTORY_STATE = [('started', 'Started'), ('done', 'Done'), ('in_exception', 'Exception occured')]
ANONYMIZATION_DIRECTION = [('clear -> anonymized', 'clear -> anonymized'), ('anonymized -> clear', 'anonymized -> clear')]
def group(lst, cols):
if isinstance(cols, basestring):
cols = [cols]
return dict((k, [v for v in itr]) for k, itr in groupby(sorted(lst, key=itemgetter(*cols)), itemgetter(*cols)))
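# Illustrative example (added, hypothetical data): grouping a list of dicts by
# one column:
#   group([{'f': 'a', 'v': 1}, {'f': 'a', 'v': 2}, {'f': 'b', 'v': 3}], 'f')
#   == {'a': [{'f': 'a', 'v': 1}, {'f': 'a', 'v': 2}], 'b': [{'f': 'b', 'v': 3}]}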
class ir_model_fields_anonymization(osv.osv):
_name = 'ir.model.fields.anonymization'
_rec_name = 'field_id'
_columns = {
'model_name': fields.char('Object Name', required=True),
'model_id': fields.many2one('ir.model', 'Object', ondelete='set null'),
'field_name': fields.char('Field Name', required=True),
'field_id': fields.many2one('ir.model.fields', 'Field', ondelete='set null'),
        'state': fields.selection(selection=FIELD_STATES, string='Status', required=True, readonly=True),
}
_sql_constraints = [
('model_id_field_id_uniq', 'unique (model_name, field_name)', _("You cannot have two fields with the same name on the same object!")),
]
def _get_global_state(self, cr, uid, context=None):
ids = self.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = self.browse(cr, uid, ids, context=context)
if not len(fields) or len(fields) == len([f for f in fields if f.state == 'clear']):
state = 'clear' # all fields are clear
elif len(fields) == len([f for f in fields if f.state == 'anonymized']):
state = 'anonymized' # all fields are anonymized
else:
state = 'unstable' # fields are mixed: this should be fixed
return state
def _check_write(self, cr, uid, context=None):
"""check that the field is created from the menu and not from an database update
otherwise the database update can crash:"""
if context is None:
context = {}
if context.get('manual'):
global_state = self._get_global_state(cr, uid, context=context)
if global_state == 'anonymized':
raise osv.except_osv('Error!', "The database is currently anonymized, you cannot create, modify or delete fields.")
elif global_state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to create, write or delete fields.")
raise osv.except_osv('Error!', msg)
return True
def _get_model_and_field_ids(self, cr, uid, vals, context=None):
model_and_field_ids = (False, False)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
ir_model_fields_obj = self.pool.get('ir.model.fields')
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', vals['model_name'])], context=context)
if model_ids:
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', vals['field_name']), ('model_id', '=', model_ids[0])], context=context)
if field_ids:
field_id = field_ids[0]
model_and_field_ids = (model_ids[0], field_id)
return model_and_field_ids
def create(self, cr, uid, vals, context=None):
# check field state: all should be clear before we can add a new field to anonymize:
self._check_write(cr, uid, context=context)
global_state = self._get_global_state(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).create(cr, uid, vals, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
# check field state: all should be clear before we can modify a field:
if not (len(vals.keys()) == 1 and vals.get('state') == 'clear'):
self._check_write(cr, uid, context=context)
if 'field_name' in vals and vals['field_name'] and 'model_name' in vals and vals['model_name']:
vals['model_id'], vals['field_id'] = self._get_model_and_field_ids(cr, uid, vals, context=context)
# check not existing fields:
if 'field_id' in vals:
if not vals.get('field_id'):
vals['state'] = 'not_existing'
else:
global_state = self._get_global_state(cr, uid, context)
if global_state != 'unstable':
vals['state'] = global_state
res = super(ir_model_fields_anonymization, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
# check field state: all should be clear before we can unlink a field:
self._check_write(cr, uid, context=context)
res = super(ir_model_fields_anonymization, self).unlink(cr, uid, ids, context=context)
return res
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_name': False,
}}
if model_id:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('id', '=', model_id)])
model_id = model_ids and model_ids[0] or None
model_name = model_id and ir_model_obj.browse(cr, uid, model_id).model or False
res['value']['model_name'] = model_name
return res
def onchange_model_name(self, cr, uid, ids, model_name, context=None):
res = {'value': {
'field_name': False,
'field_id': False,
'model_id': False,
}}
if model_name:
ir_model_obj = self.pool.get('ir.model')
model_ids = ir_model_obj.search(cr, uid, [('model', '=', model_name)])
model_id = model_ids and model_ids[0] or False
res['value']['model_id'] = model_id
return res
def onchange_field_name(self, cr, uid, ids, field_name, model_name):
res = {'value': {
'field_id': False,
}}
if field_name and model_name:
ir_model_fields_obj = self.pool.get('ir.model.fields')
field_ids = ir_model_fields_obj.search(cr, uid, [('name', '=', field_name), ('model', '=', model_name)])
field_id = field_ids and field_ids[0] or False
res['value']['field_id'] = field_id
return res
def onchange_field_id(self, cr, uid, ids, field_id, model_name):
res = {'value': {
'field_name': False,
}}
if field_id:
ir_model_fields_obj = self.pool.get('ir.model.fields')
field = ir_model_fields_obj.browse(cr, uid, field_id)
res['value']['field_name'] = field.name
return res
_defaults = {
'state': lambda *a: 'clear',
}
class ir_model_fields_anonymization_history(osv.osv):
_name = 'ir.model.fields.anonymization.history'
_order = "date desc"
_columns = {
'date': fields.datetime('Date', required=True, readonly=True),
'field_ids': fields.many2many('ir.model.fields.anonymization', 'anonymized_field_to_history_rel', 'field_id', 'history_id', 'Fields', readonly=True),
'state': fields.selection(selection=ANONYMIZATION_HISTORY_STATE, string='Status', required=True, readonly=True),
'direction': fields.selection(selection=ANONYMIZATION_DIRECTION, string='Direction', size=20, required=True, readonly=True),
'msg': fields.text('Message', readonly=True),
'filepath': fields.char(string='File path', readonly=True),
}
class ir_model_fields_anonymize_wizard(osv.osv_memory):
_name = 'ir.model.fields.anonymize.wizard'
def _get_state(self, cr, uid, ids, name, arg, context=None):
res = {}
state = self._get_state_value(cr, uid, context=None)
for id in ids:
res[id] = state
return res
def _get_summary(self, cr, uid, ids, name, arg, context=None):
res = {}
summary = self._get_summary_value(cr, uid, context)
for id in ids:
res[id] = summary
return res
_columns = {
'name': fields.char(string='File Name'),
'summary': fields.function(_get_summary, type='text', string='Summary'),
'file_export': fields.binary(string='Export'),
        'file_import': fields.binary(string='Import', help="This is the file created by the anonymization process. It should have the '.pickle' extension."),
'state': fields.function(_get_state, string='Status', type='selection', selection=WIZARD_ANONYMIZATION_STATES, readonly=False),
'msg': fields.text(string='Message'),
}
def _get_state_value(self, cr, uid, context=None):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
return state
def _get_summary_value(self, cr, uid, context=None):
summary = u''
anon_field_obj = self.pool.get('ir.model.fields.anonymization')
ir_model_fields_obj = self.pool.get('ir.model.fields')
anon_field_ids = anon_field_obj.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
anon_fields = anon_field_obj.browse(cr, uid, anon_field_ids, context=context)
field_ids = [anon_field.field_id.id for anon_field in anon_fields if anon_field.field_id]
fields = ir_model_fields_obj.browse(cr, uid, field_ids, context=context)
fields_by_id = dict([(f.id, f) for f in fields])
for anon_field in anon_fields:
field = fields_by_id.get(anon_field.field_id.id)
values = {
'model_name': field.model_id.name,
'model_code': field.model_id.model,
'field_code': field.name,
'field_name': field.field_description,
'state': anon_field.state,
}
summary += u" * %(model_name)s (%(model_code)s) -> %(field_name)s (%(field_code)s): state: (%(state)s)\n" % values
return summary
def default_get(self, cr, uid, fields_list, context=None):
res = {}
res['name'] = '.pickle'
res['summary'] = self._get_summary_value(cr, uid, context)
res['state'] = self._get_state_value(cr, uid, context)
res['msg'] = _("""Before executing the anonymization process, you should make a backup of your database.""")
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, *args, **kwargs):
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if context is None:
context = {}
step = context.get('step', 'new_window')
res = super(ir_model_fields_anonymize_wizard, self).fields_view_get(cr, uid, view_id, view_type, context, *args, **kwargs)
eview = etree.fromstring(res['arch'])
placeholder = eview.xpath("group[@name='placeholder1']")
if len(placeholder):
placeholder = placeholder[0]
if step == 'new_window' and state == 'clear':
                # clicked in the menu and the fields are not anonymized: warn the admin that backing up the db is very important
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Warning'}))
eview.remove(placeholder)
elif step == 'new_window' and state == 'anonymized':
# clicked in the menu and the fields are already anonymized
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('field', {'name': 'file_import', 'required': "1"}))
placeholder.addnext(etree.Element('label', {'string': 'Anonymization file'}))
eview.remove(placeholder)
elif step == 'just_anonymized':
# we just ran the anonymization process, we need the file export field
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('field', {'name': 'file_export'}))
# we need to remove the button:
buttons = eview.xpath("button")
for button in buttons:
eview.remove(button)
# and add a message:
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Result'}))
# remove the placeholer:
eview.remove(placeholder)
elif step == 'just_desanonymized':
# we just reversed the anonymization process, we don't need any field
# we need to remove the button
buttons = eview.xpath("button")
for button in buttons:
eview.remove(button)
                # and add a message:
placeholder.addnext(etree.Element('field', {'name': 'msg', 'colspan': '4', 'nolabel': '1'}))
placeholder.addnext(etree.Element('newline'))
placeholder.addnext(etree.Element('label', {'string': 'Result'}))
# remove the placeholer:
eview.remove(placeholder)
else:
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything else.")
raise osv.except_osv('Error!', msg)
res['arch'] = etree.tostring(eview)
return res
def _raise_after_history_update(self, cr, uid, history_id, error_type, error_msg):
self.pool.get('ir.model.fields.anonymization.history').write(cr, uid, history_id, {
'state': 'in_exception',
'msg': error_msg,
})
raise osv.except_osv(error_type, error_msg)
def anonymize_database(self, cr, uid, ids, context=None):
"""Sets the 'anonymized' state to defined fields"""
# create a new history record:
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'clear -> anonymized',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'clear' state
state = self.pool.get('ir.model.fields.anonymization')._get_global_state(cr, uid, context=context)
if state == 'anonymized':
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("The database is currently anonymized, you cannot anonymize it again."))
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# do the anonymization:
dirpath = os.environ.get('HOME') or os.getcwd()
rel_filepath = 'field_anonymization_%s_%s.pickle' % (cr.dbname, history_id)
abs_filepath = os.path.abspath(os.path.join(dirpath, rel_filepath))
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
fields = ir_model_fields_anonymization_model.browse(cr, uid, field_ids, context=context)
if not fields:
msg = "No fields are going to be anonymized."
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
data = []
for field in fields:
model_name = field.model_id.model
field_name = field.field_id.name
field_type = field.field_id.ttype
table_name = self.pool[model_name]._table
# get the current value
sql = "select id, %s from %s" % (field_name, table_name)
cr.execute(sql)
records = cr.dictfetchall()
for record in records:
data.append({"model_id": model_name, "field_id": field_name, "id": record['id'], "value": record[field_name]})
# anonymize the value:
anonymized_value = None
sid = str(record['id'])
if field_type == 'char':
anonymized_value = 'xxx'+sid
elif field_type == 'selection':
anonymized_value = 'xxx'+sid
elif field_type == 'text':
anonymized_value = 'xxx'+sid
elif field_type == 'boolean':
anonymized_value = random.choice([True, False])
elif field_type == 'date':
anonymized_value = '2011-11-11'
elif field_type == 'datetime':
anonymized_value = '2011-11-11 11:11:11'
elif field_type == 'float':
anonymized_value = 0.0
elif field_type == 'integer':
anonymized_value = 0
elif field_type in ['binary', 'many2many', 'many2one', 'one2many', 'reference']: # cannot anonymize these kind of fields
msg = _("Cannot anonymize fields of these types: binary, many2many, many2one, one2many, reference.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
if anonymized_value is None:
self._raise_after_history_update(cr, uid, history_id, _('Error !'), _("Anonymized value is None. This cannot happens."))
sql = "update %(table)s set %(field)s = %%(anonymized_value)s where id = %%(id)s" % {
'table': table_name,
'field': field_name,
}
cr.execute(sql, {
'anonymized_value': anonymized_value,
'id': record['id']
})
# save pickle:
        fn = open(abs_filepath, 'wb')
        pickle.dump(data, fn, pickle.HIGHEST_PROTOCOL)
        fn.close()
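        # Note (added for clarity; values are illustrative only): each pickled
        # record is the plain dict built above, e.g.
        #   {"model_id": "res.partner", "field_id": "name", "id": 42, "value": "Acme"}
        # reverse_anonymize_database() later loads this list back to restore the
        # original values.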
# update the anonymization fields:
values = {
'state': 'anonymized',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msgs = ["Anonymization successful.",
"",
"Donot forget to save the resulting file to a safe place because you will not be able to revert the anonymization without this file.",
"",
"This file is also stored in the %s directory. The absolute file path is: %s.",
]
msg = '\n'.join(msgs) % (dirpath, abs_filepath)
        fn = open(abs_filepath, 'rb')
self.write(cr, uid, ids, {
'msg': msg,
'file_export': base64.encodestring(fn.read()),
})
fn.close()
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': abs_filepath,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_anonymized'},
'target':'new',
}
def reverse_anonymize_database(self, cr, uid, ids, context=None):
"""Set the 'clear' state to defined fields"""
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
anonymization_history_model = self.pool.get('ir.model.fields.anonymization.history')
# create a new history record:
vals = {
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'state': 'started',
'direction': 'anonymized -> clear',
}
history_id = anonymization_history_model.create(cr, uid, vals)
# check that all the defined fields are in the 'anonymized' state
state = ir_model_fields_anonymization_model._get_global_state(cr, uid, context=context)
if state == 'clear':
            raise osv.except_osv('Error!', _("The database is not currently anonymized, you cannot reverse the anonymization."))
elif state == 'unstable':
msg = _("The database anonymization is currently in an unstable state. Some fields are anonymized," + \
" while some fields are not anonymized. You should try to solve this problem before trying to do anything.")
raise osv.except_osv('Error!', msg)
wizards = self.browse(cr, uid, ids, context=context)
for wizard in wizards:
if not wizard.file_import:
msg = _("It is not possible to reverse the anonymization process without supplying the anonymization export file.")
self._raise_after_history_update(cr, uid, history_id, 'Error !', msg)
# reverse the anonymization:
# load the pickle file content into a data structure:
data = pickle.loads(base64.decodestring(wizard.file_import))
migration_fix_obj = self.pool.get('ir.model.fields.anonymization.migration.fix')
fix_ids = migration_fix_obj.search(cr, uid, [('target_version', '=', '7.0')])
fixes = migration_fix_obj.read(cr, uid, fix_ids, ['model_name', 'field_name', 'query', 'query_type', 'sequence'])
fixes = group(fixes, ('model_name', 'field_name'))
for line in data:
queries = []
table_name = self.pool[line['model_id']]._table if line['model_id'] in self.pool else None
# check if custom sql exists:
key = (line['model_id'], line['field_id'])
custom_updates = fixes.get(key)
if custom_updates:
custom_updates.sort(key=itemgetter('sequence'))
queries = [(record['query'], record['query_type']) for record in custom_updates if record['query_type']]
elif table_name:
queries = [("update %(table)s set %(field)s = %%(value)s where id = %%(id)s" % {
'table': table_name,
'field': line['field_id'],
}, 'sql')]
for query in queries:
if query[1] == 'sql':
sql = query[0]
cr.execute(sql, {
'value': line['value'],
'id': line['id']
})
elif query[1] == 'python':
raw_code = query[0]
code = raw_code % line
eval(code)
else:
raise Exception("Unknown query type '%s'. Valid types are: sql, python." % (query['query_type'], ))
# update the anonymization fields:
ir_model_fields_anonymization_model = self.pool.get('ir.model.fields.anonymization')
field_ids = ir_model_fields_anonymization_model.search(cr, uid, [('state', '<>', 'not_existing')], context=context)
values = {
'state': 'clear',
}
ir_model_fields_anonymization_model.write(cr, uid, field_ids, values, context=context)
# add a result message in the wizard:
msg = '\n'.join(["Successfully reversed the anonymization.",
"",
])
self.write(cr, uid, ids, {'msg': msg})
# update the history record:
anonymization_history_model.write(cr, uid, history_id, {
'field_ids': [[6, 0, field_ids]],
'msg': msg,
'filepath': False,
'state': 'done',
})
# handle the view:
view_id = self._id_get(cr, uid, 'ir.ui.view', 'view_ir_model_fields_anonymize_wizard_form', 'anonymization')
return {
'res_id': ids[0],
'view_id': [view_id],
'view_type': 'form',
"view_mode": 'form',
'res_model': 'ir.model.fields.anonymize.wizard',
'type': 'ir.actions.act_window',
'context': {'step': 'just_desanonymized'},
'target':'new',
}
def _id_get(self, cr, uid, model, id_str, mod):
if '.' in id_str:
mod, id_str = id_str.split('.')
try:
idn = self.pool.get('ir.model.data')._get_id(cr, uid, mod, id_str)
res = int(self.pool.get('ir.model.data').read(cr, uid, [idn], ['res_id'])[0]['res_id'])
        except Exception:
res = None
return res
class ir_model_fields_anonymization_migration_fix(osv.osv):
_name = 'ir.model.fields.anonymization.migration.fix'
_order = "sequence"
_columns = {
'target_version': fields.char('Target Version'),
'model_name': fields.char('Model'),
'field_name': fields.char('Field'),
'query': fields.text('Query'),
'query_type': fields.selection(string='Query', selection=[('sql', 'sql'), ('python', 'python')]),
'sequence': fields.integer('Sequence'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
374819e09c7095e180257d044a6656442ae72ef5 | 5fcc3fd608a794d260368318c62547f74d4c1416 | /lindenmayer.py | db17a6e4335ac8dda5edf69ca1b53747b3ec0257 | [] | no_license | ds-gurukandhamoorthi/intro-python-exs | 241fb9158096479a100ef378f291ba83e1a7d5d4 | 68c386e51c13d0f31e273016eefc4e29ddecdc04 | refs/heads/master | 2022-02-25T22:28:41.061722 | 2019-10-22T18:36:46 | 2019-10-22T18:36:46 | 103,829,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | def lindenmayer(frm, production_rules,nb_transf=1):
if nb_transf < 1:
return frm
transformed = ''.join(production_rules.get(c, c) for c in frm)
if nb_transf == 1:
return transformed
return lindenmayer(transformed, production_rules, nb_transf - 1)
if __name__ == "__main__":
hilb = lindenmayer('L', {'L':'+RF-LFL-FR+', 'R':'-LF+RFR+FL-'}, 2)
print(hilb)
koch = lindenmayer('F', {'F':'F+F-F-F+F'},2)
print(koch)
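    # Worked example (added for illustration): with the Koch rule above, a single
    # rewriting step expands 'F' into 'F+F-F-F+F'; a second step then rewrites each
    # of those five 'F' characters again, which is what the nb_transf=2 call returns.
    koch_one_step = lindenmayer('F', {'F': 'F+F-F-F+F'}, 1)
    assert koch_one_step == 'F+F-F-F+F'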
| [
"[email protected]"
] | |
4c91ab1744fb3d18d6e5459b8bb50c5db92d0031 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-kms/huaweicloudsdkkms/v2/__init__.py | ce5630e2e36d2d2fb42f93f95413a83a2d84865e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 11,734 | py | # coding: utf-8
from __future__ import absolute_import
from huaweicloudsdkkms.v2.kms_client import KmsClient
from huaweicloudsdkkms.v2.kms_async_client import KmsAsyncClient
from huaweicloudsdkkms.v2.model.action_resources import ActionResources
from huaweicloudsdkkms.v2.model.api_link import ApiLink
from huaweicloudsdkkms.v2.model.api_version_detail import ApiVersionDetail
from huaweicloudsdkkms.v2.model.batch_create_kms_tags_request import BatchCreateKmsTagsRequest
from huaweicloudsdkkms.v2.model.batch_create_kms_tags_request_body import BatchCreateKmsTagsRequestBody
from huaweicloudsdkkms.v2.model.batch_create_kms_tags_response import BatchCreateKmsTagsResponse
from huaweicloudsdkkms.v2.model.cancel_grant_request import CancelGrantRequest
from huaweicloudsdkkms.v2.model.cancel_grant_response import CancelGrantResponse
from huaweicloudsdkkms.v2.model.cancel_key_deletion_request import CancelKeyDeletionRequest
from huaweicloudsdkkms.v2.model.cancel_key_deletion_response import CancelKeyDeletionResponse
from huaweicloudsdkkms.v2.model.cancel_self_grant_request import CancelSelfGrantRequest
from huaweicloudsdkkms.v2.model.cancel_self_grant_response import CancelSelfGrantResponse
from huaweicloudsdkkms.v2.model.create_datakey_request import CreateDatakeyRequest
from huaweicloudsdkkms.v2.model.create_datakey_request_body import CreateDatakeyRequestBody
from huaweicloudsdkkms.v2.model.create_datakey_response import CreateDatakeyResponse
from huaweicloudsdkkms.v2.model.create_datakey_without_plaintext_request import CreateDatakeyWithoutPlaintextRequest
from huaweicloudsdkkms.v2.model.create_datakey_without_plaintext_response import CreateDatakeyWithoutPlaintextResponse
from huaweicloudsdkkms.v2.model.create_grant_request import CreateGrantRequest
from huaweicloudsdkkms.v2.model.create_grant_request_body import CreateGrantRequestBody
from huaweicloudsdkkms.v2.model.create_grant_response import CreateGrantResponse
from huaweicloudsdkkms.v2.model.create_key_request import CreateKeyRequest
from huaweicloudsdkkms.v2.model.create_key_request_body import CreateKeyRequestBody
from huaweicloudsdkkms.v2.model.create_key_response import CreateKeyResponse
from huaweicloudsdkkms.v2.model.create_key_store_request import CreateKeyStoreRequest
from huaweicloudsdkkms.v2.model.create_key_store_request_body import CreateKeyStoreRequestBody
from huaweicloudsdkkms.v2.model.create_key_store_response import CreateKeyStoreResponse
from huaweicloudsdkkms.v2.model.create_kms_tag_request import CreateKmsTagRequest
from huaweicloudsdkkms.v2.model.create_kms_tag_request_body import CreateKmsTagRequestBody
from huaweicloudsdkkms.v2.model.create_kms_tag_response import CreateKmsTagResponse
from huaweicloudsdkkms.v2.model.create_parameters_for_import_request import CreateParametersForImportRequest
from huaweicloudsdkkms.v2.model.create_parameters_for_import_response import CreateParametersForImportResponse
from huaweicloudsdkkms.v2.model.create_random_request import CreateRandomRequest
from huaweicloudsdkkms.v2.model.create_random_response import CreateRandomResponse
from huaweicloudsdkkms.v2.model.decrypt_data_request import DecryptDataRequest
from huaweicloudsdkkms.v2.model.decrypt_data_request_body import DecryptDataRequestBody
from huaweicloudsdkkms.v2.model.decrypt_data_response import DecryptDataResponse
from huaweicloudsdkkms.v2.model.decrypt_datakey_request import DecryptDatakeyRequest
from huaweicloudsdkkms.v2.model.decrypt_datakey_request_body import DecryptDatakeyRequestBody
from huaweicloudsdkkms.v2.model.decrypt_datakey_response import DecryptDatakeyResponse
from huaweicloudsdkkms.v2.model.delete_imported_key_material_request import DeleteImportedKeyMaterialRequest
from huaweicloudsdkkms.v2.model.delete_imported_key_material_response import DeleteImportedKeyMaterialResponse
from huaweicloudsdkkms.v2.model.delete_key_request import DeleteKeyRequest
from huaweicloudsdkkms.v2.model.delete_key_response import DeleteKeyResponse
from huaweicloudsdkkms.v2.model.delete_key_store_request import DeleteKeyStoreRequest
from huaweicloudsdkkms.v2.model.delete_key_store_response import DeleteKeyStoreResponse
from huaweicloudsdkkms.v2.model.delete_tag_request import DeleteTagRequest
from huaweicloudsdkkms.v2.model.delete_tag_response import DeleteTagResponse
from huaweicloudsdkkms.v2.model.disable_key_request import DisableKeyRequest
from huaweicloudsdkkms.v2.model.disable_key_response import DisableKeyResponse
from huaweicloudsdkkms.v2.model.disable_key_rotation_request import DisableKeyRotationRequest
from huaweicloudsdkkms.v2.model.disable_key_rotation_response import DisableKeyRotationResponse
from huaweicloudsdkkms.v2.model.disable_key_store_request import DisableKeyStoreRequest
from huaweicloudsdkkms.v2.model.disable_key_store_response import DisableKeyStoreResponse
from huaweicloudsdkkms.v2.model.enable_key_request import EnableKeyRequest
from huaweicloudsdkkms.v2.model.enable_key_response import EnableKeyResponse
from huaweicloudsdkkms.v2.model.enable_key_rotation_request import EnableKeyRotationRequest
from huaweicloudsdkkms.v2.model.enable_key_rotation_response import EnableKeyRotationResponse
from huaweicloudsdkkms.v2.model.enable_key_store_request import EnableKeyStoreRequest
from huaweicloudsdkkms.v2.model.enable_key_store_response import EnableKeyStoreResponse
from huaweicloudsdkkms.v2.model.encrypt_data_request import EncryptDataRequest
from huaweicloudsdkkms.v2.model.encrypt_data_request_body import EncryptDataRequestBody
from huaweicloudsdkkms.v2.model.encrypt_data_response import EncryptDataResponse
from huaweicloudsdkkms.v2.model.encrypt_datakey_request import EncryptDatakeyRequest
from huaweicloudsdkkms.v2.model.encrypt_datakey_request_body import EncryptDatakeyRequestBody
from huaweicloudsdkkms.v2.model.encrypt_datakey_response import EncryptDatakeyResponse
from huaweicloudsdkkms.v2.model.gen_random_request_body import GenRandomRequestBody
from huaweicloudsdkkms.v2.model.get_parameters_for_import_request_body import GetParametersForImportRequestBody
from huaweicloudsdkkms.v2.model.grants import Grants
from huaweicloudsdkkms.v2.model.import_key_material_request import ImportKeyMaterialRequest
from huaweicloudsdkkms.v2.model.import_key_material_request_body import ImportKeyMaterialRequestBody
from huaweicloudsdkkms.v2.model.import_key_material_response import ImportKeyMaterialResponse
from huaweicloudsdkkms.v2.model.ke_k_info import KeKInfo
from huaweicloudsdkkms.v2.model.key_alias_info import KeyAliasInfo
from huaweicloudsdkkms.v2.model.key_description_info import KeyDescriptionInfo
from huaweicloudsdkkms.v2.model.key_details import KeyDetails
from huaweicloudsdkkms.v2.model.key_status_info import KeyStatusInfo
from huaweicloudsdkkms.v2.model.key_store_state_info import KeyStoreStateInfo
from huaweicloudsdkkms.v2.model.keystore_details import KeystoreDetails
from huaweicloudsdkkms.v2.model.keystore_info import KeystoreInfo
from huaweicloudsdkkms.v2.model.list_grants_request import ListGrantsRequest
from huaweicloudsdkkms.v2.model.list_grants_request_body import ListGrantsRequestBody
from huaweicloudsdkkms.v2.model.list_grants_response import ListGrantsResponse
from huaweicloudsdkkms.v2.model.list_key_detail_request import ListKeyDetailRequest
from huaweicloudsdkkms.v2.model.list_key_detail_response import ListKeyDetailResponse
from huaweicloudsdkkms.v2.model.list_key_stores_request import ListKeyStoresRequest
from huaweicloudsdkkms.v2.model.list_key_stores_response import ListKeyStoresResponse
from huaweicloudsdkkms.v2.model.list_keys_request import ListKeysRequest
from huaweicloudsdkkms.v2.model.list_keys_request_body import ListKeysRequestBody
from huaweicloudsdkkms.v2.model.list_keys_response import ListKeysResponse
from huaweicloudsdkkms.v2.model.list_kms_by_tags_request import ListKmsByTagsRequest
from huaweicloudsdkkms.v2.model.list_kms_by_tags_request_body import ListKmsByTagsRequestBody
from huaweicloudsdkkms.v2.model.list_kms_by_tags_response import ListKmsByTagsResponse
from huaweicloudsdkkms.v2.model.list_kms_tags_request import ListKmsTagsRequest
from huaweicloudsdkkms.v2.model.list_kms_tags_response import ListKmsTagsResponse
from huaweicloudsdkkms.v2.model.list_retirable_grants_request import ListRetirableGrantsRequest
from huaweicloudsdkkms.v2.model.list_retirable_grants_request_body import ListRetirableGrantsRequestBody
from huaweicloudsdkkms.v2.model.list_retirable_grants_response import ListRetirableGrantsResponse
from huaweicloudsdkkms.v2.model.operate_key_request_body import OperateKeyRequestBody
from huaweicloudsdkkms.v2.model.quotas import Quotas
from huaweicloudsdkkms.v2.model.resources import Resources
from huaweicloudsdkkms.v2.model.revoke_grant_request_body import RevokeGrantRequestBody
from huaweicloudsdkkms.v2.model.schedule_key_deletion_request_body import ScheduleKeyDeletionRequestBody
from huaweicloudsdkkms.v2.model.show_key_rotation_status_request import ShowKeyRotationStatusRequest
from huaweicloudsdkkms.v2.model.show_key_rotation_status_response import ShowKeyRotationStatusResponse
from huaweicloudsdkkms.v2.model.show_key_store_request import ShowKeyStoreRequest
from huaweicloudsdkkms.v2.model.show_key_store_response import ShowKeyStoreResponse
from huaweicloudsdkkms.v2.model.show_kms_tags_request import ShowKmsTagsRequest
from huaweicloudsdkkms.v2.model.show_kms_tags_response import ShowKmsTagsResponse
from huaweicloudsdkkms.v2.model.show_public_key_request import ShowPublicKeyRequest
from huaweicloudsdkkms.v2.model.show_public_key_response import ShowPublicKeyResponse
from huaweicloudsdkkms.v2.model.show_user_instances_request import ShowUserInstancesRequest
from huaweicloudsdkkms.v2.model.show_user_instances_response import ShowUserInstancesResponse
from huaweicloudsdkkms.v2.model.show_user_quotas_request import ShowUserQuotasRequest
from huaweicloudsdkkms.v2.model.show_user_quotas_response import ShowUserQuotasResponse
from huaweicloudsdkkms.v2.model.show_version_request import ShowVersionRequest
from huaweicloudsdkkms.v2.model.show_version_response import ShowVersionResponse
from huaweicloudsdkkms.v2.model.show_versions_request import ShowVersionsRequest
from huaweicloudsdkkms.v2.model.show_versions_response import ShowVersionsResponse
from huaweicloudsdkkms.v2.model.sign_request import SignRequest
from huaweicloudsdkkms.v2.model.sign_request_body import SignRequestBody
from huaweicloudsdkkms.v2.model.sign_response import SignResponse
from huaweicloudsdkkms.v2.model.tag import Tag
from huaweicloudsdkkms.v2.model.tag_item import TagItem
from huaweicloudsdkkms.v2.model.update_key_alias_request import UpdateKeyAliasRequest
from huaweicloudsdkkms.v2.model.update_key_alias_request_body import UpdateKeyAliasRequestBody
from huaweicloudsdkkms.v2.model.update_key_alias_response import UpdateKeyAliasResponse
from huaweicloudsdkkms.v2.model.update_key_description_request import UpdateKeyDescriptionRequest
from huaweicloudsdkkms.v2.model.update_key_description_request_body import UpdateKeyDescriptionRequestBody
from huaweicloudsdkkms.v2.model.update_key_description_response import UpdateKeyDescriptionResponse
from huaweicloudsdkkms.v2.model.update_key_rotation_interval_request import UpdateKeyRotationIntervalRequest
from huaweicloudsdkkms.v2.model.update_key_rotation_interval_request_body import UpdateKeyRotationIntervalRequestBody
from huaweicloudsdkkms.v2.model.update_key_rotation_interval_response import UpdateKeyRotationIntervalResponse
from huaweicloudsdkkms.v2.model.validate_signature_request import ValidateSignatureRequest
from huaweicloudsdkkms.v2.model.validate_signature_response import ValidateSignatureResponse
from huaweicloudsdkkms.v2.model.verify_request_body import VerifyRequestBody
| [
"[email protected]"
] | |
c0862bd436f7908175d607f2dbb549efe9d45c55 | e5504d8c4880993b82d5583a11c5cc4623e0eac2 | /LeetCode/30-day-challenge/June/june 8th - june 14th/randomizedSet.py | d1ab1fc8c9c3c8e3c1c1da13ad85aa74f7ab096b | [] | no_license | noorulameenkm/DataStructuresAlgorithms | e5f87f426fc444d18f830e48569d2a7a50f5d7e0 | 7c3bb89326d2898f9e98590ceb8ee5fd7b3196f0 | refs/heads/master | 2023-06-08T19:29:42.507761 | 2023-05-28T16:20:19 | 2023-05-28T16:20:19 | 219,270,731 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import random
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.random_set = []
self.index_store = {}
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.index_store:
return False
self.random_set.append(val)
self.index_store[val] = len(self.random_set) - 1
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self.index_store:
index = self.index_store[val]
last_element = self.random_set[-1]
self.random_set[index], self.index_store[last_element] = last_element, index
del self.index_store[val]
self.random_set.pop()
return True
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
return random.choice(self.random_set)
# Your RandomizedSet object will be instantiated and called as such:
obj = RandomizedSet()
print(f'Answer is {obj.insert(1)}')
print(f'Answer is {obj.remove(2)}')
print(f'Answer is {obj.getRandom()}')
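# Expected output of the three calls above (noted for clarity): insert(1) is True
# because 1 is not yet present, remove(2) is False because 2 was never inserted,
# and getRandom() can only return 1 since it is the sole element.
# Small extra demonstration of the O(1) removal trick (added, not in the original):
obj.insert(2)
obj.insert(3)
print(f'Answer is {obj.remove(2)}')  # True: 2 is swapped with the last element (3) and popped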
| [
"[email protected]"
] | |
7a36658ae593bde936c367f2ef8c46229d4f76b0 | 22fbe9c0fc8cc366123111f54f103e3c109bce7a | /zeabus_vision/src/read_exposure.py | fcc70b006aa286c7a67b094dcef463adeab7568c | [] | no_license | zeabusTeam/zeabus_software_ros1 | 3730021eb3eb6d98df585d172a44c4d6176e8963 | 86a07f4da03457bad3ce9b0c63b3867403780bc0 | refs/heads/master | 2020-03-25T23:02:22.816144 | 2019-03-19T17:30:37 | 2019-03-19T17:30:37 | 144,256,396 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | #!/usr/bin/python2.7
"""
File name: read_exposure.py
Author: zeabus
Date created: 2018/10/16
Python Version: 2.7
"""
import rospy
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from statistic import Statistic
from sensor_msgs.msg import CompressedImage
from dynamic_reconfigure.client import Client
import time
# bgr = None
# sub_sampling = 0.4
# stat = Statistic()
# def image_callback(msg):
# global bgr, sub_sampling
# arr = np.fromstring(msg.data, np.uint8)
# bgr = cv.resize(cv.imdecode(arr, 1), (0, 0),
# fx=sub_sampling, fy=sub_sampling)
def read_exposure():
time_avg = []
while not rospy.is_shutdown():
start = time.time()
client_name = "ueye_cam_nodelet_front"
client = Client(client_name)
params = {"exposure": 33.0}
client.update_configuration(params)
        rospy.get_param("/" + str(client_name) + "/exposure", None)
stop = time.time()
duration = stop - start
print(duration)
time_avg.append(duration)
print("Duration between call exposure API: ")
time_avg = np.array(time_avg)
print('max:',time_avg.max())
print('min:',time_avg.min())
print('mean:',time_avg.mean())
if __name__ == '__main__':
rospy.init_node('read_exposure', anonymous=False)
read_exposure()
| [
"[email protected]"
] | |
985e9efd8ab0fefb39ec3e631887fb7ce65a29f5 | 811ee1e3bba45419e6c17068027d54bf6c8d4f07 | /python/gpmp_utils/plotPlanarMobile2Arms.py | e6aa3a802cbaf75ae3a9859c50c99e102c449c40 | [
"BSD-3-Clause"
] | permissive | kalyanvasudev/gpmp2 | 7dfe19873c72a7b9202c06eb794ef779c2917032 | 1ee99c743d978ab20dc804c8cd9cfa7813084957 | refs/heads/master | 2021-12-23T13:01:27.320270 | 2020-05-03T00:44:59 | 2020-05-03T00:44:59 | 227,194,751 | 0 | 0 | NOASSERTION | 2019-12-10T19:01:00 | 2019-12-10T19:00:59 | null | UTF-8 | Python | false | false | 1,366 | py |
import numpy as np
from gtsam import *
from gpmp2 import *
def plotPlanarMobile2Arms(figure, axis, marm, p, vehsize, color, width):
    # PLOTPLANARMOBILE2ARMS: plot a planar mobile base (a rectangle of size
    # vehsize = [length, width]) together with its two attached arms.
    # color = (r, g, b) where all values lie between 0 and 1
    pose = p.pose()
    # vehicle corners
    corner1 = pose.transform_from(Point2(vehsize[0]/2, vehsize[1]/2))
    corner2 = pose.transform_from(Point2(-vehsize[0]/2, vehsize[1]/2))
    corner3 = pose.transform_from(Point2(-vehsize[0]/2, -vehsize[1]/2))
    corner4 = pose.transform_from(Point2(vehsize[0]/2, -vehsize[1]/2))
    # vehicle base black lines
    axis.plot([corner1.x(), corner2.x(), corner3.x(), corner4.x(), corner1.x()],
              [corner1.y(), corner2.y(), corner3.y(), corner4.y(), corner1.y()], 'k-')
    # arm
    position = marm.forwardKinematicsPosition(p)
    position = position[0:2, :]  # Todo: check rows and columns
    # columns 0..arm1.dof hold the first arm's joint positions; the second arm
    # starts again from the base point (column 0) and uses the remaining columns
    arm2_idx = np.r_[0, marm.arm1.dof+1:position.shape[1]]
    axis.plot(position[0, 0:marm.arm1.dof+1], position[1, 0:marm.arm1.dof+1],
              color=color, linewidth=width)
    axis.plot(position[0, arm2_idx], position[1, arm2_idx],
              color=color, linewidth=width)
    axis.plot(position[0, 0:marm.arm1.dof+1], position[1, 0:marm.arm1.dof+1],
              'k.', markersize=5)
    axis.plot(position[0, marm.arm1.dof+1:], position[1, marm.arm1.dof+1:],
              'k.', markersize=5)
| [
"[email protected]"
] | |
2fec9430049b72f2e0ef566a4a08ec641022877e | 926c7a9760702d3c56adfa3eec0e164cb2c766b6 | /gunnery/gunnery/settings/development.py | 8dcd148202883fecfc670ba753d381b3add3c197 | [
"Apache-2.0"
] | permissive | hunslater/gunnery | 7d947942f0c9db56d4102e68758b95b4292efbc3 | 2b9457ef899f7367dc07ba28cc1b7e4ff2c47d8e | refs/heads/master | 2021-01-17T23:14:38.387308 | 2014-06-11T20:43:51 | 2014-06-11T20:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | from .common import *
ENVIRONMENT = 'development'
DEBUG = True
TEMPLATE_DEBUG = True
INSTALLED_APPS += (
'debug_toolbar',
'django_nose',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
from fnmatch import fnmatch
class glob_list(list):
def __contains__(self, key):
for elt in self:
if fnmatch(key, elt): return True
return False
INTERNAL_IPS = glob_list(['127.0.0.1', '10.0.*.*'])
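# Example of the glob matching above (illustrative): any address in the 10.0.x.x
# range is treated as internal, e.g.
#   '10.0.1.5' in INTERNAL_IPS     -> True
#   '192.168.0.1' in INTERNAL_IPS  -> False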
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' | [
"[email protected]"
] | |
bdfc04a2ac39f2bc9a169b4e3ae7b83d76b39078 | c9e616e6f5146805c6d9c19d35220e9b76c93aa6 | /박현채/비타알고 시즌2/19년9월1주차/시공의 폭풍속으로.py | d5b58e5858d58444c9471d2b902b1d7e5b4f7e13 | [] | no_license | inje-illab/Algorithmer | 2d29244d38a2aeec07ad83e47e69016269ff4e88 | ed5c67d0a1b0c720e5a8ce8fe5bafba4bb0f36b8 | refs/heads/master | 2023-03-16T05:46:41.795826 | 2020-01-05T14:59:36 | 2020-01-05T14:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | TEAM_SELECT_HERO = []
USER_SELECT_HERO = []
USEABLE_HERO = []
TEAM_SELECT_HERO.extend(map(int, input().split()))
USER_SELECT_HERO.extend(map(int, input().split()))
USEABLE_HERO = list(set(USER_SELECT_HERO) - set(TEAM_SELECT_HERO)) # set of user-selected heroes minus set of team-selected heroes
print(len(USEABLE_HERO)) # print the number of usable heroes
"[email protected]"
] | |
af9ab97508d3762e24f80f6c7f04143f5b825c27 | bbf3a1b2f2f4ec3fa468a089c042643ec8243c15 | /ML/research/object_detection/webcam_detection.py | 35ee40f148009ce924d3b470369ca8ad01da9298 | [
"Apache-2.0"
] | permissive | lasarox/Code | 94aa9b3d816016a171e4a3babd9127cb01a6cd03 | 2c04be4e7a066340f1cf2b45bec18298d010312a | refs/heads/master | 2023-04-05T16:51:46.159055 | 2021-05-01T06:24:02 | 2021-05-01T06:24:02 | 361,516,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,099 | py | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from utils import label_map_util
from utils import visualization_utils as vis_util
import cv2
cap = cv2.VideoCapture(0) # if you have multiple webcams change the value to the correct one
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# In[4]:
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# ## Download Model
# In[5]:
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
# In[6]:
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
# In[8]:
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# # Detection
# In[9]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ] # change this value if you want to add more pictures to test
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# In[10]:
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while True:
ret, image_np = cap.read()
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
| [
"[email protected]"
] | |
27a2bfac21348a6fb2463f4306fb6d253e6c0790 | 1c343f610133030fbe160a1cd864bfc29be84fa8 | /tests/test_topicmod_visualize.py | 8d7321d8a7c0276326733f8c7338eda06ac194ec | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mcooper/tmtoolkit | 56e585c71a553b3344c05a9a1e77adfa5044b29a | cdfbaf7f20095ea45edbdf9e773544e3bb63089d | refs/heads/master | 2020-04-29T01:09:42.137637 | 2019-03-15T23:37:06 | 2019-03-15T23:37:06 | 175,721,140 | 0 | 0 | Apache-2.0 | 2019-03-15T00:35:56 | 2019-03-15T00:35:56 | null | UTF-8 | Python | false | false | 3,586 | py | import os
import six
import PIL
from tmtoolkit.topicmod import model_io, visualize
try:
from wordcloud import WordCloud
def test_generate_wordclouds_for_topic_words():
py3file = '.py3' if six.PY3 else ''
data = model_io.load_ldamodel_from_pickle('tests/data/tiny_model_reuters_5_topics%s.pickle' % py3file)
model = data['model']
vocab = data['vocab']
phi = model.topic_word_
assert phi.shape == (5, len(vocab))
topic_word_clouds = visualize.generate_wordclouds_for_topic_words(phi, vocab, 10)
assert len(topic_word_clouds) == 5
assert set(topic_word_clouds.keys()) == set('topic_%d' % i for i in range(1, 6))
assert all(isinstance(wc, PIL.Image.Image) for wc in topic_word_clouds.values())
topic_word_clouds = visualize.generate_wordclouds_for_topic_words(phi, vocab, 10,
which_topics=('topic_1', 'topic_2'),
return_images=False,
width=640, height=480)
assert set(topic_word_clouds.keys()) == {'topic_1', 'topic_2'}
assert all(isinstance(wc, WordCloud) for wc in topic_word_clouds.values())
assert all(wc.width == 640 and wc.height == 480 for wc in topic_word_clouds.values())
def test_generate_wordclouds_for_document_topics():
py3file = '.py3' if six.PY3 else ''
data = model_io.load_ldamodel_from_pickle('tests/data/tiny_model_reuters_5_topics%s.pickle' % py3file)
model = data['model']
doc_labels = data['doc_labels']
theta = model.doc_topic_
assert theta.shape == (len(doc_labels), 5)
doc_topic_clouds = visualize.generate_wordclouds_for_document_topics(theta, doc_labels, 3)
assert len(doc_topic_clouds) == len(doc_labels)
assert set(doc_topic_clouds.keys()) == set(doc_labels)
assert all(isinstance(wc, PIL.Image.Image) for wc in doc_topic_clouds.values())
which_docs = doc_labels[:2]
assert len(which_docs) == 2
doc_topic_clouds = visualize.generate_wordclouds_for_document_topics(theta, doc_labels, 3,
which_documents=which_docs,
return_images=False,
width=640, height=480)
assert set(doc_topic_clouds.keys()) == set(which_docs)
assert all(isinstance(wc, WordCloud) for wc in doc_topic_clouds.values())
assert all(wc.width == 640 and wc.height == 480 for wc in doc_topic_clouds.values())
def test_write_wordclouds_to_folder(tmpdir):
path = tmpdir.mkdir('wordclouds').dirname
py3file = '.py3' if six.PY3 else ''
data = model_io.load_ldamodel_from_pickle('tests/data/tiny_model_reuters_5_topics%s.pickle' % py3file)
model = data['model']
vocab = data['vocab']
phi = model.topic_word_
assert phi.shape == (5, len(vocab))
topic_word_clouds = visualize.generate_wordclouds_for_topic_words(phi, vocab, 10)
visualize.write_wordclouds_to_folder(topic_word_clouds, path, 'cloud_{label}.png')
for label in topic_word_clouds.keys():
assert os.path.exists(os.path.join(path, 'cloud_{label}.png'.format(label=label)))
except ImportError:
# wordcloud module not found
pass
| [
"[email protected]"
] | |
fa557f36f229ffae04fde795ddaad2491c3d8cb8 | fe6cbc51ef5043ff2f953fd2202540fd0f7d7cbc | /mnist_deploy.py | 025830d8ddccfeeac54cc4347bec752d88be5c3a | [] | no_license | Tveek/caffe_learning | f87c9abecb879a9807368b733772d669315cca41 | e841abb2d0f92c5e0f9f558fbdd9e128c526f1b2 | refs/heads/master | 2021-01-22T07:32:57.173028 | 2016-09-26T13:32:45 | 2016-09-26T13:32:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | # -*- coding: utf-8 -*-
from caffe import layers as L,params as P,to_proto
root='./'
deploy=root+'mnist/deploy.prototxt'   # path where the deploy file will be saved
def create_deploy():
    # the first (data) layer is omitted in the deploy network
conv1=L.Convolution(bottom='data', kernel_size=5, stride=1,num_output=20, pad=0,weight_filler=dict(type='xavier'))
pool1=L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
conv2=L.Convolution(pool1, kernel_size=5, stride=1,num_output=50, pad=0,weight_filler=dict(type='xavier'))
pool2=L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
fc3=L.InnerProduct(pool2, num_output=500,weight_filler=dict(type='xavier'))
relu3=L.ReLU(fc3, in_place=True)
fc4 = L.InnerProduct(relu3, num_output=10,weight_filler=dict(type='xavier'))
    # no accuracy layer at the end, but a Softmax layer instead
prob=L.Softmax(fc4)
return to_proto(prob)
def write_deploy():
with open(deploy, 'w') as f:
f.write('name:"Lenet"\n')
f.write('input:"data"\n')
f.write('input_dim:1\n')
f.write('input_dim:3\n')
f.write('input_dim:28\n')
f.write('input_dim:28\n')
f.write(str(create_deploy()))
if __name__ == '__main__':
write_deploy() | [
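    # Usage sketch (added; the weights file name is an assumption, not part of this
    # script): the generated deploy.prototxt is later paired with trained weights
    # for inference, e.g.
    #   net = caffe.Net(deploy, root + 'mnist/lenet_iter_10000.caffemodel', caffe.TEST)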
"[email protected]"
] | |
c0c907c9480629a7fb74fc8e8f851f465c9ed21a | 5b85703aa0dd5a6944d99370a5dde2b6844517ec | /03.Python/13.XML1_Find_the_Score.py | a6dcf1e4cc24fe1e036199a2dd8a7b3941d3931c | [] | no_license | alda07/hackerrank | 255329196e6a4b9d598c3f51790caf4a99a755bc | a09091f859e87462c95ee856cbbd0ad9b5992159 | refs/heads/master | 2021-10-24T07:38:34.795632 | 2019-03-23T17:29:32 | 2019-03-23T17:29:32 | 90,329,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
    # sum the number of attributes over every element in the tree
    count_attribs = 0
    for element in node.iter():
        count_attribs += len(element.attrib)
    return count_attribs
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
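    # Worked example (added for illustration): for the input
    #   <feed xml:lang="en"><title lang="en">Sample</title></feed>
    # the tree has two elements carrying one attribute each, so the printed score is 2.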
| [
"[email protected]"
] | |
bb290fb3de03a8e1382fb626fc7fa2776ea5e699 | f18003f682be98942b0955f76c8b3d39cacadda3 | /ansible/roles/kubernetes-modules/library/k8s_v1alpha1_initializer_configuration_list.py | 8de3f563a370fa778896525df707b708ad0c3bd9 | [
"Apache-2.0",
"MIT"
] | permissive | request-yo-racks/infra | 77c205d3b887ead8a249a5865edcecd213023dc3 | deade6804bf061bdf69325f0442695bd4ad55ef5 | refs/heads/master | 2022-01-22T06:56:41.271540 | 2019-05-09T18:58:43 | 2019-05-09T18:58:43 | 111,489,644 | 1 | 8 | MIT | 2018-11-11T03:44:46 | 2017-11-21T02:41:50 | Python | UTF-8 | Python | false | false | 26,052 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.k8s_common import KubernetesAnsibleModule, KubernetesAnsibleException
DOCUMENTATION = '''
module: k8s_v1alpha1_initializer_configuration_list
short_description: Kubernetes InitializerConfigurationList
description:
- Retrieve a list of initializer_configurations. List operations provide a snapshot
read of the underlying objects, returning a resource_version representing a consistent
version of the listed objects.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will updated,
and lists will be replaced, rather than merged.
default: false
type: bool
host:
description:
- Provide a URL for acessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
is performed, where individual elements with a matching I(name_) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- kubernetes == 3.0.0
'''
EXAMPLES = '''
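# Illustrative sketch only (added; not generated from the module source).
# Parameter names follow the options documented above; the task name and the
# registered variable name are arbitrary.
- name: List initializer configurations
  k8s_v1alpha1_initializer_configuration_list:
    kubeconfig: ~/.kube/config
    context: my-cluster
  register: initializer_configuration_list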
'''
RETURN = '''
api_version:
type: string
description: Requested API version
initializer_configuration_list:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
items:
description:
- List of InitializerConfiguration.
type: list
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value,
and may reject unrecognized values.
type: str
initializers:
description:
- Initializers is a list of resources and their default initializers Order-sensitive.
When merging multiple InitializerConfigurations, we sort the initializers
from different InitializerConfigurations by the name of the InitializerConfigurations;
the order of the initializers from the same InitializerConfiguration is
preserved.
type: list
contains:
failure_policy:
description:
- FailurePolicy defines what happens if the responsible initializer
controller fails to takes action. Allowed values are Ignore, or Fail.
If "Ignore" is set, initializer is removed from the initializers list
of an object if the timeout is reached; If "Fail" is set, admissionregistration
returns timeout error if the timeout is reached.
type: str
name:
description:
- Name is the identifier of the initializer. It will be added to the
object that needs to be initialized. Name should be fully qualified,
e.g., alwayspullimages.kubernetes.io, where "alwayspullimages" is
the name of the webhook, and kubernetes.io is the name of the organization.
Required
type: str
rules:
description:
- Rules describes what resources/subresources the initializer cares
about. The initializer cares about an operation if it matches _any_
Rule. Rule.Resources must not include subresources.
type: list
contains:
api_groups:
description:
- APIGroups is the API groups the resources belong to. '*' is all
groups. If '*' is present, the length of the slice must be one.
Required.
type: list
contains: str
api_versions:
description:
- APIVersions is the API versions the resources belong to. '*' is
all versions. If '*' is present, the length of the slice must
be one. Required.
type: list
contains: str
resources:
description:
- "Resources is a list of resources this rule applies to. For example:\
\ 'pods' means pods. 'pods/log' means the log subresource of pods.\
\ '*' means all resources, but not subresources. 'pods/*' means\
\ all subresources of pods. '*/scale' means all scale subresources.\
\ '*/*' means all resources and their subresources. If wildcard\
\ is present, the validation rule will ensure resources do not\
\ overlap with each other. Depending on the enclosing object,\
\ subresources might not be allowed. Required."
type: list
contains: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated. In CamelCase.
type: str
metadata:
description:
- Standard object metadata;
type: complex
contains:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource
that may be set by external tools to store and retrieve arbitrary
metadata. They are not queryable and should be preserved when modifying
objects.
type: complex
contains: str, str
cluster_name:
description:
- The name of the cluster which the object belongs to. This is used
to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: str
creation_timestamp:
description:
- CreationTimestamp is a timestamp representing the server time when
this object was created. It is not guaranteed to be set in happens-before
order across separate operations. Clients may not set this value.
It is represented in RFC3339 form and is in UTC. Populated by the
system. Read-only. Null for lists.
type: complex
contains: {}
deletion_grace_period_seconds:
description:
- Number of seconds allowed for this object to gracefully terminate
before it will be removed from the system. Only set when deletionTimestamp
is also set. May only be shortened. Read-only.
type: int
deletion_timestamp:
description:
- DeletionTimestamp is RFC 3339 date and time at which this resource
will be deleted. This field is set by the server when a graceful deletion
is requested by the user, and is not directly settable by a client.
The resource is expected to be deleted (no longer visible from resource
lists, and not reachable by name) after the time in this field. Once
set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. Populated by the system when a graceful deletion
is requested. Read-only.
type: complex
contains: {}
finalizers:
description:
- Must be empty before the object is deleted from the registry. Each
entry is an identifier for the responsible component that will remove
the entry from the list. If the deletionTimestamp of the object is
non-nil, entries in this list can only be removed.
type: list
contains: str
generate_name:
description:
- GenerateName is an optional prefix, used by the server, to generate
a unique name ONLY IF the Name field has not been provided. If this
field is used, the name returned to the client will be different than
the name passed. This value will also be combined with a unique suffix.
The provided value has the same validation rules as the Name field,
and may be truncated by the length of the suffix required to make
the value unique on the server. If this field is specified and the
generated name exists, the server will NOT return a 409 - instead,
it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). Applied only if Name is not specified.
type: str
generation:
description:
- A sequence number representing a specific generation of the desired
state. Populated by the system. Read-only.
type: int
initializers:
description:
- An initializer is a controller which enforces some system invariant
at object creation time. This field is a list of initializers that
have not yet acted on this object. If nil or empty, this object has
been completely initialized. Otherwise, the object is considered uninitialized
and is hidden (in list/watch and get calls) from clients that haven't
explicitly asked to observe uninitialized objects. When an object
is created, the system will populate this list with the current set
of initializers. Only privileged users may set or modify this list.
Once it is empty, it may not be modified further by any user.
type: complex
contains:
pending:
description:
- Pending is a list of initializers that must execute in order before
this object is visible. When the last pending initializer is removed,
and no failing result is set, the initializers struct will be
set to nil and the object is considered as initialized and visible
to all clients.
type: list
contains:
name:
description:
- name of the process that is responsible for initializing this
object.
type: str
result:
description:
- If result is set with the Failure field, the object will be persisted
to storage and then deleted, ensuring that other clients can observe
the deletion.
type: complex
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to
the latest internal value, and may reject unrecognized values.
type: str
code:
description:
- Suggested HTTP return code for this status, 0 if not set.
type: int
details:
description:
- Extended data associated with the reason. Each reason may
define its own extended details. This field is optional and
the data returned is not guaranteed to conform to any schema
except that defined by the reason type.
type: complex
contains:
causes:
description:
- The Causes array includes more details associated with
the StatusReason failure. Not all StatusReasons may provide
detailed causes.
type: list
contains:
field:
description:
- 'The field of the resource that has caused this error,
as named by its JSON serialization. May include dot
and postfix notation for nested attributes. Arrays
are zero-indexed. Fields may appear more than once
in an array of causes due to fields having multiple
errors. Optional. Examples: "name" - the field "name"
on the current resource "items[0].name" - the field
"name" on the first array entry in "items"'
type: str
message:
description:
- A human-readable description of the cause of the error.
This field may be presented as-is to a reader.
type: str
reason:
description:
- A machine-readable description of the cause of the
error. If this value is empty there is no information
available.
type: str
group:
description:
- The group attribute of the resource associated with the
status StatusReason.
type: str
kind:
description:
- The kind attribute of the resource associated with the
status StatusReason. On some operations may differ from
the requested resource Kind.
type: str
name:
description:
- The name attribute of the resource associated with the
status StatusReason (when there is a single name which
can be described).
type: str
retry_after_seconds:
description:
- If specified, the time in seconds before the operation
should be retried.
type: int
uid:
description:
- UID of the resource. (when there is a single resource
which can be described).
type: str
kind:
description:
- Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
type: str
message:
description:
- A human-readable description of the status of this operation.
type: str
metadata:
description:
- Standard list metadata.
type: complex
contains:
resource_version:
description:
- String that identifies the server's internal version of
this object that can be used by clients to determine when
objects have changed. Value must be treated as opaque
by clients and passed unmodified back to the server. Populated
by the system. Read-only.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated
by the system. Read-only.
type: str
reason:
description:
- A machine-readable description of why this operation is in
the "Failure" status. If this value is empty there is no information
available. A Reason clarifies an HTTP status code but does
not override it.
type: str
status:
description:
- 'Status of the operation. One of: "Success" or "Failure".'
type: str
labels:
description:
- Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.
type: complex
contains: str, str
name:
description:
- Name must be unique within a namespace. Is required when creating
resources, although some resources may allow a client to request the
generation of an appropriate name automatically. Name is primarily
intended for creation idempotence and configuration definition. Cannot
be updated.
type: str
namespace:
description:
- Namespace defines the space within each name must be unique. An empty
namespace is equivalent to the "default" namespace, but "default"
is the canonical representation. Not all objects are required to be
scoped to a namespace - the value of this field for those objects
will be empty. Must be a DNS_LABEL. Cannot be updated.
type: str
owner_references:
description:
- List of objects depended by this object. If ALL objects in the list
have been deleted, this object will be garbage collected. If this
object is managed by a controller, then an entry in this list will
point to this controller, with the controller field set to true. There
cannot be more than one managing controller.
type: list
contains:
api_version:
description:
- API version of the referent.
type: str
block_owner_deletion:
description:
- If true, AND if the owner has the "foregroundDeletion" finalizer,
then the owner cannot be deleted from the key-value store until
this reference is removed. Defaults to false. To set this field,
a user needs "delete" permission of the owner, otherwise 422 (Unprocessable
Entity) will be returned.
type: bool
controller:
description:
- If true, this reference points to the managing controller.
type: bool
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
uid:
description:
- UID of the referent.
type: str
resource_version:
description:
- An opaque value that represents the internal version of this object
that can be used by clients to determine when objects have changed.
May be used for optimistic concurrency, change detection, and the
watch operation on a resource or set of resources. Clients must treat
these values as opaque and passed unmodified back to the server. They
may only be valid for a particular resource or set of resources. Populated
by the system. Read-only. Value must be treated as opaque by clients
and .
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated by the system.
Read-only.
type: str
uid:
description:
- UID is the unique in time and space value for this object. It is typically
generated by the server on successful creation of a resource and is
not allowed to change on PUT operations. Populated by the system.
Read-only.
type: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard list metadata.
type: complex
contains:
resource_version:
description:
- String that identifies the server's internal version of this object that
can be used by clients to determine when objects have changed. Value must
be treated as opaque by clients and passed unmodified back to the server.
Populated by the system. Read-only.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated by the system. Read-only.
type: str
'''
def main():
try:
module = KubernetesAnsibleModule('initializer_configuration_list', 'v1alpha1')
except KubernetesAnsibleException as exc:
# The helper failed to init, so there is no module object. All we can do is raise the error.
raise Exception(exc.message)
try:
module.execute_module()
except KubernetesAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e0cdf5d0874e4a9d85af092ca398c8a044ce45d5 | 681602725fe045a2c63be02d7b8e1aacfd8c70de | /InvenTree/stock/views.py | 4ac415ef23ed5623ca6544fa78a500588fff35d7 | [
"MIT"
] | permissive | Zeigren/InvenTree | 6af06992a3a7be75e82fa57424655d8d8ce83624 | 6c415bc9227063ef3bf21b66d2f6b51b995aaf56 | refs/heads/master | 2020-11-26T17:08:17.565941 | 2020-02-18T13:02:59 | 2020-02-18T13:02:59 | 229,151,367 | 0 | 0 | MIT | 2019-12-19T22:53:13 | 2019-12-19T22:53:12 | null | UTF-8 | Python | false | false | 33,206 | py | """
Django views for interacting with Stock app
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.views.generic.edit import FormMixin
from django.views.generic import DetailView, ListView, UpdateView
from django.forms.models import model_to_dict
from django.forms import HiddenInput
from django.urls import reverse
from django.utils.translation import ugettext as _
from InvenTree.views import AjaxView
from InvenTree.views import AjaxUpdateView, AjaxDeleteView, AjaxCreateView
from InvenTree.views import QRCodeView
from InvenTree.helpers import str2bool, DownloadFile, GetExportFormats
from InvenTree.helpers import ExtractSerialNumbers
from decimal import Decimal, InvalidOperation
from datetime import datetime
from company.models import Company, SupplierPart
from part.models import Part
from .models import StockItem, StockLocation, StockItemTracking
from .admin import StockItemResource
from .forms import EditStockLocationForm
from .forms import CreateStockItemForm
from .forms import EditStockItemForm
from .forms import AdjustStockForm
from .forms import TrackingEntryForm
from .forms import SerializeStockForm
from .forms import ExportOptionsForm
class StockIndex(ListView):
""" StockIndex view loads all StockLocation and StockItem object
"""
model = StockItem
template_name = 'stock/location.html'
    context_object_name = 'locations'
def get_context_data(self, **kwargs):
context = super(StockIndex, self).get_context_data(**kwargs).copy()
# Return all top-level locations
locations = StockLocation.objects.filter(parent=None)
context['locations'] = locations
context['items'] = StockItem.objects.all()
context['loc_count'] = StockLocation.objects.count()
context['stock_count'] = StockItem.objects.count()
return context
class StockLocationDetail(DetailView):
"""
Detailed view of a single StockLocation object
"""
context_object_name = 'location'
template_name = 'stock/location.html'
queryset = StockLocation.objects.all()
model = StockLocation
class StockItemDetail(DetailView):
"""
Detailed view of a single StockItem object
"""
context_object_name = 'item'
template_name = 'stock/item.html'
queryset = StockItem.objects.all()
model = StockItem
class StockItemNotes(UpdateView):
""" View for editing the 'notes' field of a StockItem object """
context_object_name = 'item'
template_name = 'stock/item_notes.html'
model = StockItem
fields = ['notes']
def get_success_url(self):
return reverse('stock-item-notes', kwargs={'pk': self.get_object().id})
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['editing'] = str2bool(self.request.GET.get('edit', ''))
return ctx
class StockLocationEdit(AjaxUpdateView):
"""
View for editing details of a StockLocation.
This view is used with the EditStockLocationForm to deliver a modal form to the web view
"""
model = StockLocation
form_class = EditStockLocationForm
context_object_name = 'location'
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Edit Stock Location')
def get_form(self):
""" Customize form data for StockLocation editing.
Limit the choices for 'parent' field to those which make sense.
"""
form = super(AjaxUpdateView, self).get_form()
location = self.get_object()
# Remove any invalid choices for the 'parent' field
parent_choices = StockLocation.objects.all()
parent_choices = parent_choices.exclude(id__in=location.getUniqueChildren())
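        # getUniqueChildren() presumably yields the location's own descendants;
        # excluding them stops the location from being re-parented underneath
        # itself, which would create a cycle in the location tree.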
form.fields['parent'].queryset = parent_choices
return form
class StockLocationQRCode(QRCodeView):
""" View for displaying a QR code for a StockLocation object """
ajax_form_title = _("Stock Location QR code")
def get_qr_data(self):
""" Generate QR code data for the StockLocation """
try:
loc = StockLocation.objects.get(id=self.pk)
return loc.format_barcode()
except StockLocation.DoesNotExist:
return None
class StockExportOptions(AjaxView):
""" Form for selecting StockExport options """
model = StockLocation
ajax_form_title = _('Stock Export Options')
form_class = ExportOptionsForm
def post(self, request, *args, **kwargs):
self.request = request
fmt = request.POST.get('file_format', 'csv').lower()
cascade = str2bool(request.POST.get('include_sublocations', False))
# Format a URL to redirect to
url = reverse('stock-export')
url += '?format=' + fmt
url += '&cascade=' + str(cascade)
data = {
'form_valid': True,
'format': fmt,
'cascade': cascade
}
return self.renderJsonResponse(self.request, self.form_class(), data=data)
def get(self, request, *args, **kwargs):
return self.renderJsonResponse(request, self.form_class())
class StockExport(AjaxView):
""" Export stock data from a particular location.
Returns a file containing stock information for that location.
"""
model = StockItem
def get(self, request, *args, **kwargs):
export_format = request.GET.get('format', 'csv').lower()
# Check if a particular location was specified
loc_id = request.GET.get('location', None)
location = None
if loc_id:
try:
location = StockLocation.objects.get(pk=loc_id)
except (ValueError, StockLocation.DoesNotExist):
pass
# Check if a particular supplier was specified
sup_id = request.GET.get('supplier', None)
supplier = None
if sup_id:
try:
supplier = Company.objects.get(pk=sup_id)
except (ValueError, Company.DoesNotExist):
pass
# Check if a particular supplier_part was specified
sup_part_id = request.GET.get('supplier_part', None)
supplier_part = None
if sup_part_id:
try:
supplier_part = SupplierPart.objects.get(pk=sup_part_id)
except (ValueError, SupplierPart.DoesNotExist):
pass
# Check if a particular part was specified
part_id = request.GET.get('part', None)
part = None
if part_id:
try:
part = Part.objects.get(pk=part_id)
except (ValueError, Part.DoesNotExist):
pass
if export_format not in GetExportFormats():
export_format = 'csv'
filename = 'InvenTree_Stocktake_{date}.{fmt}'.format(
date=datetime.now().strftime("%d-%b-%Y"),
fmt=export_format
)
if location:
            # Check if locations should be cascading
cascade = str2bool(request.GET.get('cascade', True))
stock_items = location.get_stock_items(cascade)
else:
cascade = True
stock_items = StockItem.objects.all()
if part:
stock_items = stock_items.filter(part=part)
if supplier:
stock_items = stock_items.filter(supplier_part__supplier=supplier)
if supplier_part:
stock_items = stock_items.filter(supplier_part=supplier_part)
# Filter out stock items that are not 'in stock'
# TODO - This might need some more thought in the future...
stock_items = stock_items.filter(customer=None)
stock_items = stock_items.filter(belongs_to=None)
# Pre-fetch related fields to reduce DB queries
stock_items = stock_items.prefetch_related('part', 'supplier_part__supplier', 'location', 'purchase_order', 'build')
dataset = StockItemResource().export(queryset=stock_items)
filedata = dataset.export(export_format)
return DownloadFile(filedata, filename)
class StockItemQRCode(QRCodeView):
""" View for displaying a QR code for a StockItem object """
ajax_form_title = _("Stock Item QR Code")
def get_qr_data(self):
""" Generate QR code data for the StockItem """
try:
item = StockItem.objects.get(id=self.pk)
return item.format_barcode()
except StockItem.DoesNotExist:
return None
class StockAdjust(AjaxView, FormMixin):
""" View for enacting simple stock adjustments:
- Take items from stock
- Add items to stock
- Count items
- Move stock
- Delete stock items
"""
ajax_template_name = 'stock/stock_adjust.html'
ajax_form_title = _('Adjust Stock')
form_class = AdjustStockForm
stock_items = []
def get_GET_items(self):
""" Return list of stock items initally requested using GET.
Items can be retrieved by:
a) List of stock ID - stock[]=1,2,3,4,5
b) Parent part - part=3
c) Parent location - location=78
d) Single item - item=2
"""
# Start with all 'in stock' items
items = StockItem.objects.filter(customer=None, belongs_to=None)
# Client provides a list of individual stock items
if 'stock[]' in self.request.GET:
items = items.filter(id__in=self.request.GET.getlist('stock[]'))
# Client provides a PART reference
elif 'part' in self.request.GET:
items = items.filter(part=self.request.GET.get('part'))
# Client provides a LOCATION reference
elif 'location' in self.request.GET:
items = items.filter(location=self.request.GET.get('location'))
# Client provides a single StockItem lookup
elif 'item' in self.request.GET:
items = [StockItem.objects.get(id=self.request.GET.get('item'))]
# Unsupported query (no items)
else:
items = []
for item in items:
# Initialize quantity to zero for addition/removal
if self.stock_action in ['take', 'add']:
item.new_quantity = 0
# Initialize quantity at full amount for counting or moving
else:
item.new_quantity = item.quantity
return items
def get_POST_items(self):
""" Return list of stock items sent back by client on a POST request """
items = []
for item in self.request.POST:
if item.startswith('stock-id-'):
pk = item.replace('stock-id-', '')
q = self.request.POST[item]
try:
stock_item = StockItem.objects.get(pk=pk)
except StockItem.DoesNotExist:
continue
stock_item.new_quantity = q
items.append(stock_item)
return items
def get_context_data(self):
context = super().get_context_data()
context['stock_items'] = self.stock_items
context['stock_action'] = self.stock_action.strip().lower()
context['stock_action_title'] = self.stock_action.capitalize()
# Quantity column will be read-only in some circumstances
context['edit_quantity'] = not self.stock_action == 'delete'
return context
def get_form(self):
form = super().get_form()
if not self.stock_action == 'move':
form.fields.pop('destination')
form.fields.pop('set_loc')
return form
def get(self, request, *args, **kwargs):
self.request = request
# Action
self.stock_action = request.GET.get('action', '').lower()
# Pick a default action...
if self.stock_action not in ['move', 'count', 'take', 'add', 'delete']:
self.stock_action = 'count'
# Choose the form title based on the action
titles = {
'move': _('Move Stock Items'),
'count': _('Count Stock Items'),
'take': _('Remove From Stock'),
'add': _('Add Stock Items'),
'delete': _('Delete Stock Items')
}
self.ajax_form_title = titles[self.stock_action]
# Save list of items!
self.stock_items = self.get_GET_items()
return self.renderJsonResponse(request, self.get_form())
def post(self, request, *args, **kwargs):
self.request = request
self.stock_action = request.POST.get('stock_action', 'invalid').strip().lower()
# Update list of stock items
self.stock_items = self.get_POST_items()
form = self.get_form()
valid = form.is_valid()
for item in self.stock_items:
try:
item.new_quantity = Decimal(item.new_quantity)
            except (ValueError, InvalidOperation):
                item.error = _('Must enter a valid numerical value')
valid = False
continue
if item.new_quantity < 0:
item.error = _('Quantity must be positive')
valid = False
continue
if self.stock_action in ['move', 'take']:
if item.new_quantity > item.quantity:
item.error = _('Quantity must not exceed {x}'.format(x=item.quantity))
valid = False
continue
confirmed = str2bool(request.POST.get('confirm'))
if not confirmed:
valid = False
form.errors['confirm'] = [_('Confirm stock adjustment')]
data = {
'form_valid': valid,
}
if valid:
result = self.do_action()
data['success'] = result
# Special case - Single Stock Item
# If we deplete the stock item, we MUST redirect to a new view
single_item = len(self.stock_items) == 1
if result and single_item:
# Was the entire stock taken?
item = self.stock_items[0]
if item.quantity == 0:
# Instruct the form to redirect
data['url'] = reverse('stock-index')
return self.renderJsonResponse(request, form, data=data)
def do_action(self):
""" Perform stock adjustment action """
if self.stock_action == 'move':
destination = None
set_default_loc = str2bool(self.request.POST.get('set_loc', False))
try:
destination = StockLocation.objects.get(id=self.request.POST.get('destination'))
except StockLocation.DoesNotExist:
pass
except ValueError:
pass
return self.do_move(destination, set_default_loc)
elif self.stock_action == 'add':
return self.do_add()
elif self.stock_action == 'take':
return self.do_take()
elif self.stock_action == 'count':
return self.do_count()
elif self.stock_action == 'delete':
return self.do_delete()
else:
return 'No action performed'
def do_add(self):
count = 0
note = self.request.POST['note']
for item in self.stock_items:
if item.new_quantity <= 0:
continue
item.add_stock(item.new_quantity, self.request.user, notes=note)
count += 1
return _("Added stock to {n} items".format(n=count))
def do_take(self):
count = 0
note = self.request.POST['note']
for item in self.stock_items:
if item.new_quantity <= 0:
continue
item.take_stock(item.new_quantity, self.request.user, notes=note)
count += 1
return _("Removed stock from {n} items".format(n=count))
def do_count(self):
count = 0
note = self.request.POST['note']
for item in self.stock_items:
item.stocktake(item.new_quantity, self.request.user, notes=note)
count += 1
return _("Counted stock for {n} items".format(n=count))
def do_move(self, destination, set_loc=None):
""" Perform actual stock movement """
count = 0
note = self.request.POST['note']
for item in self.stock_items:
# Avoid moving zero quantity
if item.new_quantity <= 0:
continue
# If we wish to set the destination location to the default one
if set_loc:
item.part.default_location = destination
item.part.save()
# Do not move to the same location (unless the quantity is different)
if destination == item.location and item.new_quantity == item.quantity:
continue
item.move(destination, note, self.request.user, quantity=item.new_quantity)
count += 1
if count == 0:
return _('No items were moved')
else:
return _('Moved {n} items to {dest}'.format(
n=count,
dest=destination.pathstring))
def do_delete(self):
""" Delete multiple stock items """
count = 0
# note = self.request.POST['note']
for item in self.stock_items:
# TODO - In the future, StockItems should not be 'deleted'
# TODO - Instead, they should be marked as "inactive"
item.delete()
count += 1
return _("Deleted {n} stock items".format(n=count))
class StockItemEdit(AjaxUpdateView):
"""
View for editing details of a single StockItem
"""
model = StockItem
form_class = EditStockItemForm
context_object_name = 'item'
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Edit Stock Item')
def get_form(self):
""" Get form for StockItem editing.
Limit the choices for supplier_part
"""
form = super(AjaxUpdateView, self).get_form()
item = self.get_object()
# If the part cannot be purchased, hide the supplier_part field
if not item.part.purchaseable:
form.fields['supplier_part'].widget = HiddenInput()
else:
query = form.fields['supplier_part'].queryset
query = query.filter(part=item.part.id)
form.fields['supplier_part'].queryset = query
if not item.part.trackable:
form.fields.pop('serial')
return form
class StockLocationCreate(AjaxCreateView):
"""
View for creating a new StockLocation
A parent location (another StockLocation object) can be passed as a query parameter
"""
model = StockLocation
form_class = EditStockLocationForm
context_object_name = 'location'
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Create new Stock Location')
def get_initial(self):
initials = super(StockLocationCreate, self).get_initial().copy()
loc_id = self.request.GET.get('location', None)
if loc_id:
try:
initials['parent'] = StockLocation.objects.get(pk=loc_id)
except StockLocation.DoesNotExist:
pass
return initials
class StockItemSerialize(AjaxUpdateView):
""" View for manually serializing a StockItem """
model = StockItem
ajax_template_name = 'stock/item_serialize.html'
ajax_form_title = _('Serialize Stock')
form_class = SerializeStockForm
def get_initial(self):
initials = super().get_initial().copy()
item = self.get_object()
initials['quantity'] = item.quantity
initials['destination'] = item.location.pk
return initials
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
form = self.get_form()
item = self.get_object()
quantity = request.POST.get('quantity', 0)
serials = request.POST.get('serial_numbers', '')
dest_id = request.POST.get('destination', None)
notes = request.POST.get('note', '')
user = request.user
valid = True
try:
destination = StockLocation.objects.get(pk=dest_id)
except (ValueError, StockLocation.DoesNotExist):
destination = None
try:
numbers = ExtractSerialNumbers(serials, quantity)
except ValidationError as e:
form.errors['serial_numbers'] = e.messages
valid = False
numbers = []
if valid:
try:
item.serializeStock(quantity, numbers, user, notes=notes, location=destination)
except ValidationError as e:
messages = e.message_dict
for k in messages.keys():
if k in ['quantity', 'destination', 'serial_numbers']:
form.errors[k] = messages[k]
else:
form.non_field_errors = messages[k]
valid = False
data = {
'form_valid': valid,
}
return self.renderJsonResponse(request, form, data=data)
class StockItemCreate(AjaxCreateView):
"""
View for creating a new StockItem
Parameters can be pre-filled by passing query items:
- part: The part of which the new StockItem is an instance
- location: The location of the new StockItem
If the parent part is a "tracked" part, provide an option to create uniquely serialized items
rather than a bulk quantity of stock items
"""
model = StockItem
form_class = CreateStockItemForm
context_object_name = 'item'
ajax_template_name = 'modal_form.html'
ajax_form_title = _('Create new Stock Item')
def get_form(self):
""" Get form for StockItem creation.
Overrides the default get_form() method to intelligently limit
ForeignKey choices based on other selections
"""
form = super().get_form()
# If the user has selected a Part, limit choices for SupplierPart
if form['part'].value():
part_id = form['part'].value()
try:
part = Part.objects.get(id=part_id)
# Hide the 'part' field (as a valid part is selected)
form.fields['part'].widget = HiddenInput()
# trackable parts get special consideration
if part.trackable:
form.fields['delete_on_deplete'].widget = HiddenInput()
form.fields['delete_on_deplete'].initial = False
else:
form.fields.pop('serial_numbers')
# If the part is NOT purchaseable, hide the supplier_part field
if not part.purchaseable:
form.fields['supplier_part'].widget = HiddenInput()
else:
# Pre-select the allowable SupplierPart options
parts = form.fields['supplier_part'].queryset
parts = parts.filter(part=part.id)
form.fields['supplier_part'].queryset = parts
# If there is one (and only one) supplier part available, pre-select it
all_parts = parts.all()
if len(all_parts) == 1:
# TODO - This does NOT work for some reason? Ref build.views.BuildItemCreate
form.fields['supplier_part'].initial = all_parts[0].id
except Part.DoesNotExist:
pass
# Otherwise if the user has selected a SupplierPart, we know what Part they meant!
elif form['supplier_part'].value() is not None:
pass
return form
def get_initial(self):
""" Provide initial data to create a new StockItem object
"""
# Is the client attempting to copy an existing stock item?
item_to_copy = self.request.GET.get('copy', None)
if item_to_copy:
try:
original = StockItem.objects.get(pk=item_to_copy)
initials = model_to_dict(original)
self.ajax_form_title = _("Copy Stock Item")
except StockItem.DoesNotExist:
initials = super(StockItemCreate, self).get_initial().copy()
else:
initials = super(StockItemCreate, self).get_initial().copy()
part_id = self.request.GET.get('part', None)
loc_id = self.request.GET.get('location', None)
sup_part_id = self.request.GET.get('supplier_part', None)
part = None
location = None
supplier_part = None
# Part field has been specified
if part_id:
try:
part = Part.objects.get(pk=part_id)
initials['part'] = part
initials['location'] = part.get_default_location()
initials['supplier_part'] = part.default_supplier
except (ValueError, Part.DoesNotExist):
pass
# SupplierPart field has been specified
# It must match the Part, if that has been supplied
if sup_part_id:
try:
supplier_part = SupplierPart.objects.get(pk=sup_part_id)
if part is None or supplier_part.part == part:
initials['supplier_part'] = supplier_part
except (ValueError, SupplierPart.DoesNotExist):
pass
# Location has been specified
if loc_id:
try:
location = StockLocation.objects.get(pk=loc_id)
initials['location'] = location
except (ValueError, StockLocation.DoesNotExist):
pass
return initials
def post(self, request, *args, **kwargs):
""" Handle POST of StockItemCreate form.
        - Manage serial-number validation for tracked parts
"""
form = self.get_form()
data = {}
valid = form.is_valid()
if valid:
part_id = form['part'].value()
try:
part = Part.objects.get(id=part_id)
quantity = Decimal(form['quantity'].value())
except (Part.DoesNotExist, ValueError, InvalidOperation):
part = None
quantity = 1
valid = False
form.errors['quantity'] = [_('Invalid quantity')]
if part is None:
form.errors['part'] = [_('Invalid part selection')]
else:
                # A trackable part must provide serial numbers
if part.trackable:
sn = request.POST.get('serial_numbers', '')
sn = str(sn).strip()
# If user has specified a range of serial numbers
if len(sn) > 0:
try:
serials = ExtractSerialNumbers(sn, quantity)
existing = []
for serial in serials:
if not StockItem.check_serial_number(part, serial):
existing.append(serial)
if len(existing) > 0:
exists = ",".join([str(x) for x in existing])
form.errors['serial_numbers'] = [_('The following serial numbers already exist: ({sn})'.format(sn=exists))]
valid = False
else:
# At this point we have a list of serial numbers which we know are valid,
# and do not currently exist
form.clean()
form_data = form.cleaned_data
for serial in serials:
# Create a new stock item for each serial number
item = StockItem(
part=part,
quantity=1,
serial=serial,
supplier_part=form_data.get('supplier_part'),
location=form_data.get('location'),
batch=form_data.get('batch'),
delete_on_deplete=False,
status=form_data.get('status'),
URL=form_data.get('URL'),
)
item.save(user=request.user)
data['success'] = _('Created {n} new stock items'.format(n=len(serials)))
valid = True
except ValidationError as e:
form.errors['serial_numbers'] = e.messages
valid = False
else:
# We have a serialized part, but no serial numbers specified...
form.clean()
form._post_clean()
item = form.save(commit=False)
item.save(user=request.user)
data['pk'] = item.pk
data['url'] = item.get_absolute_url()
data['success'] = _("Created new stock item")
else: # Referenced Part object is not marked as "trackable"
# For non-serialized items, simply save the form.
# We need to call _post_clean() here because it is prevented in the form implementation
form.clean()
form._post_clean()
item = form.save(commit=False)
item.save(user=request.user)
data['pk'] = item.pk
data['url'] = item.get_absolute_url()
data['success'] = _("Created new stock item")
data['form_valid'] = valid
return self.renderJsonResponse(request, form, data=data)
class StockLocationDelete(AjaxDeleteView):
"""
View to delete a StockLocation
Presents a deletion confirmation form to the user
"""
model = StockLocation
success_url = '/stock'
ajax_template_name = 'stock/location_delete.html'
context_object_name = 'location'
ajax_form_title = _('Delete Stock Location')
class StockItemDelete(AjaxDeleteView):
"""
View to delete a StockItem
Presents a deletion confirmation form to the user
"""
model = StockItem
success_url = '/stock/'
ajax_template_name = 'stock/item_delete.html'
context_object_name = 'item'
ajax_form_title = _('Delete Stock Item')
class StockItemTrackingDelete(AjaxDeleteView):
"""
View to delete a StockItemTracking object
Presents a deletion confirmation form to the user
"""
model = StockItemTracking
ajax_template_name = 'stock/tracking_delete.html'
ajax_form_title = _('Delete Stock Tracking Entry')
class StockTrackingIndex(ListView):
"""
StockTrackingIndex provides a page to display StockItemTracking objects
"""
model = StockItemTracking
template_name = 'stock/tracking.html'
context_object_name = 'items'
class StockItemTrackingEdit(AjaxUpdateView):
""" View for editing a StockItemTracking object """
model = StockItemTracking
ajax_form_title = _('Edit Stock Tracking Entry')
form_class = TrackingEntryForm
class StockItemTrackingCreate(AjaxCreateView):
""" View for creating a new StockItemTracking object.
"""
model = StockItemTracking
ajax_form_title = _("Add Stock Tracking Entry")
form_class = TrackingEntryForm
def post(self, request, *args, **kwargs):
self.request = request
self.form = self.get_form()
valid = False
if self.form.is_valid():
stock_id = self.kwargs['pk']
if stock_id:
try:
stock_item = StockItem.objects.get(id=stock_id)
# Save new tracking information
tracking = self.form.save(commit=False)
tracking.item = stock_item
tracking.user = self.request.user
tracking.quantity = stock_item.quantity
tracking.date = datetime.now().date()
tracking.system = False
tracking.save()
valid = True
except (StockItem.DoesNotExist, ValueError):
pass
data = {
'form_valid': valid
}
return self.renderJsonResponse(request, self.form, data=data)
| [
"[email protected]"
] | |
2e18be84a9b21e63411b034b37872fab50661b4a | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201101180452.py | 0b0cccefb8ae974ad8a008aea7fbc96caffa7116 | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,035 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
global col_index
    col_index = col
    global blk_index
    blk_index = blk
def get_cord(pos):
global box_index_x
box_index_x = int((pos[0] - TOP_LX)//BLOCK_SIZE)
global box_index_y
box_index_y = int((pos[1] - TOP_LY)//BLOCK_SIZE)
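# Example (values are illustrative; the real ones come from config):
# with TOP_LX = TOP_LY = 50 and BLOCK_SIZE = 60, a click at (125, 185)
# maps to box_index_x = 1, box_index_y = 2.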
def valid(grid, x, y, val):
input_lock = 0
row = col = blk = (0, 0)
for index in range(9):
# Check if value in column
if grid[x][index] == val:
col = (x, index)
input_lock = 1
# Check if value in row
if grid[index][y] == val:
row = (index, y)
input_lock = 1
# Finds the block
index_x = x // 3 # integer division
index_y = y // 3
# Check if value in block
for i in range(index_x * 3, index_x * 3 + 3):
for j in range (index_y * 3, index_y * 3 + 3):
if grid[i][j] == val:
blk = (i, j)
input_lock = 1
if input_lock == 1:
set_highlight(row, col, blk, input_lock)
return False
return True
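# Note that valid() does two things: it reports whether `val` may be placed at
# (x, y), and on a conflict it records the clashing row/column/block cells via
# set_highlight() so the GUI can colour them. For instance, valid(board, 0, 0, 5)
# is False whenever 5 already appears in row 0, column 0 or the top-left 3x3 block.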
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
pg.init()
self.screen = pg.display.set_mode(SCREEN_RES)
pg.display.set_caption('Sudoku solver')
display = Display_board(self.screen)
val = 0
blink = False
alpha = 1
a_change = True
blink_color = GREEN
candidates = []
get_cord(INITIAL_CORDS)
set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
board = create_board().board
while 1:
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
exit()
if event.type == pg.MOUSEBUTTONDOWN and input_lock != 1:
pos = pg.mouse.get_pos()
get_cord(pos)
# Checks if selection is on the board
if pos[0] < TOP_LX or pos[1] < TOP_LY or pos[0] > int(BOT_RX) or pos[1] > int(BOT_RY):
blink = False
else:
blink = True
if event.type == pg.KEYDOWN and input_lock != 1:
if event.key == pg.K_1:
val = 1
if event.key == pg.K_2:
val = 2
if event.key == pg.K_3:
val = 3
if event.key == pg.K_4:
val = 4
if event.key == pg.K_5:
val = 5
if event.key == pg.K_6:
val = 6
if event.key == pg.K_7:
val = 7
if event.key == pg.K_8:
val = 8
if event.key == pg.K_9:
val = 9
if event.key == pg.K_BACKSPACE:
board[int(box_index_x)][int(box_index_y)] = 0
elif event.type == pg.KEYDOWN and input_lock == 1:
if event.key == pg.K_BACKSPACE:
val = 0
set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
blink_color = GREEN
board[int(box_index_x)][int(box_index_y)] = 0
if val != 0:
# display.draw_val(val, box_index_x, box_index_y)
print(board[box_index_x][box_index_y])
candidates.append(board[box_index_x][box_index_y])
if valid(board, box_index_x, box_index_y, val) and board[box_index_x][box_index_y] != 0:
if type(board[box_index_x][box_index_y]) == int:
print("hey there", len(candidates))
if len(candidates) < 9:
candidates.append(val)
print("candidates: ", candidates[0], candidates[1])
board[box_index_x][box_index_y] = candidates
else:
board[box_index_x][box_index_y].append(val)
elif valid(board,box_index_x, box_index_y, val):
board[box_index_x][box_index_y] = val
else:
board[box_index_x][box_index_y] = val
# Draws the screen
pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
self.screen.fill(BEIGE)
# Draws the board
display.draw(board)
# Check if cell is selected
if blink:
cell = display.find_cell(box_index_x, box_index_y)
blink = display.blink(alpha, a_change)
alpha = blink[0]
a_change = blink[1]
myRect = pg.Rect(cell)
rectSurf = pg.Surface(myRect.size, pg.SRCALPHA)
rectSurf.fill(blink_color)
rectSurf.set_alpha(alpha)
self.screen.blit(rectSurf, (myRect.x, myRect.y))
# Check if incorrect input
if input_lock == 1 and val != 0:
display.update(board, row_index, col_index, blk_index)
blink_color = RED
val = 0
# display.draw_box()
pg.display.update()
self.solution = solve_board(board)
self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
| [
"[email protected]"
] | |
f31fee6475c7cf0e9200fc4f6762bf92a3f9cdb1 | 7dd88d4ae218b8de8fe54780fb48884ef92c0b5c | /python/leetcode/search.py | 0e887d9acf7a2cc74431826670bfe92836cdbc67 | [] | no_license | BenThomas33/practice | c98654ec3bb38740d7f69a21ea5832782abdb4f8 | 5dffbdfbdb65f959a534ed2e2ec7773ab4bc7ed9 | refs/heads/master | 2021-01-17T23:26:11.538707 | 2014-10-18T20:49:12 | 2014-10-18T20:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | class Solution:
# @param A a list of integers
# @param target an integer
# @return a boolean
    def search(self, A, target):
        # Minimal completion of this stub: a plain membership test is always
        # correct, though it does not exploit any (rotated) sorted order of A.
        return target in A
| [
"[email protected]"
] | |
45a39d564c5ccfcb158db6c41322736bf97d4f25 | 830b34e369fcfb94a8eaa855c918ab66ed2050b2 | /gui/layouts/grid.py | 4725707f04c497a09f57f383344ad657c886ec53 | [] | no_license | treinaweb/treinaweb-kivy-framework-python | 78e8ab1087a49e8463ebf4ecafca80fe41286cb7 | 2ddf0a881f28209a118ec893019c179bc39e75fc | refs/heads/master | 2020-03-27T16:47:07.452396 | 2019-10-15T19:04:14 | 2019-10-15T19:04:14 | 146,805,958 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
class TelaApp(GridLayout):
pass
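# TelaApp itself declares no rows/cols here; by Kivy convention an App subclass
# named `Grid` automatically loads a `grid.kv` file, which is presumably where
# the GridLayout's structure and children are defined.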
class Grid(App):
def build(self):
return TelaApp()
Grid().run() | [
"[email protected]"
] | |
446864b08c4d7945ec228b68519031d1bbce51c0 | 5be79d6cbc8a55f0b6518b28fb748c34316b385d | /sentinel_api/__init__.py | 01de33cdaec552e63d559aea414ed9ab6e1d9c99 | [
"MIT"
] | permissive | jonas-eberle/esa_sentinel | 1b9aa57a78972d93a20d03bbf0875c35f7bee4b2 | c9498e8835ae0a585068cfd6be953319ea34ca29 | refs/heads/master | 2022-06-24T18:44:42.726012 | 2022-06-06T16:15:52 | 2022-06-06T16:15:52 | 47,712,138 | 51 | 16 | MIT | 2019-10-17T15:45:06 | 2015-12-09T18:57:35 | Python | UTF-8 | Python | false | false | 238 | py | from .sentinel_api import SentinelDownloader
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
| [
"[email protected]"
] | |
630fa7c67e54aef67a51ee9d03be97d872a64cc5 | ec1f8cdbf52bcc5516a833e02ac99301a1664ed9 | /setup.py | 0e962f9c6b7ca5a5352113a317f655c549c3cf3b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | senarvi/theanolm | 8fe85dcf07358a331807b9002a56b6089d5f0ff3 | 9904faec19ad5718470f21927229aad2656e5686 | refs/heads/master | 2023-06-24T10:39:21.985241 | 2023-06-12T06:55:26 | 2023-06-12T06:55:26 | 42,454,187 | 95 | 37 | Apache-2.0 | 2020-11-05T11:22:31 | 2015-09-14T14:35:54 | Python | UTF-8 | Python | false | false | 2,563 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This setup script can be used to run unit tests, manually install the
package, and upload the package to PyPI.
python3 setup.py --help - Display help.
python3 setup.py test - Execute unit tests.
python3 setup.py install - Install the package.
python3 setup.py sdist upload - Upload the project to PyPI.
"""
from os import path
from setuptools import setup, find_packages
SCRIPT_DIR = path.dirname(path.realpath(__file__))
VERSION_PATH = path.join(SCRIPT_DIR, 'theanolm', 'version.py')
# Don't import theanolm, as the user may not have the dependencies installed
# yet. This will import __version__.
with open(VERSION_PATH, 'r') as version_file:
exec(version_file.read())
VERSION = __version__ #@UndefinedVariable
LONG_DESCRIPTION = 'TheanoLM is a recurrent neural network language modeling ' \
'toolkit implemented using Theano. Theano allows the user ' \
'to customize and extend the neural network very ' \
'conveniently, still generating highly efficient code ' \
'that can utilize multiple GPUs or CPUs for parallel ' \
'computation. TheanoLM allows the user to specify ' \
'arbitrary network architecture. New layer types and ' \
'optimization methods can be easily implemented.'
KEYWORDS = 'theano neural network language modeling machine learning research'
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering']
setup(name='TheanoLM',
version=VERSION,
author='Seppo Enarvi',
author_email='[email protected]',
url='https://github.com/senarvi/theanolm',
download_url='https://github.com/senarvi/theanolm/tarball/v' + VERSION,
description='Toolkit for neural network language modeling using Theano',
long_description=LONG_DESCRIPTION,
license='Apache License, Version 2.0',
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['tests']),
package_data={'theanolm': ['architectures/*.arch']},
scripts=['bin/theanolm', 'bin/wctool'],
install_requires=['numpy', 'Theano', 'h5py'],
test_suite='tests')
| [
"[email protected]"
] | |
aa4a3ec5872f3bd9c81e30dbe94c795cb732bc34 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GstVideo/VideoAggregatorConvertPadPrivate.py | 52c003b39ff29287c99298ff8a25c4d0fb82cb6b | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,481 | py | # encoding: utf-8
# module gi.repository.GstVideo
# from /usr/lib64/girepository-1.0/GstVideo-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class VideoAggregatorConvertPadPrivate(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(VideoAggregatorConvertPadPrivate), '__module__': 'gi.repository.GstVideo', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'VideoAggregatorConvertPadPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'VideoAggregatorConvertPadPrivate' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(VideoAggregatorConvertPadPrivate)
| [
"[email protected]"
] | |
13bda21c18d48ad3eb96b1e6efb52ec7823dc23b | 81fbac614ad0f6795960a7a1f615c1a7d2938fa8 | /setup.py | cc4ce16119bf94cc444d7050bfea2abeedc051df | [
"MIT"
] | permissive | Rue-Foundation/eth-bloom | f0c4a0fc4b41b16cb1ed103c693a44c22464d805 | 930b740267992fc7c2fbc7f38eed8c1ea3c79d40 | refs/heads/master | 2021-08-20T09:03:18.794271 | 2017-11-28T17:50:35 | 2017-11-28T17:50:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='eth-bloom',
# *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
version='0.5.2',
description="""Python implementation of the Ethereum Trie structure""",
long_description_markdown_filename='README.md',
author='Piper Merriam',
author_email='[email protected]',
url='https://github.com/ethereum/eth-bloom',
include_package_data=True,
py_modules=['eth_bloom'],
setup_requires=['setuptools-markdown'],
install_requires=[
"pysha3>=0.3",
],
license="MIT",
zip_safe=False,
keywords='ethereum blockchain evm trie merkle',
packages=find_packages(exclude=["tests", "tests.*"]),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| [
"[email protected]"
] | |
017b467592469746e83158bfd8f35d935f1207e6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_385/ch44_2019_04_22_23_24_01_514593.py | 3006efd7bceca5e33b58e34e164af9c84824b29a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | def soma_valores(soma):
lista = []
i=0
soma=0
while i<len(lista):
soma+= lista (i)
i+=1
return soma
| [
"[email protected]"
] | |
97b909e4b45e90041f2deb5ee471e5666c2c7ab2 | b5f05426d811303c0bc2d37a7ebff67cc369f536 | /python/crawl/study_crawl.py | fb7bde6519e53d66be9f5bca6de409ba80581f1b | [] | no_license | chenwangwww/paddlehub | 54a310c2b627868aa22e6172497d60ddd2291d24 | 8583a705af6f82512ea5473f3d8961a798852913 | refs/heads/master | 2023-03-13T10:17:55.589558 | 2021-03-01T02:35:43 | 2021-03-01T02:35:43 | 293,667,091 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | import requests
from bs4 import BeautifulSoup
from dbCtr import ctr
from funcs import listToStr
rooturl = 'http://www.cn56.net.cn/diming/'
def insertFullData(tb, data):
for item in data:
result = {
'name': item.get_text(),
'link': item.get('href'),
}
ctr.insertData(tb, result)
def crtb(tb, datas):
ctr.createTb(tb)
for data in datas:
insertFullData(tb, data)
def crawlUrl(suburl):
url = rooturl + suburl
strhtml = requests.get(url)
strhtml.encoding = 'gbk'
soup = BeautifulSoup(strhtml.text, 'lxml')
search_list = soup.select('#page_left > div.wrpn > a')
search_name = listToStr(search_list, '_')
if len(search_list) == 1:
selecter = '#page_left > table:nth-child(4) > tr > td:nth-child(1) > strong > a'
else:
selecter = '#page_left > div.infotree > table > tr > td:nth-child(1) > strong > a'
data = soup.select(selecter)
if len(data) > 0 and len(search_list) <= 2:
print(search_name)
crtb(search_name, [data])
for item in data:
subtempurl = item.get('href').split('/')[-1]
crawlUrl(subtempurl)
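# crawlUrl() walks the place-name hierarchy recursively: each page whose
# breadcrumb trail (search_list) is at most two levels deep gets its child
# links stored in a new table before recursing into each child page.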
# strhtml = requests.get(rooturl)
# strhtml.encoding = 'gbk'
# soup = BeautifulSoup(strhtml.text, 'lxml')
# search_name = '中国'
# selecter1 = 'body > div:nth-child(6) > div.w650 > div > li:nth-child(1) > a'
# data1 = soup.select(selecter1)
# selecter2 = 'body > div:nth-child(6) > div.w650 > div > li > b > a'
# data2 = soup.select(selecter2)
#如果不存在,创建数据表
# ctr.createTb(search_name)
#往数据表插入数据
# for item in data1:
# result = {
# 'name': item.get_text(),
# 'link': item.get('href'),
# }
# ctr.insertData(search_name, result)
# for item in data2:
# result = {
# 'name': item.get_text(),
# 'link': item.get('href'),
# }
# ctr.insertData(search_name, result)
queryData = ctr.queryData('中国')
for search_item in queryData:
_, suburl = search_item
suburl = suburl.split('/')[-1]
crawlUrl(suburl)
| [
"[email protected]"
] | |
2b88fd7b2e949c24551cc1cf034ead697fef65d5 | f5a53f0f2770e4d7b3fdace83486452ddcc996e1 | /netbox/netbox/tests/test_api.py | 0ee2d78dc1d0d9df2df846ce9c7d29f4c43c5347 | [
"Apache-2.0"
] | permissive | fireman0865/PingBox | 35e8fc9966b51320d571b63967e352a134022128 | 0f00eaf88b88e9441fffd5173a1501e56c13db03 | refs/heads/master | 2023-01-20T07:55:59.433046 | 2020-03-15T13:36:31 | 2020-03-15T13:36:31 | 247,466,832 | 1 | 0 | Apache-2.0 | 2022-12-26T21:30:32 | 2020-03-15T12:59:16 | Python | UTF-8 | Python | false | false | 298 | py | from django.urls import reverse
from utilities.testing import APITestCase
class AppTest(APITestCase):
def test_root(self):
url = reverse('api-root')
response = self.client.get('{}?format=api'.format(url), **self.header)
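        # '?format=api' asks DRF for the browsable-API renderer rather than plain JSON.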
self.assertEqual(response.status_code, 200)
| [
"[email protected]"
] | |
ff1cfc0d4c92efc8ad7d7076ffb8e161a74bc3e5 | 709b1549033c9a547c67ee507fdc10b7e5d234ad | /octopus_deploy_swagger_client/models/resource_collection_library_variable_set_resource.py | 771a9e5e32ec2f7362c72fef0cb8929bf52d9827 | [
"Apache-2.0"
] | permissive | cvent/octopus-deploy-api-client | d622417286b348c0be29678a86005a809c77c005 | 0e03e842e1beb29b132776aee077df570b88366a | refs/heads/master | 2020-12-05T14:17:46.229979 | 2020-01-07T05:06:58 | 2020-01-07T05:06:58 | 232,135,963 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,367 | py | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from octopus_deploy_swagger_client.models.library_variable_set_resource import LibraryVariableSetResource # noqa: F401,E501
class ResourceCollectionLibraryVariableSetResource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'item_type': 'str',
'total_results': 'int',
'items_per_page': 'int',
'number_of_pages': 'int',
'last_page_number': 'int',
'items': 'list[LibraryVariableSetResource]',
'last_modified_on': 'datetime',
'last_modified_by': 'str',
'links': 'dict(str, str)'
}
attribute_map = {
'id': 'Id',
'item_type': 'ItemType',
'total_results': 'TotalResults',
'items_per_page': 'ItemsPerPage',
'number_of_pages': 'NumberOfPages',
'last_page_number': 'LastPageNumber',
'items': 'Items',
'last_modified_on': 'LastModifiedOn',
'last_modified_by': 'LastModifiedBy',
'links': 'Links'
}
def __init__(self, id=None, item_type=None, total_results=None, items_per_page=None, number_of_pages=None, last_page_number=None, items=None, last_modified_on=None, last_modified_by=None, links=None): # noqa: E501
"""ResourceCollectionLibraryVariableSetResource - a model defined in Swagger""" # noqa: E501
self._id = None
self._item_type = None
self._total_results = None
self._items_per_page = None
self._number_of_pages = None
self._last_page_number = None
self._items = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if item_type is not None:
self.item_type = item_type
if total_results is not None:
self.total_results = total_results
if items_per_page is not None:
self.items_per_page = items_per_page
if number_of_pages is not None:
self.number_of_pages = number_of_pages
if last_page_number is not None:
self.last_page_number = last_page_number
if items is not None:
self.items = items
if last_modified_on is not None:
self.last_modified_on = last_modified_on
if last_modified_by is not None:
self.last_modified_by = last_modified_by
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The id of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ResourceCollectionLibraryVariableSetResource.
:param id: The id of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: str
"""
self._id = id
@property
def item_type(self):
"""Gets the item_type of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The item_type of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: str
"""
return self._item_type
@item_type.setter
def item_type(self, item_type):
"""Sets the item_type of this ResourceCollectionLibraryVariableSetResource.
:param item_type: The item_type of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: str
"""
self._item_type = item_type
@property
def total_results(self):
"""Gets the total_results of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The total_results of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: int
"""
return self._total_results
@total_results.setter
def total_results(self, total_results):
"""Sets the total_results of this ResourceCollectionLibraryVariableSetResource.
:param total_results: The total_results of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: int
"""
self._total_results = total_results
@property
def items_per_page(self):
"""Gets the items_per_page of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The items_per_page of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: int
"""
return self._items_per_page
@items_per_page.setter
def items_per_page(self, items_per_page):
"""Sets the items_per_page of this ResourceCollectionLibraryVariableSetResource.
:param items_per_page: The items_per_page of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: int
"""
self._items_per_page = items_per_page
@property
def number_of_pages(self):
"""Gets the number_of_pages of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The number_of_pages of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: int
"""
return self._number_of_pages
@number_of_pages.setter
def number_of_pages(self, number_of_pages):
"""Sets the number_of_pages of this ResourceCollectionLibraryVariableSetResource.
:param number_of_pages: The number_of_pages of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: int
"""
self._number_of_pages = number_of_pages
@property
def last_page_number(self):
"""Gets the last_page_number of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The last_page_number of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: int
"""
return self._last_page_number
@last_page_number.setter
def last_page_number(self, last_page_number):
"""Sets the last_page_number of this ResourceCollectionLibraryVariableSetResource.
:param last_page_number: The last_page_number of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: int
"""
self._last_page_number = last_page_number
@property
def items(self):
"""Gets the items of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The items of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: list[LibraryVariableSetResource]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ResourceCollectionLibraryVariableSetResource.
:param items: The items of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: list[LibraryVariableSetResource]
"""
self._items = items
@property
def last_modified_on(self):
"""Gets the last_modified_on of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The last_modified_on of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: datetime
"""
return self._last_modified_on
@last_modified_on.setter
def last_modified_on(self, last_modified_on):
"""Sets the last_modified_on of this ResourceCollectionLibraryVariableSetResource.
:param last_modified_on: The last_modified_on of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: datetime
"""
self._last_modified_on = last_modified_on
@property
def last_modified_by(self):
"""Gets the last_modified_by of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The last_modified_by of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: str
"""
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
"""Sets the last_modified_by of this ResourceCollectionLibraryVariableSetResource.
:param last_modified_by: The last_modified_by of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: str
"""
self._last_modified_by = last_modified_by
@property
def links(self):
"""Gets the links of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:return: The links of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:rtype: dict(str, str)
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourceCollectionLibraryVariableSetResource.
:param links: The links of this ResourceCollectionLibraryVariableSetResource. # noqa: E501
:type: dict(str, str)
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourceCollectionLibraryVariableSetResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceCollectionLibraryVariableSetResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
e40a4cc665597fd71aa392ec795fc50fe1fd605a | 8e8e4becd0ccf35a4d2397eac05c46741941a3f2 | /examples/e2e/cli/04ignore/commands.py | 981edeb546ef06f9b1979175c86be2be29e5cb76 | [] | no_license | podhmo/monogusa | 13469c59e3a366f11e2d0b1d649991aceed40092 | 1129249cbfbf2d7925f69e484f1488799d2f637d | refs/heads/master | 2020-09-30T18:28:27.215942 | 2020-02-29T15:17:05 | 2020-02-29T15:17:05 | 227,347,182 | 0 | 0 | null | 2020-02-29T15:17:06 | 2019-12-11T11:13:47 | Python | UTF-8 | Python | false | false | 137 | py | from monogusa import ignore
def hello() -> None:
pass
def byebye() -> None:
pass
@ignore
def ignore_me() -> None:
pass
| [
"[email protected]"
] | |
947eeef7fb19a7b211cedd5dbc26edf741e2fb26 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_rut.py | cfbcb5070a3a50c4c9cb663637014cb5d57b50f5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py |
#calss header
class _RUT():
def __init__(self,):
self.name = "RUT"
self.definitions = [u'a deep, narrow mark made in soft ground especially by a wheel', u'the period of the year during which particular male animals, especially deer and sheep, are sexually active: ', u'(of particular male animals) sexually excited']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
5e16fc0774c5c554570383b8aa8d7141c4ca1c84 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/dials/test/command_line/test_integrate.py | 51a33e5e0fce78bea4ee4271c99a79e1a8c316d1 | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 11,135 | py | from __future__ import absolute_import, division, print_function
import json
import math
import os
import pickle
import shutil
from dials.array_family import flex
import procrunner
def test2(dials_data, tmpdir):
# Call dials.integrate
result = procrunner.run(
[
"dials.integrate",
dials_data("centroid_test_data").join("experiments.json"),
"profile.fitting=False",
"integration.integrator=3d",
"prediction.padding=0",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
with tmpdir.join("integrated.refl").open("rb") as fh:
table = pickle.load(fh)
mask = table.get_flags(table.flags.integrated, all=False)
assert len(table) == 1996
assert mask.count(True) == 1666
assert "id" in table
for row in table.rows():
assert row["id"] == 0
originaltable = table
tmpdir.join("integrated.refl").remove()
for i in range(1, 10):
source = dials_data("centroid_test_data").join("centroid_000%d.cbf" % i)
destination = source.new(
dirname=tmpdir.strpath, basename="centroid_001%d.cbf" % i
)
source.copy(destination)
with dials_data("centroid_test_data").join("experiments.json").open("r") as fh:
j = json.load(fh)
assert j["scan"][0]["image_range"] == [1, 9]
j["scan"][0]["image_range"] = [11, 19]
assert j["scan"][0]["oscillation"] == [0.0, 0.2]
j["scan"][0]["oscillation"] = [360.0, 0.2]
with tmpdir.join("models.expt").open("w") as fh:
json.dump(j, fh)
# Call dials.integrate
result = procrunner.run(
[
"dials.integrate",
"models.expt",
"profile.fitting=False",
"integration.integrator=3d",
"prediction.padding=0",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
with tmpdir.join("integrated.refl").open("rb") as fh:
table = pickle.load(fh)
mask1 = table.get_flags(table.flags.integrated, all=False)
assert len(table) == 1996
assert mask1.count(True) == 1666
mask2 = originaltable.get_flags(table.flags.integrated, all=False)
assert mask1.all_eq(mask2)
t1 = table.select(mask1)
t2 = originaltable.select(mask1)
Cal_P1 = t1["xyzcal.mm"].parts()[2]
Cal_Z1 = t1["xyzcal.px"].parts()[2]
Obs_Z1 = t1["xyzobs.px.value"].parts()[2]
# Obs_P1 = t1['xyzobs.mm.value'].parts()[2]
Cal_Z2 = t2["xyzcal.px"].parts()[2]
Cal_P2 = t2["xyzcal.mm"].parts()[2]
Obs_Z2 = t2["xyzobs.px.value"].parts()[2]
# Obs_P2 = t2['xyzobs.mm.value'].parts()[2]
diff_I = t1["intensity.sum.value"] - t2["intensity.sum.value"]
diff_Cal_Z = Cal_Z1 - (Cal_Z2 + 10)
diff_Obs_Z = Obs_Z1 - (Obs_Z2 + 10)
diff_Cal_P = Cal_P1 - (Cal_P2 + 2 * math.pi)
# diff_Obs_P = Obs_P1 - (Obs_P2 + 2*math.pi)
assert flex.abs(diff_I).all_lt(1e-7)
assert flex.abs(diff_Cal_Z).all_lt(1e-7)
assert flex.abs(diff_Cal_P).all_lt(1e-7)
assert flex.abs(diff_Obs_Z).all_lt(1e-7)
# assert(flex.abs(diff_Obs_P).all_lt(1e-7))
def test_integration_with_sampling(dials_data, tmpdir):
result = procrunner.run(
[
"dials.integrate",
dials_data("centroid_test_data").join("experiments.json"),
"profile.fitting=False",
"sampling.integrate_all_reflections=False",
"prediction.padding=0",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
with tmpdir.join("integrated.refl").open("rb") as fh:
table = pickle.load(fh)
assert len(table) == 1000
def test_integration_with_sample_size(dials_data, tmpdir):
result = procrunner.run(
[
"dials.integrate",
dials_data("centroid_test_data").join("experiments.json"),
"profile.fitting=False",
"sampling.integrate_all_reflections=False",
"sampling.minimum_sample_size=500",
"prediction.padding=0",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
with tmpdir.join("integrated.refl").open("rb") as fh:
table = pickle.load(fh)
assert len(table) == 500
def test_multi_sequence(dials_regression, run_in_tmpdir):
result = procrunner.run(
[
"dials.integrate",
os.path.join(
dials_regression,
"integration_test_data",
"multi_sweep",
"experiments.json",
),
os.path.join(
dials_regression,
"integration_test_data",
"multi_sweep",
"indexed.pickle",
),
"prediction.padding=0",
]
)
assert not result.returncode and not result.stderr
assert os.path.exists("integrated.refl")
with open("integrated.refl", "rb") as fh:
table = pickle.load(fh)
assert len(table) == 4020
# Check the results
T1 = table[:2010]
T2 = table[2010:]
ID1 = list(set(T1["id"]))
ID2 = list(set(T2["id"]))
assert len(ID1) == 1
assert len(ID2) == 1
assert ID1[0] == 0
assert ID2[0] == 1
I1 = T1["intensity.prf.value"]
I2 = T2["intensity.prf.value"]
F1 = T1.get_flags(T1.flags.integrated_prf)
F2 = T2.get_flags(T2.flags.integrated_prf)
assert F1 == F2
I1 = I1.select(F1)
I2 = I2.select(F2)
assert flex.abs(I1 - I2) < 1e-6
def test_multi_lattice(dials_regression, tmpdir):
result = procrunner.run(
[
"dials.integrate",
os.path.join(
dials_regression,
"integration_test_data",
"multi_lattice",
"experiments.json",
),
os.path.join(
dials_regression,
"integration_test_data",
"multi_lattice",
"indexed.pickle",
),
"prediction.padding=0",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("integrated.refl").check()
table = flex.reflection_table.from_file(tmpdir.join("integrated.refl"))
assert len(table) == 5605
# Check output contains from two lattices
exp_id = list(set(table["id"]))
assert len(exp_id) == 2
# Check both lattices have integrated reflections
mask = table.get_flags(table.flags.integrated_prf)
table = table.select(mask)
exp_id = list(set(table["id"]))
assert len(exp_id) == 2
def test_output_rubbish(dials_data, tmpdir):
result = procrunner.run(
[
"dials.index",
dials_data("centroid_test_data").join("datablock.json"),
dials_data("centroid_test_data").join("strong.pickle"),
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("indexed.expt").check(file=1)
assert tmpdir.join("indexed.refl").check(file=1)
# Call dials.integrate
result = procrunner.run(
[
"dials.integrate",
"indexed.expt",
"indexed.refl",
"profile.fitting=False",
"prediction.padding=0",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("integrated.refl").check(file=1)
with tmpdir.join("integrated.refl").open("rb") as fh:
table = pickle.load(fh)
assert "id" in table
for row in table.rows():
assert row["id"] == 0
def test_integrate_with_kapton(dials_regression, tmpdir):
tmpdir.chdir()
loc = tmpdir.strpath
pickle_name = "idx-20161021225550223_indexed.pickle"
json_name = "idx-20161021225550223_refined_experiments.json"
image_name = "20161021225550223.pickle"
pickle_path = os.path.join(
dials_regression, "integration_test_data", "stills_PSII", pickle_name
)
json_path = os.path.join(
dials_regression, "integration_test_data", "stills_PSII", json_name
)
image_path = os.path.join(
dials_regression, "integration_test_data", "stills_PSII", image_name
)
assert os.path.exists(pickle_path)
assert os.path.exists(json_path)
shutil.copy(pickle_path, loc)
shutil.copy(image_path, loc)
with open(json_name, "w") as w, open(json_path, "r") as r:
w.write(r.read() % loc.replace("\\", "\\\\"))
templ_phil = """
output {
experiments = 'idx-20161021225550223_integrated_experiments_%s.expt'
reflections = 'idx-20161021225550223_integrated_%s.refl'
}
integration {
lookup.mask = '%s'
integrator = stills
profile.fitting = False
background.algorithm = simple
debug {
output = True
separate_files = False
split_experiments = False
}
}
profile {
gaussian_rs.min_spots.overall = 0
}
absorption_correction {
apply = %s
algorithm = fuller_kapton
fuller_kapton {
smart_sigmas = True
}
}
"""
without_kapton_phil = templ_phil % (
"nokapton",
"nokapton",
os.path.join(
dials_regression, "integration_test_data", "stills_PSII", "mask.pickle"
).replace("\\", "\\\\"),
"False",
)
with_kapton_phil = templ_phil % (
"kapton",
"kapton",
os.path.join(
dials_regression, "integration_test_data", "stills_PSII", "mask.pickle"
).replace("\\", "\\\\"),
"True",
)
with open("integrate_without_kapton.phil", "w") as f:
f.write(without_kapton_phil)
with open("integrate_with_kapton.phil", "w") as f:
f.write(with_kapton_phil)
# Call dials.integrate with and without kapton correction
for phil in "integrate_without_kapton.phil", "integrate_with_kapton.phil":
result = procrunner.run(["dials.integrate", pickle_name, json_name, phil])
assert not result.returncode and not result.stderr
results = []
for mode in "kapton", "nokapton":
result = os.path.join(loc, "idx-20161021225550223_integrated_%s.refl" % mode)
with open(result, "rb") as f:
table = pickle.load(f)
millers = table["miller_index"]
test_indices = {"zero": (-5, 2, -6), "low": (-2, -20, 7), "high": (-1, -10, 4)}
test_rows = {k: millers.first_index(v) for k, v in test_indices.items()}
test_I_sigsqI = {
k: (table[v]["intensity.sum.value"], table[v]["intensity.sum.variance"])
for k, v in test_rows.items()
}
results.append(test_I_sigsqI)
assert results[0]["zero"][0] == results[1]["zero"][0]
assert results[0]["zero"][1] - results[1]["zero"][1] < 0.0001
assert False not in [results[0]["low"][i] > results[1]["low"][i] for i in (0, 1)]
assert False not in [results[0]["high"][i] > results[1]["high"][i] for i in (0, 1)]
| [
"[email protected]"
] | |
0c4043442316a2cf94c4f5be142ff5d603430e8d | 4b3ae6048ced0d7f88a585af29fa3a7b15005749 | /Python/Django/dojo_ninjas/apps/books_authors/models.py | 69cda65e65a601d28283fe6149c273df57bf038c | [] | no_license | ajag408/DojoAssignments | a6320856466ac21d38e8387bdcbbe2a02009e418 | 03baa0ff5261aee6ffedf724657b3a8c7cdffe47 | refs/heads/master | 2022-12-11T15:50:46.839881 | 2021-06-07T20:57:17 | 2021-06-07T20:57:17 | 79,872,914 | 0 | 0 | null | 2022-12-08T00:35:09 | 2017-01-24T02:58:15 | Python | UTF-8 | Python | false | false | 915 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Book(models.Model):
name = models.CharField(max_length = 255)
desc = models.TextField()
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
def __repr__(self):
return "<Book object: {} {}>".format(self.name, self.desc)
class Author(models.Model):
first_name = models.CharField(max_length = 255)
last_name = models.CharField(max_length = 255)
email = models.CharField(max_length = 255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
books = models.ManyToManyField(Book, related_name = 'authors')
notes = models.TextField()
def __repr__(self):
return "<Author object: {} {} {}>".format(self.first_name, self.last_name, self.email)
| [
"[email protected]"
] | |
73e8001de0324ba056ab3490a6008921eeda2852 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/efc5dac52cdfab25c3b163958830325f6898d3b6-<exec_command>-fix.py | c96b8e5b71acfcfaf0ad856b372cd9ccc0252b3f | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | def exec_command(self, cmd, in_data=None, sudoable=True):
' run a command on the local host '
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.debug('in local.exec_command()')
executable = (C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None)
display.vvv('EXEC {0}'.format(cmd), host=self._play_context.remote_addr)
display.debug('opening command with Popen()')
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = map(to_bytes, cmd)
p = subprocess.Popen(cmd, shell=isinstance(cmd, (text_type, binary_type)), executable=executable, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
display.debug('done running command with Popen()')
if (self._play_context.prompt and sudoable):
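        # Privilege escalation handshake: read stdout/stderr without blocking
        # until the become-success marker or a password prompt shows up, then
        # feed the become password on stdin below.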
fcntl.fcntl(p.stdout, fcntl.F_SETFL, (fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK))
fcntl.fcntl(p.stderr, fcntl.F_SETFL, (fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK))
become_output = ''
while ((not self.check_become_success(become_output)) and (not self.check_password_prompt(become_output))):
(rfd, wfd, efd) = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
if (p.stdout in rfd):
chunk = p.stdout.read()
elif (p.stderr in rfd):
chunk = p.stderr.read()
else:
(stdout, stderr) = p.communicate()
raise AnsibleError(('timeout waiting for privilege escalation password prompt:\n' + become_output))
if (not chunk):
(stdout, stderr) = p.communicate()
raise AnsibleError(('privilege output closed while waiting for password prompt:\n' + become_output))
become_output += chunk
if (not self.check_become_success(become_output)):
p.stdin.write((to_bytes(self._play_context.become_pass, errors='surrogate_or_strict') + b'\n'))
fcntl.fcntl(p.stdout, fcntl.F_SETFL, (fcntl.fcntl(p.stdout, fcntl.F_GETFL) & (~ os.O_NONBLOCK)))
fcntl.fcntl(p.stderr, fcntl.F_SETFL, (fcntl.fcntl(p.stderr, fcntl.F_GETFL) & (~ os.O_NONBLOCK)))
display.debug('getting output with communicate()')
(stdout, stderr) = p.communicate(in_data)
display.debug('done communicating')
display.debug('done with local.exec_command()')
return (p.returncode, stdout, stderr) | [
"[email protected]"
] | |
273883a6eab58c2867888d3648f56983ccb66680 | c577f5380b4799b4db54722749cc33f9346eacc1 | /BugSwarm/scikit-learn-scikit-learn-423768208/buggy_files/sklearn/impute.py | b9afbc76bd2d8053e2234938c1f6e21776c954d5 | [] | no_license | tdurieux/BugSwarm-dissection | 55db683fd95f071ff818f9ca5c7e79013744b27b | ee6b57cfef2119523a083e82d902a6024e0d995a | refs/heads/master | 2020-04-30T17:11:52.050337 | 2019-05-09T13:42:03 | 2019-05-09T13:42:03 | 176,972,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,369 | py | """Transformers for missing value imputation"""
# Authors: Nicolas Tresegnie <[email protected]>
# Sergey Feldman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from collections import namedtuple
from .base import BaseEstimator, TransformerMixin
from .base import clone
from .preprocessing import normalize
from .utils import check_array, check_random_state, safe_indexing
from .utils.sparsefuncs import _get_median
from .utils.validation import check_is_fitted
from .utils.validation import FLOAT_DTYPES
from .utils.fixes import _object_dtype_isnan
from .utils import is_scalar_nan
from .externals import six
zip = six.moves.zip
map = six.moves.map
ImputerTriplet = namedtuple('ImputerTriplet', ['feat_idx',
'neighbor_feat_idx',
'predictor'])
__all__ = [
'MissingIndicator',
'SimpleImputer',
'ChainedImputer',
]
def _check_inputs_dtype(X, missing_values):
if (X.dtype.kind in ("f", "i", "u") and
not isinstance(missing_values, numbers.Real)):
raise ValueError("'X' and 'missing_values' types are expected to be"
" both numerical. Got X.dtype={} and "
" type(missing_values)={}."
.format(X.dtype, type(missing_values)))
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if is_scalar_nan(value_to_mask):
if X.dtype.kind == "f":
return np.isnan(X)
elif X.dtype.kind in ("i", "u"):
# can't have NaNs in integer array.
return np.zeros(X.shape, dtype=bool)
else:
# np.isnan does not work on object dtypes.
return _object_dtype_isnan(X)
else:
# X == value_to_mask with object dytpes does not always perform
# element-wise for old versions of numpy
return np.equal(X, value_to_mask)
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
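    # Example: array=[1, 1, 2] with extra_value=0 and n_repeat=3 behaves like
    # [1, 1, 2, 0, 0, 0], where 0 occurs most often (3 > 2), so 0 is returned.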
# Compute the most frequent value in array only
if array.size > 0:
with warnings.catch_warnings():
# stats.mode raises a warning when input array contains objects due
# to incapacity to detect NaNs. Irrelevant here since input array
# has already been NaN-masked.
warnings.simplefilter("ignore", RuntimeWarning)
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# Ties the breaks. Copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
class SimpleImputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <impute>`.
Parameters
----------
missing_values : number, string, np.nan (default) or None
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed.
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
each column. Can only be used with numeric data.
- If "median", then replace missing values using the median along
each column. Can only be used with numeric data.
- If "most_frequent", then replace missing using the most frequent
value along each column. Can be used with strings or numeric data.
- If "constant", then replace missing values with fill_value. Can be
used with strings or numeric data.
.. versionadded:: 0.20
strategy="constant" for fixed value imputation.
fill_value : string or numerical value, optional (default=None)
When strategy == "constant", fill_value is used to replace all
occurrences of missing_values.
If left to the default, fill_value will be 0 when imputing numerical
data and "missing_value" for strings or object data types.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is encoded as a CSR matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import SimpleImputer
>>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
... # doctest: +NORMALIZE_WHITESPACE
SimpleImputer(copy=True, fill_value=None, missing_values=nan,
strategy='mean', verbose=0)
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> print(imp_mean.transform(X))
... # doctest: +NORMALIZE_WHITESPACE
[[ 7. 2. 3. ]
[ 4. 3.5 6. ]
[10. 3.5 9. ]]
Notes
-----
Columns which only contained missing values at `fit` are discarded upon
`transform` if strategy is not "constant".
"""
def __init__(self, missing_values=np.nan, strategy="mean",
fill_value=None, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.fill_value = fill_value
self.verbose = verbose
self.copy = copy
def _validate_input(self, X):
allowed_strategies = ["mean", "median", "most_frequent", "constant"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.strategy in ("most_frequent", "constant"):
dtype = None
else:
dtype = FLOAT_DTYPES
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
try:
X = check_array(X, accept_sparse='csc', dtype=dtype,
force_all_finite=force_all_finite, copy=self.copy)
except ValueError as ve:
if "could not convert" in str(ve):
raise ValueError("Cannot use {0} strategy with non-numeric "
"data. Received datatype :{1}."
"".format(self.strategy, X.dtype.kind))
else:
raise ve
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError("SimpleImputer does not support data with dtype "
"{0}. Please provide either a numeric array (with"
" a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype))
return X
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : SimpleImputer
"""
X = self._validate_input(X)
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if self.fill_value is None:
if X.dtype.kind in ("i", "u", "f"):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = self.fill_value
# fill_value should be numerical in case of numerical input
if (self.strategy == "constant" and
X.dtype.kind in ("i", "u", "f") and
not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a "
"numerical value when imputing numerical "
"data".format(fill_value))
if sparse.issparse(X):
# missing_values = 0 not allowed with sparse data as it would
# force densification
if self.missing_values == 0:
raise ValueError("Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead.")
else:
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
fill_value)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
fill_value)
return self
def _sparse_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on sparse data."""
mask_data = _get_mask(X.data, missing_values)
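        # entries absent from a CSC column are implicit zeros: number of rows
        # minus the number of stored values per column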
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if strategy == "constant":
# for constant strategy, self.statistcs_ is used to store
# fill_value in each column
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
# combine explicit and implicit zeros
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if strategy == "mean":
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif strategy == "median":
statistics[i] = _get_median(column,
n_zeros)
elif strategy == "most_frequent":
statistics[i] = _most_frequent(column,
0,
n_zeros)
return statistics
def _dense_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on dense data."""
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
median_masked = np.ma.median(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
# properly if the first element is masked and if its frequency
# is equal to the frequency of the most frequent valid element
# See https://github.com/scipy/scipy/issues/2636
# To be able access the elements by columns
X = X.transpose()
mask = mask.transpose()
if X.dtype.kind == "O":
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
# Constant
elif strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
return np.full(X.shape[1], fill_value, dtype=X.dtype)
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
"""
check_is_fitted(self, 'statistics_')
X = self._validate_input(X)
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError("X has %d features per sample, expected %d"
% (X.shape[1], self.statistics_.shape[0]))
# Delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = statistics
else:
# same as np.isnan but also works for object dtypes
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
missing = np.arange(X.shape[1])[invalid_mask]
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
# Do actual imputation
if sparse.issparse(X):
if self.missing_values == 0:
raise ValueError("Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead.")
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype,
copy=False)
else:
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask.transpose())[::-1]
X[coordinates] = values
return X
class ChainedImputer(BaseEstimator, TransformerMixin):
"""Chained imputer transformer to impute missing values.
Basic implementation of chained imputer from MICE (Multivariate
Imputations by Chained Equations) package from R. This version assumes all
of the features are Gaussian.
Read more in the :ref:`User Guide <mice>`.
Parameters
----------
missing_values : int, np.nan, optional (default=np.nan)
The placeholder for the missing values. All occurrences of
``missing_values`` will be imputed.
imputation_order : str, optional (default="ascending")
The order in which the features will be imputed. Possible values:
"ascending"
From features with fewest missing values to most.
"descending"
From features with most missing values to fewest.
"roman"
Left to right.
"arabic"
Right to left.
"random"
A random order for each round.
n_imputations : int, optional (default=100)
Number of chained imputation rounds to perform, the results of which
will be used in the final average.
n_burn_in : int, optional (default=10)
Number of initial imputation rounds to perform the results of which
will not be returned.
predictor : estimator object, default=BayesianRidge()
The predictor to use at each step of the round-robin imputation.
It must support ``return_std`` in its ``predict`` method.
n_nearest_features : int, optional (default=None)
Number of other features to use to estimate the missing values of
the each feature column. Nearness between features is measured using
the absolute correlation coefficient between each feature pair (after
initial imputation). Can provide significant speed-up when the number
of features is huge. If ``None``, all features will be used.
initial_strategy : str, optional (default="mean")
Which strategy to use to initialize the missing values. Same as the
``strategy`` parameter in :class:`sklearn.impute.SimpleImputer`
Valid values: {"mean", "median", "most_frequent", or "constant"}.
min_value : float, optional (default=None)
Minimum possible imputed value. Default of ``None`` will set minimum
to negative infinity.
max_value : float, optional (default=None)
Maximum possible imputed value. Default of ``None`` will set maximum
to positive infinity.
verbose : int, optional (default=0)
Verbosity flag, controls the debug messages that are issued
as functions are evaluated. The higher, the more verbose. Can be 0, 1,
or 2.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by ``np.random``.
Attributes
----------
    initial_imputer_ : object of class :class:`sklearn.impute.SimpleImputer`
The imputer used to initialize the missing values.
imputation_sequence_ : list of tuples
Each tuple has ``(feat_idx, neighbor_feat_idx, predictor)``, where
``feat_idx`` is the current feature to be imputed,
``neighbor_feat_idx`` is the array of other features used to impute the
current feature, and ``predictor`` is the trained predictor used for
the imputation.
Notes
-----
The R version of MICE does not have inductive functionality, i.e. first
fitting on ``X_train`` and then transforming any ``X_test`` without
additional fitting. We do this by storing each feature's predictor during
the round-robin ``fit`` phase, and predicting without refitting (in order)
during the ``transform`` phase.
Features which contain all missing values at ``fit`` are discarded upon
``transform``.
Features with missing values in transform which did not have any missing
values in fit will be imputed with the initial imputation method only.
References
----------
.. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice:
Multivariate Imputation by Chained Equations in R". Journal of
Statistical Software 45: 1-67.
<https://www.jstatsoft.org/article/view/v045i03>`_
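    Examples
    --------
    A minimal usage sketch (the exact imputed values depend on
    ``random_state`` and on the number of imputation rounds):
    >>> import numpy as np
    >>> from sklearn.impute import ChainedImputer
    >>> imputer = ChainedImputer(n_imputations=10, random_state=0)
    >>> X = [[1, 2], [3, 6], [4, 8], [np.nan, 3], [7, np.nan]]
    >>> X_imputed = imputer.fit_transform(X)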
"""
def __init__(self,
missing_values=np.nan,
imputation_order='ascending',
n_imputations=100,
n_burn_in=10,
predictor=None,
n_nearest_features=None,
initial_strategy="mean",
min_value=None,
max_value=None,
verbose=False,
random_state=None):
self.missing_values = missing_values
self.imputation_order = imputation_order
self.n_imputations = n_imputations
self.n_burn_in = n_burn_in
self.predictor = predictor
self.n_nearest_features = n_nearest_features
self.initial_strategy = initial_strategy
self.min_value = min_value
self.max_value = max_value
self.verbose = verbose
self.random_state = random_state
def _impute_one_feature(self,
X_filled,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
predictor=None,
fit_mode=True):
"""Impute a single feature from the others provided.
This function predicts the missing values of one of the features using
the current estimates of all the other features. The ``predictor`` must
support ``return_std=True`` in its ``predict`` method for this function
to work.
Parameters
----------
X_filled : ndarray
Input data with the most recent imputations.
mask_missing_values : ndarray
Input data's missing indicator matrix.
feat_idx : int
Index of the feature currently being imputed.
neighbor_feat_idx : ndarray
Indices of the features to be used in imputing ``feat_idx``.
predictor : object
The predictor to use at this step of the round-robin imputation.
It must support ``return_std`` in its ``predict`` method.
If None, it will be cloned from self._predictor.
fit_mode : boolean, default=True
Whether to fit and predict with the predictor or just predict.
Returns
-------
X_filled : ndarray
Input data with ``X_filled[missing_row_mask, feat_idx]`` updated.
predictor : predictor with sklearn API
The fitted predictor used to impute
``X_filled[missing_row_mask, feat_idx]``.
"""
# if nothing is missing, just return the default
# (should not happen at fit time because feat_ids would be excluded)
missing_row_mask = mask_missing_values[:, feat_idx]
if not np.any(missing_row_mask):
return X_filled, predictor
if predictor is None and fit_mode is False:
raise ValueError("If fit_mode is False, then an already-fitted "
"predictor should be passed in.")
if predictor is None:
predictor = clone(self._predictor)
if fit_mode:
X_train = safe_indexing(X_filled[:, neighbor_feat_idx],
~missing_row_mask)
y_train = safe_indexing(X_filled[:, feat_idx],
~missing_row_mask)
predictor.fit(X_train, y_train)
# get posterior samples
X_test = safe_indexing(X_filled[:, neighbor_feat_idx],
missing_row_mask)
mus, sigmas = predictor.predict(X_test, return_std=True)
good_sigmas = sigmas > 0
imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
imputed_values[~good_sigmas] = mus[~good_sigmas]
imputed_values[good_sigmas] = self.random_state_.normal(
loc=mus[good_sigmas], scale=sigmas[good_sigmas])
# clip the values
imputed_values = np.clip(imputed_values,
self._min_value,
self._max_value)
# update the feature
X_filled[missing_row_mask, feat_idx] = imputed_values
return X_filled, predictor
def _get_neighbor_feat_idx(self,
n_features,
feat_idx,
abs_corr_mat):
"""Get a list of other features to predict ``feat_idx``.
If self.n_nearest_features is less than or equal to the total
number of features, then use a probability proportional to the absolute
correlation between ``feat_idx`` and each other feature to randomly
choose a subsample of the other features (without replacement).
Parameters
----------
n_features : int
Number of features in ``X``.
feat_idx : int
Index of the feature currently being imputed.
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of ``X``. The diagonal has been zeroed
out and each feature has been normalized to sum to 1. Can be None.
Returns
-------
neighbor_feat_idx : array-like
The features to use to impute ``feat_idx``.
"""
if (self.n_nearest_features is not None and
self.n_nearest_features < n_features):
p = abs_corr_mat[:, feat_idx]
neighbor_feat_idx = self.random_state_.choice(
np.arange(n_features), self.n_nearest_features, replace=False,
p=p)
else:
inds_left = np.arange(feat_idx)
inds_right = np.arange(feat_idx + 1, n_features)
neighbor_feat_idx = np.concatenate((inds_left, inds_right))
return neighbor_feat_idx
def _get_ordered_idx(self, mask_missing_values):
"""Decide in what order we will update the features.
As a homage to the MICE R package, we will have 4 main options of
how to order the updates, and use a random order if anything else
is specified.
Also, this function skips features which have no missing values.
Parameters
----------
mask_missing_values : array-like, shape (n_samples, n_features)
Input data's missing indicator matrix, where "n_samples" is the
number of samples and "n_features" is the number of features.
Returns
-------
ordered_idx : ndarray, shape (n_features,)
The order in which to impute the features.
"""
frac_of_missing_values = mask_missing_values.mean(axis=0)
missing_values_idx = np.nonzero(frac_of_missing_values)[0]
if self.imputation_order == 'roman':
ordered_idx = missing_values_idx
elif self.imputation_order == 'arabic':
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == 'ascending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values,
kind='mergesort')[n:][::-1]
elif self.imputation_order == 'descending':
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values,
kind='mergesort')[n:]
elif self.imputation_order == 'random':
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
else:
raise ValueError("Got an invalid imputation order: '{0}'. It must "
"be one of the following: 'roman', 'arabic', "
"'ascending', 'descending', or "
"'random'.".format(self.imputation_order))
return ordered_idx
def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
"""Get absolute correlation matrix between features.
Parameters
----------
X_filled : ndarray, shape (n_samples, n_features)
Input data with the most recent imputations.
tolerance : float, optional (default=1e-6)
``abs_corr_mat`` can have nans, which will be replaced
with ``tolerance``.
Returns
-------
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of ``X`` at the beginning of the
current round. The diagonal has been zeroed out and each feature's
absolute correlations with all others have been normalized to sum
to 1.
"""
n_features = X_filled.shape[1]
if (self.n_nearest_features is None or
self.n_nearest_features >= n_features):
return None
abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
# np.corrcoef is not defined for features with zero std
abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
# ensures exploration, i.e. at least some probability of sampling
np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
# features are not their own neighbors
np.fill_diagonal(abs_corr_mat, 0)
# needs to sum to 1 for np.random.choice sampling
abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False)
return abs_corr_mat
def _initial_imputation(self, X):
"""Perform initial imputation for input X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data, where "n_samples" is the number of samples and
"n_features" is the number of features.
Returns
-------
Xt : ndarray, shape (n_samples, n_features)
Input data, where "n_samples" is the number of samples and
"n_features" is the number of features.
X_filled : ndarray, shape (n_samples, n_features)
Input data with the most recent imputations.
mask_missing_values : ndarray, shape (n_samples, n_features)
Input data's missing indicator matrix, where "n_samples" is the
number of samples and "n_features" is the number of features.
"""
if is_scalar_nan(self.missing_values):
force_all_finite = "allow-nan"
else:
force_all_finite = True
X = check_array(X, dtype=FLOAT_DTYPES, order="F",
force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
mask_missing_values = _get_mask(X, self.missing_values)
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(
missing_values=self.missing_values,
strategy=self.initial_strategy)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
valid_mask = np.flatnonzero(np.logical_not(
np.isnan(self.initial_imputer_.statistics_)))
Xt = X[:, valid_mask]
mask_missing_values = mask_missing_values[:, valid_mask]
return Xt, X_filled, mask_missing_values
def fit_transform(self, X, y=None):
"""Fits the imputer on X and return the transformed X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where "n_samples" is the number of samples and
"n_features" is the number of features.
y : ignored.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if self.predictor is None:
from .linear_model import BayesianRidge
self._predictor = BayesianRidge()
else:
self._predictor = clone(self.predictor)
self._min_value = np.nan if self.min_value is None else self.min_value
self._max_value = np.nan if self.max_value is None else self.max_value
self.initial_imputer_ = None
X, X_filled, mask_missing_values = self._initial_imputation(X)
# edge case: in case the user specifies 0 for n_imputations,
# then there is no need to do burn in and the result should be
# just the initial imputation (before clipping)
if self.n_imputations < 1:
return X_filled
X_filled = np.clip(X_filled, self._min_value, self._max_value)
# order in which to impute
# note this is probably too slow for large feature data (d > 100000)
# and a better way would be good.
# see: https://goo.gl/KyCNwj and subsequent comments
ordered_idx = self._get_ordered_idx(mask_missing_values)
abs_corr_mat = self._get_abs_corr_mat(X_filled)
# impute data
n_rounds = self.n_burn_in + self.n_imputations
n_samples, n_features = X_filled.shape
Xt = np.zeros((n_samples, n_features), dtype=X.dtype)
self.imputation_sequence_ = []
if self.verbose > 0:
print("[ChainedImputer] Completing matrix with shape %s"
% (X.shape,))
start_t = time()
for i_rnd in range(n_rounds):
if self.imputation_order == 'random':
ordered_idx = self._get_ordered_idx(mask_missing_values)
for feat_idx in ordered_idx:
neighbor_feat_idx = self._get_neighbor_feat_idx(n_features,
feat_idx,
abs_corr_mat)
X_filled, predictor = self._impute_one_feature(
X_filled, mask_missing_values, feat_idx, neighbor_feat_idx,
predictor=None, fit_mode=True)
predictor_triplet = ImputerTriplet(feat_idx,
neighbor_feat_idx,
predictor)
self.imputation_sequence_.append(predictor_triplet)
if i_rnd >= self.n_burn_in:
Xt += X_filled
if self.verbose > 0:
print('[ChainedImputer] Ending imputation round '
'%d/%d, elapsed time %0.2f'
% (i_rnd + 1, n_rounds, time() - start_t))
Xt /= self.n_imputations
Xt[~mask_missing_values] = X[~mask_missing_values]
return Xt
def transform(self, X):
"""Imputes all missing values in X.
Note that this is stochastic, and that if random_state is not fixed,
repeated calls, or permuted input, will yield different results.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self, 'initial_imputer_')
X, X_filled, mask_missing_values = self._initial_imputation(X)
# edge case: in case the user specifies 0 for n_imputations,
# then there is no need to do burn in and the result should be
# just the initial imputation (before clipping)
if self.n_imputations < 1:
return X_filled
X_filled = np.clip(X_filled, self._min_value, self._max_value)
n_rounds = self.n_burn_in + self.n_imputations
n_imputations = len(self.imputation_sequence_)
imputations_per_round = n_imputations // n_rounds
i_rnd = 0
Xt = np.zeros(X.shape, dtype=X.dtype)
if self.verbose > 0:
print("[ChainedImputer] Completing matrix with shape %s"
% (X.shape,))
start_t = time()
for it, predictor_triplet in enumerate(self.imputation_sequence_):
X_filled, _ = self._impute_one_feature(
X_filled,
mask_missing_values,
predictor_triplet.feat_idx,
predictor_triplet.neighbor_feat_idx,
predictor=predictor_triplet.predictor,
fit_mode=False
)
if not (it + 1) % imputations_per_round:
if i_rnd >= self.n_burn_in:
Xt += X_filled
if self.verbose > 1:
print('[ChainedImputer] Ending imputation round '
'%d/%d, elapsed time %0.2f'
% (i_rnd + 1, n_rounds, time() - start_t))
i_rnd += 1
Xt /= self.n_imputations
Xt[~mask_missing_values] = X[~mask_missing_values]
return Xt
def fit(self, X, y=None):
"""Fits the imputer on X and return self.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where "n_samples" is the number of samples and
"n_features" is the number of features.
y : ignored
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X)
return self
class MissingIndicator(BaseEstimator, TransformerMixin):
"""Binary indicators for missing values.
Parameters
----------
missing_values : number, string, np.nan (default) or None
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed.
features : str, optional
Whether the imputer mask should represent all or a subset of
features.
- If "missing-only" (default), the imputer mask will only represent
features containing missing values during fit time.
- If "all", the imputer mask will represent all features.
sparse : boolean or "auto", optional
Whether the imputer mask format should be sparse or dense.
- If "auto" (default), the imputer mask will be of same type as
input.
- If True, the imputer mask will be a sparse matrix.
- If False, the imputer mask will be a numpy array.
error_on_new : boolean, optional
If True (default), transform will raise an error when there are
features with missing values in transform that have no missing values
        in fit. This is applicable only when ``features="missing-only"``.
Attributes
----------
features_ : ndarray, shape (n_missing_features,) or (n_features,)
        The feature indices which will be returned when calling ``transform``.
        They are computed during ``fit``. For ``features='all'``, it is
        equal to ``range(n_features)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import MissingIndicator
>>> X1 = np.array([[np.nan, 1, 3],
... [4, 0, np.nan],
... [8, 1, 0]])
>>> X2 = np.array([[5, 1, np.nan],
... [np.nan, 2, 3],
... [2, 4, 0]])
>>> indicator = MissingIndicator()
>>> indicator.fit(X1)
MissingIndicator(error_on_new=True, features='missing-only',
missing_values=nan, sparse='auto')
>>> X2_tr = indicator.transform(X2)
>>> X2_tr
array([[False, True],
[ True, False],
[False, False]])
"""
def __init__(self, missing_values=np.nan, features="missing-only",
sparse="auto", error_on_new=True):
self.missing_values = missing_values
self.features = features
self.sparse = sparse
self.error_on_new = error_on_new
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray or sparse matrix}, shape (n_samples, n_features)
The input data with missing values. Note that ``X`` has been
checked in ``fit`` and ``transform`` before to call this function.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape \
(n_samples, n_features) or (n_samples, n_features_with_missing)
The imputer mask of the original data.
features_with_missing : ndarray, shape (n_features_with_missing)
The features containing missing values.
"""
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
# The imputer mask will be constructed with the same sparse format
# as X.
sparse_constructor = (sparse.csr_matrix if X.format == 'csr'
else sparse.csc_matrix)
imputer_mask = sparse_constructor(
(mask, X.indices.copy(), X.indptr.copy()),
shape=X.shape, dtype=bool)
missing_values_mask = imputer_mask.copy()
missing_values_mask.eliminate_zeros()
features_with_missing = (
np.flatnonzero(np.diff(missing_values_mask.indptr))
if missing_values_mask.format == 'csc'
else np.unique(missing_values_mask.indices))
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == 'csr':
imputer_mask = imputer_mask.tocsc()
else:
if sparse.issparse(X):
# case of sparse matrix with 0 as missing values. Implicit and
# explicit zeros are considered as missing values.
X = X.toarray()
imputer_mask = _get_mask(X, self.missing_values)
features_with_missing = np.flatnonzero(imputer_mask.sum(axis=0))
if self.sparse is True:
imputer_mask = sparse.csc_matrix(imputer_mask)
return imputer_mask, features_with_missing
def fit(self, X, y=None):
"""Fit the transformer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = check_array(X, accept_sparse=('csc', 'csr'),
force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
self._n_features = X.shape[1]
if self.features not in ('missing-only', 'all'):
raise ValueError("'features' has to be either 'missing-only' or "
"'all'. Got {} instead.".format(self.features))
if not ((isinstance(self.sparse, six.string_types) and
self.sparse == "auto") or isinstance(self.sparse, bool)):
raise ValueError("'sparse' has to be a boolean or 'auto'. "
"Got {!r} instead.".format(self.sparse))
self.features_ = (self._get_missing_features_info(X)[1]
if self.features == 'missing-only'
else np.arange(self._n_features))
return self
def transform(self, X):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
check_is_fitted(self, "features_")
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = check_array(X, accept_sparse=('csc', 'csr'),
force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
if X.shape[1] != self._n_features:
raise ValueError("X has a different number of features "
"than during fitting.")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if (self.error_on_new and features_diff_fit_trans.size > 0):
raise ValueError("The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans))
if (self.features_.size > 0 and
self.features_.size < self._n_features):
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def fit_transform(self, X, y=None):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
return self.fit(X, y).transform(X)
| [
"[email protected]"
] | |
3c0bf67436cd0e0e3ae2dbe2ecd91f8ab58dff95 | 047d6c1f1097e1a6055b4408e3bf80a9e01c7e5d | /avrae/misc/rspell.py | 09a575c4bea6b09ebe476924d931863a592b0eec | [] | no_license | countpauper/countpauper | 274246f50e297a9ec1cd8d7842149e0ef1da53bd | efb1eea44152e9a55aed1ee1478e29df447c24c3 | refs/heads/master | 2023-07-23T00:35:58.619290 | 2023-07-08T14:09:06 | 2023-07-08T14:09:06 | 20,813,292 | 4 | 1 | null | 2021-02-15T08:48:50 | 2014-06-13T18:11:51 | C | UTF-8 | Python | false | false | 358 | py | !alias rspell <drac2>
spell_db=spell_list=load_json(get_gvar('13dc3e0a-a230-40ca-8fb3-a39846300b18'))
args=argparse(&ARGS&)
levels=args.get('l',type_=int) or range(10)
spells=[n for n,p in spell_db.items() if p.level in levels]
if not spells:
return f'echo No spells {levels}'
spell=spells[randint(len(spells))]
return f'spell "{spell}"'
</drac2> | [
"[email protected]"
] | |
c205af018c3d6e98d0415f1a316565f2cdd8032e | d799ab92fff30ec3b4efc5aa079628971451c17a | /coilmq/tests/functional/test_basic.py | e4d4f999aa12ffc165ce4254075aa8d103028381 | [] | no_license | LucaLanziani/coilmq | cf87a3daed400ccc64548873827f148097d7d780 | dce6254801617b5612816dc8d95c3249a284e99a | refs/heads/master | 2021-01-15T16:00:07.231608 | 2014-12-18T12:29:30 | 2014-12-18T12:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,156 | py | # -*- coding: utf-8 -*-
"""
Functional tests that use the default memory-based storage backends and default
scheduler implementations.
"""
import zlib
from coilmq.auth.simple import SimpleAuthenticator
from coilmq.tests.functional import BaseFunctionalTestCase
__authors__ = ['"Hans Lellelid" <[email protected]>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class BasicTest(BaseFunctionalTestCase):
"""
Functional tests using default storage engine, etc.
"""
def test_connect(self):
""" Test a basic (non-auth) connection. """
c = self._new_client()
def test_connect_auth(self):
""" Test connecting when auth is required. """
self.server.authenticator = SimpleAuthenticator(store={'user': 'pass'})
c1 = self._new_client(connect=False)
c1.connect()
r = c1.received_frames.get(timeout=1)
assert r.command == 'ERROR'
assert 'Auth' in r.body
c2 = self._new_client(connect=False)
c2.connect(headers={'login': 'user', 'passcode': 'pass'})
r2 = c2.received_frames.get(timeout=1)
print r2
assert r2.command == 'CONNECTED'
c3 = self._new_client(connect=False)
c3.connect(headers={'login': 'user', 'passcode': 'pass-invalid'})
r3 = c3.received_frames.get(timeout=1)
print r3
assert r3.command == 'ERROR'
def test_send_receipt(self):
c1 = self._new_client()
c1.send('/topic/foo', 'A message', extra_headers={'receipt': 'FOOBAR'})
r = c1.received_frames.get(timeout=1)
assert r.command == "RECEIPT"
assert r.receipt_id == "FOOBAR"
def test_subscribe(self):
c1 = self._new_client()
c1.subscribe('/queue/foo')
c2 = self._new_client()
c2.subscribe('/queue/foo2')
c2.send('/queue/foo', 'A message')
assert c2.received_frames.qsize() == 0
r = c1.received_frames.get()
assert r.command == 'MESSAGE'
assert r.body == 'A message'
def test_disconnect(self):
"""
Test the 'polite' disconnect.
"""
c1 = self._new_client()
c1.connect()
c1.disconnect()
assert c1.received_frames.qsize() == 0
def test_send_binary(self):
"""
Test sending binary data.
"""
c1 = self._new_client()
c1.subscribe('/queue/foo')
# Read some random binary data.
# (This should be cross-platform.)
message = 'This is the message that will be compressed.'
c2 = self._new_client()
compressed = zlib.compress(message)
print '%r' % compressed
c2.send('/queue/foo', zlib.compress(message))
r = c1.received_frames.get()
assert r.command == 'MESSAGE'
print '%r' % r.body
assert zlib.decompress(r.body) == message
def test_send_utf8(self):
"""
Test sending utf-8-encoded strings.
"""
c1 = self._new_client()
c1.subscribe('/queue/foo')
unicodemsg = u'我能吞下玻璃而不伤身体'
utf8msg = unicodemsg.encode('utf-8')
print "len(unicodemsg) = %d" % len(unicodemsg)
print "len(utf8msg) = %d" % len(utf8msg)
c2 = self._new_client()
print '%r' % utf8msg
c2.send('/queue/foo', utf8msg)
r = c1.received_frames.get()
assert r.command == 'MESSAGE'
print '%r' % r.body
assert r.body == utf8msg | [
"[email protected]"
] | |
388d6c6f32594b5c7de8fdd8ce1816fdb26a9b8c | b11da8046764b45911f358593db746d93005d359 | /crew/__init__.py | 95f5e4521e0d8d3b5ccddc3e348987c721673216 | [] | no_license | linearregression/crew | 4f1848ca587e3fac777e305fb5ae481ec3df0c4a | d6c01f5ff2ffc83d5a672206ad2819968887c778 | refs/heads/master | 2021-01-17T20:58:37.969353 | 2015-06-16T15:42:04 | 2015-06-16T15:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from .exceptions import TimeoutError, ExpirationError, TaskError, DuplicateTaskId
| [
"[email protected]"
] | |
ec540c52386303d5ed7435b2de48a96f7ed7af0b | ff5eea95bb0827cb086c32f4ec1c174b28e5b82d | /gammapy/background/template.py | 73305041b2c8006fd38496bb6d09cdbd15753079 | [] | no_license | pflaumenmus/gammapy | 4830cc5506a4052658f30077fa4e11d8c685ede0 | 7b5caf832c9950c886528ca107203ce9b83c7ebf | refs/heads/master | 2021-01-15T23:27:46.521337 | 2013-09-25T14:23:35 | 2013-09-25T14:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Template background estimation
Reference: http://adsabs.harvard.edu/abs/2003A%26A...410..389R
"""
from __future__ import division
| [
"[email protected]"
] | |
2685dacd5f1d60f180f0a4944f949508f8762f83 | 8ce2ef401bfa8a7edc075f30671ceb7e12001566 | /tensorflow/contrib/all_reduce/python/all_reduce_test.py | 304fd7fb8a37f1aab91f47d754eb2efba81304a5 | [
"Apache-2.0"
] | permissive | TomZRoid/tensorflow | e8167a31dcd707279365c8ee5ec283c00edaafba | 89390faf68c153ef8bea0e20ba128c0d54cee0e0 | refs/heads/master | 2020-03-30T22:38:50.662448 | 2018-11-08T06:25:34 | 2018-11-08T06:25:34 | 151,673,686 | 2 | 0 | Apache-2.0 | 2018-10-05T05:15:45 | 2018-10-05T05:15:44 | null | UTF-8 | Python | false | false | 10,522 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.all_reduce.python..all_reduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.all_reduce.python import all_reduce as ar
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class AllReduceTest(test_util.TensorFlowTestCase):
def testFlattenTensorsShapesDefined(self):
x = array_ops.placeholder(types_pb2.DT_FLOAT, [None])
with self.assertRaisesRegexp(ValueError,
"must have statically known shape"):
ar._flatten_tensors([x, x])
def testRingPermutations(self):
# 0 devices
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 0, [])
self.assertEqual(pred_by_c_d, [])
self.assertEqual(rank_by_c_d, [])
# 1 worker, 1 subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
self.assertEqual(pred_by_c_d, [[0]])
self.assertEqual(rank_by_c_d, [[0]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[2, 0, 1]])
self.assertEqual(rank_by_c_d, [[0, 1, 2]])
# multiple workers, 1 subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[5, 0, 1, 2, 3, 4]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(3, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[8, 0, 1, 2, 3, 4, 5, 6, 7]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7, 8]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [2, 1, 0])
self.assertEqual(pred_by_c_d, [[1, 2, 3, 4, 5, 0]])
self.assertEqual(rank_by_c_d, [[2, 1, 0, 5, 4, 3]])
# 1 worker, multiple subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [2, 3, 0, 1]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 4, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2],
[3, 0, 1, 2], [3, 0, 1, 2]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [3, 0, 1, 2],
[2, 3, 0, 1], [1, 2, 3, 0]])
# multiple worker, multiple subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[7, 0, 1, 2, 3, 4, 5, 6],
[3, 0, 5, 2, 7, 4, 1, 6]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7],
[2, 3, 0, 1, 6, 7, 4, 5]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 3, 2, 1])
self.assertEqual(pred_by_c_d, [[5, 2, 3, 0, 1, 6, 7, 4],
[1, 2, 7, 0, 5, 6, 3, 4]])
self.assertEqual(rank_by_c_d, [[0, 3, 2, 1, 4, 7, 6, 5],
[2, 1, 0, 3, 6, 5, 4, 7]])
def _buildInput(self, num_workers, num_gpus):
t8 = constant_op.constant(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
types_pb2.DT_FLOAT)
input_tensors = []
device_names = []
for w in range(0, num_workers):
for d in range(0, num_gpus):
dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
device_names.append(dn)
with ops.device(dn):
input_tensors.append(array_ops.identity(t8))
return input_tensors, device_names
def testBuildRingGatherPassStructure(self):
# 1 worker, 1 device
input_tensors, device_names = self._buildInput(1, 1)
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,
pred_by_c_d, rank_by_c_d,
math_ops.add)
self.assertEqual(output_tensors, input_tensors)
# 1 worker, 4 devices, 2 subchunks
input_tensors, device_names = self._buildInput(1, 4)
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
output_tensors, pad_len = ar._build_ring_gather(
input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)
self.assertEqual(0, pad_len)
# same number outputs as inputs
self.assertEqual(len(output_tensors), len(input_tensors))
num_chunks = 2 * len(input_tensors)
tlen = tensor_shape.dimension_value(input_tensors[0].shape[0])
for otl in output_tensors:
self.assertEqual(len(otl), num_chunks)
for ot in otl:
self.assertEqual(ot.shape, [tlen/num_chunks])
def _buildInitialVars(self, shape, dev_list):
values = []
num_devices = len(dev_list)
dim = np.prod(shape) if shape else 1
for d in range(0, num_devices):
with ops.device(dev_list[d]):
npt = np.zeros(shape).astype(np.float32)
alias = np.frombuffer(npt.data, dtype=np.float32)
for i in range(0, dim):
alias[i] = i + 0.01 * d
var = state_ops.variable_op(shape, types_pb2.DT_FLOAT)
state_ops.init_variable(var, npt).op.run()
values.append(var)
return values
# pylint: disable=g-long-lambda
def _buildRing(self, num_workers, num_gpus, subdiv):
gpu_perm = range(0, num_gpus)
return lambda x, un_op: ar.build_ring_all_reduce(
x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
def _testAllReduce(self, num_workers, num_gpus, shape, build_f):
# Use local CPU as device for all inputs.
num_devices = num_workers * num_gpus
dev_list = ["/replica:0/task:0/device:CPU:0"
for _ in range(num_devices)]
with self.cached_session():
input_tensors = self._buildInitialVars(shape, dev_list)
un_op = lambda x: math_ops.div(
x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT))
simple_sum = math_ops.add_n(input_tensors)
simple_sum.op.run()
output_tensors = build_f(input_tensors, un_op)
sum_reduced = math_ops.add_n(output_tensors)
sum_reduced.op.run()
self.assertAllClose(sum_reduced.eval(), simple_sum.eval())
def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):
start_time = time.time()
build_f = self._buildRing(num_workers, num_gpus, subdiv)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("RingAllReduce num_workers=%d num_gpus=%d shape=%s "
"subdiv=%d elapsed=%f" %
(num_workers, num_gpus, shape, subdiv, elapsed))
def testRingAllReduce(self):
self._testRingAllReduce(1, 2, [], 1)
self._testRingAllReduce(1, 2, [8], 1)
self._testRingAllReduce(1, 2, [4, 4], 1)
self._testRingAllReduce(6, 1, [8], 1)
self._testRingAllReduce(1, 8, [32], 1)
self._testRingAllReduce(1, 8, [120], 1)
self._testRingAllReduce(2, 8, [7, 13], 1)
self._testRingAllReduce(2, 8, [8, 8], 2)
self._testRingAllReduce(2, 8, [8, 8], 4)
# TODO(tucker): The following test is surprisingly slow.
# Diagnose and fix before re-enabling.
# self._testRingAllReduce(4, 8, [8, 8, 2], 4)
def _buildShuffle(self, num_workers, num_gpus, num_shards):
# Use local CPU for all shuffle shards
gather_devices = ["/replica:0/task:0/device:CPU:0"
for _ in range(num_shards)]
return lambda x, un_op: ar.build_shuffle_all_reduce(
x, gather_devices, math_ops.add_n, un_op)
def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):
start_time = time.time()
build_f = self._buildShuffle(num_workers, num_gpus, num_shards)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("ShuffleAllReduce num_workers=%d num_gpus=%d shape=%s "
"elapsed=%f" % (num_workers, num_gpus, shape, elapsed))
def testShuffleAllReduce(self):
self._testShuffleAllReduce(1, 2, [], 1)
self._testShuffleAllReduce(1, 2, [8], 1)
self._testShuffleAllReduce(1, 2, [4, 4], 1)
self._testShuffleAllReduce(1, 8, [32], 1)
self._testShuffleAllReduce(1, 8, [120], 1)
self._testShuffleAllReduce(2, 8, [7, 13], 3)
self._testShuffleAllReduce(2, 8, [8, 8], 2)
self._testShuffleAllReduce(2, 8, [8, 8], 4)
self._testShuffleAllReduce(4, 8, [8, 8, 2], 4)
def _buildRecursiveHD(self, num_workers, num_gpus):
return lambda x, un_op: ar.build_recursive_hd_all_reduce(
x, math_ops.add, un_op)
# pylint: enable=g-long-lambda
def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape):
start_time = time.time()
build_f = self._buildRecursiveHD(num_workers, num_gpus)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("RecursiveHDAllReduce num_workers=%d num_gpus=%d "
"shape=%s elapsed=%f" %
(num_workers, num_gpus, shape, elapsed))
def testRecursiveHDAllReduce(self):
self._testRecursiveHDAllReduce(1, 2, [8])
self._testRecursiveHDAllReduce(1, 2, [4, 4])
self._testRecursiveHDAllReduce(1, 8, [32])
self._testRecursiveHDAllReduce(1, 8, [120])
self._testRecursiveHDAllReduce(2, 8, [8, 8])
self._testRecursiveHDAllReduce(4, 8, [8, 8, 2])
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
cb9b245593a93be30c210ad1b70214a87685c0a0 | fa32f7fe4068323b719725558423927ad307cc4b | /build_isolated/rostopic/catkin_generated/pkg.develspace.context.pc.py | 531b3ad0a23091aaac1729f6620ee17b90c89e2e | [] | no_license | CJohnson5136/ros_catkin_ws | d07ee8c20bc1ebe6c05abdea24ef1f5dab14954b | 05193a7e587ab82e696c66176b151c43d2bcef82 | refs/heads/master | 2021-05-09T03:05:12.373334 | 2018-01-28T03:13:33 | 2018-01-28T03:13:33 | 119,227,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rostopic"
PROJECT_SPACE_DIR = "/home/pi/ros_catkin_ws/devel_isolated/rostopic"
PROJECT_VERSION = "1.13.5"
| [
"[email protected]"
] | |
0313e4040f7e129dd7f7dc51cb61c6c53b03576d | 3dcc44bf8acd3c6484b57578d8c5595d8119648d | /casp9_scripts/truncate_rosetta_files.py | 57c62df38219834ef0e52eba352a85cd322a04aa | [] | no_license | rhiju/rhiju_python | f0cab4dfd4dd75b72570db057a48e3d65e1d92c6 | eeab0750fb50a3078a698d190615ad6684dc2411 | refs/heads/master | 2022-10-29T01:59:51.848906 | 2022-10-04T21:28:41 | 2022-10-04T21:28:41 | 8,864,938 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | #!/usr/bin/python
from os import popen,system
from os.path import exists,dirname,basename,expanduser
import sys
import string
from glob import glob
indir = sys.argv[1]
outdir = sys.argv[2]
PYDIR = expanduser('~rhiju')+'/python/'
assert( exists( PYDIR ) )
inputres = 0
if len(sys.argv)>4:
startseq = int(sys.argv[3])
endseq = int(sys.argv[4])
inputres = 1
newprefix = 'truncate_termini_'
if len(sys.argv)>5:
newprefix = sys.argv[5]
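# Illustrative invocation (inferred from the argument parsing above, not an original usage note):
#   truncate_rosetta_files.py <indir> <outdir> [<startseq> <endseq> [<newprefix>]]
# When startseq/endseq are omitted they are read from the *.secstructprob file found below.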
command = 'mkdir '+outdir
print(command)
system(command)
if not inputres:
secstructprobfile = glob(indir+'/*.secstructprob')
outfile = outdir+'/truncate_sequence.txt'
assert(len(secstructprobfile)>0)
command = PYDIR+'/decide_termini_truncate.py '+secstructprobfile[0]+ ' ' + outfile
print(command)
system(command)
assert( exists( outfile))
line = open(outfile).readlines()
cols = string.split(line[0])
startseq = int(cols[0])
endseq = int(cols[1])
print
print 'Using start and end residues: ',startseq,endseq
print
infile = glob(indir+'/*.pdb')
if(len(infile)>0): # PDB file is optional.
infile = infile[0]
outfile = outdir + '/'+newprefix+basename(infile)
command = PYDIR+'/termini_truncate_pdb.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
else:
print 'COULD NOT FIND PDB FILE BUT THAT IS OK IF YOU ARE DOING CASP.'
infile = glob(indir+'/*.fasta*')
assert(len(infile)>0)
infile = infile[0]
outfile = outdir + '/'+newprefix+basename(infile)
command = PYDIR+'/termini_truncate_fasta.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
infile = glob(indir+'/*.psipred_ss2*')
assert(len(infile)>0)
infile = infile[0]
outfile = outdir + '/'+newprefix+basename(infile)
command = PYDIR+'/termini_truncate_psipred_ss2.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
infiles = glob(indir+'/*v1_3*')
assert(len(infiles)>1)
for infile in infiles:
outfile = outdir + '/'+newprefix+basename(infile)
if basename(infile)[:6] == 'boinc_': # A special case.
outfile = outdir + '/boinc_'+newprefix + basename(infile)[6:]
command = PYDIR+'/termini_truncate_fragfile.py %s %d %d %s' % \
(infile,startseq,endseq,outfile)
print(command)
system(command)
| [
"[email protected]"
] | |
fe461e7e82c4c955bb78b8eb572cb70236f500b7 | 9508879fcf1cff718f3fe80502baff8b82c04427 | /misc/divide_and_conquer/max_subarray.py | cb0fa14f92663cf7a62457fc0285511c67efa967 | [] | no_license | davidozhang/hackerrank | e37b4aace7d63c8be10b0d4d2bffb4d34d401d55 | bdc40d6ff3e603949eb294bbc02a1e24a4ba5b80 | refs/heads/master | 2021-05-04T11:31:59.110118 | 2017-11-15T09:17:27 | 2017-11-15T09:17:27 | 47,906,672 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #!/usr/bin/python
def max_subarray(l, left, right):
if left == right:
return l[left]
else:
middle = (left+right)/2
l1 = max_subarray(l, left, middle)
l2 = max_subarray(l, middle+1, right)
return max(l1, l2, max_crossing(l, left, middle, right))
def max_crossing(l, left, middle, right):
    # Maximum sum of a contiguous run that ends exactly at `middle` (scanning left),
    # plus the maximum sum of a contiguous run that starts at `middle`+1 (scanning right).
    left_sum, right_sum = None, None
    running = 0
    left_temp = middle
    while left_temp>=left:
        running += l[left_temp]
        if left_sum is None or running > left_sum:
            left_sum = running
        left_temp -= 1
    running = 0
    right_temp = middle+1
    while right_temp<=right:
        running += l[right_temp]
        if right_sum is None or running > right_sum:
            right_sum = running
        right_temp += 1
    return left_sum + right_sum
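# Illustrative check (hypothetical example input, not read from stdin): for the classic
# list below the best contiguous sum is 6, i.e. the slice [4, -1, 2, 1].
#   >>> max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#   6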
def main():
l = map(int, raw_input().split())
print max_subarray(l, 0, len(l)-1)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
b1265bbbf4df82bff42bdc625d6cc5ff1f518356 | e5838acd890b711d53fa7b37e0405c236dd52bb2 | /trails/feeds/emergingthreatscip.py | 63efdf9f7f92e5e3dc6e6c4843a8a14c2c631072 | [
"MIT"
] | permissive | stamparm/maltrail | 34c40fe593f82c5f78d511c21a1cbe049aa04856 | 21422d7acbdfd1157c0b2188b5050f74d0adecbb | refs/heads/master | 2023-08-31T20:08:48.881765 | 2023-08-31T19:39:55 | 2023-08-31T19:39:55 | 27,561,102 | 5,663 | 1,193 | MIT | 2023-08-14T03:13:03 | 2014-12-04T21:33:46 | Python | UTF-8 | Python | false | false | 655 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2023 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://rules.emergingthreats.net/open/suricata/rules/compromised-ips.txt"
__info__ = "compromised (suspicious)"
__reference__ = "emergingthreats.net"
def fetch():
retval = {}
content = retrieve_content(__url__)
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#') or '.' not in line:
continue
retval[line] = (__info__, __reference__)
return retval
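# Illustrative result shape (the IP shown is a made-up example): fetch() maps each
# listed address to the (__info__, __reference__) pair defined above, e.g.
#   {"1.2.3.4": ("compromised (suspicious)", "emergingthreats.net"), ...}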
| [
"[email protected]"
] | |
7f953edebb9ebf147f2adabc9dbc217311cbcf9e | c3b766858bacfec396b839fd881f719db5ef5fc5 | /setup.py | ef9419515da41c4cfa21ea315f99c875c440ad3e | [
"MIT"
] | permissive | ferchaure/spikesorters | 2e20dcdeac67c4e5b442628fadc851c38fc090d5 | 8577572c63c531a239452cdb48f631ec1f490121 | refs/heads/master | 2021-06-16T07:49:31.758572 | 2021-04-24T15:22:11 | 2021-04-24T15:22:11 | 254,942,645 | 0 | 0 | MIT | 2020-04-11T19:44:04 | 2020-04-11T19:44:03 | null | UTF-8 | Python | false | false | 990 | py | from setuptools import setup, find_packages
d = {}
exec(open("spikesorters/version.py").read(), None, d)
version = d['version']
long_description = open("README.md").read()
pkg_name = "spikesorters"
setup(
name=pkg_name,
version=version,
author="Alessio Buccino, Cole Hurwitz, Samuel Garcia, Jeremy Magland, Matthias Hennig",
author_email="[email protected]",
description="Python wrappers for popular spike sorters",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/SpikeInterface/spikesorters",
packages=find_packages(),
package_data={},
include_package_data=True,
install_requires=[
'numpy',
'spikeextractors>=0.9.4',
'spiketoolkit>=0.7.3',
'requests'
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
)
)
| [
"[email protected]"
] | |
2a36e5cd3f010c2f1259d602719aaf71b79d3889 | a5e56a1f20f41714987fd8567359b9b6ab4b1a97 | /src.baseline/src/chapter19/ks19_07/mainwindow.py | 31df047b8011a03a20b98a8ec8d0fa14e413f893 | [] | no_license | mach8686devops/pyside-example | cc40996b888b71b73334bffecf368c640e367129 | 090710b60fc55dbe3c8fb14a8a2c15150659704a | refs/heads/main | 2023-04-16T08:25:03.055200 | 2021-04-25T14:08:50 | 2021-04-25T14:08:50 | 361,446,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,865 | py | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMainWindow, QAction, QActionGroup, QMenu, QLabel, QFrame, QMessageBox
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtCore import Qt, QFile, QTextStream, pyqtSignal, QThread
from textedit import CTextEdit
import os
from splashscreen import CSplashScreen
class CMainWindow(QMainWindow):
    fileMenu = None # File menu
    editMenu = None # Edit menu
    formatMenu = None # Format menu
    helpMenu = None # Help menu
    alignmentGroup = None # Alignment menu action group
    openAct = None # "Open" menu action
    saveAct = None # "Save" menu action
    exitAct = None # "Exit" menu action
    cutAct = None # "Cut" menu action
    copyAct = None # "Copy" menu action
    pasteAct = None # "Paste" menu action
    boldAct = None # "Bold" menu action
    italicAct = None # "Italic" menu action
    leftAlignAct = None # "Left align" menu action
    rightAlignAct = None # "Right align" menu action
    centerAct = None # "Center" menu action
    setLineSpacingAct = None # "Set line spacing" menu action
    setParagraphSpacingAct = None # "Set paragraph spacing" menu action
    aboutAct = None # "About" menu action
    infoLabel = None # Information label
    fileToolBar = None # "File" toolbar
    editToolBar = None # "Edit" toolbar
    mouseLabel = None # Label showing the mouse position
    textEdit = None # The view (text edit)
sig_progress = pyqtSignal(int)
def __init__(self, splashScreen, parent=None) :
super(CMainWindow, self).__init__(parent)
self.createActions()
self.createMenus()
self.createToolBars()
self.createStatusBar()
self.sig_progress.connect(splashScreen.slot_setProgress)
        QThread.sleep(1) # Simulate a time-consuming operation
self.sig_progress.emit(10)
self.initialize()
self.setWindowTitle('菜单')
self.setMinimumSize(160, 160)
self.resize(480, 320)
def initialize(self):
        # Build the view object
self.textEdit = CTextEdit(self)
file = QFile()
trainDevHome = os.getenv('TRAINDEVHOME')
if None is trainDevHome:
trainDevHome = 'usr/local/gui'
strFile = trainDevHome + '/test/chapter19/ks19_02/input.txt'
file.setFileName(strFile)
strText = str()
if (file.open(QFile.ReadOnly | QFile.Text)) :
input = QTextStream(file)
input.setCodec('UTF-8')
strText = input.readAll()
self.textEdit.setText(strText)
self.setCentralWidget(self.textEdit)
        # Connect signals and slots
self.textEdit.sig_viewMouseMove .connect(self.slot_mouseMoveInView)
self.readData()
    # Simulate time-consuming work during construction
def readData(self):
QThread.sleep(1)
self.sig_progress.emit(30)
QThread.sleep(1)
self.sig_progress.emit(50)
QThread.sleep(1)
self.sig_progress.emit(70)
QThread.sleep(1)
self.sig_progress.emit(100)
def open(self):
self.infoLabel.setText('Invoked <b>File|Open</b>')
def save(self):
self.infoLabel.setText('Invoked <b>File|Save</b>')
def cut(self):
self.infoLabel.setText('Invoked <b>Edit|Cut</b>')
def copy(self):
self.infoLabel.setText('Invoked <b>Edit|Copy</b>')
def paste(self):
self.infoLabel.setText('Invoked <b>Edit|Paste</b>')
def bold(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Bold</b>')
def italic(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Italic</b>')
def leftAlign(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Left Align</b>')
def rightAlign(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Right Align</b>')
def center(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Center</b>')
def setLineSpacing(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Set Line Spacing</b>')
def setParagraphSpacing(self):
self.infoLabel.setText('Invoked <b>Edit|Format|Set Paragraph Spacing</b>')
def about(self):
self.infoLabel.setText('Invoked <b>Help|About</b>')
QMessageBox.about(self,
'About Menu',
'The <b>Menu</b> example shows how to create menu-bar menus and context menus.')
def createActions(self):
self.openAct = QAction(QIcon(':/images/open.png'),'打开...', self)
self.openAct.setShortcuts(QKeySequence.Open)
self.openAct.setStatusTip('Open an existing file')
self.openAct.triggered.connect(self.open)
self.saveAct = QAction('保存', self)
self.saveAct.setShortcuts(QKeySequence.Save)
self.saveAct.setStatusTip('Save the document to disk')
self.saveAct.triggered.connect(self.save)
self.exitAct = QAction('退出', self)
self.exitAct.setShortcuts(QKeySequence.Quit)
self.exitAct.setStatusTip('Exit the application')
self.exitAct.triggered.connect(self.close)
self.cutAct = QAction('剪切', self)
self.cutAct.setShortcuts(QKeySequence.Cut)
self.cutAct.setStatusTip("Cut the current selection's contents to the clipboard")
self.cutAct.triggered.connect(self.cut)
self.copyAct = QAction('复制', self)
self.copyAct.setShortcuts(QKeySequence.Copy)
self.copyAct.setStatusTip("Copy the current selection's contents to the clipboard")
self.copyAct.triggered.connect(self.copy)
self.pasteAct = QAction('粘贴', self)
self.pasteAct.setShortcuts(QKeySequence.Paste)
self.pasteAct.setStatusTip("Paste the clipboard's contents into the current selection")
self.pasteAct.triggered.connect(self.paste)
self.boldAct = QAction('粗体', self)
self.boldAct.setCheckable(True)
self.boldAct.setShortcut(QKeySequence.Bold)
self.boldAct.setStatusTip('Make the text bold')
self.boldAct.triggered.connect(self.bold)
boldFont = self.boldAct.font()
boldFont.setBold(True)
self.boldAct.setFont(boldFont)
self.italicAct = QAction('斜体', self)
self.italicAct.setCheckable(True)
self.italicAct.setShortcut(QKeySequence.Italic)
self.italicAct.setStatusTip('Make the text italic')
self.italicAct.triggered.connect(self.italic)
italicFont = self.italicAct.font()
italicFont.setItalic(True)
self.italicAct.setFont(italicFont)
self.setLineSpacingAct = QAction('行间距...', self)
self.setLineSpacingAct.setStatusTip('Change the gap between the lines of a paragraph')
self.setLineSpacingAct.triggered.connect(self.setLineSpacing)
self.setParagraphSpacingAct = QAction('段间距...', self)
self.setParagraphSpacingAct.setStatusTip('Change the gap between paragraphs')
self.setParagraphSpacingAct.triggered.connect(self.setParagraphSpacing)
self.aboutAct = QAction('关于', self)
self.aboutAct.setStatusTip("Show the application's About box")
self.aboutAct.triggered.connect(self.about)
self.leftAlignAct = QAction('左对齐', self)
self.leftAlignAct.setCheckable(True)
self.leftAlignAct.setShortcut('Ctrl+L')
self.leftAlignAct.setStatusTip('Left align the selected text')
self.leftAlignAct.triggered.connect(self.leftAlign)
self.rightAlignAct = QAction('右对齐', self)
self.rightAlignAct.setCheckable(True)
self.rightAlignAct.setShortcut('Ctrl+R')
self.rightAlignAct.setStatusTip('Right align the selected text')
self.rightAlignAct.triggered.connect(self.rightAlign)
self.centerAct = QAction('居中对齐', self)
self.centerAct.setCheckable(True)
self.centerAct.setShortcut('Ctrl+E')
self.centerAct.setStatusTip('Center the selected text')
self.centerAct.triggered.connect(self.center)
self.alignmentGroup = QActionGroup(self)
self.alignmentGroup.addAction(self.leftAlignAct)
self.alignmentGroup.addAction(self.rightAlignAct)
self.alignmentGroup.addAction(self.centerAct)
self.leftAlignAct.setChecked(True)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu('文件')
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.editMenu = self.menuBar().addMenu('编辑')
self.editMenu.addAction(self.cutAct)
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.pasteAct)
self.editMenu.addSeparator()
self.helpMenu = self.menuBar().addMenu('帮助')
self.helpMenu.addAction(self.aboutAct)
self.formatMenu = self.editMenu.addMenu('格式化')
self.formatMenu.addAction(self.boldAct)
self.formatMenu.addAction(self.italicAct)
self.formatMenu.addSeparator().setText('对齐')
self.formatMenu.addAction(self.leftAlignAct)
self.formatMenu.addAction(self.rightAlignAct)
self.formatMenu.addAction(self.centerAct)
self.formatMenu.addSeparator()
self.formatMenu.addAction(self.setLineSpacingAct)
self.formatMenu.addAction(self.setParagraphSpacingAct)
def createToolBars(self):
self.fileToolBar = self.addToolBar('文件工具条')
self.fileToolBar.setObjectName('file toolbar')
self.fileToolBar.addAction(self.openAct)
self.fileToolBar.addAction(self.saveAct)
self.editToolBar = self.addToolBar('编辑工具条')
self.editToolBar.setObjectName("edit toolbar")
self.editToolBar.addAction(self.cutAct)
self.editToolBar.addAction(self.copyAct)
self.editToolBar.addAction(self.pasteAct)
def createStatusBar(self):
self.infoLabel = QLabel('')
self.infoLabel.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
self.infoLabel.setAlignment(Qt.AlignCenter)
self.statusBar().addPermanentWidget(self.infoLabel)
self.mouseLabel = QLabel('', self.statusBar())
self.mouseLabel.setMinimumWidth(100)
self.statusBar().addPermanentWidget(self.mouseLabel)
self.statusBar().show()
def slot_mouseMoveInView(self, evt):
ptLocal = evt.localPos()
pt = ptLocal.toPoint()
strPos = str.format('{0},{1}', pt.x(), pt.y())
self.mouseLabel.setText(strPos)
| [
"[email protected]"
] | |
b44795123712f010089fc09f81ce2ff9435eb6cb | c849188f25de5cb87d9278aa7cfd0772c698c870 | /account_report/hooks.py | 4916696af10ec7bf8ad6c2c28c86dfe43bf6f541 | [
"MIT"
] | permissive | dineshpanchal93/helpremove | 19c36131dc2d057ddfaf316c5f964cd211878e1b | 37e03e922645d52a7bc5d293fa936b0b82017715 | refs/heads/master | 2020-03-27T10:35:04.729818 | 2019-02-09T07:24:45 | 2019-02-09T07:24:45 | 146,430,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "account_report"
app_title = "Account Report"
app_publisher = "Scantech Laser"
app_description = "Account Report"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = "/assets/account_report/css/account_report.css"
app_include_js = "/assets/account_report/js/account_report.js"
# include js, css files in header of web template
web_include_css = "/assets/account_report/css/account_report.css"
web_include_js = "/assets/account_report/js/account_report.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "account_report.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "account_report.install.before_install"
# after_install = "account_report.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "account_report.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "account_report.tasks.all"
# ],
# "daily": [
# "account_report.tasks.daily"
# ],
# "hourly": [
# "account_report.tasks.hourly"
# ],
# "weekly": [
# "account_report.tasks.weekly"
# ]
# "monthly": [
# "account_report.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "account_report.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "account_report.event.get_events"
# }
website_context = {
"favicon": "/assets/account_report/images/logo.png",
"splash_image": "/assets/account_report/images/logo.png"
}
email_brand_image = "/assets/account_report/images/logo.png"
| [
"[email protected]"
] | |
ef64daf27156245233a072e330507895eb46631f | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-storage/azure/mgmt/storage/v2018_02_01/models/immutability_policy_py3.py | 443d619aebb4752e5b7a550f3121f3b8f72c4077 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,824 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .azure_entity_resource_py3 import AzureEntityResource
class ImmutabilityPolicy(AzureEntityResource):
"""The ImmutabilityPolicy property of a blob container, including Id, resource
name, resource type, Etag.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:ivar etag: Resource Etag.
:vartype etag: str
:param immutability_period_since_creation_in_days: Required. The
immutability period for the blobs in the container since the policy
creation, in days.
:type immutability_period_since_creation_in_days: int
:ivar state: The ImmutabilityPolicy state of a blob container, possible
values include: Locked and Unlocked. Possible values include: 'Locked',
'Unlocked'
:vartype state: str or
~azure.mgmt.storage.v2018_02_01.models.ImmutabilityPolicyState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'immutability_period_since_creation_in_days': {'required': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'immutability_period_since_creation_in_days': {'key': 'properties.immutabilityPeriodSinceCreationInDays', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
}
def __init__(self, *, immutability_period_since_creation_in_days: int, **kwargs) -> None:
super(ImmutabilityPolicy, self).__init__(**kwargs)
self.immutability_period_since_creation_in_days = immutability_period_since_creation_in_days
self.state = None
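# Minimal usage sketch (illustrative; the 30-day period is an assumed example value):
# only the required keyword-only property is supplied by the caller, the read-only
# fields are populated by the service.
#   policy = ImmutabilityPolicy(immutability_period_since_creation_in_days=30)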
| [
"[email protected]"
] | |
6d1ebf844e5baa83b39344681b4d72082c9febf4 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.CJK/Sans_8/pdf_to_json_test_Latn.CJK_Sans_8.py | b43ef12d4a3dacb970cc1f264903b07fa9b8562d | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.CJK/Sans_8/udhr_Latn.CJK_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"[email protected]"
] | |
392606df599ab62daa2196de81f893ba6d951e9a | 3d705ec48c94373817e5f61d3f839988910431e3 | /lib/framework/executor_factory.py | eb8e1df9bd2b302e3f38b0f6e9fbf3d701c5b010 | [] | no_license | namesuqi/zeus | 937d3a6849523ae931162cd02c5a09b7e37ebdd8 | 3445b59b29854b70f25da2950016f135aa2a5204 | refs/heads/master | 2022-07-24T14:42:28.600288 | 2018-03-29T08:03:09 | 2018-03-29T08:03:09 | 127,256,973 | 0 | 0 | null | 2022-07-07T22:57:57 | 2018-03-29T07:53:16 | Python | UTF-8 | Python | false | false | 1,231 | py | # coding=utf-8
"""
The factory of Test Executor
__author__ = 'zengyuetian'
"""
from lib.framework.executor_server import *
from lib.framework.executor_sdk import *
from lib.framework.executor_system import *
from lib.framework.executor_idc import *
from lib.framework.executor_leifeng import *
from lib.framework.executor_live import *
from lib.framework.executor_vod import *
from lib.framework.executor_deploy import *
class ExecutorFactory(object):
"""
create object according to param
"""
@staticmethod
def make_executor(name):
"""
create executor
:param name:
:return:
"""
if name == "server":
return ExecutorServer()
elif name == "sdk":
return ExecutorSdk()
elif name == "idc":
return ExecutorIdc()
elif name == "live":
return ExecutorLive()
elif name == "leifeng":
return ExecutorLeifeng()
elif name == "vod":
return ExecutorVod()
elif name == "deploy":
return ExecutorDeploy()
elif name == "system":
return ExecutorSystem()
elif name == "dummy":
return ExecutorSystem()
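# Minimal usage sketch (illustrative only; "sdk" is just one of the names handled above):
#   executor = ExecutorFactory.make_executor("sdk")  # returns an ExecutorSdk instance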
| [
"[email protected]"
] | |
3c2efb6cba7827d71aa97b9a1dc2926375aafbe1 | d6e8601fa673876cb079b4eeaae6b40427371772 | /neurolib/encoder/normal.py | 7c183e035822c7ceeba4a1b916ce275b9bc21196 | [] | no_license | cunningham-lab/_neurolib_deprecated | 8e70703d32701983a8fed9df489360acba856831 | bf44a6b4c40347caeacd4fd38dd9d1c1680c9a65 | refs/heads/master | 2020-03-30T19:08:58.065181 | 2018-10-26T19:49:12 | 2018-10-26T19:49:12 | 151,530,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,911 | py | # Copyright 2018 Daniel Hernandez Diaz, Columbia University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import fully_connected #pylint: disable=no-name-in-module
from neurolib.encoder.basic import InnerNode
from neurolib.encoder import MultivariateNormalTriL # @UnresolvedImport
act_fn_dict = {'relu' : tf.nn.relu,
'leaky_relu' : tf.nn.leaky_relu}
# pylint: disable=bad-indentation, no-member, protected-access
class NormalTriLNode(InnerNode):
"""
"""
num_expected_inputs = 1
num_expected_outputs = 3
def __init__(self,
label,
num_features,
builder,
name=None,
batch_size=1,
**dirs):
"""
Initialize a NormalInputNode
Args:
label (int): A unique identifier for the node
num_features (int): The size of the last dimension.
builder (Builder): An instance of Builder necessary to declare the
secondary output nodes
name (str): A unique string identifier for this node
batch_size (int): Self-explanatory.
dirs (dict): A set of user specified directives for constructing this
node
"""
self.name = "NormalTril_" + str(label) if name is None else name
self.builder = builder
self.num_declared_inputs = 0
self.batch_size = batch_size
super(NormalTriLNode, self).__init__(label)
self.num_features = num_features
self.main_oshape = self._oslot_to_shape[0] = [batch_size] + [num_features]
self._update_directives(**dirs)
self.free_oslots = list(range(self.num_expected_outputs))
self._declare_secondary_outputs()
def _declare_secondary_outputs(self):
"""
Declare outputs for the statistics of the distribution (mean and standard
deviation)
"""
main_oshape = self._oslot_to_shape[0]
# Mean oslot
self._oslot_to_shape[1] = main_oshape
o1 = self.builder.addOutput(name=self.directives['output_mean_name'])
self.builder.addDirectedLink(self, o1, oslot=1)
# Stddev oslot
self._oslot_to_shape[2] = main_oshape + [main_oshape[-1]]
o2 = self.builder.addOutput(name=self.directives['output_cholesky_name'])
print('_oslot_to_shape', self._oslot_to_shape)
self.builder.addDirectedLink(self, o2, oslot=2)
def _update_directives(self, **dirs):
"""
Update the node directives
"""
self.directives = {'num_layers' : 2,
'num_nodes' : 128,
'activation' : 'leaky_relu',
'net_grow_rate' : 1.0,
'share_params' : False,
'output_mean_name' : self.name + '_mean',
'output_cholesky_name' : self.name + '_cholesky'}
self.directives.update(dirs)
# Deal with directives that map to tensorflow objects hidden from the client
self.directives['activation'] = act_fn_dict[self.directives['activation']]
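  # Illustrative directive override (assumed usage): keyword directives passed to the
  # constructor replace the defaults above, e.g.
  #   NormalTriLNode(label, num_features, builder, num_layers=3, activation='relu')
  # where 'relu' must be a key of the module-level act_fn_dict.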
def _build(self, inputs=None):
"""
Builds the graph corresponding to a NormalTriL encoder.
TODO: Expand this a lot, many more specs necessary.
"""
dirs = self.directives
if inputs is not None:
raise NotImplementedError("") # TODO: Should I provide this option? meh
num_layers = dirs['num_layers']
num_nodes = dirs['num_nodes']
activation = dirs['activation']
net_grow_rate = dirs['net_grow_rate']
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
# Define the Means
x_in = self._islot_to_itensor[0]
output_dim = self._oslot_to_shape[0][-1] # Last dim
hid_layer = fully_connected(x_in, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
for _ in range(num_layers-1):
num_nodes = int(num_nodes*net_grow_rate)
hid_layer = fully_connected(hid_layer, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
mean = fully_connected(hid_layer, output_dim, activation_fn=None)
# Define the Cholesky Lower Decomposition
if dirs['share_params']:
output_chol = fully_connected(hid_layer, output_dim**2, activation_fn=None)
else:
hid_layer = fully_connected(x_in, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
for _ in range(num_layers-1):
num_nodes = int(num_nodes*net_grow_rate)
hid_layer = fully_connected(hid_layer, num_nodes, activation_fn=activation,
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(num_nodes)))
output_chol = fully_connected(hid_layer, output_dim**2,
activation_fn=None,
weights_initializer = tf.random_normal_initializer(stddev=1e-4),
# normalizer_fn=lambda x : x/tf.sqrt(x**2),
biases_initializer=tf.random_normal_initializer(stddev=1/np.sqrt(output_dim**2)))
output_chol = tf.reshape(output_chol,
# shape=[self.batch_size, output_dim, output_dim])
shape=[-1, output_dim, output_dim])
if 'output_mean_name' in self.directives:
mean_name = self.directives['output_mean_name']
else:
mean_name = "Mean_" + str(self.label) + '_0'
if 'output_cholesky_name' in self.directives:
cholesky_name = self.directives['output_cholesky_name']
else:
cholesky_name = 'CholTril_' + str(self.label) + '_0'
cholesky_tril = tf.identity(output_chol, name=cholesky_name)
# Get the tensorflow distribution for this node
self.dist = MultivariateNormalTriL(loc=mean, scale_tril=cholesky_tril)
# Fill the oslots
self._oslot_to_otensor[0] = self.dist.sample(name='Out' +
str(self.label) + '_0')
self._oslot_to_otensor[1] = tf.identity(mean, name=mean_name)
self._oslot_to_otensor[2] = cholesky_tril
self._is_built = True
def _log_prob(self, ipt):
"""
Define the loglikelihood of the distribution
"""
return self.dist.log_prob(ipt) | [
"[email protected]"
] | |
0ba77e2df756227b36206e5f571302ee7607a904 | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/openflowlayer3learningrate/testconfig/testconfig.py | 8610e460cf2b0510e643c2f4857f4fd09141ad33 | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,490 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class TestConfig(Base):
"""The TestConfig class encapsulates a required testConfig node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the TestConfig property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'testConfig'
def __init__(self, parent):
super(TestConfig, self).__init__(parent)
@property
def AddrRateNumFrames(self):
"""Indicates the address rate in number of frames.
Returns:
number
"""
return self._get_attribute('addrRateNumFrames')
@AddrRateNumFrames.setter
def AddrRateNumFrames(self, value):
self._set_attribute('addrRateNumFrames', value)
@property
def AddrRateValidationFpsRate(self):
"""Indicates that the step rate of the load unit is fpsRate.
Returns:
str
"""
return self._get_attribute('addrRateValidationFpsRate')
@AddrRateValidationFpsRate.setter
def AddrRateValidationFpsRate(self, value):
self._set_attribute('addrRateValidationFpsRate', value)
@property
def AddrRateValidationRate(self):
"""Indicates the address rate validation rate.
Returns:
number
"""
return self._get_attribute('addrRateValidationRate')
@AddrRateValidationRate.setter
def AddrRateValidationRate(self, value):
self._set_attribute('addrRateValidationRate', value)
@property
def AddrRateValidationRateUnit(self):
"""Indicates the address rate validation rate unit.
Returns:
str(fps|percentMaxRate)
"""
return self._get_attribute('addrRateValidationRateUnit')
@AddrRateValidationRateUnit.setter
def AddrRateValidationRateUnit(self, value):
self._set_attribute('addrRateValidationRateUnit', value)
@property
def AddressRatePassCriteriaMode(self):
"""Indicates the address rate pass criteria mode.
Returns:
str
"""
return self._get_attribute('addressRatePassCriteriaMode')
@AddressRatePassCriteriaMode.setter
def AddressRatePassCriteriaMode(self, value):
self._set_attribute('addressRatePassCriteriaMode', value)
@property
def AddressRatePassFailValue(self):
"""Indicates the Address Rate value.
Returns:
number
"""
return self._get_attribute('addressRatePassFailValue')
@AddressRatePassFailValue.setter
def AddressRatePassFailValue(self, value):
self._set_attribute('addressRatePassFailValue', value)
@property
def BinaryBackoff(self):
"""The binary search interval through which the next iteration's rate is obtained.
Returns:
number
"""
return self._get_attribute('binaryBackoff')
@BinaryBackoff.setter
def BinaryBackoff(self, value):
self._set_attribute('binaryBackoff', value)
@property
def BinaryLoadUnit(self):
"""Indicates the binary load unit.
Returns:
str(fpsRate)
"""
return self._get_attribute('binaryLoadUnit')
@BinaryLoadUnit.setter
def BinaryLoadUnit(self, value):
self._set_attribute('binaryLoadUnit', value)
@property
def BinaryResolution(self):
"""Indicates the resolution during the binary search.
Returns:
number
"""
return self._get_attribute('binaryResolution')
@BinaryResolution.setter
def BinaryResolution(self, value):
self._set_attribute('binaryResolution', value)
@property
def BinarySearchType(self):
"""Indicates the search type for a Binary search.
Returns:
str(linear)
"""
return self._get_attribute('binarySearchType')
@BinarySearchType.setter
def BinarySearchType(self, value):
self._set_attribute('binarySearchType', value)
@property
def CacheTimeout(self):
"""Indicates cache time out.
Returns:
number
"""
return self._get_attribute('cacheTimeout')
@CacheTimeout.setter
def CacheTimeout(self, value):
self._set_attribute('cacheTimeout', value)
@property
def DelayAfterTransmit(self):
"""A delay that is inserted after transmit is complete, before it continues with the test.
Returns:
number
"""
return self._get_attribute('delayAfterTransmit')
@DelayAfterTransmit.setter
def DelayAfterTransmit(self, value):
self._set_attribute('delayAfterTransmit', value)
@property
def EnableAddressRatePassFail(self):
"""If true, allows Address Rate to be used as a Pass/Fail criteria.
Returns:
bool
"""
return self._get_attribute('enableAddressRatePassFail')
@EnableAddressRatePassFail.setter
def EnableAddressRatePassFail(self, value):
self._set_attribute('enableAddressRatePassFail', value)
@property
def EnableCacheTimeout(self):
"""If true, enables cache time out.
Returns:
bool
"""
return self._get_attribute('enableCacheTimeout')
@EnableCacheTimeout.setter
def EnableCacheTimeout(self, value):
self._set_attribute('enableCacheTimeout', value)
@property
def EnableDaD(self):
"""If true, a Neighbor Solicitation is sent from the interface for Duplicate Address Detection (DAD), to confirm that no other node on the link has the same address.
Returns:
bool
"""
return self._get_attribute('enableDaD')
@EnableDaD.setter
def EnableDaD(self, value):
self._set_attribute('enableDaD', value)
@property
def EnableDropLink(self):
"""If true, allows Route Range to be dropped.
Returns:
bool
"""
return self._get_attribute('enableDropLink')
@EnableDropLink.setter
def EnableDropLink(self, value):
self._set_attribute('enableDropLink', value)
@property
def EnableExtraIterations(self):
"""If true, enables extra iterations. Sets extra iteration offset values.
Returns:
bool
"""
return self._get_attribute('enableExtraIterations')
@EnableExtraIterations.setter
def EnableExtraIterations(self, value):
self._set_attribute('enableExtraIterations', value)
@property
def EnableMinFrameSize(self):
"""If true, allows to set minimum frame size.
Returns:
bool
"""
return self._get_attribute('enableMinFrameSize')
@EnableMinFrameSize.setter
def EnableMinFrameSize(self, value):
self._set_attribute('enableMinFrameSize', value)
@property
def ExtraIterationOffsets(self):
"""Sets extra iteration offset values.
Returns:
str
"""
return self._get_attribute('extraIterationOffsets')
@ExtraIterationOffsets.setter
def ExtraIterationOffsets(self, value):
self._set_attribute('extraIterationOffsets', value)
@property
def FrameSizeMode(self):
"""Indicates the frame size mode.
Returns:
str(fixed)
"""
return self._get_attribute('frameSizeMode')
@FrameSizeMode.setter
def FrameSizeMode(self, value):
self._set_attribute('frameSizeMode', value)
@property
def Framesize(self):
"""The frame size used by the service.
Returns:
str
"""
return self._get_attribute('framesize')
@Framesize.setter
def Framesize(self, value):
self._set_attribute('framesize', value)
@property
def FramesizeFixedValue(self):
"""It signifies the frame size fixed value.
Returns:
number
"""
return self._get_attribute('framesizeFixedValue')
@FramesizeFixedValue.setter
def FramesizeFixedValue(self, value):
self._set_attribute('framesizeFixedValue', value)
@property
def FramesizeList(self):
"""The list of the available frame size.
Returns:
list(str)
"""
return self._get_attribute('framesizeList')
@FramesizeList.setter
def FramesizeList(self, value):
self._set_attribute('framesizeList', value)
@property
def InitialBinaryLoadRate(self):
"""Indicates the initial binary load rate.
Returns:
number
"""
return self._get_attribute('initialBinaryLoadRate')
@InitialBinaryLoadRate.setter
def InitialBinaryLoadRate(self, value):
self._set_attribute('initialBinaryLoadRate', value)
@property
def Layer3AddressCount(self):
"""Indicates the Layer 3 address count.
Returns:
number
"""
return self._get_attribute('layer3AddressCount')
@Layer3AddressCount.setter
def Layer3AddressCount(self, value):
self._set_attribute('layer3AddressCount', value)
@property
def LoadRateList(self):
"""Enter the Load Rate List.
Returns:
str
"""
return self._get_attribute('loadRateList')
@LoadRateList.setter
def LoadRateList(self, value):
self._set_attribute('loadRateList', value)
@property
def LoadType(self):
"""Indicates the load type.
Returns:
str(binary)
"""
return self._get_attribute('loadType')
@LoadType.setter
def LoadType(self, value):
self._set_attribute('loadType', value)
@property
def LoadUnit(self):
"""Indicates the load unit.
Returns:
str(fpsRate)
"""
return self._get_attribute('loadUnit')
@LoadUnit.setter
def LoadUnit(self, value):
self._set_attribute('loadUnit', value)
@property
def MapType(self):
"""Indicates the traffic map type.
Returns:
str
"""
return self._get_attribute('mapType')
@MapType.setter
def MapType(self, value):
self._set_attribute('mapType', value)
@property
def MaxBinaryLoadRate(self):
"""Indicates the maximum binary load rate.
Returns:
number
"""
return self._get_attribute('maxBinaryLoadRate')
@MaxBinaryLoadRate.setter
def MaxBinaryLoadRate(self, value):
self._set_attribute('maxBinaryLoadRate', value)
@property
def MaxOutstandingRequests(self):
"""Indicates maximum outstanding request.
Returns:
number
"""
return self._get_attribute('maxOutstandingRequests')
@MaxOutstandingRequests.setter
def MaxOutstandingRequests(self, value):
self._set_attribute('maxOutstandingRequests', value)
@property
def MinBinaryLoadRate(self):
"""Indicates the minimum binary load rate.
Returns:
number
"""
return self._get_attribute('minBinaryLoadRate')
@MinBinaryLoadRate.setter
def MinBinaryLoadRate(self, value):
self._set_attribute('minBinaryLoadRate', value)
@property
def Numtrials(self):
"""Number of trials that can be run.
Returns:
number
"""
return self._get_attribute('numtrials')
@Numtrials.setter
def Numtrials(self, value):
self._set_attribute('numtrials', value)
@property
def PortDelayEnabled(self):
"""NOT DEFINED
Returns:
bool
"""
return self._get_attribute('portDelayEnabled')
@PortDelayEnabled.setter
def PortDelayEnabled(self, value):
self._set_attribute('portDelayEnabled', value)
@property
def PortDelayUnit(self):
"""Sets the port delay unit in which it will be measured.
Returns:
str(bytes|nanoseconds)
"""
return self._get_attribute('portDelayUnit')
@PortDelayUnit.setter
def PortDelayUnit(self, value):
self._set_attribute('portDelayUnit', value)
@property
def PortDelayValue(self):
"""Sets the port delay value.
Returns:
number
"""
return self._get_attribute('portDelayValue')
@PortDelayValue.setter
def PortDelayValue(self, value):
self._set_attribute('portDelayValue', value)
@property
def PortDownTime(self):
"""During flapping, the amount of time during which the routes in the Route Range are withdrawn/down.
Returns:
number
"""
return self._get_attribute('portDownTime')
@PortDownTime.setter
def PortDownTime(self, value):
self._set_attribute('portDownTime', value)
@property
def ProtocolItem(self):
"""Protocol Items
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/vport|/api/v1/sessions/1/ixnetwork/vport?deepchild=lan])
"""
return self._get_attribute('protocolItem')
@ProtocolItem.setter
def ProtocolItem(self, value):
self._set_attribute('protocolItem', value)
@property
def StaggeredStart(self):
"""Enables a staggered start to traffic transmit.
Returns:
bool
"""
return self._get_attribute('staggeredStart')
@StaggeredStart.setter
def StaggeredStart(self, value):
self._set_attribute('staggeredStart', value)
@property
def SupportedTrafficTypes(self):
"""The traffic types supported.
Returns:
str
"""
return self._get_attribute('supportedTrafficTypes')
@SupportedTrafficTypes.setter
def SupportedTrafficTypes(self, value):
self._set_attribute('supportedTrafficTypes', value)
@property
def TxDelay(self):
"""Specifies the amount of delay after every transmit.
Returns:
number
"""
return self._get_attribute('txDelay')
@TxDelay.setter
def TxDelay(self, value):
self._set_attribute('txDelay', value)
def update(self, AddrRateNumFrames=None, AddrRateValidationFpsRate=None, AddrRateValidationRate=None, AddrRateValidationRateUnit=None, AddressRatePassCriteriaMode=None, AddressRatePassFailValue=None, BinaryBackoff=None, BinaryLoadUnit=None, BinaryResolution=None, BinarySearchType=None, CacheTimeout=None, DelayAfterTransmit=None, EnableAddressRatePassFail=None, EnableCacheTimeout=None, EnableDaD=None, EnableDropLink=None, EnableExtraIterations=None, EnableMinFrameSize=None, ExtraIterationOffsets=None, FrameSizeMode=None, Framesize=None, FramesizeFixedValue=None, FramesizeList=None, InitialBinaryLoadRate=None, Layer3AddressCount=None, LoadRateList=None, LoadType=None, LoadUnit=None, MapType=None, MaxBinaryLoadRate=None, MaxOutstandingRequests=None, MinBinaryLoadRate=None, Numtrials=None, PortDelayEnabled=None, PortDelayUnit=None, PortDelayValue=None, PortDownTime=None, ProtocolItem=None, StaggeredStart=None, SupportedTrafficTypes=None, TxDelay=None):
"""Updates a child instance of testConfig on the server.
Args:
AddrRateNumFrames (number): Indicates the address rate in number of frames.
AddrRateValidationFpsRate (str): Indicates that the step rate of the load unit is fpsRate.
AddrRateValidationRate (number): Indicates the address rate validation rate.
AddrRateValidationRateUnit (str(fps|percentMaxRate)): Indicates the address rate validation rate unit.
AddressRatePassCriteriaMode (str): Indicates the address rate pass criteria mode.
AddressRatePassFailValue (number): Indicates the Address Rate value.
BinaryBackoff (number): The binary search interval through which the next iteration's rate is obtained.
BinaryLoadUnit (str(fpsRate)): Indicates the binary load unit.
BinaryResolution (number): Indicates the resolution during the binary search.
BinarySearchType (str(linear)): Indicates the search type for a Binary search.
CacheTimeout (number): Indicates cache time out.
DelayAfterTransmit (number): A delay that is inserted after transmit is complete, before it continues with the test.
EnableAddressRatePassFail (bool): If true, allows Address Rate to be used as a Pass/Fail criteria.
EnableCacheTimeout (bool): If true, enables cache time out.
EnableDaD (bool): If true, a Neighbor Solicitation is sent from the interface for Duplicate Address Detection (DAD), to confirm that no other node on the link has the same address.
EnableDropLink (bool): If true, allows Route Range to be dropped.
EnableExtraIterations (bool): If true, enables extra iterations. Sets extra iteration offset values.
EnableMinFrameSize (bool): If true, allows to set minimum frame size.
ExtraIterationOffsets (str): Sets extra iteration offset values.
FrameSizeMode (str(fixed)): Indicates the frame size mode.
Framesize (str): The frame size used by the service.
FramesizeFixedValue (number): It signifies the frame size fixed value.
FramesizeList (list(str)): The list of the available frame size.
InitialBinaryLoadRate (number): Indicates the initial binary load rate.
Layer3AddressCount (number): Indicates the Layer 3 address count.
LoadRateList (str): Enter the Load Rate List.
LoadType (str(binary)): Indicates the load type.
LoadUnit (str(fpsRate)): Indicates the load unit.
MapType (str): Indicates the traffic map type.
MaxBinaryLoadRate (number): Indicates the maximum binary load rate.
MaxOutstandingRequests (number): Indicates maximum outstanding request.
MinBinaryLoadRate (number): Indicates the minimum binary load rate.
Numtrials (number): Number of trials that can be run.
PortDelayEnabled (bool): NOT DEFINED
PortDelayUnit (str(bytes|nanoseconds)): Sets the port delay unit in which it will be measured.
PortDelayValue (number): Sets the port delay value.
PortDownTime (number): During flapping, the amount of time during which the routes in the Route Range are withdrawn/down.
ProtocolItem (list(str[None|/api/v1/sessions/1/ixnetwork/vport|/api/v1/sessions/1/ixnetwork/vport?deepchild=lan])): Protocol Items
StaggeredStart (bool): Enables a staggered start to traffic transmit.
SupportedTrafficTypes (str): The traffic types supported.
TxDelay (number): Specifies the amount of delay after every transmit.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def Apply(self):
"""Executes the apply operation on the server.
Applies the specified Quick Test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self):
"""Executes the applyAsync operation on the server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self):
"""Executes the applyAsyncResult operation on the server.
Returns:
bool:
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self):
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self):
"""Executes the generateReport operation on the server.
        Generate a PDF report for the last successful test run.
Returns:
str: This method is asynchronous and has no return value.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
run()list
Returns:
list(str): This method is synchronous and returns the result of the test.
run(InputParameters:string)list
Args:
args[0] is InputParameters (str): The input arguments of the test.
Returns:
list(str): This method is synchronous and returns the result of the test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
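    # Illustrative usage only (an assumption, not part of the generated API
    # surface): given `qt`, an instance of this Quick Test obtained from a
    # connected IxNetwork session, `qt.Run()` executes the test with default
    # input parameters and blocks until it finishes, returning the result list,
    # while `qt.Run('<InputParameters>')` passes explicit input parameters.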
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
start()
start(InputParameters:string)
Args:
args[0] is InputParameters (str): The input arguments of the test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self):
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
Returns:
list(str): This method is synchronous and returns the result of the test.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('waitForTest', payload=payload, response_object=None)
| [
"[email protected]"
] | |
f03ff950e29f53407416b268cd4acfd7d155443a | bbf7787d94e97d4e0c9bceb46203c08939e6e67d | /django-python/static-folder/login/views.py | 0e51c75484eddd9ea3a346b0875ef9d67ba47164 | [] | no_license | llanoxdewa/python | 076e6fa3ed2128c21cdd26c1be6bc82ee6917f9c | 6586170c5f48827a5e1bcb35656870b5e4eed732 | refs/heads/main | 2023-06-16T05:31:52.494796 | 2021-07-09T09:04:30 | 2021-07-09T09:04:30 | 362,782,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from django.shortcuts import render
def loginPage(req):
data = {
'title':'login-page',
'header':'selamat datang di login page',
'fileG':"login/img/pemandangan-laut-es.png",
'fileCss':"login/css/style.css"
}
return render(req,'login/index.html',data)
| [
"[email protected]"
] | |
9c7f7b6ddabf4942b20c4d5a3a928eb8dcdb991a | 1424812c4f211d3d5e356e8b3889a689162062f3 | /arcade/python/07_simple_sort.py | 0effd8a406a26d4473e379c6084693df95b8d82e | [] | no_license | nazomeku/codefights | cb7d3c40be0809695ec524a87c88dbebcf5b47bc | b23f6816f9b5b0720feac1c49c31163923e0a554 | refs/heads/master | 2021-01-22T12:49:35.905165 | 2017-11-21T19:03:37 | 2017-11-21T19:03:37 | 102,357,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """Write a function that, given an array ofintegers arr, sorts its elements
in ascending order."""
def simple_sort(arr):
n = len(arr)
for i in range(n):
j = 0
stop = n - i
while j < stop - 1:
if arr[j] > arr[j + 1]:
temp = arr[j]
arr[j] = arr[j + 1]
arr[j + 1] = temp
j += 1
return arr
| [
"[email protected]"
] | |
9861697b7c1a4508bd7837414f8d091fc945c6be | 57b4ee27801c23cdd6a6d974dbc278f49740f770 | /re100-l.py | 55bc9b3a71d50c10ac3093535afa7ffe0b7a4c4b | [] | no_license | zwhubuntu/CTF-chal-code | 4de9fc0fe9ee85eab3906b36b8798ec959db628c | 8c912e165f9cc294b3b85fab3d776cd63acc203e | refs/heads/master | 2021-01-20T18:39:26.961563 | 2017-09-25T14:07:56 | 2017-09-25T14:07:56 | 62,563,092 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | '''
@author: wenhuizone
'''
a=[23,5,1,2,4,6,734,3,12,98]
tmp=''
output=''
'''
for ( i = 1; i != 10; ++i )
{
v5 = *(&v6 + i);
for ( j = i - 1; j >= 0 && *(&v6 + j) < v5; --j )
*(&v6 + j + 1) = *(&v6 + j);
*(&v6 + j + 1) = v5;
}
'''
for i in range(1,len(a)):
    tmp=a[i]
    # faithful translation of the decompiled C loop above (descending insertion sort)
    j=i-1
    while j>=0 and a[j]<tmp:
        a[j+1]=a[j]
        j-=1
    a[j+1]=tmp
for i in range(0,len(a)):
output+=chr(a[i])
print output | [
"[email protected]"
] | |
4ead3a4d6b2dd62eb5e2c44cd2fdb8c23e30b661 | b424c3262c9eacf8dd4230019eba7e05a9b95461 | /.history/ndn_server_20200530135406.py | 840339130aa5f97be97960d50d1841aebddd005c | [] | no_license | leonerii/aer_tp | 30e47f29bcda69512718a6279a7cad32e9a01b14 | d8f46b188b5be9f315dd155ed147880ce7dce169 | refs/heads/master | 2022-09-30T03:27:24.375971 | 2020-06-04T14:23:16 | 2020-06-04T14:23:16 | 245,219,806 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | from threading import Thread
from ndn_receive_handler import Receive_Handler
from ndn_hello_sender import NDN_HelloSender
import socket
class NDN_Server(Thread):
def __init__(self, localhost, port=9999, data_ids={}):
Thread.__init__(self)
self.localhost = localhost
self.port = port
self.data_ids = data_ids
def run(self):
        # initialize the PIT, FIB and CS tables
        self.data_ids = {
            '104.continente': '',
            '101.A3': ''
        }
        # assumption: the original draft filled the FIB from an undefined
        # self.msg, so entries start empty here and are expected to be
        # learned from received messages instead
        self.fib = {
            '104.continente': '',
            '101.A3': ''
        }
        self.cs = {}
        self.pit = {}
        if self.data_ids:
            for key, value in self.data_ids.items():
                self.fib[key] = value
                self.cs[key] = ''
        # create the TCP server socket
tcp_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
tcp_socket.bind((self.localhost, self.port))
# Receiving NDN Messages
while True:
rcv_msg = self.sock.recvfrom(10240)
ndn_handler = Receive_Handler(
self.lock, self.pit, self.fib,
self.cs, self.conn, self.queue, self.localhost,
self.udp_port
)
ndn_handler.start()
# Send NDN HELLO messages
ndn_hello_sender = NDN_HelloSender(
self.fib, self.lock,self.localhost,
self.hello_interval, self.cs,
self.mcast_group, self.mcast_port
)
ndn_hello_sender.start()
else:
print('data_ids is empty')
| [
"[email protected]"
] | |
a048bde9c8c91cb49b73978b44bfbf744c108af1 | a2af438d5180922fb55b0805f9702d4b93103202 | /setup.py | 0001d547f35d284ce13cca0cfcc1883359573c59 | [
"WTFPL"
] | permissive | JJediny/django-leaflet-storage | cff60100a8d721d202bb913051dc2b1abd89a53c | eb4dd4632f09241255bc13e30970ec55fafed816 | refs/heads/master | 2021-01-18T10:57:16.198138 | 2015-05-08T04:22:06 | 2015-05-08T04:22:06 | 32,661,595 | 0 | 2 | null | 2015-03-22T04:21:35 | 2015-03-22T04:21:35 | null | UTF-8 | Python | false | false | 1,342 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import setup, find_packages
import leaflet_storage
long_description = codecs.open('README.rst', "r", "utf-8").read()
with open('requirements.pip') as reqs:
install_requires = [
line for line in reqs.read().split('\n') if (line and not
line.startswith(('--', 'git')))
]
setup(
name="django-leaflet-storage",
version=leaflet_storage.__version__,
author=leaflet_storage.__author__,
author_email=leaflet_storage.__contact__,
description=leaflet_storage.__doc__,
keywords="django leaflet geodjango",
url=leaflet_storage.__homepage__,
download_url="https://github.com/yohanboniface/django-leaflet-storage/downloads",
packages=find_packages(),
include_package_data=True,
platforms=["any"],
zip_safe=True,
install_requires=install_requires,
long_description=long_description,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
#"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
],
)
| [
"[email protected]"
] | |
14bdf727a834b4e51ab11f617e8bd79033ee437b | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/medium/6_3.py | c29fdaedebb04ad1825458082585935d28145298 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,845 | py | Integrating TinyMCE with Django
TinyMCE is an online rich text editor that is fully flexible and provides
customisation. It is mostly used to capture dynamic data such as articles in
GFG and much more, since there is no static database for posts.
**Installation –**
To integrate it with a Django web app or website, you first need to install
its pip package:
pip install django-tinymce
**Integrate with Django Project –**
Add TinyMCE as an individual app in settings.py:
INSTALLED_APPS = [
...
'tinymce',
...
]
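Depending on the django-tinymce version and the features you enable (for
example the spell checker), the package's URL patterns may also need to be
included in your project's urls.py. A minimal, illustrative addition (your
project's urls.py layout may differ):
    from django.urls import include, path
    urlpatterns = [
        ...
        path('tinymce/', include('tinymce.urls')),
    ]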
Also add a default configuration for the TinyMCE editor in settings.py:
TINYMCE_DEFAULT_CONFIG = {
'cleanup_on_startup': True,
'custom_undo_redo_levels': 20,
'selector': 'textarea',
'theme': 'silver',
'plugins': '''
textcolor save link image media preview codesample contextmenu
table code lists fullscreen insertdatetime nonbreaking
contextmenu directionality searchreplace wordcount visualblocks
visualchars code fullscreen autolink lists charmap print hr
anchor pagebreak
''',
'toolbar1': '''
fullscreen preview bold italic underline | fontselect,
fontsizeselect | forecolor backcolor | alignleft alignright |
aligncenter alignjustify | indent outdent | bullist numlist table |
| link image media | codesample |
''',
'toolbar2': '''
visualblocks visualchars |
charmap hr pagebreak nonbreaking anchor | code |
''',
'contextmenu': 'formats | link image',
'menubar': True,
'statusbar': True,
}
Here, in the configuration dictionary, you can customise the editor by
changing values such as the theme and many more.
Setting up TinyMCE is done. Now, to bring it into action, we need a forms.py
file with some required values, such as the size of the input field; it is
used when displaying content on an HTML page.
from django import forms
from tinymce import TinyMCE
from .models import _your_model_
class TinyMCEWidget(TinyMCE):
def use_required_attribute(self, *args):
return False
class PostForm(forms.ModelForm):
content = forms.CharField(
widget=TinyMCEWidget(
            attrs={'required': False, 'cols': 30, 'rows': 10}
)
)
class Meta:
model = _your_model_
fields = '__all__'
The last step is to add an HTMLField to your model. You can also use a
different field; check them out on the official website.
...
from tinymce.models import HTMLField
class article(models.Model):
    ...
content = HTMLField()
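For the editor to actually show up in the Django admin (as in the screenshot
below), the model also has to be registered in the app's admin.py. A minimal
sketch, using the model name from the snippet above:
    from django.contrib import admin
    from .models import article
    admin.site.register(article)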
And it's all set. Just make migrations to see the changes in the admin page by
running the following commands:
python manage.py makemigrations
python manage.py migrate
Now check it in the admin area by running the server:
python manage.py runserver
**Output –**
Here is how it will look (the appearance may differ slightly):
[Image: Editor in admin area]
| [
"[email protected]"
] | |
c98d726a4abb22a8daeee2ba7c22d6dde58d525e | 7858da232b9dbfb9c32d6900de51e14e5d48e241 | /lesson_7_3_2.py | 1c43af55a0b611e9985a9c1383853dc4ac62717a | [] | no_license | Mameluke8888/QA_Automation_Lesson_7_3 | 4069e202ca3f5a0de1f1a0734654f7fd19e12ed5 | 9d2b5735da2fe4850c15236e675cc48b24d16a1d | refs/heads/main | 2023-04-27T19:32:48.868635 | 2021-05-06T09:06:10 | 2021-05-06T09:06:10 | 364,848,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # Exercise #2
# Find the mistake in the following code snippet and correct it:
# corrected snippet
def compute_patterns(inputs=None, pattern="new pattern"):
if inputs is None:
inputs = []
inputs.append(pattern)
patterns = ["a list based on "] + inputs
return patterns
# just some tests - you can remove them if you want
print("".join(compute_patterns()))
print("".join(compute_patterns()))
print("".join(compute_patterns()))
test_inputs = []
print(" ".join(compute_patterns(test_inputs, "very new pattern")))
print(" ".join(compute_patterns(test_inputs, "super new pattern")))
print(" ".join(compute_patterns(test_inputs, "super duper new pattern")))
print("".join(compute_patterns()))
| [
"[email protected]"
] | |
f4a3176508cead2efd0dac5166b08d926c5466c8 | 3f7d5999bb7e5a75454c8df2c5a8adcd1a8341ff | /tests/unit/modules/network/fortios/test_fortios_system_tos_based_priority.py | 830074ea6df0b233f478eb88f93417570258ce60 | [] | no_license | ansible-collection-migration/ansible.fortios | f7b1a7a0d4b69c832403bee9eb00d99f3be65e74 | edad6448f7ff4da05a6c856b0e7e3becd0460f31 | refs/heads/master | 2020-12-18T13:08:46.739473 | 2020-02-03T22:10:49 | 2020-02-03T22:10:49 | 235,393,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,345 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.ansible.fortios.plugins.modules import fortios_system_tos_based_priority
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.modules.fortios_system_tos_based_priority.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_tos_based_priority_creation(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_tos_based_priority': {
'id': '3',
'priority': 'low',
'tos': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_tos_based_priority.fortios_system(input_data, fos_instance)
expected_data = {
'id': '3',
'priority': 'low',
'tos': '5'
}
set_method_mock.assert_called_with('system', 'tos-based-priority', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_tos_based_priority_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_tos_based_priority': {
'id': '3',
'priority': 'low',
'tos': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_tos_based_priority.fortios_system(input_data, fos_instance)
expected_data = {
'id': '3',
'priority': 'low',
'tos': '5'
}
set_method_mock.assert_called_with('system', 'tos-based-priority', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_tos_based_priority_removal(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_tos_based_priority': {
'id': '3',
'priority': 'low',
'tos': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_tos_based_priority.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'tos-based-priority', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_tos_based_priority_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_tos_based_priority': {
'id': '3',
'priority': 'low',
'tos': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_tos_based_priority.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'tos-based-priority', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_tos_based_priority_idempotent(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_tos_based_priority': {
'id': '3',
'priority': 'low',
'tos': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_tos_based_priority.fortios_system(input_data, fos_instance)
expected_data = {
'id': '3',
'priority': 'low',
'tos': '5'
}
set_method_mock.assert_called_with('system', 'tos-based-priority', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_tos_based_priority_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_tos_based_priority': {
'random_attribute_not_valid': 'tag',
'id': '3',
'priority': 'low',
'tos': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_tos_based_priority.fortios_system(input_data, fos_instance)
expected_data = {
'id': '3',
'priority': 'low',
'tos': '5'
}
set_method_mock.assert_called_with('system', 'tos-based-priority', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| [
"[email protected]"
] | |
6bf7f969c43b526df2273d8c69f24b6846b19657 | 96b6f183cda10aac03f9fb4ffa11cba6445c35aa | /algoriz/settings.py | 57f2ac9bf3f2798173af3decde8ebb0931baa17b | [] | no_license | akshar-raaj/algoriz | 3a6ea60b15dc35e848a5534cdd7f6b047fd9a7d3 | 21e0797a7d19248043c5810fed89ba7c50e551f9 | refs/heads/master | 2020-04-03T03:38:53.705683 | 2018-10-27T19:51:31 | 2018-10-27T19:51:31 | 154,991,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | """
Django settings for algoriz project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a#l@_bofm=q)&$i=t#u1$1x*sqa$nx6ms260p7d793+bz861vh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'trades',
'graphos',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'algoriz.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'algoriz.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE':'django.db.backends.postgresql_psycopg2',
'NAME': 'algoriz',
'USER': 'akshar',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
6188b38f2b472324d725074b31351f8de45d833f | 94b4177306b898b86601cae5ff1e580eb95e502f | /mysite/settings.py | 54d6592cf1cb0a445fbc3a37c96516f7e8d54fef | [] | no_license | inho2736/my-first-blog | c797b0d4b613fa17ac3b23962d39835df514926b | 591750ee222425fc96910040b6b84f4bc4236a7e | refs/heads/master | 2020-03-22T07:39:31.308553 | 2018-07-04T11:53:32 | 2018-07-04T11:53:32 | 139,715,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,194 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ha9kod=amx36+&xxrcbg!bk69vzzq)j=xsl=cb+k(u$b-g#)l3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ko'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
cce11633fa807ab1e0c49731aec532e272ae6b07 | 00e16b7e658d3e173dfd626de2463defb9957684 | /goodbye.py | 3fd73bb00434824ea6cff73115fd84e642204a77 | [] | no_license | mohira-books/python-cli-introduction | 96cd05bc849dfbdc6d7c01425e8edf2aea316454 | 2fa9501e43c4a301657d3a4ddd1aed11cdfeb8d8 | refs/heads/main | 2023-01-31T03:00:32.263834 | 2020-12-07T08:28:22 | 2020-12-14T09:46:06 | 319,252,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | def main():
print('Good Bye!')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2d89ffe2040450be51655549fc2da71608ecba62 | b6c931fea41658914844ceae8906a2cb03294614 | /math/0x06-multivariate_prob/0-mean_cov.py | 8ae4bbede7e0867b9a69d2b891f075cfb6ace21b | [] | no_license | sidneyriffic/holbertonschool-machine_learning | 05ccbe13e1b4b9cb773e0c531a1981a7970daa1b | 56356c56297d8391bad8a1607eb226489766bc63 | refs/heads/master | 2021-07-04T07:45:22.919076 | 2020-12-19T01:09:01 | 2020-12-19T01:09:01 | 207,622,396 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #!/usr/bin/env python3
"""Return means and covariance matrix of a multivariate data set"""
import numpy as np
def mean_cov(X):
"""Return means and covariance matrix of a multivariate data set"""
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError("X must be a 2D numpy.ndarray")
if X.shape[0] < 2:
raise ValueError("X must contain multiple data points")
means = np.mean(X, axis=0, keepdims=True)
covmat = np.ndarray((X.shape[1], X.shape[1]))
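    # Unbiased sample covariance (divides by N - 1); equivalent in spirit to
    # np.cov(X, rowvar=False).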
for x in range(X.shape[1]):
for y in range(X.shape[1]):
covmat[x][y] = (((X[:, x] - means[:, x]) *
(X[:, y] - means[:, y])).sum() /
(X.shape[0] - 1))
return means, covmat
| [
"[email protected]"
] | |
c0db67f8c6058f56c14538cc13211ec25f597b7b | ca8d183f5d6f1f260483a3555efd05870fe1d891 | /com_blacktensor/cop/fin/model/finance_dto.py | 1bcd1fd81dbed5d0d30270dcf7bec28453dc22d2 | [
"MIT"
] | permissive | Jelly6489/Stock-Proj | b559304f10614122ddaa00e39c821a65faa9f91d | 3e7b1ad5cddc5b142f0069e024199fe969c7c7e8 | refs/heads/main | 2023-01-13T17:18:33.729747 | 2020-11-13T08:19:33 | 2020-11-13T08:19:33 | 312,512,688 | 0 | 0 | MIT | 2020-11-13T08:11:04 | 2020-11-13T08:11:04 | null | UTF-8 | Python | false | false | 2,764 | py | import csv
import json
import pandas as pd
from com_blacktensor.ext.db import db, openSession, engine
# from com_blacktensor.ext.routes import Resource
class FinanceDto(db.Model):
__tablename__ = 'finance'
__table_args__={'mysql_collate' : 'utf8_general_ci'}
no : int = db.Column(db.Integer, primary_key = True, index = True)
name : str = db.Column(db.String(10))
f_2015_12 : float = db.Column(db.Float)
f_2016_12 : float = db.Column(db.Float)
f_2017_12 : float = db.Column(db.Float)
f_2018_12 : float = db.Column(db.Float)
f_2019_12 : float = db.Column(db.Float)
f_2020_12 : float = db.Column(db.Float)
f_2021_12 : float = db.Column(db.Float)
f_2022_12 : float = db.Column(db.Float)
keyword : str = db.Column(db.String(10))
# def __init__(self, no, name, f_2015_12, f_2016_12, f_2017_12, f_2018_12, f_2019_12, f_2020_12, f_2021_12, f_2022_12, keyword):
# self.no = no
# self.name = name
# self.f_2015_12 = f_2015_12
# self.f_2016_12 = f_2016_12
# self.f_2017_12 = f_2017_12
# self.f_2018_12 = f_2018_12
# self.f_2019_12 = f_2019_12
# self.f_2020_12 = f_2020_12
# self.f_2021_12 = f_2021_12
# self.f_2022_12 = f_2022_12
# self.keyword = keyword
def __repr__(self):
return f'Finance(no={self.no}, name={self.name}, f_2015_12={self.f_2015_12}, \
f_2016_12={self.f_2016_12}, f_2017_12={self.f_2017_12}, f_2018_12={self.f_2018_12}, \
f_2019_12={self.f_2019_12}, f_2020_12={self.f_2020_12}, f_2021_12={self.f_2021_12}, \
f_2022_12={self.f_2022_12}, keyword={self.keyword})'
def __str__(self):
return f'Finance(no={self.no}, name={self.name}, f_2015_12={self.f_2015_12}, \
f_2016_12={self.f_2016_12}, f_2017_12={self.f_2017_12}, f_2018_12={self.f_2018_12}, \
f_2019_12={self.f_2019_12}, f_2020_12={self.f_2020_12}, f_2021_12={self.f_2021_12}, \
f_2022_12={self.f_2022_12}, keyword={self.keyword})'
@property
def json(self):
return {
'no' : self.no,
'name' : self.name,
'f_2015_12' : self.f_2015_12,
'f_2016_12' : self.f_2016_12,
'f_2017_12' : self.f_2017_12,
'f_2018_12' : self.f_2018_12,
'f_2019_12' : self.f_2019_12,
'f_2020_12' : self.f_2020_12,
'f_2021_12' : self.f_2021_12,
'f_2022_12' : self.f_2022_12,
'keyword' : self.keyword
}
class FinanceVo:
no : int = 0
name : str = ''
f_2015_12 : float = 0.0
f_2016_12 : float = 0.0
f_2017_12 : float = 0.0
f_2018_12 : float = 0.0
f_2019_12 : float = 0.0
f_2020_12 : float = 0.0
f_2021_12 : float = 0.0
f_2022_12 : float = 0.0
keyword : str = '' | [
"[email protected]"
] | |
432a27bf6bb59950798f0e4f47ac1df8b7450b5c | e32a75c44ef9c964bc5f97712c8e0e845ee3f6ca | /models_vqa/question_prior_net.py | be677105c8efe0cf16b6a818c5e33d76fc1e7e38 | [] | no_license | ankita-kalra/ivqa_belief_set | 29c40ec4076433ac412728aea603e4e69ce530eb | 6ebba50ff001e1af6695bb3f4d2643e7072ee153 | refs/heads/master | 2020-04-05T17:17:00.834303 | 2018-08-27T09:59:16 | 2018-08-27T09:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow.python.ops.nn import dropout as drop
from n2mn_util.cnn import fc_layer as fc, fc_relu_layer as fc_relu
# The network that takes in the hidden state of the question encoder LSTM
# and outputs scores over the answer choices.
def question_prior_net(encoder_states, num_choices, qpn_dropout, hidden_dim=500,
scope='question_prior_net', reuse=None):
with tf.variable_scope(scope, reuse=reuse):
# concate the LSTM states from all layers
assert (isinstance(encoder_states, tuple))
h_list = []
for s in encoder_states:
assert (isinstance(s, tf.contrib.rnn.LSTMStateTuple))
h_list.append(s.h)
# h_concat has shape [N, D_lstm1 + ... + D_lstm_n]
h_concat = tf.concat(h_list, axis=1)
if qpn_dropout:
h_concat = drop(h_concat, 0.5)
fc1 = fc_relu('fc1', h_concat, output_dim=hidden_dim)
if qpn_dropout:
fc1 = drop(fc1, 0.5)
fc2 = fc('fc2', fc1, output_dim=num_choices)
return fc2
| [
"[email protected]"
] | |
8b0efbb4b751dd8f8ecb1415f39e7f826639b65b | 7060196e3773efd535813c9adb0ea8eca9d46b6c | /stripe/api_resources/radar/value_list_item.py | be28d2723dbe75b3c3803bf54a5689df657277d2 | [
"MIT"
] | permissive | henry232323/stripe-python | 7fc7440a8e8e0a57a26df577d517d9ba36ca00d0 | 953faf3612522f4294393d341138800691f406e0 | refs/heads/master | 2020-05-01T06:27:05.154381 | 2019-03-23T19:21:20 | 2019-03-23T19:21:20 | 177,330,547 | 0 | 0 | MIT | 2019-03-23T19:17:54 | 2019-03-23T19:17:54 | null | UTF-8 | Python | false | false | 326 | py | from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import DeletableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
class ValueListItem(
CreateableAPIResource, DeletableAPIResource, ListableAPIResource
):
OBJECT_NAME = "radar.value_list_item"
| [
"[email protected]"
] | |
d1d63f4db31cb55a4752c2393b7ca13a60c16749 | 814fd0bea5bc063a4e34ebdd0a5597c9ff67532b | /mojo/mojo_edk_tests.gyp | f3809b4e9cbd7778f1890afa25d022e375b653cd | [
"BSD-3-Clause"
] | permissive | rzr/chromium-crosswalk | 1b22208ff556d69c009ad292bc17dca3fe15c493 | d391344809adf7b4f39764ac0e15c378169b805f | refs/heads/master | 2021-01-21T09:11:07.316526 | 2015-02-16T11:52:21 | 2015-02-16T11:52:21 | 38,887,985 | 0 | 0 | NOASSERTION | 2019-08-07T21:59:20 | 2015-07-10T15:35:50 | C++ | UTF-8 | Python | false | false | 10,276 | gyp | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'mojo_variables.gypi',
],
'targets': [
{
'target_name': 'mojo_edk_tests',
'type': 'none',
'dependencies': [
# NOTE: If adding a new dependency here, please consider whether it
# should also be added to the list of Mojo-related dependencies of
# build/all.gyp:All on iOS, as All cannot depend on the mojo_base
# target on iOS due to the presence of the js targets, which cause v8
# to be built.
'mojo_message_pipe_perftests',
'mojo_public_application_unittests',
'mojo_public_bindings_unittests',
'mojo_public_environment_unittests',
'mojo_public_system_perftests',
'mojo_public_system_unittests',
'mojo_public_utility_unittests',
'mojo_system_unittests',
'mojo_js_unittests',
'mojo_js_integration_tests',
],
},
# TODO(vtl): Reorganize the mojo_public_*_unittests.
{
# GN version: //mojo/edk/test:mojo_public_bindings_unittests
'target_name': 'mojo_public_bindings_unittests',
'type': 'executable',
'dependencies': [
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_public.gyp:mojo_cpp_bindings',
'mojo_public.gyp:mojo_environment_standalone',
'mojo_public.gyp:mojo_public_bindings_test_utils',
'mojo_public.gyp:mojo_public_test_interfaces',
'mojo_public.gyp:mojo_public_test_utils',
'mojo_public.gyp:mojo_utility',
],
'sources': [
'public/cpp/bindings/tests/array_unittest.cc',
'public/cpp/bindings/tests/bounds_checker_unittest.cc',
'public/cpp/bindings/tests/buffer_unittest.cc',
'public/cpp/bindings/tests/connector_unittest.cc',
'public/cpp/bindings/tests/container_test_util.cc',
'public/cpp/bindings/tests/equals_unittest.cc',
'public/cpp/bindings/tests/handle_passing_unittest.cc',
'public/cpp/bindings/tests/interface_ptr_unittest.cc',
'public/cpp/bindings/tests/map_unittest.cc',
'public/cpp/bindings/tests/request_response_unittest.cc',
'public/cpp/bindings/tests/router_unittest.cc',
'public/cpp/bindings/tests/sample_service_unittest.cc',
'public/cpp/bindings/tests/serialization_warning_unittest.cc',
'public/cpp/bindings/tests/string_unittest.cc',
'public/cpp/bindings/tests/struct_unittest.cc',
'public/cpp/bindings/tests/type_conversion_unittest.cc',
'public/cpp/bindings/tests/validation_unittest.cc',
],
},
{
# GN version: //mojo/edk/test:mojo_public_environment_unittests
'target_name': 'mojo_public_environment_unittests',
'type': 'executable',
'dependencies': [
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_public.gyp:mojo_environment_standalone',
'mojo_public.gyp:mojo_public_test_utils',
'mojo_public.gyp:mojo_utility',
],
'include_dirs': [ '..' ],
'sources': [
'public/cpp/environment/tests/async_wait_unittest.cc',
'public/cpp/environment/tests/async_waiter_unittest.cc',
'public/cpp/environment/tests/logger_unittest.cc',
'public/cpp/environment/tests/logging_unittest.cc',
],
},
{
# GN version: //mojo/edk/test:mojo_public_application_unittests
'target_name': 'mojo_public_application_unittests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_public.gyp:mojo_application_standalone',
'mojo_public.gyp:mojo_utility',
'mojo_public.gyp:mojo_environment_standalone',
],
'sources': [
'public/cpp/application/tests/service_registry_unittest.cc',
],
},
{
# GN version: //mojo/public/cpp/system/tests:mojo_public_system_unittests
# and //mojo/public/c/system/tests
'target_name': 'mojo_public_system_unittests',
'type': 'executable',
'dependencies': [
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_public.gyp:mojo_public_test_utils',
],
'include_dirs': [ '..' ],
'sources': [
'<@(mojo_public_system_unittest_sources)',
],
},
{
# GN version: //mojo/public/cpp/application/tests:mojo_public_utility_unittests
'target_name': 'mojo_public_utility_unittests',
'type': 'executable',
'dependencies': [
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_public.gyp:mojo_public_test_utils',
'mojo_public.gyp:mojo_utility',
],
'include_dirs': [ '..' ],
'sources': [
'public/cpp/utility/tests/mutex_unittest.cc',
'public/cpp/utility/tests/run_loop_unittest.cc',
'public/cpp/utility/tests/thread_unittest.cc',
],
'conditions': [
# See crbug.com/342893:
['OS=="win"', {
'sources!': [
'public/cpp/utility/tests/mutex_unittest.cc',
'public/cpp/utility/tests/thread_unittest.cc',
],
}],
],
},
{
# GN version: //mojo/edk/test:mojo_public_system_perftests
'target_name': 'mojo_public_system_perftests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_run_all_perftests',
'mojo_public.gyp:mojo_public_test_utils',
'mojo_public.gyp:mojo_utility',
],
'sources': [
'public/c/system/tests/core_perftest.cc',
],
},
{
# GN version: //mojo/edk/system:mojo_system_unittests
'target_name': 'mojo_system_unittests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_common_test_support',
'mojo_edk.gyp:mojo_system_impl',
],
'sources': [
'edk/embedder/embedder_unittest.cc',
'edk/embedder/platform_channel_pair_posix_unittest.cc',
'edk/embedder/simple_platform_shared_buffer_unittest.cc',
'edk/system/awakable_list_unittest.cc',
'edk/system/channel_endpoint_id_unittest.cc',
'edk/system/channel_manager_unittest.cc',
'edk/system/channel_unittest.cc',
'edk/system/core_unittest.cc',
'edk/system/core_test_base.cc',
'edk/system/core_test_base.h',
'edk/system/data_pipe_unittest.cc',
'edk/system/dispatcher_unittest.cc',
'edk/system/local_data_pipe_unittest.cc',
'edk/system/memory_unittest.cc',
'edk/system/message_pipe_dispatcher_unittest.cc',
'edk/system/message_pipe_test_utils.h',
'edk/system/message_pipe_test_utils.cc',
'edk/system/message_pipe_unittest.cc',
'edk/system/multiprocess_message_pipe_unittest.cc',
'edk/system/options_validation_unittest.cc',
'edk/system/platform_handle_dispatcher_unittest.cc',
'edk/system/raw_channel_unittest.cc',
'edk/system/remote_message_pipe_unittest.cc',
'edk/system/run_all_unittests.cc',
'edk/system/shared_buffer_dispatcher_unittest.cc',
'edk/system/simple_dispatcher_unittest.cc',
'edk/system/test_utils.cc',
'edk/system/test_utils.h',
'edk/system/waiter_test_utils.cc',
'edk/system/waiter_test_utils.h',
'edk/system/waiter_unittest.cc',
'edk/test/multiprocess_test_helper_unittest.cc',
],
'conditions': [
['OS=="ios"', {
'sources!': [
'edk/embedder/embedder_unittest.cc',
'edk/system/multiprocess_message_pipe_unittest.cc',
'edk/test/multiprocess_test_helper_unittest.cc',
],
}],
],
},
{
# GN version: //mojo/edk/system:mojo_message_pipe_perftests
'target_name': 'mojo_message_pipe_perftests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../base/base.gyp:test_support_perf',
'../testing/gtest.gyp:gtest',
'mojo_edk.gyp:mojo_common_test_support',
'mojo_edk.gyp:mojo_system_impl',
],
'sources': [
'edk/system/message_pipe_perftest.cc',
'edk/system/message_pipe_test_utils.h',
'edk/system/message_pipe_test_utils.cc',
'edk/system/test_utils.cc',
'edk/system/test_utils.h',
],
},
{
# GN version: //mojo/edk/js/test:js_unittests
'target_name': 'mojo_js_unittests',
'type': 'executable',
'dependencies': [
'../gin/gin.gyp:gin_test',
'mojo_edk.gyp:mojo_common_test_support',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_edk.gyp:mojo_js_lib',
'mojo_public.gyp:mojo_environment_standalone',
'mojo_public.gyp:mojo_public_test_interfaces',
'mojo_public.gyp:mojo_utility',
],
'sources': [
'edk/js/handle_unittest.cc',
'edk/js/test/run_js_tests.cc',
],
},
{
# GN version: //mojo/edk/js/test:js_integration_tests
'target_name': 'mojo_js_integration_tests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../gin/gin.gyp:gin_test',
'mojo_public.gyp:mojo_environment_standalone',
'mojo_public.gyp:mojo_public_test_interfaces',
'mojo_public.gyp:mojo_utility',
'mojo_edk.gyp:mojo_js_lib',
'mojo_edk.gyp:mojo_run_all_unittests',
'mojo_js_to_cpp_bindings',
],
'sources': [
'edk/js/test/run_js_integration_tests.cc',
'edk/js/tests/js_to_cpp_tests',
],
},
{
'target_name': 'mojo_js_to_cpp_bindings',
'type': 'none',
'variables': {
'mojom_files': [
'edk/js/tests/js_to_cpp.mojom',
],
},
'includes': [ 'mojom_bindings_generator_explicit.gypi' ],
},
],
}
| [
"[email protected]"
] | |
66f9427f087031cb76ce0ece746fb895f97913ca | 59c34dcbcc14b5482d5c41f174f5221b56ab87f0 | /api.py | 9270e163b157c793847eab967d0d7f3ba505c71d | [
"MIT"
] | permissive | wwhalljr/api.spaceprob.es | b73b670b65ff47537b1db7e02991134122a7807f | 20ee8f9d14314c83f07ec31d62601a75b62c7d44 | refs/heads/master | 2020-12-31T05:25:29.978826 | 2016-03-15T19:00:37 | 2016-03-15T19:00:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,401 | py | from __future__ import print_function
import os
import sys
import redis
import logging
import ephem
import requests
from flask import Flask, render_template, redirect, jsonify
from json import loads, dumps
from util import json, jsonp, support_jsonp
from scrapers.dsn import get_dsn_raw
app = Flask(__name__)
REDIS_URL = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
r_server = redis.StrictRedis.from_url(REDIS_URL)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
@app.route('/')
def hello():
return redirect("/dsn/probes.json", code=302)
@app.route('/dsn/mirror.json')
@json
def dsn_mirror():
""" a json view of the dsn xml feed """
dsn = loads(r_server.get('dsn_raw'))
return {'dsn': dsn }, 200
@app.route('/dsn/probes.json')
@app.route('/dsn/spaceprobes.json')
@support_jsonp
def dsn_by_probe():
""" dsn data aggregated by space probe """
dsn_by_probe = loads(r_server.get('dsn_by_probe'))
return jsonify({'dsn_by_probe': dsn_by_probe})
# for feeding the spaceprobes website
@app.route('/distances.json')
@support_jsonp
def all_probe_distances():
"""
endpoint to feed the spaceprobes website
this endpoint firsts asks the website what spaceprobes it has
and returns something for each. maybe this is a feature.
to test locally, edit the url below
and in the spaceprobes site main.js edit the distances_feed_url
you might also need to grab copy of this app's redis db from
heroku production to build locally
"""
# first get list of all probes from the webiste
url = 'http://spaceprob.es/probes.json'
all_probes_website = loads(requests.get(url).text)
# get probes according to our DSN mirror
dsn = loads(r_server.get('dsn_by_probe'))
# now loop through probes on website and try to find their distances
# some will have distances in dsn feed, others will have resource from website endpoint
# and others we will use pyephem for their host planet
distances = {}
for probe in all_probes_website:
dsn_name = probe['dsn_name']
slug = probe['slug']
if dsn_name and dsn_name in dsn:
distances[slug] = dsn[dsn_name]['uplegRange']
elif 'orbit_planet' in probe and probe['orbit_planet']:
# this probe's distance is same as a planet, so use pyephem
            m = None
            if probe['orbit_planet'] == 'Venus':
                m = ephem.Venus()
            elif probe['orbit_planet'] == 'Mars':
                m = ephem.Mars()
            elif probe['orbit_planet'] == 'Moon':
                m = ephem.Moon()
if m:
m.compute()
earth_distance = m.earth_distance * 149597871 # convert from AU to kilometers
distances[slug] = str(earth_distance)
elif 'distance' in probe and probe['distance']:
# this probe's distance is hard coded at website, add that
try:
# make sure this is actually numeric
float(probe['distance'])
distances[slug] = str(probe['distance'])
except ValueError:
pass
return jsonify({'spaceprobe_distances': distances})
@app.route('/planets.json')
@support_jsonp
def planet_distances():
""" return current distances from earth for 9 planets """
meters_per_au = 149597870700
planet_ephem = [ephem.Mercury(), ephem.Venus(), ephem.Mars(), ephem.Saturn(), ephem.Jupiter(), ephem.Uranus(), ephem.Neptune(), ephem.Pluto()]
planets = {}
for p in planet_ephem:
p.compute()
        planets[p.name] = p.earth_distance * meters_per_au / 1000 # metres -> km
return jsonify({'distance_from_earth_km': planets})
# the rest of this is old and like wolfram alpha hacking or something..
def get_detail(probe):
""" returns list of data we have for this probe
url = /<probe_name>
"""
try:
wolframalpha = loads(r_server.get('wolframalpha'))
detail = wolframalpha[probe]
return detail
except TypeError: # type error?
return {'Error': 'spacecraft not found'}, 404 # this doesn't work i dunno
@app.route('/probes/guide/')
def guide():
""" html api guide data viewer thingy
at </probes/guide/>
"""
try:
wolframalpha = loads(r_server.get('wolframalpha'))
kwargs = {'probe_details':wolframalpha}
return render_template('guide.html', **kwargs)
except:
return redirect("dsn/probes.json", code=302)
@app.route('/probes/<probe>/')
@support_jsonp
@json
def detail(probe):
""" returns list of data we have for this probe from wolfram alpha
url = /<probe_name>
ie
</Cassini>
"""
return get_detail(probe), 200
@app.route('/probes/<probe>/<field>/')
@support_jsonp
@json
def single_field(probe, field):
""" returns data for single field
url = /<probe_name>/<field>
ie
</Cassini/mass>
"""
field_value = get_detail(probe)
return {field: field_value[field]}, 200
@app.route('/probes/')
@support_jsonp
@json
def index():
""" returns list of all space probes in db
url = /
"""
probe_names = [k for k in loads(r_server.get('wolframalpha'))]
return {'spaceprobes': [p for p in probe_names]}, 200
if __name__ == '__main__':
app.debug = True
app.run()
| [
"[email protected]"
] | |
555210dfa338e3acc4ba9d4c8dd080d07b9e8135 | 115b5356242176b8873ae7e43cd313e41cbd0ee6 | /compustat/oct22/graph.py | ecfefa51ab30673d385a339b280ebcf6edfdde87 | [] | no_license | squeakus/bitsandbytes | b71ec737431bc46b7d93969a7b84bc4514fd365b | 218687d84db42c13bfd9296c476e54cf3d0b43d2 | refs/heads/master | 2023-08-26T19:37:15.190367 | 2023-07-18T21:41:58 | 2023-07-18T21:42:14 | 80,018,346 | 2 | 4 | null | 2022-06-22T04:08:35 | 2017-01-25T13:46:28 | C | UTF-8 | Python | false | false | 4,775 | py | """A module for plotting results"""
import pylab, pygame, sys
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
filetype = '.png'
def plot_3d(results_list, title):
"""show all results in parallel"""
x_range = range(len(results_list[0]))
fig = plt.figure()
#plt.title(title)
axe = Axes3D(fig)
plt.title(title)
for idx, result in enumerate(results_list):
axe.plot(x_range, result, idx)
plt.show()
def plot_2d(results_list, title):
"""multiple runs single graph"""
pylab.clf()
pylab.figure().autofmt_xdate()
x_range = range(len(results_list[0]))
for result in results_list:
pylab.plot(x_range, result)
pylab.title(title)
title += filetype
pylab.savefig(title)
def boxplot_data(results_list, title):
pylab.clf()
pylab.figure(1)
result_cols = []
for i in range(len(results_list[0])):
res = [result[i] for result in results_list]
result_cols.append(res)
pylab.boxplot(result_cols)
pylab.figure(1).autofmt_xdate()
title += '_boxplot'
pylab.title(title)
title += filetype
pylab.savefig(title)
def plot_ave(results_list, title):
""" show average with error bars"""
pylab.clf()
pylab.figure().autofmt_xdate()
x_range = range(len(results_list[0]))
err_x, err_y, std_list = [], [], []
for i in x_range:
if i % 10 == 0:
#get average for each generation
column = []
for result in results_list:
column.append(result[i])
average = np.average(column)
std_dev = np.std(column)
err_x.append(i)
err_y.append(average)
std_list.append(std_dev)
pylab.errorbar(err_x, err_y, yerr=std_list)
title += '_average'
pylab.title(title)
title += filetype
pylab.savefig(title)
def continuous_plot(iterations, grn):
"""Uses pygame to draw concentrations in real time"""
width, height = size = (600, 600)
screen = pygame.display.set_mode(size)
colors = [] # list for protein colors
conc_list = [] # current concentrations
extra_list = [] # add variables for user input
key_list = [] # keyboard inputs
extra_colors = [(255, 0, 0),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255)]
key_list.append([pygame.K_UP, pygame.K_DOWN])
key_list.append((pygame.K_a, pygame.K_z))
key_list.append((pygame.K_s, pygame.K_x))
key_list.append((pygame.K_d, pygame.K_c))
for gene in grn.genes:
# TF = Blue P = Green EXTRA = Red
if gene.gene_type == "TF":
colors.append((0, 0, 255))
elif gene.gene_type == "P":
colors.append((0, 255, 0))
elif gene.gene_type.startswith("EXTRA"):
extra_list.append({'name':gene.gene_type,
'up':False, 'down':False})
colors.append(extra_colors.pop())
conc_list.append(600-(gene.concentration * 600))
for _ in range(iterations):
#check for keypress
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
for idx, key_tuple in enumerate(key_list):
if pygame.key.get_pressed()[key_tuple[0]]:
extra_list[idx]['up'] = True
elif pygame.key.get_pressed()[key_tuple[1]]:
extra_list[idx]['down'] = True
elif event.type == pygame.KEYUP:
for extra in extra_list:
extra['up'] = False
extra['down'] = False
# Update the extra protein concentration
for extra in extra_list:
if extra['up']:
grn.change_extra(extra['name'], 0.005)
if extra['down']:
grn.change_extra(extra['name'], -0.005)
# if extrab_up:
# grn.change_extra("EXTRA_B", 0.005)
# if extrab_down:
# grn.change_extra("EXTRA_B", -0.005)
#run grn and get protein concentrations
results = grn.regulate_matrix(2, False)
scaled = [int(600-(x * 600)) for x in results]
old_conc = conc_list
conc_list = scaled
for idx, conc in enumerate(conc_list):
pygame.draw.line(screen, colors[idx],
(width-3, old_conc[idx]),
(width-2, conc))
pygame.display.flip()
screen.scroll(-1, 0)
pygame.time.wait(5)
| [
"[email protected]"
] | |
73e6777165b5b279414a6bc9d929bcc99ec5ba2d | 4e7946cc3dfb2c5ff35f7506d467c06de0e5e842 | /dlldiag/common/FileIO.py | be06510d944586dde4e451fb901b193965ea2f8e | [
"MIT"
] | permissive | GabLeRoux/dll-diagnostics | 745a346ee6076a5e55dc852601afa2a5b5f99994 | df579e03dff28645d42eb582f44cb9d340ba08e5 | refs/heads/master | 2023-02-27T18:54:37.074222 | 2021-01-29T07:39:31 | 2021-01-29T07:39:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | class FileIO(object):
'''
Provides functionality for performing file I/O
'''
@staticmethod
def writeFile(filename, data):
'''
Writes data to a file
'''
with open(filename, 'wb') as f:
f.write(data.encode('utf-8'))
| [
"[email protected]"
] | |
c92c67b45c126a4e1149a656da12ecf610334d07 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02836/s788041328.py | bf63c9bc5433a668bac86ce231dadf570827c52c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | S=input()
N=len(S)
ans=0
T=""
U=""
if N%2==0:
T=S[:N//2]
U=S[N//2:][::-1]
else:
T=S[:N//2]
U=S[N//2+1:][::-1]
for i in range(N//2):
if T[i]!=U[i]:
ans+=1
print(ans) | [
"[email protected]"
] | |
bf72c107e63b31d4671dcb3dfe4c0a70cbdb005f | 5b5c9a286e217340f816901b843281129ad0f596 | /plaso/cli/storage_media_tool.py | 2fee45d4f6963c9d263b32b6e95b687ddddd6c96 | [
"Apache-2.0"
] | permissive | aarontp/plaso | 403c43a47d5833a5ed3540cce907982bebaeac3c | 0ee446ebf03d17c515f76a666bd3795e91a2dd17 | refs/heads/master | 2021-01-20T23:41:01.382721 | 2016-10-11T19:47:21 | 2016-10-11T19:47:21 | 67,898,934 | 1 | 0 | null | 2016-09-10T23:07:53 | 2016-09-10T23:07:53 | null | UTF-8 | Python | false | false | 40,975 | py | # -*- coding: utf-8 -*-
"""The storage media CLI tool."""
import getpass
import logging
import os
import sys
from dfvfs.analyzer import analyzer as dfvfs_analyzer
from dfvfs.analyzer import fvde_analyzer_helper
from dfvfs.credentials import manager as credentials_manager
from dfvfs.helpers import source_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.path import factory as path_spec_factory
from dfvfs.volume import tsk_volume_system
from dfvfs.volume import vshadow_volume_system
from plaso.cli import tools
from plaso.lib import errors
from plaso.lib import py2to3
from plaso.lib import timelib
try:
# Disable experimental FVDE support.
dfvfs_analyzer.Analyzer.DeregisterHelper(
fvde_analyzer_helper.FVDEAnalyzerHelper())
except KeyError:
pass
class StorageMediaTool(tools.CLITool):
"""Class that implements a storage media CLI tool."""
_DEFAULT_BYTES_PER_SECTOR = 512
_SOURCE_OPTION = u'source'
_BINARY_DATA_CREDENTIAL_TYPES = [u'key_data']
_SUPPORTED_CREDENTIAL_TYPES = [
u'key_data', u'password', u'recovery_password', u'startup_key']
# For context see: http://en.wikipedia.org/wiki/Byte
_UNITS_1000 = [u'B', u'kB', u'MB', u'GB', u'TB', u'EB', u'ZB', u'YB']
_UNITS_1024 = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'EiB', u'ZiB', u'YiB']
def __init__(self, input_reader=None, output_writer=None):
"""Initializes the CLI tool object.
Args:
input_reader (Optional[InputReader]): input reader, where None indicates
that the stdin input reader should be used.
output_writer (Optional[OutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
"""
super(StorageMediaTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._credentials = []
self._filter_file = None
self._partitions = None
self._partition_offset = None
self._process_vss = False
self._source_scanner = source_scanner.SourceScanner()
self._source_path = None
self._source_path_specs = []
self._vss_only = False
self._vss_stores = None
def _FormatHumanReadableSize(self, size):
"""Represents a number of bytes as as human readable string.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1000 = 0
size_1000 = float(size)
while size_1000 >= 1000:
size_1000 /= 1000
magnitude_1000 += 1
magnitude_1024 = 0
size_1024 = float(size)
while size_1024 >= 1024:
size_1024 /= 1024
magnitude_1024 += 1
size_string_1000 = None
if magnitude_1000 > 0 and magnitude_1000 <= 7:
size_string_1000 = u'{0:.1f}{1:s}'.format(
size_1000, self._UNITS_1000[magnitude_1000])
size_string_1024 = None
if magnitude_1024 > 0 and magnitude_1024 <= 7:
size_string_1024 = u'{0:.1f}{1:s}'.format(
size_1024, self._UNITS_1024[magnitude_1024])
if not size_string_1000 or not size_string_1024:
return u'{0:d} B'.format(size)
return u'{0:s} / {1:s} ({2:d} B)'.format(
size_string_1024, size_string_1000, size)
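# Illustrative example (not part of the original class): a size of 1536 bytes
# is rendered by the method above as u'1.5KiB / 1.5kB (1536 B)'.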
def _GetNormalizedTSKVolumeIdentifiers(
self, volume_system, volume_identifiers):
"""Retrieves the normalized TSK volume identifiers.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Returns:
list[int]: normalized volume identifiers.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
u'Volume missing for identifier: {0:s}.'.format(volume_identifier))
try:
volume_identifier = int(volume.identifier[1:], 10)
normalized_volume_identifiers.append(volume_identifier)
except ValueError:
pass
return normalized_volume_identifiers
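# Illustrative example: TSK volume identifiers such as [u'p1', u'p2'] are
# normalized by the method above into the integers [1, 2].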
def _GetNormalizedVShadowVolumeIdentifiers(
self, volume_system, volume_identifiers):
"""Retrieves the normalized VShadow volume identifiers.
Args:
volume_system (dfvfs.VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Returns:
list[int]: normalized volume identifiers.
"""
normalized_volume_identifiers = []
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
u'Volume missing for identifier: {0:s}.'.format(volume_identifier))
try:
volume_identifier = int(volume.identifier[3:], 10)
normalized_volume_identifiers.append(volume_identifier)
except ValueError:
pass
return normalized_volume_identifiers
# TODO: refactor this method that it become more clear what it is
# supposed to do.
def _GetTSKPartitionIdentifiers(
self, scan_node, partition_offset=None, partitions=None):
"""Determines the TSK partition identifiers.
This method first checks for the preferred partition number, then for
the preferred partition offset and falls back to prompt the user if
no usable preferences were specified.
Args:
scan_node (dfvfs.SourceScanNode): scan node.
partition_offset (Optional[int]): preferred partition byte offset.
partitions (Optional[list[str]]): preferred partition identifiers.
Returns:
list[str]: partition identifiers.
Raises:
RuntimeError: if the volume for a specific identifier cannot be
retrieved.
SourceScannerError: if the format of or within the source
is not supported or the scan node is invalid.
"""
if not scan_node or not scan_node.path_spec:
raise errors.SourceScannerError(u'Invalid scan node.')
volume_system = tsk_volume_system.TSKVolumeSystem()
volume_system.Open(scan_node.path_spec)
volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
volume_system)
if not volume_identifiers:
self._output_writer.Write(u'[WARNING] No partitions found.\n')
return
normalized_volume_identifiers = self._GetNormalizedTSKVolumeIdentifiers(
volume_system, volume_identifiers)
if partitions:
if partitions == [u'all']:
partitions = range(1, volume_system.number_of_volumes + 1)
if not set(partitions).difference(normalized_volume_identifiers):
return [
u'p{0:d}'.format(partition_number)
for partition_number in partitions]
if partition_offset is not None:
for volume in volume_system.volumes:
volume_extent = volume.extents[0]
if volume_extent.offset == partition_offset:
return [volume.identifier]
self._output_writer.Write((
u'[WARNING] No such partition with offset: {0:d} '
u'(0x{0:08x}).\n').format(partition_offset))
if len(volume_identifiers) == 1:
return volume_identifiers
try:
selected_volume_identifier = self._PromptUserForPartitionIdentifier(
volume_system, volume_identifiers)
except KeyboardInterrupt:
raise errors.UserAbort(u'File system scan aborted.')
if selected_volume_identifier == u'all':
return volume_identifiers
return [selected_volume_identifier]
def _GetVSSStoreIdentifiers(self, scan_node, vss_stores=None):
"""Determines the VSS store identifiers.
Args:
scan_node (dfvfs.SourceScanNode): scan node.
vss_stores (Optional[list[str]]): preferred VSS store identifiers.
Returns:
list[str] VSS store identifiers.
Raises:
SourceScannerError: if the format of or within the source
is not supported or the scan node is invalid.
"""
if not scan_node or not scan_node.path_spec:
raise errors.SourceScannerError(u'Invalid scan node.')
volume_system = vshadow_volume_system.VShadowVolumeSystem()
volume_system.Open(scan_node.path_spec)
volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
volume_system)
if not volume_identifiers:
return []
try:
selected_store_identifiers = self._PromptUserForVSSStoreIdentifiers(
volume_system, volume_identifiers, vss_stores=vss_stores)
except KeyboardInterrupt:
raise errors.UserAbort(u'File system scan aborted.')
return selected_store_identifiers
def _ParseCredentialOptions(self, options):
"""Parses the credential options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
credentials = getattr(options, u'credentials', [])
if not isinstance(credentials, list):
raise errors.BadConfigOption(u'Unsupported credentials value.')
for credential_string in credentials:
credential_type, _, credential_data = credential_string.partition(u':')
if not credential_type or not credential_data:
raise errors.BadConfigOption(
u'Badly formatted credential: {0:s}.'.format(credential_string))
if credential_type not in self._SUPPORTED_CREDENTIAL_TYPES:
raise errors.BadConfigOption(
u'Unsupported credential type for: {0:s}.'.format(
credential_string))
if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
try:
credential_data = credential_data.decode(u'hex')
except TypeError:
raise errors.BadConfigOption(
u'Unsupported credential data for: {0:s}.'.format(
credential_string))
self._credentials.append((credential_type, credential_data))
def _ParseFilterOptions(self, options):
"""Parses the filter options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
filter_file = self.ParseStringOption(options, u'file_filter')
if not filter_file:
return
if self._data_location:
filter_file_base = os.path.basename(filter_file)
filter_file_check = os.path.join(self._data_location, filter_file_base)
if os.path.isfile(filter_file_check):
self._filter_file = filter_file_check
return
if not os.path.isfile(filter_file):
raise errors.BadConfigOption(
u'No such collection filter file: {0:s}.'.format(filter_file))
self._filter_file = filter_file
def _ParsePartitionsString(self, partitions):
"""Parses the user specified partitions string.
Args:
partitions (str): partitions. A range of partitions can be defined
as: "3..5". Multiple partitions can be defined as: "1,3,5" (a list
of comma separated values). Ranges and lists can also be combined
as: "1,3..5". The first partition is 1. All partition can be
defined as: "all".
Returns:
list[str]: partitions.
Raises:
BadConfigOption: if the partitions option is invalid.
"""
if not partitions:
return []
if partitions == u'all':
return [u'all']
partition_numbers = []
for partition_range in partitions.split(u','):
# Determine if the range is formatted as 1..3 otherwise it indicates
# a single partition number.
if u'..' in partition_range:
first_partition, last_partition = partition_range.split(u'..')
try:
first_partition = int(first_partition, 10)
last_partition = int(last_partition, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid partition range: {0:s}.'.format(partition_range))
for partition_number in range(first_partition, last_partition + 1):
if partition_number not in partition_numbers:
partition_numbers.append(partition_number)
else:
if partition_range.startswith(u'p'):
partition_range = partition_range[1:]
try:
partition_number = int(partition_range, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid partition range: {0:s}.'.format(partition_range))
if partition_number not in partition_numbers:
partition_numbers.append(partition_number)
return sorted(partition_numbers)
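# Illustrative example: a partitions string of u'1,3..5' is parsed by the
# method above into [1, 3, 4, 5]; u'all' is returned as [u'all'].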
def _ParseStorageMediaImageOptions(self, options):
"""Parses the storage media image options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
partitions = getattr(options, u'partitions', None)
self._partitions = self._ParsePartitionsString(partitions)
partition = getattr(options, u'partition', None)
if self._partitions and partition is not None:
raise errors.BadConfigOption((
u'Option "--partition" can not be used in combination '
u'with "--partitions".'))
if not self._partitions and partition is not None:
self._partitions = self._ParsePartitionsString(partition)
image_offset_bytes = getattr(options, u'image_offset_bytes', None)
if self._partitions and image_offset_bytes is not None:
raise errors.BadConfigOption((
u'Option "--image_offset_bytes" can not be used in combination '
u'with "--partitions" or "--partition".'))
image_offset = getattr(options, u'image_offset', None)
if self._partitions and image_offset is not None:
raise errors.BadConfigOption((
u'Option "--image_offset" can not be used in combination with '
u'"--partitions" or "--partition".'))
if (image_offset_bytes is not None and
isinstance(image_offset_bytes, py2to3.STRING_TYPES)):
try:
image_offset_bytes = int(image_offset_bytes, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid image offset bytes: {0:s}.'.format(image_offset_bytes))
if image_offset_bytes is None and image_offset is not None:
bytes_per_sector = getattr(
options, u'bytes_per_sector', self._DEFAULT_BYTES_PER_SECTOR)
if isinstance(image_offset, py2to3.STRING_TYPES):
try:
image_offset = int(image_offset, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid image offset: {0:s}.'.format(image_offset))
if isinstance(bytes_per_sector, py2to3.STRING_TYPES):
try:
bytes_per_sector = int(bytes_per_sector, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid bytes per sector: {0:s}.'.format(bytes_per_sector))
if image_offset_bytes:
self._partition_offset = image_offset_bytes
elif image_offset:
self._partition_offset = image_offset * bytes_per_sector
def _ParseVSSProcessingOptions(self, options):
"""Parses the VSS processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
vss_only = False
vss_stores = None
self._process_vss = not getattr(options, u'no_vss', True)
if self._process_vss:
vss_only = getattr(options, u'vss_only', False)
vss_stores = getattr(options, u'vss_stores', None)
if vss_stores:
vss_stores = self._ParseVSSStoresString(vss_stores)
self._vss_only = vss_only
self._vss_stores = vss_stores
def _ParseVSSStoresString(self, vss_stores):
"""Parses the user specified VSS stores string.
Args:
vss_stores (str): VSS stores. A range of stores can be defined
as: "3..5". Multiple stores can be defined as: "1,3,5" (a list
of comma separated values). Ranges and lists can also be
combined as: "1,3..5". The first store is 1. All stores can be
defined as: "all".
Returns:
list[str]: VSS stores.
Raises:
BadConfigOption: if the VSS stores option is invalid.
"""
if not vss_stores:
return []
if vss_stores == u'all':
return [u'all']
store_numbers = []
for vss_store_range in vss_stores.split(u','):
# Determine if the range is formatted as 1..3 otherwise it indicates
# a single store number.
if u'..' in vss_store_range:
first_store, last_store = vss_store_range.split(u'..')
try:
first_store = int(first_store, 10)
last_store = int(last_store, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid VSS store range: {0:s}.'.format(vss_store_range))
for store_number in range(first_store, last_store + 1):
if store_number not in store_numbers:
store_numbers.append(store_number)
else:
if vss_store_range.startswith(u'vss'):
vss_store_range = vss_store_range[3:]
try:
store_number = int(vss_store_range, 10)
except ValueError:
raise errors.BadConfigOption(
u'Invalid VSS store range: {0:s}.'.format(vss_store_range))
if store_number not in store_numbers:
store_numbers.append(store_number)
return sorted(store_numbers)
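# Illustrative example: a VSS stores string of u'vss1,3..4' is parsed by the
# method above into [1, 3, 4]; u'all' is returned as [u'all'].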
def _PromptUserForEncryptedVolumeCredential(
self, scan_context, locked_scan_node, credentials):
"""Prompts the user to provide a credential for an encrypted volume.
Args:
scan_context (dfvfs.SourceScannerContext): source scanner context.
locked_scan_node (dfvfs.SourceScanNode): locked scan node.
credentials (dfvfs.Credentials): credentials supported by the locked
scan node.
Returns:
bool: True if the volume was unlocked.
"""
# TODO: print volume description.
if locked_scan_node.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
self._output_writer.Write(u'Found a BitLocker encrypted volume.\n')
else:
self._output_writer.Write(u'Found an encrypted volume.\n')
credentials_list = list(credentials.CREDENTIALS)
credentials_list.append(u'skip')
self._output_writer.Write(u'Supported credentials:\n')
self._output_writer.Write(u'\n')
for index, name in enumerate(credentials_list):
self._output_writer.Write(u' {0:d}. {1:s}\n'.format(index, name))
self._output_writer.Write(u'\nNote that you can abort with Ctrl^C.\n\n')
result = False
while not result:
self._output_writer.Write(u'Select a credential to unlock the volume: ')
# TODO: add an input reader.
input_line = self._input_reader.Read()
input_line = input_line.strip()
if input_line in credentials_list:
credential_type = input_line
else:
try:
credential_type = int(input_line, 10)
credential_type = credentials_list[credential_type]
except (IndexError, ValueError):
self._output_writer.Write(
u'Unsupported credential: {0:s}\n'.format(input_line))
continue
if credential_type == u'skip':
break
getpass_string = u'Enter credential data: '
if sys.platform.startswith(u'win') and sys.version_info[0] < 3:
# For Python 2 on Windows getpass (win_getpass) requires an encoded
# byte string. For Python 3 we need it to be a Unicode string.
getpass_string = self._EncodeString(getpass_string)
credential_data = getpass.getpass(getpass_string)
self._output_writer.Write(u'\n')
if credential_type in self._BINARY_DATA_CREDENTIAL_TYPES:
try:
credential_data = credential_data.decode(u'hex')
except TypeError:
self._output_writer.Write(u'Unsupported credential data.\n')
continue
try:
result = self._source_scanner.Unlock(
scan_context, locked_scan_node.path_spec, credential_type,
credential_data)
except IOError as exception:
logging.debug(u'Unable to unlock volume with error: {0:s}'.format(
exception))
result = False
if not result:
self._output_writer.Write(u'Unable to unlock volume.\n')
self._output_writer.Write(u'\n')
self._output_writer.Write(u'\n')
return result
def _PromptUserForPartitionIdentifier(
self, volume_system, volume_identifiers):
"""Prompts the user to provide a partition identifier.
Args:
volume_system (dfvfs.TSKVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
Returns:
str: partition identifier or 'all'.
Raises:
SourceScannerError: if the source cannot be processed.
"""
self._output_writer.Write(
u'The following partitions were found:\n'
u'Identifier\tOffset (in bytes)\tSize (in bytes)\n')
for volume_identifier in sorted(volume_identifiers):
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
u'Volume missing for identifier: {0:s}.'.format(volume_identifier))
volume_extent = volume.extents[0]
self._output_writer.Write(
u'{0:s}\t\t{1:d} (0x{1:08x})\t{2:s}\n'.format(
volume.identifier, volume_extent.offset,
self._FormatHumanReadableSize(volume_extent.size)))
self._output_writer.Write(u'\n')
while True:
self._output_writer.Write(
u'Please specify the identifier of the partition that should be '
u'processed.\nAll partitions can be defined as: "all". Note that you '
u'can abort with Ctrl^C.\n')
selected_volume_identifier = self._input_reader.Read()
selected_volume_identifier = selected_volume_identifier.strip()
if not selected_volume_identifier.startswith(u'p'):
try:
partition_number = int(selected_volume_identifier, 10)
selected_volume_identifier = u'p{0:d}'.format(partition_number)
except ValueError:
pass
if (selected_volume_identifier == u'all' or
selected_volume_identifier in volume_identifiers):
break
self._output_writer.Write(
u'\n'
u'Unsupported partition identifier, please try again or abort '
u'with Ctrl^C.\n'
u'\n')
self._output_writer.Write(u'\n')
return selected_volume_identifier
def _PromptUserForVSSCurrentVolume(self):
"""Prompts the user if the current volume with VSS should be processed.
Returns:
bool: True if the current volume with VSS should be processed.
"""
while True:
self._output_writer.Write(
u'Volume Shadow Snapshots (VSS) were selected, also process current\n'
u'volume? [yes, no]\n')
process_current_volume = self._input_reader.Read()
process_current_volume = process_current_volume.strip()
process_current_volume = process_current_volume.lower()
if (not process_current_volume or
process_current_volume in (u'no', u'yes')):
break
self._output_writer.Write(
u'\n'
u'Unsupported option, please try again or abort with Ctrl^C.\n'
u'\n')
self._output_writer.Write(u'\n')
return not process_current_volume or process_current_volume == u'yes'
def _PromptUserForVSSStoreIdentifiers(
self, volume_system, volume_identifiers, vss_stores=None):
"""Prompts the user to provide the VSS store identifiers.
This method first checks for the preferred VSS stores and falls back
to prompt the user if no usable preferences were specified.
Args:
volume_system (dfvfs.VShadowVolumeSystem): volume system.
volume_identifiers (list[str]): allowed volume identifiers.
vss_stores (Optional[list[str]]): preferred VSS store identifiers.
Returns:
list[str]: selected VSS store identifiers.
Raises:
SourceScannerError: if the source cannot be processed.
"""
normalized_volume_identifiers = self._GetNormalizedVShadowVolumeIdentifiers(
volume_system, volume_identifiers)
# TODO: refactor this to _GetVSSStoreIdentifiers.
if vss_stores:
if vss_stores == [u'all']:
# We need to set the stores to cover all vss stores.
vss_stores = range(1, volume_system.number_of_volumes + 1)
if not set(vss_stores).difference(normalized_volume_identifiers):
return vss_stores
print_header = True
while True:
if print_header:
self._output_writer.Write(
u'The following Volume Shadow Snapshots (VSS) were found:\n'
u'Identifier\t\tCreation Time\n')
for volume_identifier in volume_identifiers:
volume = volume_system.GetVolumeByIdentifier(volume_identifier)
if not volume:
raise errors.SourceScannerError(
u'Volume missing for identifier: {0:s}.'.format(
volume_identifier))
vss_creation_time = volume.GetAttribute(u'creation_time')
vss_creation_time = timelib.Timestamp.FromFiletime(
vss_creation_time.value)
vss_creation_time = timelib.Timestamp.CopyToIsoFormat(
vss_creation_time)
if volume.HasExternalData():
external_data = u'\tWARNING: data stored outside volume'
else:
external_data = u''
self._output_writer.Write(u'{0:s}\t\t\t{1:s}{2:s}\n'.format(
volume.identifier, vss_creation_time, external_data))
self._output_writer.Write(u'\n')
print_header = False
self._output_writer.Write(
u'Please specify the identifier(s) of the VSS that should be '
u'processed:\nNote that a range of stores can be defined as: 3..5. '
u'Multiple stores can\nbe defined as: 1,3,5 (a list of comma '
u'separated values). Ranges and lists can\nalso be combined '
u'as: 1,3..5. The first store is 1. All stores can be defined\n'
u'as "all". If no stores are specified none will be processed. You\n'
u'can abort with Ctrl^C.\n')
selected_vss_stores = self._input_reader.Read()
selected_vss_stores = selected_vss_stores.strip()
if not selected_vss_stores:
return []
try:
selected_vss_stores = self._ParseVSSStoresString(selected_vss_stores)
except errors.BadConfigOption:
selected_vss_stores = []
if selected_vss_stores == [u'all']:
# We need to set the stores to cover all vss stores.
selected_vss_stores = range(1, volume_system.number_of_volumes + 1)
if not set(selected_vss_stores).difference(normalized_volume_identifiers):
break
self._output_writer.Write(
u'\n'
u'Unsupported VSS identifier(s), please try again or abort with '
u'Ctrl^C.\n'
u'\n')
self._output_writer.Write(u'\n')
return selected_vss_stores
def _ScanVolume(self, scan_context, volume_scan_node):
"""Scans the volume scan node for volume and file systems.
Args:
scan_context (dfvfs.SourceScannerContext): source scanner context.
volume_scan_node (dfvfs.SourceScanNode): volume scan node.
Raises:
SourceScannerError: if the format of or within the source
is not supported or the scan node is invalid.
"""
if not volume_scan_node or not volume_scan_node.path_spec:
raise errors.SourceScannerError(u'Invalid or missing volume scan node.')
selected_vss_stores = []
if len(volume_scan_node.sub_nodes) == 0:
self._ScanVolumeScanNode(
scan_context, volume_scan_node, selected_vss_stores)
else:
# Some volumes contain other volume or file systems e.g. BitLocker ToGo
# has an encrypted and unencrypted volume.
for sub_scan_node in volume_scan_node.sub_nodes:
self._ScanVolumeScanNode(
scan_context, sub_scan_node, selected_vss_stores)
def _ScanVolumeScanNode(
self, scan_context, volume_scan_node, selected_vss_stores):
"""Scans an individual volume scan node for volume and file systems.
Args:
scan_context (dfvfs.SourceScannerContext): source scanner context.
volume_scan_node (dfvfs.SourceScanNode): volume scan node.
selected_vss_stores (list[str]): selected VSS store identifiers.
Raises:
SourceScannerError: if the format of or within the source
is not supported or the scan node is invalid.
"""
if not volume_scan_node or not volume_scan_node.path_spec:
raise errors.SourceScannerError(u'Invalid or missing volume scan node.')
# Get the first node where we need to decide what to process.
scan_node = volume_scan_node
while len(scan_node.sub_nodes) == 1:
# Make sure that we prompt the user about VSS selection.
if scan_node.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW:
location = getattr(scan_node.path_spec, u'location', None)
if location == u'/':
break
scan_node = scan_node.sub_nodes[0]
# The source scanner found an encrypted volume and we need
# a credential to unlock the volume.
if scan_node.type_indicator in (
dfvfs_definitions.ENCRYPTED_VOLUME_TYPE_INDICATORS):
self._ScanVolumeScanNodeEncrypted(scan_context, scan_node)
elif scan_node.type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW:
self._ScanVolumeScanNodeVSS(scan_node, selected_vss_stores)
elif scan_node.type_indicator in (
dfvfs_definitions.FILE_SYSTEM_TYPE_INDICATORS):
if (not self._vss_only or not selected_vss_stores or
self._PromptUserForVSSCurrentVolume()):
self._source_path_specs.append(scan_node.path_spec)
def _ScanVolumeScanNodeEncrypted(self, scan_context, volume_scan_node):
"""Scans an encrypted volume scan node for volume and file systems.
Args:
scan_context (dfvfs.SourceScannerContext): source scanner context.
volume_scan_node (dfvfs.SourceScanNode): volume scan node.
"""
result = not scan_context.IsLockedScanNode(volume_scan_node.path_spec)
if not result:
credentials = credentials_manager.CredentialsManager.GetCredentials(
volume_scan_node.path_spec)
result = False
for credential_type, credential_data in self._credentials:
if credential_type not in credentials.CREDENTIALS:
continue
result = self._source_scanner.Unlock(
scan_context, volume_scan_node.path_spec, credential_type,
credential_data)
if result:
break
if self._credentials and not result:
self._output_writer.Write(
u'[WARNING] Unable to unlock encrypted volume using the provided '
u'credentials.\n\n')
if not result:
result = self._PromptUserForEncryptedVolumeCredential(
scan_context, volume_scan_node, credentials)
if result:
self._source_scanner.Scan(
scan_context, scan_path_spec=volume_scan_node.path_spec)
self._ScanVolume(scan_context, volume_scan_node)
def _ScanVolumeScanNodeVSS(self, volume_scan_node, selected_vss_stores):
"""Scans a VSS volume scan node for volume and file systems.
Args:
volume_scan_node (dfvfs.SourceScanNode): volume scan node.
selected_vss_stores (list[str]): selected VSS store identifiers.
Raises:
SourceScannerError: if a VSS sub scan node cannot be retrieved.
"""
if not self._process_vss:
return
# Do not scan inside individual VSS store scan nodes.
location = getattr(volume_scan_node.path_spec, u'location', None)
if location != u'/':
return
vss_store_identifiers = self._GetVSSStoreIdentifiers(
volume_scan_node, vss_stores=self._vss_stores)
selected_vss_stores.extend(vss_store_identifiers)
# Process VSS stores starting with the most recent one.
vss_store_identifiers.reverse()
for vss_store_identifier in vss_store_identifiers:
location = u'/vss{0:d}'.format(vss_store_identifier)
sub_scan_node = volume_scan_node.GetSubNodeByLocation(location)
if not sub_scan_node:
logging.error(
u'Scan node missing for VSS store identifier: {0:d}.'.format(
vss_store_identifier))
continue
# We "optimize" here for user experience, ideally we would scan for
# a file system instead of hard coding a TSK child path specification.
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, location=u'/',
parent=sub_scan_node.path_spec)
self._source_path_specs.append(path_spec)
def AddCredentialOptions(self, argument_group):
"""Adds the credential options to the argument group.
The credential options are use to unlock encrypted volumes.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
u'--credential', action=u'append', default=[], type=str,
dest=u'credentials', metavar=u'TYPE:DATA', help=(
u'Define a credentials that can be used to unlock encrypted '
u'volumes e.g. BitLocker. The credential is defined as type:data '
u'e.g. "password:BDE-test". Supported credential types are: '
u'{0:s}. Binary key data is expected to be passed in BASE-16 '
u'encoding (hexadecimal). WARNING credentials passed via command '
u'line arguments can end up in logs, so use this option with '
u'care.').format(u', '.join(self._SUPPORTED_CREDENTIAL_TYPES)))
def AddFilterOptions(self, argument_group):
"""Adds the filter options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
u'-f', u'--file_filter', u'--file-filter', dest=u'file_filter',
action=u'store', type=str, default=None, help=(
u'List of files to include for targeted collection of files to '
u'parse, one line per file path, setup is /path|file - where each '
u'element can contain either a variable set in the preprocessing '
u'stage or a regular expression.'))
def AddStorageMediaImageOptions(self, argument_group):
"""Adds the storage media image options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
u'--partition', dest=u'partition', action=u'store', type=str,
default=None, help=(
u'Choose a partition number from a disk image. This partition '
u'number should correspond to the partition number on the disk '
u'image, starting from partition 1. All partitions can be '
u'defined as: "all".'))
argument_group.add_argument(
u'--partitions', dest=u'partitions', action=u'store', type=str,
default=None, help=(
u'Define partitions that need to be processed. A range of '
u'partitions can be defined as: "3..5". Multiple partitions can '
u'be defined as: "1,3,5" (a list of comma separated values). '
u'Ranges and lists can also be combined as: "1,3..5". The first '
u'partition is 1. All partitions can be defined as: "all".'))
argument_group.add_argument(
u'-o', u'--offset', dest=u'image_offset', action=u'store', default=None,
type=int, help=(
u'The offset of the volume within the storage media image in '
u'number of sectors. A sector is {0:d} bytes in size by default '
u'this can be overwritten with the --sector_size option.').format(
self._DEFAULT_BYTES_PER_SECTOR))
argument_group.add_argument(
u'--ob', u'--offset_bytes', u'--offset-bytes',
dest=u'image_offset_bytes', action=u'store', default=None, type=int,
help=(
u'The offset of the volume within the storage media image in '
u'number of bytes.'))
argument_group.add_argument(
u'--sector_size', u'--sector-size', dest=u'bytes_per_sector',
action=u'store', type=int, default=self._DEFAULT_BYTES_PER_SECTOR,
help=(
u'The number of bytes per sector, which is {0:d} by '
u'default.').format(self._DEFAULT_BYTES_PER_SECTOR))
def AddVSSProcessingOptions(self, argument_group):
"""Adds the VSS processing options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
u'--no_vss', u'--no-vss', dest=u'no_vss', action=u'store_true',
default=False, help=(
u'Do not scan for Volume Shadow Snapshots (VSS). This means that '
u'Volume Shadow Snapshots (VSS) are not processed.'))
argument_group.add_argument(
u'--vss_only', u'--vss-only', dest=u'vss_only', action=u'store_true',
default=False, help=(
u'Do not process the current volume if Volume Shadow Snapshots '
u'(VSS) have been selected.'))
argument_group.add_argument(
u'--vss_stores', u'--vss-stores', dest=u'vss_stores', action=u'store',
type=str, default=None, help=(
u'Define Volume Shadow Snapshots (VSS) (or stores) that need to be '
u'processed. A range of stores can be defined as: "3..5". '
u'Multiple stores can be defined as: "1,3,5" (a list of comma '
u'separated values). Ranges and lists can also be combined as: '
u'"1,3..5". The first store is 1. All stores can be defined as: '
u'"all".'))
def ParseOptions(self, options):
"""Parses tool specific options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
super(StorageMediaTool, self).ParseOptions(options)
self._ParseStorageMediaImageOptions(options)
self._ParseVSSProcessingOptions(options)
self._ParseCredentialOptions(options)
self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)
if not self._source_path:
raise errors.BadConfigOption(u'Missing source path.')
self._source_path = os.path.abspath(self._source_path)
def ScanSource(self):
"""Scans the source path for volume and file systems.
This function sets the internal source path specification and source
type values.
Returns:
dfvfs.SourceScannerContext: source scanner context.
Raises:
SourceScannerError: if the format of or within the source is
not supported.
"""
if (not self._source_path.startswith(u'\\\\.\\') and
not os.path.exists(self._source_path)):
raise errors.SourceScannerError(
u'No such device, file or directory: {0:s}.'.format(
self._source_path))
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(self._source_path)
try:
self._source_scanner.Scan(scan_context)
except (dfvfs_errors.BackEndError, ValueError) as exception:
raise errors.SourceScannerError(
u'Unable to scan source with error: {0:s}.'.format(exception))
if scan_context.source_type not in (
scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE):
scan_node = scan_context.GetRootScanNode()
self._source_path_specs.append(scan_node.path_spec)
return scan_context
# Get the first node where we need to decide what to process.
scan_node = scan_context.GetRootScanNode()
while len(scan_node.sub_nodes) == 1:
scan_node = scan_node.sub_nodes[0]
# The source scanner found a partition table and we need to determine
# which partition needs to be processed.
if scan_node.type_indicator != (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
partition_identifiers = None
else:
partition_identifiers = self._GetTSKPartitionIdentifiers(
scan_node, partition_offset=self._partition_offset,
partitions=self._partitions)
if not partition_identifiers:
self._ScanVolume(scan_context, scan_node)
else:
for partition_identifier in partition_identifiers:
location = u'/{0:s}'.format(partition_identifier)
sub_scan_node = scan_node.GetSubNodeByLocation(location)
self._ScanVolume(scan_context, sub_scan_node)
if not self._source_path_specs:
raise errors.SourceScannerError(
u'No supported file system found in source.')
return scan_context
| [
"[email protected]"
] | |
715ba8bc8516cdd86958db4fbcfa74c56889341d | a7d5fad9c31dc2678505e2dcd2166ac6b74b9dcc | /tests/functional/utilities/authorization.py | aa7a619aa2545fac7fecf6f81b16c2cd68a23581 | [
"MIT"
] | permissive | mitsei/dlkit | 39d5fddbb8cc9a33e279036e11a3e7d4fa558f70 | 445f968a175d61c8d92c0f617a3c17dc1dc7c584 | refs/heads/master | 2022-07-27T02:09:24.664616 | 2018-04-18T19:38:17 | 2018-04-18T19:38:17 | 88,057,460 | 2 | 1 | MIT | 2022-07-06T19:24:50 | 2017-04-12T13:53:10 | Python | UTF-8 | Python | false | false | 20,086 | py | import pickle
from copy import deepcopy
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, RUNTIME
from dlkit.runtime.proxy_example import User, SimpleRequest
from dlkit.runtime import configs
BOOTSTRAP_VAULT_GENUS = Type(**{
'identifier': 'bootstrap-vault',
'namespace': 'authorization.Vault',
'authority': 'ODL.MIT.EDU'
})
try:
CONFIGURED_AUTHORITY = configs.JSON_1['parameters']['authority']['values'][0]['value']
except KeyError:
CONFIGURED_AUTHORITY = ''
BASE_AUTHORIZATIONS = (
('assessment.Bank', 'lookup', 'assessment.Bank'),
('authorization.Vault', 'lookup', 'authorization.Vault'),
('commenting.Book', 'lookup', 'commenting.Book'),
('hierarchy.Hierarchy', 'lookup', 'hierarchy.Hierarchy'),
('learning.ObjectiveBank', 'lookup', 'learning.ObjectiveBank'),
('repository.Repository', 'lookup', 'repository.Repository'),
('resource.Bin', 'lookup', 'resource.Bin'),
)
SUPER_USER_FUNCTIONS = (
('create', 'authorization.Authorization'),
('delete', 'authorization.Authorization'),
('lookup', 'authorization.Authorization'),
('search', 'authorization.Authorization'),
('create', 'authorization.Vault'),
('delete', 'authorization.Vault'),
('search', 'authorization.Vault'),
)
PROXY_USER_FUNCTIONS = (
('proxy', 'users.Proxy'),
)
INSTRUCTOR_FUNCTIONS = (
('assessment.Answer', 'lookup', 'assessment.Bank'),
('assessment.Answer', 'create', 'assessment.Bank'),
('assessment.Answer', 'delete', 'assessment.Bank'),
('assessment.Answer', 'update', 'assessment.Bank'),
('assessment.Assessment', 'author', 'assessment.Bank'),
('assessment.Assessment', 'lookup', 'assessment.Bank'),
('assessment.Assessment', 'create', 'assessment.Bank'),
('assessment.Assessment', 'delete', 'assessment.Bank'),
('assessment.Assessment', 'search', 'assessment.Bank'),
('assessment.Assessment', 'update', 'assessment.Bank'),
('assessment.Assessment', 'take', 'assessment.Bank'),
('assessment.AssessmentBank', 'assign', 'assessment.Bank'),
('assessment.AssessmentBank', 'lookup', 'assessment.Bank'),
('assessment.AssessmentOffered', 'lookup', 'assessment.Bank'),
('assessment.AssessmentOffered', 'create', 'assessment.Bank'),
('assessment.AssessmentOffered', 'delete', 'assessment.Bank'),
('assessment.AssessmentOffered', 'update', 'assessment.Bank'),
('assessment.AssessmentTaken', 'lookup', 'assessment.Bank'),
('assessment.AssessmentTaken', 'create', 'assessment.Bank'),
('assessment.AssessmentTaken', 'delete', 'assessment.Bank'),
('assessment.AssessmentTaken', 'search', 'assessment.Bank'),
('assessment.AssessmentTaken', 'update', 'assessment.Bank'),
('assessment.Item', 'lookup', 'assessment.Bank'),
('assessment.Item', 'create', 'assessment.Bank'),
('assessment.Item', 'delete', 'assessment.Bank'),
('assessment.Item', 'update', 'assessment.Bank'),
('assessment.Item', 'search', 'assessment.Bank'),
('assessment.Question', 'lookup', 'assessment.Bank'),
('assessment.Question', 'create', 'assessment.Bank'),
('assessment.Question', 'delete', 'assessment.Bank'),
('assessment.Question', 'update', 'assessment.Bank'),
('assessment.Bank', 'access', 'assessment.Bank'),
('assessment.Bank', 'create', 'assessment.Bank'),
('assessment.Bank', 'delete', 'assessment.Bank'),
('assessment.Bank', 'modify', 'assessment.Bank'),
('assessment.Bank', 'search', 'assessment.Bank'),
('assessment.Bank', 'update', 'assessment.Bank'),
('assessment_authoring.AssessmentPart', 'lookup', 'assessment_authoring.Bank'),
('assessment_authoring.AssessmentPart', 'create', 'assessment_authoring.Bank'),
('assessment_authoring.AssessmentPart', 'delete', 'assessment_authoring.Bank'),
('assessment_authoring.AssessmentPart', 'update', 'assessment_authoring.Bank'),
('commenting.Book', 'access', 'commenting.Book'),
('commenting.Book', 'create', 'commenting.Book'),
('commenting.Book', 'delete', 'commenting.Book'),
('commenting.Book', 'modify', 'commenting.Book'),
('commenting.Book', 'update', 'commenting.Book'),
('commenting.Comment', 'author', 'commenting.Book'),
('commenting.Comment', 'lookup', 'commenting.Book'),
('commenting.Comment', 'create', 'commenting.Book'),
('commenting.Comment', 'delete', 'commenting.Book'),
('commenting.Comment', 'update', 'commenting.Book'),
('hierarchy.Hierarchy', 'update', 'hierarchy.Hierarchy'),
('learning.ObjectiveBank', 'create', 'learning.ObjectiveBank'),
('learning.ObjectiveBank', 'delete', 'learning.ObjectiveBank'),
('learning.ObjectiveBank', 'update', 'learning.ObjectiveBank'),
('learning.Objective', 'create', 'learning.ObjectiveBank'),
('learning.Objective', 'delete', 'learning.ObjectiveBank'),
('learning.Objective', 'lookup', 'learning.ObjectiveBank'),
('learning.Objective', 'search', 'learning.ObjectiveBank'),
('learning.Objective', 'update', 'learning.ObjectiveBank'),
('learning.Proficiency', 'create', 'learning.ObjectiveBank'),
('learning.Proficiency', 'delete', 'learning.ObjectiveBank'),
('learning.Proficiency', 'lookup', 'learning.ObjectiveBank'),
('learning.Proficiency', 'search', 'learning.ObjectiveBank'),
('learning.Proficiency', 'update', 'learning.ObjectiveBank'),
('logging.Log', 'lookup', 'logging.Log'),
('logging.Log', 'create', 'logging.Log'),
('logging.Log', 'delete', 'logging.Log'),
('logging.Log', 'update', 'logging.Log'),
('logging.LogEntry', 'alias', 'logging.Log'),
('logging.LogEntry', 'create', 'logging.Log'),
('logging.LogEntry', 'delete', 'logging.Log'),
('logging.LogEntry', 'lookup', 'logging.Log'),
('logging.LogEntry', 'search', 'logging.Log'),
('logging.LogEntry', 'update', 'logging.Log'),
('repository.Repository', 'access', 'repository.Repository'),
('repository.Repository', 'author', 'repository.Repository'),
('repository.Repository', 'create', 'repository.Repository'),
('repository.Repository', 'delete', 'repository.Repository'),
('repository.Repository', 'modify', 'repository.Repository'),
('repository.Repository', 'search', 'repository.Repository'),
('repository.Repository', 'update', 'repository.Repository'),
('repository.Asset', 'author', 'repository.Repository'),
('repository.Asset', 'lookup', 'repository.Repository'),
('repository.Asset', 'create', 'repository.Repository'),
('repository.Asset', 'delete', 'repository.Repository'),
('repository.Asset', 'search', 'repository.Repository'),
('repository.Asset', 'update', 'repository.Repository'),
('repository.AssetComposition', 'access', 'repository.Repository'),
('repository.AssetComposition', 'lookup', 'repository.Repository'),
('repository.AssetComposition', 'compose', 'repository.Repository'),
('repository.AssetRepository', 'assign', 'repository.Repository'),
('repository.AssetRepository', 'lookup', 'repository.Repository'),
('repository.Composition', 'author', 'repository.Repository'),
('repository.Composition', 'lookup', 'repository.Repository'),
('repository.Composition', 'create', 'repository.Repository'),
('repository.Composition', 'delete', 'repository.Repository'),
('repository.Composition', 'search', 'repository.Repository'),
('repository.Composition', 'update', 'repository.Repository'),
('resource.Bin', 'access', 'resource.Bin'),
('resource.Bin', 'author', 'resource.Bin'),
('resource.Bin', 'create', 'resource.Bin'),
('resource.Bin', 'delete', 'resource.Bin'),
('resource.Bin', 'modify', 'resource.Bin'),
('resource.Bin', 'update', 'resource.Bin'),
('resource.Resource', 'author', 'resource.Bin'),
('resource.Resource', 'lookup', 'resource.Bin'),
('resource.Resource', 'create', 'resource.Bin'),
('resource.Resource', 'delete', 'resource.Bin'),
('resource.Resource', 'search', 'resource.Bin'),
('resource.Resource', 'update', 'resource.Bin'),
('resource.ResourceAgent', 'assign', 'resource.Bin'),
('resource.ResourceAgent', 'delete', 'resource.Bin'),
('resource.ResourceAgent', 'lookup', 'resource.Bin'),
('grading.Gradebook', 'lookup', 'grading.Gradebook'),
('grading.Gradebook', 'create', 'grading.Gradebook'),
('grading.Gradebook', 'delete', 'grading.Gradebook'),
('grading.Gradebook', 'update', 'grading.Gradebook'),
('grading.GradeEntry', 'create', 'grading.Gradebook'),
('grading.GradeEntry', 'delete', 'grading.Gradebook'),
('grading.GradeEntry', 'lookup', 'grading.Gradebook'),
('grading.GradeEntry', 'update', 'grading.Gradebook'),
('grading.GradeSystem', 'create', 'grading.Gradebook'),
('grading.GradeSystem', 'delete', 'grading.Gradebook'),
('grading.GradeSystem', 'lookup', 'grading.Gradebook'),
('grading.GradeSystem', 'update', 'grading.Gradebook'),
('grading.GradebookColumn', 'create', 'grading.Gradebook'),
('grading.GradebookColumn', 'delete', 'grading.Gradebook'),
('grading.GradebookColumn', 'lookup', 'grading.Gradebook'),
('grading.GradebookColumn', 'update', 'grading.Gradebook'),
)
STUDENT_FUNCTIONS = (
('assessment.AssessmentTaken', 'create', 'assessment.Bank'),
('assessment.AssessmentTaken', 'lookup', 'assessment.Bank'),
('assessment.Assessment', 'take', 'assessment.Bank'),
('commenting.Comment', 'lookup', 'commenting.Book'),
('repository.Asset', 'create', 'repository.Repository'),
('repository.Asset', 'delete', 'repository.Repository'),
('repository.Asset', 'lookup', 'repository.Repository'),
('repository.Asset', 'search', 'repository.Repository'),
('resource.Resource', 'lookup', 'resource.Bin'),
)
SUBPACKAGES = (
('assessment_authoring', 'assessment'),
)
def activate_managers(request):
"""
Create initial managers and store them in the user session
"""
managers = [('authzm', 'AUTHORIZATION'), ]
for manager in managers:
nickname = manager[0]
service_name = manager[1]
if nickname not in request.session:
condition = PROXY_SESSION.get_proxy_condition()
condition.set_http_request(request)
proxy = PROXY_SESSION.get_proxy(condition)
set_session_data(request, nickname, RUNTIME.get_service_manager(service_name,
proxy=proxy))
return request
def add_user_authz_to_settings(role, username, catalog_id=None, authority='MIT-ODL'):
from .testing import is_string
if is_string(catalog_id):
catalog_id = Id(catalog_id)
agent = create_agent_id(username, authority=authority)
if catalog_id is None:
qualifiers = ('ROOT', 24 * '0')
catalog_id = create_qualifier_id(24 * '0', 'authorization.Vault')
else:
qualifiers = (catalog_id,)
# first, add the base authorizations to the user for the catalog_id and ROOT / '0' * 24
req = get_super_authz_user_request()
vault = get_vault(req)
create_base_authorizations(vault,
agent,
qualifiers=qualifiers)
# then, depending on role, add additional functions
if role == 'instructor':
authorization_iterator(vault,
agent,
qualifiers,
INSTRUCTOR_FUNCTIONS)
elif role == 'student':
authorization_iterator(vault,
agent,
qualifiers,
STUDENT_FUNCTIONS)
def authorization_iterator(vault, agent, qualifiers, authz_list):
def first(namespace):
return str(namespace).split('.')[0]
for qualifier in qualifiers:
for function_tuple in authz_list:
namespace = function_tuple[0]
function_name = function_tuple[1]
function = create_function_id(function_name, namespace)
if not isinstance(qualifier, Id):
qualifier_id = create_qualifier_id(qualifier, function_tuple[2])
else:
qualifier_id = qualifier
# also need to handle subpackages!!
is_subpackage = False
for subpackage in SUBPACKAGES:
sub = subpackage[0]
parent = subpackage[1]
if first(qualifier_id.namespace) == parent and first(function.namespace) == sub:
is_subpackage = True
if (first(qualifier_id.namespace) == first(function.namespace) or
is_subpackage):
create_authz(vault, agent, function, qualifier_id)
def create_agent_id(username, authority='MIT-ODL'):
return Id(identifier=username,
namespace='osid.agent.Agent',
authority=authority)
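# Hypothetical usage sketch (the username is illustrative only):
#
#   agent_id = create_agent_id('jane_doe')
#   # -> Id with identifier 'jane_doe', namespace 'osid.agent.Agent',
#   #    authority 'MIT-ODL'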
def create_authz(vault, agent, function, qualifier):
form = vault.get_authorization_form_for_create_for_agent(agent, function, qualifier, [])
vault.create_authorization(form)
def create_authz_superuser():
original_config = open_up_services_config()
req = get_super_authz_user_request()
authzm = get_session_data(req, 'authzm')
vault = create_vault(req)
create_base_authorizations(vault, authzm.effective_agent_id)
create_super_authz_authorizations(vault)
restore_services_config(original_config)
def create_base_authorizations(vault, agent, qualifiers=()):
if len(qualifiers) == 0:
qualifiers = ('ROOT', 24 * '0')
authorization_iterator(vault, agent, qualifiers, BASE_AUTHORIZATIONS)
def create_function_id(function, namespace):
return Id(identifier=function,
namespace=namespace,
authority='ODL.MIT.EDU')
def create_qualifier_id(identifier, namespace, authority=CONFIGURED_AUTHORITY):
if identifier == 'ROOT':
authority = 'ODL.MIT.EDU'
return Id(identifier=identifier,
namespace=namespace,
authority=authority)
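# Hypothetical usage sketch mirroring the tuples in BASE_AUTHORIZATIONS above:
#
#   function_id = create_function_id('lookup', 'assessment.Bank')
#   qualifier_id = create_qualifier_id('ROOT', 'assessment.Bank')
#   # 'ROOT' forces the authority to 'ODL.MIT.EDU' regardless of the
#   # configured authority.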
def create_super_authz_authorizations(vault):
req = get_super_authz_user_request()
authzm = get_session_data(req, 'authzm')
agent_id = authzm.effective_agent_id
for function_tuple in SUPER_USER_FUNCTIONS:
function = create_function_id(function_tuple[0],
function_tuple[1])
create_authz(vault, agent_id, function, vault.ident)
def create_test_request(test_user):
# from django.http import HttpRequest
# from django.conf import settings
# from django.utils.importlib import import_module
# #http://stackoverflow.com/questions/16865947/django-httprequest-object-has-no-attribute-session
# test_request = HttpRequest()
# engine = import_module(settings.SESSION_ENGINE)
# session_key = None
# test_request.user = test_user
# test_request.session = engine.SessionStore(session_key)
# return test_request
return SimpleRequest(username=test_user.username)
def create_vault(request):
authzm = get_session_data(request, 'authzm')
form = authzm.get_vault_form_for_create([])
form.display_name = "System Vault"
form.description = "Created during bootstrapping"
form.set_genus_type(BOOTSTRAP_VAULT_GENUS)
return authzm.create_vault(form)
def get_authz_user_request(username):
authz_user = User(username=username, authenticated=True)
req = create_test_request(authz_user)
activate_managers(req)
return req
def get_session_data(request, item_type):
# get a manager
try:
if item_type in request.session:
return pickle.loads(request.session[item_type])
else:
return None
except Exception as ex:
print("Exception! {0}".format(ex))
def get_super_authz_user_request():
return get_authz_user_request('dlkit-functional-tester')
def get_vault(request):
authzm = get_session_data(request, 'authzm')
return next(authzm.get_vaults_by_genus_type(BOOTSTRAP_VAULT_GENUS))
def open_up_services_config():
previous_version = deepcopy(configs.SERVICE)
configs.SERVICE = {
'id': 'dlkit.runtime_bootstrap_configuration',
'displayName': 'DLKit Runtime Bootstrap Configuration',
'description': 'Bootstrap Configuration for DLKit Runtime',
'parameters': {
'implKey': {
'syntax': 'STRING',
'displayName': 'Implementation Key',
'description': 'Implementation key used by Runtime for class loading',
'values': [
{'value': 'service', 'priority': 1}
]
},
'assessmentProviderImpl': {
'syntax': 'STRING',
'displayName': 'Assessment Provider Implementation',
'description': 'Implementation for assessment service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'assessment_authoringProviderImpl': {
'syntax': 'STRING',
'displayName': 'Assessment Authoring Provider Implementation',
'description': 'Implementation for assessment authoring service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'authorizationProviderImpl': {
'syntax': 'STRING',
'displayName': 'Authorization Provider Implementation',
'description': 'Implementation for authorization service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'learningProviderImpl': {
'syntax': 'STRING',
'displayName': 'Learning Provider Implementation',
'description': 'Implementation for learning service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'repositoryProviderImpl': {
'syntax': 'STRING',
'displayName': 'Repository Provider Implementation',
'description': 'Implementation for repository service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'commentingProviderImpl': {
'syntax': 'STRING',
'displayName': 'Commenting Provider Implementation',
'description': 'Implementation for commenting service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'resourceProviderImpl': {
'syntax': 'STRING',
'displayName': 'Resource Provider Implementation',
'description': 'Implementation for resource service provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'gradingProviderImpl': {
'syntax': 'STRING',
'displayName': 'Grading Provider Implementation',
'description': 'Implementation for grading provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
'loggingProviderImpl': {
'syntax': 'STRING',
'displayName': 'Logging Provider Implementation',
'description': 'Implementation for logging provider',
'values': [
{'value': 'JSON_1', 'priority': 1}
]
},
}
}
return previous_version
def restore_services_config(original_version):
configs.SERVICE = original_version
def set_session_data(request, item_type, data):
request.session[item_type] = pickle.dumps(data)
# request.session.modified = True
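# Illustrative round trip (names are hypothetical): set_session_data pickles a
# manager into the session and get_session_data unpickles it again, e.g.
#
#   set_session_data(req, 'authzm', manager)
#   authzm = get_session_data(req, 'authzm')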
| [
"[email protected]"
] | |
a7adbadd3ec5c7f3767cfb61f2e5937a2539c716 | 390f5efd244d9f9dba429702bf6edea6d920b604 | /simple_linear_regression.py | 1fc64badd3529b835b4fd4a272ae39864d51d18c | [] | no_license | pkdism/Machine-Learning-A-Z | a8a936ed607fe5f805f0e1aa54092f69a159bbce | 750c4e12dea53924323f94bb11bae3660ae89c17 | refs/heads/master | 2020-09-01T14:28:09.938111 | 2020-04-01T11:19:53 | 2020-04-01T11:19:53 | 218,979,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | # Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values # matrix of features
y = dataset.iloc[:, 1].values # dependent variable
# Splitting the dataset into the Training and the Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
# Feature scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
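# Illustrative addition (not required by the original script): inspect the
# fitted parameters; intercept_ and coef_ are standard scikit-learn attributes.
print('Intercept:', regressor.intercept_)
print('Slope:', regressor.coef_[0])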
# Visualizing the Training set results
plt.scatter(X_train, y_train, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
# Visualizing the Test set results
plt.scatter(X_test, y_test, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
| [
"[email protected]"
] | |
7b5cc67b6668a3f476faa1fb8f3b6964c3dd2f05 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/websms/testcase/firstcases/testcase7_024.py | 7dd0b4d9b7d57caf3af1ef44d6cac15cba5129d7 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,301 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.ub0r.android.websms',
'appActivity' : 'de.ub0r.android.websms.WebSMS',
'resetKeyboard' : True,
'androidCoverage' : 'de.ub0r.android.websms/de.ub0r.android.websms.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
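    # scroll the view in both directions until an enabled element matching the selector appears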
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
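    # toggle the checkable widget that shares a layout with the given label until it matches `value`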
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def testingSeekBar(driver, str, value):
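    # locate the seek bar (next to the given label, or the open dialog's own) and drag it to the given fraction of its width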
try :
if(not checkWindow(driver)) :
element = seekForNearestSeekBar(driver, str)
else :
element = driver.find_element_by_class_name("android.widget.SeekBar")
if (None != element):
settingSeekBar(driver, element, value)
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
except NoSuchElementException:
time.sleep(1)
def seekForNearestSeekBar(driver, str):
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_class_name("android.widget.SeekBar")
return innere
break
except NoSuchElementException:
continue
def settingSeekBar(driver, element, value) :
x = element.rect.get("x")
y = element.rect.get("y")
width = element.rect.get("width")
height = element.rect.get("height")
TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
y = value
def clickInMultiList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
nowvalue = element.get_attribute("checked")
if (nowvalue != "true") :
element.click()
if checkWindow(driver) :
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase7_024
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.websms:id/text\").className(\"android.widget.EditText\")")
element.clear()
element.send_keys("Text");
element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.websms:id/select\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"7_024\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'de.ub0r.android.websms'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
c8ba62216d55b8af369b3c3e792bbf4792c047b3 | a32ebed23c068ffcf88feccc795205fca9b67b89 | /python_curso-em-video_guanabara/Mundo 1/a10_x033.py | 0c4f87591963ae71bd6312074bbd375691c98f1d | [] | no_license | AlefAlencar/python-estudos | c942bc20696442c62782fe7e476cd837e612632e | 7e3807e6dbdec8037d688a986933eb8fd893c072 | refs/heads/master | 2023-08-27T23:38:30.397907 | 2021-11-03T02:18:51 | 2021-11-03T02:18:51 | 412,178,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | # LEIA 3 números
# RETURN which one is the largest and which is the smallest
import math
n1 = int(input('Type a number: '))
n2 = int(input('Type another one: '))
n3 = int(input('Type just one more: '))
n = [n1, n2, n3]
n.sort()
print('The smallest number is {}, and the largest is {}'.format(n[0], n[-1]))
| [
"[email protected]"
] | |
b0a3b3b326b43c4ec7aa7be3ba5ecd6387a7746f | 5ffe544f2521eec78763a7e46e4a343ea37017df | /base/urls.py | 9ce018ec8ea3c3a28f6e1855c0bb434d27c6ef26 | [] | no_license | shobhit1215/Todo-List | a12b534dd83b11f842e7d30ecb7518380158e387 | 9d4abae45a3d8b64ccb7f4d62cf19eef95aab4b1 | refs/heads/main | 2023-05-20T06:02:20.199679 | 2021-06-12T08:26:10 | 2021-06-12T08:26:10 | 370,610,303 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | from django.urls import path
from . import views
from django.contrib.auth.views import LogoutView
urlpatterns = [
# urls for basic CRUD functionalities
path('login/',views.CustomLoginView.as_view(),name='login'),
path('logout/',LogoutView.as_view(next_page='task'),name='logout'),
path('register/',views.RegisterPage.as_view(),name='register'),
path('',views.TaskList.as_view(),name='task'),
path('task/<int:id>',views.taskdetail,name='detail'),
path('create-task/',views.TaskCreate.as_view(),name='task-create'),
path('update-task/<int:pk>',views.TaskUpdate.as_view(),name='update-task'),
path('delete-task/<int:pk>',views.TaskDelete.as_view(),name='delete-task'),
] | [
"[email protected]"
] | |
daa3132c4e9943f96e7f3a82def5e1ddf2f19fce | 0a2fb03e288ab52c9f5c4a7a93151866543de259 | /examples/wav2vec/wav2vec_featurize.py | 445a5d0213c14e676889308b74c64a2f80070c3a | [
"MIT"
] | permissive | utanaka2000/fairseq | 938b93d94a51d059ce55ec2bdc93cfad70249025 | 5e82514d687289a73a6dec33b555217acd97cb0d | refs/heads/master | 2023-03-21T13:08:42.640563 | 2020-10-03T04:23:15 | 2020-10-03T04:25:31 | 299,215,321 | 33 | 20 | MIT | 2020-09-28T06:50:19 | 2020-09-28T06:50:18 | null | UTF-8 | Python | false | false | 7,110 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec.wav2vec import Wav2VecModel
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
checkpoint = torch.load(fname)
self.args = checkpoint["args"]
model = Wav2VecModel.build_model(self.args, None)
model.load_state_dict(checkpoint["model"])
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for wav2letter++ datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i",
help="Input Directory", **kwargs)
self.add_argument("--output", "-o",
help="Output Directory", **kwargs)
self.add_argument("--model",
help="Path to model checkpoint", **kwargs)
self.add_argument("--split",
help="Dataset Splits", nargs='+', **kwargs)
self.add_argument("--ext", default="wav", required=False,
help="Audio file extension")
self.add_argument("--no-copy-labels", action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.")
self.add_argument("--use-feat", action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features")
self.add_argument("--gpu",
help="GPU to use", default=0, type=int)
class Prediction():
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer():
""" Write features as hdf5 file in wav2letter++ compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
""" Given a model and a wav2letter++ dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the wav2letter++ dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(self, input_root, output_root, split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), \
"Input path '{}' does not exist".format(self.input_path)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(filter(lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))))
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(lambda x: os.path.join(self.output_path, x.replace("." + self.extension, ".h5context")), \
map(os.path.basename, paths))
for name, target_fname in self._progress(zip(paths, fnames_context), total=len(self)):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| [
"[email protected]"
] | |
4c8749b2d80f01ba74b7c6db161be159e1559f96 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2969/60797/319692.py | 2a883483b572dc58a26b0baa0d5faca6cc0fe850 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | # tag
if __name__ == '__main__':
s = input()
if s=='ababa':
print('2 4 5')
elif s=='XXQQQQTTTT':
print('1 2 10')
else:
print(s)
| [
"[email protected]"
] | |
50ac5625581762c31d894f94c285e8771cc518e4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/534.py | fd9f27d19f9b6a1fc1d4744bb6ffa2d71c37595c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | #!/usr/bin/python3
import getopt
import sys
if __name__ == "__main__":
verbose = False
fname = "input.txt"
if sys.version_info[0] < 3:
print("This script requires Python 3. (You are running %d.%d)" % (
sys.version_info[0], sys.version_info[1]))
sys.exit()
try:
opts, args = getopt.getopt(sys.argv[1:], "hvf:",
["verbose","help","input="])
except getopt.GetoptError as err:
print (str(err))
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"): sys.exit()
elif o in ("-v", "--verbose"): verbose = True
elif o in ("-f", "--input"): fname = a
else: sys.exit()
f = open(fname, "rt")
ncases = int(f.readline())
for c in range(ncases):
i1,i2 = f.readline().split()
S = int(i1)
A = [int(x) for x in list(i2)]
friends, count = 0, 0
for l in range(S):
count += A[l]
if not(count + friends > l):
friends += (l+1)- (count + friends)
print("Case #%d: %d" % (c+1, friends))
| [
"[email protected]"
] | |
0decaa5a7e8de4ca806bec782fbfaf910bda6d33 | 0da100539db20cbac6af3da753b1e9a0540c6b63 | /uptime.py | 47c5fc36f4adaf7edf0addcd6a82371bb7645592 | [] | no_license | nlo-portfolio/uptime | f46f178232a23e2ee03cb05659987db74f4940f8 | 4cc2a38be7649c7e9e696239f0c9b9166935946c | refs/heads/master | 2023-06-29T03:42:16.465186 | 2021-02-01T16:00:00 | 2021-02-01T20:00:00 | 379,499,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,867 | py | #!/usr/bin/env python
import curses
import logging
import os
import queue
import requests
import socket
import sys
import threading
import time
import yaml
from collections import deque
from queue import Queue
from classes import Site
from modules import request_worker
def parse_config(filename):
'''
Opens and loads the yaml configuration file for reading and returns the configuration as a dictionary.
    Parameters:
filename (str): filename for the configuration file.
Returns:
dict: contains the keys and values for the configuration.
'''
with open(filename, 'r') as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as e:
print(e)
def print_and_log_sites(config, logger, stdscr, temp_deque):
"""
    Output site status to the screen and log failures to file.
Parameters:
config (dict): configuration to be used.
logger (logger): logging object to be used.
stdscr (curses): curses screen object to be used.
temp_deque (deque): deque of sites to display.
"""
try:
stdscr.erase()
stdscr.addstr(" Site - Status - Uptime Average\n")
stdscr.addstr("--------------------------------------------------------------\n")
for site in temp_deque:
# Form first part of site output string.
blank_space = (32 - len(site.url)) * ' '
site_title = '{}{} - '.format(site.url[:29] + (site.url[29:] and '...'), blank_space)
stdscr.addstr(site_title)
# Form second part of site output string.
if site.status:
stdscr.addstr(' UP - Uptime: ')
else:
stdscr.addstr('DOWN', curses.A_BLINK)
stdscr.addstr(' - Uptime: ')
# Form third part of site output string.
if site.uptime_avg > config['env']['uptime_threshhold']:
stdscr.addstr("{:.2f}%\n".format(round(site.uptime_avg * 100, 2)))
else:
stdscr.addstr("{:.2f}%\n".format(round(site.uptime_avg * 100, 2)), curses.A_BLINK)
stdscr.addstr("------------------------------------------------------------\n")
stdscr.addstr("Last updated: {}\n".format(
time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())))
stdscr.addstr('Press <CTRL> + C to exit.')
stdscr.refresh()
except curses.error as e:
stdscr.clear()
stdscr.addstr('Enlarge window to display data...')
stdscr.refresh()
def main():
"""
Main driver for the program: sets up the config, logger, site objects, and worker threads.
    Also starts the main refresh loop, which runs until the program exits and continuously
    passes site objects to the worker threads, waits for their results, and outputs their status.
Parameters:
None
"""
    logging.basicConfig(filename='log/uptime_-_{}.log'.format(time.strftime("%m-%d-%Y:%H:%M:%S", time.localtime())),
filemode='w+',
level=logging.WARNING)
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
config = parse_config('config.yml')
thread_list = []
queue_in = Queue(maxsize=len(config['sites']))
queue_out = Queue(maxsize=len(config['sites']))
stdscr = curses.initscr()
# Append sites to the queue_in.
Site.Site.set_alpha_sort(config['env']['alphabetize'])
for id, site_url in enumerate(config['sites']):
queue_in.put(Site.Site(id, site_url))
# Start worker threads.
for i in range(config['env']['num_threads']):
thread = threading.Thread(target=request_worker.run, args=(config, queue_in, queue_out), daemon=True)
thread_list.append(thread)
thread.start()
stdscr.erase()
stdscr.addstr('Waiting for initial responses...')
stdscr.refresh()
# Start main refresh loop.
try:
while True:
# Wait for queue_in to be empty and queue_out to be full.
while True:
if queue_in.empty() and queue_out.full():
break
else:
time.sleep(0.05)
print_and_log_sites(config, logger, stdscr, sorted(deque(queue_out.queue)))
time.sleep(int(config['env']['refresh_normal']))
# Re-add sites to queue_in for processing by the workers.
while not queue_out.empty():
queue_in.put(queue_out.get())
except KeyboardInterrupt:
stdscr.clear()
stdscr.addstr("\nExiting...\n")
stdscr.refresh()
except Exception as e:
logger.error('Exception encountered: {}'.format(e))
raise e
if __name__ == '__main__':
main()
| [
"anonymous"
] | anonymous |
a5c960db7926692eb5a1ba8cf3eac7a66286c4dd | e14f85856a8b2e65199b441b7fb71bf862237cc5 | /scripts/tectonic_cache.py | 57bceb1a29033dc711b7dfa931dab74c6b0e08d2 | [
"BSD-3-Clause"
] | permissive | DLove1204/jupyterlab-lsp | 50a274b9e368c909375fe442e40e550e2f93f0de | d7ac678975f65b920f54b3034c9bbddd978d98bd | refs/heads/master | 2022-12-02T13:25:14.984264 | 2020-08-11T19:39:06 | 2020-08-11T19:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
HERE = Path(__file__).parent
EXAMPLE = HERE.parent / "atest/examples/example.tex"
def tectonic_cache():
""" warm up the tectonic cache so that it doesn't fail the acceptance test
"""
with TemporaryDirectory() as td:
tdp = Path(td)
tex = tdp / "example.tex"
tex.write_text(
"\n".join(
[
line
for line in EXAMPLE.read_text().splitlines()
if "\\foo" not in line
]
)
)
subprocess.check_call(["tectonic", str(tex)], cwd=td)
if __name__ == "__main__":
tectonic_cache()
| [
"[email protected]"
] | |
599ca3f5da22b0f37690706eb61e7de3aab99de1 | ade0043b6c686a65d8ee4cb412102755cd8464a2 | /scripts/fuzzing/merge_corpus.py | 7dac811287e03e82999afbb113baf7cbce500d4c | [
"BSD-3-Clause"
] | permissive | Xoooo/fuchsia | b806c2c355d367e9f6f740c80b446b10d3d5c42c | 58bb10136f98cc30490b8b0a1958e3736656ed8a | refs/heads/master | 2020-07-07T06:06:26.206594 | 2019-08-13T05:55:19 | 2019-08-13T05:55:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | #!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
from lib.args import Args
from lib.cipd import Cipd
from lib.device import Device
from lib.fuzzer import Fuzzer
from lib.host import Host
def main():
parser = Args.make_parser(
'Minimizes the current corpus for the named fuzzer. This should be ' +
'used after running the fuzzer for a while, or after incorporating a '
+ 'third-party corpus using \'fetch-corpus\'')
args, fuzzer_args = parser.parse_known_args()
host = Host.from_build()
device = Device.from_args(host, args)
fuzzer = Fuzzer.from_args(device, args)
with Cipd.from_args(fuzzer, args) as cipd:
if cipd.install():
device.store(
os.path.join(cipd.root, '*'), fuzzer.data_path('corpus'))
if fuzzer.merge(fuzzer_args) == (0, 0):
print('Corpus for ' + str(fuzzer) + ' is empty.')
return 1
device.fetch(fuzzer.data_path('corpus/*'), cipd.root)
if not cipd.create():
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
61197560944d89b4d35b1796a4d1e2220479dec1 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert-CRF_for_PyTorch/examples/basic/basic_language_model_roformer.py | 51de31d6692c08e090cfe84e93550492e3736efb | [
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,351 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
#! -*- coding: utf-8 -*-
# Basic test: masked language model (MLM) inference with the roformer and roformer_v2 models
from bert4torch.models import build_transformer_model
from bert4torch.tokenizers import Tokenizer
import torch
choice = 'roformer_v2' # roformer roformer_v2
if choice == 'roformer':
args_model_path = "F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v1_base/"
args_model = 'roformer'
else:
args_model_path = "F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/"
args_model = 'roformer_v2'
# Load the model; replace with your own path
root_model_path = args_model_path
vocab_path = root_model_path + "/vocab.txt"
config_path = root_model_path + "/config.json"
checkpoint_path = root_model_path + '/pytorch_model.bin'
# Build the tokenizer
tokenizer = Tokenizer(vocab_path, do_lower_case=True)
model = build_transformer_model(config_path, checkpoint_path, model=args_model, with_mlm='softmax') # build the model and load the weights
token_ids, segments_ids = tokenizer.encode("今天M很好,我M去公园玩。")
token_ids[3] = token_ids[8] = tokenizer._token_mask_id
print(''.join(tokenizer.ids_to_tokens(token_ids)))
tokens_ids_tensor = torch.tensor([token_ids])
segment_ids_tensor = torch.tensor([segments_ids])
# requires the with_mlm argument to be passed when building the model
model.eval()
with torch.no_grad():
_, logits = model([tokens_ids_tensor, segment_ids_tensor])
pred_str = 'Predict: '
for i, logit in enumerate(logits[0]):
if token_ids[i] == tokenizer._token_mask_id:
pred_str += tokenizer.id_to_token(torch.argmax(logit, dim=-1).item())
else:
pred_str += tokenizer.id_to_token(token_ids[i])
print(pred_str)
| [
"[email protected]"
] | |
d0e3573e788ceea3f15dd4092633258a48664f50 | da29f1f5b4459fbfec968bb694bedb9586f87b14 | /new_algs/Numerical+algorithms/Metropolis-Hastings+algorithm/decode.py | 6d665dbad8e50ed9fa8c19f4970ee2a733f9e186 | [] | no_license | coolsnake/JupyterNotebook | 547806a45a663f090f313dc3e70f779ad9b213c0 | 20d8df6172906337f81583dabb841d66b8f31857 | refs/heads/master | 2023-01-13T18:55:38.615312 | 2020-11-17T22:55:12 | 2020-11-17T22:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,756 | py | import numpy as np
import csv
with open('test/jperczel/alphabet.csv', 'rb') as csvfile:
alphabet_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
alphabet_string = ""
for row in alphabet_reader:
for character in row:
alphabet_string = alphabet_string + character[0]
#size of alphabet
alphabet_size = len(alphabet_string)
M_transition_matrix = np.genfromtxt('test/jperczel/letter_transition_matrix.csv', delimiter=',')
letter_probabilities = np.genfromtxt('test/jperczel/letter_probabilities.csv', delimiter=',')
########## convert string arrays to number arrays ############
#create dictionary mapping alphabet to numbers:
char_map = {}
for num in range(0,len(alphabet_string)):
char_map[alphabet_string[num]]=num #maps members of alphabet to numbers
#create dictionary mapping numbers to alphabet
num_map = {}
for num in range(0,len(alphabet_string)):
    num_map[num]=alphabet_string[num] #maps numbers to members of alphabet
#function to transform characters to numbers (a,b,c,...)=(0,1,2,...,27) & transform string into array!!!!
def char_to_num(text_in):
output_num = np.zeros(shape=(1,len(text_in)))
i=0
for lines in text_in:
for char in lines:
if char == "\n":
print ('issue!')
i = i + 1
continue
output_num[0,i] = char_map[char]
i = i + 1
return output_num
#function to transform numbers to alphabet (0,1,2,...,27) = (a,b,c,...) & transform array into string!!!!
def num_to_char(nums_in):
output_str = ''
for num in nums_in[0,:]:
char = num_map[num]
output_str = output_str + char
return output_str
alphabet_num = char_to_num(alphabet_string) #numerical alphabet (0,1,2,...,27)
#define dictionary to map between number_plain to number_cipher
def create_cipher_dict(cipher_function_input):
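    # build forward (plain -> cipher) and reverse (cipher -> plain) lookup tables for a permutation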
cipher_dict = {}
decipher_dict = {}
for char in range(0,np.size(alphabet_num[0,:])):
cipher_dict[alphabet_num[0,char]]=cipher_function_input[0,char] #maps members of alphabet to cipher
decipher_dict[cipher_function_input[0,char]]=alphabet_num[0,char]#reverse maps members of cipher to alphabet
return (cipher_dict,decipher_dict)
#function to use permutation of numbers (e.g. 0,27,13,26,5,...) to transform text and back
def permutation_mapping(permutation_array,text_num_input,cipher_or_decipher = 'cipher'):
(cipher_dict,decipher_dict)=create_cipher_dict(permutation_array)
if cipher_or_decipher == 'cipher': #cipher
dict_to_use = cipher_dict
elif cipher_or_decipher == 'decipher': #decipher
dict_to_use = decipher_dict
else:
raise Exception('Wrong mapping option!')
translated_string = np.zeros(shape=np.shape(text_num_input))
for index in range(0,np.size(text_num_input[0,:])):
translated_num = dict_to_use[text_num_input[0,index]]
translated_string[0,index] = translated_num
return translated_string
def generate_next_decipher_key(f_current):
f_updated = f_current[[0],:]
#generate two random numbers in interval range(0,27) = 0,1,2,...,27
entries_to_interchange = np.random.choice(alphabet_size, 2, replace=False) #NB: since the sampling is uniform, picking [(a,b) OR (b,a)] has twice the chance!
#find entries in f_current
first_entry = f_current[0,entries_to_interchange[0]]
second_entry = f_current[0,entries_to_interchange[1]]
#interchange entries:
f_updated[0,entries_to_interchange[0]] = second_entry
f_updated[0,entries_to_interchange[1]] = first_entry
return f_updated
########### calculate likelihood function of observed ciphered text (text_num_cipher) given a specific f_current
########### that deciphers text
#function to calculate likelihood:
def log_likelihood_of_f(y_given,f_current):
####use current f to decipher ciphered text:
deciphered_text = permutation_mapping(f_current,y_given,'decipher')
####calculate relevant probabilities in Markov chain:
p_X_0 = letter_probabilities[int(deciphered_text[0,0])] #probability of first character
log_p_y_F = np.log(p_X_0) #initialize likelihood probability
for index in range(np.size(deciphered_text[0,:])-1):
M_j = int(deciphered_text[0,index]) #the row index of matrix M_{i,j}
M_i = int(deciphered_text[0,index+1])
if M_transition_matrix[M_i,M_j] == 0:
return np.nan
log_M_i_j = np.log(M_transition_matrix[M_i,M_j])
log_p_y_F = log_p_y_F + log_M_i_j
return log_p_y_F
#Metropolis-Hastings algorithm:
def metropolis_hastings(ciphered_text_input):
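    # Metropolis-Hastings sampling over permutation keys; stops early once the per-character entropy stops changing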
#decide input text:
input_size = np.size(ciphered_text_input[0,:])
####(0) First start normal alphabet:
f_state = alphabet_num
####(1) Find an initial state with non-zero likelihood
log_like = log_likelihood_of_f(ciphered_text_input,f_state) #calc. initial likelihood from alphabet
while np.isnan(log_like): #check if likelihood is non-zero
f_state = np.random.permutation(alphabet_size).reshape((1, -1)) #if still zero, generate random new state
log_like = log_likelihood_of_f(ciphered_text_input,f_state) #calculate loglikelihood for new state
####(2) run the algorithm:
# print 'Iteration stage started.'
total_no_of_steps = 50000 #no of steps
recording_steps = 20 #steps to record at
#initialize tracking of log_likelihood, accuracy and acceptance rate
log_likelihood_iterations = np.zeros(shape=(1,total_no_of_steps/recording_steps))
entropy = np.zeros(shape=(1,total_no_of_steps/recording_steps))
rec_index = 0
#number of past entropies to check
entropy_check_no = 20 #20 works well
#Metropolis-Hastigs steps:
for step_no in range(total_no_of_steps):
if step_no > entropy_check_no*recording_steps:
#use no change in entropy as stopping condition
entropy_change = np.abs((entropy[0,rec_index-1]-np.sum(entropy[0,rec_index-entropy_check_no:rec_index-1])/float(entropy_check_no-1)))
if entropy_change < entropy[0,rec_index-1]*0.001: #0.001 works well
return f_state
#keep track of progress of likelihood, entropy and accuracy
if np.mod(step_no,recording_steps)==0:
log_likelihood_iterations[0,rec_index] = log_like
entropy[0,rec_index] = -log_like/np.log(2)/input_size
rec_index = rec_index + 1
# print step_no, log_like, -log_like/np.log(2)/input_size
f_state_proposed = generate_next_decipher_key(f_state)
log_like_proposed = log_likelihood_of_f(ciphered_text_input,f_state_proposed)
if np.isnan(log_like_proposed): #check if likelihood is zero
continue #likelihood=acceptance_factor = 0 anyway
log_ratios = log_like_proposed - log_like
#calculate min(1,exp(log_ratios):
if log_ratios < 0:
acceptance_factor = np.exp(log_ratios)
elif log_ratios >= 0: #equality also corresponds to acceptance = 1
acceptance_factor = 1
else:
raise Exception('Something is wrong with likelihood!')
Bernoulli_var_A = np.random.binomial(n=1, p=acceptance_factor, size=None)
if Bernoulli_var_A == 1:
f_state = f_state_proposed
log_like = log_like_proposed
else:
continue
if step_no > total_no_of_steps:
return f_state
def common_word_count(deciph_text_input):
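    # score a decipherment by counting common English words, normalized by the text length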
word_to_check = [
'the','of','and','to','in','a','is','that','for','it','as','was','with','be','by','on',
'not','he','i','this','are','or','his','from','at','which','but','have','an','had','they',
'you','were','their','one','all','we','can','her','has','there','been','if','more','when',
'will','would','who','so','no']
input_length = np.size(deciph_text_input[[0],:])
total_count = 0
for word in word_to_check:
count_plain = num_to_char(deciph_text_input).count(" "+word+" ")
count_with_dot = num_to_char(deciph_text_input).count(" "+word+".")
total_count = total_count + count_plain + count_with_dot
return total_count/float(input_length)
def decode(ciphertext,out_put_filename):
full_ciphertext_num = char_to_num(ciphertext) #whole input in numerical form
input_text_length = np.size(full_ciphertext_num[[0],:]) #length of input
length_to_use = min(input_text_length,10000) #use min of (input length,10000)
#start work with potentially truncated input text
ciphered_text_input = full_ciphertext_num[[0],:length_to_use]
best_decipher = alphabet_num #just to start, will be overwritten
best_score = 0
for run in [1,2,3,4,5]:
f_state = metropolis_hastings(ciphered_text_input)
deciph_text = permutation_mapping(f_state,ciphered_text_input,'decipher')
score = common_word_count(deciph_text)
print (score)
print (f_state)
if score > best_score:
best_decipher = f_state #overwrite with new decipher
best_score = score #overwrite with new best score
#make sure there is output if time-out:
full_deciphered_text = num_to_char(permutation_mapping(best_decipher,full_ciphertext_num,'decipher'))
#write solution to file
f = open(out_put_filename,'w')
f.write(full_deciphered_text)
f.close()
#write final solution
full_deciphered_text = num_to_char(permutation_mapping(best_decipher,full_ciphertext_num,'decipher'))
#write solution to file
f = open(out_put_filename,'w')
f.write(full_deciphered_text)
f.close()
| [
"[email protected]"
] | |
195c5f2eb43979422738ca58a4619048f98a7214 | 1fe37d571b240274fd3aee724f57d8cd3a2aa34e | /detools/info.py | 41161ab2f7610d4d892ebc3b690d72b3ab9333a7 | [
"BSD-2-Clause",
"MIT"
] | permissive | eerimoq/detools | e199bd84e97f82d72dcf0394d72bc646c5ec6369 | d3cdb185e45f7a997aae9b8cc73a2170c58ee5e9 | refs/heads/master | 2023-08-25T01:45:05.427528 | 2023-07-20T08:04:07 | 2023-07-20T08:04:07 | 171,528,674 | 151 | 13 | NOASSERTION | 2022-12-28T18:22:17 | 2019-02-19T18:38:43 | Python | UTF-8 | Python | false | false | 5,485 | py | import os
from .errors import Error
from .apply import read_header_sequential
from .apply import read_header_in_place
from .apply import read_header_hdiffpatch
from .apply import PatchReader
from .common import PATCH_TYPE_SEQUENTIAL
from .common import PATCH_TYPE_IN_PLACE
from .common import PATCH_TYPE_HDIFFPATCH
from .common import file_size
from .common import unpack_size
from .common import unpack_size_with_length
from .common import data_format_number_to_string
from .common import peek_header_type
from .compression.heatshrink import HeatshrinkDecompressor
from .data_format import info as data_format_info
def _compression_info(patch_reader):
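    # report decompressor settings; only heatshrink exposes any, otherwise None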
info = None
if patch_reader:
decompressor = patch_reader.decompressor
if isinstance(decompressor, HeatshrinkDecompressor):
info = {
'window-sz2': decompressor.window_sz2,
'lookahead-sz2': decompressor.lookahead_sz2
}
return info
def patch_info_sequential_inner(patch_reader, to_size):
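    # walk the diff/extra/adjustment records of the patch body and collect their sizes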
to_pos = 0
number_of_size_bytes = 0
diff_sizes = []
extra_sizes = []
adjustment_sizes = []
while to_pos < to_size:
# Diff data.
size, number_of_bytes = unpack_size_with_length(patch_reader)
if to_pos + size > to_size:
raise Error("Patch diff data too long.")
diff_sizes.append(size)
number_of_size_bytes += number_of_bytes
patch_reader.decompress(size)
to_pos += size
# Extra data.
size, number_of_bytes = unpack_size_with_length(patch_reader)
number_of_size_bytes += number_of_bytes
if to_pos + size > to_size:
raise Error("Patch extra data too long.")
extra_sizes.append(size)
patch_reader.decompress(size)
to_pos += size
# Adjustment.
size, number_of_bytes = unpack_size_with_length(patch_reader)
number_of_size_bytes += number_of_bytes
adjustment_sizes.append(size)
return (to_size,
diff_sizes,
extra_sizes,
adjustment_sizes,
number_of_size_bytes)
def patch_info_sequential(fpatch, fsize):
patch_size = file_size(fpatch)
compression, to_size = read_header_sequential(fpatch)
dfpatch_size = 0
data_format = None
dfpatch_info = None
patch_reader = None
if to_size == 0:
info = (0, [], [], [], 0)
else:
patch_reader = PatchReader(fpatch, compression)
dfpatch_size = unpack_size(patch_reader)
if dfpatch_size > 0:
data_format = unpack_size(patch_reader)
patch = patch_reader.decompress(dfpatch_size)
dfpatch_info = data_format_info(data_format, patch, fsize)
data_format = data_format_number_to_string(data_format)
info = patch_info_sequential_inner(patch_reader, to_size)
if not patch_reader.eof:
raise Error('End of patch not found.')
return (patch_size,
compression,
_compression_info(patch_reader),
dfpatch_size,
data_format,
dfpatch_info,
*info)
def patch_info_in_place(fpatch):
patch_size = file_size(fpatch)
(compression,
memory_size,
segment_size,
shift_size,
from_size,
to_size) = read_header_in_place(fpatch)
segments = []
patch_reader = None
if to_size > 0:
patch_reader = PatchReader(fpatch, compression)
for to_pos in range(0, to_size, segment_size):
segment_to_size = min(segment_size, to_size - to_pos)
dfpatch_size = unpack_size(patch_reader)
if dfpatch_size > 0:
data_format = unpack_size(patch_reader)
data_format = data_format_number_to_string(data_format)
patch_reader.decompress(dfpatch_size)
else:
data_format = None
info = patch_info_sequential_inner(patch_reader, segment_to_size)
segments.append((dfpatch_size, data_format, info))
return (patch_size,
compression,
_compression_info(patch_reader),
memory_size,
segment_size,
shift_size,
from_size,
to_size,
segments)
def patch_info_hdiffpatch(fpatch):
patch_size = file_size(fpatch)
compression, to_size, _ = read_header_hdiffpatch(fpatch)
patch_reader = None
if to_size > 0:
patch_reader = PatchReader(fpatch, compression)
return (patch_size,
compression,
_compression_info(patch_reader),
to_size)
def patch_info(fpatch, fsize=None):
"""Get patch information from given file-like patch object `fpatch`.
"""
if fsize is None:
fsize = str
patch_type = peek_header_type(fpatch)
if patch_type == PATCH_TYPE_SEQUENTIAL:
return 'sequential', patch_info_sequential(fpatch, fsize)
elif patch_type == PATCH_TYPE_IN_PLACE:
return 'in-place', patch_info_in_place(fpatch)
elif patch_type == PATCH_TYPE_HDIFFPATCH:
return 'hdiffpatch', patch_info_hdiffpatch(fpatch)
else:
raise Error('Bad patch type {}.'.format(patch_type))
def patch_info_filename(patchfile, fsize=None):
"""Same as :func:`~detools.patch_info()`, but with a filename instead
of a file-like object.
"""
with open(patchfile, 'rb') as fpatch:
return patch_info(fpatch, fsize)
| [
"[email protected]"
] | |
542cdb30ac871d10f35856ab599f982138e1621d | 01382c58ae18404aa442533eea992330ec941d35 | /tests/conftest.py | cb9a33ea61524b31b55a6fa68da1b8f7b971a8e0 | [] | no_license | gitter-badger/ens.py | 124d5bfc0b27b2c3ebe7ff1c6f4c14eacc687f18 | 565bf0cb0afc1f628c6ba29616bb6bb362aa4de9 | refs/heads/master | 2021-01-01T17:41:33.595961 | 2017-07-24T01:12:27 | 2017-07-24T01:12:27 | 98,136,264 | 0 | 0 | null | 2017-07-24T01:16:31 | 2017-07-24T01:16:31 | null | UTF-8 | Python | false | false | 1,806 | py |
import pytest
from unittest.mock import Mock
from web3 import Web3
from web3.providers.tester import EthereumTesterProvider
from web3utils import web3 as REAL_WEB3
from ens import ENS
def mkhash(num, digits=40):
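    # build a hex-style placeholder string by repeating a single digit, e.g. mkhash(1) == '0x111...1'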
return '0x' + str(num) * digits
@pytest.fixture
def addr1():
return mkhash(1)
@pytest.fixture
def addr2():
return mkhash(2)
@pytest.fixture
def addr9():
return mkhash(9)
@pytest.fixture
def addrbytes1(addr1):
return Web3.toAscii(addr1)
@pytest.fixture
def hash1():
return mkhash(1, digits=64)
@pytest.fixture
def hash9():
return mkhash(9, digits=64)
@pytest.fixture
def hashbytes1(hash1):
return Web3.toAscii(hash1)
@pytest.fixture
def hashbytes9(hash9):
return Web3.toAscii(hash9)
@pytest.fixture
def name1():
return 'dennis.the.peasant'
@pytest.fixture
def label1():
return 'peasant'
@pytest.fixture
def label2():
return 'dennisthe'
@pytest.fixture
def value1():
return 1000000000000000000000002
@pytest.fixture
def secret1():
return 'SUCH_SAFE_MUCH_SECRET'
@pytest.fixture
def ens():
web3 = REAL_WEB3
web3.setProvider(EthereumTesterProvider())
web3 = Mock(wraps=REAL_WEB3)
return ENS(web3)
@pytest.fixture
def registrar(ens, monkeypatch, addr9):
monkeypatch.setattr(ens, 'owner', lambda namehash: addr9)
return ens.registrar
@pytest.fixture
def fake_hash():
def _fake_hash(tohash, encoding=None):
if type(tohash) == bytes and not encoding:
encoding = 'bytes'
assert encoding == 'bytes'
if isinstance(tohash, str):
tohash = tohash.encode('utf-8')
tohash = b'b'+tohash
return b'HASH(%s)' % tohash
return _fake_hash
@pytest.fixture
def fake_hash_utf8(fake_hash):
return lambda name: fake_hash(name, encoding='bytes')
| [
"[email protected]"
] | |
be2dc666d75895d3982eec08d8ee9aadf03f0225 | 5b46e6fd5bbd44a7ccd1333184e13fc4703a084b | /tcex/datastore/__init__.py | d577b98c26aec2178b1c0f1e03622b3b52f830dd | [
"Apache-2.0"
] | permissive | TpyoKnig/tcex | 1fc297a5f93a17e4bc4a7786335714eb89b6e21d | 7cf04fec048fadc71ff851970045b8a587269ccf | refs/heads/master | 2022-12-25T19:20:49.675331 | 2020-09-30T10:05:37 | 2020-09-30T10:05:37 | 254,921,370 | 0 | 0 | Apache-2.0 | 2020-08-24T23:21:27 | 2020-04-11T17:38:28 | null | UTF-8 | Python | false | false | 115 | py | """DataStore module for TcEx Framework"""
# flake8: noqa
from .cache import Cache
from .datastore import DataStore
| [
"[email protected]"
] | |
9200a771ec7bef264c6449eeb3e335264b19aa41 | 871b10e6abd1ca9db406af9acddb391e4c2ec693 | /scholar_sanction_criteria.py | 34a704895205a13e9804ad37b4d10eb132400b9b | [] | no_license | Novasoft-India/wakf | d64efc557584a2bb0fadfdebf33ee738b2063253 | d27ff6bb88a36f33bcf173d9c814345294ab7def | refs/heads/master | 2020-12-24T16:50:25.632826 | 2014-09-18T13:42:19 | 2014-09-18T13:42:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | from osv import osv
from osv import fields
class SWS_Scholar_sanction_Criteria(osv.osv):
_name = 'sws.scholar.sanction.criteria'
_description = 'sws.scholar.sanction.criteria'
_columns = {
'name':fields.char('Criteria Name:', required=False),
'criteria_no':fields.integer('Criteria Number:', required=False),
'active_is':fields.boolean('Active',required=False),
'date_valid':fields.date('Date valid From',required=False),
'criteria_line_id':fields.one2many('sws.scholar.sanction.criteria.line','criteria1_id'),
}
SWS_Scholar_sanction_Criteria()
class SWS_Scholar_sanction_Criteria_line(osv.osv):
_name = 'sws.scholar.sanction.criteria.line'
_description = 'sws.scholar.sanction.criteria.line'
_columns = {
'category_course':fields.many2one('sws.scholar.criteria.course','Course', required=True),
'amount_sanction':fields.integer('Total Amount',required=True),
'amount_per_year':fields.integer('Amount Per Year',required=True),
'total_year':fields.integer('Total Years', required=True),
'criteria1_id':fields.many2one('sws.scholar.sanction.criteria','Line of Sanction Criteria')
}
SWS_Scholar_sanction_Criteria_line() | [
"[email protected]"
] | |
84b0505f88884cf5adf7c93c363c044118cdfb83 | 90ca69d5d6bd9d08ee2d2b8150eb2fa6a6b00e72 | /src/entities/metric.py | 868bfa0563f670af8963e4937589ca2bae921afc | [
"CC-BY-4.0"
] | permissive | budh333/UnSilence_VOC | 07a4a5a58fd772230bfe1ffbcb8407de89daa210 | f6ba687f96f2c23690c84590adcb24ee239aa86b | refs/heads/main | 2023-05-26T20:49:49.105492 | 2023-05-12T23:18:50 | 2023-05-12T23:18:50 | 388,462,045 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | from __future__ import annotations # This is so we can use Metric as type hint
from typing import Dict, List
import numpy as np
class Metric:
def __init__(
self,
amount_limit: int = 5,
metric: Metric = None):
self._accuracies: Dict[str, List[float]] = {}
self._losses: List[float] = []
self._amount_limit = amount_limit
if metric:
self._amount_limit = metric._amount_limit
self.initialize(metric)
def add_accuracies(self, accuracies: Dict[str, float]):
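        # append each accuracy and keep only the most recent `_amount_limit` values per key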
for key, value in accuracies.items():
if key not in self._accuracies.keys():
self._accuracies[key] = []
self._accuracies[key].append(value)
if self._amount_limit:
self._accuracies[key] = self._accuracies[key][-self._amount_limit:]
def get_current_accuracies(self) -> Dict[str, float]:
result = {}
for key, value in self._accuracies.items():
result[key] = np.mean(value, axis=0)
return result
def get_accuracy_metric(self, metric_type: str) -> float:
if metric_type not in self._accuracies.keys():
return 0
result = np.mean(self._accuracies[metric_type], axis=0)
return result
def add_loss(self, loss_value: float):
self._losses.append(loss_value)
if self._amount_limit:
self._losses = self._losses[-self._amount_limit:]
def get_current_loss(self) -> float:
return np.mean(self._losses, axis=0)
def initialize(self, metric: Metric):
self._losses = metric._losses[-self._amount_limit:]
self._accuracies = {}
accuracies = metric._accuracies
for key, value in accuracies.items():
self._accuracies[key] = value[-self._amount_limit:]
def contains_accuracy_metric(self, metric_key: str) -> bool:
return metric_key in self._accuracies.keys()
@property
def is_new(self) -> bool:
return len(self._losses) == 0 and len(self._accuracies) == 0
| [
"[email protected]"
] |