max_stars_repo_path (stringlengths 4–245) | max_stars_repo_name (stringlengths 7–115) | max_stars_count (int64 101–368k) | id (stringlengths 2–8) | content (stringlengths 6–1.03M) |
---|---|---|---|---|
benchmarks/scaling/debug_hooks.py | Pandinosaurus/KungFu | 291 | 11097492 | <filename>benchmarks/scaling/debug_hooks.py
import os
import time
import numpy as np
from kungfu._utils import (_log_event, _since_proc_start, one_based_range,
show_duration)
from kungfu.tensorflow.ops import all_reduce
import tensorflow as tf
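# Determine the data-parallel worker count: prefer KungFu's view of the cluster when
# running under KungFu, fall back to Horovod's world size, and default to 1 otherwise.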
def _cluster_size():
if os.getenv('KUNGFU_SELF_SPEC'):
from kungfu.python import current_cluster_size
return current_cluster_size()
else:
try:
import horovod.tensorflow as hvd
return hvd.size()
except Exception:
return 1
class LogPerfHook(tf.train.SessionRunHook):
def __init__(self, batch_size, warmup_steps=5, drop_last=1):
self._batch_size = batch_size
self._warmup_steps = warmup_steps
self._drop_last = drop_last
self._step = 0
self._durations = []
self._t_last = time.time()
def before_run(self, run_context):
if self._step == 0:
from kungfu._utils import _since_job_start, show_duration
print('_since_job_start: %s' % (show_duration(_since_job_start())))
def after_run(self, run_context, run_values):
self._step += 1
t1 = time.time()
dur = t1 - self._t_last
step_per_sec = 1 / dur
sample_per_sec = step_per_sec * self._batch_size
print('local step %d, took %.3fs, %.2f samples / sec' %
(self._step, dur, sample_per_sec))
if self._step > self._warmup_steps:
self._durations.append(dur)
self._t_last = t1
def end(self, run_context):
durations = list(self._durations)
durations = durations[:len(durations) - self._drop_last]
ds = np.array(durations)
mean_duration = ds.mean()
# print('durations: %s' % (durations))
print('mean_duration: %.3fs' % (mean_duration))
step_per_sec = 1 / mean_duration
sample_per_sec = step_per_sec * self._batch_size
print('RESULT: %.2f samples / sec, batch size: %d, cluster size %d' %
(sample_per_sec, self._batch_size, _cluster_size()))
class LogStepHook(tf.train.SessionRunHook):
def __init__(self):
self._step = 0
def begin(self):
print('%s::%s %d steps' % ('LogStepHook', 'begin', self._step))
def after_create_session(self, sess, coord):
print('%s::%s %d steps' %
('LogStepHook', 'after_create_session', self._step))
def before_run(self, run_context):
if self._step == 0:
_log_event('before_run_step_0')
print('%s::%s %d steps' % ('LogStepHook', 'before_run', self._step))
def after_run(self, run_context, run_values):
self._step += 1
print('%s::%s after %d steps' %
('LogStepHook', 'after_run', self._step))
def end(self, run_context):
print('%s::%s after %d steps' % ('LogStepHook', 'end', self._step))
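# ProfileResizeHook proposes a new cluster size at scheduled step numbers; only rank 0
# issues the proposal so each resize is requested exactly once.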
class ProfileResizeHook(tf.train.SessionRunHook):
def __init__(self, schedule):
from kungfu.python import current_rank
self._rank = current_rank()
self._step = 0
self._schedule = schedule
def before_run(self, run_context):
pass
def after_run(self, run_context, run_values):
self._step += 1
if self._rank != 0:
return
if self._step in self._schedule:
new_size = self._schedule[self._step]
print('ProfileResizeHook step %d, new_size: %d' %
(self._step, new_size))
from kungfu.python import propose_new_size
propose_new_size(new_size)
def end(self, run_context):
pass
class SyncStepHook(tf.train.SessionRunHook):
def __init__(self):
pass
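# begin() builds two ops: one assigns the local global_step to the cluster-wide maximum
# (an all-reduce with op='max'), and one broadcasts all global variables so every worker
# resumes from an identical state; both are run once the session is created.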
def begin(self):
global_step = tf.train.get_or_create_global_step()
new_global_step = all_reduce(global_step, op='max')
self._sync_step_op = tf.assign(global_step, new_global_step)
from kungfu.tensorflow.initializer import BroadcastGlobalVariablesOp
self._sync_state_op = BroadcastGlobalVariablesOp()
def after_create_session(self, sess, coord):
gs = sess.run(self._sync_step_op)
sess.run(self._sync_state_op)
print('_sync_step_op result %d' % (gs))
_log_event('AFTER _sync_step_op')
def before_run(self, run_context):
pass
def after_run(self, run_context, run_values):
pass
def end(self, run_context):
_log_event('SyncStepHook::end')
|
tests/scenarios/test_using_make_with_custom_config.py | LaudateCorpus1/holodeck | 518 | 11097523 | import random
import holodeck
def test_using_make_with_custom_config():
"""
Validate that we can use holodeck.make with a custom configuration instead
of loading it from a config file
"""
conf = {"name": "test_randomization", "agents": []}
# pick a random world from the installed packages
pkg = random.choice(list(holodeck.packagemanager._iter_packages()))
world = random.choice(pkg[0]["worlds"])["name"]
conf["world"] = world
conf["package_name"] = pkg[0]["name"]
print("world: {} package: {}".format(world, pkg[0]["name"]))
with holodeck.make(scenario_cfg=conf, show_viewport=False) as env:
for _ in range(0, 10):
env.tick()
|
src/plugins_/cfcs/di.py | jcberquist/sublimetext-cfml | 130 | 11097525 | <filename>src/plugins_/cfcs/di.py
import sublime
import sublime_plugin
from . import cfcs, documentation
from ...cfml_view import CfmlView
class CfmlDiPropertyCommand(sublime_plugin.TextCommand):
def run(self, edit, property_name=None):
pt = self.view.sel()[0].begin()
if not self.view.match_selector(pt, "source.cfml meta.class"):
return
if property_name:
self.insert_property(edit, property_name)
return
cfml_view = CfmlView(self.view, pt)
if not cfml_view.project_name:
return
cfc_info, metadata, function_name, regions = documentation.find_cfc(cfml_view)
if cfc_info:
self.insert_property(edit, cfc_info["name"])
else:
cfc_list = cfcs.get_cfc_list(cfml_view.project_name)
def callback(i):
if i > -1:
self.view.run_command(
"cfml_di_property", {"property_name": cfc_list[i]}
)
self.view.window().show_quick_panel(
cfc_list, callback, flags=sublime.MONOSPACE_FONT
)
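# insert_property renders the configured script or tag template for the chosen CFC,
# reuses the indentation of existing property tags (or derives one from the tab settings),
# skips duplicates, and optionally re-sorts all property tags alphabetically by name.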
def insert_property(self, edit, property_name):
di_property = self.get_setting("di_property")
is_script = (
len(self.view.find_by_selector("source.cfml.script meta.class.body.cfml"))
> 0
)
property_string = (
di_property.get("script_template", "")
if is_script
else di_property.get("tag_template", "")
)
if "{name}" not in property_string:
return
properties = self.view.find_by_selector("meta.tag.property.cfml")
property_names = [
self.view.substr(r).lower()
for r in self.view.find_by_selector("meta.tag.property.name.cfml")
]
if property_name.lower() in property_names:
return
if len(properties) > 0:
indent_region = sublime.Region(
self.view.line(properties[-1]).begin(), properties[-1].begin()
)
indent_string = "\n" + self.view.substr(indent_region)
injection_pt = properties[-1].end()
else:
tab_size = self.view.settings().get("tab_size")
translate_tabs_to_spaces = self.view.settings().get(
"translate_tabs_to_spaces"
)
indent_string = "\n\n" + (
" " * tab_size if translate_tabs_to_spaces else "\t"
)
injection_pt = self.view.find_by_selector("source.cfml meta.class.body")[
0
].begin()
if is_script:
injection_pt += 1
property_string = indent_string + property_string.replace(
"{name}", property_name
)
self.view.insert(edit, injection_pt, property_string)
if di_property.get("sort_properties", False):
properties = self.view.find_by_selector("meta.tag.property.cfml")
property_names = self.view.find_by_selector("meta.tag.property.name.cfml")
if len(properties) != len(property_names):
return
sorted_properties = [
self.view.substr(r)
for r, name in sorted(
zip(properties, property_names),
reverse=True,
key=lambda x: self.view.substr(x[1]),
)
]
for i, r in enumerate(reversed(properties)):
self.view.replace(edit, r, sorted_properties[i])
def get_setting(self, setting_key):
if (
self.view.window().project_file_name()
and setting_key in self.view.window().project_data()
):
return self.view.window().project_data()[setting_key]
package_settings = sublime.load_settings("cfml_package.sublime-settings")
return package_settings.get(setting_key)
|
PythonClient/computer_vision/weather.py | jeyong/AirSim | 6,115 | 11097575 | <reponame>jeyong/AirSim<filename>PythonClient/computer_vision/weather.py
import setup_path
import airsim
client = airsim.VehicleClient()
client.confirmConnection()
client.simEnableWeather(True)
airsim.wait_key('Press any key to enable rain at 25%')
client.simSetWeatherParameter(airsim.WeatherParameter.Rain, 0.25)
airsim.wait_key('Press any key to enable rain at 75%')
client.simSetWeatherParameter(airsim.WeatherParameter.Rain, 0.75)
airsim.wait_key('Press any key to enable snow at 50%')
client.simSetWeatherParameter(airsim.WeatherParameter.Snow, 0.50)
airsim.wait_key('Press any key to enable maple leaves at 50%')
client.simSetWeatherParameter(airsim.WeatherParameter.MapleLeaf, 0.50)
airsim.wait_key('Press any key to set all effects to 0%')
client.simSetWeatherParameter(airsim.WeatherParameter.Rain, 0.0)
client.simSetWeatherParameter(airsim.WeatherParameter.Snow, 0.0)
client.simSetWeatherParameter(airsim.WeatherParameter.MapleLeaf, 0.0)
airsim.wait_key('Press any key to enable dust at 50%')
client.simSetWeatherParameter(airsim.WeatherParameter.Dust, 0.50)
airsim.wait_key('Press any key to enable fog at 50%')
client.simSetWeatherParameter(airsim.WeatherParameter.Fog, 0.50)
airsim.wait_key('Press any key to disable all weather effects')
client.simEnableWeather(False)
|
zippy/edu.uci.python.test/src/tests/mm2d_random_naive_5.py | lucapele/pele-c | 319 | 11097577 |
"""
# 3x3 matrix
X = [[12,7,3],
[4 ,5,6],
[7 ,8,9]]
# 3x4 matrix
Y = [[5,8,1,2],
[6,7,3,0],
[4,5,9,1]]
# result is 3x4
result = [[0,0,0,0],
[0,0,0,0],
[0,0,0,0]]
"""
import sys, time, random
# N = int(sys.argv[1])
N = 5
X = [[random.randint(0,1000) for i in range(N)] for j in range(N)]
Y = [[random.randint(0,1000) for i in range(N)] for j in range(N)]
result = [[0 for i in range(N)] for j in range(N)]
start = time.time()
def mm():
# iterate through rows of X
for i in range(len(X)):
# iterate through columns of Y
for j in range(len(Y[0])):
# iterate through rows of Y
for k in range(len(Y)):
result[i][j] += X[i][k] * Y[k][j]
mm()
duration = "N: %d Time: %.5f" % (N, (time.time() - start))
# print("Naive " + duration)
"""
for r in result:
print(r)
"""
|
bin/lib/cdn.py | dbabokin/infra | 135 | 11097592 | import hashlib
import logging
import mimetypes
import os
import shutil
import tarfile
from base64 import b64encode
from concurrent import futures
from datetime import datetime
from pathlib import Path
from tempfile import mkdtemp
from lib.amazon import botocore, s3_client, force_lazy_init
logger = logging.getLogger('ce-cdn')
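# Files are streamed through a reusable memoryview buffer so large artifacts can be hashed
# without being loaded fully into memory; hash_file_for_s3 records the base64-encoded
# SHA-256 digest that is later compared against the object's sha256 metadata on S3.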
def read_file_chunked(fobj, chunk_size=128 * 1024):
b = bytearray(chunk_size)
mv = memoryview(b)
for n in iter(lambda: fobj.readinto(mv), 0):
yield mv[:n]
def hash_fileobj(fobj, hash_type):
h = hash_type()
for chnk in read_file_chunked(fobj):
h.update(chnk)
return h
def hash_file_for_s3(f):
with open(f['path'], 'rb') as fobj:
sha256 = hash_fileobj(fobj, hashlib.sha256).digest()
sha256 = b64encode(sha256).decode()
return dict(hash=sha256, **f)
def get_directory_contents(basedir):
for f in Path(basedir).rglob('*'):
if not f.is_file():
continue
name = f.relative_to(basedir).as_posix()
yield dict(name=name, path=f)
# https://github.com/aws/aws-cli/blob/d5c0fce629eca740ed0bbe7e89579baf6a47d982/awscli/customizations/s3/utils.py#L288
def guess_content_type(filename):
"""Given a filename, guess its content type.
If the type cannot be guessed, a value of None is returned.
"""
try:
return mimetypes.guess_type(filename)[0]
# This catches a bug in the mimetypes library where some MIME types
# specifically on Windows machines cause a UnicodeDecodeError
# because the MIME type in the Windows registry has an encoding
# that cannot be properly encoded using the default system encoding.
# https://bugs.python.org/issue9291
#
# So instead of hard failing, just log the issue and fall back to the
# default guessed content type of None.
except UnicodeDecodeError:
logger.debug(
'Unable to guess content type for %s due to '
'UnicodeDecodeError: ', filename, exc_info=True
)
class DeploymentJob:
tmpdir = None
def __init__(self, tar_file_path, bucket_name, bucket_path='', version=None, max_workers=None, cache_control=None):
self.tar_file_path = tar_file_path
self.bucket_name = bucket_name
self.bucket_path = Path(bucket_path)
self.version = version
self.max_workers = max_workers or os.cpu_count() or 1
self.cache_control = cache_control
self.deploydate = datetime.utcnow().isoformat(timespec='seconds')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.__cleanup_tempdir()
def __unpack_tar(self):
# ensure temp dir exists
if not self.tmpdir:
self.tmpdir = mkdtemp()
# unpack tar contents
logger.debug('unpacking "%s" into "%s"', self.tar_file_path, self.tmpdir)
with tarfile.open(self.tar_file_path) as tar:
tar.extractall(self.tmpdir)
return list(get_directory_contents(self.tmpdir))
def __cleanup_tempdir(self):
# bail if tmpdir is not set
if not self.tmpdir:
return
try:
# recursively remove directory tree
shutil.rmtree(self.tmpdir)
# only clear tmpdir if above succeeds
# maybe a file is still open or something
# and we can try again later in case of failure
self.tmpdir = None
except OSError:
logger.exception('failure to cleanup temp directory "%s"', self.tmpdir)
def __get_bucket_path(self, key):
return (self.bucket_path / key).as_posix()
def __s3_head_object(self, key, **kwargs):
try:
return s3_client.head_object(
Bucket=self.bucket_name,
Key=self.__get_bucket_path(key),
**kwargs
)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
return None
raise
def __s3_upload_file(self, filepath, key, **kwargs):
return s3_client.upload_file(
filepath,
self.bucket_name,
self.__get_bucket_path(key),
**kwargs
)
def __s3_get_object_tagging(self, key):
resp = s3_client.get_object_tagging(
Bucket=self.bucket_name,
Key=self.__get_bucket_path(key)
)
tags = dict()
for t in resp['TagSet']:
tags[t['Key']] = t['Value']
return tags
def __s3_put_object_tagging(self, key, tags, **kwargs):
tagset = list([dict(Key=k, Value=v) for k, v in tags.items()])
return s3_client.put_object_tagging(
Bucket=self.bucket_name,
Key=self.__get_bucket_path(key),
Tagging=dict(TagSet=tagset),
**kwargs
)
def _check_s3_hash(self, file):
ret = dict(exists=False, mismatch=False, s3hash=None, **file)
resp = self.__s3_head_object(ret['name'])
if resp:
ret['exists'] = True
ret['s3hash'] = resp.get('Metadata', {}).get('sha256')
ret['mismatch'] = ret['s3hash'] != ret['hash']
return ret
def _upload_file(self, file):
extra_args = dict(Metadata=dict(sha256=file['hash']))
# guess content type
guessed_type = guess_content_type(file['name'])
if guessed_type is not None:
extra_args['ContentType'] = guessed_type
if self.cache_control is not None:
extra_args['CacheControl'] = self.cache_control
# upload file to s3
self.__s3_upload_file(
str(file['path']),
file['name'],
ExtraArgs=extra_args
)
tags = dict(FirstDeployDate=self.deploydate, LastDeployDate=self.deploydate)
if self.version:
tags['FirstVersionSeen'] = tags['LastVersionSeen'] = str(self.version)
# attach s3 tags
self.__s3_put_object_tagging(file['name'], tags)
return file
def _update_tags(self, file):
# get existing tags
tags = self.__s3_get_object_tagging(file['name'])
# update tag values
tags['LastDeployDate'] = self.deploydate
if self.version:
tags['LastVersionSeen'] = str(self.version)
# store updated tags to s3
self.__s3_put_object_tagging(file['name'], tags)
return file
def run(self):
logger.debug('running with %d workers', self.max_workers)
# work around race condition with parallel lazy init of boto3
force_lazy_init(s3_client)
files = self.__unpack_tar()
with futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
# calculate hashes for all the files
files = list(executor.map(hash_file_for_s3, files))
files_to_update = []
files_to_upload = []
files_with_mismatch = []
# check for existing files on s3 and compare hashes
for f in executor.map(self._check_s3_hash, files):
if f['exists']:
if f['mismatch']:
files_with_mismatch.append(f)
else:
files_to_update.append(f)
else:
files_to_upload.append(f)
if files_with_mismatch:
logger.error('%d files have mismatching hashes', len(files_with_mismatch))
for f in files_with_mismatch:
logger.error("%s: expected hash %s != %s", f['name'], f['hash'], f['s3hash'])
logger.error('aborting cdn deployment due to errors')
return False
logger.info("will update %d file tag%s", len(files_to_update), 's' if len(files_to_update) != 1 else '')
logger.info("will upload %d file%s", len(files_to_upload), 's' if len(files_to_upload) != 1 else '')
for f in executor.map(self._upload_file, files_to_upload):
logger.debug("uploaded %s", f['name'])
for f in executor.map(self._update_tags, files_to_update):
logger.debug("updated tags on %s", f['name'])
return True
|
src/automation/azext_automation/manual/_help.py | haroonf/azure-cli-extensions | 207 | 11097597 | <gh_stars>100-1000
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['automation'] = """
type: group
short-summary: Manage Automation
"""
helps['automation account'] = """
type: group
short-summary: Automation Account
"""
helps['automation account list'] = """
type: command
short-summary: "Retrieve a list of accounts within a given resource group, or within a given subscription."
examples:
- name: List automation accounts by resource group
text: |-
az automation account list --resource-group "rg"
- name: List automation accounts by subscription
text: |-
az automation account list
"""
helps['automation account show'] = """
type: command
short-summary: "Get information about an Automation Account."
examples:
- name: Get automation account
text: |-
az automation account show --name "myAutomationAccount" --resource-group "rg"
"""
helps['automation account create'] = """
type: command
short-summary: "Create automation account."
examples:
- name: Create an automation account
text: |-
az automation account create --automation-account-name "myAutomationAccount" --location "East US 2" \
--sku "Free" --resource-group "rg"
"""
helps['automation account update'] = """
type: command
short-summary: "Update an automation account."
examples:
- name: Update an automation account
text: |-
az automation account update --automation-account-name "myAutomationAccount" --tags KEY=VALUE \
--resource-group "rg"
"""
helps['automation account delete'] = """
type: command
short-summary: "Delete an automation account."
examples:
- name: Delete automation account
text: |-
az automation account delete --name "myAutomationAccount" --resource-group "rg"
"""
helps['automation runbook'] = """
type: group
short-summary: Automation Runbook
"""
helps['automation runbook list'] = """
type: command
short-summary: "Retrieve a list of runbooks."
examples:
- name: List runbooks by automation account
text: |-
az automation runbook list --automation-account-name "myAutomationAccount" --resource-group "rg"
"""
helps['automation runbook show'] = """
type: command
short-summary: "Retrieve the runbook identified by runbook name."
examples:
- name: Get runbook
text: |-
az automation runbook show --automation-account-name "myAutomationAccount" --resource-group "rg" \
--name "myRunbook"
"""
helps['automation runbook create'] = """
type: command
short-summary: "Create the runbook identified by runbook name."
examples:
- name: Create a runbook
text: |-
az automation runbook create --automation-account-name "myAutomationAccount" --resource-group "rg" \
--name "myRunbook" --type "PowerShell" --location "East US 2"
"""
helps['automation runbook update'] = """
type: command
short-summary: "Update the runbook identified by runbook name."
examples:
- name: Update a runbook
text: |-
az automation runbook update --automation-account-name "myAutomationAccount" --description \
"Runbook Description" --log-activity-trace 1 --log-progress true --log-verbose false \
--resource-group "rg" --runbook-name "myRunbook"
"""
helps['automation runbook delete'] = """
type: command
short-summary: "Delete the runbook by name."
examples:
- name: Delete a runbook
text: |-
az automation runbook delete --automation-account-name "myAutomationAccount" --resource-group "rg" \
--name "myRunbook"
"""
helps['automation runbook publish'] = """
type: command
short-summary: "Publish runbook draft."
examples:
- name: Publish runbook draft
text: |-
az automation runbook publish --automation-account-name "myAutomationAccount" --resource-group \
"rg" --name "myRunbook"
"""
helps['automation runbook wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the automation runbook is met.
examples:
- name: Pause executing next line of CLI script until the automation runbook is successfully created.
text: |-
az automation runbook wait --automation-account-name "myAutomationAccount" --resource-group "rg" \
--name "myRunbook" --created
"""
helps['automation runbook start'] = """
type: command
short-summary: "Start the runbook"
examples:
- name: Start the runbook
text: |-
az automation runbook start --automation-account-name "myAutomationAccount" --resource-group "rg" \
--name "myRunbook"
"""
helps['automation runbook replace-content'] = """
type: command
short-summary: "Replace content of the runbook"
examples:
- name: Replace content of the runbook
text: |-
az automation runbook replace-content --automation-account-name "myAutomationAccount" --resource-group \
"rg" --name "myRunbook" --content @/path/to/script
"""
helps['automation runbook revert-to-published'] = """
type: command
short-summary: "Revert the runbook content to last known published state"
examples:
- name: Revert the runbook content to last known published state
text: |-
az automation runbook revert-to-published --automation-account-name "myAutomationAccount" \
--resource-group "rg" --name "myRunbook"
"""
helps['automation job'] = """
type: group
short-summary: Automation Job
"""
helps['automation job list'] = """
type: command
short-summary: "Retrieve a list of jobs."
examples:
- name: List jobs by automation account
text: |-
az automation job list --automation-account-name "myAutomationAccount" --resource-group "rg"
"""
helps['automation job show'] = """
type: command
short-summary: "Retrieve the job identified by job name."
examples:
- name: Get job
text: |-
az automation job show --automation-account-name "myAutomationAccount" --name "foo" --resource-group "rg"
"""
helps['automation job resume'] = """
type: command
short-summary: "Resume the job identified by jobName."
examples:
- name: Resume job
text: |-
az automation job resume --automation-account-name "myAutomationAccount" --name "foo" \
--resource-group "rg"
"""
helps['automation job stop'] = """
type: command
short-summary: "Stop the job identified by jobName."
examples:
- name: Stop job
text: |-
az automation job stop --automation-account-name "myAutomationAccount" --name "foo" --resource-group "rg"
"""
helps['automation job suspend'] = """
type: command
short-summary: "Suspend the job identified by job name."
examples:
- name: Suspend job
text: |-
az automation job suspend --automation-account-name "myAutomationAccount" --name "foo" \
--resource-group "rg"
"""
|
cx_Freeze/samples/ctypes/test_ctypes.py | lexa/cx_Freeze | 358 | 11097602 | import ctypes
print("Hello", ctypes.__name__)
|
floyd/model/credentials.py | longhuei/floyd-cli | 162 | 11097611 | <gh_stars>100-1000
from marshmallow import Schema, fields, post_load
from floyd.model.base import BaseModel
class CredentialsSchema(Schema):
"""
Floyd credentials schema
"""
username = fields.Str()
password = fields.Str()
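# marshmallow's post_load hook turns the validated field dict into a Credentials instance.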
@post_load
def make_credentials(self, data):
return Credentials(**data)
class Credentials(BaseModel):
"""
Floyd credentials consists of username and password
"""
schema = CredentialsSchema(strict=True)
def __init__(self,
username,
password):
self.username = username
self.password = password
def to_dict(self):
return {
"username": self.username,
"password": self.password,
}
|
python/ase/scripts/speak.py | vlinhd11/vlinhd11-android-scripting | 2,293 | 11097667 | """Speak user generated text."""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2009, Google Inc.'
__license__ = 'Apache License, Version 2.0'
import android
droid = android.Android()
message = droid.getInput('TTS', 'What would you like to say?').result
droid.ttsSpeak(message)
|
pandas/check-in-ranges/main.py | whitmans-max/python-examples | 140 | 11097678 | <reponame>whitmans-max/python-examples<gh_stars>100-1000
# date: 2019.07.30
# https://stackoverflow.com/questions/57261926/get-all-items-from-dfs-column-that-are-within-a-range-using-ranges-iterable
import pandas as pd
df = pd.DataFrame([
['c1', 137674167],
['c2', 2166178],
['c3', 268],
], columns=['c', 'C2017Value'])
ranges = [
(261, 4760),
(12273391, 11104571063),
(45695385, 4134339925),
(15266178, 1376748162),
(10106104, 97810284),
(6492248, 588025190)
]
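# Two approaches are shown below: a boolean filter that keeps rows whose value falls inside
# any of the ranges, and a variant that also records the matching range in a new column.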
def check_ranges(value, ranges):
for a, b in ranges:
if a <= value <= b:
return True
return False
results = df[ df['C2017Value'].apply(lambda x, r=ranges:check_ranges(x,r)) ]
print(results)
def get_range(value, ranges):
for a, b in ranges:
if a <= value <= b:
return (a, b)
return None
df['range'] = df['C2017Value'].apply(lambda x, r=ranges:get_range(x,r))
print(df)
results = df[ df['range'].notnull() ]
print(results)
|
sphinx_source_files/update_doc_page_deprecated.py | caoliheng/open-dynamic-robot-initiative.github.io | 149 | 11097688 | <gh_stars>100-1000
#! /usr/bin/python
from os import path, mkdir, walk
import re
import shutil
import treep.treep_git
import treep.files
import rospkg
from bs4 import BeautifulSoup
import re
def get_ros_install_share_path():
"""
Use rospack to get the path to the installation folder (assumes the workspace has been sourced).
"""
ros_pack = rospkg.RosPack()
if "blmc_drivers" in ros_pack.list():
return path.dirname(ros_pack.get_path("blmc_drivers"))
else:
raise Exception('The blmc_drivers package is not part of the cloned packages')
return
def copy_doc_package(package_name, share_path):
"""
Copy/Replace the documentation of the ros package in this repository.
"""
share_path = get_ros_install_share_path()
local_doc = path.join("code_documentation", package_name)
local_doc_html = path.join(share_path, package_name, "doc", "html")
if not path.isdir(local_doc_html):
print ("WARNING: cannot find the documentation for the package [",
package_name,
"]. Nothing to be done")
return
if path.isdir(local_doc):
shutil.rmtree(local_doc)
shutil.copytree(local_doc_html, local_doc)
return
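# A repository counts as part of the project when its treep URL points at the
# open-dynamic-robot-initiative organisation and it contains a package.xml file.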
def find_ros_packages(share_path):
"""
Find the ros packages cloned from the open-dynamic-robot-initiative github organisation
"""
treep_projects = treep.files.read_configuration_files(False, share_path)
repos_names = treep_projects.get_repos_names()
packages_list = []
for repos_name in repos_names:
repos_path = treep_projects.get_repo_path(repos_name)
repos_url = treep.treep_git.get_url(repos_path)
if repos_url and "open-dynamic-robot-initiative" in repos_url:
for root, _, files in walk(repos_path):
for file in files:
if file == "package.xml":
packages_list.append(path.basename(root))
return packages_list
def update_index_html(exported_doc_list, exported_code_cov_list):
"""
Parse documentation_portal_template.html, append a link for each exported documentation and code coverage folder, and write the result to documentation_portal.html.
"""
with open("documentation_portal_template.html") as fp:
soup = BeautifulSoup(fp, features="lxml")
pkg_tag_ul = soup.find(id="list_documentation")
for doc_folder in exported_doc_list:
string_href = (
"https://open-dynamic-robot-initiative.github.io/code_documentation/" +
doc_folder + "/"
)
string_displayed = doc_folder
pkg_tag_li = soup.new_tag("li")
pkg_tag_ul.append(pkg_tag_li)
pkg_tag_a = soup.new_tag("a", href=string_href)
pkg_tag_a.string = string_displayed
pkg_tag_li.append(pkg_tag_a)
pkg_tag_ul = soup.find(id="list_code_coverage")
for code_cov_folder in exported_code_cov_list:
string_href = (
"https://open-dynamic-robot-initiative.github.io/code_coverage/" +
code_cov_folder + "/"
)
string_displayed = code_cov_folder
pkg_tag_li = soup.new_tag("li")
pkg_tag_ul.append(pkg_tag_li)
pkg_tag_a = soup.new_tag("a", href=string_href)
pkg_tag_a.string = string_displayed
pkg_tag_li.append(pkg_tag_a)
with open("documentation_portal.html", 'w') as fp:
fp.write(soup.prettify())
if __name__ == "__main__":
# First we get the path to the catkin install share folder
share_path = get_ros_install_share_path()
print("The path to the installation folder")
print(share_path)
# Then we find all the catkin packages which are in the machines-in-motion url
packages_list = find_ros_packages(share_path)
print("The list of the cloned catkin package from the machines-in-motion github")
print (packages_list)
# We copy the built documentation inside this repository
for package in packages_list:
copy_doc_package(package, share_path)
# We get all the package names from which the documentation is available
exported_doc_list = []
for (dirpath, dirnames, filenames) in walk("code_documentation"):
exported_doc_list.extend(dirnames)
break
print("The list of all the documentation available so far")
print (exported_doc_list)
# We get all the code coverage computed from the bamboo agents
exported_code_cov_list = []
for (dirpath, dirnames, filenames) in walk("code_coverage"):
exported_code_cov_list.extend(dirnames)
break
print("The list of all the code coverage reports available so far")
print (exported_code_cov_list)
# We update the list in the website
update_index_html(exported_doc_list, exported_code_cov_list)
|
kashgari/types.py | SharpKoi/Kashgari | 2,422 | 11097741 | <gh_stars>1000+
# encoding: utf-8
# author: BrikerMan
# contact: <EMAIL>
# blog: https://eliyar.biz
# file: types.py
# time: 3:54 下午
from typing import List, Union, Tuple
TextSamplesVar = List[List[str]]
NumSamplesListVar = List[List[int]]
LabelSamplesVar = Union[TextSamplesVar, List[str]]
ClassificationLabelVar = List[str]
MultiLabelClassificationLabelVar = Union[List[List[str]], List[Tuple[str]]]
if __name__ == "__main__":
pass
|
pluribus/games/short_deck/player.py | keithlee96/pluribus-poker-AI | 113 | 11097742 | from pluribus.poker.player import Player
from pluribus.poker.pot import Pot
class ShortDeckPokerPlayer(Player):
"""Inherits from Player which will interface easily with the PokerEngine.
This class should manage the state of the player's personal pot, and the
private cards that are dealt to the player. Also manages whether this
player has folded or not.
"""
def __init__(self, player_i: int, initial_chips: int, pot: Pot):
"""Instantiate a player."""
super().__init__(
name=f"player_{player_i}", initial_chips=initial_chips, pot=pot,
)
|
setup.py | oplatek/ALI | 329 | 11097749 | <reponame>oplatek/ALI<filename>setup.py
from setuptools import find_packages, setup
setup(
name='ali',
version='0.1.0',
description='Code for the "Adversarially Learned Inference" paper',
long_description='Code for the "Adversarially Learned Inference" paper',
url='https://github.com/IshmaelBelghazi/ALI',
author='<NAME>, <NAME>',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Recognition',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
keywords='theano blocks machine learning neural networks deep learning',
packages=find_packages(exclude=['scripts', 'experiments']),
install_requires=['numpy', 'theano', 'blocks', 'fuel'],
zip_safe=False)
|
Packs/PaloAltoNetworks_PAN_OS_EDL_Management/Integrations/PaloAltoNetworks_PAN_OS_EDL_Management/PaloAltoNetworks_PAN_OS_EDL_Management_test.py | diCagri/content | 799 | 11097751 | import pytest
from CommonServerPython import *
def test_get_external_file_too_many_new_lines(mocker):
"""
Given:
- an invalid file content with too many newlines
When:
- running edl_get_external_file_command
Then:
- Verify that an appropriate error message is shown to the user
- Verify that ssh_execute was executed the correct amount of times
"""
import PaloAltoNetworks_PAN_OS_EDL_Management as PANW_EDL
invalid_file = 'a\nb\na\nn\ni\nb\ni\na\nb\no\nd\ne\nb\ne\n'
mocker.patch.object(PANW_EDL, 'ssh_execute', return_value=invalid_file)
err_msg = 'The file contains too many newlines to be valid. ' \
'Please check the file contents on the external web server manually.'
with pytest.raises(DemistoException, match=err_msg):
PANW_EDL.edl_get_external_file_command(args={'file_path': 'lies', 'retries': '4'})
assert PANW_EDL.ssh_execute.call_count == 4
|
allennlp_models/rc/dataset_readers/record_reader.py | xinzhel/allennlp-models | 402 | 11097765 | <reponame>xinzhel/allennlp-models
"""
Dataset reader for SuperGLUE's Reading Comprehension with Commonsense Reasoning task (Zhang et
al., 2018).
Reader Implemented by <NAME>
"""
import logging
from typing import Dict, List, Optional, Iterable, Union, Tuple, Any
from pathlib import Path
from allennlp.common.util import sanitize_wordpiece
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import MetadataField, TextField, SpanField
from allennlp.data.instance import Instance
from allennlp_models.rc.dataset_readers.utils import char_span_to_token_span
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
import json
logger = logging.getLogger(__name__)
__all__ = ["RecordTaskReader"]
# TODO: Optimize this reader
@DatasetReader.register("superglue_record")
class RecordTaskReader(DatasetReader):
"""
Reader for Reading Comprehension with Commonsense Reasoning(ReCoRD) task from SuperGLUE. The
task is detailed in the paper ReCoRD: Bridging the Gap between Human and Machine Commonsense
Reading Comprehension (arxiv.org/pdf/1810.12885.pdf) by Zhang et al. Leaderboards and the
official evaluation script for the ReCoRD task can be found at sheng-z.github.io/ReCoRD-explorer/.
The reader reads a JSON file in the format from
sheng-z.github.io/ReCoRD-explorer/dataset-readme.txt
# Parameters
tokenizer: `Tokenizer`, optional
The tokenizer class to use. Defaults to SpacyTokenizer
token_indexers : `Dict[str, TokenIndexer]`, optional
We similarly use this for both the question and the passage. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
passage_length_limit : `int`, optional (default=`None`)
If specified, we will cut the passage if the length of passage exceeds this limit.
question_length_limit : `int`, optional (default=`None`)
If specified, we will cut the question if the length of question exceeds this limit.
raise_errors: `bool`, optional (default=`False`)
If the reader should raise errors or just continue.
kwargs: `Dict`
Keyword arguments to be passed to the DatasetReader parent class constructor.
"""
def __init__(
self,
transformer_model_name: str = "bert-base-cased",
length_limit: int = 384,
question_length_limit: int = 64,
stride: int = 128,
raise_errors: bool = False,
tokenizer_kwargs: Dict[str, Any] = None,
one_instance_per_query: bool = False,
max_instances: int = None,
**kwargs,
) -> None:
"""
Initialize the RecordTaskReader.
"""
super(RecordTaskReader, self).__init__(
manual_distributed_sharding=True, max_instances=max_instances, **kwargs
)
self._kwargs = kwargs
self._model_name = transformer_model_name
self._tokenizer_kwargs = tokenizer_kwargs or {}
# Save the values passed to __init__ to protected attributes
self._tokenizer = PretrainedTransformerTokenizer(
transformer_model_name,
add_special_tokens=False,
tokenizer_kwargs=tokenizer_kwargs,
)
self._token_indexers = {
"tokens": PretrainedTransformerIndexer(
transformer_model_name, tokenizer_kwargs=tokenizer_kwargs
)
}
self._length_limit = length_limit
self._query_len_limit = question_length_limit
self._stride = stride
self._raise_errors = raise_errors
self._cls_token = <PASSWORD>"
self._one_instance_per_query = one_instance_per_query
def _to_params(self) -> Dict[str, Any]:
"""
Get the configuration dictionary for this class.
# Returns
`Dict[str, Any]` The config dict.
"""
return {
"type": "superglue_record",
"transformer_model_name": self._model_name,
"length_limit": self._length_limit,
"question_length_limit": self._query_len_limit,
"stride": self._stride,
"raise_errors": self._raise_errors,
"tokenizer_kwargs": self._tokenizer_kwargs,
"one_instance_per_query": self._one_instance_per_query,
"max_instances": self.max_instances,
**self._kwargs,
}
@overrides
def _read(self, file_path: Union[Path, str]) -> Iterable[Instance]:
# IF `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
# Read the 'data' key from the dataset
logger.info(f"Reading '{file_path}'")
with open(file_path) as fp:
dataset = json.load(fp)["data"]
logger.info(f"Found {len(dataset)} examples from '{file_path}'")
# Keep track of certain stats while reading the file
# examples_multiple_instance_count: The number of questions with more than
# one instance. Can happen because there are multiple queries for a
# single passage.
# passages_yielded: The total number of instances found/yielded.
examples_multiple_instance_count = 0
examples_no_instance_count = 0
passages_yielded = 0
# Iterate through every example from the ReCoRD data file.
for example in dataset:
# Get the list of instances for the current example
instances_for_example = self.get_instances_from_example(example)
# Keep track of number of instances for this specific example that
# have been yielded. Since instances_for_example is a generator, we
# do not know its length. To address this, we create a counter int.
instance_count = 0
# Iterate through the instances and yield them.
for instance in instances_for_example:
yield instance
instance_count += 1
if instance_count == 0:
logger.warning(f"Example '{example['id']}' had no instances.")
examples_no_instance_count += 1
# Check if there was more than one instance for this example. If
# there was we increase examples_multiple_instance_count by 1.
# Otherwise we increase by 0.
examples_multiple_instance_count += 1 if instance_count > 1 else 0
passages_yielded += instance_count
# Check to see if we are over the max_instances to yield.
if self.max_instances and passages_yielded > self.max_instances:
logger.info("Passed max instances")
break
# Log pertinent information.
if passages_yielded:
logger.info(
f"{examples_multiple_instance_count}/{passages_yielded} "
f"({examples_multiple_instance_count / passages_yielded * 100:.2f}%) "
f"examples had more than one instance"
)
logger.info(
f"{examples_no_instance_count}/{passages_yielded} "
f"({examples_no_instance_count / passages_yielded * 100:.2f}%) "
f"examples had no instances"
)
else:
logger.warning(f"Could not find any instances in '{file_path}'")
def get_instances_from_example(
self, example: Dict, always_add_answer_span: bool = False
) -> Iterable[Instance]:
"""
Helper function to get instances from an example.
Much of this comes from `transformer_squad.make_instances`
# Parameters
example: `Dict[str,Any]`
The example dict.
# Returns:
`Iterable[Instance]` The instances for each example
"""
# Get the passage dict from the example, it has text and
# entities
example_id: str = example["id"]
passage_dict: Dict = example["passage"]
passage_text: str = passage_dict["text"]
# Tokenize the passage
tokenized_passage: List[Token] = self.tokenize_str(passage_text)
# TODO: Determine what to do with entities. Superglue marks them
# explicitly as input (https://arxiv.org/pdf/1905.00537.pdf)
# Get the queries from the example dict
queries: List = example["qas"]
logger.debug(f"{len(queries)} queries for example {example_id}")
# Tokenize and get the context windows for each queries
for query in queries:
# Create the additional metadata dict that will be passed w/ extra
# data for each query. We store the question & query ids, all
# answers, and other data following `transformer_qa`.
additional_metadata = {
"id": query["id"],
"example_id": example_id,
}
instances_yielded = 0
# Tokenize, and truncate, the query based on the max set in
# `__init__`
tokenized_query = self.tokenize_str(query["query"])[: self._query_len_limit]
# Calculate where the context needs to start and how many tokens we have
# for it. This is due to the limit on the number of tokens that a
# transformer can use because they have quadratic memory usage. But if
# you are reading this code, you probably know that.
space_for_context = (
self._length_limit
- len(list(tokenized_query))
# Used getattr so I can test without having to load a
# transformer model.
- len(getattr(self._tokenizer, "sequence_pair_start_tokens", []))
- len(getattr(self._tokenizer, "sequence_pair_mid_tokens", []))
- len(getattr(self._tokenizer, "sequence_pair_end_tokens", []))
)
# Check if answers exist for this query. We assume that there are no
# answers for this query, and set the start and end index for the
# answer span to -1.
answers = query.get("answers", [])
if not answers:
logger.warning(f"Skipping {query['id']}, no answers")
continue
# Create the arguments needed for `char_span_to_token_span`
token_offsets = [
(t.idx, t.idx + len(sanitize_wordpiece(t.text))) if t.idx is not None else None
for t in tokenized_passage
]
# Get the token offsets for the answers for this current passage.
answer_token_start, answer_token_end = (-1, -1)
for answer in answers:
# Try to find the offsets.
offsets, _ = char_span_to_token_span(
token_offsets, (answer["start"], answer["end"])
)
# If offsets for an answer were found, it means the answer is in
# the passage, and thus we can stop looking.
if offsets != (-1, -1):
answer_token_start, answer_token_end = offsets
break
# Go through the context and find the window that has the answer in it.
stride_start = 0
while True:
tokenized_context_window = tokenized_passage[stride_start:]
tokenized_context_window = tokenized_context_window[:space_for_context]
# Get the token offsets w.r.t the current window.
window_token_answer_span = (
answer_token_start - stride_start,
answer_token_end - stride_start,
)
if any(
i < 0 or i >= len(tokenized_context_window) for i in window_token_answer_span
):
# The answer is not contained in the window.
window_token_answer_span = None
if (
# not self.skip_impossible_questions
window_token_answer_span
is not None
):
# The answer WAS found in the context window, and thus we
# can make an instance for the answer.
instance = self.text_to_instance(
query["query"],
tokenized_query,
passage_text,
tokenized_context_window,
answers=[answer["text"] for answer in answers],
token_answer_span=window_token_answer_span,
additional_metadata=additional_metadata,
always_add_answer_span=always_add_answer_span,
)
yield instance
instances_yielded += 1
if instances_yielded == 1 and self._one_instance_per_query:
break
stride_start += space_for_context
# If we have reached the end of the passage, stop.
if stride_start >= len(tokenized_passage):
break
# I am not sure what this does...but it is here?
stride_start -= self._stride
def tokenize_slice(self, text: str, start: int = None, end: int = None) -> Iterable[Token]:
"""
Get + tokenize a span from a source text.
*Originally from the `transformer_squad.py`*
# Parameters
text: `str`
The text to draw from.
start: `int`
The start index for the span.
end: `int`
The end index for the span. Assumed that this is inclusive.
# Returns
`Iterable[Token]` List of tokens for the retrieved span.
"""
start = start or 0
end = end or len(text)
text_to_tokenize = text[start:end]
# Check if this is the start of the text. If the start is >= 0, check
# for a preceding space. If it exists, then we need to tokenize a
# special way because of a bug with RoBERTa tokenizer.
if start - 1 >= 0 and text[start - 1].isspace():
# Per the original tokenize_slice function, you need to add a
# garbage token before the actual text you want to tokenize so that
# the tokenizer does not add a beginning of sentence token.
prefix = "a "
# Tokenize the combined prefix and text
wordpieces = self._tokenizer.tokenize(prefix + text_to_tokenize)
# Go through each wordpiece in the tokenized wordpieces.
for wordpiece in wordpieces:
# Because we added the garbage prefix before tokenize, we need
# to adjust the idx such that it accounts for this. Therefore we
# subtract the length of the prefix from each token's idx.
if wordpiece.idx is not None:
wordpiece.idx -= len(prefix)
# We do not want the garbage token, so we return all but the first
# token.
return wordpieces[1:]
else:
# Do not need any sort of prefix, so just return all of the tokens.
return self._tokenizer.tokenize(text_to_tokenize)
def tokenize_str(self, text: str) -> List[Token]:
"""
Helper method to tokenize a string.
Adapted from the `transformer_squad.make_instances`
# Parameters
text: `str`
The string to tokenize.
# Returns
`Iterable[Tokens]` The resulting tokens.
"""
# We need to keep track of the current token index so that we can update
# the results from self.tokenize_slice such that they reflect their
# actual position in the string rather than their position in the slice
# passed to tokenize_slice. Also used to construct the slice.
token_index = 0
# Create the output list (can be any iterable) that will store the
# tokens we found.
tokenized_str = []
# Helper function to update the `idx` and add every wordpiece in the
# `tokenized_slice` to the `tokenized_str`.
def add_wordpieces(tokenized_slice: Iterable[Token]) -> None:
for wordpiece in tokenized_slice:
if wordpiece.idx is not None:
wordpiece.idx += token_index
tokenized_str.append(wordpiece)
# Iterate through every character and their respective index in the text
# to create the slices to tokenize.
for i, c in enumerate(text):
# Check if the current character is a space. If it is, we tokenize
# the slice of `text` from `token_index` to `i`.
if c.isspace():
add_wordpieces(self.tokenize_slice(text, token_index, i))
token_index = i + 1
# Add the end slice that is not collected by the for loop.
add_wordpieces(self.tokenize_slice(text, token_index, len(text)))
return tokenized_str
@staticmethod
def get_spans_from_text(text: str, spans: List[Tuple[int, int]]) -> List[str]:
"""
Helper function to get a span from a string
# Parameter
text: `str`
The source string
spans: `List[Tuple[int,int]]`
List of start and end indices for spans.
Assumes that the end index is inclusive. Therefore, for start
index `i` and end index `j`, retrieves the span at `text[i:j+1]`.
# Returns
`List[str]` The extracted string from text.
"""
return [text[start : end + 1] for start, end in spans]
@overrides
def text_to_instance(
self,
query: str,
tokenized_query: List[Token],
passage: str,
tokenized_passage: List[Token],
answers: List[str],
token_answer_span: Optional[Tuple[int, int]] = None,
additional_metadata: Optional[Dict[str, Any]] = None,
always_add_answer_span: Optional[bool] = False,
) -> Instance:
"""
A lot of this comes directly from the `transformer_squad.text_to_instance`
"""
fields = {}
# Create the query field from the tokenized question and context. Use
# `self._tokenizer.add_special_tokens` function to add the necessary
# special tokens to the query.
query_field = TextField(
self._tokenizer.add_special_tokens(
# The `add_special_tokens` function automatically adds in the
# separation token to mark the separation between the two lists of
# tokens. Therefore, we can create the query field WITH context
# through passing them both as arguments.
tokenized_query,
tokenized_passage,
),
self._token_indexers,
)
# Add the query field to the fields dict that will be outputted as an
# instance. Do it here rather than assign above so that we can use
# attributes from `query_field` rather than continuously indexing
# `fields`.
fields["question_with_context"] = query_field
# Calculate the index that marks the start of the context.
start_of_context = (
+len(tokenized_query)
# Used getattr so I can test without having to load a
# transformer model.
+ len(getattr(self._tokenizer, "sequence_pair_start_tokens", []))
+ len(getattr(self._tokenizer, "sequence_pair_mid_tokens", []))
)
# make the answer span
if token_answer_span is not None:
assert all(i >= 0 for i in token_answer_span)
assert token_answer_span[0] <= token_answer_span[1]
fields["answer_span"] = SpanField(
token_answer_span[0] + start_of_context,
token_answer_span[1] + start_of_context,
query_field,
)
# make the context span, i.e., the span of text from which possible
# answers should be drawn
fields["context_span"] = SpanField(
start_of_context, start_of_context + len(tokenized_passage) - 1, query_field
)
# make the metadata
metadata = {
"question": query,
"question_tokens": tokenized_query,
"context": passage,
"context_tokens": tokenized_passage,
"answers": answers or [],
}
if additional_metadata is not None:
metadata.update(additional_metadata)
fields["metadata"] = MetadataField(metadata)
return Instance(fields)
def _find_cls_index(self, tokens: List[Token]) -> int:
"""
From transformer_squad
Args:
self:
tokens:
Returns:
"""
return next(i for i, t in enumerate(tokens) if t.text == self._cls_token)
|
plan2explore/control/batch_env.py | sarthak268/plan2explore | 189 | 11097769 | # Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import tensorflow as tf
class TFBatchEnv(object):
def __init__(self, envs, blocking):
self._batch_env = PyBatchEnv(envs, blocking, flatten=True)
spaces = self._batch_env.observation_space.spaces
self._dtypes = [self._parse_dtype(spaces[key]) for key in self._keys[:-2]]
self._dtypes += [tf.float32, tf.bool] # Reward and done flag.
self._shapes = [self._parse_shape(spaces[key]) for key in self._keys[:-2]]
self._shapes += [(), ()] # Reward and done flag.
def __getattr__(self, name):
return getattr(self._batch_env, name)
def __len__(self):
return len(self._batch_env)
def __getitem__(self, index):
return self._batch_env[index]
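# step() and reset() wrap the Python-side batch environment in tf.py_func so they can run
# inside a TensorFlow graph; _process_output re-attaches the static shapes that py_func
# discards and maps the flattened outputs back to their observation keys.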
def step(self, action):
output = tf.py_func(
self._batch_env.step, [action], self._dtypes, name='step')
return self._process_output(output, len(self._batch_env))
def reset(self, indices=None):
if indices is None:
indices = tf.range(len(self._batch_env))
output = tf.py_func(
self._batch_env.reset, [indices], self._dtypes, name='reset')
return self._process_output(output, None)
def _process_output(self, output, batch_size):
for tensor, shape in zip(output, self._shapes):
tensor.set_shape((batch_size,) + shape)
return {key: tensor for key, tensor in zip(self._keys, output)}
def _parse_dtype(self, space):
if isinstance(space, gym.spaces.Discrete):
return tf.int32
if isinstance(space, gym.spaces.Box):
if space.low.dtype == np.uint8:
return tf.uint8
else:
return tf.float32
raise NotImplementedError()
def _parse_shape(self, space):
if isinstance(space, gym.spaces.Discrete):
return ()
if isinstance(space, gym.spaces.Box):
return space.shape
raise NotImplementedError("Unsupported space '{}'.".format(space))
class PyBatchEnv(object):
def __init__(self, envs, blocking, flatten=False):
observ_space = envs[0].observation_space
if not all(env.observation_space == observ_space for env in envs):
raise ValueError('All environments must use the same observation space.')
action_space = envs[0].action_space
if not all(env.action_space == action_space for env in envs):
raise ValueError('All environments must use the same action space.')
self._envs = envs
self._blocking = blocking
self._flatten = flatten
self._keys = list(sorted(observ_space.spaces.keys())) + ['reward', 'done']
def __len__(self):
return len(self._envs)
def __getitem__(self, index):
return self._envs[index]
def __getattr__(self, name):
return getattr(self._envs[0], name)
def step(self, actions):
for index, (env, action) in enumerate(zip(self._envs, actions)):
if not env.action_space.contains(action):
message = 'Invalid action for batch index {}: {}'
raise ValueError(message.format(index, action))
if self._blocking:
transitions = [
env.step(action)
for env, action in zip(self._envs, actions)]
else:
transitions = [
env.step(action, blocking=False)
for env, action in zip(self._envs, actions)]
transitions = [transition() for transition in transitions]
outputs = {key: [] for key in self._keys}
for observ, reward, done, _ in transitions:
for key, value in observ.items():
outputs[key].append(np.array(value))
outputs['reward'].append(np.array(reward, np.float32))
outputs['done'].append(np.array(done, np.bool))
outputs = {key: np.stack(value) for key, value in outputs.items()}
if self._flatten:
outputs = tuple(outputs[key] for key in self._keys)
return outputs
def reset(self, indices=None):
if indices is None:
indices = range(len(self._envs))
if self._blocking:
observs = [self._envs[index].reset() for index in indices]
else:
observs = [self._envs[index].reset(blocking=False) for index in indices]
observs = [observ() for observ in observs]
outputs = {key: [] for key in self._keys}
for observ in observs:
for key, value in observ.items():
outputs[key].append(np.array(value))
outputs['reward'].append(np.array(0.0, np.float32))
outputs['done'].append(np.array(False, np.bool))
outputs = {key: np.stack(value) for key, value in outputs.items()}
if self._flatten:
outputs = tuple(outputs[key] for key in self._keys)
return outputs
def close(self):
for env in self._envs:
if hasattr(env, 'close'):
env.close()
|
crankycoin/models/block.py | Rajatkalsotra/crankycoin | 162 | 11097779 | <reponame>Rajatkalsotra/crankycoin
import codecs
import hashlib
import json
import time
import pyscrypt
from crankycoin.models.transaction import Transaction
from crankycoin.models.errors import InvalidTransactions
from crankycoin import config
class BlockHeader(object):
def __init__(self, previous_hash, merkle_root, timestamp=None, nonce=0, version=None):
self.version = config['network']['version'] if version is None else int(version)
self.previous_hash = previous_hash
self.merkle_root = merkle_root
self.nonce = int(nonce)
self.timestamp = int(time.time()) if timestamp is None else int(timestamp)
def to_hashable(self):
return "{0:0>8x}".format(self.version) + \
self.previous_hash + \
self.merkle_root + \
"{0:0>8x}".format(self.timestamp) + \
"{0:0>8x}".format(self.nonce)
@property
def hash(self):
"""
:return: scrypt hash
:rtype: str
"""
hashable = self.to_hashable().encode('utf-8')
hash_object = pyscrypt.hash(
password=<PASSWORD>,
salt=hashable,
N=1024,
r=1,
p=1,
dkLen=32)
return codecs.encode(hash_object, 'hex')
@property
def hash_difficulty(self):
difficulty = 0
for c in self.hash:
if c != '0':
break
difficulty += 1
return difficulty
def to_json(self):
return json.dumps(self, default=lambda o: {key.lstrip('_'): value for key, value in o.__dict__.items()},
sort_keys=True)
def to_dict(self):
return {key.lstrip('_'): value for key, value in self.__dict__.items()}
@classmethod
def from_dict(cls, block_header_dict):
return cls(
block_header_dict['previous_hash'],
block_header_dict['merkle_root'],
block_header_dict['timestamp'],
block_header_dict['nonce'],
block_header_dict['version']
)
def __repr__(self):
return "<Block Header {}>".format(self.merkle_root)
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
class Block(object):
transactions = []
def __init__(self, height, transactions, previous_hash, timestamp=None, nonce=0):
"""
:param height: height # of block
:type height: int
:param transactions: list of transactions
:type transactions: list of transaction objects
:param previous_hash: previous block hash
:type previous_hash: str
:param timestamp: timestamp of block mined
:type timestamp: int
"""
self._height = height
self._transactions = transactions
merkle_root = self._calculate_merkle_root()
self.block_header = BlockHeader(previous_hash, merkle_root, timestamp, nonce)
@property
def height(self):
return self._height
@property
def transactions(self):
if len(self._transactions) <= 1:
return self._transactions
coinbase = self._transactions[0]
sorted_transactions = sorted(self._transactions[1:], key=lambda x: x.tx_hash)
sorted_transactions.insert(0, coinbase)
return sorted_transactions
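# Build the Merkle root by repeatedly hashing adjacent transaction hashes together;
# an odd trailing hash is hashed on its own, and the loop stops when one digest remains.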
def _calculate_merkle_root(self):
if len(self._transactions) < 1:
raise InvalidTransactions(self._height, "Zero transactions in block. Coinbase transaction required")
merkle_base = [t.tx_hash for t in self.transactions]
while len(merkle_base) > 1:
temp_merkle_base = []
for i in range(0, len(merkle_base), 2):
if i == len(merkle_base) - 1:
temp_merkle_base.append(
hashlib.sha256(merkle_base[i]).hexdigest()
)
else:
temp_merkle_base.append(
hashlib.sha256(merkle_base[i] + merkle_base[i+1]).hexdigest()
)
merkle_base = temp_merkle_base
return merkle_base[0]
def to_json(self):
return json.dumps(self, default=lambda o: {key.lstrip('_'): value for key, value in o.__dict__.items()},
sort_keys=True)
def to_dict(self):
d = dict()
for key, value in self.__dict__.items():
if isinstance(value, list):
d[key] = [v.to_dict() for v in value]
elif hasattr(value, "to_dict"):
d[key] = value.to_dict()
else:
d[key] = value
return d
@classmethod
def from_dict(cls, block_dict):
return cls(
block_dict['height'],
[Transaction(
transaction['source'],
transaction['destination'],
transaction['amount'],
transaction['fee'],
tx_type=transaction['tx_type'],
timestamp=transaction['timestamp'],
asset=transaction['asset'],
data=transaction['data'],
prev_hash=transaction['prev_hash'],
signature=transaction['signature'])
for transaction in block_dict['transactions']
],
block_dict['previous_hash'],
block_dict['timestamp'],
block_dict['nonce']
)
def __repr__(self):
return "<Block {}>".format(self.block_header.hash)
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
if __name__ == "__main__":
pass
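    # Editor's sketch (hedged): a standalone illustration of the pairing rule in
    # Block._calculate_merkle_root, applied to plain hex digests instead of Transaction
    # objects (building real transactions would require keys and signatures). Strings
    # are encoded to bytes before hashing, as Python 3's hashlib expects.
    sample = [hashlib.sha256(str(i).encode()).hexdigest() for i in range(3)]
    level = sample
    while len(level) > 1:
        nxt = []
        for i in range(0, len(level), 2):
            if i == len(level) - 1:
                # an odd element at the end of a level is hashed alone
                nxt.append(hashlib.sha256(level[i].encode()).hexdigest())
            else:
                # adjacent pairs are concatenated and hashed together
                nxt.append(hashlib.sha256((level[i] + level[i + 1]).encode()).hexdigest())
        level = nxt
    print("sample merkle root:", level[0])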
|
Src/StdLib/Lib/site-packages/win32comext/internet/inetcon.py | cwensley/ironpython2 | 1,078 | 11097797 | <gh_stars>1000+
INET_E_USE_DEFAULT_PROTOCOLHANDLER = -2146697199 # _HRESULT_TYPEDEF_(0x800C0011L)
INET_E_USE_DEFAULT_SETTING = -2146697198 # _HRESULT_TYPEDEF_(0x800C0012L)
INET_E_DEFAULT_ACTION = INET_E_USE_DEFAULT_PROTOCOLHANDLER
INET_E_QUERYOPTION_UNKNOWN = -2146697197 # _HRESULT_TYPEDEF_(0x800C0013L)
INET_E_REDIRECTING = -2146697196 #_HRESULT_TYPEDEF_(0x800C0014L)
INET_E_INVALID_URL = -2146697214 # _HRESULT_TYPEDEF_(0x800C0002L)
INET_E_NO_SESSION = -2146697213 # _HRESULT_TYPEDEF_(0x800C0003L)
INET_E_CANNOT_CONNECT = -2146697212 # _HRESULT_TYPEDEF_(0x800C0004L)
INET_E_RESOURCE_NOT_FOUND = -2146697211 # _HRESULT_TYPEDEF_(0x800C0005L)
INET_E_OBJECT_NOT_FOUND = -2146697210 # _HRESULT_TYPEDEF_(0x800C0006L)
INET_E_DATA_NOT_AVAILABLE = -2146697209 # _HRESULT_TYPEDEF_(0x800C0007L)
INET_E_DOWNLOAD_FAILURE = -2146697208 # _HRESULT_TYPEDEF_(0x800C0008L)
INET_E_AUTHENTICATION_REQUIRED = -2146697207 # _HRESULT_TYPEDEF_(0x800C0009L)
INET_E_NO_VALID_MEDIA = -2146697206 # _HRESULT_TYPEDEF_(0x800C000AL)
INET_E_CONNECTION_TIMEOUT = -2146697205 # _HRESULT_TYPEDEF_(0x800C000BL)
INET_E_INVALID_REQUEST = -2146697204 # _HRESULT_TYPEDEF_(0x800C000CL)
INET_E_UNKNOWN_PROTOCOL = -2146697203 # _HRESULT_TYPEDEF_(0x800C000DL)
INET_E_SECURITY_PROBLEM = -2146697202 # _HRESULT_TYPEDEF_(0x800C000EL)
INET_E_CANNOT_LOAD_DATA = -2146697201 # _HRESULT_TYPEDEF_(0x800C000FL)
INET_E_CANNOT_INSTANTIATE_OBJECT = -2146697200 # _HRESULT_TYPEDEF_(0x800C0010L)
INET_E_INVALID_CERTIFICATE = -2146697191 # _HRESULT_TYPEDEF_(0x800C0019L)
INET_E_REDIRECT_FAILED = -2146697196 # _HRESULT_TYPEDEF_(0x800C0014L)
INET_E_REDIRECT_TO_DIR = -2146697195 # _HRESULT_TYPEDEF_(0x800C0015L)
INET_E_CANNOT_LOCK_REQUEST = -2146697194 # _HRESULT_TYPEDEF_(0x800C0016L)
INET_E_USE_EXTEND_BINDING = -2146697193 # _HRESULT_TYPEDEF_(0x800C0017L)
INET_E_TERMINATED_BIND = -2146697192 # _HRESULT_TYPEDEF_(0x800C0018L)
INET_E_CODE_DOWNLOAD_DECLINED = -2146696960 #_HRESULT_TYPEDEF_(0x800C0100L)
INET_E_RESULT_DISPATCHED = -2146696704 # _HRESULT_TYPEDEF_(0x800C0200L)
INET_E_CANNOT_REPLACE_SFP_FILE = -2146696448 # _HRESULT_TYPEDEF_(0x800C0300L)
INET_E_CODE_INSTALL_SUPPRESSED = -2146696192 # _HRESULT_TYPEDEF_(0x800C0400L)
INET_E_CODE_INSTALL_BLOCKED_BY_HASH_POLICY = -2146695936 # _HRESULT_TYPEDEF_(0x800C0500L)
# Generated by h2py from UrlMon.h
MKSYS_URLMONIKER = 6
URL_MK_LEGACY = 0
URL_MK_UNIFORM = 1
URL_MK_NO_CANONICALIZE = 2
FIEF_FLAG_FORCE_JITUI = 0x1
FIEF_FLAG_PEEK = 0x2
FIEF_FLAG_SKIP_INSTALLED_VERSION_CHECK = 0x4
FMFD_DEFAULT = 0x00000000
FMFD_URLASFILENAME = 0x00000001
FMFD_ENABLEMIMESNIFFING = 0x00000002
FMFD_IGNOREMIMETEXTPLAIN = 0x00000004
URLMON_OPTION_USERAGENT = 0x10000001
URLMON_OPTION_USERAGENT_REFRESH = 0x10000002
URLMON_OPTION_URL_ENCODING = 0x10000004
URLMON_OPTION_USE_BINDSTRINGCREDS = 0x10000008
URLMON_OPTION_USE_BROWSERAPPSDOCUMENTS = 0x10000010
CF_NULL = 0
Uri_CREATE_ALLOW_RELATIVE = 0x00000001
Uri_CREATE_ALLOW_IMPLICIT_WILDCARD_SCHEME = 0x00000002
Uri_CREATE_ALLOW_IMPLICIT_FILE_SCHEME = 0x00000004
Uri_CREATE_NOFRAG = 0x00000008
Uri_CREATE_NO_CANONICALIZE = 0x00000010
Uri_CREATE_CANONICALIZE = 0x00000100
Uri_CREATE_FILE_USE_DOS_PATH = 0x00000020
Uri_CREATE_DECODE_EXTRA_INFO = 0x00000040
Uri_CREATE_NO_DECODE_EXTRA_INFO = 0x00000080
Uri_CREATE_CRACK_UNKNOWN_SCHEMES = 0x00000200
Uri_CREATE_NO_CRACK_UNKNOWN_SCHEMES = 0x00000400
Uri_CREATE_PRE_PROCESS_HTML_URI = 0x00000800
Uri_CREATE_NO_PRE_PROCESS_HTML_URI = 0x00001000
Uri_CREATE_IE_SETTINGS = 0x00002000
Uri_CREATE_NO_IE_SETTINGS = 0x00004000
Uri_CREATE_NO_ENCODE_FORBIDDEN_CHARACTERS = 0x00008000
Uri_DISPLAY_NO_FRAGMENT = 0x00000001
Uri_PUNYCODE_IDN_HOST = 0x00000002
Uri_DISPLAY_IDN_HOST = 0x00000004
Uri_ENCODING_USER_INFO_AND_PATH_IS_PERCENT_ENCODED_UTF8 = 0x00000001
Uri_ENCODING_USER_INFO_AND_PATH_IS_CP = 0x00000002
Uri_ENCODING_HOST_IS_IDN = 0x00000004
Uri_ENCODING_HOST_IS_PERCENT_ENCODED_UTF8 = 0x00000008
Uri_ENCODING_HOST_IS_PERCENT_ENCODED_CP = 0x00000010
Uri_ENCODING_QUERY_AND_FRAGMENT_IS_PERCENT_ENCODED_UTF8 = 0x00000020
Uri_ENCODING_QUERY_AND_FRAGMENT_IS_CP = 0x00000040
Uri_ENCODING_RFC = (Uri_ENCODING_USER_INFO_AND_PATH_IS_PERCENT_ENCODED_UTF8 | Uri_ENCODING_HOST_IS_PERCENT_ENCODED_UTF8 | Uri_ENCODING_QUERY_AND_FRAGMENT_IS_PERCENT_ENCODED_UTF8)
UriBuilder_USE_ORIGINAL_FLAGS = 0x00000001
WININETINFO_OPTION_LOCK_HANDLE = 65534
URLOSTRM_USECACHEDCOPY_ONLY = 0x1
URLOSTRM_USECACHEDCOPY = 0x2
URLOSTRM_GETNEWESTVERSION = 0x3
SET_FEATURE_ON_THREAD = 0x00000001
SET_FEATURE_ON_PROCESS = 0x00000002
SET_FEATURE_IN_REGISTRY = 0x00000004
SET_FEATURE_ON_THREAD_LOCALMACHINE = 0x00000008
SET_FEATURE_ON_THREAD_INTRANET = 0x00000010
SET_FEATURE_ON_THREAD_TRUSTED = 0x00000020
SET_FEATURE_ON_THREAD_INTERNET = 0x00000040
SET_FEATURE_ON_THREAD_RESTRICTED = 0x00000080
GET_FEATURE_FROM_THREAD = 0x00000001
GET_FEATURE_FROM_PROCESS = 0x00000002
GET_FEATURE_FROM_REGISTRY = 0x00000004
GET_FEATURE_FROM_THREAD_LOCALMACHINE = 0x00000008
GET_FEATURE_FROM_THREAD_INTRANET = 0x00000010
GET_FEATURE_FROM_THREAD_TRUSTED = 0x00000020
GET_FEATURE_FROM_THREAD_INTERNET = 0x00000040
GET_FEATURE_FROM_THREAD_RESTRICTED = 0x00000080
PROTOCOLFLAG_NO_PICS_CHECK = 0x00000001
MUTZ_NOSAVEDFILECHECK = 0x00000001
MUTZ_ISFILE = 0x00000002
MUTZ_ACCEPT_WILDCARD_SCHEME = 0x00000080
MUTZ_ENFORCERESTRICTED = 0x00000100
MUTZ_RESERVED = 0x00000200
MUTZ_REQUIRESAVEDFILECHECK = 0x00000400
MUTZ_DONT_UNESCAPE = 0x00000800
MUTZ_DONT_USE_CACHE = 0x00001000
MUTZ_FORCE_INTRANET_FLAGS = 0x00002000
MUTZ_IGNORE_ZONE_MAPPINGS = 0x00004000
MAX_SIZE_SECURITY_ID = 512
URLACTION_MIN = 0x00001000
URLACTION_DOWNLOAD_MIN = 0x00001000
URLACTION_DOWNLOAD_SIGNED_ACTIVEX = 0x00001001
URLACTION_DOWNLOAD_UNSIGNED_ACTIVEX = 0x00001004
URLACTION_DOWNLOAD_CURR_MAX = 0x00001004
URLACTION_DOWNLOAD_MAX = 0x000011FF
URLACTION_ACTIVEX_MIN = 0x00001200
URLACTION_ACTIVEX_RUN = 0x00001200
URLPOLICY_ACTIVEX_CHECK_LIST = 0x00010000
URLACTION_ACTIVEX_OVERRIDE_OBJECT_SAFETY = 0x00001201
URLACTION_ACTIVEX_OVERRIDE_DATA_SAFETY = 0x00001202
URLACTION_ACTIVEX_OVERRIDE_SCRIPT_SAFETY = 0x00001203
URLACTION_SCRIPT_OVERRIDE_SAFETY = 0x00001401
URLACTION_ACTIVEX_CONFIRM_NOOBJECTSAFETY = 0x00001204
URLACTION_ACTIVEX_TREATASUNTRUSTED = 0x00001205
URLACTION_ACTIVEX_NO_WEBOC_SCRIPT = 0x00001206
URLACTION_ACTIVEX_OVERRIDE_REPURPOSEDETECTION = 0x00001207
URLACTION_ACTIVEX_OVERRIDE_OPTIN = 0x00001208
URLACTION_ACTIVEX_SCRIPTLET_RUN = 0x00001209
URLACTION_ACTIVEX_DYNSRC_VIDEO_AND_ANIMATION = 0x0000120A
URLACTION_ACTIVEX_CURR_MAX = 0x0000120A
URLACTION_ACTIVEX_MAX = 0x000013ff
URLACTION_SCRIPT_MIN = 0x00001400
URLACTION_SCRIPT_RUN = 0x00001400
URLACTION_SCRIPT_JAVA_USE = 0x00001402
URLACTION_SCRIPT_SAFE_ACTIVEX = 0x00001405
URLACTION_CROSS_DOMAIN_DATA = 0x00001406
URLACTION_SCRIPT_PASTE = 0x00001407
URLACTION_ALLOW_XDOMAIN_SUBFRAME_RESIZE = 0x00001408
URLACTION_SCRIPT_CURR_MAX = 0x00001408
URLACTION_SCRIPT_MAX = 0x000015ff
URLACTION_HTML_MIN = 0x00001600
URLACTION_HTML_SUBMIT_FORMS = 0x00001601
URLACTION_HTML_SUBMIT_FORMS_FROM = 0x00001602
URLACTION_HTML_SUBMIT_FORMS_TO = 0x00001603
URLACTION_HTML_FONT_DOWNLOAD = 0x00001604
URLACTION_HTML_JAVA_RUN = 0x00001605
URLACTION_HTML_USERDATA_SAVE = 0x00001606
URLACTION_HTML_SUBFRAME_NAVIGATE = 0x00001607
URLACTION_HTML_META_REFRESH = 0x00001608
URLACTION_HTML_MIXED_CONTENT = 0x00001609
URLACTION_HTML_INCLUDE_FILE_PATH = 0x0000160A
URLACTION_HTML_MAX = 0x000017ff
URLACTION_SHELL_MIN = 0x00001800
URLACTION_SHELL_INSTALL_DTITEMS = 0x00001800
URLACTION_SHELL_MOVE_OR_COPY = 0x00001802
URLACTION_SHELL_FILE_DOWNLOAD = 0x00001803
URLACTION_SHELL_VERB = 0x00001804
URLACTION_SHELL_WEBVIEW_VERB = 0x00001805
URLACTION_SHELL_SHELLEXECUTE = 0x00001806
URLACTION_SHELL_EXECUTE_HIGHRISK = 0x00001806
URLACTION_SHELL_EXECUTE_MODRISK = 0x00001807
URLACTION_SHELL_EXECUTE_LOWRISK = 0x00001808
URLACTION_SHELL_POPUPMGR = 0x00001809
URLACTION_SHELL_RTF_OBJECTS_LOAD = 0x0000180A
URLACTION_SHELL_ENHANCED_DRAGDROP_SECURITY = 0x0000180B
URLACTION_SHELL_EXTENSIONSECURITY = 0x0000180C
URLACTION_SHELL_SECURE_DRAGSOURCE = 0x0000180D
URLACTION_SHELL_CURR_MAX = 0x0000180D
URLACTION_SHELL_MAX = 0x000019ff
URLACTION_NETWORK_MIN = 0x00001A00
URLACTION_CREDENTIALS_USE = 0x00001A00
URLPOLICY_CREDENTIALS_SILENT_LOGON_OK = 0x00000000
URLPOLICY_CREDENTIALS_MUST_PROMPT_USER = 0x00010000
URLPOLICY_CREDENTIALS_CONDITIONAL_PROMPT = 0x00020000
URLPOLICY_CREDENTIALS_ANONYMOUS_ONLY = 0x00030000
URLACTION_AUTHENTICATE_CLIENT = 0x00001A01
URLPOLICY_AUTHENTICATE_CLEARTEXT_OK = 0x00000000
URLPOLICY_AUTHENTICATE_CHALLENGE_RESPONSE = 0x00010000
URLPOLICY_AUTHENTICATE_MUTUAL_ONLY = 0x00030000
URLACTION_COOKIES = 0x00001A02
URLACTION_COOKIES_SESSION = 0x00001A03
URLACTION_CLIENT_CERT_PROMPT = 0x00001A04
URLACTION_COOKIES_THIRD_PARTY = 0x00001A05
URLACTION_COOKIES_SESSION_THIRD_PARTY = 0x00001A06
URLACTION_COOKIES_ENABLED = 0x00001A10
URLACTION_NETWORK_CURR_MAX = 0x00001A10
URLACTION_NETWORK_MAX = 0x00001Bff
URLACTION_JAVA_MIN = 0x00001C00
URLACTION_JAVA_PERMISSIONS = 0x00001C00
URLPOLICY_JAVA_PROHIBIT = 0x00000000
URLPOLICY_JAVA_HIGH = 0x00010000
URLPOLICY_JAVA_MEDIUM = 0x00020000
URLPOLICY_JAVA_LOW = 0x00030000
URLPOLICY_JAVA_CUSTOM = 0x00800000
URLACTION_JAVA_CURR_MAX = 0x00001C00
URLACTION_JAVA_MAX = 0x00001Cff
URLACTION_INFODELIVERY_MIN = 0x00001D00
URLACTION_INFODELIVERY_NO_ADDING_CHANNELS = 0x00001D00
URLACTION_INFODELIVERY_NO_EDITING_CHANNELS = 0x00001D01
URLACTION_INFODELIVERY_NO_REMOVING_CHANNELS = 0x00001D02
URLACTION_INFODELIVERY_NO_ADDING_SUBSCRIPTIONS = 0x00001D03
URLACTION_INFODELIVERY_NO_EDITING_SUBSCRIPTIONS = 0x00001D04
URLACTION_INFODELIVERY_NO_REMOVING_SUBSCRIPTIONS = 0x00001D05
URLACTION_INFODELIVERY_NO_CHANNEL_LOGGING = 0x00001D06
URLACTION_INFODELIVERY_CURR_MAX = 0x00001D06
URLACTION_INFODELIVERY_MAX = 0x00001Dff
URLACTION_CHANNEL_SOFTDIST_MIN = 0x00001E00
URLACTION_CHANNEL_SOFTDIST_PERMISSIONS = 0x00001E05
URLPOLICY_CHANNEL_SOFTDIST_PROHIBIT = 0x00010000
URLPOLICY_CHANNEL_SOFTDIST_PRECACHE = 0x00020000
URLPOLICY_CHANNEL_SOFTDIST_AUTOINSTALL = 0x00030000
URLACTION_CHANNEL_SOFTDIST_MAX = 0x00001Eff
URLACTION_BEHAVIOR_MIN = 0x00002000
URLACTION_BEHAVIOR_RUN = 0x00002000
URLPOLICY_BEHAVIOR_CHECK_LIST = 0x00010000
URLACTION_FEATURE_MIN = 0x00002100
URLACTION_FEATURE_MIME_SNIFFING = 0x00002100
URLACTION_FEATURE_ZONE_ELEVATION = 0x00002101
URLACTION_FEATURE_WINDOW_RESTRICTIONS = 0x00002102
URLACTION_FEATURE_SCRIPT_STATUS_BAR = 0x00002103
URLACTION_FEATURE_FORCE_ADDR_AND_STATUS = 0x00002104
URLACTION_FEATURE_BLOCK_INPUT_PROMPTS = 0x00002105
URLACTION_AUTOMATIC_DOWNLOAD_UI_MIN = 0x00002200
URLACTION_AUTOMATIC_DOWNLOAD_UI = 0x00002200
URLACTION_AUTOMATIC_ACTIVEX_UI = 0x00002201
URLACTION_ALLOW_RESTRICTEDPROTOCOLS = 0x00002300
URLACTION_ALLOW_APEVALUATION = 0x00002301
URLACTION_WINDOWS_BROWSER_APPLICATIONS = 0x00002400
URLACTION_XPS_DOCUMENTS = 0x00002401
URLACTION_LOOSE_XAML = 0x00002402
URLACTION_LOWRIGHTS = 0x00002500
URLACTION_WINFX_SETUP = 0x00002600
URLPOLICY_ALLOW = 0x00
URLPOLICY_QUERY = 0x01
URLPOLICY_DISALLOW = 0x03
URLPOLICY_NOTIFY_ON_ALLOW = 0x10
URLPOLICY_NOTIFY_ON_DISALLOW = 0x20
URLPOLICY_LOG_ON_ALLOW = 0x40
URLPOLICY_LOG_ON_DISALLOW = 0x80
URLPOLICY_MASK_PERMISSIONS = 0x0f
URLPOLICY_DONTCHECKDLGBOX = 0x100
URLZONE_ESC_FLAG = 0x100
SECURITY_IE_STATE_GREEN = 0x00000000
SECURITY_IE_STATE_RED = 0x00000001
SOFTDIST_FLAG_USAGE_EMAIL = 0x00000001
SOFTDIST_FLAG_USAGE_PRECACHE = 0x00000002
SOFTDIST_FLAG_USAGE_AUTOINSTALL = 0x00000004
SOFTDIST_FLAG_DELETE_SUBSCRIPTION = 0x00000008
SOFTDIST_ADSTATE_NONE = 0x00000000
SOFTDIST_ADSTATE_AVAILABLE = 0x00000001
SOFTDIST_ADSTATE_DOWNLOADED = 0x00000002
SOFTDIST_ADSTATE_INSTALLED = 0x00000003
CONFIRMSAFETYACTION_LOADOBJECT = 0x00000001
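# Editor's sketch (hedged): URLPOLICY_* values pack a permission code in the low nibble
# together with notify/log bit flags, and URLPOLICY_MASK_PERMISSIONS recovers the
# permission part of a combined policy word. A minimal self-contained check:
if __name__ == "__main__":
    policy = URLPOLICY_DISALLOW | URLPOLICY_LOG_ON_DISALLOW
    assert policy & URLPOLICY_MASK_PERMISSIONS == URLPOLICY_DISALLOW
    print(hex(policy))  # 0x83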
|
scripts/external_libs/scapy-2.4.3/scapy/contrib/automotive/obd/obd.py | timgates42/trex-core | 956 | 11097803 | # flake8: noqa: F405
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# scapy.contrib.description = On Board Diagnostic Protocol (OBD-II)
# scapy.contrib.status = loads
import struct
from scapy.contrib.automotive.obd.iid.iids import *
from scapy.contrib.automotive.obd.mid.mids import *
from scapy.contrib.automotive.obd.pid.pids import *
from scapy.contrib.automotive.obd.tid.tids import *
from scapy.contrib.automotive.obd.services import *
from scapy.packet import Packet, bind_layers
from scapy.fields import XByteEnumField
class OBD(Packet):
services = {
0x01: 'CurrentPowertrainDiagnosticDataRequest',
0x02: 'PowertrainFreezeFrameDataRequest',
0x03: 'EmissionRelatedDiagnosticTroubleCodesRequest',
0x04: 'ClearResetDiagnosticTroubleCodesRequest',
0x05: 'OxygenSensorMonitoringTestResultsRequest',
0x06: 'OnBoardMonitoringTestResultsRequest',
0x07: 'PendingEmissionRelatedDiagnosticTroubleCodesRequest',
0x08: 'ControlOperationRequest',
0x09: 'VehicleInformationRequest',
0x0A: 'PermanentDiagnosticTroubleCodesRequest',
0x41: 'CurrentPowertrainDiagnosticDataResponse',
0x42: 'PowertrainFreezeFrameDataResponse',
0x43: 'EmissionRelatedDiagnosticTroubleCodesResponse',
0x44: 'ClearResetDiagnosticTroubleCodesResponse',
0x45: 'OxygenSensorMonitoringTestResultsResponse',
0x46: 'OnBoardMonitoringTestResultsResponse',
0x47: 'PendingEmissionRelatedDiagnosticTroubleCodesResponse',
0x48: 'ControlOperationResponse',
0x49: 'VehicleInformationResponse',
0x4A: 'PermanentDiagnosticTroubleCodesResponse',
0x7f: 'NegativeResponse'}
name = "On-board diagnostics"
fields_desc = [
XByteEnumField('service', 0, services)
]
def hashret(self):
if self.service == 0x7f:
return struct.pack('B', self.request_service_id & ~0x40)
return struct.pack('B', self.service & ~0x40)
def answers(self, other):
"""DEV: true if self is an answer from other"""
if other.__class__ == self.__class__:
return (other.service + 0x40) == self.service or \
(self.service == 0x7f and
self.request_service_id == other.service)
return False
# Service Bindings
bind_layers(OBD, OBD_S01, service=0x01)
bind_layers(OBD, OBD_S02, service=0x02)
bind_layers(OBD, OBD_S03, service=0x03)
bind_layers(OBD, OBD_S04, service=0x04)
bind_layers(OBD, OBD_S06, service=0x06)
bind_layers(OBD, OBD_S07, service=0x07)
bind_layers(OBD, OBD_S08, service=0x08)
bind_layers(OBD, OBD_S09, service=0x09)
bind_layers(OBD, OBD_S0A, service=0x0A)
bind_layers(OBD, OBD_S01_PR, service=0x41)
bind_layers(OBD, OBD_S02_PR, service=0x42)
bind_layers(OBD, OBD_S03_PR, service=0x43)
bind_layers(OBD, OBD_S04_PR, service=0x44)
bind_layers(OBD, OBD_S06_PR, service=0x46)
bind_layers(OBD, OBD_S07_PR, service=0x47)
bind_layers(OBD, OBD_S08_PR, service=0x48)
bind_layers(OBD, OBD_S09_PR, service=0x49)
bind_layers(OBD, OBD_S0A_PR, service=0x4A)
bind_layers(OBD, OBD_NR, service=0x7F)
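# Editor's sketch (hedged): positive responses reuse the request service id plus 0x40,
# which is what hashret() and answers() above rely on. Dissecting two raw byte strings
# is enough to see the pairing; payload fields are omitted for brevity.
if __name__ == "__main__":
    request = OBD(b"\x01")   # service 0x01 request
    response = OBD(b"\x41")  # service 0x41 positive response
    assert response.answers(request)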
|
websockets/handlers/stash_responder_blocking_wsh.py | meyerweb/wpt | 14,668 | 11097809 | #!/usr/bin/python
import json
import threading
import wptserve.stash
from mod_pywebsocket import msgutil
address, authkey = wptserve.stash.load_env_config()
path = "/stash_responder_blocking"
stash = wptserve.stash.Stash(path, address=address, authkey=authkey)
cv = threading.Condition()
def handle_set(key, value):
with cv:
stash.put(key, value)
cv.notify_all()
def handle_get(key):
with cv:
while True:
value = stash.take(key)
if value is not None:
return value
cv.wait()
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
query = json.loads(line)
action = query["action"]
key = query["key"]
if action == "set":
value = query["value"]
handle_set(key, value)
response = {}
elif action == "get":
value = handle_get(key)
response = {"value": value}
else:
response = {}
msgutil.send_message(request, json.dumps(response))
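# Editor's note (hedged): every message on this socket is a single JSON object. A
# hypothetical client exchange looks like:
#   -> {"action": "set", "key": "k1", "value": "v1"}   (stores k1 and wakes any waiters)
#   <- {}
#   -> {"action": "get", "key": "k1"}                   (blocks until k1 is present)
#   <- {"value": "v1"}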
|
aws_lambda_builders/workflows/ruby_bundler/workflow.py | eldritchideen/aws-lambda-builders | 180 | 11097849 | <reponame>eldritchideen/aws-lambda-builders
"""
Ruby Bundler Workflow
"""
import logging
from aws_lambda_builders.workflow import BaseWorkflow, Capability
from aws_lambda_builders.actions import CopySourceAction, CopyDependenciesAction, CleanUpAction
from .actions import RubyBundlerInstallAction, RubyBundlerVendorAction
from .utils import OSUtils
from .bundler import SubprocessBundler
LOG = logging.getLogger(__name__)
class RubyBundlerWorkflow(BaseWorkflow):
"""
A Lambda builder workflow that knows how to build
Ruby projects using Bundler.
"""
NAME = "RubyBundlerBuilder"
CAPABILITY = Capability(language="ruby", dependency_manager="bundler", application_framework=None)
EXCLUDED_FILES = (".aws-sam", ".git")
def __init__(self, source_dir, artifacts_dir, scratch_dir, manifest_path, runtime=None, osutils=None, **kwargs):
super(RubyBundlerWorkflow, self).__init__(
source_dir, artifacts_dir, scratch_dir, manifest_path, runtime=runtime, **kwargs
)
if osutils is None:
osutils = OSUtils()
self.actions = [CopySourceAction(source_dir, artifacts_dir, excludes=self.EXCLUDED_FILES)]
if self.download_dependencies:
# install the dependencies into the artifacts folder
subprocess_bundler = SubprocessBundler(osutils)
bundle_install = RubyBundlerInstallAction(artifacts_dir, subprocess_bundler=subprocess_bundler)
bundle_deployment = RubyBundlerVendorAction(artifacts_dir, subprocess_bundler=subprocess_bundler)
self.actions.append(bundle_install)
self.actions.append(bundle_deployment)
# if a dependencies folder is configured, also copy the installed dependencies into it
if self.dependencies_dir:
# clean up the dependencies first
self.actions.append(CleanUpAction(self.dependencies_dir))
self.actions.append(CopyDependenciesAction(source_dir, artifacts_dir, self.dependencies_dir))
else:
# if a dependencies folder exists and dependencies are not being downloaded, simply copy
# the dependencies from the dependencies folder into the artifacts folder
if self.dependencies_dir:
self.actions.append(CopySourceAction(self.dependencies_dir, artifacts_dir))
else:
LOG.info(
"download_dependencies is False and dependencies_dir is None. Copying the source files into the "
"artifacts directory. "
)
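# Editor's note (hedged): depending on the BaseWorkflow flags, self.actions ends up as
# one of three sequences:
#   downloading, no dependencies_dir:  CopySource -> BundlerInstall -> BundlerVendor
#   downloading with dependencies_dir: the above, then CleanUp(dependencies_dir) -> CopyDependencies
#   not downloading, dependencies_dir: CopySource -> CopySource(dependencies_dir -> artifacts_dir)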
|
allennlp_models/vision/dataset_readers/gqa.py | gabeorlanski/allennlp-models | 402 | 11097863 | from os import PathLike
from typing import (
Dict,
Union,
Optional,
Tuple,
Iterable,
)
import json
import os
from overrides import overrides
import torch
from torch import Tensor
from allennlp.common.file_utils import cached_path
from allennlp.common.lazy import Lazy
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import ArrayField, LabelField, ListField, TextField
from allennlp.data.image_loader import ImageLoader
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer
from allennlp.data.tokenizers import Tokenizer
from allennlp.modules.vision.grid_embedder import GridEmbedder
from allennlp.modules.vision.region_detector import RegionDetector
from allennlp_models.vision.dataset_readers.vision_reader import VisionReader
@DatasetReader.register("gqa")
class GQAReader(VisionReader):
"""
Parameters
----------
image_dir: `str`
Path to directory containing `png` image files.
image_loader : `ImageLoader`
image_featurizer: `Lazy[GridEmbedder]`
The backbone image processor (like a ResNet), whose output will be passed to the region
detector for finding object boxes in the image.
region_detector: `Lazy[RegionDetector]`
For pulling out regions of the image (both coordinates and features) that will be used by
downstream models.
data_dir: `str`
Path to directory containing text files for each dataset split. These files contain
the sentences and metadata for each task instance.
tokenizer: `Tokenizer`, optional
token_indexers: `Dict[str, TokenIndexer]`
"""
def __init__(
self,
image_dir: Union[str, PathLike],
*,
image_loader: Optional[ImageLoader] = None,
image_featurizer: Optional[Lazy[GridEmbedder]] = None,
region_detector: Optional[Lazy[RegionDetector]] = None,
answer_vocab: Optional[Union[str, Vocabulary]] = None,
feature_cache_dir: Optional[Union[str, PathLike]] = None,
data_dir: Optional[Union[str, PathLike]] = None,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
cuda_device: Optional[Union[int, torch.device]] = None,
max_instances: Optional[int] = None,
image_processing_batch_size: int = 8,
write_to_cache: bool = True,
) -> None:
super().__init__(
image_dir,
image_loader=image_loader,
image_featurizer=image_featurizer,
region_detector=region_detector,
feature_cache_dir=feature_cache_dir,
tokenizer=tokenizer,
token_indexers=token_indexers,
cuda_device=cuda_device,
max_instances=max_instances,
image_processing_batch_size=image_processing_batch_size,
write_to_cache=write_to_cache,
)
self.data_dir = data_dir
# read answer vocab
if answer_vocab is None:
self.answer_vocab = None
else:
if isinstance(answer_vocab, str):
answer_vocab = cached_path(answer_vocab, extract_archive=True)
answer_vocab = Vocabulary.from_files(answer_vocab)
self.answer_vocab = frozenset(
answer_vocab.get_token_to_index_vocabulary("answers").keys()
)
@overrides
def _read(self, split_or_filename: str):
if not self.data_dir:
self.data_dir = "https://nlp.stanford.edu/data/gqa/questions1.2.zip!"
splits = {
"challenge_all": f"{self.data_dir}challenge_all_questions.json",
"challenge_balanced": f"{self.data_dir}challenge_balanced_questions.json",
"test_all": f"{self.data_dir}test_all_questions.json",
"test_balanced": f"{self.data_dir}test_balanced_questions.json",
"testdev_all": f"{self.data_dir}testdev_all_questions.json",
"testdev_balanced": f"{self.data_dir}testdev_balanced_questions.json",
"train_balanced": f"{self.data_dir}train_balanced_questions.json",
"train_all": f"{self.data_dir}train_all_questions",
"val_all": f"{self.data_dir}val_all_questions.json",
"val_balanced": f"{self.data_dir}val_balanced_questions.json",
}
filename = splits.get(split_or_filename, split_or_filename)
filename = cached_path(filename, extract_archive=True)
# If we're considering a directory of files (such as train_all)
# loop through each in file in generator
if os.path.isdir(filename):
files = [os.path.join(filename, file_path) for file_path in os.listdir(filename)]
else:
files = [filename]
# Ensure order is deterministic.
files.sort()
for data_file in files:
with open(data_file) as f:
questions_with_annotations = json.load(f)
question_dicts = list(
self.shard_iterable(
questions_with_annotations[q_id] for q_id in questions_with_annotations
)
)
processed_images: Iterable[
Optional[Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]
]
if self.produce_featurized_images:
# It would be much easier to just process one image at a time, but it's faster to process
# them in batches. So this code gathers up instances until it has enough to fill up a batch
# that needs processing, and then processes them all.
filenames = [f"{question_dict['imageId']}.jpg" for question_dict in question_dicts]
try:
processed_images = self._process_image_paths(
self.images[filename] for filename in filenames
)
except KeyError as e:
missing_filename = e.args[0]
raise KeyError(
missing_filename,
f"We could not find an image with the name {missing_filename}. "
"Because of the size of the image datasets, we don't download them automatically. "
"Please download the images from "
"https://nlp.stanford.edu/data/gqa/images.zip, "
"extract them into a directory, and set the image_dir parameter to point to that "
"directory. This dataset reader does not care about the exact directory structure. It "
"finds the images wherever they are.",
)
else:
processed_images = [None] * len(question_dicts)
for question_dict, processed_image in zip(question_dicts, processed_images):
answer = {
"answer": question_dict["answer"],
}
instance = self.text_to_instance(question_dict["question"], processed_image, answer)
if instance is not None:
yield instance
@overrides
def text_to_instance(
self, # type: ignore
question: str,
image: Optional[Union[str, Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]]],
answer: Optional[Dict[str, str]] = None,
*,
use_cache: bool = True,
) -> Optional[Instance]:
from allennlp.data import Field
tokenized_question = self._tokenizer.tokenize(question)
fields: Dict[str, Field] = {"question": TextField(tokenized_question, None)}
if answer is not None:
labels_fields = []
weights = []
if not self.answer_vocab or answer["answer"] in self.answer_vocab:
labels_fields.append(LabelField(answer["answer"], label_namespace="answers"))
weights.append(1.0)
if len(labels_fields) <= 0:
return None
fields["label_weights"] = ArrayField(torch.tensor(weights))
fields["labels"] = ListField(labels_fields)
if image is not None:
if isinstance(image, str):
features, coords, _, _ = next(
self._process_image_paths([image], use_cache=use_cache)
)
else:
features, coords, _, _ = image
fields["box_features"] = ArrayField(features)
fields["box_coordinates"] = ArrayField(coords)
fields["box_mask"] = ArrayField(
features.new_ones((features.shape[0],), dtype=torch.bool),
padding_value=False,
dtype=torch.bool,
)
return Instance(fields)
@overrides
def apply_token_indexers(self, instance: Instance) -> None:
instance["question"].token_indexers = self._token_indexers # type: ignore
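# Editor's sketch (hedged; the path and reader arguments are placeholders): once the GQA
# images have been downloaded and extracted, reading the balanced training split looks
# roughly like
#
#   reader = GQAReader(image_dir="/data/gqa/images")   # hypothetical local path
#   for instance in reader.read("train_balanced"):     # DatasetReader.read from allennlp
#       ...
#
# Split names outside the `splits` table in _read() are treated as a filename or URL.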
|
app/utils/get_list_of_app_exceptions_for_frontend.py | AmitArie/abstracting-fastapi-services | 224 | 11097873 | from utils.app_exceptions import AppException
print([e for e in dir(AppException) if "__" not in e])
# ['FooCreateItem', 'FooGetItem', 'FooItemRequiresAuth']
|
ahk/window.py | scslmd/ahk | 502 | 11097885 | <reponame>scslmd/ahk
import ast
import asyncio
import collections
from contextlib import suppress
import warnings
from types import CoroutineType
from ahk.script import ScriptEngine, AsyncScriptEngine
from ahk.utils import escape_sequence_replace, make_logger, async_filter
logger = make_logger(__name__)
class WindowNotFoundError(ValueError):
pass
class Control:
def __init__(self):
raise NotImplementedError
def click(self):
"""
REF: https://www.autohotkey.com/docs/commands/ControlClick.htm
:return:
"""
raise NotImplementedError
def focus(self):
"""
REF: https://www.autohotkey.com/docs/commands/ControlFocus.htm
:return:
"""
raise NotImplementedError
def get(self, key):
"""
REF: https://www.autohotkey.com/docs/commands/ControlGet.htm
:param key:
:return:
"""
raise NotImplementedError
def has_focus(self):
raise NotImplementedError
@property
def position(self):
"""
REF: https://www.autohotkey.com/docs/commands/ControlGetPos.htm
:return:
"""
raise NotImplementedError
@property
def text(self):
"""
REF: https://www.autohotkey.com/docs/commands/ControlGetText.htm
:return:
"""
raise NotImplementedError
@text.setter
def text(self, new_text):
"""
REF: https://www.autohotkey.com/docs/commands/ControlSetText.htm
:param new_text:
:return:
"""
raise NotImplementedError
def move(self):
"""
REF: https://www.autohotkey.com/docs/commands/ControlMove.htm
:return:
"""
raise NotImplementedError
def send(self, raw=False):
"""
REF: https://www.autohotkey.com/docs/commands/ControlSend.htm
:param raw:
:return:
"""
raise NotImplementedError
class Window(object):
MINIMIZED = '-1'
MAXIMIZED = '1'
NON_MIN_NON_MAX = '0'
_set_subcommands = {
'always_on_top': 'AlwaysOnTop',
'bottom': 'Bottom',
'top': 'Top',
'disable': 'Disable',
'enable': 'Enable',
'redraw': 'Redraw',
'style': 'Style',
'ex_style': 'ExStyle',
'region': 'Region',
'transparent': 'Transparent',
'transcolor': 'TransColor',
}
_get_subcommands = {
'id': 'ID',
'id_last': 'IDLast',
'pid': 'PID',
'process_name': 'ProcessName',
'process_path': 'ProcessPath',
'process': 'ProcessPath',
'count': 'count',
'list': 'list',
'min_max': 'MinMax',
'controls': 'ControlList',
'controls_hwnd': 'ControlListHwnd',
'transparent': 'Transparent',
'trans_color': 'TransColor',
'style': 'Style', # This will probably get a property later
'ex_style': 'ExStyle', # This will probably get a property later
}
# add reverse lookups
_set_subcommands.update({value: value for value in _set_subcommands.values()})
_get_subcommands.update({value: value for value in _get_subcommands.values()})
def __init__(self, engine: ScriptEngine, ahk_id: str, encoding=None):
self.engine = engine # should this be a weakref instead?
self.id = ahk_id
self.encoding = encoding
@classmethod
def from_mouse_position(cls, engine: ScriptEngine, **kwargs):
script = engine.render_template('window/from_mouse.ahk')
ahk_id = engine.run_script(script)
return cls(engine=engine, ahk_id=ahk_id, **kwargs)
@classmethod
def from_pid(cls, engine: ScriptEngine, pid, **kwargs):
script = engine.render_template('window/get.ahk', subcommand='ID', title=f'ahk_pid {pid}')
ahk_id = engine.run_script(script)
return cls(engine=engine, ahk_id=ahk_id, **kwargs)
def __getattr__(self, attr):
if attr.lower() in self._get_subcommands:
return self.get(attr)
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attr}'")
def _get(self, subcommand):
sub = self._get_subcommands.get(subcommand)
if not sub:
raise ValueError(f'No such subcommand {subcommand}')
script = self._render_template(
'window/get.ahk',
subcommand=sub,
title=f'ahk_id {self.id}',
)
return script
def get(self, subcommand):
script = self._get(subcommand)
return self.engine.run_script(script)
def __repr__(self):
return f'<ahk.window.Window ahk_id={self.id}>'
def _set(self, subcommand, value):
sub = self._set_subcommands.get(subcommand)
if not sub:
raise ValueError(f'No such subcommand {subcommand}')
script = self._render_template(
'window/win_set.ahk', subcommand=subcommand, value=value, title=f'ahk_id {self.id}'
)
return script
def set(self, subcommand, value):
script = self._set(subcommand, value)
return self.engine.run_script(script)
def _get_pos(self, info=None):
script = self._render_template('window/win_position.ahk', title=f'ahk_id {self.id}', pos_info=info)
return script
def get_pos(self, info=None):
"""
:return:
"""
script = self._get_pos(info=info)
resp = self.engine.run_script(script)
try:
value = ast.literal_eval(resp)
return value
except SyntaxError:
raise WindowNotFoundError('No window found')
@property
def rect(self):
return self.get_pos()
@rect.setter
def rect(self, new_position):
x, y, width, height = new_position
self.move(x=x, y=y, width=width, height=height)
@property
def position(self):
return self.get_pos('position')
@position.setter
def position(self, new_position):
self.set_position(new_position)
def set_position(self, new_position):
x, y = new_position
return self.move(x, y)
@property
def width(self):
width = self.get_pos('width')
return width
@width.setter
def width(self, new_width):
self.move(width=new_width)
@property
def height(self):
height = self.get_pos('height')
return height
@height.setter
def height(self, new_height):
self.move(height=new_height)
def _base_check(self, command):
script = self._render_template('window/base_check.ahk', command=command, title=f'ahk_id {self.id}')
return script
def _base_property(self, command):
script = self._base_check(command)
resp = self.engine.run_script(script)
return bool(ast.literal_eval(resp))
@property
def active(self):
return self.is_active()
def is_active(self):
return self._base_property(command='WinActive')
@property
def exist(self):
return self.exists()
def exists(self):
return self._base_property(command='WinExist')
def _base_get_method_(self, command):
script = self._render_template('window/base_get_command.ahk', command=command, title=f'ahk_id {self.id}')
return script
def _base_get_method(self, command):
script = self._base_get_method_(command)
result = self.engine.run_script(script, decode=False)
if self.encoding:
if isinstance(result, bytes):
return result.decode(encoding=self.encoding)
else:
return result.stdout.decode(encoding=self.encoding)
if isinstance(result, bytes):
return result
return result.stdout
@property
def title(self):
return self.get_title()
def get_title(self):
return self._base_get_method('WinGetTitle')
def _set_title(self, value):
script = self._render_template('window/win_set_title.ahk', title=f'ahk_id {self.id}', new_title=value)
return script
@title.setter
def title(self, value):
self.set_title(value)
def set_title(self, value):
script = self._set_title(value)
return self.engine.run_script(script) or None
@property
def class_name(self):
return self.get_class_name()
@property
def text(self):
return self.get_text()
@property
def minimized(self):
return self.is_minimized()
@property
def maximized(self):
return self.is_maximized()
def is_minimized(self):
return self.get('MinMax') == self.MINIMIZED
def is_maximized(self):
return self.get('MinMax') == self.MAXIMIZED
@property
def non_max_non_min(self):
return self.get('MinMax') == self.NON_MIN_NON_MAX
def is_minmax(self):
return self.get('MinMax') != self.NON_MIN_NON_MAX
def get_class_name(self):
return self._base_get_method('WinGetClass')
def get_text(self):
return self._base_get_method('WinGetText')
@property
def transparent(self) -> int:
return self.get_transparency()
def get_transparency(self) -> int:
result = self.get('Transparent')
if result:
return int(result)
else:
return 255
@transparent.setter
def transparent(self, value):
self.set_transparency(value)
def set_transparency(self, value):
if isinstance(value, int) and 0 <= value <= 255:
return self.set('Transparent', value) or None
else:
raise ValueError(f'"{value}" not a valid option. Please use [0, 255] integer')
def _always_on_top(self):
script = self._render_template('window/win_is_always_on_top.ahk', title=f'ahk_id {self.id}')
return script
@property
def always_on_top(self) -> bool:
return self.is_always_on_top()
def is_always_on_top(self) -> bool:
script = self._always_on_top()
resp = self.engine.run_script(script)
return bool(ast.literal_eval(resp))
@always_on_top.setter
def always_on_top(self, value):
self.set_always_on_top(value)
def set_always_on_top(self, value):
if value in ('on', 'On', True, 1):
return self.set('AlwaysOnTop', 'On') or None
elif value in ('off', 'Off', False, 0):
return self.set('AlwaysOnTop', 'Off') or None
elif value in ('toggle', 'Toggle', -1):
return self.set('AlwaysOnTop', 'Toggle') or None
else:
raise ValueError(f'"{value}" not a valid option. Please use On/Off/Toggle/True/False/0/1/-1')
def disable(self):
"""
Disable the window
:return:
"""
return self.set('Disable', '') or None
def enable(self):
"""
Enable the window
:return:
"""
return self.set('Enable', '') or None
def redraw(self):
return self.set('Redraw', '') or None
def to_bottom(self):
"""
Send window to bottom (behind other windows)
:return:
"""
return self.set('Bottom', '') or None
def to_top(self):
"""
Bring the window to the foreground (above other windows)
:return:
"""
return self.set('Top', '') or None
def _render_template(self, *args, **kwargs):
kwargs['win'] = self
return self.engine.render_template(*args, **kwargs)
def _base_method_(self, command, seconds_to_wait='', blocking=False):
script = self._render_template(
'window/base_command.ahk', command=command, title=f'ahk_id {self.id}', seconds_to_wait=seconds_to_wait
)
return script
def _base_method(self, command, seconds_to_wait='', blocking=True):
script = self._base_method_(command, seconds_to_wait=seconds_to_wait)
return self.engine.run_script(script, blocking=blocking)
def activate(self):
"""
Activate the window.
See also: `WinActivate`_
.. _WinActivate: https://www.autohotkey.com/docs/commands/WinActivate.htm
:return:
"""
return self._base_method('WinActivate') or None
def activate_bottom(self):
"""
Calls `WinActivateBottom`_ on the window
.. _WinActivateBottom: https://www.autohotkey.com/docs/commands/WinActivateBottom.htm
:return:
"""
return self._base_method('WinActivateBottom') or None
def close(self, seconds_to_wait=''):
"""
Closes the Window. See also: `WinClose`_
.. _WinClose: https://www.autohotkey.com/docs/commands/WinClose.htm
:param seconds_to_wait:
:return:
"""
return self._base_method('WinClose', seconds_to_wait=seconds_to_wait) or None
def hide(self):
"""
Hides the window. See also: `WinHide`_
.. _WinHide: https://www.autohotkey.com/docs/commands/WinHide.htm
:return:
"""
return self._base_method('WinHide') or None
def kill(self, seconds_to_wait=''):
return self._base_method('WinKill', seconds_to_wait=seconds_to_wait) or None
def maximize(self):
"""
maximize the window
:return:
"""
return self._base_method('WinMaximize') or None
def minimize(self):
"""
minimize the window
:return:
"""
return self._base_method('WinMinimize') or None
def restore(self):
"""
restore the window
:return:
"""
return self._base_method('WinRestore') or None
def show(self):
"""
show the window
:return:
"""
return self._base_method('WinShow') or None
def wait(self, seconds_to_wait=''):
"""
:param seconds_to_wait:
:return:
"""
return self._base_method('WinWait', seconds_to_wait=seconds_to_wait, blocking=True) or None
def wait_active(self, seconds_to_wait=''):
"""
:param seconds_to_wait:
:return:
"""
return self._base_method('WinWaitActive', seconds_to_wait=seconds_to_wait, blocking=True) or None
def wait_not_active(self, seconds_to_wait=''):
"""
:param seconds_to_wait:
:return:
"""
return self._base_method('WinWaitNotActive', seconds_to_wait=seconds_to_wait, blocking=True) or None
def wait_close(self, seconds_to_wait=''):
"""
:param seconds_to_wait:
:return:
"""
return self._base_method('WinWaitClose', seconds_to_wait=seconds_to_wait, blocking=True) or None
def _move(self, x='', y='', width=None, height=None):
script = self._render_template(
'window/win_move.ahk', title=f'ahk_id {self.id}', x=x, y=y, width=width, height=height
)
return script
def move(self, x='', y='', width=None, height=None):
"""
Move the window to a position and/or change its geometry
:param x:
:param y:
:param width:
:param height:
:return:
"""
script = self._move(x=x, y=y, width=width, height=height)
return self.engine.run_script(script) or None
def _send(self, keys, delay=10, raw=False, blocking=False, escape=False, press_duration=-1):
if escape:
keys = self.engine.escape_sequence_replace(keys)
script = self._render_template(
'window/win_send.ahk',
title=f'ahk_id {self.id}',
keys=keys,
raw=raw,
delay=delay,
press_duration=press_duration,
blocking=blocking,
)
return script
def send(self, keys, delay=10, raw=False, blocking=True, escape=False, press_duration=-1):
"""
Send keystrokes directly to the window.
Uses ControlSend
https://autohotkey.com/docs/commands/Send.htm
"""
script = self._send(keys, delay=delay, raw=raw, blocking=blocking, escape=escape, press_duration=press_duration)
return self.engine.run_script(script, blocking=blocking)
def _click(self, x=None, y=None, *, button=None, n=1, options=None, blocking=True):
from ahk.mouse import resolve_button
if x or y:
if y is None and isinstance(x, collections.abc.Sequence) and len(x) == 2:
# allow the position to be specified as a sequence of length 2
x, y = x
assert x is not None and y is not None, 'If provided, position must be specified by x AND y'
button = resolve_button(button)
script = self._render_template(
'window/win_click.ahk', x=x, y=y, hwnd=f'ahk_id {self.id}', button=button, n=n, options=options
)
return script
def click(self, *args, **kwargs):
"""
Click at an x/y location on the screen.
Uses ControlClick
https://autohotkey.com/docs/commands/ControlClick.htm
x/y position params may also be specified as a 2-item sequence
:param x: x offset relative to topleft corner of the window
:param y: y offset relative to the top of the window
:param button: the button to press (default is left mouse)
:param n: number of times to click
:param options: per ControlClick documentation
:param blocking:
:return:
"""
blocking = kwargs.get('blocking', True)
script = self._click(*args, **kwargs)
return self.engine.run_script(script, blocking=blocking)
def __eq__(self, other):
if not isinstance(other, Window):
return False
return self.id == other.id
def __hash__(self):
return hash(repr(self))
class WindowMixin(ScriptEngine):
def __init__(self, *args, **kwargs):
self.window_encoding = kwargs.pop('window_encoding', None)
super().__init__(*args, **kwargs)
def _win_get(self, title='', text='', exclude_title='', exclude_text=''):
script = self.render_template(
'window/get.ahk',
subcommand='ID',
title=title,
text=text,
exclude_text=exclude_text,
exclude_title=exclude_title,
)
return script
def win_get(self, title='', text='', exclude_title='', exclude_text='', encoding=None):
script = self._win_get(title=title, text=text, exclude_title=exclude_title, exclude_text=exclude_text)
encoding = encoding or self.window_encoding
ahk_id = self.run_script(script)
return Window(engine=self, ahk_id=ahk_id, encoding=encoding)
def _win_set(self, subcommand, *args, blocking=True):
script = self.render_template('window/set.ahk', subcommand=subcommand, *args, blocking=blocking)
return script
def win_set(self, subcommand, *args, blocking=True):
script = self.render_template('window/set.ahk', subcommand=subcommand, *args, blocking=blocking)
return self.run_script(script, blocking=blocking) or None
@property
def active_window(self):
return self.win_get(title='A')
def get_active_window(self):
return self.active_window
def _all_window_ids_(self):
script = self.render_template('window/id_list.ahk')
return script
def _all_window_ids(self):
script = self._all_window_ids_()
result = self.run_script(script)
return result.split(',')[:-1] # last one is always an empty string
def windows(self):
"""
Returns a list of windows
:return:
"""
windowze = []
for ahk_id in self._all_window_ids():
win = Window(engine=self, ahk_id=ahk_id, encoding=self.window_encoding)
windowze.append(win)
return windowze
def find_windows(self, func=None, **kwargs):
"""
Find all matching windows
:param func: a callable to filter windows
:param bool exact: if False (the default) partial matches are found. If True, only exact matches are returned
:param kwargs: keywords of attributes of the window (has no effect if ``func`` is provided)
:return: a generator containing any matching :py:class:`~ahk.window.Window` objects.
"""
if func is None:
exact = kwargs.pop('exact', False)
def func(win):
for attr, expected in kwargs.items():
if exact:
result = getattr(win, attr) == expected
else:
result = expected in getattr(win, attr)
if result is False:
return False
return True
for window in filter(func, self.windows()):
yield window
def find_window(self, func=None, **kwargs):
"""
Like ``find_windows`` but only returns the first found window
:param func:
:param kwargs:
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
with suppress(StopIteration):
return next(self.find_windows(func=func, **kwargs))
def find_windows_by_title(self, title, exact=False):
"""
Equivalent to ``find_windows(title=title)``
Note that ``title`` is a ``bytes`` object
:param bytes title:
:param exact:
:return:
"""
for window in self.find_windows(title=title, exact=exact):
yield window
def find_window_by_title(self, *args, **kwargs):
"""
Like ``find_windows_by_title`` but only returns the first result.
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
with suppress(StopIteration):
return next(self.find_windows_by_title(*args, **kwargs))
def find_windows_by_text(self, text, exact=False):
"""
:param text:
:param exact:
:return: a generator containing any matching :py:class:`~ahk.window.Window` objects.
"""
for window in self.find_windows(text=text, exact=exact):
yield window
def find_window_by_text(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
with suppress(StopIteration):
return next(self.find_windows_by_text(*args, **kwargs))
def find_windows_by_class(self, class_name, exact=False):
"""
:param class_name:
:param exact:
:return: a generator containing any matching :py:class:`~ahk.window.Window` objects.
"""
for window in self.find_windows(class_name=class_name, exact=exact):
yield window
def find_window_by_class(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
with suppress(StopIteration):
return next(self.find_windows_by_class(*args, **kwargs))
class AsyncWindow(Window):
@classmethod
async def from_mouse_position(cls, engine: ScriptEngine, **kwargs):
script = engine.render_template('window/from_mouse.ahk')
ahk_id = await engine.a_run_script(script)
return cls(engine=engine, ahk_id=ahk_id, **kwargs)
@classmethod
async def from_pid(cls, engine: ScriptEngine, pid, **kwargs):
script = engine.render_template('window/get.ahk', subcommand='ID', title=f'ahk_pid {pid}')
ahk_id = await engine.a_run_script(script)
return cls(engine=engine, ahk_id=ahk_id, **kwargs)
# def __getattr__(self, item):
# if item in self._get_subcommands:
# raise AttributeError(f"Unaccessable Attribute. Use get({item}) instead")
# raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")
#
async def get_pos(self, info=None):
script = self._get_pos(info)
resp = await self.engine.a_run_script(script)
try:
value = ast.literal_eval(resp)
return value
except SyntaxError:
raise WindowNotFoundError('No window found')
@Window.rect.setter
def rect(self, new_position):
warnings.warn(
'rect setter only schedules coroutine. window may not change immediately. Use move() instead', stacklevel=2
)
x, y, width, height = new_position
coro = self.move(x=x, y=y, width=width, height=height)
asyncio.create_task(coro)
@Window.position.setter
def position(self, new_position):
warnings.warn(
'position setter only schedules coroutine. window may not change immediately. use set_position() instead',
stacklevel=2,
)
x, y = new_position
coro = self.move(x, y)
asyncio.create_task(coro)
@Window.width.setter
def width(self, new_width):
warnings.warn(
'width setter only schedules coroutine. window may not change immediately. use move() instead', stacklevel=2
)
coro = self.move(width=new_width)
asyncio.create_task(coro)
@Window.height.setter
def height(self, new_height):
warnings.warn(
'height setter only schedules coroutine. window may not change immediately. use move() instead',
stacklevel=2,
)
coro = self.move(height=new_height)
asyncio.create_task(coro)
async def _base_property(self, command):
script = self._base_check(command)
resp = await self.engine.a_run_script(script)
return bool(ast.literal_eval(resp))
async def _base_get_method(self, command):
script = self._base_get_method_(command)
result = await self.engine.a_run_script(script, decode=False)
if self.encoding:
return result.decode(encoding=self.encoding)
return result
@Window.title.setter
def title(self, new_title):
warnings.warn(
'title setter only schedules coroutine. window may not change immediately. use set_title() instead',
stacklevel=2,
)
coro = self.set_title(new_title)
asyncio.create_task(coro)
async def is_minimized(self):
return await self.get('MinMax') == self.MINIMIZED
async def is_maximized(self):
return await self.get('MinMax') == self.MAXIMIZED
@property
async def non_max_non_min(self):
return await self.get('MinMax') == self.NON_MIN_NON_MAX
async def is_minmax(self):
return await self.get('MinMax') != self.NON_MIN_NON_MAX
@property
async def transparent(self) -> int:
result = await self.get('Transparent')
if result:
return int(result)
else:
return 255
@transparent.setter
def transparent(self, value):
warnings.warn(
'transparent setter only schedules coroutine. window may not change immediately. use set_transparency() instead',
stacklevel=2,
)
if isinstance(value, int) and 0 <= value <= 255:
coro = self.set('Transparent', value)
asyncio.create_task(coro)
else:
raise ValueError('transparency must be integer in range [0, 255]')
async def get_transparency(self) -> int:
result = await self.get('Transparent')
if result:
return int(result)
else:
return 255
async def set_transparency(self, value):
if isinstance(value, int) and 0 <= value <= 255:
await self.set('Transparent', value)
else:
raise ValueError(f'"{value}" not a valid option. Please use [0, 255] integer')
async def is_always_on_top(self):
script = self._always_on_top()
resp = await self.engine.a_run_script(script)
return bool(ast.literal_eval(resp))
@Window.always_on_top.setter
def always_on_top(self, value):
warnings.warn(
f'always_on_top setter only schedules coroutine. changes may not happen immediately. use set_always_on_top({repr(value)}) instead'
)
if value in ('on', 'On', True, 1):
coro = self.set('AlwaysOnTop', 'On')
elif value in ('off', 'Off', False, 0):
coro = self.set('AlwaysOnTop', 'Off')
elif value in ('toggle', 'Toggle', -1):
coro = self.set('AlwaysOnTop', 'Toggle')
else:
raise ValueError(f'"{value}" not a valid option. Please use On/Off/Toggle/True/False/0/1/-1')
asyncio.create_task(coro)
class AsyncWindowMixin(AsyncScriptEngine, WindowMixin):
async def win_get(self, *args, **kwargs):
encoding = kwargs.pop('encoding', self.window_encoding)
script = self._win_get(*args, **kwargs)
ahk_id = await self.a_run_script(script)
return AsyncWindow(engine=self, ahk_id=ahk_id, encoding=encoding)
async def _all_window_ids(self):
script = self._all_window_ids_()
result = await self.a_run_script(script)
return result.split(',')[:-1] # last one is always an empty string
async def windows(self):
"""
Returns a list of windows
:return:
"""
windowze = []
for ahk_id in await self._all_window_ids():
win = AsyncWindow(engine=self, ahk_id=ahk_id, encoding=self.window_encoding)
windowze.append(win)
return windowze
async def find_windows(self, func=None, **kwargs):
"""
Find all matching windows
:param func: a callable to filter windows
:param bool exact: if False (the default) partial matches are found. If True, only exact matches are returned
:param kwargs: keywords of attributes of the window (has no effect if ``func`` is provided)
:return: a generator containing any matching :py:class:`~ahk.window.Window` objects.
"""
if func is None:
exact = kwargs.pop('exact', False)
async def func(win):
for attr, expected in kwargs.items():
if exact:
result = await getattr(win, attr) == expected
else:
result = expected in await getattr(win, attr)
if result is False:
return False
return True
async for window in async_filter(func, await self.windows()):
yield window
async def find_window(self, func=None, **kwargs):
"""
Like ``find_windows`` but only returns the first found window
:param func:
:param kwargs:
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
async for window in self.find_windows(func=func, **kwargs):
return window # return the first result
raise WindowNotFoundError('yikes')
async def find_windows_by_title(self, title, exact=False):
"""
Equivalent to ``find_windows(title=title)``
Note that ``title`` is a ``bytes`` object
:param bytes title:
:param exact:
:return:
"""
async for window in self.find_windows(title=title, exact=exact):
yield window
async def find_window_by_title(self, title):
"""
Like ``find_windows_by_title`` but only returns the first result.
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
async for window in self.find_windows_by_title(title=title):
return window
async def find_windows_by_text(self, text, exact=False):
"""
:param text:
:param exact:
:return: a generator containing any matching :py:class:`~ahk.window.Window` objects.
"""
async for window in self.find_windows(text=text, exact=exact):
yield window
async def find_window_by_text(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
async for window in self.find_windows_by_text(*args, **kwargs):
return window
async def find_windows_by_class(self, class_name, exact=False):
"""
:param class_name:
:param exact:
:return: a generator containing any matching :py:class:`~ahk.window.Window` objects.
"""
async for window in self.find_windows(class_name=class_name, exact=exact):
yield window
async def find_window_by_class(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return: a :py:class:`~ahk.window.Window` object or ``None`` if no matching window is found
"""
async for window in self.find_windows_by_class(*args, **kwargs):
return window
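# Editor's sketch (hedged): these mixins are normally reached through the package's
# top-level AHK class (which mixes in WindowMixin) rather than instantiated directly,
# and running the sketch requires an AutoHotkey executable on the machine.
#
#   from ahk import AHK
#   ahk = AHK()
#   win = ahk.win_get(title="Untitled - Notepad")
#   if win.exists():
#       win.activate()
#       win.send("hello", delay=20)
#       win.move(x=100, y=100, width=800, height=600)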
|
softlearning/policies/uniform_policy_test.py | limash/softlearning | 920 | 11097900 | <reponame>limash/softlearning<filename>softlearning/policies/uniform_policy_test.py<gh_stars>100-1000
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import tree
from softlearning import policies
from softlearning.policies.uniform_policy import ContinuousUniformPolicy
from softlearning.environments.utils import get_environment
from softlearning.samplers import utils as sampler_utils
class ContinuousUniformPolicyTest(tf.test.TestCase):
def setUp(self):
self.env = get_environment('gym', 'Swimmer', 'v3', {})
self.policy = ContinuousUniformPolicy(
action_range=(
self.env.action_space.low,
self.env.action_space.high,
),
input_shapes=self.env.observation_shape,
output_shape=self.env.action_shape,
observation_keys=self.env.observation_keys)
def test_actions_and_log_probs(self):
observation1_np = self.env.reset()
observation2_np = self.env.step(self.env.action_space.sample())[0]
observations_np = type(observation1_np)((
(key, np.stack((
observation1_np[key], observation2_np[key]
), axis=0).astype(np.float32))
for key in observation1_np.keys()
))
observations_tf = tree.map_structure(
lambda x: tf.constant(x, dtype=x.dtype), observations_np)
for observations in (observations_np, observations_tf):
actions = self.policy.actions(observations)
log_pis = self.policy.log_probs(observations, actions)
self.assertAllEqual(
log_pis,
tfp.distributions.Independent(
tfp.distributions.Uniform(
low=self.env.action_space.low,
high=self.env.action_space.high,
),
reinterpreted_batch_ndims=1,
).log_prob(actions)[..., None])
self.assertEqual(actions.shape, (2, *self.env.action_shape))
def test_env_step_with_actions(self):
observation_np = self.env.reset()
action = self.policy.action(observation_np).numpy()
self.env.step(action)
def test_trainable_variables(self):
self.assertEqual(len(self.policy.trainable_variables), 0)
def test_get_diagnostics(self):
observation1_np = self.env.reset()
observation2_np = self.env.step(self.env.action_space.sample())[0]
observations_np = {}
observations_np = type(observation1_np)((
(key, np.stack((
observation1_np[key], observation2_np[key]
), axis=0).astype(np.float32))
for key in observation1_np.keys()
))
diagnostics = self.policy.get_diagnostics(observations_np)
self.assertTrue(isinstance(diagnostics, OrderedDict))
self.assertFalse(diagnostics)
def test_serialize_deserialize(self):
policy_1 = ContinuousUniformPolicy(
action_range=(
self.env.action_space.low,
self.env.action_space.high,
),
input_shapes=self.env.observation_shape,
output_shape=self.env.action_shape,
observation_keys=self.env.observation_keys)
self.assertFalse(policy_1.trainable_weights)
config = policies.serialize(policy_1)
policy_2 = policies.deserialize(config)
self.assertEqual(policy_2._action_range, policy_1._action_range)
self.assertEqual(policy_2._input_shapes, policy_1._input_shapes)
self.assertEqual(policy_2._output_shape, policy_1._output_shape)
self.assertEqual(
policy_2._observation_keys, policy_1._observation_keys)
path = sampler_utils.rollout(
self.env,
policy_2,
path_length=10,
break_on_terminal=False)
observations = path['observations']
np.testing.assert_equal(
policy_1.actions(observations).numpy().shape,
policy_2.actions(observations).numpy().shape)
if __name__ == '__main__':
tf.test.main()
|
beta_rec/data/__init__.py | mengzaiqiao/TVBR | 126 | 11097904 | <filename>beta_rec/data/__init__.py
"""Data Module."""
from .base_data import BaseData
__all__ = ["BaseData"]
|
smbprotocol/exceptions.py | allblue147/CVE-2020-0796-PoC | 306 | 11097905 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, <NAME> (@jborean93) <<EMAIL>>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import binascii
import errno
import ntpath
import os
import socket
from collections import (
OrderedDict,
)
from smbprotocol import (
Dialects,
)
from smbprotocol._text import (
to_bytes,
to_native,
to_text,
)
from smbprotocol.reparse_point import (
SymbolicLinkReparseDataBuffer,
)
from smbprotocol.structure import (
BytesField,
EnumField,
IntField,
ListField,
Structure,
StructureField,
)
class NtStatus(object):
"""
[MS-ERREF] https://msdn.microsoft.com/en-au/library/cc704588.aspx
2.3.1 NTSTATUS Values
These values are set in the status field of an SMB2Header response. This is
not an exhaustive list but common values that are returned.
"""
STATUS_SUCCESS = 0x00000000
STATUS_PENDING = 0x00000103
STATUS_NOTIFY_CLEANUP = 0x0000010B
STATUS_NOTIFY_ENUM_DIR = 0x0000010C
STATUS_BUFFER_OVERFLOW = 0x80000005
STATUS_NO_MORE_FILES = 0x80000006
STATUS_END_OF_FILE = 0xC0000011
STATUS_INVALID_EA_NAME = 0x80000013
STATUS_EA_LIST_INCONSISTENT = 0x80000014
STATUS_STOPPED_ON_SYMLINK = 0x8000002D
STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
STATUS_INVALID_PARAMETER = 0xC000000D
STATUS_NO_SUCH_FILE = 0xC000000F
STATUS_END_OF_FILE = 0xC0000011
STATUS_MORE_PROCESSING_REQUIRED = 0xC0000016
STATUS_ACCESS_DENIED = 0xC0000022
STATUS_BUFFER_TOO_SMALL = 0xC0000023
STATUS_OBJECT_NAME_INVALID = 0xC0000033
STATUS_OBJECT_NAME_NOT_FOUND = 0xC0000034
STATUS_OBJECT_NAME_COLLISION = 0xC0000035
STATUS_OBJECT_PATH_INVALID = 0xC0000039
STATUS_OBJECT_PATH_NOT_FOUND = 0xC000003A
STATUS_OBJECT_PATH_SYNTAX_BAD = 0xC000003B
STATUS_SHARING_VIOLATION = 0xC0000043
STATUS_EAS_NOT_SUPPORTED = 0xC000004F
STATUS_EA_TOO_LARGE = 0xC0000050
STATUS_NONEXISTENT_EA_ENTRY = 0xC0000051
STATUS_NO_EAS_ON_FILE = 0xC0000052
STATUS_EA_CORRUPT_ERROR = 0xC0000053
STATUS_PRIVILEGE_NOT_HELD = 0xC0000061
STATUS_LOGON_FAILURE = 0xC000006D
    STATUS_PASSWORD_EXPIRED = 0xC0000071
STATUS_INSUFFICIENT_RESOURCES = 0xC000009A
STATUS_PIPE_BUSY = 0xC00000AE
STATUS_PIPE_CLOSING = 0xC00000B1
STATUS_PIPE_DISCONNECTED = 0xC00000B0
STATUS_FILE_IS_A_DIRECTORY = 0xC00000BA
STATUS_NOT_SUPPORTED = 0xC00000BB
STATUS_BAD_NETWORK_NAME = 0xC00000CC
STATUS_REQUEST_NOT_ACCEPTED = 0xC00000D0
STATUS_PIPE_EMPTY = 0xC00000D9
STATUS_INTERNAL_ERROR = 0xC00000E5
STATUS_DIRECTORY_NOT_EMPTY = 0xC0000101
STATUS_NOT_A_DIRECTORY = 0xC0000103
STATUS_CANCELLED = 0xC0000120
STATUS_CANNOT_DELETE = 0xC0000121
STATUS_FILE_CLOSED = 0xC0000128
STATUS_PIPE_BROKEN = 0xC000014B
STATUS_USER_SESSION_DELETED = 0xC0000203
STATUS_NOT_A_REPARSE_POINT = 0xC0000275
class SMBException(Exception):
# Generic SMB Exception with a message
pass
class SMBAuthenticationError(SMBException):
# Used for authentication specific errors
pass
class SMBOSError(OSError, SMBException):
def __init__(self, ntstatus, filename, filename2=None):
self.ntstatus = ntstatus
self.filename2 = to_native(filename2) if filename2 else None
ntstatus_name = 'STATUS_UNKNOWN'
for name, val in vars(NtStatus).items():
if ntstatus == val:
ntstatus_name = name
break
error_details = {
NtStatus.STATUS_OBJECT_NAME_NOT_FOUND: errno.ENOENT,
NtStatus.STATUS_OBJECT_PATH_NOT_FOUND: errno.ENOENT,
NtStatus.STATUS_OBJECT_NAME_COLLISION: errno.EEXIST,
NtStatus.STATUS_PRIVILEGE_NOT_HELD: (errno.EACCES, "Required privilege not held"),
NtStatus.STATUS_SHARING_VIOLATION: (errno.EPERM, "The process cannot access the file because it is being "
"used by another process"),
NtStatus.STATUS_NOT_A_REPARSE_POINT: (errno.EINVAL, "The file or directory is not a reparse point"),
NtStatus.STATUS_FILE_IS_A_DIRECTORY: errno.EISDIR,
NtStatus.STATUS_NOT_A_DIRECTORY: errno.ENOTDIR,
NtStatus.STATUS_DIRECTORY_NOT_EMPTY: errno.ENOTEMPTY,
NtStatus.STATUS_END_OF_FILE: getattr(errno, 'ENODATA', 120), # Not present on py2 for Windows.
}.get(ntstatus, (0, "Unknown NtStatus error returned '%s'" % ntstatus_name))
if not isinstance(error_details, tuple):
error_details = (error_details, os.strerror(error_details))
super(SMBOSError, self).__init__(error_details[0], error_details[1], to_native(filename))
def __str__(self):
msg = "[Error {0}] [NtStatus 0x{1}] {2}: '{3}'".format(self.errno, format(self.ntstatus, 'x').zfill(8),
self.strerror, self.filename)
if self.filename2:
msg += " -> '%s'" % self.filename2
return msg
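# Illustrative sketch (not part of the upstream API): constructing an SMBOSError
# from a raw NtStatus value yields a POSIX-style errno plus a formatted message.
# The UNC path below is an arbitrary example; the strerror text comes from
# os.strerror on the local platform.
#
#   err = SMBOSError(NtStatus.STATUS_OBJECT_NAME_NOT_FOUND, r'\\server\share\file.txt')
#   err.errno   # errno.ENOENT (2)
#   str(err)    # "[Error 2] [NtStatus 0xc0000034] <strerror text>: '\\server\share\file.txt'"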
class SMBLinkRedirectionError(SMBException):
@property
def message(self):
msg = "Encountered symlink at '%s' that points to '%s' which cannot be redirected: %s" \
% (to_native(self.path), to_native(self.target), to_native(self.args[0]))
return msg
@property
def path(self):
return self.args[1]
@property
def target(self):
return self.args[2]
def __str__(self):
return self.message
class SMBUnsupportedFeature(SMBException):
@property
def negotiated_dialect(self):
return self.args[0]
@property
def required_dialect(self):
return self.args[1]
@property
def feature_name(self):
return self.args[2]
@property
def requires_newer(self):
if len(self.args) > 3:
return self.args[3]
else:
return None
@property
def message(self):
if self.requires_newer is None:
msg_suffix = ""
elif self.requires_newer:
msg_suffix = " or newer"
else:
msg_suffix = " or older"
required_dialect = self._get_dialect_name(self.required_dialect)
negotiated_dialect = self._get_dialect_name(self.negotiated_dialect)
msg = "%s is not available on the negotiated dialect %s, " \
"requires dialect %s%s"\
% (self.feature_name, negotiated_dialect, required_dialect,
msg_suffix)
return msg
def __str__(self):
return self.message
def _get_dialect_name(self, dialect):
dialect_field = EnumField(
enum_type=Dialects,
enum_strict=False,
size=2)
dialect_field.set_value(dialect)
return str(dialect_field)
class SMBResponseException(SMBException):
@property
def header(self):
# the full message that was returned by the server
return self.args[0]
@property
def status(self):
# the raw int status value, used by method that catch this exception
# for control flow
return self.args[1]
@property
def error_details(self):
# list of error_details returned by the server, currently used in
# the SMB 3.1.1 error response for certain situations
error = SMB2ErrorResponse()
error.unpack(self.header['data'].get_value())
error_details = []
for raw_error_data in error['error_data'].get_value():
error_id = raw_error_data['error_id'].get_value()
raw_data = raw_error_data['error_context_data'].get_value()
if self.status == NtStatus.STATUS_STOPPED_ON_SYMLINK:
error_data = SMB2SymbolicLinkErrorResponse()
error_data.unpack(raw_data)
elif self.status == NtStatus.STATUS_BAD_NETWORK_NAME and \
error_id == ErrorContextId.SMB2_ERROR_ID_SHARE_REDIRECT:
error_data = SMB2ShareRedirectErrorContext()
error_data.unpack(raw_data)
else:
# unknown context data so we just set it the raw bytes
error_data = raw_data
error_details.append(error_data)
return error_details
@property
def message(self):
error_details_msg = ""
for error_detail in self.error_details:
if isinstance(error_detail, SMB2SymbolicLinkErrorResponse):
detail_msg = self._get_symlink_error_detail_msg(error_detail)
elif isinstance(error_detail, SMB2ShareRedirectErrorContext):
detail_msg = self._get_share_redirect_detail_msg(error_detail)
else:
# unknown error details in response, output raw bytes
detail_msg = "Raw: %s" % binascii.hexlify(error_detail).decode('utf-8')
# the first details message is set differently
if error_details_msg == "":
error_details_msg = "%s - %s" % (error_details_msg, detail_msg)
else:
error_details_msg = "%s, %s" % (error_details_msg, detail_msg)
status_hex = format(self.status, 'x')
error_message = "%s: 0x%s%s" % (str(self.header['status']),
status_hex, error_details_msg)
return "Received unexpected status from the server: %s" % error_message
def __str__(self):
return self.message
def _get_share_redirect_detail_msg(self, error_detail):
ip_addresses = []
for ip_addr in error_detail['ip_addr_move_list'].get_value():
ip_addresses.append(ip_addr.get_ipaddress())
resource_name = error_detail['resource_name'].get_value(). \
decode('utf-16-le')
detail_msg = "IP Addresses: '%s', Resource Name: %s" \
% ("', '".join(ip_addresses), resource_name)
return detail_msg
def _get_symlink_error_detail_msg(self, error_detail):
flag = str(error_detail['flags'])
print_name = error_detail.get_print_name()
sub_name = error_detail.get_substitute_name()
detail_msg = "Flag: %s, Print Name: %s, Substitute Name: %s" \
% (flag, print_name, sub_name)
return detail_msg
class ErrorContextId(object):
"""
[MS-SMB2] v53.0 2017-09-15
2.2.2.1 SMB2 Error Context Response ErrorId
An identifier for the error context, it MUST be set to one of the following
values.
"""
SMB2_ERROR_ID_DEFAULT = 0x00000000
SMB2_ERROR_ID_SHARE_REDIRECT = 0x53526472
class SymbolicLinkErrorFlags(object):
"""
[MS-SMB2] v53.0 2017-09-15
2.2.2.2.1 Symbolic Link Error Response Flags
Specifies whether the substitute name is an absolute target path or a path
name relative to the directory containing the symbolic link
"""
SYMLINK_FLAG_ABSOLUTE = 0x00000000
SYMLINK_FLAG_RELATIVE = 0x00000001
class IpAddrType(object):
"""
    [MS-SMB2] v53.0 2017-09-15
    2.2.2.2.2.1 MOVE_DST_IPADDR structure Type
    Indicates the type of the destination IP address.
"""
MOVE_DST_IPADDR_V4 = 0x00000001
MOVE_DST_IPADDR_V6 = 0x00000002
class SMB2ErrorResponse(Structure):
"""
[MS-SMB2] v53.0 2017-09-15
2.2.2 SMB2 Error Response
The SMB2 Error Response packet is sent by the server to respond to a
request that has failed or encountered an error. This is only used in the
SMB 3.1.1 dialect and this code won't decode values based on older versions
"""
def __init__(self):
self.fields = OrderedDict([
('structure_size', IntField(
size=2,
default=9,
)),
('error_context_count', IntField(
size=1,
default=lambda s: len(s['error_data'].get_value()),
)),
('reserved', IntField(size=1)),
('byte_count', IntField(
size=4,
default=lambda s: len(s['error_data']),
)),
('error_data', ListField(
size=lambda s: s['byte_count'].get_value(),
list_count=lambda s: s['error_context_count'].get_value(),
list_type=StructureField(
structure_type=SMB2ErrorContextResponse
),
unpack_func=lambda s, d: self._error_data_value(s, d)
)),
])
super(SMB2ErrorResponse, self).__init__()
def _error_data_value(self, structure, data):
context_responses = []
while len(data) > 0:
response = SMB2ErrorContextResponse()
if structure['error_context_count'].get_value() > 0:
                # Working with SMB 3.1.1+ where the errors are already in an SMB2ErrorContextResponse packet, unpack the
# data as usual
data = response.unpack(data)
else:
# Working with an older SMB dialect where the response is set directly in the error_data field, need to
# manually craft the SMB2ErrorContextResponse with the data returned.
response['error_context_data'] = data
data = b""
context_responses.append(response)
return context_responses
class SMB2ErrorContextResponse(Structure):
"""
[MS-SMB2] v53.0 2017-09-15
2.2.2.1 SMB2 ERROR Context Response
For the SMB dialect 3.1.1, the server formats the error data as an array of
SMB2 Error Context structures in the SMB2ErrorResponse message.
"""
def __init__(self):
self.fields = OrderedDict([
('error_data_length', IntField(
size=4,
default=lambda s: len(s['error_context_data']),
)),
('error_id', EnumField(
size=4,
default=ErrorContextId.SMB2_ERROR_ID_DEFAULT,
enum_type=ErrorContextId,
)),
('error_context_data', BytesField(
size=lambda s: s['error_data_length'].get_value(),
)),
])
super(SMB2ErrorContextResponse, self).__init__()
class SMB2SymbolicLinkErrorResponse(Structure):
"""
[MS-SMB2] v53.0 2017-09-15
2.2.2.2.1 Symbolic Link Error Response
The Symbolic Link Error Response is used to indicate that a symbolic link
was encountered on the create. It describes the target path that the client
MUST use if it requires to follow the symbolic link.
"""
def __init__(self):
self.fields = OrderedDict([
('symlink_length', IntField(
size=4,
default=lambda s: len(s) - 4
)),
('symlink_error_tag', BytesField(
size=4,
default=b"\x53\x59\x4d\x4c"
)),
('reparse_tag', BytesField(
size=4,
default=b"\x0c\x00\x00\xa0"
)),
('reparse_data_length', IntField(
size=2,
default=lambda s: len(s['path_buffer']) + 12
)),
# the len in utf-16-le bytes of the path beyond the substitute name
# of the original target, e.g. \\server\share\symlink\file.txt
# would be length of \file.txt in utf-16-le form, this is used by
# the client to find out what part of the original path to append
# to the substitute name returned by the server.
('unparsed_path_length', IntField(size=2)),
('substitute_name_offset', IntField(size=2)),
('substitute_name_length', IntField(size=2)),
('print_name_offset', IntField(size=2)),
('print_name_length', IntField(size=2)),
('flags', EnumField(
size=2,
enum_type=SymbolicLinkErrorFlags,
)),
# Not in the spec but Windows seems to add \x00\x80 to the end of flags which breaks our parsing. Cannot
            # seem to figure out why, so this reserved field simply absorbs and ignores those bytes.
('reserved', IntField(size=2)),
# use the get/set_name functions to get/set these values as they
# also (d)encode the text and set the length and offset accordingly
('path_buffer', BytesField(
size=lambda s: self._get_name_length(s, True)
))
])
super(SMB2SymbolicLinkErrorResponse, self).__init__()
def _get_name_length(self, structure, first):
print_name_len = structure['print_name_length'].get_value()
sub_name_len = structure['substitute_name_length'].get_value()
return print_name_len + sub_name_len
def set_name(self, print_name, substitute_name):
"""
        Sets the path_buffer and the print/substitute name lengths of the message
with the values passed in. These values should be a string and not a
byte string as it is encoded in this function.
:param print_name: The print name string to set
:param substitute_name: The substitute name string to set
"""
# Ensure that the to_bytes input is an actual text string for py2 compat with native strings.
print_bytes = to_bytes(to_text(print_name), encoding='utf-16-le')
sub_bytes = to_bytes(to_text(substitute_name), encoding='utf-16-le')
path_buffer = print_bytes + sub_bytes
self['print_name_offset'].set_value(0)
self['print_name_length'].set_value(len(print_bytes))
self['substitute_name_offset'].set_value(len(print_bytes))
self['substitute_name_length'].set_value(len(sub_bytes))
self['path_buffer'].set_value(path_buffer)
def get_print_name(self):
offset = self['print_name_offset'].get_value()
length = self['print_name_length'].get_value()
name_bytes = self['path_buffer'].get_value()[offset:offset + length]
return to_text(name_bytes, encoding='utf-16-le')
def get_substitute_name(self):
offset = self['substitute_name_offset'].get_value()
length = self['substitute_name_length'].get_value()
name_bytes = self['path_buffer'].get_value()[offset:offset + length]
return to_text(name_bytes, encoding='utf-16-le')
def resolve_path(self, link_path):
"""
[MS-SMB2] 2.2.2.2.1.1 Handling the Symbolic Link Error Response
https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-smb2/a8da655c-8b0b-415a-b726-16dc33fa5827
Attempts to resolve the link target path. Will fail if the link is pointing to a local path or a UNC path
on another host or share.
:param link_path: The original path to the symbolic link to resolve relative paths from.
:return: The resolved link target path.
"""
substitute_name = self.get_substitute_name()
print_name = self.get_print_name()
unparsed_path_length = self['unparsed_path_length'].get_value()
b_link_path = to_bytes(to_text(link_path), encoding='utf-16-le')
unparsed_idx = len(b_link_path) - unparsed_path_length
base_link_path = to_text(b_link_path[:unparsed_idx], encoding='utf-16-le')
unparsed_path = to_text(b_link_path[unparsed_idx:], encoding='utf-16-le')
# Use the common code in SymbolicLinkReparseDataBuffer() to resolve the link target.
symlink_buffer = SymbolicLinkReparseDataBuffer()
symlink_buffer['flags'] = self['flags'].get_value()
symlink_buffer.set_name(substitute_name, print_name)
target_path = symlink_buffer.resolve_link(base_link_path) + unparsed_path
if not target_path.startswith('\\\\'):
raise SMBLinkRedirectionError("Cannot resolve link targets that point to a local path", link_path,
print_name)
link_share = ntpath.splitdrive(link_path)[0]
target_share = ntpath.splitdrive(target_path)[0]
if link_share != target_share:
raise SMBLinkRedirectionError("Cannot resolve link targets that point to a different host/share",
link_path, print_name)
return target_path
class SMB2ShareRedirectErrorContext(Structure):
"""
[MS-SMB2] v53.0 2017-09-15
2.2.2.2.2 Share Redirect Error Context Response
Response to a Tree Connect with the
SMB2_TREE_CONNECT_FLAG_REDIRECT_TO_OWNER flag set.
"""
def __init__(self):
self.fields = OrderedDict([
('structure_size', IntField(
size=4,
default=lambda s: len(s)
)),
('notification_type', IntField(
size=4,
default=3
)),
('resource_name_offset', IntField(
size=4,
default=lambda s: self._resource_name_offset(s)
)),
('resource_name_length', IntField(
size=4,
default=lambda s: len(s['resource_name'])
)),
('flags', IntField(
size=2,
default=0
)),
('target_type', IntField(
size=2,
default=0
)),
('ip_addr_count', IntField(
size=4,
default=lambda s: len(s['ip_addr_move_list'].get_value())
)),
('ip_addr_move_list', ListField(
size=lambda s: s['ip_addr_count'].get_value() * 24,
list_count=lambda s: s['ip_addr_count'].get_value(),
list_type=StructureField(
size=24,
structure_type=SMB2MoveDstIpAddrStructure
)
)),
('resource_name', BytesField(
size=lambda s: s['resource_name_length'].get_value()
))
])
super(SMB2ShareRedirectErrorContext, self).__init__()
def _resource_name_offset(self, structure):
min_structure_size = 24
addr_list_size = len(structure['ip_addr_move_list'])
return min_structure_size + addr_list_size
class SMB2MoveDstIpAddrStructure(Structure):
"""
[MS-SMB2] c53.0 2017-09-15
2.2.2.2.2.1 MOVE_DST_IPADDR structure
Used to indicate the destination IP address.
"""
def __init__(self):
self.fields = OrderedDict([
('type', EnumField(
size=4,
enum_type=IpAddrType
)),
('reserved', IntField(size=4)),
('ip_address', BytesField(
size=lambda s: self._ip_address_size(s)
)),
('reserved2', BytesField(
size=lambda s: self._reserved2_size(s),
default=lambda s: b"\x00" * self._reserved2_size(s)
))
])
super(SMB2MoveDstIpAddrStructure, self).__init__()
def _ip_address_size(self, structure):
if structure['type'].get_value() == IpAddrType.MOVE_DST_IPADDR_V4:
return 4
else:
return 16
def _reserved2_size(self, structure):
if structure['type'].get_value() == IpAddrType.MOVE_DST_IPADDR_V4:
return 12
else:
return 0
def get_ipaddress(self):
        # gets the IP address in a human-readable format
ip_address = self['ip_address'].get_value()
if self['type'].get_value() == IpAddrType.MOVE_DST_IPADDR_V4:
return socket.inet_ntoa(ip_address)
else:
addr = binascii.hexlify(ip_address).decode('utf-8')
return ":".join([addr[i:i + 4] for i in range(0, len(addr), 4)])
def set_ipaddress(self, address):
        # sets the IP address from a human-readable format; for IPv6, this
# needs to be the full IPv6 address
if self['type'].get_value() == IpAddrType.MOVE_DST_IPADDR_V4:
self['ip_address'].set_value(socket.inet_aton(address))
else:
addr = address.replace(":", "")
if len(addr) != 32:
raise ValueError("When setting an IPv6 address, it must be in "
"the full form without concatenation")
self['ip_address'].set_value(binascii.unhexlify(addr))
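# A minimal usage sketch of the helper structures above. It only exercises code
# defined in this module; the names and addresses are illustrative assumptions,
# not values a real server would return.
if __name__ == '__main__':
    symlink = SMB2SymbolicLinkErrorResponse()
    symlink.set_name(print_name='C:\\temp\\link', substitute_name='\\??\\C:\\temp\\target')
    print(symlink.get_print_name())       # C:\temp\link
    print(symlink.get_substitute_name())  # \??\C:\temp\target
    dst = SMB2MoveDstIpAddrStructure()
    dst['type'] = IpAddrType.MOVE_DST_IPADDR_V4
    dst.set_ipaddress('192.168.1.10')
    print(dst.get_ipaddress())            # 192.168.1.10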
|
machina/apps/forum_member/models.py | BrendaH/django-machina | 572 | 11097942 | <gh_stars>100-1000
"""
Forum member models
===================
This module defines models provided by the ``forum_member`` application.
"""
from machina.apps.forum_member.abstract_models import AbstractForumProfile
from machina.core.db.models import model_factory
ForumProfile = model_factory(AbstractForumProfile)
|
froide/campaign/migrations/0003_campaign_ident.py | xenein/froide | 198 | 11097952 | <filename>froide/campaign/migrations/0003_campaign_ident.py
# Generated by Django 2.1.7 on 2019-03-09 12:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("campaign", "0002_auto_20190309_1241"),
]
operations = [
migrations.AddField(
model_name="campaign",
name="ident",
field=models.CharField(blank=True, max_length=50),
),
]
|
tests/framework/RunFailures/codeFailer.py | rinelson456/raven | 159 | 11097970 | <gh_stars>100-1000
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
infile = sys.argv[1]
for line in open(infile,'r'):
if line.startswith('x ='):
x=float(line.split('=')[1])
if line.startswith('y ='):
y=float(line.split('=')[1])
if line.startswith('out ='):
out=line.split('=')[1].strip()
# the generated run fails roughly half the time.
if x+y>0:
raise RuntimeError('Answer is bigger than 0. Just a test error.')
outfile = open(out+'.csv','w')
outfile.writelines('x,y,ans\n')
outfile.writelines(','.join([str(x),str(y),str(x+y)]))
outfile.close()
|
xtune/src/pequod/model/roberta.py | aimore-globality/unilm | 5,129 | 11097974 | <reponame>aimore-globality/unilm
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertForQuestionAnswering
from transformers.modeling_roberta import RobertaModel
class RobertaForQuestionAnswering(BertPreTrainedModel):
base_model_prefix = "roberta"
def __init__(self, config):
BertPreTrainedModel.__init__(self, config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
BertPreTrainedModel.init_weights(self)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, start_positions=None, end_positions=None, **kwargs):
outputs = self.roberta(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
**kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) |
algorithms/recursion/Fibonacci-Series-by-python.py | yashaswiadyalu/python | 204 | 11097975 |
### Program to calculate fibonacci series
def fibo(n):  # function to calculate the nth Fibonacci number recursively
if(n == 0):
return 0
elif(n == 1):
return 1
else:
return(fibo(n-1) + fibo(n-2))
num = int(input("Enter a number: "))  # number up to which the Fibonacci series is printed
for n in range(0,(num+1)):
print(fibo(n),end=" ")
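# Note: the plain recursion above recomputes the same subproblems and its running
# time grows exponentially with n. A memoized variant (illustrative sketch, not
# part of the original script) stays linear:
#
#   from functools import lru_cache
#
#   @lru_cache(maxsize=None)
#   def fibo_memo(n):
#       return n if n < 2 else fibo_memo(n - 1) + fibo_memo(n - 2)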
|
devito/finite_differences/finite_difference.py | guaacoelho/devito | 199 | 11098028 | <gh_stars>100-1000
from sympy import sympify
from devito.finite_differences.differentiable import EvalDerivative
from devito.finite_differences.tools import (numeric_weights, symbolic_weights, left,
right, generate_indices, centered, direct,
transpose, check_input, check_symbolic)
__all__ = ['first_derivative', 'cross_derivative', 'generic_derivative',
'left', 'right', 'centered', 'transpose', 'generate_indices']
# Number of digits for FD coefficients to avoid roundup errors and non-deterministic
# code generation
_PRECISION = 9
@check_input
@check_symbolic
def first_derivative(expr, dim, fd_order=None, side=centered, matvec=direct, x0=None,
symbolic=False):
"""
First-order derivative of a given expression.
Parameters
----------
expr : expr-like
Expression for which the first-order derivative is produced.
dim : Dimension
The Dimension w.r.t. which to differentiate.
fd_order : int, optional
Coefficient discretization order. Note: this impacts the width of
the resulting stencil. Defaults to `expr.space_order`.
side : Side, optional
Side of the finite difference location, centered (at x), left (at x - 1)
or right (at x +1). Defaults to `centered`.
matvec : Transpose, optional
Forward (matvec=direct) or transpose (matvec=transpose) mode of the
finite difference. Defaults to `direct`.
symbolic : bool, optional
Use default or custom coefficients (weights). Defaults to False.
x0 : dict, optional
Origin of the finite-difference scheme as a map dim: origin_dim.
Returns
-------
expr-like
First-order derivative of ``expr``.
Examples
--------
>>> from devito import Function, Grid, first_derivative, transpose
>>> grid = Grid(shape=(4, 4))
>>> x, _ = grid.dimensions
>>> f = Function(name='f', grid=grid)
>>> g = Function(name='g', grid=grid)
>>> first_derivative(f*g, dim=x)
-f(x, y)*g(x, y)/h_x + f(x + h_x, y)*g(x + h_x, y)/h_x
Semantically, this is equivalent to
>>> (f*g).dx
Derivative(f(x, y)*g(x, y), x)
The only difference is that in the latter case derivatives remain unevaluated.
The expanded form is obtained via ``evaluate``
>>> (f*g).dx.evaluate
-f(x, y)*g(x, y)/h_x + f(x + h_x, y)*g(x + h_x, y)/h_x
For the adjoint mode of the first derivative, pass ``matvec=transpose``
>>> g = Function(name='g', grid=grid)
>>> first_derivative(f*g, dim=x, matvec=transpose)
-f(x, y)*g(x, y)/h_x + f(x - h_x, y)*g(x - h_x, y)/h_x
This is also accessible via the .T shortcut
>>> (f*g).dx.T.evaluate
-f(x, y)*g(x, y)/h_x + f(x - h_x, y)*g(x - h_x, y)/h_x
Finally the x0 argument allows to choose the origin of the finite-difference
>>> first_derivative(f, dim=x, x0={x: 1})
-f(1, y)/h_x + f(h_x + 1, y)/h_x
"""
fd_order = fd_order or expr.space_order
deriv_order = 1
return make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, symbolic)
@check_input
@check_symbolic
def cross_derivative(expr, dims, fd_order, deriv_order, **kwargs):
"""
Arbitrary-order cross derivative of a given expression.
Parameters
----------
expr : expr-like
Expression for which the cross derivative is produced.
dims : tuple of Dimension
Dimensions w.r.t. which to differentiate.
fd_order : tuple of ints
Coefficient discretization order. Note: this impacts the width of
the resulting stencil.
deriv_order : tuple of ints
Derivative order, e.g. 2 for a second-order derivative.
matvec : Transpose, optional
Forward (matvec=direct) or transpose (matvec=transpose) mode of the
finite difference. Defaults to `direct`.
symbolic : bool, optional
Use default or custom coefficients (weights). Defaults to False.
x0 : dict, optional
Origin of the finite-difference scheme as a map dim: origin_dim.
Returns
-------
expr-like
Cross-derivative of ``expr``.
Examples
--------
>>> from devito import Function, Grid
>>> grid = Grid(shape=(4, 4))
>>> x, y = grid.dimensions
>>> f = Function(name='f', grid=grid, space_order=2)
>>> g = Function(name='g', grid=grid, space_order=2)
>>> cross_derivative(f*g, dims=(x, y), fd_order=(2, 2), deriv_order=(1, 1))
(-1/h_y)*(-f(x, y)*g(x, y)/h_x + f(x + h_x, y)*g(x + h_x, y)/h_x) + \
(-f(x, y + h_y)*g(x, y + h_y)/h_x + f(x + h_x, y + h_y)*g(x + h_x, y + h_y)/h_x)/h_y
Semantically, this is equivalent to
>>> (f*g).dxdy
Derivative(f(x, y)*g(x, y), x, y)
The only difference is that in the latter case derivatives remain unevaluated.
The expanded form is obtained via ``evaluate``
>>> (f*g).dxdy.evaluate
(-1/h_y)*(-f(x, y)*g(x, y)/h_x + f(x + h_x, y)*g(x + h_x, y)/h_x) + \
(-f(x, y + h_y)*g(x, y + h_y)/h_x + f(x + h_x, y + h_y)*g(x + h_x, y + h_y)/h_x)/h_y
Finally the x0 argument allows to choose the origin of the finite-difference
>>> cross_derivative(f*g, dims=(x, y), fd_order=(2, 2), deriv_order=(1, 1), \
x0={x: 1, y: 2})
(-1/h_y)*(-f(1, 2)*g(1, 2)/h_x + f(h_x + 1, 2)*g(h_x + 1, 2)/h_x) + (-f(1, h_y + 2)*\
g(1, h_y + 2)/h_x + f(h_x + 1, h_y + 2)*g(h_x + 1, h_y + 2)/h_x)/h_y
"""
x0 = kwargs.get('x0', {})
for d, fd, dim in zip(deriv_order, fd_order, dims):
expr = generic_derivative(expr, dim=dim, fd_order=fd, deriv_order=d, x0=x0)
return expr
@check_input
@check_symbolic
def generic_derivative(expr, dim, fd_order, deriv_order, matvec=direct, x0=None,
symbolic=False):
"""
Arbitrary-order derivative of a given expression.
Parameters
----------
expr : expr-like
Expression for which the derivative is produced.
dim : Dimension
The Dimension w.r.t. which to differentiate.
fd_order : int
Coefficient discretization order. Note: this impacts the width of
the resulting stencil.
deriv_order : int
Derivative order, e.g. 2 for a second-order derivative.
matvec : Transpose, optional
Forward (matvec=direct) or transpose (matvec=transpose) mode of the
finite difference. Defaults to `direct`.
x0 : dict, optional
Origin of the finite-difference scheme as a map dim: origin_dim.
symbolic : bool, optional
Use default or custom coefficients (weights). Defaults to False.
Returns
-------
expr-like
``deriv-order`` derivative of ``expr``.
"""
side = None
    # Approximating a first-order derivative with a 2nd-order FD scheme is strongly
    # discouraged, so fall back to a first-order FD scheme, which behaves much better
if deriv_order == 1 and fd_order == 2 and not symbolic:
fd_order = 1
return make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, symbolic)
def make_derivative(expr, dim, fd_order, deriv_order, side, matvec, x0, symbolic):
# The stencil positions
indices, x0 = generate_indices(expr, dim, fd_order, side=side, x0=x0)
# Finite difference weights from Taylor approximation with these positions
if symbolic:
weights = symbolic_weights(expr, deriv_order, indices, x0)
else:
weights = numeric_weights(deriv_order, indices, x0)
return indices_weights_to_fd(expr, dim, indices, weights, matvec=matvec.val)
def indices_weights_to_fd(expr, dim, indices, weights, matvec=1):
"""Expression from lists of indices and weights."""
mapper = {dim.spacing: matvec*dim.spacing}
terms = []
for i, c in zip(indices, weights):
# Transpose FD if necessary through `mapper`
try:
iloc = i.xreplace(mapper)
except AttributeError:
# Pure number -> sympify
iloc = sympify(i).xreplace(mapper)
# Shift index due to staggering, if any
iloc -= expr.indices_ref[dim] - dim
# Enforce fixed precision FD coefficients to avoid variations in results
v = sympify(c).evalf(_PRECISION)
# The FD term
term = expr._subs(dim, iloc) * v
# Re-evaluate any off-the-grid Functions potentially impacted by the FD
try:
term = term.evaluate
except AttributeError:
# Pure number
pass
terms.append(term)
deriv = EvalDerivative(*terms, base=expr)
return deriv
|
airflow/providers/apache/hive/example_dags/example_twitter_dag.py | ChaseKnowlden/airflow | 15,947 | 11098040 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: <NAME>
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
"""
This is an example DAG for managing Twitter data.
"""
from datetime import date, timedelta
from airflow import DAG
from airflow.decorators import task
from airflow.operators.bash import BashOperator
from airflow.providers.apache.hive.operators.hive import HiveOperator
from airflow.utils.dates import days_ago
@task
def fetch_tweets():
"""
    This task should call the Twitter API and retrieve yesterday's tweets sent from and to the four Twitter
    users (Twitter_A, ..., Twitter_D). Eight CSV output files should be generated by this task, following the
    naming convention direction(from or to)_twitterHandle_date.csv
"""
@task
def clean_tweets():
"""
This is a placeholder to clean the eight files. In this step you can get rid of or cherry pick columns
and different parts of the text.
"""
@task
def analyze_tweets():
"""
This is a placeholder to analyze the twitter data. Could simply be a sentiment analysis through algorithms
like bag of words or something more complicated. You can also take a look at Web Services to do such
tasks.
"""
@task
def transfer_to_db():
"""
This is a placeholder to extract summary from Hive data and store it to MySQL.
"""
with DAG(
dag_id='example_twitter_dag',
default_args={
'owner': 'Ekhtiar',
'retries': 1,
},
schedule_interval="@daily",
start_date=days_ago(5),
tags=['example'],
) as dag:
fetch_tweets = fetch_tweets()
clean_tweets = clean_tweets()
analyze_tweets = analyze_tweets()
hive_to_mysql = transfer_to_db()
fetch_tweets >> clean_tweets >> analyze_tweets
# --------------------------------------------------------------------------------
# The following tasks are generated using for loop. The first task puts the eight
# csv files to HDFS. The second task loads these files from HDFS to respected Hive
# tables. These two for loops could be combined into one loop. However, in most cases,
# you will be running different analysis on your incoming and outgoing tweets,
# and hence they are kept separated in this example.
# --------------------------------------------------------------------------------
from_channels = ['fromTwitter_A', 'fromTwitter_B', 'fromTwitter_C', 'fromTwitter_D']
to_channels = ['toTwitter_A', 'toTwitter_B', 'toTwitter_C', 'toTwitter_D']
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
# define where you want to store the tweets csv file in your local directory
local_dir = "/tmp/"
# define the location where you want to store in HDFS
hdfs_dir = " /tmp/"
for channel in to_channels:
file_name = f"to_{channel}_{dt}.csv"
load_to_hdfs = BashOperator(
task_id=f"put_{channel}_to_hdfs",
bash_command=(
f"HADOOP_USER_NAME=hdfs hadoop fs -put -f {local_dir}{file_name}{hdfs_dir}{channel}/"
),
)
load_to_hive = HiveOperator(
task_id=f"load_{channel}_to_hive",
hql=(
f"LOAD DATA INPATH '{hdfs_dir}{channel}/{file_name}'"
f"INTO TABLE {channel}"
f"PARTITION(dt='{dt}')"
),
)
analyze_tweets >> load_to_hdfs >> load_to_hive >> hive_to_mysql
for channel in from_channels:
file_name = f"from_{channel}_{dt}.csv"
load_to_hdfs = BashOperator(
task_id=f"put_{channel}_to_hdfs",
bash_command=(
f"HADOOP_USER_NAME=hdfs hadoop fs -put -f {local_dir}{file_name}{hdfs_dir}{channel}/"
),
)
load_to_hive = HiveOperator(
task_id=f"load_{channel}_to_hive",
hql=(
f"LOAD DATA INPATH '{hdfs_dir}{channel}/{file_name}' "
f"INTO TABLE {channel} "
f"PARTITION(dt='{dt}')"
),
)
analyze_tweets >> load_to_hdfs >> load_to_hive >> hive_to_mysql
|
examples/xor_5C.py | Liblor/arybo | 223 | 11098051 | <reponame>Liblor/arybo<gh_stars>100-1000
from arybo.lib import MBA
import arybo.lib.mba_exprs as EX
import sys
use_exprs = False
if len(sys.argv) >= 2:
use_exprs = int(sys.argv[1])
def f(x):
v0 = x*0xe5 + 0xF7
v0 = v0&0xFF
v3 = (((((v0*0x26)+0x55)&0xFE)+(v0*0xED)+0xD6)&0xFF )
v4 = ((((((- (v3*0x2))+0xFF)&0xFE)+v3)*0x03)+0x4D)
v5 = (((((v4*0x56)+0x24)&0x46)*0x4B)+(v4*0xE7)+0x76)
v7 = ((((v5*0x3A)+0xAF)&0xF4)+(v5*0x63)+0x2E)
v6 = (v7&0x94)
v8 = ((((v6+v6+(- (v7&0xFF)))*0x67)+0xD))
res = ((v8*0x2D)+(((v8*0xAE)|0x22)*0xE5)+0xC2)&0xFF
return (0xed*(res-0xF7))&0xff
mba8 = MBA(8)
X = mba8.var('X')
if use_exprs:
X = EX.ExprBV(X)
res = f(X)
if use_exprs:
res = EX.eval_expr(res,use_esf=False)
print(res)
if use_exprs:
X = X.v
VD = res.vectorial_decomp([X])
print("====")
print("Cst = " + hex(VD.cst().get_int_be()))
|
namedtuple__typing__example/main.py | DazEB2/SimplePyScripts | 117 | 11098116 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'])
# # TypeError: __new__() missing 2 required positional arguments: 'x' and 'y'
# p = Point()
# print(p)
p = Point(1, 2)
print(p) # Point(x=1, y=2)
print(p._asdict()) # OrderedDict([('x', 1), ('y', 2)])
print()
# SOURCE: https://docs.python.org/3/library/typing.html#typing.NamedTuple
from typing import NamedTuple
class Point(NamedTuple):
x: int = 0
y: int = 0
p = Point()
print(p) # Point(x=0, y=0)
p = Point(1, 2)
print(p) # Point(x=1, y=2)
print(p._asdict()) # OrderedDict([('x', 1), ('y', 2)])
|
python/tvm/topi/nn/loss.py | XiaoSong9905/tvm | 4,640 | 11098149 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Loss functions definitions."""
from __future__ import absolute_import
from . import cpp
def nll_loss(predictions, targets, weights, reduction, ignore_index):
"""Negative log likelihood loss on the input data.
output{n, i_1, i_2, ..., i_k} = -p * w
where t = target{n, i_1, i_2, ..., i_k}
p = predictions{n, t, i_1, i_2, i_k}
w = weights{n, i_1, i_2, ..., i_k} if t != ignore_index else 0
result = reduction(output)
Parameters
----------
predictions : tvm.te.Tensor
(k+2)-D with shape (N, C, d_1, d_2, ..., d_k),
where C is the number of target classes
targets : tvm.te.Tensor
(k+1)-D with shape (N, d_1, d_2, ..., d_k)
The target value of the input.
weights : tvm.te.Tensor
1-D with shape (C,)
The weight of each target value.
reduction : string
The reduction method to apply to output.
Can be "mean", "sum" or "none".
ignore_index : int
The target value to ignore.
Returns
-------
output : tvm.te.Tensor
a scalar if the reduction type is "mean" or "sum",
otherwise the same shape as `target`.
"""
return cpp.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
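# A small usage sketch (assumed shapes and names, not part of this module): build
# the loss over a batch of 4 samples and 10 classes with tvm.te placeholders and
# lower it for CPU.
#
#   import tvm
#   from tvm import te, topi
#
#   predictions = te.placeholder((4, 10), name="predictions", dtype="float32")
#   targets = te.placeholder((4,), name="targets", dtype="int32")
#   weights = te.placeholder((10,), name="weights", dtype="float32")
#
#   loss = topi.nn.nll_loss(predictions, targets, weights, "mean", -100)
#   sched = te.create_schedule(loss.op)
#   func = tvm.build(sched, [predictions, targets, weights, loss], target="llvm")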
|
pyNastran/dev/bdf_vectorized/cards/elements/spring/celas1.py | ACea15/pyNastran | 293 | 11098164 | from numpy import array, zeros, unique, searchsorted, where, arange
from pyNastran.dev.bdf_vectorized.cards.elements.spring.spring_element import SpringElement
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank
class CELAS1(SpringElement):
type = 'CELAS1'
def __init__(self, model):
"""
Defines the CELAS1 object.
Parameters
----------
model : BDF
the BDF object
"""
SpringElement.__init__(self, model)
def allocate(self, card_count):
ncards = card_count[self.type]
if ncards:
self.n = ncards
#: Element ID
self.element_id = zeros(ncards, 'int32')
#: Property ID
self.property_id = zeros(ncards, 'int32')
#: Node IDs
self.node_ids = zeros((ncards, 2), 'int32')
#: component number
self.components = zeros((ncards, 2), 'int32')
def add_card(self, card, comment=''):
i = self.i
self.element_id[i] = integer(card, 1, 'eid')
self.property_id[i] = integer_or_blank(card, 2, 'pid', self.element_id[i])
self.node_ids[i, :] = [integer(card, 3, 'n1'),
integer(card, 5, 'n2')]
self.components[i, :] = [integer_or_blank(card, 4, 'c1', 0),
integer_or_blank(card, 6, 'c2', 0)]
assert len(card) <= 7, 'len(CELAS1 card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def build(self):
if self.n:
i = self.element_id.argsort()
self.element_id = self.element_id[i]
self.property_id = self.property_id[i]
self.node_ids = self.node_ids[i, :]
self.components = self.components[i, :]
unique_eids = unique(self.element_id)
if len(unique_eids) != len(self.element_id):
raise RuntimeError('There are duplicate CELAS1 IDs...')
self._cards = []
else:
self.element_id = array([], dtype='int32')
self.property_id = array([], dtype='int32')
def update(self, maps):
"""
maps = {
'node_id' : nid_map,
'property' : pid_map,
}
"""
if self.n:
eid_map = maps['element']
pid_map = maps['property']
nid_map = maps['node']
for i, eid, pid, nids in enumerate(zip(self.element_id, self.property_id, self.node_ids)):
self.element_id[i] = eid_map[eid]
self.property_id[i] = pid_map[pid]
self.node_ids[i, 0] = nid_map[nids[0]]
self.node_ids[i, 1] = nid_map[nids[1]]
def write_card(self, bdf_file, size=8, eids=None):
if self.n:
if eids is None:
i = arange(self.n)
else:
i = searchsorted(self.element_id, self.eid)
for (eid, pid, n, c) in zip(self.element_id[i], self.property_id[i], self.node_ids[i], self.components[i]):
card = ['CELAS1', eid, pid, n[0], n[1], c[0], c[1]]
if size == 8:
bdf_file.write(print_card_8(card))
else:
bdf_file.write(print_card_16(card))
def get_stiffness_matrix(self, i, model, positions, index0s, fnorm=1.0):
"""gets the stiffness matrix for CELAS1"""
#print("----------------")
ipid = where(self.model.pelas.property_id==self.property_id[i])[0][0]
prop = self.model.pelas
ki = prop.K[ipid]
k = ki * array([[1, -1,],
[-1, 1]])
#========================
n1, n2 = self.node_ids[i, :]
c1, c2 = self.components[i, :]
#i0, i1 = index0s
delta1 = 0 if c1 in [0, 1, 2, 3] else 3
delta2 = 0 if c2 in [0, 1, 2, 3] else 3
c1b = c1-1 if c1 > 0 else c1
c2b = c2-1 if c2 > 0 else c2
i1 = index0s[n1]
i2 = index0s[n2]
dofs = [
i1 + c1b,
i2 + c1b,
]
n_ijv = [
(n1, 1 + delta1),
(n2, 1 + delta2),
]
return (k, dofs, n_ijv)
def displacement_stress(self, model, positions, q, dofs,
ni, o1, e1, f1):
n = self.n
du_axial = zeros(n, 'float64')
for i in range(self.n):
(n1, n2) = self.node_ids[i, :]
n11 = dofs[(n1, 1)]
n21 = dofs[(n2, 1)]
q_axial = array([
q[n11],
q[n21],
])
u_axial = q_axial
du_axial[i] = u_axial[0] - u_axial[1]
self.model.log.debug("len(pelas) = %s" % self.model.pelas.n)
i = searchsorted(self.model.pelas.property_id, self.property_id)
k = self.model.pelas.K[i]
s = self.model.pelas.s[i]
self.model.log.debug("k=%s s=%s du_axial=%s" % (k, s, du_axial))
e1[ni: ni+n] = du_axial * s
f1[ni: ni+n] = k * du_axial
o1[ni: ni+n] = f1[ni: ni+n] * s
#return (axial_strain, axial_stress, axial_force)
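# For reference (illustrative, assuming a PELAS stiffness of k = 1000.0):
# get_stiffness_matrix above returns the standard two-node spring matrix
#
#   k * [[ 1, -1],      [[ 1000., -1000.],
#        [-1,  1]]  ->   [-1000.,  1000.]]
#
# together with the global DOF indices of the two connected grid points.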
|
tests/test_minimum_version.py | oprypin/copier | 438 | 11098190 | <filename>tests/test_minimum_version.py
import warnings
import pytest
from packaging.version import Version
from plumbum import local
from plumbum.cmd import git
import copier
from copier.errors import (
OldTemplateWarning,
UnknownCopierVersionWarning,
UnsupportedVersionError,
)
from .helpers import build_file_tree
@pytest.fixture(scope="module")
def template_path(tmp_path_factory) -> str:
root = tmp_path_factory.mktemp("template")
build_file_tree(
{
root
/ "copier.yaml": """\
_min_copier_version: "10.5.1"
""",
root / "README.md": "",
}
)
return str(root)
def test_version_less_than_required(template_path, tmp_path, monkeypatch):
monkeypatch.setattr("copier.__version__", "0.0.0a0")
with pytest.raises(UnsupportedVersionError):
copier.copy(template_path, tmp_path)
def test_version_equal_required(template_path, tmp_path, monkeypatch):
monkeypatch.setattr("copier.__version__", "10.5.1")
# assert no error
copier.copy(template_path, tmp_path)
def test_version_greater_than_required(template_path, tmp_path, monkeypatch):
monkeypatch.setattr("copier.__version__", "99.99.99")
# assert no error
copier.copy(template_path, tmp_path)
def test_minimum_version_update(template_path, tmp_path, monkeypatch):
monkeypatch.setattr("copier.__version__", "11.0.0")
copier.copy(template_path, tmp_path)
with local.cwd(tmp_path):
git("init")
git("config", "user.name", "Copier Test")
git("config", "user.email", "<EMAIL>")
git("add", ".")
git("commit", "-m", "hello world")
monkeypatch.setattr("copier.__version__", "0.0.0.post0")
with pytest.raises(UnsupportedVersionError):
copier.copy(template_path, tmp_path)
monkeypatch.setattr("copier.__version__", "10.5.1")
# assert no error
copier.copy(template_path, tmp_path)
monkeypatch.setattr("copier.__version__", "99.99.99")
# assert no error
copier.copy(template_path, tmp_path)
def test_version_0_0_0_ignored(template_path, tmp_path, monkeypatch):
monkeypatch.setattr("copier.template.copier_version", lambda: Version("0.0.0"))
# assert no error
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(UnknownCopierVersionWarning):
copier.run_copy(template_path, tmp_path)
def test_version_bigger_major_warning(template_path, tmp_path, monkeypatch):
monkeypatch.setattr("copier.__version__", "11.0.0a0")
with warnings.catch_warnings():
warnings.simplefilter("error")
with pytest.raises(OldTemplateWarning):
copier.run_copy(template_path, tmp_path)
|
scripts/agglomDemo.py | vipavlovic/pyprobml | 4,895 | 11098199 | <filename>scripts/agglomDemo.py
# Agglomerative Clustering Demo
# Author: <NAME>
import superimport
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
import matplotlib.pyplot as plt
import pyprobml_utils as pml
X = np.array([[1,2],
[2.5,4.5],
[2,2],
[4,1.5],
[4,2.5],])
labels = range(1, 6)
plt.figure(figsize=(10, 6))
plt.yticks(np.linspace(0,5,11))
plt.ylim(0,5)
plt.grid(color='gray', linestyle='dashed')
plt.scatter(X[:,0],X[:,1], label='True Position')
for label, x, y in zip(labels, X[:, 0], X[:, 1]):
plt.annotate(
label,
xy=(x, y), xytext=(-3, 3),
textcoords='offset points', ha='right', va='bottom', fontsize=25, color="red")
pml.savefig("agglom_demo_data.pdf", dpi=300)
linked = linkage(X, 'single')
labelList = range(1, 6)
plt.figure(figsize=(10, 7))
dendrogram(linked,
orientation='top',
labels=labelList,
distance_sort='descending',
show_leaf_counts=True)
pml.savefig("agglom_demo_dendrogram.pdf", dpi=300)
plt.show() |
gamY/extract_equations.py | gemal/MAKRO | 1,018 | 11098200 | <reponame>gemal/MAKRO
import os
import sys
import csv
import gamY
import re
import textwrap
def main():
"""
Read model from gamY pickle
Translate Equations to Gekko format
Export equations to csv file
"""
file_path = "../model/saved/model" # sys.argv[1]
file_dir, file_name = os.path.split(file_path)
file_dir = os.path.abspath(file_dir)
model_name = "M_base" # sys.argv[2]
db = gamY.Precompiler(file_path)
db.read(file_name)
with open(os.path.join(file_dir, file_name+'_equations.csv'), 'w', newline='') as file:
writer = csv.writer(file, quoting=csv.QUOTE_ALL)
writer.writerow(["Name", "Sets", "Conditionals", "Left hand side", "Right hand side", ""])
for eq in db.blocks[model_name].values():
LHS = textwrap.dedent(eq.LHS).replace("\n","")
RHS = textwrap.dedent(eq.RHS).replace("\n","")
while " " in LHS:
LHS = LHS.replace(" ", " ")
while " " in RHS:
RHS = RHS.replace(" ", " ")
writer.writerow([
eq.name,
eq.sets,
eq.conditions,
LHS,
RHS,
])
t_only_pattern = re.compile(r"[(\[]t([+-]1)?[)\]]", re.IGNORECASE | re.MULTILINE | re.DOTALL)
t_end_pattern = re.compile(r",t([+-]1)?([)\]])", re.IGNORECASE | re.MULTILINE | re.DOTALL)
def remove_t(text):
"""Remove time dimension, t, as it is explicit in Gekko
>>> remove_t("foo[a,t] =E= bar[t];")
'foo[a] =E= bar;'
"""
for m in t_only_pattern.finditer(text):
if m.group(1):
text = text.replace(m.group(0), f"[{m.group(1)}]")
else:
text = text.replace(m.group(0), "")
for m in t_end_pattern.finditer(text):
if m.group(1):
text = text.replace(m.group(0), f")[{m.group(1)}]")
else:
text = text.replace(m.group(0), m.group(2))
return text
sets_pattern = re.compile(r"[(\[](?:['\"a-z_,]|(?:\+1)|(?:-1))+[)\]]", re.IGNORECASE | re.MULTILINE | re.DOTALL)
def gekkofy_sets(text):
"""
>>> gekkofy_sets("foo[a,t] =E= bar[t] + foobar[t+1];")
'foo[#a] =E= bar + foobar[+1];'
"""
text = remove_t(text)
for match_text in sets_pattern.findall(text):
sets = []
for i in match_text[1:-1].split(","):
if i[0] in "'\"":
sets.append(i[1:-1])
elif i[0] in "+-":
sets.append(i)
else:
sets.append(f"#{i}")
text = text.replace(match_text, "[{}]".format(','.join(sets)))
return text
if __name__ == "__main__":
import doctest
main()
|
accessories/TV.py | jerr0328/HAP-python | 462 | 11098206 | from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_TELEVISION
class TV(Accessory):
category = CATEGORY_TELEVISION
NAME = 'Sample TV'
SOURCES = {
'HDMI 1': 3,
'HDMI 2': 3,
'HDMI 3': 3,
}
def __init__(self, *args, **kwargs):
super(TV, self).__init__(*args, **kwargs)
self.set_info_service(
manufacturer='HaPK',
model='Raspberry Pi',
firmware_revision='1.0',
serial_number='1'
)
tv_service = self.add_preload_service(
'Television', ['Name',
'ConfiguredName',
'Active',
'ActiveIdentifier',
'RemoteKey',
'SleepDiscoveryMode'],
)
self._active = tv_service.configure_char(
'Active', value=0,
setter_callback=self._on_active_changed,
)
tv_service.configure_char(
'ActiveIdentifier', value=1,
setter_callback=self._on_active_identifier_changed,
)
tv_service.configure_char(
'RemoteKey', setter_callback=self._on_remote_key,
)
tv_service.configure_char('Name', value=self.NAME)
# TODO: implement persistence for ConfiguredName
tv_service.configure_char('ConfiguredName', value=self.NAME)
tv_service.configure_char('SleepDiscoveryMode', value=1)
for idx, (source_name, source_type) in enumerate(self.SOURCES.items()):
input_source = self.add_preload_service('InputSource', ['Name', 'Identifier'])
input_source.configure_char('Name', value=source_name)
input_source.configure_char('Identifier', value=idx + 1)
# TODO: implement persistence for ConfiguredName
input_source.configure_char('ConfiguredName', value=source_name)
input_source.configure_char('InputSourceType', value=source_type)
input_source.configure_char('IsConfigured', value=1)
input_source.configure_char('CurrentVisibilityState', value=0)
tv_service.add_linked_service(input_source)
tv_speaker_service = self.add_preload_service(
'TelevisionSpeaker', ['Active',
'VolumeControlType',
'VolumeSelector']
)
tv_speaker_service.configure_char('Active', value=1)
# Set relative volume control
tv_speaker_service.configure_char('VolumeControlType', value=1)
tv_speaker_service.configure_char(
'Mute', setter_callback=self._on_mute,
)
tv_speaker_service.configure_char(
'VolumeSelector', setter_callback=self._on_volume_selector,
)
def _on_active_changed(self, value):
print('Turn %s' % ('on' if value else 'off'))
def _on_active_identifier_changed(self, value):
print('Change input to %s' % list(self.SOURCES.keys())[value-1])
def _on_remote_key(self, value):
print('Remote key %d pressed' % value)
def _on_mute(self, value):
print('Mute' if value else 'Unmute')
def _on_volume_selector(self, value):
print('%screase volume' % ('In' if value == 0 else 'De'))
def main():
import logging
import signal
from pyhap.accessory_driver import AccessoryDriver
logging.basicConfig(level=logging.INFO)
driver = AccessoryDriver(port=51826)
accessory = TV(driver, 'TV')
driver.add_accessory(accessory=accessory)
signal.signal(signal.SIGTERM, driver.signal_handler)
driver.start()
if __name__ == '__main__':
main()
|
llvmpy/src/TargetTransformInfo.py | KennethNielsen/llvmpy | 140 | 11098214 | from binding import *
from src.namespace import llvm
from src.Pass import ImmutablePass
if LLVM_VERSION >= (3, 3):
llvm.includes.add('llvm/Analysis/TargetTransformInfo.h')
else:
llvm.includes.add('llvm/TargetTransformInfo.h')
TargetTransformInfo = llvm.Class(ImmutablePass)
ScalarTargetTransformInfo = llvm.Class()
VectorTargetTransformInfo = llvm.Class()
@ScalarTargetTransformInfo
class ScalarTargetTransformInfo:
if LLVM_VERSION < (3, 3):
delete = Destructor()
@VectorTargetTransformInfo
class VectorTargetTransformInfo:
if LLVM_VERSION < (3, 3):
delete = Destructor()
@TargetTransformInfo
class TargetTransformInfo:
if LLVM_VERSION < (3, 3):
new = Constructor(ptr(ScalarTargetTransformInfo),
ptr(VectorTargetTransformInfo))
|
vae/standalone/vae_mmd_celeba_lightning.py | vipavlovic/pyprobml | 4,895 | 11098228 | """
Author: <NAME>
Please run the following command before running the script
wget -q https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py
or curl https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py > data.py
Then, make sure to get your kaggle.json from kaggle.com then run
mkdir /root/.kaggle
cp kaggle.json /root/.kaggle/kaggle.json
chmod 600 /root/.kaggle/kaggle.json
rm kaggle.json
to copy kaggle.json into a folder first
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from pytorch_lightning import LightningModule, Trainer
from data import CelebADataModule
from argparse import ArgumentParser
from einops import rearrange
IMAGE_SIZE = 64
CROP = 128
DATA_PATH = "kaggle"
trans = []
trans.append(transforms.RandomHorizontalFlip())
if CROP > 0:
trans.append(transforms.CenterCrop(CROP))
trans.append(transforms.Resize(IMAGE_SIZE))
trans.append(transforms.ToTensor())
transform = transforms.Compose(trans)
def compute_kernel(x1: torch.Tensor,
x2: torch.Tensor,
kernel_type: str = 'rbf') -> torch.Tensor:
# Convert the tensors into row and column vectors
D = x1.size(1)
N = x1.size(0)
x1 = x1.unsqueeze(-2) # Make it into a column tensor
x2 = x2.unsqueeze(-3) # Make it into a row tensor
"""
Usually the below lines are not required, especially in our case,
but this is useful when x1 and x2 have different sizes
along the 0th dimension.
"""
x1 = x1.expand(N, N, D)
x2 = x2.expand(N, N, D)
if kernel_type == 'rbf':
result = compute_rbf(x1, x2)
elif kernel_type == 'imq':
result = compute_inv_mult_quad(x1, x2)
else:
raise ValueError('Undefined kernel type.')
return result
def compute_rbf(x1: torch.Tensor,
x2: torch.Tensor,
latent_var: float = 2.,
eps: float = 1e-7) -> torch.Tensor:
"""
Computes the RBF Kernel between x1 and x2.
:param x1: (Tensor)
:param x2: (Tensor)
:param eps: (Float)
:return:
"""
z_dim = x2.size(-1)
sigma = 2. * z_dim * latent_var
result = torch.exp(-((x1 - x2).pow(2).mean(-1) / sigma))
return result
def compute_inv_mult_quad(x1: torch.Tensor,
x2: torch.Tensor,
latent_var: float = 2.,
eps: float = 1e-7) -> torch.Tensor:
"""
Computes the Inverse Multi-Quadratics Kernel between x1 and x2,
given by
k(x_1, x_2) = \sum \frac{C}{C + \|x_1 - x_2 \|^2}
:param x1: (Tensor)
:param x2: (Tensor)
:param eps: (Float)
:return:
"""
z_dim = x2.size(-1)
C = 2 * z_dim * latent_var
kernel = C / (eps + C + (x1 - x2).pow(2).sum(dim = -1))
# Exclude diagonal elements
result = kernel.sum() - kernel.diag().sum()
return result
def MMD(prior_z:torch.Tensor, z: torch.Tensor):
prior_z__kernel = compute_kernel(prior_z, prior_z)
z__kernel = compute_kernel(z, z)
priorz_z__kernel = compute_kernel(prior_z, z)
mmd = prior_z__kernel.mean() + \
z__kernel.mean() - \
2 * priorz_z__kernel.mean()
return mmd
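# Quick illustrative check (not executed as part of training): two batches drawn
# from the same Gaussian should give an MMD estimate near zero, while shifting one
# batch should give a clearly larger value. The sizes below are arbitrary.
#
#   z1 = torch.randn(256, 16)
#   z2 = torch.randn(256, 16)
#   MMD(z1, z2)        # close to 0
#   MMD(z1, z2 + 3.0)  # noticeably larger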
class VAE(LightningModule):
"""
Standard VAE with Gaussian Prior and approx posterior.
"""
def __init__(
self,
input_height: int,
hidden_dims = None,
in_channels = 3,
enc_out_dim: int = 512,
beta: float = 1,
latent_dim: int = 256,
lr: float = 1e-3
):
"""
Args:
            input_height: height of the input images
            hidden_dims: channel counts for the successive conv stages of the
                encoder/decoder (defaults to [32, 64, 128, 256, 512])
            in_channels: number of channels of the input images
            enc_out_dim: output dimension of the encoder
            beta: weight of the MMD term in the loss
            latent_dim: dim of latent space
            lr: learning rate for Adam
"""
super(VAE, self).__init__()
self.save_hyperparameters()
self.lr = lr
self.beta = beta
self.enc_out_dim = enc_out_dim
self.latent_dim = latent_dim
self.input_height = input_height
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels=h_dim,
kernel_size= 3, stride= 2, padding = 1),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU())
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride = 2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU())
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels= 3,
kernel_size= 3, padding= 1),
nn.Sigmoid())
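    # Note: pretrained_weights_available/from_pretrained expect a class-level
    # `pretrained_urls` dict, which is not defined in this standalone script,
    # so calling them here would raise AttributeError.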
@staticmethod
def pretrained_weights_available():
return list(VAE.pretrained_urls.keys())
def from_pretrained(self, checkpoint_name):
if checkpoint_name not in VAE.pretrained_urls:
raise KeyError(str(checkpoint_name) + ' not present in pretrained weights.')
return self.load_from_checkpoint(VAE.pretrained_urls[checkpoint_name], strict=False)
def encode(self, x):
x = self.encoder(x)
x = torch.flatten(x, start_dim=1)
mu = self.fc_mu(x)
return mu
def forward(self, x):
z = self.encode(x)
return self.decode(z)
def _run_step(self, x):
z = self.encode(x)
return z, self.decode(z)
def step(self, batch, batch_idx):
x, y = batch
z, x_hat = self._run_step(x)
recon_loss = F.mse_loss(x_hat, x, reduction='mean')
mmd = MMD(torch.randn_like(z), z)
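        # MMD-VAE / InfoVAE style objective: reconstruction error plus an MMD
        # penalty (weighted by beta) that pulls the aggregate posterior towards
        # the N(0, I) prior sampled via torch.randn_like.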
loss = recon_loss + \
self.beta * mmd
logs = {
"recon_loss": recon_loss,
"mmd": mmd,
}
return loss, logs
def decode(self, z):
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def training_step(self, batch, batch_idx):
loss, logs = self.step(batch, batch_idx)
self.log_dict({f"train_{k}": v for k, v in logs.items()}, on_step=True, on_epoch=False)
return loss
def validation_step(self, batch, batch_idx):
loss, logs = self.step(batch, batch_idx)
self.log_dict({f"val_{k}": v for k, v in logs.items()})
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.lr)
if __name__ == "__main__":
parser = ArgumentParser(description='Hyperparameters for our experiments')
parser.add_argument('--latent-dim', type=int, default=128, help="size of latent dim for our vae")
parser.add_argument('--epochs', type=int, default=50, help="num epochs")
parser.add_argument('--gpus', type=int, default=1, help="gpus, if no gpu set to 0, to run on all gpus set to -1")
parser.add_argument('--bs', type=int, default=256, help="batch size")
    parser.add_argument('--beta', type=float, default=1, help="weight of the MMD term in the loss")
    parser.add_argument('--lr', type=float, default=1e-3, help="learning rate")
hparams = parser.parse_args()
m = VAE(input_height=IMAGE_SIZE, latent_dim=hparams.latent_dim, beta=hparams.beta, lr=hparams.lr)
dm = CelebADataModule(data_dir=DATA_PATH,
target_type='attr',
train_transform=transform,
val_transform=transform,
download=True,
batch_size=hparams.bs)
trainer = Trainer(gpus=1, weights_summary='full', max_epochs=10, auto_lr_find=True)
# Run learning rate finder
lr_finder = trainer.tuner.lr_find(m, dm)
# Results can be found in
lr_finder.results
# Plot with
fig = lr_finder.plot(suggest=True)
fig.show()
# Pick point based on plot, or get suggestion
new_lr = lr_finder.suggestion()
# update hparams of the model
m.lr = new_lr
trainer= Trainer(gpus = hparams.gpus,
max_epochs = hparams.epochs)
trainer.fit(m, datamodule=dm)
torch.save(m.state_dict(), "mmd-vae-celeba-conv.ckpt")
|
pdbr/middlewares/django.py | giladbarnea/pdbr | 222 | 11098283 | <reponame>giladbarnea/pdbr
import sys
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from pdbr.__main__ import post_mortem
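# Rough usage sketch (dotted path assumed from this file's location): add
#   "pdbr.middlewares.django.PdbrMiddleware"
# to MIDDLEWARE in settings.py. With DEBUG enabled, any unhandled view
# exception drops into a pdbr post-mortem session via process_exception below.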
class PdbrMiddleware:
def __init__(self, get_response):
if not settings.DEBUG:
raise MiddlewareNotUsed()
self.get_response = get_response
def __call__(self, request):
return self.get_response(request)
def process_exception(self, request, exception): # noqa: F841
post_mortem(sys.exc_info()[2])
|
alipay/aop/api/domain/AlipayFinancialnetAuthPaymentNotifyModel.py | antopen/alipay-sdk-python-all | 213 | 11098288 | <reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFinancialnetAuthPaymentNotifyModel(object):
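    """Payload model for the Alipay financialnet auth payment notification.

    Follows the SDK's generated-model conventions: each field is exposed as a
    property and serialized with to_alipay_dict / from_alipay_dict.
    """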
def __init__(self):
self._basic_amount = None
self._benefit_amount = None
self._biz_status = None
self._commodity_type = None
self._out_commodity_id = None
self._out_order_no = None
self._platform_id = None
self._trade_no = None
self._user_id = None
self._validate_token = None
@property
def basic_amount(self):
return self._basic_amount
@basic_amount.setter
def basic_amount(self, value):
self._basic_amount = value
@property
def benefit_amount(self):
return self._benefit_amount
@benefit_amount.setter
def benefit_amount(self, value):
self._benefit_amount = value
@property
def biz_status(self):
return self._biz_status
@biz_status.setter
def biz_status(self, value):
self._biz_status = value
@property
def commodity_type(self):
return self._commodity_type
@commodity_type.setter
def commodity_type(self, value):
self._commodity_type = value
@property
def out_commodity_id(self):
return self._out_commodity_id
@out_commodity_id.setter
def out_commodity_id(self, value):
self._out_commodity_id = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def platform_id(self):
return self._platform_id
@platform_id.setter
def platform_id(self, value):
self._platform_id = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
@property
def validate_token(self):
return self._validate_token
@validate_token.setter
def validate_token(self, value):
self._validate_token = value
def to_alipay_dict(self):
params = dict()
if self.basic_amount:
if hasattr(self.basic_amount, 'to_alipay_dict'):
params['basic_amount'] = self.basic_amount.to_alipay_dict()
else:
params['basic_amount'] = self.basic_amount
if self.benefit_amount:
if hasattr(self.benefit_amount, 'to_alipay_dict'):
params['benefit_amount'] = self.benefit_amount.to_alipay_dict()
else:
params['benefit_amount'] = self.benefit_amount
if self.biz_status:
if hasattr(self.biz_status, 'to_alipay_dict'):
params['biz_status'] = self.biz_status.to_alipay_dict()
else:
params['biz_status'] = self.biz_status
if self.commodity_type:
if hasattr(self.commodity_type, 'to_alipay_dict'):
params['commodity_type'] = self.commodity_type.to_alipay_dict()
else:
params['commodity_type'] = self.commodity_type
if self.out_commodity_id:
if hasattr(self.out_commodity_id, 'to_alipay_dict'):
params['out_commodity_id'] = self.out_commodity_id.to_alipay_dict()
else:
params['out_commodity_id'] = self.out_commodity_id
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.platform_id:
if hasattr(self.platform_id, 'to_alipay_dict'):
params['platform_id'] = self.platform_id.to_alipay_dict()
else:
params['platform_id'] = self.platform_id
if self.trade_no:
if hasattr(self.trade_no, 'to_alipay_dict'):
params['trade_no'] = self.trade_no.to_alipay_dict()
else:
params['trade_no'] = self.trade_no
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
if self.validate_token:
if hasattr(self.validate_token, 'to_alipay_dict'):
params['validate_token'] = self.validate_token.to_alipay_dict()
else:
params['validate_token'] = self.validate_token
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFinancialnetAuthPaymentNotifyModel()
if 'basic_amount' in d:
o.basic_amount = d['basic_amount']
if 'benefit_amount' in d:
o.benefit_amount = d['benefit_amount']
if 'biz_status' in d:
o.biz_status = d['biz_status']
if 'commodity_type' in d:
o.commodity_type = d['commodity_type']
if 'out_commodity_id' in d:
o.out_commodity_id = d['out_commodity_id']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'platform_id' in d:
o.platform_id = d['platform_id']
if 'trade_no' in d:
o.trade_no = d['trade_no']
if 'user_id' in d:
o.user_id = d['user_id']
if 'validate_token' in d:
o.validate_token = d['validate_token']
return o
|
tensorflow_datasets/core/proto/feature_generated_pb2.py | justHungryMan/datasets | 3,380 | 11098291 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: feature.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='feature.proto',
package='tensorflow_datasets',
syntax='proto3',
serialized_options=b'\370\001\001',
serialized_pb=b'\n\rfeature.proto\x12\x13tensorflow_datasets\"\xa0\x01\n\x0c\x46\x65\x61turesDict\x12\x41\n\x08\x66\x65\x61tures\x18\x01 \x03(\x0b\x32/.tensorflow_datasets.FeaturesDict.FeaturesEntry\x1aM\n\rFeaturesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.tensorflow_datasets.Feature:\x02\x38\x01\"\xa5\x01\n\x07\x46\x65\x61ture\x12\x19\n\x11python_class_name\x18\x01 \x01(\t\x12\x38\n\x0cjson_feature\x18\x02 \x01(\x0b\x32 .tensorflow_datasets.JsonFeatureH\x00\x12:\n\rfeatures_dict\x18\x03 \x01(\x0b\x32!.tensorflow_datasets.FeaturesDictH\x00\x42\t\n\x07\x63ontent\"\x1b\n\x0bJsonFeature\x12\x0c\n\x04json\x18\x01 \x01(\tB\x03\xf8\x01\x01\x62\x06proto3'
)
_FEATURESDICT_FEATURESENTRY = _descriptor.Descriptor(
name='FeaturesEntry',
full_name='tensorflow_datasets.FeaturesDict.FeaturesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key',
full_name='tensorflow_datasets.FeaturesDict.FeaturesEntry.key',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b''.decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value',
full_name='tensorflow_datasets.FeaturesDict.FeaturesEntry.value',
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=122,
serialized_end=199,
)
_FEATURESDICT = _descriptor.Descriptor(
name='FeaturesDict',
full_name='tensorflow_datasets.FeaturesDict',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='features',
full_name='tensorflow_datasets.FeaturesDict.features',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[
_FEATURESDICT_FEATURESENTRY,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=39,
serialized_end=199,
)
_FEATURE = _descriptor.Descriptor(
name='Feature',
full_name='tensorflow_datasets.Feature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='python_class_name',
full_name='tensorflow_datasets.Feature.python_class_name',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b''.decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='json_feature',
full_name='tensorflow_datasets.Feature.json_feature',
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='features_dict',
full_name='tensorflow_datasets.Feature.features_dict',
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='content',
full_name='tensorflow_datasets.Feature.content',
index=0,
containing_type=None,
fields=[]),
],
serialized_start=202,
serialized_end=367,
)
_JSONFEATURE = _descriptor.Descriptor(
name='JsonFeature',
full_name='tensorflow_datasets.JsonFeature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='json',
full_name='tensorflow_datasets.JsonFeature.json',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b''.decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=369,
serialized_end=396,
)
_FEATURESDICT_FEATURESENTRY.fields_by_name['value'].message_type = _FEATURE
_FEATURESDICT_FEATURESENTRY.containing_type = _FEATURESDICT
_FEATURESDICT.fields_by_name[
'features'].message_type = _FEATURESDICT_FEATURESENTRY
_FEATURE.fields_by_name['json_feature'].message_type = _JSONFEATURE
_FEATURE.fields_by_name['features_dict'].message_type = _FEATURESDICT
_FEATURE.oneofs_by_name['content'].fields.append(
_FEATURE.fields_by_name['json_feature'])
_FEATURE.fields_by_name[
'json_feature'].containing_oneof = _FEATURE.oneofs_by_name['content']
_FEATURE.oneofs_by_name['content'].fields.append(
_FEATURE.fields_by_name['features_dict'])
_FEATURE.fields_by_name[
'features_dict'].containing_oneof = _FEATURE.oneofs_by_name['content']
DESCRIPTOR.message_types_by_name['FeaturesDict'] = _FEATURESDICT
DESCRIPTOR.message_types_by_name['Feature'] = _FEATURE
DESCRIPTOR.message_types_by_name['JsonFeature'] = _JSONFEATURE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FeaturesDict = _reflection.GeneratedProtocolMessageType(
'FeaturesDict',
(_message.Message,),
{
'FeaturesEntry':
_reflection.GeneratedProtocolMessageType(
'FeaturesEntry',
(_message.Message,),
{
'DESCRIPTOR': _FEATURESDICT_FEATURESENTRY,
'__module__': 'feature_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.FeaturesDict.FeaturesEntry)
}),
'DESCRIPTOR':
_FEATURESDICT,
'__module__':
'feature_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.FeaturesDict)
})
_sym_db.RegisterMessage(FeaturesDict)
_sym_db.RegisterMessage(FeaturesDict.FeaturesEntry)
Feature = _reflection.GeneratedProtocolMessageType(
'Feature',
(_message.Message,),
{
'DESCRIPTOR': _FEATURE,
'__module__': 'feature_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.Feature)
})
_sym_db.RegisterMessage(Feature)
JsonFeature = _reflection.GeneratedProtocolMessageType(
'JsonFeature',
(_message.Message,),
{
'DESCRIPTOR': _JSONFEATURE,
'__module__': 'feature_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.JsonFeature)
})
_sym_db.RegisterMessage(JsonFeature)
DESCRIPTOR._options = None
_FEATURESDICT_FEATURESENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
kafka/tools/protocol/requests/join_group_v1.py | akashvacher/kafka-tools | 578 | 11098299 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.requests import ArgumentError
from kafka.tools.protocol.requests.join_group_v0 import JoinGroupV0Request, _parse_group_protocol
from kafka.tools.protocol.responses.join_group_v1 import JoinGroupV1Response
class JoinGroupV1Request(JoinGroupV0Request):
api_version = 1
cmd = "JoinGroup"
response = JoinGroupV1Response
supports_cli = True
help_string = ("Request: {0}V{1}\n".format(cmd, api_version) +
"Format: {0}V{1} group_id session_timeout rebalance_timeout member_id protocol_type ".format(cmd, api_version) +
"(group_protocol_name,group_protocol_metadata ...)\n" +
"Description: Join or create a consumer group\n")
schema = [
{'name': 'group_id', 'type': 'string'},
{'name': 'session_timeout', 'type': 'int32'},
{'name': 'rebalance_timeout', 'type': 'int32'},
{'name': 'member_id', 'type': 'string'},
{'name': 'protocol_type', 'type': 'string'},
{'name': 'group_protocols',
'type': 'array',
'item_type': [
{'name': 'protocol_name', 'type': 'string'},
{'name': 'protocol_metadata', 'type': 'bytes'},
]},
]
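    # Relative to JoinGroup v0, the v1 request adds the rebalance_timeout field
    # between session_timeout and member_id; group protocol parsing reuses
    # _parse_group_protocol from the v0 module.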
@classmethod
def process_arguments(cls, cmd_args):
if len(cmd_args) < 6:
raise ArgumentError("JoinGroupV1 requires at least 6 arguments")
try:
values = {'group_id': cmd_args[0],
'session_timeout': int(cmd_args[1]),
'rebalance_timeout': int(cmd_args[2]),
'member_id': cmd_args[3],
'protocol_type': cmd_args[4],
'group_protocols': []}
except ValueError:
raise ArgumentError("The session_timeout and rebalance_timeout must be integers")
for protocol in cmd_args[5:]:
values['group_protocols'].append(_parse_group_protocol(protocol))
return values
|
config/patient_otp_token.py | gigincg/care | 189 | 11098300 | <filename>config/patient_otp_token.py
from rest_framework_simplejwt.tokens import Token
from datetime import timedelta
class PatientToken(Token):
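    """Short-lived SimpleJWT token type issued for patient OTP logins; valid for one hour."""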
lifetime = timedelta(hours=1)
token_type = "patient_login"
|
pylibs/lmdb_util/__init__.py | leoshine/Spherical_Regression | 133 | 11098322 | <gh_stars>100-1000
"""
@Author : <NAME>
"""
from basic.common import env
from .imagedata_lmdb import ImageData_lmdb
|
examples/00-mapdl-examples/3d_plate_thermal.py | RGPATCHI/pymapdl | 194 | 11098329 | """
.. _ref_3d_plate_thermal:
Basic Thermal Analysis with pyMAPDL
-----------------------------------
This example demonstrates how you can use MAPDL to create a plate,
impose thermal boundary conditions, solve, and plot it all within
pyMAPDL.
First, start MAPDL as a service and disable all but error messages.
"""
# sphinx_gallery_thumbnail_number = 2
from ansys.mapdl.core import launch_mapdl
mapdl = launch_mapdl()
###############################################################################
# Geometry and Material Properties
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a simple beam, specify the material properties, and mesh it.
mapdl.prep7()
mapdl.mp("kxx", 1, 45)
mapdl.et(1, 90)
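# MP kxx defines the thermal conductivity of material 1; ET 1,90 selects
# SOLID90, the 20-node 3-D thermal solid element used to mesh the plate.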
mapdl.block(-0.3, 0.3, -0.46, 1.34, -0.2, -0.2 + 0.02)
mapdl.vsweep(1)
mapdl.eplot()
###############################################################################
# Boundary Conditions
# ~~~~~~~~~~~~~~~~~~~
# Set the thermal boundary conditions
mapdl.asel("S", vmin=3)
mapdl.nsla()
mapdl.d("all", "temp", 5)
mapdl.asel("S", vmin=4)
mapdl.nsla()
mapdl.d("all", "temp", 100)
out = mapdl.allsel()
###############################################################################
# Solve
# ~~~~~
# Solve the thermal static analysis and print the results
mapdl.vsweep(1)
mapdl.run("/SOLU")
print(mapdl.solve())
out = mapdl.finish()
###############################################################################
# Post-Processing using MAPDL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
# View the thermal solution of the beam by getting the results
# directly through MAPDL.
mapdl.post1()
mapdl.set(1, 1)
mapdl.post_processing.plot_nodal_temperature()
###############################################################################
# Alternatively you could also use the result object that reads in the
# result file using pyansys
result = mapdl.result
nnum, temp = result.nodal_temperature(0)
# this is the same as pyansys.read_binary(mapdl._result_file)
print(nnum, temp)
|
thespian/system/systemBase.py | dendron2000/Thespian | 210 | 11098368 | '''The systemBase provides the base class implementation for standard
system Base implementations. This systemBase class is not itself intended
to be instantiated as the regular Thespian System Base; instead it provides
the shared functionality that the concrete System Base implementations
subclass.
'''
import logging
from thespian.actors import *
from thespian.system import *
from thespian.system.utilis import thesplog
from thespian.system.timing import toTimeDeltaOrNone, ExpirationTimer, unexpired
from thespian.system.messages.admin import *
from thespian.system.messages.status import *
from thespian.system.transport import *
import threading
from contextlib import closing
from datetime import timedelta
import os
MAX_SYSTEM_SHUTDOWN_DELAY = timedelta(seconds=10)
MAX_CHILD_ACTOR_CREATE_DELAY = timedelta(seconds=50)
MAX_CAPABILITY_UPDATE_DELAY = timedelta(seconds=5)
MAX_LOAD_SOURCE_DELAY = timedelta(seconds=61)
MAX_ADMIN_STATUS_REQ_DELAY = timedelta(seconds=2)
MAX_TELL_PERIOD = timedelta(seconds=60)
def ensure_TZ_set():
# Actor engines handle timeouts and tend to sample system time
# frequently. Under Linux, if TZ is not set to a value,
# /etc/localtime or similar is consulted on each call to obtain
# system time which can negatively affect performance. This
# function attempts to set TZ if possible/reasonable.
if 'TZ' in os.environ:
return
for fname in ('/etc/localtime',
'/usr/local/etc/localtime'):
if os.path.exists(fname):
os.environ['TZ'] = ':' + fname
return
# OK if it's not set, just may be slower
class TransmitTrack(object):
def __init__(self, transport, adminAddr):
self._newActorAddress = None
self._pcrFAILED = None
self._transport = transport
self._adminAddr = adminAddr
@property
def failed(self):
return self._pcrFAILED is not None
@property
def failure(self):
return self._pcrFAILED
@property
def failure_message(self):
return getattr(self, '_pcrMessage', None)
def transmit_failed(self, result, intent):
if result == SendStatus.DeadTarget and \
intent.targetAddr != self._adminAddr:
# Forward message to the dead letter handler; if the
# forwarding fails, just discard the message.
self._transport.scheduleTransmit(
None,
TransmitIntent(self._adminAddr,
DeadEnvelope(intent.targetAddr, intent.message)))
self._pcrFAILED = result
self._transport.abort_run()
class NewActorResponse(TransmitTrack):
def __init__(self, transport, adminAddr, *args, **kw):
super(NewActorResponse, self).__init__(transport, adminAddr, *args, **kw)
self._newActorAddress = None
@property
def pending(self):
return self._newActorAddress is None and not self.failed
@property
def actor_address(self):
return self._newActorAddress
def __call__(self, envelope):
if isinstance(envelope.message, PendingActorResponse):
self._newActorAddress = False if envelope.message.errorCode else \
envelope.message.actualAddress
self._pcrFAILED = envelope.message.errorCode
self._pcrMessage = getattr(envelope.message, 'errorStr', None)
# Stop running transport; got new actor address (or failure)
return False
# Discard everything else. Previous requests and operations
# may have caused there to be messages sent back to this
# endpoint that are queued ahead of the PendingActorResponse.
return True # Keep waiting for the PendingActorResponse
class ExternalOpsToActors(object):
def __init__(self, adminAddr, transport=None):
self._numPrimaries = 0
self._cv = threading.Condition()
self._transport_runner = False
# Expects self.transport has already been set by subclass __init__
self.adminAddr = adminAddr
if transport:
self.transport = transport
def _run_transport(self, maximumDuration=None, txonly=False,
incomingHandler=None):
# This is where multiple external threads are synchronized for
# receives. Transmits will flow down into the transmit layer
# where they are queued with thread safety, but threads
# blocking on a receive will all be lined up through this point.
max_runtime = ExpirationTimer(maximumDuration)
with self._cv:
while self._transport_runner:
self._cv.wait(max_runtime.view().remainingSeconds())
if max_runtime.view().expired():
return None
self._transport_runner = True
try:
r = Thespian__UpdateWork()
while isinstance(r, Thespian__UpdateWork):
r = self.transport.run(TransmitOnly if txonly else incomingHandler,
max_runtime.view().remaining())
return r
# incomingHandler callback could deadlock on this same thread; is it ever not None?
finally:
with self._cv:
self._transport_runner = False
self._cv.notify()
def _tx_to_actor(self, actorAddress, message):
# Send a message from this external process to an actor.
# Returns a TransmitTrack object that can be used to check for
# transmit errors.
txwatch = TransmitTrack(self.transport, self.adminAddr)
self.transport.scheduleTransmit(
None,
TransmitIntent(actorAddress, message,
onError=txwatch.transmit_failed))
return txwatch
def _tx_to_admin(self, message):
return self._tx_to_actor(self.adminAddr, message)
def newPrimaryActor(self, actorClass, targetActorRequirements, globalName,
sourceHash=None):
self._numPrimaries = self._numPrimaries + 1
actorClassName = '%s.%s'%(actorClass.__module__, actorClass.__name__) \
if hasattr(actorClass, '__name__') else actorClass
with closing(self.transport.external_transport_clone()) as tx_external:
response = NewActorResponse(tx_external, self.adminAddr)
tx_external.scheduleTransmit(
None,
TransmitIntent(self.adminAddr,
PendingActor(actorClassName,
None, self._numPrimaries,
targetActorRequirements,
globalName=globalName,
sourceHash=sourceHash),
onError=response.transmit_failed))
endwait = ExpirationTimer(MAX_CHILD_ACTOR_CREATE_DELAY)
# Do not use _run_transport: the tx_external transport
# context acquired above is unique to this thread and
# should not be synchronized/restricted by other threads.
tx_external.run(response, MAX_CHILD_ACTOR_CREATE_DELAY)
# Other items might abort the transport run... like transmit
# failures on a previous ask() that itself already timed out.
while response.pending and not endwait.view().expired():
tx_external.run(response, MAX_CHILD_ACTOR_CREATE_DELAY)
if response.failed:
if response.failure == PendingActorResponse.ERROR_Invalid_SourceHash:
raise InvalidActorSourceHash(sourceHash)
if response.failure == PendingActorResponse.ERROR_Invalid_ActorClass:
raise InvalidActorSpecification(actorClass,
response.failure_message)
if response.failure == PendingActorResponse.ERROR_Import:
info = response.failure_message
if info:
thesplog('Actor Create Failure, Import Error: %s', info)
raise ImportError(str(actorClass) + ': ' + info)
thesplog('Actor Create Failure, Import Error')
raise ImportError(actorClass)
if response.failure == PendingActorResponse.ERROR_No_Compatible_ActorSystem:
raise NoCompatibleSystemForActor(
actorClass, 'No compatible ActorSystem could be found')
raise ActorSystemFailure("Could not request new Actor from Admin (%s)"
% (response.failure))
if response.actor_address:
return response.actor_address
if response.actor_address is False:
raise NoCompatibleSystemForActor(
actorClass, 'No compatible ActorSystem could be found')
raise ActorSystemRequestTimeout(
'No response received to PendingActor request to Admin'
' at %s from %s'%(str(self.adminAddr),
str(self.transport.myAddress)))
def tell(self, anActor, msg):
attemptLimit = ExpirationTimer(MAX_TELL_PERIOD)
# transport may not use sockets, but this helps error handling
# in case it does.
import socket
for attempt in range(5000):
try:
txwatch = self._tx_to_actor(anActor, msg)
for attemptTime in unexpired(attemptLimit):
if not self._run_transport(attemptTime.remaining(),
txonly=True):
# all transmits completed
return
if txwatch.failed:
raise ActorSystemFailure(
'Error sending to %s: %s' % (str(anActor),
str(txwatch.failure)))
raise ActorSystemRequestTimeout(
'Unable to send to %s within %s' %
(str(anActor), str(MAX_TELL_PERIOD)))
except socket.error as ex:
import errno
if errno.EMFILE == ex.errno:
import time
time.sleep(0.1)
else:
raise
def listen(self, timeout):
while True:
response = self._run_transport(toTimeDeltaOrNone(timeout))
if not isinstance(response, ReceiveEnvelope):
break
# Do not send miscellaneous ActorSystemMessages to the caller
# that it might not recognize.
if not isInternalActorSystemMessage(response.message):
return response.message
return None
def ask(self, anActor, msg, timeout):
txwatch = self._tx_to_actor(anActor, msg) # KWQ: pass timeout on tx??
askLimit = ExpirationTimer(toTimeDeltaOrNone(timeout))
for remTime in unexpired(askLimit):
response = self._run_transport(remTime.remaining())
if txwatch.failed:
if txwatch.failure in [SendStatus.DeadTarget,
SendStatus.Failed,
SendStatus.NotSent]:
# Silent failure; not all transports can indicate
# this, so for conformity the Dead Letter handler is
# the intended method of handling this issue.
return None
raise ActorSystemFailure('Transmit of ask message to %s failed (%s)'%(
str(anActor),
str(txwatch.failure)))
if not isinstance(response, ReceiveEnvelope):
# Timed out or other failure, give up.
break
# Do not send miscellaneous ActorSystemMessages to the
# caller that it might not recognize. If one of those was
# recieved, loop to get another response.
if not isInternalActorSystemMessage(response.message):
return response.message
return None
class systemBase(ExternalOpsToActors):
"""This is the systemBase base class that various Thespian System Base
implementations should subclass. The System Base is
instantiated by each process that wishes to utilize an Actor
System and runs in the context of that process (as opposed to
the System Admin that may run in its own process).
This base is not present in the Actors themselves, only in the
external application that wish to talk to Actors.
Depending on the System Base implementation chosen by that
process, the instantiation may be private to that process or
shared by other processes; in the former case, there will be an
instance of this class in each process accessing the shared
Actor System, representing the Portal between the "external"
environment of that process and the shared Actor System
Implementation.
All ActorAddresses generated via newActor and newPrimaryActor
are local to this ActorSystemBase instance. Any and *all*
messages sent to other Actors must be able to be appropriately
serialized; this allows the pickling/unpickling process to
translate an ActorAddress from a local representation to a
global or remote representation.
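
       A rough sketch of the external usage this class supports, driven
       through the public ActorSystem wrapper (MyActor below is a placeholder
       Actor subclass):

           from thespian.actors import ActorSystem
           asys = ActorSystem('multiprocTCPBase')
           addr = asys.createActor(MyActor)
           reply = asys.ask(addr, 'ping', 1)
           asys.shutdown()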
"""
def __init__(self, system, logDefs = None):
ensure_TZ_set()
# Expects self.transport has already been set by subclass __init__
super(systemBase, self).__init__(
self.transport.getAdminAddr(system.capabilities))
tryingTime = ExpirationTimer(MAX_SYSTEM_SHUTDOWN_DELAY + timedelta(seconds=1))
while not tryingTime.view().expired():
if not self.transport.probeAdmin(self.adminAddr):
self._startAdmin(self.adminAddr,
self.transport.myAddress,
system.capabilities,
logDefs)
if self._verifyAdminRunning(): return
import time
time.sleep(0.5) # Previous version may have been exiting
if not self._verifyAdminRunning():
raise InvalidActorAddress(self.adminAddr,
'not a valid or useable ActorSystem Admin')
# KWQ: more details? couldn't start @ addr? response was ? instead of expected Thespian_SystemStatus?
def _verifyAdminRunning(self):
"""Returns boolean verification that the Admin is running and
available. Will query the admin for a positive response,
blocking until one is received.
"""
txwatch = self._tx_to_admin(QueryExists())
response = self._run_transport(MAX_ADMIN_STATUS_REQ_DELAY)
return not txwatch.failed and \
isinstance(response, ReceiveEnvelope) and \
isinstance(response.message, QueryAck) \
and not response.message.inShutdown
def __getstate__(self):
raise CannotPickle('ActorSystem cannot be Pickled.')
def shutdown(self):
thesplog('ActorSystem shutdown requested.', level=logging.INFO)
time_to_quit = ExpirationTimer(MAX_SYSTEM_SHUTDOWN_DELAY)
txwatch = self._tx_to_admin(SystemShutdown())
for remaining_time in unexpired(time_to_quit):
response = self._run_transport(remaining_time.remaining())
if txwatch.failed:
thesplog('Could not send shutdown request to Admin'
'; aborting but not necessarily stopped',
level=logging.WARNING)
return
if isinstance(response, ReceiveEnvelope):
if isinstance(response.message, SystemShutdownCompleted):
break
else:
thesplog('Expected shutdown completed message, got: %s', response.message,
level=logging.WARNING)
            elif isinstance(response, (Thespian__Run_Expired,
                                       Thespian__Run_Terminated)):
break
else:
thesplog('No response to Admin shutdown request; Actor system not completely shutdown',
level=logging.ERROR)
self.transport.close()
thesplog('ActorSystem shutdown complete.')
def updateCapability(self, capabilityName, capabilityValue=None):
attemptLimit = ExpirationTimer(MAX_CAPABILITY_UPDATE_DELAY)
txwatch = self._tx_to_admin(CapabilityUpdate(capabilityName,
capabilityValue))
for remaining_time in unexpired(attemptLimit):
if not self._run_transport(remaining_time.remaining(), txonly=True):
return # all transmits completed
if txwatch.failed:
raise ActorSystemFailure(
'Error sending capability updates to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout(
'Unable to confirm capability update in %s' %
str(MAX_CAPABILITY_UPDATE_DELAY))
def loadActorSource(self, fname):
loadLimit = ExpirationTimer(MAX_LOAD_SOURCE_DELAY)
f = fname if hasattr(fname, 'read') else open(fname, 'rb')
try:
d = f.read()
import hashlib
hval = hashlib.md5(d).hexdigest()
txwatch = self._tx_to_admin(
ValidateSource(hval, d, getattr(f, 'name',
str(fname)
if hasattr(fname, 'read')
else fname)))
for load_time in unexpired(loadLimit):
if not self._run_transport(load_time.remaining(), txonly=True):
# All transmits completed
return hval
if txwatch.failed:
raise ActorSystemFailure(
'Error sending source load to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout('Load source timeout: ' +
str(loadLimit))
finally:
f.close()
def unloadActorSource(self, sourceHash):
loadLimit = ExpirationTimer(MAX_LOAD_SOURCE_DELAY)
txwatch = self._tx_to_admin(ValidateSource(sourceHash, None))
for load_time in unexpired(loadLimit):
if not self._run_transport(load_time.remaining(), txonly=True):
return # all transmits completed
if txwatch.failed:
raise ActorSystemFailure(
'Error sending source unload to Admin: %s' %
str(txwatch.failure))
raise ActorSystemRequestTimeout('Unload source timeout: ' +
str(loadLimit))
def external_clone(self):
"""Get a separate local endpoint that does not commingle traffic with
        the main ActorSystem or other contexts. Makes internal
blocking calls, so primarily appropriate for a
multi-threaded client environment.
"""
return BaseContext(self.adminAddr, self.transport)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Actors that involve themselves in topology
def preRegisterRemoteSystem(self, remoteAddress, remoteCapabilities):
self.send(self.adminAddr,
ConventionRegister(
self.transport.getAddressFromString(remoteAddress),
remoteCapabilities,
preRegister=True))
def deRegisterRemoteSystem(self, remoteAddress):
self.send(
self.adminAddr,
ConventionDeRegister(
remoteAddress
if isinstance(remoteAddress, ActorAddress) else
self.transport.getAddressFromString(remoteAddress)))
class BaseContext(ExternalOpsToActors):
def __init__(self, adminAddr, transport):
super(BaseContext, self).__init__(adminAddr,
transport.external_transport_clone())
def exit_context(self):
self.transport.close()
|
analysis_engine.py | pbiernat/ripr | 338 | 11098381 | '''
This code encapsulates product-specific APIs so that they can be
accessed cleanly from other components.
'''
# Try to import stuff.
try:
from binaryninja import *
except:
print ("[!!] Not running in Binary Ninja")
try:
import r2pipe
except:
print ("[!!] Not running in Radare2")
import json
import re
import sys
from .codegen import *
from binascii import unhexlify
def get_engine(*args):
'''
Return an instance of the correct analysis engine class.
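    When running inside Binary Ninja, the caller is expected to pass the
    current BinaryView as the first positional argument; under r2pipe the
    engine attaches to the already-open radare2 session.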
'''
if ("r2pipe" in sys.argv[0]):
return radare2_engine(r2pipe.open())
if ("binaryninja" in sys.modules.keys()):
return bn_engine(args[0])
    raise ValueError("No analysis engine found!")
class aengine(object):
def __init__(self):
pass
def find_section(self, addr):
'''
Function should find what segment/section $addr is in and return a tuple
(StartAddress, Endaddress, Segment Name)
Error: Return -1
'''
pass
def get_arch(self):
'''
Function should return a string of the architecture of the currently loaded binary.
Architecture should be one of:
'x86'
'x64'
'arm'
'arm64'
'mips'
'''
pass
def get_function_bytes(self, address=None, name=None):
'''
Function should return a dictionary of address:string pairs where address
is a starting address and string is a string of bytes of the function at that location.
This allows for handling functions that are non-contiguous in memory.
'''
pass
def get_page_bytes(self, address):
'''
Funtion should return a string of bytes from the page where address
is located.
'''
pass
def get_nop_opcode(self):
'''
Function should return a string corresponding to a NOP on the specified
architecture.
'''
pass
def get_region_bytes(self, start, end):
'''
Function should return a tuple (address, string) where address
is the starting address of the region and string contains the bytes
between start and end, inclusive.
'''
return (start, self.read_bytes(start, end - start))
def read_bytes(self, address, len):
'''
Function should return a string containing
$len bytes at $address.
'''
pass
def get_imports(self):
raise NotImplementedError
def get_instruction_length(self, address):
raise NotImplementedError
def get_data_symbols(self):
raise NotImplementedError
def get_strings(self):
raise NotImplementedError
def get_refs_to(self, address):
raise NotImplementedError
def function_contains_addr(self, func_addr, testAddr):
raise NotImplementedError
def get_page_size(self):
raise NotImplementedError
def generate_invalid_access(self, address, arch, size=None):
raise NotImplementedError
def branches_from_func(self, address, callCallback, branchCallback):
raise NotImplementedError
def scan_potential_pointers(self, func_addr):
raise NotImplementedError
def is_plausible_pointer(self, candidate_ptr):
raise NotImplementedError
def highlight_instr(self, func_addr, instrAddr, color):
pass
def add_comment(self, func_addr, instrAddr, comment):
pass
def display_info(self, info1, info2):
pass
class bn_engine(aengine):
'''
This class should encapsulate all binary-ninja api calls cleanly.
Comments in this class relate to why a certain function is implemented the way it is,
mostly relating to Binary Ninja specifics. Descriptions of what a function should do
are in the aengine class.
'''
def __init__(self, view):
# We will need a BinaryViewType.
self.bv = view
aengine.__init__(self)
def read_bytes(self, address, len):
return self.bv.read(address, len)
def get_arch(self):
'''
ripr uses its own architecture names that map onto Unicorn architectures.
These can be different from Binary Ninja names, so they are explicitly mapped
into the ripr names, even if they are the same in some cases.
'''
print (self.bv.arch.name)
if (self.bv.arch.name == 'x86'):
return 'x86'
elif (self.bv.arch.name == 'x86_64'):
return 'x64'
elif (self.bv.arch.name == 'armv7'):
return 'arm'
elif (self.bv.arch.name == 'aarch64'):
return 'arm64'
def mark_gathered_basic_block(self, address):
        funcs = self.bv.get_functions_containing(address)
        if not funcs:
            print ("[ripr] No function contains address 0x%x" % address)
            return
        fobj = funcs[0]
bb = fobj.get_basic_block_at(address)
bb.highlight = HighlightStandardColor.BlackHighlightColor
fobj.set_comment_at(bb.start, "[ripr] Basic Block will be included in package")
def clean_gathered_basic_block(self, address):
fobj = self.bv.get_functions_containing(address)[0]
bb = fobj.get_basic_block_at(address)
bb.highlight = HighlightStandardColor.NoHighlightColor
fobj.set_comment_at(bb.start, '')
def get_basic_block_bytes(self, address):
bb = self.bv.get_basic_blocks_at(address)
if len(bb) != 1:
print ("[ripr] Address belongs to more than one basic block!")
bb = bb[0]
return {bb.start: codeSlice(self.read_bytes(bb.start, bb.end-bb.start), bb.start)}
def get_function_bytes(self, address=None, name=None):
'''
Binary Ninja does not seem to assume Functions are contiguous; rather they
are treated as a collection of basic blocks.
'''
print ("[ripr] Inside get_function_bytes()")
if (address != None):
fobj = self.bv.get_function_at(address)
elif (name != None):
print ("[ripr] TODO")
return
else:
print ("[ripr] No arguments supplied to get_function_bytes")
return None
if self.bv.get_function_at(address)==None:
print ("[ripr] Couldn't get function binary view. Maybe code arch is thumb2?")
return None
# Sort the basic blocks in ascending order
bblist = sorted(fobj.basic_blocks, key=lambda x: x.start)
map(lambda bb: bb.set_user_highlight(HighlightStandardColor.BlackHighlightColor), bblist)
# Create units of contiguous blocks
clist = [[bblist[0]]]
for bb in bblist[1:]:
if (bb.start == clist[-1][-1].end):
clist[-1].append(bb)
else:
clist.append([bb])
# Print out the list if the function is not contiguous
if (len(clist) > 1):
print (clist)
# Create a return list in the expected format from the contiguous units.
retdir = {unit[0].start : codeSlice(self.read_bytes(unit[0].start, unit[-1].start - unit[0].start + unit[-1].length), unit[0].start) for unit in clist}
return retdir
def get_page_bytes(self, address):
# Should get this dynamically if possible based on arch/mode/etc
pagesize = self.get_page_size()
pageaddr = (address & ~(pagesize - 1))
return self.read_bytes(pageaddr, pagesize)
def get_page_size(self):
return 4096
def get_region_bytes(self, start, end):
return (start, self.read_bytes(start, end-start))
def get_nop_opcode(self):
return self.bv.arch.assemble('nop')[0]
def generate_invalid_access(self, address, arch, size=None):
'''
Generates an invalid memory access for use in function hooking.
pad to size if applicable
'''
if arch in ['x86', 'x64']:
if (size):
opcodes = self.bv.arch.assemble('mov al, [%s]' % address)[0]
nop = self.get_nop_opcode()
if len(opcodes) >= size:
return opcodes
return opcodes + nop * (size - len(opcodes))
else:
return self.bv.arch.assemble('mov al, [%s]' % address)[0]
def get_imports(self):
return {imp.address : imp.name for imp in self.bv.get_symbols_of_type(SymbolType.ImportedFunctionSymbol)}
def get_instruction_length(self, address):
return self.bv.get_instruction_length(address)
def find_llil_block_from_addr(self, address):
fobj = self.bv.get_functions_containing(address)
if len(fobj) > 1:
print ("[ripr] Multiple Functions contain this address!!")
return None
fobj = fobj[0]
bbindex = fobj.get_basic_block_at(address).index
return fobj.low_level_il.basic_blocks[bbindex]
def find_mlil_block_from_addr(self, address):
fobj = self.bv.get_functions_containing(address)
if len(fobj) > 1:
print ("[ripr] Multiple Functions contain this address!!")
return None
fobj = fobj[0]
bbindex = fobj.get_basic_block_at(address).index
try:
ret = fobj.medium_level_il.basic_blocks[bbindex]
return ret
except:
return None
def branches_from_block(self, block, callCallback, branchCallback):
for il_inst in block:
if (il_inst.operation == LowLevelILOperation.LLIL_CALL):
callCallback(il_inst.dest.value, il_inst.address)
# Check Jump targets
elif (il_inst.operation in [LowLevelILOperation.LLIL_JUMP,\
LowLevelILOperation.LLIL_JUMP_TO,\
LowLevelILOperation.LLIL_GOTO]):
branchCallback(il_inst.dest, il_inst.address)
else:
pass
def branches_from_func(self, address, callCallback, branchCallback):
fobj = self.bv.get_function_at(address)
if (fobj==None):
return
for block in fobj.low_level_il:
self.branches_from_block(block, callCallback, branchCallback)
def get_data_symbols(self):
for sym in self.bv.get_symbols_of_type(SymbolType.DataSymbol):
yield sym.address
def get_strings(self):
for st in self.bv.strings:
yield (st.start, st.length)
def get_refs_to(self, address):
fobj = self.bv.get_function_at(address)
for ref in self.bv.get_code_refs(address):
yield ref.address
def function_contains_addr(self, func_addr, testAddr):
fobj = self.bv.get_function_at(func_addr)
if (fobj==None):
return False
return (fobj.get_basic_block_at(testAddr) != None)
def scan_potential_pointers_bb(self, il_block, fobj):
for il_inst in il_block:
# We are only interested in data references here.
if il_inst.operation in [LowLevelILOperation.LLIL_CALL, \
LowLevelILOperation.LLIL_JUMP, \
LowLevelILOperation.LLIL_GOTO, \
LowLevelILOperation.LLIL_IF, \
LowLevelILOperation.LLIL_JUMP_TO]:
continue
constants = fobj.get_constants_referenced_by(il_inst.address)
# Check if constant is a likely pointer
for const in constants:
yield const.value, il_inst.address
# Memory things
if il_inst.operation in [LowLevelILOperation.LLIL_LOAD,\
LowLevelILOperation.LLIL_STORE,\
LowLevelILOperation.LLIL_CONST,\
LowLevelILOperation.LLIL_UNIMPL_MEM,\
LowLevelILOperation.LLIL_SET_REG]:
# TODO
if (il_inst.operation == LowLevelILOperation.LLIL_STORE):
#yield il_inst.address
try:
yield self.bv.is_valid_offset(il_inst.operands[0].value), il_inst.address
except:
pass
def scan_potential_pointers(self, func_addr):
# Iterate over all instructions in each basic block
fobj = self.bv.get_function_at(func_addr)
for block in fobj.low_level_il:
for target, instAddr in self.scan_potential_pointers_bb(block, fobj):
yield target, instAddr
def is_plausible_pointer(self, candidate_ptr):
return self.bv.is_valid_offset(candidate_ptr)
def find_section(self, addr):
'''
Function should find what segment/section $addr is in and return a tuple
(StartAddress, Endaddress, Segment Name)
Error: Return -1
'''
res = []
for sec in self.bv.get_sections_at(addr):
return ((sec.start, sec.end, sec.name))
return -1
def highlight_instr(self, func_addr, instrAddr, color):
fobj = self.bv.get_functions_containing(func_addr)[0]
if color == "red":
bn_color = HighlightStandardColor.RedHighlightColor
elif color == "blue":
bn_color = HighlightStandardColor.BlueHighlightColor
elif color == "yellow":
bn_color = HighlightStandardColor.YellowHighlightColor
elif color == "orange":
bn_color = HighlightStandardColor.OrangeHighlightColor
else:
            raise ValueError("Unsupported color")
fobj.set_user_instr_highlight(instrAddr, bn_color)
def add_comment(self, func_addr, instrAddr, comment):
fobj = self.bv.get_functions_containing(func_addr)[0]
fobj.set_comment(instrAddr, "[ripr] " + comment)
def display_info(self, info1, info2):
self.bv.show_plain_text_report(info1, info2)
class radare2_engine(aengine):
def get_data_symbols(self):
for symbol in self.r2.cmdj("isj"):
if symbol['type'] == "OBJECT":
yield symbol['vaddr']
def get_strings(self):
for symbol in self.r2.cmdj("izj"):
yield symbol['vaddr'], symbol['size']
def get_refs_to(self, address):
res = self.r2.cmd("axtj {}".format(hex(address)))
if res is None or len(res) == 0:
return
res = json.loads(res)
for ref in res:
yield ref['from']
def function_contains_addr(self, func_addr, testAddr):
func = self.r2.cmdj("afij @{}".format(hex(func_addr)))
func = func[0]
return testAddr >= func['offset'] and testAddr < func['offset']+func['size']
def __init__(self, r2):
self.r2 = r2
aengine.__init__(self)
def read_bytes(self, address, size):
bytes = []
hexdump = self.r2.cmd("pc {} @ {}".format(size,hex(address)))
for line in hexdump.split("\n"):
if "0x" in line:
for byte in line.split(","):
byte = byte.strip()
if len(byte) == 0:
continue
byte = int(byte, 16)
bytes.append(chr(byte))
assert len(bytes) == size
return ''.join(bytes)
def get_arch(self):
info = self.r2.cmdj("ifj")
arch = info['bin']['arch']
bits = info['bin']['bits']
if arch == "x86" and bits == 32:
return 'x86'
elif arch == "x86" and bits == 64:
return 'x64'
else:
            raise NotImplementedError("Only tested with x86 & x86_64")
'''
elif arch == "arm" and bits == 32:
return 'arm'
elif arch == "arm" and bits == 64:
return 'arm64'
'''
def get_function_bytes(self, address=None, name=None):
if (address != None):
funcInfo = self.r2.cmd("afij {}".format(hex(address)))
elif (name != None):
print ("[ripr] TODO")
return
else:
print ("[ripr] No arguments supplied to get_function_bytes")
return None
if funcInfo.strip() == "":
            raise ValueError("Function not found at {}".format(address))
        funcInfo = json.loads(funcInfo, strict=False)
        if len(funcInfo) == 0:
            raise ValueError("Function not found at {}".format(address))
print (funcInfo)
offset = funcInfo[0]["offset"]
size = funcInfo[0]["size"]
bytes = self.read_bytes(offset, size)
retdir = {offset: codeSlice(bytes, offset)}
return retdir
def get_page_bytes(self, address):
# Should get this dynamically if possible based on arch/mode/etc
pagesize = self.get_page_size()
pageaddr = (address & ~(pagesize - 1))
return self.read_bytes(pageaddr, pagesize)
def get_page_size(self):
return 4096
def get_region_bytes(self, start, end):
return (start, self.read_bytes(start, end-start))
def get_nop_opcode(self):
        # radare2 prints the assembled instruction as a hex string; convert it
        # to raw bytes (unhexlify replaces the Python-2-only str.decode('hex')).
        return unhexlify(self.r2.cmd("pa nop").strip())
def generate_invalid_access(self, address, arch, size=None):
'''
Generates an invalid memory access for use in function hooking.
pad to size if applicable
'''
# TODO: Radare2 seems to assemble this to a rip-relative access?
if arch in ['x86', 'x64']:
if (size):
                opcodes = unhexlify(self.r2.cmd('pa mov al, [%s]' % address).strip())
                nop = self.get_nop_opcode()
                if len(opcodes) >= size:
                    return opcodes
                return opcodes + nop * (size - len(opcodes))
            else:
                return unhexlify(self.r2.cmd('pa mov al, [%s]' % address).strip())
def get_imports(self):
# Iterate through symbols and grab everything that starts with 'sym.'
res = {}
for sym in self.r2.cmdj("isj"):
if sym['name'].startswith("imp."):
res[sym['vaddr']] = sym['name'][4:]
return res
def branches_from_func(self, address, callCallback, branchCallback):
func = self.r2.cmdj("pdfj @ {}".format(hex(address)))
instructions = func['ops']
for instr in instructions:
if instr['type'] == 'call':
callCallback(instr['jump'], instr['offset'])
elif instr['type'] == 'cjmp' or instr['type'] == 'jmp':
branchCallback(instr['jump'], instr['offset'])
#TODO: Any other?
def scan_potential_pointers(self, func_addr):
# Leverage Radare2 automatic pointer detection
func = self.r2.cmdj("pdfj @ {}".format(hex(func_addr)))
res = []
for line in func['ops']:
if 'ptr' in line:
yield line['ptr'], line['offset']
def is_plausible_pointer(self, candidate_ptr):
# A manual scan of all sections
for section in self.r2.cmdj("Sj"):
if candidate_ptr >= section['vaddr'] and \
candidate_ptr < section['vaddr'] + section['vsize']:
return True
return False
def find_section(self, addr):
'''
Function should find what segment/section $addr is in and return a tuple
(StartAddress, Endaddress, Segment Name)
Error: Return -1
'''
# A manual scan of all sections
res = []
for section in self.r2.cmdj("Sj"):
if addr >= section['vaddr'] and \
addr < section['vaddr'] + section['vsize']:
return (
section['vaddr'],
section['vaddr'] + section['vsize'],
section['name'])
return -1
def get_instruction_length(self, address):
return self.r2.cmdj("pdj 1 @{}".format(hex(address)))[0]['size']
def highlight_instr(self, func_addr, instrAddr, color):
# No highlighting yet
pass
def add_comment(self, func_addr, instrAddr, comment):
if not re.compile("^[a-z0-9 !\\-\\_]+$", re.IGNORECASE).match(comment):
# Don't send arbitrary contents to radare pipe
print ("Ignoring malformed comment: {}".format(comment))
else:
self.r2.cmd("CC [ripr] {} @{}".format(comment, hex(instrAddr)))
|
recipes/Python/578557_Pythlist_files_their_path_folders/recipe-578557.py | tdiprima/code | 2,023 | 11098383 | # Required module
import os
# Function for getting files from a folder
def fetchFiles(pathToFolder, flag, keyWord):
''' fetchFiles() requires three arguments: pathToFolder, flag and keyWord
flag must be 'STARTS_WITH' or 'ENDS_WITH'
keyWord is a string to search the file's name
Be careful, the keyWord is case sensitive and must be exact
Example: fetchFiles('/Documents/Photos/','ENDS_WITH','.jpg')
returns: _pathToFiles and _fileNames '''
_pathToFiles = []
_fileNames = []
for dirPath, dirNames, fileNames in os.walk(pathToFolder):
if flag == 'ENDS_WITH':
selectedPath = [os.path.join(dirPath,item) for item in fileNames if item.endswith(keyWord)]
_pathToFiles.extend(selectedPath)
selectedFile = [item for item in fileNames if item.endswith(keyWord)]
_fileNames.extend(selectedFile)
elif flag == 'STARTS_WITH':
selectedPath = [os.path.join(dirPath,item) for item in fileNames if item.startswith(keyWord)]
_pathToFiles.extend(selectedPath)
selectedFile = [item for item in fileNames if item.startswith(keyWord)]
_fileNames.extend(selectedFile)
else:
print fetchFiles.__doc__
break
# Try to remove empty entries if none of the required files are in directory
try:
_pathToFiles.remove('')
        _fileNames.remove('')
except ValueError:
pass
# Warn if nothing was found in the given path
if selectedFile == []:
print 'No files with given parameters were found in:\n', dirPath, '\n'
    print len(_fileNames), 'files were found in the searched folder(s)'
return _pathToFiles, _fileNames
|
evaluate/previous_works/Utils/SpherePad.py | Syniez/Joint_360depth | 167 | 11098389 | import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .Equirec2Cube import Equirec2Cube
class SpherePad(nn.Module):
def __init__(self, pad_size):
super(SpherePad, self).__init__()
self.pad_size = pad_size
self.data = {}
# pad order: up, down, left, right sides
# use yes/no flag to choose flip/transpose or not
# notation: #face-#side_#flip-hor_#flip_ver_#transpose
# transpose is applied first
self.relation = {
'back': ['top-up_yes_yes_no', 'down-down_yes_yes_no', 'right-right_no_no_no', 'left-left_no_no_no'],
'down': ['front-down_no_no_no', 'back-down_yes_yes_no', 'left-down_yes_no_yes', 'right-down_no_yes_yes'],
'front': ['top-down_no_no_no', 'down-up_no_no_no', 'left-right_no_no_no', 'right-left_no_no_no'],
'left': ['top-left_yes_no_yes', 'down-left_no_yes_yes', 'back-right_no_no_no', 'front-left_no_no_no'],
'right': ['top-right_no_yes_yes', 'down-right_yes_no_yes', 'front-right_no_no_no', 'back-left_no_no_no'],
'top': ['back-up_yes_yes_no', 'front-up_no_no_no', 'left-up_no_yes_yes', 'right-up_yes_no_yes']
}
def _GetLoc(self, R_lst, grid_lst, K):
out = {}
pad = self.pad_size
f, cx, cy = K['f'], K['cx'], K['cy']
K_mat = torch.FloatTensor(
np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]]))
grid_front = grid_lst[2] # 1 x h x h x 3
orders = ['back', 'down', 'front', 'left', 'right', 'top']
for i, face in enumerate(orders):
out[face] = {}
for j, connect_side in enumerate(['up', 'down', 'left', 'right']):
connected_face = self.relation[face][j].split('-')[0]
idx = orders.index(connected_face)
R_world_to_connected = R_lst[idx] # 3 x 3
R_world_to_itself = R_lst[i] # 3 x 3
R_itself_to_connected = torch.matmul(
R_world_to_connected, R_world_to_itself.transpose(0, 1))
new_grid = torch.matmul(
grid_front, R_itself_to_connected.transpose(0, 1))
proj = torch.matmul(new_grid, K_mat.transpose(0, 1))
x = proj[:, :, :, 0:1] / proj[:, :, :, 2:3]
y = proj[:, :, :, 1:2] / proj[:, :, :, 2:3]
x = (x - cx) / cx
y = (y - cy) / cy
xy = torch.cat([x, y], dim=3) # 1 x h x w x 2
out[face][connect_side] = {}
x = xy[:, :, :, 0:1]
y = xy[:, :, :, 1:2]
'''
mask1 = np.logical_and(x >= -1.01, x <= 1.01)
mask2 = np.logical_and(y >= -1.01, y <= 1.01)
mask = np.logical_and(mask1, mask2)
'''
mask1 = (x >= -1.01) & (x <= 1.01)
mask2 = (y >= -1.01) & (y <= 1.01)
mask = mask1 & mask2
xy = torch.clamp(xy, -1, 1)
if connect_side == 'up':
out[face][connect_side]['mask'] = mask[:, :pad, :, :]
out[face][connect_side]['xy'] = xy[:, :pad, :, :]
elif connect_side == 'down':
out[face][connect_side]['mask'] = mask[:, -pad:, :, :]
out[face][connect_side]['xy'] = xy[:, -pad:, :, :]
elif connect_side == 'left':
out[face][connect_side]['mask'] = mask[:, :, :pad, :]
out[face][connect_side]['xy'] = xy[:, :, :pad, :]
elif connect_side == 'right':
out[face][connect_side]['mask'] = mask[:, :, -pad:, :]
out[face][connect_side]['xy'] = xy[:, :, -pad:, :]
return out
def forward(self, inputs):
[bs, c, h, w] = inputs.shape
assert bs % 6 == 0 and h == w
key = '(%d,%d,%d)' % (h, w, self.pad_size)
if key not in self.data:
theta = 2 * np.arctan((0.5 * h + self.pad_size) / (0.5 * h))
e2c_ori = Equirec2Cube(1, 2*h, 4*h, h, 90)
e2c = Equirec2Cube(
1, 2*h, 4*h, h+2*self.pad_size, theta/np.pi * 180)
R_lst = [x.transpose(0, 1) for x in e2c.R_lst]
grid_lst = e2c.grid_lst
K = e2c_ori.intrisic
self.data[key] = self._GetLoc(R_lst, grid_lst, K)
pad = self.pad_size
orders = ['back', 'down', 'front', 'left', 'right', 'top']
out = []
for i, face in enumerate(orders):
this_face = inputs[i::6]
this_face = F.pad(this_face, (pad, pad, pad, pad))
repeats = this_face.shape[0]
for j, connect_side in enumerate(['up', 'down', 'left', 'right']):
connected_face_name = self.relation[face][j].split('-')[0]
connected_face = inputs[orders.index(connected_face_name)::6]
mask = self.data[key][face][connect_side]['mask'].cuda().repeat(repeats, 1, 1, c).permute(0, 3, 1, 2)
xy = self.data[key][face][connect_side]['xy'].cuda().repeat(repeats, 1, 1, 1)
interpo = F.grid_sample(connected_face, xy, mode='bilinear')
if connect_side == 'up':
this_face[:, :, :pad, :][mask] = interpo[mask]
elif connect_side == 'down':
this_face[:, :, -pad:, :][mask] = interpo[mask]
elif connect_side == 'left':
this_face[:, :, :, :pad][mask] = interpo[mask]
elif connect_side == 'right':
this_face[:, :, :, -pad:][mask] = interpo[mask]
out.append(this_face)
out = torch.cat(out, dim=0)
[bs, c, h, w] = out.shape
out = out.view(-1, bs//6, c, h, w).transpose(0,
1).contiguous().view(bs, c, h, w)
return out
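# Minimal usage sketch (assumptions: CUDA is available and the input is a stack
# of cube faces in the face order used above, so batch size % 6 == 0):
#   pad = SpherePad(pad_size=1)
#   faces = torch.randn(6, 3, 64, 64).cuda()   # one 64x64 cube map
#   padded = pad(faces)                        # -> shape (6, 3, 66, 66)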
|
tests/unit/test_metadata.py | ajw0100/professional-services-data-validator | 167 | 11098439 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.fixture
def module_under_test():
from data_validation import metadata
return metadata
@pytest.mark.parametrize(
("source_column", "target_column", "result_type", "expected"),
(
("source_col", "target_col", "source", "source_col"),
("source_col", "target_col", "target", "target_col"),
(None, None, "source", None),
(None, None, "target", None),
),
)
def test_get_column_name(
module_under_test, source_column, target_column, result_type, expected
):
validation = module_under_test.ValidationMetadata(
"", "", "", "", "", "", source_column, target_column, ""
)
column_name = validation.get_column_name(result_type)
assert column_name == expected
def test_get_column_name_with_unexpected_result_type(module_under_test):
validation = module_under_test.ValidationMetadata(
"", "", "", "", "", "", "", "", "",
)
with pytest.raises(ValueError, match="Unexpected result_type"):
validation.get_column_name("oops_i_goofed")
@pytest.mark.parametrize(
(
"source_table_schema",
"source_table",
"target_table_schema",
"target_table",
"result_type",
"expected",
),
(
(
"source_table_schema",
"source_tbl",
"target_table_schema",
"target_tbl",
"source",
"source_table_schema.source_tbl",
),
(
"source_table_schema",
"source_tbl",
"target_table_schema",
"target_tbl",
"target",
"target_table_schema.target_tbl",
),
),
)
def test_get_table_name(
module_under_test,
source_table_schema,
source_table,
target_table_schema,
target_table,
result_type,
expected,
):
validation = module_under_test.ValidationMetadata(
"",
"",
source_table_schema,
source_table,
target_table_schema,
target_table,
None,
None,
"",
)
table_name = validation.get_table_name(result_type)
assert table_name == expected
def test_get_table_name_with_unexpected_result_type(module_under_test):
validation = module_under_test.ValidationMetadata(
"", "", "", "", "", "", "", "", ""
)
with pytest.raises(ValueError, match="Unexpected result_type"):
validation.get_table_name("oops_i_goofed")
|
gen_partial_dataset.py | LT1st/ReID_Alined_beginer | 370 | 11098481 | import cv2
import os
import numpy as np
data_root = "/home/csc302/workspace/luohao/code/AlignedReID/data/market1501/query"
gen_root = "/home/csc302/workspace/luohao/code/AlignedReID/data/market1501_partial/query"
def random_crop(img, sample_rate=0.6):
h,w = img.shape[:2]
sh = np.random.randint(sample_rate*h, h*0.9,1)[0]
bh = np.random.randint(0, h-sh, 1)[0]
img = img[bh:sh+bh,:,:]
img = cv2.resize(img, (w,h))
return img
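# Illustrative behaviour (sizes assumed, not from the original script): for a
# 128x64 input, random_crop keeps a random vertical window covering roughly
# 60%-90% of the height and resizes it back to (w, h), producing the partially
# observed crops written to the market1501_partial query folder below.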
for image_name in os.listdir(data_root):
if image_name[-3:] != 'jpg':
continue
img_path = os.path.join(data_root, image_name)
img = cv2.imread(img_path)
img = random_crop(img)
save_path = os.path.join(gen_root, image_name)
cv2.imwrite(save_path, img) |
freight/models/user.py | armandomeeuwenoord/freight | 562 | 11098518 |
from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, String
from freight.config import db
class User(db.Model):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
name = Column(String(200), nullable=False, unique=True)
date_created = Column(DateTime, default=datetime.utcnow, nullable=False)
|
get_search_data.py | Solitarliy/GenshinMapAutoMarkTools | 167 | 11098535 |
import os
import re
from fuzzywuzzy import process
from xpinyin import Pinyin
base_dir = os.path.dirname(os.path.abspath(__file__))
name_list=[]
def listname():
global name_list
if len(name_list):
return name_list;
file_name = os.listdir(r'{0}\data'.format(base_dir))
for nm in file_name:
name_list.append(re.sub(r'.txt','',nm))
return name_list
def inputandfind():
lst=listname()
pin = Pinyin()
pinlist=[]
for i in lst:
pinlist.append([re.sub('-','',pin.get_pinyin(i)),i])
    name=input('Please enter a name (type lst to reuse the previous parameters):\n')
if name=='lst':
return (2,None)
rs_0=process.extract(name, lst, limit=10)
rs_1=process.extract(name, pinlist, limit=10)
rs=[]
cnt=0
for rss in rs_0:
if rss[1]>80:
rs.append((cnt,rss[0]))
cnt+=1
flag=0
for rss in rs_1:
flag=1
if rss[1]>80:
flag=0
for i in rs:
if rss[0][1]==i[1]:
flag=1
break
if flag==0:
rs.append((cnt,rss[0][1]))
cnt+=1
print(rs)
if len(rs)>0:
        nid=int(input('Please enter an index number:\n'))
if nid>=0 and nid <len(rs):
return (1,rs[int(nid)][1])
else:
            print('Index out of range, please try again')
return (0,None)
else:
        print('No matching results')
return (0,None)
def ThirdKeySort(e):
return e[2]
def SearchName(name):
lst=listname()
pin = Pinyin()
pinlist=[]
for i in lst:
pinlist.append([re.sub('-','',pin.get_pinyin(i)),i])
rs_0=process.extract(name, lst, limit=10)
rs_1=process.extract(name, pinlist, limit=10)
rs=[]
for rss in rs_0:
rs.append([rss[1],rss[0]])
flag=0
for rss in rs_1:
flag=0
for i in rs:
if rss[0][1]==i[1]:
flag=1
break
if flag==0:
rs.append([rss[1],rss[0][1]])
rs.sort(reverse=True)
rslist=[]
cnt=0
for i in rs:
if i[1]=='deny1' or i[1]=='deny0':continue
rslist.append(i[1])
cnt+=1
if cnt>=7:break
return rslist
if __name__ == '__main__':
print(SearchName('deny')) |
code/Evaluation/evaluation.py | INK-USC/DS-RelationExtraction | 156 | 11098548 |
__author__ = 'xiang'
import sys
from collections import defaultdict
def find_none_index(file_name):
with open(file_name) as f:
for line in f:
entry = line.strip('\r\n').split('\t')
if entry[0] == 'None':
return int(entry[1])
print 'No None label!!!'
return
def load_labels(file_name):
### To Do: "None" RMs should NOT in ground_truth (double check whether we will have that)
labels = defaultdict(set)
with open(file_name) as f:
for line in f:
seg = line.strip('\r\n').split('\t')
try:
labels[int(seg[0])].add(int(seg[1]))
except:
labels[int(seg[0])].add(int(float(seg[1])))
f.close()
return labels
def load_raw_labels(file_name, ground_truth):
labels = defaultdict(set)
with open(file_name) as f:
for line in f:
seg = line.strip('\r\n').split('\t')
if int(seg[0]) in ground_truth:
labels[int(seg[0])].add(int(seg[1]))
f.close()
return labels
def load_label_score(file_name):
labels = defaultdict(tuple)
with open(file_name) as f:
for line in f:
seg = line.strip('\r\n').split('\t')
try:
if seg[2] == '-Infinity':
labels[int(seg[0])] = (int(float(seg[1])), 0.0)
else:
labels[int(seg[0])] = (int(seg[1]), float(seg[2]))
except:
if seg[2] == '-Infinity':
labels[int(seg[0])] = (int(float(seg[1])), 0.0)
else:
labels[int(seg[0])] = (int(float(seg[1])), float(seg[2]))
f.close()
return labels
###
def evaluate_rm(prediction, ground_truth):
"""
Evaluation matrix.
:param prediction: a dictionary of labels. e.g {0:[1,0],1:[2],2:[3,4],3:[5,6,7]}
:param ground_truth: a dictionary of labels
:return:
"""
pos_pred = 0.0
pos_gt = len(ground_truth) + 0.0
true_pos = 0.0
for i in prediction:
# classified as pos example (Is-A-Relation)
pos_pred += 1.0
if i in ground_truth and prediction[i] == ground_truth[i]:
true_pos += 1.0
precision = true_pos / (pos_pred + 1e-8)
recall = true_pos / (pos_gt + 1e-8)
f1 = 2 * precision * recall / (precision + recall + 1e-8)
# print "predicted # Pos RMs:%d, ground-truth #Pos RMs:%d"%(int(pos_pred), int(pos_gt))
return precision,recall,f1
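# Worked example for evaluate_rm (hypothetical label ids):
#   prediction   = {0: {1}, 1: {2}, 2: {3}}
#   ground_truth = {0: {1}, 1: {4}, 2: {3}, 3: {5}}
#   -> true_pos = 2, pos_pred = 3, pos_gt = 4
#   -> precision = 2/3, recall = 2/4, f1 = 2*P*R/(P+R) ~ 0.57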
###
def evaluate_rm_neg(prediction, ground_truth, none_label_index):
"""
    Evaluation metrics where mentions predicted as the [None] label count as negatives.
    :param prediction: a dictionary mapping ids to label sets, e.g. {0: {1, 0}, 1: {2}, 2: {3, 4}, 3: {5, 6, 7}}
    :param ground_truth: a dictionary of the same form
:return:
"""
# print '[None] label index:', none_label_index
pos_pred = 0.0
pos_gt = 0.0
true_pos = 0.0
for i in ground_truth:
if ground_truth[i] != set([none_label_index]):
pos_gt += 1.0
for i in prediction:
if prediction[i] != set([none_label_index]):
# classified as pos example (Is-A-Relation)
pos_pred += 1
if prediction[i] == ground_truth[i]:
true_pos += 1.0
precision = true_pos / (pos_pred + 1e-8)
recall = true_pos / (pos_gt + 1e-8)
f1 = 2 * precision * recall / (precision + recall + 1e-8)
# print "predicted # Pos RMs:%d, ground-truth #Pos RMs:%d"%(int(pos_pred), int(pos_gt))
return precision,recall,f1
if __name__ == "__main__":
if len(sys.argv) != 6:
print 'Usage: evaluation.py -TASK (classify/extract) -DATA(nyt_candidates) -MODE(classifier/emb) -METHOD(retypeRM) -SIM(cosine/dot)'
exit(-1)
# do prediction here
_task = sys.argv[1] # classifer / extract
_data = sys.argv[2]
_mode = sys.argv[3] # emb or classifier/method name
_method = sys.argv[4] # emb method or null
_sim_func = sys.argv[5] # similarity functin or null
indir = 'data/intermediate/' + _data + '/rm'
outdir = 'data/results/' + _data + '/rm'
output = outdir +'/prediction_' + _mode + '_' + _method + '_' + _sim_func + '.txt'
ground_truth = load_labels(indir + '/mention_type_test.txt')
predictions = load_labels(output)
if _task == 'extract':
none_label_index = find_none_index(indir + '/type.txt')
prec, rec, f1 = evaluate_rm_neg(predictions, ground_truth, none_label_index)
print 'precision:', prec
print 'recall:', rec
print 'f1:', f1
elif _task == 'classify':
prec, rec, f1 = evaluate_rm(predictions, ground_truth)
print 'accuracy:', prec
else:
print 'wrong TASK argument.'
exit(1)
|
py-server/src/controller/__init__.py | Jonnytoshen/wind-layer | 285 | 11098550 |
from src.app import app
import src.controller.index
import src.controller.process
# app.register_blueprint(index, url_prefix='/')
|
tests/test_dfp_get_orders.py | Pubmatic-Dhruv-Sonone/dfp-prebid-setup | 111 | 11098574 |
#!/usr/bin/env python
from unittest import TestCase
from mock import MagicMock, Mock, patch
import dfp.get_orders
@patch('googleads.ad_manager.AdManagerClient.LoadFromStorage')
class DFPServiceTests(TestCase):
def test_get_all_orders(self, mock_dfp_client):
"""
Ensure `get_all_orders` makes one call to DFP.
"""
mock_dfp_client.return_value = MagicMock()
# Response for fetching orders.
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement) = MagicMock()
dfp.get_orders.get_all_orders()
# Confirm that it loaded the mock DFP client.
mock_dfp_client.assert_called_once()
expected_arg = {'query': ' LIMIT 500 OFFSET 0', 'values': None}
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement.assert_called_once_with(expected_arg)
)
def test_get_all_orders_unicode(self, mock_dfp_client):
"""
Ensure `get_all_orders` prints when orders contain unicode characters.
"""
mock_dfp_client.return_value = MagicMock()
# Response for fetching orders.
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement).side_effect = [
{
'totalResultSetSize': 1,
'startIndex': 0,
'results': [{
'id': 152637489,
'name': u'\xe4',
'startDateTime': {},
'endDateTime': {},
'unlimitedEndDateTime': True,
'status': 'DRAFT',
'isArchived': False,
'externalOrderId': 0,
'currencyCode': 'USD',
'advertiserId': 123456789,
'creatorId': 123456789,
'traffickerId': 123456789,
'totalImpressionsDelivered': 0,
'totalClicksDelivered': 0,
'totalViewableImpressionsDelivered': 0,
'totalBudget': {
'currencyCode': 'USD',
'microAmount': 0,
},
'lastModifiedByApp': 'tab-for-',
'isProgrammatic': False,
'lastModifiedDateTime': {},
}]
},
{
'totalResultSetSize': 0,
'startIndex': 0,
},
]
dfp.get_orders.get_all_orders()
def test_get_order_by_name(self, mock_dfp_client):
"""
Ensure we make the correct call to DFP when getting an order
by name.
"""
mock_dfp_client.return_value = MagicMock()
order_name = 'My Fake Order'
# Response for fetching orders.
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement) = MagicMock()
dfp.get_orders.get_order_by_name(order_name)
# Expected argument to use in call to DFP.
expected_statement = {
'query': 'WHERE name = :name LIMIT 500 OFFSET 0',
'values': [{
'value': {
'value': order_name,
'xsi_type': 'TextValue'
},
'key': 'name'
}]
}
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement.assert_called_once_with(expected_statement)
)
def test_get_order_by_name_return(self, mock_dfp_client):
"""
Ensure we return the order when we get an order.
"""
mock_dfp_client.return_value = MagicMock()
order_name = 'My Fake Order'
# Response for fetching orders.
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement) = MagicMock(
return_value={
'totalResultSetSize': 1,
'startIndex': 0,
'results': [{
'id': 152637489,
'name': order_name,
'startDateTime': {},
'endDateTime': {},
'unlimitedEndDateTime': True,
'status': 'DRAFT',
'isArchived': False,
'externalOrderId': 0,
'currencyCode': 'USD',
'advertiserId': 123456789,
'creatorId': 123456789,
'traffickerId': 123456789,
'totalImpressionsDelivered': 0,
'totalClicksDelivered': 0,
'totalViewableImpressionsDelivered': 0,
'totalBudget': {
'currencyCode': 'USD',
'microAmount': 0,
},
'lastModifiedByApp': 'tab-for-',
'isProgrammatic': False,
'lastModifiedDateTime': {},
}]
}
)
order = dfp.get_orders.get_order_by_name(order_name)
self.assertEqual(order['id'], 152637489)
def test_get_no_order_by_name(self, mock_dfp_client):
"""
Ensure we return None when an order does not exist.
"""
mock_dfp_client.return_value = MagicMock()
# Response for fetching orders.
(mock_dfp_client.return_value
.GetService.return_value
.getOrdersByStatement) = MagicMock(
return_value={
'totalResultSetSize': 0,
'startIndex': 0,
}
)
order = dfp.get_orders.get_order_by_name('A new order')
self.assertIsNone(order)
|
terrascript/data/gridscale/gridscale.py | mjuenema/python-terrascript | 507 | 11098635 | # terrascript/data/gridscale/gridscale.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:17:40 UTC)
import terrascript
class gridscale_backup_list(terrascript.Data):
pass
class gridscale_backupschedule(terrascript.Data):
pass
class gridscale_firewall(terrascript.Data):
pass
class gridscale_ipv4(terrascript.Data):
pass
class gridscale_ipv6(terrascript.Data):
pass
class gridscale_isoimage(terrascript.Data):
pass
class gridscale_loadbalancer(terrascript.Data):
pass
class gridscale_marketplace_application(terrascript.Data):
pass
class gridscale_network(terrascript.Data):
pass
class gridscale_object_storage_accesskey(terrascript.Data):
pass
class gridscale_paas(terrascript.Data):
pass
class gridscale_paas_securityzone(terrascript.Data):
pass
class gridscale_public_network(terrascript.Data):
pass
class gridscale_server(terrascript.Data):
pass
class gridscale_snapshot(terrascript.Data):
pass
class gridscale_snapshotschedule(terrascript.Data):
pass
class gridscale_sshkey(terrascript.Data):
pass
class gridscale_ssl_certificate(terrascript.Data):
pass
class gridscale_storage(terrascript.Data):
pass
class gridscale_template(terrascript.Data):
pass
__all__ = [
"gridscale_backup_list",
"gridscale_backupschedule",
"gridscale_firewall",
"gridscale_ipv4",
"gridscale_ipv6",
"gridscale_isoimage",
"gridscale_loadbalancer",
"gridscale_marketplace_application",
"gridscale_network",
"gridscale_object_storage_accesskey",
"gridscale_paas",
"gridscale_paas_securityzone",
"gridscale_public_network",
"gridscale_server",
"gridscale_snapshot",
"gridscale_snapshotschedule",
"gridscale_sshkey",
"gridscale_ssl_certificate",
"gridscale_storage",
"gridscale_template",
]
|
beanie/odm/operators/find/__init__.py | yo-mo/beanie | 574 | 11098641 | from abc import ABC
from beanie.odm.operators import BaseOperator
class BaseFindOperator(BaseOperator, ABC):
...
|
test/clustering/test_pqkmeans.py | Hi-king/pqkmeans | 224 | 11098659 | import unittest
import pqkmeans
import numpy
import collections
import pickle
class TestPQKMeans(unittest.TestCase):
def data_source(self, n: int):
for i in range(n):
yield [i * 100] * 6
def setUp(self):
# Train PQ encoder
self.encoder = pqkmeans.encoder.PQEncoder(num_subdim=3, Ks=20)
self.encoder.fit(numpy.array(list(self.data_source(200))))
def test_just_construction(self):
pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=5, iteration=10, verbose=False)
def test_fit_and_predict(self):
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=2, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
predicted = engine.fit_predict(codes)
count = collections.defaultdict(int)
for cluster in predicted:
count[cluster] += 1
# roughly balanced clusters
self.assertGreaterEqual(min(count.values()), max(count.values()) * 0.7)
a = engine.predict(codes[0:1, :])
b = engine.predict(codes[0:1, :])
self.assertEqual(a, b)
def test_cluster_centers_are_really_nearest(self):
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=2, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
fit_predicted = engine.fit_predict(codes)
cluster_centers = numpy.array(engine.cluster_centers_, dtype=numpy.uint8)
predicted = engine.predict(codes)
self.assertTrue((fit_predicted == predicted).all())
# Reconstruct the original vectors
codes_decoded = self.encoder.inverse_transform(codes)
cluster_centers_decoded = self.encoder.inverse_transform(cluster_centers)
for cluster, code_decoded in zip(predicted, codes_decoded):
other_cluster = (cluster + 1) % max(predicted)
self.assertLessEqual(
numpy.linalg.norm(cluster_centers_decoded[cluster] - code_decoded),
numpy.linalg.norm(cluster_centers_decoded[other_cluster] - code_decoded)
)
def test_constructor_with_cluster_center(self):
# Run pqkmeans first.
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=5, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
fit_predicted = engine.fit_predict(codes)
cluster_centers = numpy.array(engine.cluster_centers_, dtype=numpy.uint8)
predicted = engine.predict(codes)
# save current engine and recover from savedata
engine_savedata = pickle.dumps(engine)
engine_recovered = pickle.loads(engine_savedata)
fit_predicted_from_recovered_obj = engine_recovered.predict(codes)
numpy.testing.assert_array_equal(predicted, fit_predicted_from_recovered_obj)
|
data_structures/queues/tests/test_array_based_queue.py | vinta/fuck-coding-interviews | 590 | 11098665 |
# coding: utf-8
import unittest
from data_structures.queues.array_based_queue import ArrayBasedQueue
class TestCase(unittest.TestCase):
def setUp(self):
self.queue = ArrayBasedQueue()
def test_enqueue(self):
self.queue.enqueue(0)
self.queue.enqueue(1)
self.queue.enqueue(2)
self.assertEqual(len(self.queue), 3)
self.assertEqual(list(self.queue), [0, 1, 2])
def test_dequeue(self):
with self.assertRaises(ValueError):
print(self.queue.dequeue())
self.queue.enqueue(0)
self.queue.enqueue(1)
self.queue.enqueue(2)
self.assertEqual(self.queue.dequeue(), 0)
self.assertEqual(self.queue.dequeue(), 1)
self.assertEqual(self.queue.dequeue(), 2)
self.assertEqual(len(self.queue), 0)
self.assertEqual(list(self.queue), [])
with self.assertRaises(ValueError):
print(self.queue.dequeue())
if __name__ == '__main__':
unittest.main()
|
deepy/layers/maxout.py | uaca/deepy | 260 | 11098689 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import theano.tensor as T
from . import NeuralLayer
from dense import Dense
class Maxout(NeuralLayer):
"""
Maxout activation unit.
- http://arxiv.org/pdf/1302.4389.pdf
"""
def __init__(self, output_dim=None, num_pieces=4, init=None, linear_transform=True):
"""
:param num_pieces: pieces of sub maps
"""
super(Maxout, self).__init__("maxout")
self.num_pieces = num_pieces
self.output_dim = output_dim
self.linear_transform = linear_transform
self.init = init
def prepare(self):
if self.output_dim is None:
self.output_dim = self.input_dim // self.num_pieces
if self.linear_transform:
self.transformer = Dense(self.output_dim * self.num_pieces).init(self.input_dim)
self.register(self.transformer)
def compute_tensor(self, x):
if self.linear_transform:
x = self.transformer.compute_tensor(x)
# x ~ batch, time, size / batch, size
new_shape = [x.shape[i] for i in range(x.ndim - 1)] + [self.output_dim, self.num_pieces]
# new_shape ~ batch, time, out_dim, pieces / batch, out_dim, pieces
output = T.max(x.reshape(new_shape, ndim=x.ndim + 1), axis=x.ndim)
return output |
slybot/slybot/randomproxy.py | rmdes/portia-dashboard | 223 | 11098709 | # Copyright (C) 2013 by <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import random
import base64
import logging
import requests
import json
import numpy
log = logging.getLogger('slybot.RandomProxy')
class Mode:
RANDOMIZE_PROXY_EVERY_REQUESTS, RANDOMIZE_PROXY_ONCE, SET_CUSTOM_PROXY = range(3)
class RandomProxy(object):
def __init__(self, settings):
self.mode = settings.get('PROXY_MODE')
self.chosen_proxy = ''
self.proxy_pool_server = settings.get('PROXY_POOL_SERVER')
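        # PROXY_POOL_SERVER is expected to be the host:port of a proxy-pool
        # HTTP service exposing /get/, /get_all/ and /delete/ endpoints
        # (e.g. "127.0.0.1:5010" -- an illustrative value, not taken from the
        # original settings).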
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def get_proxy(self):
return requests.get("http://%s/get/" % (self.proxy_pool_server)).content
def get_proxies(self):
content = requests.get("http://%s/get_all/" % (self.proxy_pool_server)).content
return json.loads(content)
def delete_proxy(self, proxy):
requests.get("http://%s/delete/?proxy=%s" % (self.proxy_pool_server, proxy))
def process_request(self, request, spider):
'''
#todo
# Don't overwrite with a random one (server-side state for IP)
if 'proxy' in request.meta:
if request.meta["exception"] is False:
return
'''
request.meta["exception"] = False
if len(self.get_proxies()) == 0 :
raise ValueError('All proxies are unusable, cannot proceed')
if self.mode == Mode.RANDOMIZE_PROXY_EVERY_REQUESTS:
proxy_address = "http://%s" % self.get_proxy()
else:
proxy_address = self.chosen_proxy
'''
#todo
proxy_user_pass = self.proxies[proxy_address]
if proxy_user_pass:
request.meta['proxy'] = proxy_address
basic_auth = 'Basic ' + base64.b64encode(proxy_user_pass.encode()).decode()
request.headers['Proxy-Authorization'] = basic_auth
else:
log.debug('Proxy user pass not found')
'''
request.meta['proxy'] = proxy_address
log.debug('Using proxy <%s>, %d proxies left' % ( proxy_address,
len(self.get_proxies())))
def process_exception(self, request, exception, spider):
if 'proxy' not in request.meta:
return
if self.mode == Mode.RANDOMIZE_PROXY_EVERY_REQUESTS or self.mode == Mode.RANDOMIZE_PROXY_ONCE:
proxy = request.meta['proxy']
try:
self.delete_proxy(proxy[7:])
except KeyError:
pass
request.meta["exception"] = True
if self.mode == Mode.RANDOMIZE_PROXY_ONCE:
self.chosen_proxy = "http://%s" % self.get_proxy()
log.info('Removing failed proxy <%s>, %d proxies left' % ( proxy[7:],
len(self.get_proxies())))
return request
|
setup.py | Jaykingamez/evennia | 1,544 | 11098726 | import os
import sys
from setuptools import setup, find_packages
os.chdir(os.path.dirname(os.path.realpath(__file__)))
VERSION_PATH = os.path.join("evennia", "VERSION.txt")
OS_WINDOWS = os.name == "nt"
def get_requirements():
"""
To update the requirements for Evennia, edit the requirements.txt file.
"""
with open("requirements.txt", "r") as f:
req_lines = f.readlines()
reqs = []
for line in req_lines:
# Avoid adding comments.
line = line.split("#")[0].strip()
if line:
reqs.append(line)
return reqs
def get_scripts():
"""
Determine which executable scripts should be added. For Windows,
this means creating a .bat file.
"""
if OS_WINDOWS:
batpath = os.path.join("bin", "windows", "evennia.bat")
scriptpath = os.path.join(sys.prefix, "Scripts", "evennia_launcher.py")
with open(batpath, "w") as batfile:
batfile.write('@"%s" "%s" %%*' % (sys.executable, scriptpath))
return [batpath, os.path.join("bin", "windows", "evennia_launcher.py")]
else:
return [os.path.join("bin", "unix", "evennia")]
def get_version():
"""
When updating the Evennia package for release, remember to increment the
version number in evennia/VERSION.txt
"""
return open(VERSION_PATH).read().strip()
def package_data():
"""
By default, the distribution tools ignore all non-python files.
Make sure we get everything.
"""
file_set = []
for root, dirs, files in os.walk("evennia"):
for f in files:
if ".git" in f.split(os.path.normpath(os.path.join(root, f))):
# Prevent the repo from being added.
continue
file_name = os.path.relpath(os.path.join(root, f), "evennia")
file_set.append(file_name)
return file_set
# setup the package
setup(
name="evennia",
version=get_version(),
author="The Evennia community",
maintainer="Griatch",
url="http://www.evennia.com",
description="A full-featured toolkit and server for text-based multiplayer games (MUDs, MU*).",
license="BSD",
long_description="""
_Evennia_ is an open-source library and toolkit for building multi-player
online text games (MUD, MUX, MUSH, MUCK and other MU*). You easily design
your entire game using normal Python modules, letting Evennia handle the
boring stuff all multiplayer games need. Apart from supporting traditional
MUD clients, Evennia comes with both a HTML5 game web-client and a
web-server out of the box.
""",
long_description_content_type="text/markdown",
packages=find_packages(),
scripts=get_scripts(),
install_requires=get_requirements(),
package_data={"": package_data()},
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: JavaScript",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Environment :: Console",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Twisted",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Topic :: Database",
"Topic :: Education",
"Topic :: Games/Entertainment :: Multi-User Dungeons (MUD)",
"Topic :: Games/Entertainment :: Puzzle Games",
"Topic :: Games/Entertainment :: Role-Playing",
"Topic :: Games/Entertainment :: Simulation",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
],
python_requires=">=3.7",
project_urls={
"Source": "https://github.com/evennia/evennia",
"Issue tracker": "https://github.com/evennia/evennia/issues",
"Chat": "http://www.evennia.com/chat-redirect-3",
"Forum": "https://groups.google.com/forum/#%21forum/evennia",
"Dev Blog": "http://evennia.blogspot.com/",
"Patreon": "https://www.patreon.com/griatch",
},
)
|
mxnet/test/gen_py_json.py | yaozhang2016/deepwater | 317 | 11098740 |
import mxnet as mx
import importlib
for name in ["alexnet", "googlenet", "inception-bn", "inception-v3", "lenet", "mlp", "resnet", "vgg"]:
net = importlib.import_module("symbol_" + name).get_symbol(10)
net.save("symbol_" + name + "-py.json")
name = "unet"
net = importlib.import_module("symbol_" + name).get_symbol()
net.save("symbol_" + name + "-py.json")
|
tests/fixers/test_utils_encoding.py | barseghyanartur/django-upgrade | 284 | 11098743 | from __future__ import annotations
from django_upgrade.data import Settings
from tests.fixers.tools import check_noop, check_transformed
settings = Settings(target_version=(3, 0))
def test_no_deprecated_alias():
check_noop(
"""\
from django.utils.encoding import something
something("yada")
""",
settings,
)
def test_encoding_module_imported():
check_transformed(
"""\
from django.utils import encoding
encoding.force_text("yada")
encoding.smart_text("yada")
""",
"""\
from django.utils import encoding
encoding.force_str("yada")
encoding.smart_str("yada")
""",
settings,
)
def test_direct_import():
check_transformed(
"""\
from django.utils.encoding import force_text, smart_text
def main(*, argv):
print(
force_text("yada"),
smart_text("yada"),
)
""",
"""\
from django.utils.encoding import force_str, smart_str
def main(*, argv):
print(
force_str("yada"),
smart_str("yada"),
)
""",
settings,
)
def test_success_alias():
check_transformed(
"""\
from django.utils.encoding import force_text as ft
ft("yada")
""",
"""\
from django.utils.encoding import force_str as ft
ft("yada")
""",
settings,
)
|
habitat_baselines/motion_planning/mp_spaces.py | jturner65/habitat-api | 489 | 11098756 | import os
import os.path as osp
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Tuple, Union
import numpy as np
from PIL import Image
from habitat.tasks.rearrange.rearrange_sim import RearrangeSim
from habitat.tasks.rearrange.utils import IkHelper
from habitat_baselines.motion_planning.robot_target import RobotTarget
try:
from ompl import base as ob # pylint: disable=import-error
from ompl import geometric as og # pylint: disable=import-error
except ImportError:
pass
def to_ob_state(vec: np.ndarray, space: "ob.StateSpace", dim: int):
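    """Copy the first `dim` components of a numpy vector into an OMPL state."""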
ob_vec = ob.State(space)
for i in range(dim):
ob_vec[i] = vec[i]
return ob_vec
class MpSpace(ABC):
"""
Defines an abstract planning space for OMPL to interface with.
"""
def __init__(self, use_sim: RearrangeSim, ik: IkHelper):
self._mp_sim = use_sim
self._ik = ik
@abstractmethod
def convert_state(self, x: Iterable) -> np.ndarray:
pass
@abstractmethod
def set_arm(self, x: Union[List[float], np.ndarray]):
pass
def set_env_state(self, env_state: Dict[str, Any]):
self.env_state = env_state
@abstractmethod
def get_range(self) -> float:
"""
Gets the planner step size range.
"""
@abstractmethod
def get_state_lims(self, restrictive: bool = False) -> np.ndarray:
"""
Get the state limits of the planning problem.
"""
@abstractmethod
def get_state_dim(self) -> int:
"""
Get the dimensionality of the planning problem
"""
@abstractmethod
def get_start_goal(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Gets the used start and goal states for the planner. This is after
clipping and any additional pre-processing.
"""
@abstractmethod
def convert_sol(self, path) -> np.ndarray:
"""
Convert a solution from OMPL format to numpy array
"""
@abstractmethod
def get_planner(self, si: "ob.SpaceInformation"):
pass
@abstractmethod
def set_problem(
self,
pdef: "ob.ProblemDefinition",
space: "ob.StateSpace",
si: "ob.SpaceInformation",
start_state: "ob.State",
targ_state: RobotTarget,
):
"""
Sets up the planning problem
"""
def render_start_targ(
self,
render_dir: str,
subdir: str,
targ_state: np.ndarray,
suffix: str = "targ",
):
"""
Renders the start and target to images for visualization
"""
def getPathLengthObjWithCostToGo(si):
obj = ob.PathLengthOptimizationObjective(si)
obj.setCostToGoHeuristic(ob.CostToGoHeuristic(ob.goalRegionCostToGo))
return obj
class JsMpSpace(MpSpace):
def __init__(self, use_sim, ik, start_num_calls, should_render):
super().__init__(use_sim, ik)
# self._lower_joint_lims, self._upper_joint_lims = self._ik.get_joint_limits()
joint_lims = self.get_state_lims(True)
self._lower_joint_lims, self._upper_joint_lims = (
joint_lims[:, 0],
joint_lims[:, 1],
)
self.num_calls = start_num_calls
self._should_render = should_render
def convert_state(self, x):
return np.array([x[i] for i in range(7)])
def _norm_joint_angle(self, angles):
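        """Wrap joint angles into the [-pi, pi] range."""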
return np.arctan2(np.sin(angles), np.cos(angles))
def get_planner(self, si):
return og.RRTConnect(si)
def get_state_lims(self, restrictive=False):
"""Get the state limits of the planning problem. If restrictive is true then
this returns the joint limts based on the PyBullet joint limits
"""
if restrictive:
lower_joint_lims, upper_joint_lims = self._ik.get_joint_limits()
lower_joint_lims = [
-np.pi if np.isclose(a, 0.0) else a for a in lower_joint_lims
]
upper_joint_lims = [
np.pi if np.isclose(a, 2 * np.pi) else a
for a in upper_joint_lims
]
lower_joint_lims = self._norm_joint_angle(lower_joint_lims)
upper_joint_lims = self._norm_joint_angle(upper_joint_lims)
return np.stack([lower_joint_lims, upper_joint_lims], axis=-1)
else:
return np.stack([[-2 * np.pi] * 7, [2 * np.pi] * 7], axis=-1)
def get_state_dim(self):
return len(self._mp_sim._sim.robot.arm_joint_pos)
def _fk(self, joints):
"""Sets the joint state and applys the change"""
self._mp_sim.set_arm_pos(joints)
self._mp_sim.micro_step()
def get_start_goal(self) -> Tuple[np.ndarray, np.ndarray]:
return (self.used_js_start, self.used_js_goal)
def set_problem(
self,
pdef,
space,
si,
js_start,
robot_targ,
):
"""
Sets up the OMPL problem
"""
js_end = robot_targ.joints_target
joint_shape = self._lower_joint_lims.shape
js_start = self._norm_joint_angle(js_start)
js_end = self._norm_joint_angle(js_end)
# In case you want some padding to the limits for extra safety
eps = np.full(joint_shape, 0.000)
js_start = np.clip(
js_start,
self._lower_joint_lims + eps,
self._upper_joint_lims - eps,
)
js_end = np.clip(
js_end, self._lower_joint_lims + eps, self._upper_joint_lims - eps
)
self.used_js_start = js_start
self.used_js_goal = js_end
self.num_calls += 1
js_start = to_ob_state(js_start, space, self.get_state_dim())
js_end = to_ob_state(js_end, space, self.get_state_dim())
def admiss_heuristic(cur_state, goal):
use_cur_state = self.convert_state(cur_state)
# FK to get both in EE space.
self._fk(use_cur_state)
cur_ee_state = self._mp_sim.get_ee_pos()
ret = np.linalg.norm(robot_targ.ee_target_pos - cur_ee_state)
return ret
def getPathLengthObjWithCostToGo(si):
obj = ob.PathLengthOptimizationObjective(si)
obj.setCostToGoHeuristic(ob.CostToGoHeuristic(admiss_heuristic))
return obj
pdef.setStartAndGoalStates(js_start, js_end)
pdef.setOptimizationObjective(getPathLengthObjWithCostToGo(si))
def render_start_targ(self, render_dir, subdir, targ_state, suffix="targ"):
if targ_state is not None:
targ_viz_id = self._mp_sim.add_sphere(0.06, color=[0, 0, 1, 1])
self._mp_sim.set_position(targ_state, targ_viz_id)
use_dir = osp.join(render_dir, subdir)
os.makedirs(use_dir, exist_ok=True)
# Visualize the target position.
# NOTE: The object will not immediately snap to the robot's hand if a target joint
# state is provided. This is not an issue, it only affects this one
# rendering.
self._fk(self.used_js_goal)
Image.fromarray(self._mp_sim.render()).save(
osp.join(use_dir, f"{suffix}_goal_{self.num_calls}.jpeg")
)
self._fk(self.used_js_start)
save_f_name = osp.join(
use_dir, f"{suffix}_start_{self.num_calls}.jpeg"
)
Image.fromarray(self._mp_sim.render()).save(save_f_name)
print("Rendered start / goal MP to ", save_f_name)
if targ_state is not None:
self._mp_sim.remove_object(targ_viz_id)
def get_range(self):
return 0.1
def set_arm(self, des_joint_pos):
des_joint_pos = self.convert_state(des_joint_pos)
self._fk(des_joint_pos)
self._mp_sim.set_state(self.env_state)
des_joint_pos = np.array(des_joint_pos)[:7]
def convert_sol(self, path):
plan = np.array([self.convert_state(x) for x in path.getStates()])
return plan
|
test_protocol/megaface/face_cropper/crop_eye.py | weihaoxie/FaceX-Zoo | 1,329 | 11098766 |
"""
@author: <NAME>
@date: 20201014
@contact: <EMAIL>
"""
import os
import cv2
def crop_facescrub(facescrub_root, facescrub_img_list, target_folder):
facescrub_img_list_buf = open(facescrub_img_list)
line = facescrub_img_list_buf.readline().strip()
while line:
image_path = os.path.join(facescrub_root, line)
target_path = os.path.join(target_folder, line)
target_dir = os.path.dirname(target_path)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
image = cv2.imread(image_path)
image = image[:60, :]
cv2.imwrite(target_path, image)
line = facescrub_img_list_buf.readline().strip()
if __name__ == '__main__':
facescrub_root = '/export2/wangjun492/face_database/public_data/final_data/test_data/megaface/facescrub_mask_crop_arcface'
facescrub_img_list = '/export2/wangjun492/face_database/public_data/meta_data/test_data/megaface/mid_files/facescrub_img_list.txt'
target_folder = '/export2/wangjun492/face_database/public_data/final_data/test_data/megaface/facescrub_eye_crop'
crop_facescrub(facescrub_root, facescrub_img_list, target_folder)
|
examples/multitag_web_scraper.py | dimanil/fast_request | 857 | 11098790 | from faster_than_requests import scraper2
print(scraper2(["https://nim-lang.org", "https://nim-lang.org"], list_of_tags=["h1", "a"], case_insensitive=False, deduplicate_urls=False))
|
files-to-c-arrays.py | suraj-testing2/Toilet_Video_games | 146 | 11098802 |
#!/usr/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
if len(sys.argv) < 3:
  print 'Usage: ' + sys.argv[0] + ' output_file input_file1 input_file2 ... input_fileN'
print
print 'Generates a .c file containing all of the input files as static'
print 'character arrays, along with a function to retrieve them.'
print
print 'const char *get_file(const char *path, size_t *out_size)'
exit(1)
def chunk(list, n):
"""Split a list into size n chunks (the last chunk may be shorter)."""
return (list[i : i + n] for i in range(0, len(list), n))
filesizes = []
filepaths = []
filearrays = []
for filepath in sys.argv[2:]:
filepaths.append(filepath.replace('\\', '/').lstrip('./'))
file = open(filepath, 'rb').read()
filesizes.append(len(file))
escapedfile = '\\x' + '\\x'.join(chunk(file.encode('hex'), 2))
filearrays.append('"\n "'.join(chunk(escapedfile, 76)))
template = """#include <stdint.h>
#include <string.h>
static const char *file_paths[] = {"%s"};
static const size_t file_sizes[] = {%s};
static const int num_files = %d;
static const char *files[] = {
"%s"
};
const char *get_file(const char *path, size_t *out_size) {
for (int i = 0; i < num_files; i++) {
if (strcmp(file_paths[i], path) == 0) {
*out_size = file_sizes[i];
return files[i];
}
}
return NULL;
}
"""
output = open(sys.argv[1], 'w')
output.write(template % ('", "'.join(filepaths),
', '.join(str(x) for x in filesizes),
len(filepaths),
'",\n "'.join(filearrays)))
|
dependency.py | pbiernat/ripr | 338 | 11098810 |
'''
Code in this file deals with finding data and/or other code that must be
included in the emulation environment for the target code to be able to run
properly.
'''
from .analysis_engine import aengine as ae
# Try to import stuff.
try:
from binaryninja import *
except:
print ("[!!] Not running in Binary Ninja")
try:
import r2pipe
except:
print ("[!!] Not running in Radare2")
class ImportedCall(object):
'''
Convenience class for storing information about imported Calls.
'''
def __init__(self, address, instlen, target, symname):
self.address = address
self.inst_len = instlen
self.target = target
self.symbol = symname
class riprDataRef(object):
'''
Convenience class for storing information on data references we find.
'''
def __init__(self, address, length, _type):
self.address = address
self.length = length
self._type = _type
class riprCodeRef(object):
def __init__(self, address, _type):
self.address = address
self.type = _type
class depScanner(object):
def __init__(self, engine, codeobj):
self.engine = engine
self.codeobj = codeobj
self.impCalls = []
self.dataRefs = []
self.codeRefs = []
self.imports = self.engine.get_imports()
def _mark_imported_call(self, func_addr, address, target):
'''
Create an ImportedCall object for possible later use in "python-hooking"
Note: We /do/ want duplicates (multiple ImportedCall objects for "puts" for example)
as we map expected return addresses to our hooked functions.
'''
self.engine.highlight_instr(func_addr, address, "red")
self.engine.add_comment(func_addr, address, "Imported Call !!")
symname = str(target)
if target in self.imports.keys():
symname = self.imports[target]
icall = ImportedCall(address, self.engine.get_instruction_length(address), target, symname)
self.impCalls.append(icall)
def _mark_additional_branch(self, func_addr, address, destination, _type):
ref = riprCodeRef(destination, _type)
self.engine.highlight_instr(func_addr, address, "blue")
self.codeRefs.append(ref)
def _mark_identified_data(self, func_addr, ref_addr):
self.engine.highlight_instr(func_addr, ref_addr, "yellow")
def branchScan(self, address, isFunc=True):
'''
Function is responsible for mapping calls and jumps
that are outside the current selection's bounds, if possible.
'''
print ("[ripr] Inside branchScan")
def callCallback(dest, instr_addr):
if type(dest) != int:
try:
dest = dest.value
except:
return
if (dest in self.imports):
print ("[ripr] Found imported Call target...")
self._mark_imported_call(address, instr_addr, dest)
elif (self.codeobj.data_saved(dest) == False):
print ("[ripr] Found LLIL CALL instruction")
self._mark_additional_branch(address, instr_addr, dest, "call")
else:
print ("[ripr] Target address already mapped")
def jumpCallback(dest, instr_addr):
print ("[ripr] JUMP TARGET: %s" % (dest))
if isFunc:
self.engine.branches_from_func(address, callCallback, jumpCallback)
else:
ibb = self.engine.find_llil_block_from_addr(address)
if ibb == None:
return self.codeRefs
self.engine.branches_from_block(ibb, callCallback, jumpCallback)
return self.codeRefs
def _find_stringRefs(self, address):
'''
Look for strings that are referenced in the selected code.
'''
ret = []
for stringStart,stringLength in self.engine.get_strings():
for refAddress in self.engine.get_refs_to(stringStart): # Ignored the length
if (self.engine.function_contains_addr(address, refAddress)):
print ("[ripr] Found string reference: 0x%x" % (refAddress))
self._mark_identified_data(address, refAddress)
dref = riprDataRef(stringStart, stringLength, 'str')
self.dataRefs.append(dref)
return ret
def _find_symbolRefs(self, address):
'''
Look for data symbols that are referenced in the selected code.
'''
ret = []
symbols = self.engine.get_data_symbols()
for symStart in symbols:
for refAddress in self.engine.get_refs_to(symStart):
if self.engine.function_contains_addr(address, refAddress):
print ("[ripr] Found Symbol Reference: 0x%x references 0x%x" % (refAddress, symStart))
self._mark_identified_data(address, refAddress)
dref = riprDataRef(symStart, -1, 'sym')
self.dataRefs.append(dref)
ret.append(symStart)
return ret
def _simpleDataScan(self, address):
ret = []
ret += self._find_stringRefs(address)
ret += self._find_symbolRefs(address)
return ret
def dataScan(self, address):
'''
Function is responsible for finding data the target code
needs in order to run correctly.
'''
print ("[ripr] Inside dataScan")
ret = []
# Find the low-hanging fruit
ret += self._simpleDataScan(address)
# Iterate over all instructions for potential pointers
for target, instrAddr in self.engine.scan_potential_pointers(address):
if self.engine.is_plausible_pointer(target):
print ("Found Potential Pointer: %s instaddr %s" % (hex(target), hex(instrAddr)))
self._mark_identified_data(address, instrAddr)
dref = riprDataRef(target, -1, 'ptr')
self.dataRefs.append(dref)
ret.append(target)
return set(ret)
|
scripts/reader/train.py | rajarshd/Multi-Step-Reasoning | 122 | 11098814 | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Main DrQA reader training script."""
import socket
import argparse
import torch
import numpy as np
import json
import os
import sys
import subprocess
import logging
from tqdm import tqdm
import pickle
from collections import defaultdict
from msr.reader import utils, vector, config, data
from msr.reader.model import Model
from multi_corpus import MultiCorpus
from torch.utils.data.sampler import SequentialSampler, RandomSampler
logger = logging.getLogger()
# ------------------------------------------------------------------------------
# Training arguments.
# ------------------------------------------------------------------------------
# Defaults
ROOT_DIR = '.'
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_train_args(parser):
"""Adds commandline arguments pertaining tos training a model. These
are different from the arguments dictating the model architecture.
"""
parser.register('type', 'bool', str2bool)
# Runtime environment
runtime = parser.add_argument_group('Environment')
runtime.add_argument('--no-cuda', type='bool', default=False,
help='Train on CPU, even if GPUs are available.')
runtime.add_argument('--gpu', type=int, default=-1,
help='Run on a specific GPU')
runtime.add_argument('--data-workers', type=int, default=10,
help='Number of subprocesses for data loading')
runtime.add_argument('--parallel', type='bool', default=False,
help='Use DataParallel on all available GPUs')
runtime.add_argument('--random-seed', type=int, default=1013,
help=('Random seed for all numpy/torch/cuda'
'operations (for reproducibility)'))
runtime.add_argument('--num-epochs', type=int, default=40,
help='Train data iterations')
runtime.add_argument('--batch_size', type=int, default=64,
help='Batch size for training')
runtime.add_argument('--test_batch_size', type=int, default=2,
help='Batch size during validation/testing')
runtime.add_argument('--multi_step_reasoning_steps', type=int, default=3,
                         help='Number of steps of multi-step reasoning')
runtime.add_argument('--multi_step_reading_steps', type=int, default=1,
                         help='Number of steps of multi-step reading')
runtime.add_argument('--dropout-san-prediction', type=float, default=0.4,
help='During training, dropout few predictions')
runtime.add_argument('--num_gru_layers', type=int, default=3,
help='Number of layers of GRU')
runtime.add_argument('--domain', type=str, default="web-open",
help='wiki/web/web-open')
runtime.add_argument('--dataset_name', type=str, default="triviaqa",
help='triviaqa/searchqa/')
runtime.add_argument('--freeze_reader', type=int, default=0,
                         help='Do not train the reader?')
runtime.add_argument('--fine_tune_RL', type=int, default=0,
help='Keep everything fixed, fine tune reasoner with RL')
runtime.add_argument('--test', type=int, default=0,
help='eval on test data?')
runtime.add_argument('--drqa_plus', type=int, default=1,
help='Use reader of DrQA++')
runtime.add_argument('--num_positive_paras', type=int, default=1,
                         help='DrQA++ relies on a few paragraphs containing the answer, '
'returned by the retriever during training. Default 1')
runtime.add_argument('--num_paras_test', type=int, default=15,
                         help='Number of paragraphs to read at test time. Default 15')
runtime.add_argument('--num_low_ranked_paras', type=int, default=0,
                         help='DrQA++ relies on a few low-ranked paragraphs returned by the retriever during training.')
runtime.add_argument('--cheat', type=int, default=0,
                         help='At test time, overwrite the retriever output with the correct paragraphs containing answer annotations')
# Files
files = parser.add_argument_group('Filesystem')
files.add_argument('--model_dir', type=str, default="",
help='Directory for saved models/checkpoints/logs')
files.add_argument('--model-name', type=str, default='',
help='Unique model identifier (.mdl, .txt, .checkpoint)')
files.add_argument('--data_dir', type=str,
help='Directory of training/validation data')
files.add_argument('--train-file', type=str,
default='SQuAD-v1.1-train-processed-corenlp.txt',
help='Preprocessed train file')
files.add_argument('--dev-file', type=str,
default='SQuAD-v1.1-dev-processed-corenlp.txt',
help='Preprocessed dev file')
files.add_argument('--dev-json', type=str, default='SQuAD-v1.1-dev.json',
help=('Unprocessed dev file to run validation '
'while training on'))
files.add_argument('--embed-dir', type=str, default="",
help='Directory of pre-trained embedding files')
files.add_argument('--embedding-file', type=str,
default='crawl-300d-2M.txt',
help='Space-separated pretrained embeddings file')
files.add_argument('--official_output_json', type=str, default="official_output.json",
                       help='Name of the JSON file (written under model_dir) for official-format predictions')
files.add_argument('--saved_para_vectors_dir', type=str,
help='Directory where para and query vectors are saved by the retrievers')
# Saving + loading
save_load = parser.add_argument_group('Saving/Loading')
save_load.add_argument('--checkpoint', type='bool', default=True,
help='Save model + optimizer state after each epoch')
save_load.add_argument('--pretrained', type=str, default='',
help='Path to a pretrained model to warm-start with')
save_load.add_argument('--expand-dictionary', type='bool', default=False,
help='Expand dictionary of pretrained model to ' +
'include training/dev words of new data')
save_load.add_argument('--create_vocab', type=int, default=0,
help='Create vocab or load saved')
save_load.add_argument('--vocab_dir', type=str, default="")
save_load.add_argument('--embedding_table_path', type=str, default='embedding_table.mdl')
save_load.add_argument('--save_pickle_files', type=int, default=0,
help='Save the processed train, dev files for faster loading')
save_load.add_argument('--load_pickle_files', type=int, default=1,
help='Load the processed train, dev files for faster loading')
save_load.add_argument('--small', type=int, default=0,
help='Experiment on small files (for debugging)')
# Data preprocessing
preprocess = parser.add_argument_group('Preprocessing')
preprocess.add_argument('--uncased-question', type='bool', default=False,
help='Question words will be lower-cased')
preprocess.add_argument('--uncased-doc', type='bool', default=False,
help='Document words will be lower-cased')
preprocess.add_argument('--restrict-vocab', type='bool', default=True,
help='Only use pre-trained words in embedding_file')
    preprocess.add_argument('--use_pretrained_para_clf', type=int, default=1, help="Use a pretrained paragraph classifier")
preprocess.add_argument('--require_answer', type=int, default=0,
help="Retriever only sends paragraphs which have the answers")
# General
general = parser.add_argument_group('General')
general.add_argument('--official-eval', type='bool', default=True,
help='Validate with official SQuAD eval')
general.add_argument('--eval_only', type=int, default=0,
help='Evaluate only after loading a pretrained model')
general.add_argument('--valid-metric', type=str, default='f1',
help='The evaluation metric used for model selection')
general.add_argument('--display-iter', type=int, default=25,
help='Log state after every <display_iter> epochs')
general.add_argument('--sort-by-len', type='bool', default=True,
help='Sort batches by length for speed')
def make_data_loader(args, exs, train_time=False):
dataset = data.ReaderDataset(
args,
exs,
args.word_dict,
args.feature_dict,
single_answer=False,
train_time=train_time
)
sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset)
batch_size = args.batch_size if train_time else args.test_batch_size
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=0,
collate_fn=vector.batchify,
pin_memory=True
)
return loader
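# Typical call pattern inside this script (illustrative; assumes args.word_dict
# and args.feature_dict have already been populated):
#   train_loader = make_data_loader(args, train_exs, train_time=True)
#   dev_loader = make_data_loader(args, dev_exs)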
def set_defaults(args):
"""Make sure the commandline arguments are initialized properly."""
# Set model name
args.vocab_dir = os.path.join(args.data_dir, args.dataset_name, "vocab", args.domain)
args.embedding_file = os.path.join(args.data_dir, args.dataset_name, "embeddings", args.embedding_file)
args.embedding_table_path = os.path.join(args.data_dir, args.dataset_name, "embeddings", args.domain,
args.embedding_table_path)
args.origin_data_dir = args.data_dir
args.data_dir = os.path.join(args.data_dir, args.dataset_name, "data", args.domain)
if os.path.exists(args.embedding_table_path):
args.embedding_table = True
else:
args.embedding_table = False
if not args.model_name:
import uuid
import time
args.model_name = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8]
if args.small == 0: # only save on full experiments, saves disk space
args.model_dir = os.path.join(args.model_dir, args.dataset_name, "expts", args.model_name)
subprocess.call(['mkdir', '-p', args.model_dir])
# subprocess.call(['cp', '-r', ROOT_DIR, args.model_dir])
# Set log + model file names
args.log_file = os.path.join(args.model_dir, 'log.txt')
args.model_file = os.path.join(args.model_dir, 'model.mdl')
else:
args.model_file = ""
args.model_dir = ""
args.log_file = None
args.official_output_json = os.path.join(args.model_dir, args.official_output_json)
args.use_pretrained_para_clf = (args.use_pretrained_para_clf == 1)
args.create_vocab = (args.create_vocab == 1)
args.eval_only = (args.eval_only == 1)
args.require_answer = (args.require_answer == 1)
args.drqa_plus = (args.drqa_plus == 1)
# args.saved_para_vectors_dir = os.path.join(DATA_DIR, args.dataset_name, 'paragraph_vectors', args.domain)
args.freeze_reader = (args.freeze_reader == 1)
args.cheat = (args.cheat == 1)
args.fine_tune_RL = (args.fine_tune_RL == 1)
if args.fine_tune_RL:
assert args.freeze_reader is True
args.test = (args.test == 1)
return args
# ------------------------------------------------------------------------------
# Initialization from scratch.
# ------------------------------------------------------------------------------
def init_from_scratch(args, train_exs, dev_exs):
"""New model, new data, new dictionary."""
# Create a feature dict out of the annotations in the data
logger.info('-' * 100)
logger.info('Generate features')
feature_dict = utils.build_feature_dict(args, train_exs)
logger.info('Num features = %d' % len(feature_dict))
logger.info(feature_dict)
# Build a dictionary from the data questions + words (train/dev splits)
logger.info('-' * 100)
logger.info('Build dictionary')
word_dict = utils.build_word_dict(args, train_exs, dev_exs)
logger.info('Num words = %d' % len(word_dict))
# Initialize model
logger.info('Initializing model')
model = Model(args, word_dict, feature_dict)
# Load pretrained embeddings for words in dictionary
if args.embedding_file:
model.load_embeddings(args, word_dict.tokens(), args.embedding_file)
return model
# ------------------------------------------------------------------------------
# Train loop.
# ------------------------------------------------------------------------------
def train(args, data_loader, model, global_stats, ground_truths_map):
"""Run through one epoch of model training with the provided data loader."""
# Initialize meters + timers
train_loss = utils.AverageMeter()
epoch_time = utils.Timer()
# Run one epoch
for idx, ex in enumerate(data_loader):
ret = model.update(ex, epoch_counter=global_stats['epoch'], ground_truths_map=ground_truths_map)
if ret is None:
continue
train_loss.update(*ret)
if idx % args.display_iter == 0:
logger.info('train: Epoch = %d | iter = %d/%d | ' %
(global_stats['epoch'], idx, len(data_loader)) +
'loss = %.2f | elapsed time = %.2f (s)' %
(train_loss.avg, global_stats['timer'].time()))
train_loss.reset()
logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %
(global_stats['epoch'], epoch_time.time()))
# Checkpoint
if args.checkpoint and (args.small == 0):
logger.info("Checkpointing...")
model.checkpoint(args.model_file + '.checkpoint',
global_stats['epoch'] + 1)
def validate_official(args, data_loader, model, global_stats,
offsets, texts, answers, ground_truths_map=None, official_eval_output=False):
"""Run one full official validation. Uses exact spans and same
exact match/F1 score computation as in the SQuAD script.
Extra arguments:
offsets: The character start/end indices for the tokens in each context.
texts: Map of qid --> raw text of examples context (matches offsets).
answers: Map of qid --> list of accepted answers.
"""
eval_time = utils.Timer()
f1 = utils.AverageMeter()
exact_match = utils.AverageMeter()
# Run through examples
examples = 0
official_output_json = {}
fout = None
if args.eval_only:
fout = open(os.path.join(args.model_dir, "outputs.txt"), "w")
for ex in tqdm(data_loader):
ex_id, batch_size = ex[-1], ex[0].size(0)
outputs, query_norms, all_query_vectors = model.predict(ex)
max_scores, max_spans = [], []
for i in range(ex[0].size(0)):
span_scores_map = defaultdict(float)
max_score_i = float('-inf')
max_span = None
for step_counter, output in enumerate(outputs): # for each time step
pred_s, pred_e, pred_score, para_ids = output
start = pred_s[i]
end = pred_e[i]
span_scores = pred_score[i]
doc_tensor = ex[0][i, para_ids[i]]
for s_counter, (s, e) in enumerate(zip(start, end)):
int_words = doc_tensor[s_counter, s:e+1]
predicted_span = " ".join(args.word_dict.ind2tok[str(w.item())] for w in int_words)
span_scores_map[predicted_span] += span_scores[s_counter]
if max_score_i < span_scores_map[predicted_span]:
max_score_i = span_scores_map[predicted_span]
max_span = predicted_span
max_scores.append(max_score_i)
max_spans.append(max_span)
# calculate em and f1
ground_truths = ground_truths_map[ex_id[i]]
ground_truths = list(set(ground_truths))
em = utils.metric_max_over_ground_truths(utils.exact_match_score, max_span, ground_truths)
exact_match.update(em)
f1.update(utils.metric_max_over_ground_truths(utils.f1_score, max_span, ground_truths))
examples += 1
official_output_json[ex_id[i]] = max_span
if fout is not None:
fout.close()
logger.info('dev valid official: Epoch = %d | EM = %.2f | ' %
(global_stats['epoch'], exact_match.avg * 100) +
'F1 = %.2f | examples = %d | valid time = %.2f (s)' %
(f1.avg * 100, examples, eval_time.time()))
logger.info("Writing official output at {}".format(args.official_output_json))
json.dump(official_output_json, open(args.official_output_json, "w"))
return {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100}
def eval_accuracies(pred_s, target_s, pred_e, target_e):
"""An unofficial evalutation helper.
Compute exact start/end/complete match accuracies for a batch.
"""
# Convert 1D tensors to lists of lists (compatibility)
if torch.is_tensor(target_s):
target_s = [[e] for e in target_s]
target_e = [[e] for e in target_e]
# Compute accuracies from targets
batch_size = len(pred_s)
start = utils.AverageMeter()
end = utils.AverageMeter()
em = utils.AverageMeter()
for i in range(batch_size):
# Start matches
if pred_s[i] in target_s[i]:
start.update(1)
else:
start.update(0)
# End matches
if pred_e[i] in target_e[i]:
end.update(1)
else:
end.update(0)
# Both start and end match
if any([1 for _s, _e in zip(target_s[i], target_e[i])
if _s == pred_s[i] and _e == pred_e[i]]):
em.update(1)
else:
em.update(0)
return start.avg * 100, end.avg * 100, em.avg * 100
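# Illustrative comment (hypothetical values, assuming list inputs): with
# pred_s=[3], target_s=[[3]], pred_e=[5], target_e=[[5]], eval_accuracies
# returns (100.0, 100.0, 100.0); a wrong end index, e.g. pred_e=[6], would
# give (100.0, 0.0, 0.0).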
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def main(args):
# --------------------------------------------------------------------------
# DATA
logger.info('-' * 100)
logger.info('Load data files')
max_para_len = 400
logger.info("Domain: {}".format(args.domain))
train_exs, dev_exs = None, None
if args.small == 1:
train_file_name = "processed_train_small.pkl"
dev_file_name = "processed_dev_small.pkl"
else:
train_file_name = "processed_train.pkl"
dev_file_name = "processed_test.pkl" if args.test else "processed_dev.pkl"
logger.info("Loading pickle files")
fin = open(os.path.join(args.data_dir, train_file_name), "rb")
train_exs = pickle.load(fin)
fin.close()
fin = open(os.path.join(args.data_dir, dev_file_name), "rb")
dev_exs = pickle.load(fin)
fin.close()
logger.info("Loading done!")
logger.info('Num train examples = %d' % len(train_exs.questions))
# dev_exs = utils.load_data(args, args.dev_file)
logger.info('Num dev examples = %d' % len(dev_exs.questions))
# --------------------------------------------------------------------------
# MODEL
logger.info('-' * 100)
start_epoch = 0
if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'):
# Just resume training, no modifications.
logger.info('Found a checkpoint...')
checkpoint_file = args.model_file + '.checkpoint'
model, start_epoch = Model.load_checkpoint(checkpoint_file, args)
else:
# Training starts fresh. But the model state is either pretrained or
# newly (randomly) initialized.
if args.pretrained:
logger.info('Using pretrained model...')
model = Model.load(args.pretrained, args)
if args.expand_dictionary:
logger.info('Expanding dictionary for new data...')
# Add words in training + dev examples
words = utils.load_words(args, train_exs + dev_exs)
added = model.expand_dictionary(words)
# Load pretrained embeddings for added words
if args.embedding_file:
model.load_embeddings(added, args.embedding_file)
else:
logger.info('Training model from scratch...')
model = init_from_scratch(args, train_exs, dev_exs)
# Set up partial tuning of embeddings
if args.tune_partial > 0:
logger.info('-' * 100)
logger.info('Counting %d most frequent question words' %
args.tune_partial)
top_words = utils.top_question_words(
args, train_exs, model.word_dict
)
for word in top_words[:5]:
logger.info(word)
logger.info('...')
for word in top_words[-6:-1]:
logger.info(word)
model.tune_embeddings([w[0] for w in top_words])
# Set up optimizer
model.init_optimizer()
# Use the GPU?
if args.cuda:
model.cuda()
# Use multiple GPUs?
if args.parallel:
model.parallelize()
# --------------------------------------------------------------------------
# DATA ITERATORS
# Two datasets: train and dev. If we sort by length it's faster.
# -------------------------------------------------------------------------
# PRINT CONFIG
logger.info('-' * 100)
logger.info('CONFIG:\n%s' %
json.dumps(vars(args), indent=4, sort_keys=True))
logger.info('-' * 100)
logger.info('Make data loaders')
args.word_dict = model.word_dict
args.feature_dict = model.feature_dict
# train_dataset = data.ReaderDataset(train_exs, model, single_answer=True)
train_loader = make_data_loader(args, train_exs, train_time=True)
dev_loader = make_data_loader(args, dev_exs)
# --------------------------------------------------------------------------
# TRAIN/VALID LOOP
stats = {'timer': utils.Timer(), 'epoch': 0, 'best_valid': 0}
logger.info('-' * 100)
logger.info("Reading ground truths for train")
fin = open(os.path.join(args.data_dir, "train_testing.txt"))
train_ground_truths_map = {}
for line in fin:
line = line.strip()
qid, ground_truth = line.split("\t")
train_ground_truths_map[qid] = ground_truth.split(
"$@#$@#") # this is the special char with which the gt ans are separated
fin.close()
logger.info("Reading ground truths for dev")
fin = open(os.path.join(args.data_dir, "test_testing.txt")) if args.test else open(
os.path.join(args.data_dir, "dev_testing.txt"))
dev_ground_truths_map = {}
for line in fin:
line = line.strip()
qid, ground_truth = line.split("\t")
dev_ground_truths_map[qid] = ground_truth.split(
"$@#$@#") # this is the special char with which the gt ans are separated
fin.close()
if args.eval_only:
logger.info("Eval only mode")
result = validate_official(args, dev_loader, model, stats, None, None, None,
ground_truths_map=dev_ground_truths_map, official_eval_output=True)
logger.info("Exiting...")
sys.exit(0)
logger.info('Starting training...')
for epoch in range(start_epoch, args.num_epochs):
stats['epoch'] = epoch
# Train
train(args, train_loader, model, stats, train_ground_truths_map)
# Validate official
if args.official_eval:
result = validate_official(args, dev_loader, model, stats, None, None, None, ground_truths_map=dev_ground_truths_map)
# Save best valid
if result[args.valid_metric] > stats['best_valid']:
logger.info('Best valid: %s = %.2f (epoch %d, %d updates)' %
(args.valid_metric, result[args.valid_metric],
stats['epoch'], model.updates))
model.save(args.model_file)
stats['best_valid'] = result[args.valid_metric]
if __name__ == '__main__':
# Parse cmdline args and setup environment
parser = argparse.ArgumentParser(
'DrQA Document Reader',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_train_args(parser)
config.add_model_args(parser)
args = parser.parse_args()
set_defaults(args)
# Set cuda
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
# Set random state
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
if args.cuda:
torch.cuda.manual_seed(args.random_seed)
# Set logging
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',
'%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
if args.log_file:
if args.checkpoint:
logfile = logging.FileHandler(args.log_file, 'a')
else:
logfile = logging.FileHandler(args.log_file, 'w')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
logger.info('COMMAND: %s' % ' '.join(sys.argv))
# Run!
main(args)
|
hypergan/modules/no_op.py | limberc/HyperGAN | 889 | 11098842 | <filename>hypergan/modules/no_op.py
import torch.nn as nn
class NoOp(nn.Module):
def __init__(self):
super(NoOp, self).__init__()
def forward(self, x):
return x
|
test cases/common/150 reserved targets/runtarget/echo.py | kira78/meson | 4,047 | 11098924 | <gh_stars>1000+
#!/usr/bin/env python3
import sys
if len(sys.argv) > 1:
print(sys.argv[1])
|
buildroot/support/testing/tests/package/test_python_pytest.py | superm1/operating-system | 349 | 11098934 | <gh_stars>100-1000
import os
from tests.package.test_python import TestPythonPackageBase
class TestPythonPy3Pytest(TestPythonPackageBase):
__test__ = True
config = TestPythonPackageBase.config + \
"""
BR2_PACKAGE_PYTHON3=y
BR2_PACKAGE_PYTHON_PYTEST=y
"""
sample_scripts = ["tests/package/sample_python_pytest.py"]
def run_sample_scripts(self):
for script in self.sample_scripts:
cmd = self.interpreter + " -m pytest " + os.path.basename(script)
_, exit_code = self.emulator.run(cmd, timeout=self.timeout)
self.assertEqual(exit_code, 0)
|
planet/api/exceptions.py | karrabatcheller/planet-client-python | 210 | 11098955 | <filename>planet/api/exceptions.py
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class APIException(Exception):
'''General unexpected response'''
pass
class BadQuery(APIException):
'''Invalid inputs, HTTP 400'''
pass
class InvalidAPIKey(APIException):
'''Invalid key, HTTP 401'''
pass
class NoPermission(APIException):
'''Insufficient permissions, HTTP 403'''
pass
class MissingResource(APIException):
'''Request for non existing resource, HTTP 404'''
pass
class TooManyRequests(APIException):
'''Too many requests, HTTP 429'''
pass
class OverQuota(APIException):
'''Quota exceeded, HTTP 429'''
pass
class ServerError(APIException):
'''Unexpected internal server error, HTTP 500'''
pass
class InvalidIdentity(APIException):
'''Raised when logging in with invalid credentials'''
pass
class RequestCancelled(Exception):
'''Internal exception when a request is cancelled'''
pass
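# Hypothetical illustration (not part of the planet client API): a sketch of how
# a caller might map HTTP status codes onto the exception classes above; the
# `response` object and its attributes are assumptions.
#
#     _STATUS_TO_EXCEPTION = {
#         400: BadQuery,
#         401: InvalidAPIKey,
#         403: NoPermission,
#         404: MissingResource,
#         429: TooManyRequests,
#     }
#
#     def raise_for_status(response):
#         if response.status_code >= 500:
#             raise ServerError(response.text)
#         exc = _STATUS_TO_EXCEPTION.get(response.status_code)
#         if exc is not None:
#             raise exc(response.text)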
|
snippets/ch09/graph.py | megtatehh/atap | 360 | 11098968 | <gh_stars>100-1000
#!/usr/bin/env python3
import heapq
import collections
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
from entities import pairs
from reader import PickledCorpusReader
def graph(corpus):
# Create an undirected graph
G = nx.Graph(name="Baleen Entity Graph")
# Create category, feed, and document nodes
G.add_nodes_from(corpus.categories(), type='category')
G.add_nodes_from([feed['title'] for feed in corpus.feeds()], type='feed')
G.add_nodes_from(corpus.fileids(), type='document')
# Create feed-category edges
G.add_edges_from([
(feed['title'], feed['category']) for feed in corpus.feeds()
])
# Create document-category edges
G.add_edges_from([
(fileid, corpus.categories(fileids=fileid)[0])
for fileid in corpus.fileids()
])
# Add edges for each document-entities and between pairs
for idx, doc in enumerate(corpus.docs()):
fileid = corpus.fileids()[idx]
for pair in pairs(doc):
# NOTE: each pair is a tuple with (entity,tag)
# here I'm adding only the entity to the graph,
# though it might be interesting to add the tags
# so we can filter the graph by entity type...
# G.add_edge(fileid, pair[0][0])
# G.add_edge(fileid, pair[1][0])
# Now add edges between entity pairs with a weight
# of 1 for every document they co-appear in
if (pair[0][0], pair[1][0]) in G.edges():
G.edges[(pair[0][0], pair[1][0])]['weight'] += 1
else:
G.add_edge(pair[0][0], pair[1][0], weight=1)
return G
def nbest_centrality(G, metric, n=10, attr="centrality", **kwargs):
# Compute the centrality scores for each vertex
scores = metric(G, **kwargs)
# Set the score as a property on each node
nx.set_node_attributes(G, name=attr, values=scores)
# Find the top n scores and print them along with their index
topn = heapq.nlargest(n, scores.items(), key=itemgetter(1))
for idx, item in enumerate(topn):
print("{}. {}: {:0.4f}".format(idx + 1, *item))
return G
if __name__ == '__main__':
corpus = PickledCorpusReader('../corpus')
G = graph(corpus)
# # Write the graph to disk, if needed
# nx.write_graphml(G, "entities.graphml")
# # Get summary stats for the full graph
# print(nx.info(G))
# # find the most central entities in the social network
# print("Degree centrality")
# nbest_centrality(G, nx.degree_centrality)
# print("Betweenness centrality")
# nbest_centrality(G, nx.betweenness_centrality, 10, "betweenness", normalized=True)
# # Extract and visualize an ego graph
# H = nx.ego_graph(G, "Hollywood")
    # edges, weights = zip(*nx.get_edge_attributes(H, "weight").items())
    # pos = nx.spring_layout(H, k=0.3, iterations=40)
    # nx.draw(
    #     H, pos, node_color="skyblue", node_size=20, edgelist=edges,
# edge_color=weights, width=0.25, edge_cmap=plt.cm.Pastel2,
# with_labels=True, font_size=6, alpha=0.8)
# plt.show()
# plt.savefig("atap_ch09_hollywood_entity_graph.png", transparent=True)
# # Compare centrality measures for an ego graph
# print("Closeness centrality for Hollywood")
# nbest_centrality(H, nx.closeness_centrality, 10, "closeness")
# print("Eigenvector centrality for Hollywood")
# nbest_centrality(H, nx.eigenvector_centrality_numpy, 10, "eigenvector")
# print("Pagerank centrality for Hollywood")
# nbest_centrality(H, nx.pagerank_numpy, 10, "pagerank")
# print("Katz centrality for Hollywood")
# nbest_centrality(H, nx.katz_centrality_numpy, 10, "katz")
# T = nx.ego_graph(G, "Twitter")
# E = nx.ego_graph(G, "Earth")
# # Examine degree distributions with histograms
# sns.distplot(
# [G.degree(v) for v in G.nodes()], norm_hist=True
# )
# plt.show()
#
# sns.distplot(
# [H.degree(v) for v in H.nodes()], norm_hist=True
# )
# plt.show()
#
# sns.distplot(
# [T.degree(v) for v in T.nodes()], norm_hist=True
# )
# plt.show()
#
# sns.distplot(
# [E.degree(v) for v in E.nodes()], norm_hist=True
# )
# plt.show()
#
# print("Baleen Entity Graph")
# print("Transitivity: {}".format(nx.transitivity(G)))
# print("Average clustering coefficient: {}".format(nx.average_clustering(G)))
# print("Number of cliques: {}".format(nx.graph_number_of_cliques(G)))
#
# print("Hollywood Ego Graph")
# print("Transitivity: {}".format(nx.transitivity(H)))
# print("Average clustering coefficient: {}".format(nx.average_clustering(H)))
# print("Number of cliques: {}".format(nx.graph_number_of_cliques(H)))
|
poco/drivers/unity3d/test/tutorial/overview.py | HBoPRC/Poco | 1,444 | 11098974 | <gh_stars>1000+
# coding=utf-8
import time
from poco.drivers.unity3d.test.tutorial.case import TutorialCase
class OverviewTutorial(TutorialCase):
def runTest(self):
self.poco('btn_start').click()
time.sleep(1)
self.poco(textMatches='drag.*').click()
time.sleep(1)
shell = self.poco('shell').focus('center')
for star in self.poco('star'):
star.drag_to(shell)
time.sleep(1)
self.assertEqual(self.poco('scoreVal').get_text(), "100", "score correct.")
self.poco('btn_back', type='Button').click()
def tearDown(self):
time.sleep(2)
if __name__ == '__main__':
import pocounit
pocounit.main()
|
examples/pybullet/examples/vr_kuka_setup.py | stolk/bullet3 | 158 | 11099003 | <reponame>stolk/bullet3<filename>examples/pybullet/examples/vr_kuka_setup.py
import pybullet as p
import time
#p.connect(p.UDP,"192.168.86.100")
cid = p.connect(p.SHARED_MEMORY)
if (cid < 0):
p.connect(p.GUI)
p.resetSimulation()
# disabling rendering during loading makes it much faster
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
objects = [
p.loadURDF("plane.urdf", 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 1.000000)
]
objects = [
p.loadURDF("samurai.urdf", 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
1.000000)
]
objects = [
p.loadURDF("pr2_gripper.urdf", 0.500000, 0.300006, 0.700000, -0.000000, -0.000000, -0.000031,
1.000000)
]
pr2_gripper = objects[0]
print("pr2_gripper=")
print(pr2_gripper)
jointPositions = [0.550569, 0.000000, 0.549657, 0.000000]
for jointIndex in range(p.getNumJoints(pr2_gripper)):
p.resetJointState(pr2_gripper, jointIndex, jointPositions[jointIndex])
pr2_cid = p.createConstraint(pr2_gripper, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0.2, 0, 0],
[0.500000, 0.300006, 0.700000])
print("pr2_cid")
print(pr2_cid)
objects = [
p.loadURDF("kuka_iiwa/model_vr_limits.urdf", 1.400000, -0.200000, 0.600000, 0.000000, 0.000000,
0.000000, 1.000000)
]
kuka = objects[0]
jointPositions = [-0.000000, -0.000000, 0.000000, 1.570793, 0.000000, -1.036725, 0.000001]
for jointIndex in range(p.getNumJoints(kuka)):
p.resetJointState(kuka, jointIndex, jointPositions[jointIndex])
p.setJointMotorControl2(kuka, jointIndex, p.POSITION_CONTROL, jointPositions[jointIndex], 0)
objects = [
p.loadURDF("lego/lego.urdf", 1.000000, -0.200000, 0.700000, 0.000000, 0.000000, 0.000000,
1.000000)
]
objects = [
p.loadURDF("lego/lego.urdf", 1.000000, -0.200000, 0.800000, 0.000000, 0.000000, 0.000000,
1.000000)
]
objects = [
p.loadURDF("lego/lego.urdf", 1.000000, -0.200000, 0.900000, 0.000000, 0.000000, 0.000000,
1.000000)
]
objects = p.loadSDF("gripper/wsg50_one_motor_gripper_new_free_base.sdf")
kuka_gripper = objects[0]
print("kuka gripper=")
print(kuka_gripper)
p.resetBasePositionAndOrientation(kuka_gripper, [0.923103, -0.200000, 1.250036],
[-0.000000, 0.964531, -0.000002, -0.263970])
jointPositions = [
0.000000, -0.011130, -0.206421, 0.205143, -0.009999, 0.000000, -0.010055, 0.000000
]
for jointIndex in range(p.getNumJoints(kuka_gripper)):
p.resetJointState(kuka_gripper, jointIndex, jointPositions[jointIndex])
p.setJointMotorControl2(kuka_gripper, jointIndex, p.POSITION_CONTROL, jointPositions[jointIndex],
0)
kuka_cid = p.createConstraint(kuka, 6, kuka_gripper, 0, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0.05],
[0, 0, 0])
pr2_cid2 = p.createConstraint(kuka_gripper,
4,
kuka_gripper,
6,
jointType=p.JOINT_GEAR,
jointAxis=[1, 1, 1],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0])
p.changeConstraint(pr2_cid2, gearRatio=-1, erp=0.5, relativePositionTarget=0, maxForce=100)
objects = [
p.loadURDF("jenga/jenga.urdf", 1.300000, -0.700000, 0.750000, 0.000000, 0.707107, 0.000000,
0.707107)
]
objects = [
p.loadURDF("jenga/jenga.urdf", 1.200000, -0.700000, 0.750000, 0.000000, 0.707107, 0.000000,
0.707107)
]
objects = [
p.loadURDF("jenga/jenga.urdf", 1.100000, -0.700000, 0.750000, 0.000000, 0.707107, 0.000000,
0.707107)
]
objects = [
p.loadURDF("jenga/jenga.urdf", 1.000000, -0.700000, 0.750000, 0.000000, 0.707107, 0.000000,
0.707107)
]
objects = [
p.loadURDF("jenga/jenga.urdf", 0.900000, -0.700000, 0.750000, 0.000000, 0.707107, 0.000000,
0.707107)
]
objects = [
p.loadURDF("jenga/jenga.urdf", 0.800000, -0.700000, 0.750000, 0.000000, 0.707107, 0.000000,
0.707107)
]
objects = [
p.loadURDF("table/table.urdf", 1.000000, -0.200000, 0.000000, 0.000000, 0.000000, 0.707107,
0.707107)
]
objects = [
p.loadURDF("teddy_vhacd.urdf", 1.050000, -0.500000, 0.700000, 0.000000, 0.000000, 0.707107,
0.707107)
]
objects = [
p.loadURDF("cube_small.urdf", 0.950000, -0.100000, 0.700000, 0.000000, 0.000000, 0.707107,
0.707107)
]
objects = [
p.loadURDF("sphere_small.urdf", 0.850000, -0.400000, 0.700000, 0.000000, 0.000000, 0.707107,
0.707107)
]
objects = [
p.loadURDF("duck_vhacd.urdf", 0.850000, -0.400000, 0.900000, 0.000000, 0.000000, 0.707107,
0.707107)
]
objects = p.loadSDF("kiva_shelf/model.sdf")
ob = objects[0]
p.resetBasePositionAndOrientation(ob, [0.000000, 1.000000, 1.204500],
[0.000000, 0.000000, 0.000000, 1.000000])
objects = [
p.loadURDF("teddy_vhacd.urdf", -0.100000, 0.600000, 0.850000, 0.000000, 0.000000, 0.000000,
1.000000)
]
objects = [
p.loadURDF("sphere_small.urdf", -0.100000, 0.955006, 1.169706, 0.633232, -0.000000, -0.000000,
0.773962)
]
objects = [
p.loadURDF("cube_small.urdf", 0.300000, 0.600000, 0.850000, 0.000000, 0.000000, 0.000000,
1.000000)
]
objects = [
p.loadURDF("table_square/table_square.urdf", -1.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000, 1.000000)
]
ob = objects[0]
jointPositions = [0.000000]
for jointIndex in range(p.getNumJoints(ob)):
p.resetJointState(ob, jointIndex, jointPositions[jointIndex])
objects = [
p.loadURDF("husky/husky.urdf", 2.000000, -5.000000, 1.000000, 0.000000, 0.000000, 0.000000,
1.000000)
]
ob = objects[0]
jointPositions = [
0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
0.000000
]
for jointIndex in range(p.getNumJoints(ob)):
p.resetJointState(ob, jointIndex, jointPositions[jointIndex])
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
p.setGravity(0.000000, 0.000000, 0.000000)
p.setGravity(0, 0, -10)
##show this for 10 seconds
#now = time.time()
#while (time.time() < now+10):
# p.stepSimulation()
p.setRealTimeSimulation(1)
while (1):
p.setGravity(0, 0, -10)
p.disconnect()
|
core/csrc/fps/setup.py | AlbertoRemus/GDR_Net | 132 | 11099011 | <filename>core/csrc/fps/setup.py<gh_stars>100-1000
import os
from cffi import FFI
os.system(
"gcc -shared src/farthest_point_sampling.cpp -c -o src/farthest_point_sampling.cpp.o -fopenmp -fPIC -O2 -std=c++11"
)
ffibuilder = FFI()
with open(os.path.join(os.path.dirname(__file__), "src/ext.h")) as f:
ffibuilder.cdef(f.read())
ffibuilder.set_source(
"_ext",
"""
#include "src/ext.h"
""",
extra_objects=["src/farthest_point_sampling.cpp.o"],
libraries=["stdc++"],
)
if __name__ == "__main__":
ffibuilder.compile(verbose=True)
os.system("rm src/*.o")
os.system("rm *.o")
|
chainer_chemistry/dataset/preprocessors/base_preprocessor.py | pfnet/chainerchem | 184 | 11099033 | <filename>chainer_chemistry/dataset/preprocessors/base_preprocessor.py<gh_stars>100-1000
"""
Preprocessor supports feature extraction for each model (network)
"""
class BasePreprocessor(object):
"""Base class for preprocessor"""
def __init__(self):
pass
def process(self, filepath):
pass
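# Minimal subclassing sketch (an assumption, not part of this module): concrete
# preprocessors are expected to override `process` with whatever feature
# extraction their target network needs; `MyPreprocessor` is hypothetical.
#
#     class MyPreprocessor(BasePreprocessor):
#         def process(self, filepath):
#             # e.g. parse the file and cache features for the target model
#             ...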
|
eval/eval_exp.py | urasakikeisuke/rigidmask | 138 | 11099041 | <filename>eval/eval_exp.py
import glob
import os
import sys
sys.path.insert(0,os.getcwd())
import numpy as np
from matplotlib import pyplot as plt
from utils.flowlib import read_flow, flow_to_image
from utils.util_flow import write_flow, readPFM
import cv2
import pdb
import PIL.Image as Image
from dataloader.robloader import disparity_loader
from utils.sintel_io import disparity_read
from joblib import Parallel, delayed
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--path', default='/data/ptmodel/',
help='database')
parser.add_argument('--dataset', default='2015',
help='database')
args = parser.parse_args()
## KITTI
# dataloader
if args.dataset == '2015':
from dataloader import kitti15list as DA
datapath = '/ssd/kitti_scene/training/'
elif args.dataset == '2015val':
from dataloader import kitti15list_val as DA
datapath = '/ssd/kitti_scene/training/'
elif args.dataset == '2015vallidar':
from dataloader import kitti15list_val_lidar as DA
datapath = '/ssd/kitti_scene/training/'
elif args.dataset == 'sintelval':
from dataloader import sintellist_val as DA
datapath = '/ssd/rob_flow/training/'
test_left_img, test_right_img ,flow_paths= DA.dataloader(datapath)
expansionp = [i.replace('flow_occ','expansion').replace('.png', '.pfm') for i in flow_paths]
if '2015' in args.dataset:
disp0p = [i.replace('flow_occ','disp_occ_0') for i in flow_paths]
disp1p = [i.replace('flow_occ','disp_occ_1') for i in flow_paths]
else:
disp0p = []
disp1p = []
for fp in flow_paths:
seqname1 = fp.split('/')[-1].split('_')[-3]
seqname2 = fp.split('/')[-1].split('_')[-2]
framename = int(fp.split('/')[-1].split('_')[-1].split('.')[0])
disp0p.append('%s/disparities/%s_%s/frame_%04d.png'%(fp.rsplit('/',2)[0], seqname1, seqname2,framename+1))
disp1p.append('%s/disparities/%s_%s/frame_%04d.png'%(fp.rsplit('/',2)[0], seqname1, seqname2,framename+2))
def eval_f(fp):
import warnings
warnings.filterwarnings("ignore")
# gt
gt_oe = disparity_loader(expansionp[fp])
gt_logexp = -np.log(gt_oe)
oemask = gt_oe>0
if '2015' in args.dataset:
gt_disp0 = disparity_loader(disp0p[fp])
gt_disp1 = disparity_loader(disp1p[fp])
elif args.dataset == 'sintelval':
gt_disp0 = disparity_read(disp0p[fp])
gt_disp1 = disparity_read(disp1p[fp])
gt_logdc = np.log(gt_disp0/gt_disp1)
d1mask = gt_disp0>0
d2mask = gt_disp1>0
dcmask = np.logical_and(d1mask,d2mask)
dcmask = np.logical_and(dcmask, np.abs(gt_logdc)<np.log(2))
# pred
logexp = disparity_loader( '%s/%s/exp-%s.pfm'%(args.path,args.dataset,expansionp[fp].split('/')[-1].split('.')[0]))
logexp = np.clip(logexp,-np.log(2),np.log(2))
logexp_error = np.abs(gt_logexp-logexp)[oemask].mean()
logdc = disparity_loader( '%s/%s/mid-%s.pfm'%(args.path,args.dataset,expansionp[fp].split('/')[-1].split('.')[0]))
logdc = np.clip(logdc,-np.log(2),np.log(2))
logmid_err = np.abs(gt_logdc-logdc)[dcmask].mean()
return logexp_error, logmid_err
rt = Parallel(n_jobs=1)(delayed(eval_f)(fp) for fp in range(len(test_left_img)) )
logexp_error = [k[0] for k in rt]
logmid_error = [k[1] for k in rt]
print('logexp-err:\t%.1f (1e4)'%(10000*np.mean(logexp_error)))
print('logmid-err:\t%.1f (1e4)'%(10000*np.mean(logmid_error)))
|
tools/autograd/templates/annotated_fn_args.py | deltabravozulu/pytorch | 206 | 11099094 | <gh_stars>100-1000
"""
This file is needed for generating procedural tests required for
testing __torch_function__. See tests/test_overrides.py.
"""
# flake8: noqa
import torch
annotated_args = {
${annotated_args}
}
|
src/genie/libs/parser/iosxe/tests/ShowNetconfYangDatastores/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 11099130 | expected_output = {
'datastores': {
'names': [
'running',
'candidate'
]
}
}
|
problems/20/problem_20.py | r1cc4rdo/daily_coding_problem | 158 | 11099141 | <filename>problems/20/problem_20.py
def coding_problem_20(list_a, list_b):
"""
Given two singly linked lists that intersect at some point, find the intersecting node.
Do this in O(M + N) time (where M and N are the lengths of the lists) and constant space.
For example, given A = 3 -> 7 -> 8 -> 10 -> 1 and B = 99 -> 1 -> 8 -> 10, return the node with value 8.
Example:
>>> class LinkedListNode(object):
...
... def __init__(self, value, child=None):
... self.value = value
... self.next = child
...
... def add(self, value):
... return LinkedListNode(value, self)
...
... @classmethod
... def len(cls, node):
... count = 0
... while node:
... node = node.next
... count += 1
... return count
>>> common_tail = LinkedListNode(1).add(10).add(8)
>>> list_a = LinkedListNode(7, common_tail).add(3)
>>> list_b = LinkedListNode(1, common_tail).add(99).add(14)
>>> coding_problem_20(list_a, list_b)
8
"""
    # Walk both lists once to get their lengths: O(M + N) time, O(1) space.
    len_a, len_b = 0, 0
    node = list_a
    while node:
        len_a, node = len_a + 1, node.next
    node = list_b
    while node:
        len_b, node = len_b + 1, node.next
    # Advance the head of the longer list by the length difference so both
    # cursors are equally far from the end, then walk them in lockstep until
    # they reference the same node.
    while len_a > len_b:
        list_a, len_a = list_a.next, len_a - 1
    while len_b > len_a:
        list_b, len_b = list_b.next, len_b - 1
    while list_a is not list_b:
        list_a, list_b = list_a.next, list_b.next
    return list_a.value
|
tests/conftest.py | Itsindigo/rele | 183 | 11099149 | import concurrent
import decimal
import json
from unittest.mock import MagicMock, patch
import pytest
from google.cloud.pubsub_v1 import PublisherClient
from google.cloud.pubsub_v1.exceptions import TimeoutError
from rele import Publisher
from rele.client import Subscriber
from rele.config import Config
from rele.middleware import register_middleware
@pytest.fixture
def project_id():
return "rele-test"
@pytest.fixture
def config(project_id):
return Config(
{
"APP_NAME": "rele",
"SUB_PREFIX": "rele",
"GC_CREDENTIALS_PATH": "tests/dummy-pub-sub-credentials.json",
"MIDDLEWARE": ["rele.contrib.LoggingMiddleware"],
}
)
@pytest.fixture
def subscriber(project_id, config):
return Subscriber(config.gc_project_id, config.credentials, 60)
@pytest.fixture
def mock_future():
return MagicMock(spec=concurrent.futures.Future)
@pytest.fixture
def publisher(config, mock_future):
publisher = Publisher(
gc_project_id=config.gc_project_id,
credentials=config.credentials,
encoder=config.encoder,
timeout=config.publisher_timeout,
)
publisher._client = MagicMock(spec=PublisherClient)
publisher._client.publish.return_value = mock_future
return publisher
@pytest.fixture
def published_at():
return 1560244246.863829
@pytest.fixture
def time_mock(published_at):
with patch("time.time") as mock:
mock.return_value = published_at
yield mock
@pytest.fixture(autouse=True)
def default_middleware(config):
register_middleware(config=config)
@pytest.fixture
def custom_encoder():
class DecimalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
return DecimalEncoder
@pytest.fixture
def mock_publish_timeout():
with patch("rele.client.Publisher.publish") as mock:
mock.side_effect = TimeoutError()
yield mock
@pytest.fixture
def mock_post_publish_failure():
with patch(
"rele.contrib.logging_middleware.LoggingMiddleware.post_publish_failure"
) as mock:
yield mock
|
PyEngine3D/OpenGLContext/Shader.py | ubuntunux/PyEngine3D | 121 | 11099150 | # reference - http://www.labri.fr/perso/nrougier/teaching/opengl
import codecs
import configparser
from collections import OrderedDict
import copy
import os
import re
import traceback
import uuid
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import GetClassName, Attributes, Logger, AutoEnum
from PyEngine3D.App import CoreManager
reInclude = re.compile(r'\#include\s+[\"|\<](.+?)[\"|\>]') # [include file name, ]
reVersion = re.compile(r"(\#version\s+.+)") # [version code, ]
reComment = re.compile(r"\/\*.+?\*\/", re.DOTALL)
reMacroStart = re.compile(r'\#(define|undef|endif|ifdef|ifndef|if|elif|else)\s*(.*)') # [macro type, expression]
reDefineMacro = re.compile(r'\#define\s*(.*)') # [macro type, expression]
reVariable = re.compile(r'[a-z|A-Z|_]+[a-z|A-Z|_|0-9]*')
reVoidMain = re.compile(r'void\s+main\s*\(')
reFindUniform = re.compile(r"uniform\s+(.+?)\s+(.+?)\s*;") # [Variable Type, Variable Name]
reMacro = re.compile(r'\#(ifdef|ifndef|if|elif|else|endif)\s*(.*)') # [macro type, expression]
shader_types = OrderedDict(
VERTEX_SHADER=GL_VERTEX_SHADER,
GEOMETRY_SHADER=GL_GEOMETRY_SHADER,
FRAGMENT_SHADER=GL_FRAGMENT_SHADER,
TESS_CONTROL_SHADER=GL_TESS_CONTROL_SHADER,
TESS_EVALUATION_SHADER=GL_TESS_EVALUATION_SHADER,
COMPUTE_SHADER=GL_COMPUTE_SHADER
)
texture_targets = ["texture2D", "texture2DLod", "texture2DGrad",
"texture2DArray", "texture2DArrayLod", "texture2DArrayGrad",
"texture3D", "texture3DLod", "texture3DGrad",
"textureCube", "textureCubeLod", "textureCubeGrad"]
class ShaderCompileOption(AutoEnum):
USE_GLOBAL_TEXTURE_FUNCTION = ()
class ShaderCompileMessage:
TEXTURE_NO_MATCHING_OVERLOADED_FUNCTION = """'texture' : no matching overloaded function found"""
default_compile_option = [ShaderCompileOption.USE_GLOBAL_TEXTURE_FUNCTION, ]
def parsing_macros(shader_code_list):
shader_macros = []
for shader_code in shader_code_list:
shader_macros.extend(re.findall(reDefineMacro, shader_code))
macros = OrderedDict()
def is_reserved_word(define_name):
return define_name == 'MATERIAL_COMPONENTS' or \
define_name in shader_types.keys() or \
define_name.startswith('UUID_')
for expression in shader_macros:
define_expression = expression.split('(')[0].strip()
if ' ' in define_expression:
define_name, define_value = define_expression.split(' ', 1)
else:
define_name, define_value = define_expression, ''
define_name = define_name.strip()
define_value = define_value.strip()
try:
if define_value not in ('float', 'int', 'bool'):
define_value = eval(define_value)
except:
pass
if not is_reserved_word(define_name):
macros[define_name] = define_value
all_variables = []
for shader_code in shader_code_list:
all_variables.extend(re.findall(reVariable, re.sub(reDefineMacro, '', shader_code)))
final_macros = OrderedDict()
for macro in macros:
# ignore reserved words
if macro in texture_targets:
continue
if macro in all_variables:
final_macros[macro] = macros[macro]
return final_macros
def parsing_uniforms(shader_code_list):
shader_uniforms = []
for shader_code in shader_code_list:
shader_uniforms.extend(re.findall(reFindUniform, shader_code))
uniforms = []
for uniform in shader_uniforms:
uniform_type, uniform_name = uniform
if '[' in uniform_name:
uniform = (uniform_type, uniform_name[:uniform_name.find('[')])
if uniform not in uniforms:
uniforms.append(uniform)
return uniforms
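# Example of what the regex-based parsing above yields (illustrative comment
# only, assuming a small GLSL snippet):
#   code = "uniform vec3 light_color;\nuniform float weights[4];"
#   parsing_uniforms([code]) -> [('vec3', 'light_color'), ('float', 'weights')]
# Array suffixes are stripped, and duplicate declarations across shader stages
# collapse to a single entry.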
def parsing_material_components(shader_code_list):
material_components = []
for code in shader_code_list:
depth = 0
is_in_material_block = False
# remove comment block
code = re.sub(reComment, "", code)
code_lines = code.splitlines()
for code_line in code_lines:
# remove comment
if "//" in code_line:
code_line = code_line.split("//")[0]
m = re.search(reMacro, code_line)
# find macro
if m is not None:
macro_type, macro_value = [group.strip() for group in m.groups()]
if macro_type in ('ifdef', 'ifndef', 'if'):
# increase depth
if is_in_material_block:
depth += 1
# start material block
elif macro_type == 'ifdef' and 'MATERIAL_COMPONENTS' == macro_value.split(" ")[0]:
is_in_material_block = True
depth = 1
elif macro_type == 'endif' and is_in_material_block:
depth -= 1
if depth == 0:
# exit material block
is_in_material_block = False
# gather common code in material component
elif is_in_material_block:
material_components.append(code_line)
return re.findall(reFindUniform, "\n".join(material_components))
class Shader:
default_macros = dict(MATERIAL_COMPONENTS=1)
def __init__(self, shader_name, shader_code):
logger.info("Load " + GetClassName(self) + " : " + shader_name)
self.name = shader_name
self.shader_code = shader_code
self.include_files = []
self.attribute = Attributes()
def get_save_data(self):
return self.shader_code
def get_attribute(self):
self.attribute.set_attribute("name", self.name)
return self.attribute
def generate_shader_codes(self, is_engine_resource, engine_shader_directory, project_shader_directory, shader_version, compile_option, external_macros={}):
shader_codes = {}
for shader_type_name in shader_types:
shader_type = shader_types[shader_type_name]
shader_code = self.__parsing_final_code__(
is_engine_resource,
engine_shader_directory,
project_shader_directory,
shader_type_name,
shader_version,
compile_option,
external_macros
)
# check void main
if re.search(reVoidMain, shader_code) is not None:
shader_codes[shader_type] = shader_code
return shader_codes
def __parsing_final_code__(self, is_engine_resource, engine_shader_directory, project_shader_directory, shader_type_name, shader_version, compile_option, external_macros={}):
if self.shader_code == "" or self.shader_code is None:
return ""
# remove comment block
shader_code = re.sub(reComment, "", self.shader_code)
code_lines = shader_code.splitlines()
# combine macro
combined_macros = OrderedDict()
# default macro
for macro in self.default_macros:
combined_macros[macro] = self.default_macros[macro]
# shader type macro
combined_macros[shader_type_name] = "1"
# external macro
if external_macros is None:
external_macros = {}
for macro in external_macros:
            if external_macros[macro] is None or external_macros[macro] == '':
combined_macros[macro] = 0
else:
combined_macros[macro] = external_macros[macro]
# insert shader version - ex) #version 430 core
final_code_lines = [shader_version, "# extension GL_EXT_texture_array : enable"]
# insert defines to final code
for macro in combined_macros:
final_code_lines.append("#define %s %s" % (macro, str(combined_macros[macro])))
# global texture function
if ShaderCompileOption.USE_GLOBAL_TEXTURE_FUNCTION in compile_option:
final_code_lines.append("#if __VERSION__ >= 130")
# ex) replace texture2D -> texutre, textureCubeLod -> textureLod
for texture_target in texture_targets:
if "Lod" in texture_target:
final_code_lines.append("#define %s textureLod" % texture_target)
elif "Grad" in texture_target:
final_code_lines.append("#define %s textureGrad" % texture_target)
else:
final_code_lines.append("#define %s texture" % texture_target)
final_code_lines.append("#endif")
# insert version as comment
include_files = dict() # { 'filename': uuid }
# do parsing
line_num = 0
macro_depth = 0
macro_result = [True, ]
macro_code_remove = True
while line_num < len(code_lines):
code = code_lines[line_num]
line_num += 1
# remove comment
if "//" in code:
code = code.split("//")[0]
# macro parsing
m = re.search(reMacroStart, code)
if m is not None:
macro, expression = m.groups()
expression = expression.strip()
if macro == 'define' or macro == 'undef':
define_expression = expression.split('(')[0].strip()
if ' ' in define_expression:
define_name, define_value = define_expression.split(' ', 1)
else:
define_name, define_value = define_expression, None
# check external macro
if macro == 'define' and define_name in external_macros:
continue # ignore legacy macro
if macro == 'define' and define_name not in combined_macros:
combined_macros[define_name] = define_value
elif macro == 'undef' and define_name in combined_macros:
combined_macros.pop(define_name)
elif macro == 'ifdef':
macro_depth += 1
if expression in combined_macros:
macro_result.append(True)
else:
macro_result.append(False)
elif macro == 'ifndef':
macro_depth += 1
if expression not in combined_macros:
macro_result.append(True)
else:
macro_result.append(False)
elif macro == 'if' or macro == 'elif' and not macro_result[macro_depth]:
variables = re.findall(reVariable, expression)
variables.sort(key=lambda x: len(x), reverse=True)
for variable in variables:
if variable in combined_macros:
while True:
final_value = combined_macros[variable]
if final_value not in combined_macros:
break
variable = final_value
expression = re.sub(reVariable, str(final_value), expression, 1)
expression = expression.replace('&&', ' and ')
expression = expression.replace('||', ' or ')
# expression = re.sub('\!?!\=', 'not ', expression)
                # Important : To avoid errors, convert the undeclared variables to zero.
expression = re.sub(reVariable, '0', expression)
result = True if eval(expression) else False
if macro == 'if':
macro_depth += 1
macro_result.append(result)
elif macro == 'elif':
macro_result[macro_depth] = result
elif macro == 'else':
macro_result[macro_depth] = not macro_result[macro_depth]
elif macro == 'endif':
macro_depth -= 1
macro_result.pop()
# be in failed macro block. continue
elif not macro_result[macro_depth]:
if not macro_code_remove:
# make comment
final_code_lines.append("// " + code)
continue
# is version code?
m = re.search(reVersion, code)
if m is not None:
version_code = m.groups()[0].strip()
if final_code_lines[0] == "" or version_code > final_code_lines[0]:
final_code_lines[0] = version_code
continue
# find include block
m = re.search(reInclude, code)
if m is not None:
is_include_file_exists = False
include_file_in_engine = os.path.join(engine_shader_directory, m.groups()[0])
include_file_in_project = os.path.join(project_shader_directory, m.groups()[0])
if is_engine_resource:
if os.path.exists(include_file_in_engine):
include_file = include_file_in_engine
is_include_file_exists = True
else:
include_file = include_file_in_project
else:
if os.path.exists(include_file_in_project):
include_file = include_file_in_project
is_include_file_exists = True
else:
include_file = include_file_in_engine
# insert include code
valid = False
if is_include_file_exists or os.path.exists(include_file):
try:
f = codecs.open(include_file, mode='r', encoding='utf-8')
include_source = f.read()
# remove comment block
include_source = re.sub(reComment, "", include_source)
include_code_lines = include_source.splitlines()
f.close()
valid = True
except BaseException:
logger.error(traceback.format_exc())
if valid:
if include_file in include_files:
unique_id = include_files[include_file]
else:
unique_id = "UUID_" + str(uuid.uuid3(uuid.NAMESPACE_DNS, include_file)).replace("-", "_")
include_files[include_file] = unique_id
if include_file not in self.include_files:
self.include_files.append(include_file)
# insert included code
final_code_lines.append("//------------ INCLUDE -------------//")
final_code_lines.append("// " + code) # include comment
include_code_lines.insert(0, "#ifndef %s" % unique_id)
include_code_lines.insert(1, "#define %s" % unique_id)
include_code_lines.append("#endif /* %s */" % unique_id)
code_lines = include_code_lines + code_lines[line_num:]
line_num = 0
if not valid:
logger.error("Shader parsing error.\n\t--> Cannot open %s file." % include_file)
continue
# append code block
final_code_lines.append(code)
return '\n'.join(final_code_lines)
|
backdoors/access/ssh_key.py | mehrdad-shokri/backdoorme | 796 | 11099188 | from backdoors.backdoor import *
class SSHKey(Backdoor):
prompt = Fore.RED + "(sshkey) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using ssh keys backdoor..."
self.core = core
self.options = {
}
self.allow_modules = False
self.help_text = INFO + "The SetUID backdoor works by setting the setuid bit on a binary while the user has root acccess, so that when that binary is later run by a user without root access, the binary is executed with root access.\n" + INFO +"By default, this backdoor flips the setuid bit on nano, so that if root access is ever lost, the attacker can SSH back in as an unpriviledged user and still be able to run nano (or any binary) as root. ('nano /etc/shadow')."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S chmod u+s %s" % (self.get_value("program"))
def do_exploit(self, args):
port = self.get_value("port")
target = self.core.curtarget
print(GOOD + "Initializing backdoor...")
target.ssh.exec_command("echo -e \"\n\n\n\" | ssh-keygen -t rsa")
os.system("ssh-copy-id " + target.uname + "@" + target.hostname)
#os.system("sshpass -p %s ssh-copy-id %s@%s" % (t.pword, t.uname, t.hostname))
print(GOOD + "Added SSH keys to target.")
|
admintools/decorators.py | goztrk/django-htk | 206 | 11099234 | # Django Imports
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
def company_officer_required(view_func):
"""Decorator for views that require access by company officer or staff user
"""
@login_required
def wrapped_view(request, *args, **kwargs):
user = request.user
if not(user.profile and user.profile.is_company_officer):
raise PermissionDenied
return view_func(request, *args, **kwargs)
return wrapped_view
def company_employee_required(view_func):
"""Decorator for views that require access by company employee or staff user
"""
@login_required
def wrapped_view(request, *args, **kwargs):
user = request.user
if not(user.profile and user.profile.is_company_employee):
raise PermissionDenied
return view_func(request, *args, **kwargs)
return wrapped_view
|
xnu-4903.241.1/tools/lldbmacros/kauth.py | DogeCoding/iOSCompiledRuntime | 672 | 11099281 | <filename>xnu-4903.241.1/tools/lldbmacros/kauth.py<gh_stars>100-1000
""" Please make sure you read the README file COMPLETELY BEFORE reading anything below.
It is very critical that you read coding guidelines in Section E in README file.
"""
from xnu import *
from utils import *
# Macro: walkkauthcache
@lldb_command('walkkauthcache')
def WalkKauthCache(cmd_args=None):
""" Walks the bins of the kauth credential hash cache and prints out the
number of bins and bin usage information.
"""
PrintKauthCache()
# EndMacro: walkkauthcache
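# Usage sketch (assumes an lldb session with the xnu macros loaded):
#   (lldb) walkkauthcache
# prints the bucket count of the kauth credential hash table followed by the
# number of credentials found in each bucket.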
def PrintKauthCache(cmd_args=None):
""" Routine to determine the size of the kauth cache, walk the bins
and print out usage information.
"""
anchor = unsigned(kern.globals.kauth_cred_table_anchor)
alloc_info_struct = anchor - sizeof('struct _mhead')
alloc_info = kern.GetValueFromAddress(alloc_info_struct, 'struct _mhead*')
alloc_size = unsigned(alloc_info.mlen) - (sizeof('struct _mhead'))
table_entries = alloc_size / sizeof('struct kauth_cred_entry_head')
anchor = kern.globals.kauth_cred_table_anchor
print "Cred cache has: " + str(table_entries) + " buckets\n"
print "Number of items in each bucket ... \n"
for i in range(0, table_entries):
numinbucket = 0
for kauth_cred in IterateTAILQ_HEAD(anchor[i], "cr_link"):
numinbucket += 1
#print str(kauth_cred.cr_posix)
#print str(kauth_cred.cr_ref)
print str(numinbucket) + "\n"
|
atlas/testing/acceptance/cleanup.py | DeepLearnI/atlas | 296 | 11099284 |
from common.cleanup import cleanup |
tools/tpm2_pkcs11/tpm2_ptool.py | shearl/tpm2-pkcs11 | 155 | 11099298 | <gh_stars>100-1000
# SPDX-License-Identifier: BSD-2-Clause
from .command import commandlet
# These imports are required to add the commandlet even though they appear unused
# Store level commands
from .commandlets_store import InitCommand # pylint: disable=unused-import # noqa
from .commandlets_store import DestroyCommand # pylint: disable=unused-import # noqa
# Token Level Commands
from .commandlets_token import AddTokenCommand # pylint: disable=unused-import # noqa
from .commandlets_token import AddEmptyTokenCommand # pylint: disable=unused-import # noqa
from .commandlets_token import RmTokenCommand # pylint: disable=unused-import # noqa
from .commandlets_token import VerifyCommand # pylint: disable=unused-import # noqa
from .commandlets_token import InitPinCommand # pylint: disable=unused-import # noqa
from .commandlets_token import ChangePinCommand # pylint: disable=unused-import # noqa
from .commandlets_keys import AddKeyCommand # pylint: disable=unused-import # noqa
from .commandlets_keys import ImportCommand # pylint: disable=unused-import # noqa
def main():
'''The main entry point.'''
commandlet.init('A tool for manipulating the tpm2-pkcs11 database')
if __name__ == '__main__':
main()
|
src/gino/aiocontextvars.py | maestro-1/gino | 1,376 | 11099310 | <filename>src/gino/aiocontextvars.py<gh_stars>1000+
import asyncio
import contextvars
import sys
import types
def patch_asyncio():
"""Patches asyncio to support :mod:`contextvars`.
This is automatically called when :mod:`gino` is imported. If Python version is 3.7
or greater, this function is a no-op.
"""
if not sys.version_info < (3, 7):
return
def _get_context():
state = _get_state()
ctx = getattr(state, "context", None)
if ctx is None:
ctx = contextvars.Context()
state.context = ctx
return ctx
def _set_context(ctx):
state = _get_state()
state.context = ctx
def _get_state():
loop = asyncio._get_running_loop()
if loop is None:
return contextvars._state
task = asyncio.Task.current_task(loop=loop)
return contextvars._state if task is None else task
contextvars._get_context = _get_context
contextvars._set_context = _set_context
def create_task(loop, coro):
task = loop._orig_create_task(coro)
if task._source_traceback:
del task._source_traceback[-1]
task.context = contextvars.copy_context()
return task
def _patch_loop(loop):
if loop and not hasattr(loop, "_orig_create_task"):
loop._orig_create_task = loop.create_task
loop.create_task = types.MethodType(create_task, loop)
return loop
def get_event_loop():
return _patch_loop(_get_event_loop())
def set_event_loop(loop):
return _set_event_loop(_patch_loop(loop))
def new_event_loop():
return _patch_loop(_new_event_loop())
_get_event_loop = asyncio.get_event_loop
_set_event_loop = asyncio.set_event_loop
_new_event_loop = asyncio.new_event_loop
asyncio.get_event_loop = asyncio.events.get_event_loop = get_event_loop
asyncio.set_event_loop = asyncio.events.set_event_loop = set_event_loop
asyncio.new_event_loop = asyncio.events.new_event_loop = new_event_loop
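# Illustrative sketch (an assumption, not gino's documented API): after
# patch_asyncio() runs on Python 3.6, ContextVar values behave per-task in a
# way that approximates the native 3.7+ semantics, e.g.:
#
#     import asyncio, contextvars
#     request_id = contextvars.ContextVar("request_id", default=None)
#
#     async def handler():
#         request_id.set("abc123")
#         await asyncio.sleep(0)
#         assert request_id.get() == "abc123"  # survives the await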
|